file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
gen_mike_input_rf_linux.py | import connection as con_params
from db_adapter.base import get_Pool, destroy_Pool
from db_adapter.constants import CURW_SIM_DATABASE, CURW_SIM_PASSWORD, CURW_SIM_USERNAME, CURW_SIM_PORT, CURW_SIM_HOST
from db_adapter.curw_sim.timeseries import Timeseries
from db_adapter.constants import COMMON_DATE_TIME_FORMAT
ROOT_DIRECTORY = '/home/uwcc-admin/curw_mike_data_handler'
# ROOT_DIRECTORY = 'D:\curw_mike_data_handlers'
OUTPUT_DIRECTORY = "/mnt/disks/wrf_nfs/mike/inputs"
def write_to_file(file_name, data):
with open(file_name, 'w+') as f:
f.write('\n'.join(data))
def | (file_name, data):
with open(file_name, 'a+') as f:
f.write('\n'.join(data))
def append_file_to_file(file_name, file_content):
with open(file_name, 'a+') as f:
f.write('\n')
f.write(file_content)
def makedir_if_not_exist_given_filepath(filename):
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
pass
def read_attribute_from_config_file(attribute, config, compulsory=False):
"""
:param attribute: key name of the config json file
:param config: loaded json file
:param compulsory: Boolean value: whether the attribute is must present or not in the config file
:return:
"""
if attribute in config and (config[attribute] != ""):
return config[attribute]
elif compulsory:
print("{} not specified in config file.".format(attribute))
exit(1)
else:
# print("{} not specified in config file.".format(attribute))
return None
def check_time_format(time):
try:
time = datetime.strptime(time, DATE_TIME_FORMAT)
if time.strftime('%S') != '00':
print("Seconds should be always 00")
exit(1)
if time.strftime('%M') not in ('00', '15', '30', '45'):
print("Minutes should be always multiple of 15")
exit(1)
return True
except Exception:
print("Time {} is not in proper format".format(time))
exit(1)
def list_of_lists_to_df_first_row_as_columns(data):
"""
:param data: data in list of lists format
:return: equivalent pandas dataframe
"""
return pd.DataFrame.from_records(data[1:], columns=data[0])
def replace_negative_numbers_with_nan(df):
num = df._get_numeric_data()
num[num < 0] = np.nan
return df
def replace_nan_with_row_average(df):
m = df.mean(axis=1)
for i, col in enumerate(df):
df.iloc[:, i] = df.iloc[:, i].fillna(m)
return df
def get_all_obs_rain_hashids_from_curw_sim(pool):
obs_id_hash_id_mappings = {}
expected_earliest_obs_end = (datetime.now() - timedelta(days=1)).strftime(COMMON_DATE_TIME_FORMAT)
connection = pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "SELECT `id`, `grid_id` FROM `run` where `model`=%s and `obs_end`>=%s;"
row_count = cursor.execute(sql_statement, ("hechms", expected_earliest_obs_end))
if row_count > 0:
results = cursor.fetchall()
for dict in results:
grid_id = dict.get("grid_id")
grid_id_parts = grid_id.split("_")
obs_id_hash_id_mappings[grid_id_parts[1]] = dict.get("id")
return obs_id_hash_id_mappings
else:
return None
except Exception as exception:
traceback.print_exc()
finally:
if connection is not None:
connection.close()
def prepare_mike_rf_input(start, end, coefficients):
try:
#### process staton based hybrid timeseries ####
distinct_obs_ids = coefficients.curw_obs_id.unique()
hybrid_ts_df = pd.DataFrame()
hybrid_ts_df['time'] = pd.date_range(start=start, end=end, freq='5min')
pool = get_Pool(host=con_params.CURW_SIM_HOST, port=con_params.CURW_SIM_PORT, user=con_params.CURW_SIM_USERNAME,
password=con_params.CURW_SIM_PASSWORD,
db=con_params.CURW_SIM_DATABASE)
TS = Timeseries(pool)
obs_id_hash_id_mapping = get_all_obs_rain_hashids_from_curw_sim(pool)
for obs_id in distinct_obs_ids:
# taking data from curw_sim database (data prepared based on active stations for hechms)
ts = TS.get_timeseries(id_=obs_id_hash_id_mapping.get(str(obs_id)), start_date=start, end_date=end)
ts.insert(0, ['time', obs_id])
ts_df = list_of_lists_to_df_first_row_as_columns(ts)
ts_df[obs_id] = ts_df[obs_id].astype('float64')
hybrid_ts_df = pd.merge(hybrid_ts_df, ts_df, how="left", on='time')
hybrid_ts_df.set_index('time', inplace=True)
hybrid_ts_df = hybrid_ts_df.resample('15min', label='right', closed='right').sum()
# pd.set_option('display.max_rows', hybrid_ts_df.shape[0]+1)
# pd.set_option('display.max_columns', hybrid_ts_df.shape[1]+1)
# print(hybrid_ts_df)
hybrid_ts_df = replace_negative_numbers_with_nan(hybrid_ts_df)
# print(hybrid_ts_df)
hybrid_ts_df = replace_nan_with_row_average(hybrid_ts_df)
# print(hybrid_ts_df)
#### process mike input ####
distinct_names = coefficients.name.unique()
mike_input = pd.DataFrame()
mike_input_initialized = False
for name in distinct_names:
catchment_coefficients = coefficients[coefficients.name == name]
# print(catchment_coefficients)
catchment = pd.DataFrame()
catchment_initialized = False
for index, row in catchment_coefficients.iterrows():
# print(index, row['curw_obs_id'], row['coefficient'])
if not catchment_initialized:
catchment = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment_initialized = True
else:
new = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment = pd.merge(catchment, new, how="left", on='time')
if not mike_input_initialized:
mike_input[name] = catchment.sum(axis=1)
mike_input_initialized = True
else:
mike_input = pd.merge(mike_input, (catchment.sum(axis=1)).to_frame(name=name), how="left", on='time')
mike_input.round(1)
return mike_input
except Exception:
traceback.print_exc()
finally:
destroy_Pool(pool)
def usage():
usageText = """
Usage: ./inputs/gen_mike_input_rf_linux.py [-s "YYYY-MM-DD HH:MM:SS"] [-e "YYYY-MM-DD HH:MM:SS"]
-h --help Show usage
-s --start_time Mike rainfall timeseries start time (e.g: "2019-06-05 00:00:00"). Default is 00:00:00, 3 days before today.
-e --end_time Mike rainfall timeseries end time (e.g: "2019-06-05 23:00:00"). Default is 00:00:00, 2 days after.
"""
print(usageText)
if __name__ == "__main__":
set_db_config_file_path(os.path.join(ROOT_DIRECTORY, 'db_adapter_config.json'))
try:
start_time = None
end_time = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:s:e:",
["help", "start_time=", "end_time="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-s", "--start_time"):
start_time = arg.strip()
elif opt in ("-e", "--end_time"):
end_time = arg.strip()
# Load config params
config = json.loads(open(os.path.join('inputs', 'configs', 'rain_config.json')).read())
output_dir = read_attribute_from_config_file('output_dir', config)
file_name = read_attribute_from_config_file('output_file_name', config)
if start_time is None:
start_time = (datetime.now() - timedelta(days=3)).strftime('%Y-%m-%d 00:00:00')
else:
check_time_format(time=start_time)
if end_time is None:
end_time = (datetime.now() + timedelta(days=2)).strftime('%Y-%m-%d 00:00:00')
else:
check_time_format(time=end_time)
if output_dir is None:
output_dir | append_to_file | identifier_name |
debug.rs | at_core::{HasSchema, SQLValueType, TxReport};
/// Represents a *datom* (assertion) in the store.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub struct Datom {
// TODO: generalize this.
pub e: EntidOrIdent,
pub a: EntidOrIdent,
pub v: edn::Value,
pub tx: i64,
pub added: Option<bool>,
}
/// Represents a set of datoms (assertions) in the store.
///
/// To make comparision easier, we deterministically order. The ordering is the ascending tuple
/// ordering determined by `(e, a, (value_type_tag, v), tx)`, where `value_type_tag` is an internal
/// value that is not exposed but is deterministic.
pub struct Datoms(pub Vec<Datom>);
/// Represents an ordered sequence of transactions in the store.
///
/// To make comparision easier, we deterministically order. The ordering is the ascending tuple
/// ordering determined by `(e, a, (value_type_tag, v), tx, added)`, where `value_type_tag` is an
/// internal value that is not exposed but is deterministic, and `added` is ordered such that
/// retracted assertions appear before added assertions.
pub struct Transactions(pub Vec<Datoms>);
/// Represents the fulltext values in the store.
pub struct FulltextValues(pub Vec<(i64, String)>);
impl Datom {
pub fn to_edn(&self) -> edn::Value {
let f = |entid: &EntidOrIdent| -> edn::Value {
match *entid {
EntidOrIdent::Entid(ref y) => edn::Value::Integer(*y),
EntidOrIdent::Ident(ref y) => edn::Value::Keyword(y.clone()),
}
};
let mut v = vec![f(&self.e), f(&self.a), self.v.clone()];
if let Some(added) = self.added {
v.push(edn::Value::Integer(self.tx));
v.push(edn::Value::Boolean(added));
}
edn::Value::Vector(v)
}
}
impl Datoms {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector((&self.0).iter().map(|x| x.to_edn()).collect())
}
}
impl Transactions {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector((&self.0).iter().map(|x| x.to_edn()).collect())
}
}
impl FulltextValues {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector(
(&self.0)
.iter()
.map(|&(x, ref y)| {
edn::Value::Vector(vec![edn::Value::Integer(x), edn::Value::Text(y.clone())])
})
.collect(),
)
}
}
/// Turn TypedValue::Ref into TypedValue::Keyword when it is possible.
trait ToIdent {
fn map_ident(self, schema: &Schema) -> Self;
}
impl ToIdent for TypedValue {
fn map_ident(self, schema: &Schema) -> Self {
if let TypedValue::Ref(e) = self {
schema
.get_ident(e)
.cloned()
.map(|i| i.into())
.unwrap_or(TypedValue::Ref(e))
} else {
self
}
}
}
/// Convert a numeric entid to an ident `Entid` if possible, otherwise a numeric `Entid`.
pub fn to_entid(schema: &Schema, entid: i64) -> EntidOrIdent {
schema
.get_ident(entid)
.map_or(EntidOrIdent::Entid(entid), |ident| {
EntidOrIdent::Ident(ident.clone())
})
}
// /// Convert a symbolic ident to an ident `Entid` if possible, otherwise a numeric `Entid`.
// pub fn to_ident(schema: &Schema, entid: i64) -> Entid {
// schema.get_ident(entid).map_or(Entid::Entid(entid), |ident| Entid::Ident(ident.clone()))
// }
/// Return the set of datoms in the store, ordered by (e, a, v, tx), but not including any datoms of
/// the form [... :db/txInstant ...].
pub fn datoms<S: Borrow<Schema>>(conn: &rusqlite::Connection, schema: &S) -> Result<Datoms> {
datoms_after(conn, schema, bootstrap::TX0 - 1)
}
/// Return the set of datoms in the store with transaction ID strictly greater than the given `tx`,
/// ordered by (e, a, v, tx).
///
/// The datom set returned does not include any datoms of the form [... :db/txInstant ...].
pub fn datoms_after<S: Borrow<Schema>>(
conn: &rusqlite::Connection,
schema: &S,
tx: i64,
) -> Result<Datoms> {
let borrowed_schema = schema.borrow();
let mut stmt: rusqlite::Statement = conn.prepare("SELECT e, a, v, value_type_tag, tx FROM datoms WHERE tx > ? ORDER BY e ASC, a ASC, value_type_tag ASC, v ASC, tx ASC")?;
let r: Result<Vec<_>> = stmt
.query_and_then(&[&tx], |row| {
let e: i64 = row.get(0)?;
let a: i64 = row.get(1)?;
if a == entids::DB_TX_INSTANT {
return Ok(None);
}
let v: rusqlite::types::Value = row.get(2)?;
let value_type_tag: i32 = row.get(3)?;
let attribute = borrowed_schema.require_attribute_for_entid(a)?;
let value_type_tag = if !attribute.fulltext {
value_type_tag
} else {
ValueType::Long.value_type_tag()
};
let typed_value =
TypedValue::from_sql_value_pair(v, value_type_tag)?.map_ident(borrowed_schema);
let (value, _) = typed_value.to_edn_value_pair();
let tx: i64 = row.get(4)?;
Ok(Some(Datom {
e: EntidOrIdent::Entid(e),
a: to_entid(borrowed_schema, a),
v: value,
tx,
added: None,
}))
})?
.collect();
Ok(Datoms(r?.into_iter().filter_map(|x| x).collect()))
}
/// Return the sequence of transactions in the store with transaction ID strictly greater than the
/// given `tx`, ordered by (tx, e, a, v).
///
/// Each transaction returned includes the [(transaction-tx) :db/txInstant ...] datom.
pub fn transactions_after<S: Borrow<Schema>>(
conn: &rusqlite::Connection,
schema: &S,
tx: i64,
) -> Result<Transactions> {
let borrowed_schema = schema.borrow();
let mut stmt: rusqlite::Statement = conn.prepare("SELECT e, a, v, value_type_tag, tx, added FROM transactions WHERE tx > ? ORDER BY tx ASC, e ASC, a ASC, value_type_tag ASC, v ASC, added ASC")?;
let r: Result<Vec<_>> = stmt
.query_and_then(&[&tx], |row| {
let e: i64 = row.get(0)?;
let a: i64 = row.get(1)?;
let v: rusqlite::types::Value = row.get(2)?;
let value_type_tag: i32 = row.get(3)?;
let attribute = borrowed_schema.require_attribute_for_entid(a)?;
let value_type_tag = if !attribute.fulltext {
value_type_tag
} else | ;
let typed_value =
TypedValue::from_sql_value_pair(v, value_type_tag)?.map_ident(borrowed_schema);
let (value, _) = typed_value.to_edn_value_pair();
let tx: i64 = row.get(4)?;
let added: bool = row.get(5)?;
Ok(Datom {
e: EntidOrIdent::Entid(e),
a: to_entid(borrowed_schema, a),
v: value,
tx,
added: Some(added),
})
})?
.collect();
// Group by tx.
let r: Vec<Datoms> = r?
.into_iter()
.group_by(|x| x.tx)
.into_iter()
.map(|(_key, group)| Datoms(group.collect()))
.collect();
Ok(Transactions(r))
}
/// Return the set of fulltext values in the store, ordered by rowid.
pub fn fulltext_values(conn: &rusqlite::Connection) -> Result<FulltextValues> {
let mut stmt: rusqlite::Statement =
conn.prepare("SELECT rowid, text FROM fulltext_values ORDER BY rowid")?;
let r: Result<Vec<_>> = stmt
.query_and_then([], |row| {
let rowid: i64 = row.get(0)?;
let text: String = row.get(1)?;
Ok((rowid, text))
})?
.collect();
r | {
ValueType::Long.value_type_tag()
} | conditional_block |
debug.rs | ))
} else {
self
}
}
}
/// Convert a numeric entid to an ident `Entid` if possible, otherwise a numeric `Entid`.
pub fn to_entid(schema: &Schema, entid: i64) -> EntidOrIdent {
schema
.get_ident(entid)
.map_or(EntidOrIdent::Entid(entid), |ident| {
EntidOrIdent::Ident(ident.clone())
})
}
// /// Convert a symbolic ident to an ident `Entid` if possible, otherwise a numeric `Entid`.
// pub fn to_ident(schema: &Schema, entid: i64) -> Entid {
// schema.get_ident(entid).map_or(Entid::Entid(entid), |ident| Entid::Ident(ident.clone()))
// }
/// Return the set of datoms in the store, ordered by (e, a, v, tx), but not including any datoms of
/// the form [... :db/txInstant ...].
pub fn datoms<S: Borrow<Schema>>(conn: &rusqlite::Connection, schema: &S) -> Result<Datoms> {
datoms_after(conn, schema, bootstrap::TX0 - 1)
}
/// Return the set of datoms in the store with transaction ID strictly greater than the given `tx`,
/// ordered by (e, a, v, tx).
///
/// The datom set returned does not include any datoms of the form [... :db/txInstant ...].
pub fn datoms_after<S: Borrow<Schema>>(
conn: &rusqlite::Connection,
schema: &S,
tx: i64,
) -> Result<Datoms> {
let borrowed_schema = schema.borrow();
let mut stmt: rusqlite::Statement = conn.prepare("SELECT e, a, v, value_type_tag, tx FROM datoms WHERE tx > ? ORDER BY e ASC, a ASC, value_type_tag ASC, v ASC, tx ASC")?;
let r: Result<Vec<_>> = stmt
.query_and_then(&[&tx], |row| {
let e: i64 = row.get(0)?;
let a: i64 = row.get(1)?;
if a == entids::DB_TX_INSTANT {
return Ok(None);
}
let v: rusqlite::types::Value = row.get(2)?;
let value_type_tag: i32 = row.get(3)?;
let attribute = borrowed_schema.require_attribute_for_entid(a)?;
let value_type_tag = if !attribute.fulltext {
value_type_tag
} else {
ValueType::Long.value_type_tag()
};
let typed_value =
TypedValue::from_sql_value_pair(v, value_type_tag)?.map_ident(borrowed_schema);
let (value, _) = typed_value.to_edn_value_pair();
let tx: i64 = row.get(4)?;
Ok(Some(Datom {
e: EntidOrIdent::Entid(e),
a: to_entid(borrowed_schema, a),
v: value,
tx,
added: None,
}))
})?
.collect();
Ok(Datoms(r?.into_iter().filter_map(|x| x).collect()))
}
/// Return the sequence of transactions in the store with transaction ID strictly greater than the
/// given `tx`, ordered by (tx, e, a, v).
///
/// Each transaction returned includes the [(transaction-tx) :db/txInstant ...] datom.
pub fn transactions_after<S: Borrow<Schema>>(
conn: &rusqlite::Connection,
schema: &S,
tx: i64,
) -> Result<Transactions> {
let borrowed_schema = schema.borrow();
let mut stmt: rusqlite::Statement = conn.prepare("SELECT e, a, v, value_type_tag, tx, added FROM transactions WHERE tx > ? ORDER BY tx ASC, e ASC, a ASC, value_type_tag ASC, v ASC, added ASC")?;
let r: Result<Vec<_>> = stmt
.query_and_then(&[&tx], |row| {
let e: i64 = row.get(0)?;
let a: i64 = row.get(1)?;
let v: rusqlite::types::Value = row.get(2)?;
let value_type_tag: i32 = row.get(3)?;
let attribute = borrowed_schema.require_attribute_for_entid(a)?;
let value_type_tag = if !attribute.fulltext {
value_type_tag
} else {
ValueType::Long.value_type_tag()
};
let typed_value =
TypedValue::from_sql_value_pair(v, value_type_tag)?.map_ident(borrowed_schema);
let (value, _) = typed_value.to_edn_value_pair();
let tx: i64 = row.get(4)?;
let added: bool = row.get(5)?;
Ok(Datom {
e: EntidOrIdent::Entid(e),
a: to_entid(borrowed_schema, a),
v: value,
tx,
added: Some(added),
})
})?
.collect();
// Group by tx.
let r: Vec<Datoms> = r?
.into_iter()
.group_by(|x| x.tx)
.into_iter()
.map(|(_key, group)| Datoms(group.collect()))
.collect();
Ok(Transactions(r))
}
/// Return the set of fulltext values in the store, ordered by rowid.
pub fn fulltext_values(conn: &rusqlite::Connection) -> Result<FulltextValues> {
let mut stmt: rusqlite::Statement =
conn.prepare("SELECT rowid, text FROM fulltext_values ORDER BY rowid")?;
let r: Result<Vec<_>> = stmt
.query_and_then([], |row| {
let rowid: i64 = row.get(0)?;
let text: String = row.get(1)?;
Ok((rowid, text))
})?
.collect();
r.map(FulltextValues)
}
/// Execute the given `sql` query with the given `params` and format the results as a
/// tab-and-newline formatted string suitable for debug printing.
///
/// The query is printed followed by a newline, then the returned columns followed by a newline, and
/// then the data rows and columns. All columns are aligned.
pub fn dump_sql_query(
conn: &rusqlite::Connection,
sql: &str,
params: &[&dyn ToSql],
) -> Result<String> {
let mut stmt: rusqlite::Statement = conn.prepare(sql)?;
let mut tw = TabWriter::new(Vec::new()).padding(2);
writeln!(&mut tw, "{}", sql).unwrap();
for column_name in stmt.column_names() {
write!(&mut tw, "{}\t", column_name).unwrap();
}
writeln!(&mut tw).unwrap();
let r: Result<Vec<_>> = stmt
.query_and_then(params, |row| {
for i in 0..row.as_ref().column_count() {
let value: rusqlite::types::Value = row.get(i)?;
write!(&mut tw, "{:?}\t", value).unwrap();
}
writeln!(&mut tw).unwrap();
Ok(())
})?
.collect();
r?;
let dump = String::from_utf8(tw.into_inner().unwrap()).unwrap();
Ok(dump)
}
// A connection that doesn't try to be clever about possibly sharing its `Schema`. Compare to
// `mentat::Conn`.
pub struct TestConn {
pub sqlite: rusqlite::Connection,
pub partition_map: PartitionMap,
pub schema: Schema,
}
impl TestConn {
fn assert_materialized_views(&self) {
let materialized_ident_map = read_ident_map(&self.sqlite).expect("ident map");
let materialized_attribute_map = read_attribute_map(&self.sqlite).expect("schema map");
let materialized_schema = Schema::from_ident_map_and_attribute_map(
materialized_ident_map,
materialized_attribute_map,
)
.expect("schema");
assert_eq!(materialized_schema, self.schema);
}
pub fn transact<I>(&mut self, transaction: I) -> Result<TxReport>
where
I: Borrow<str>,
{
// Failure to parse the transaction is a coding error, so we unwrap.
let entities = edn::parse::entities(transaction.borrow()).unwrap_or_else(|_| {
panic!("to be able to parse {} into entities", transaction.borrow())
});
let details = {
// The block scopes the borrow of self.sqlite.
// We're about to write, so go straight ahead and get an IMMEDIATE transaction.
let tx = self
.sqlite
.transaction_with_behavior(TransactionBehavior::Immediate)?;
// Applying the transaction can fail, so we don't unwrap.
let details = transact(
&tx,
self.partition_map.clone(),
&self.schema,
&self.schema,
NullWatcher(),
entities,
)?;
tx.commit()?;
details
};
let (report, next_partition_map, next_schema, _watcher) = details;
self.partition_map = next_partition_map;
if let Some(next_schema) = next_schema {
self.schema = next_schema;
}
// Verify that we've updated the materialized views during transacting.
self.assert_materialized_views();
Ok(report)
}
pub fn | transact_simple_terms | identifier_name |
|
debug.rs | <bool>,
}
/// Represents a set of datoms (assertions) in the store.
///
/// To make comparision easier, we deterministically order. The ordering is the ascending tuple
/// ordering determined by `(e, a, (value_type_tag, v), tx)`, where `value_type_tag` is an internal
/// value that is not exposed but is deterministic.
pub struct Datoms(pub Vec<Datom>);
/// Represents an ordered sequence of transactions in the store.
///
/// To make comparision easier, we deterministically order. The ordering is the ascending tuple
/// ordering determined by `(e, a, (value_type_tag, v), tx, added)`, where `value_type_tag` is an
/// internal value that is not exposed but is deterministic, and `added` is ordered such that
/// retracted assertions appear before added assertions.
pub struct Transactions(pub Vec<Datoms>);
/// Represents the fulltext values in the store.
pub struct FulltextValues(pub Vec<(i64, String)>);
impl Datom {
pub fn to_edn(&self) -> edn::Value {
let f = |entid: &EntidOrIdent| -> edn::Value {
match *entid {
EntidOrIdent::Entid(ref y) => edn::Value::Integer(*y),
EntidOrIdent::Ident(ref y) => edn::Value::Keyword(y.clone()),
}
};
let mut v = vec![f(&self.e), f(&self.a), self.v.clone()];
if let Some(added) = self.added {
v.push(edn::Value::Integer(self.tx));
v.push(edn::Value::Boolean(added));
}
edn::Value::Vector(v)
}
}
impl Datoms {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector((&self.0).iter().map(|x| x.to_edn()).collect())
}
}
impl Transactions {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector((&self.0).iter().map(|x| x.to_edn()).collect())
}
}
impl FulltextValues {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector(
(&self.0)
.iter()
.map(|&(x, ref y)| {
edn::Value::Vector(vec![edn::Value::Integer(x), edn::Value::Text(y.clone())])
})
.collect(),
)
}
}
/// Turn TypedValue::Ref into TypedValue::Keyword when it is possible.
trait ToIdent {
fn map_ident(self, schema: &Schema) -> Self;
}
impl ToIdent for TypedValue {
fn map_ident(self, schema: &Schema) -> Self {
if let TypedValue::Ref(e) = self {
schema
.get_ident(e)
.cloned()
.map(|i| i.into())
.unwrap_or(TypedValue::Ref(e))
} else {
self
}
}
}
/// Convert a numeric entid to an ident `Entid` if possible, otherwise a numeric `Entid`.
pub fn to_entid(schema: &Schema, entid: i64) -> EntidOrIdent {
schema
.get_ident(entid)
.map_or(EntidOrIdent::Entid(entid), |ident| {
EntidOrIdent::Ident(ident.clone())
})
}
// /// Convert a symbolic ident to an ident `Entid` if possible, otherwise a numeric `Entid`.
// pub fn to_ident(schema: &Schema, entid: i64) -> Entid {
// schema.get_ident(entid).map_or(Entid::Entid(entid), |ident| Entid::Ident(ident.clone()))
// }
/// Return the set of datoms in the store, ordered by (e, a, v, tx), but not including any datoms of
/// the form [... :db/txInstant ...].
pub fn datoms<S: Borrow<Schema>>(conn: &rusqlite::Connection, schema: &S) -> Result<Datoms> {
datoms_after(conn, schema, bootstrap::TX0 - 1)
}
/// Return the set of datoms in the store with transaction ID strictly greater than the given `tx`,
/// ordered by (e, a, v, tx).
///
/// The datom set returned does not include any datoms of the form [... :db/txInstant ...].
pub fn datoms_after<S: Borrow<Schema>>(
conn: &rusqlite::Connection,
schema: &S,
tx: i64,
) -> Result<Datoms> {
let borrowed_schema = schema.borrow();
let mut stmt: rusqlite::Statement = conn.prepare("SELECT e, a, v, value_type_tag, tx FROM datoms WHERE tx > ? ORDER BY e ASC, a ASC, value_type_tag ASC, v ASC, tx ASC")?;
let r: Result<Vec<_>> = stmt
.query_and_then(&[&tx], |row| {
let e: i64 = row.get(0)?;
let a: i64 = row.get(1)?;
if a == entids::DB_TX_INSTANT {
return Ok(None);
}
let v: rusqlite::types::Value = row.get(2)?;
let value_type_tag: i32 = row.get(3)?;
let attribute = borrowed_schema.require_attribute_for_entid(a)?;
let value_type_tag = if !attribute.fulltext {
value_type_tag
} else {
ValueType::Long.value_type_tag()
};
let typed_value =
TypedValue::from_sql_value_pair(v, value_type_tag)?.map_ident(borrowed_schema);
let (value, _) = typed_value.to_edn_value_pair();
let tx: i64 = row.get(4)?;
Ok(Some(Datom {
e: EntidOrIdent::Entid(e),
a: to_entid(borrowed_schema, a),
v: value,
tx,
added: None,
}))
})?
.collect();
Ok(Datoms(r?.into_iter().filter_map(|x| x).collect()))
}
/// Return the sequence of transactions in the store with transaction ID strictly greater than the
/// given `tx`, ordered by (tx, e, a, v).
///
/// Each transaction returned includes the [(transaction-tx) :db/txInstant ...] datom.
pub fn transactions_after<S: Borrow<Schema>>(
conn: &rusqlite::Connection,
schema: &S,
tx: i64,
) -> Result<Transactions> {
let borrowed_schema = schema.borrow();
let mut stmt: rusqlite::Statement = conn.prepare("SELECT e, a, v, value_type_tag, tx, added FROM transactions WHERE tx > ? ORDER BY tx ASC, e ASC, a ASC, value_type_tag ASC, v ASC, added ASC")?;
let r: Result<Vec<_>> = stmt
.query_and_then(&[&tx], |row| {
let e: i64 = row.get(0)?;
let a: i64 = row.get(1)?;
let v: rusqlite::types::Value = row.get(2)?;
let value_type_tag: i32 = row.get(3)?;
let attribute = borrowed_schema.require_attribute_for_entid(a)?;
let value_type_tag = if !attribute.fulltext {
value_type_tag
} else {
ValueType::Long.value_type_tag()
};
let typed_value =
TypedValue::from_sql_value_pair(v, value_type_tag)?.map_ident(borrowed_schema);
let (value, _) = typed_value.to_edn_value_pair();
let tx: i64 = row.get(4)?;
let added: bool = row.get(5)?;
Ok(Datom {
e: EntidOrIdent::Entid(e),
a: to_entid(borrowed_schema, a),
v: value,
tx,
added: Some(added),
})
})?
.collect();
// Group by tx.
let r: Vec<Datoms> = r?
.into_iter()
.group_by(|x| x.tx)
.into_iter()
.map(|(_key, group)| Datoms(group.collect()))
.collect();
Ok(Transactions(r))
}
/// Return the set of fulltext values in the store, ordered by rowid.
pub fn fulltext_values(conn: &rusqlite::Connection) -> Result<FulltextValues> {
let mut stmt: rusqlite::Statement =
conn.prepare("SELECT rowid, text FROM fulltext_values ORDER BY rowid")?;
let r: Result<Vec<_>> = stmt
.query_and_then([], |row| {
let rowid: i64 = row.get(0)?;
let text: String = row.get(1)?;
Ok((rowid, text))
})?
.collect();
r.map(FulltextValues)
}
/// Execute the given `sql` query with the given `params` and format the results as a
/// tab-and-newline formatted string suitable for debug printing.
///
/// The query is printed followed by a newline, then the returned columns followed by a newline, and
/// then the data rows and columns. All columns are aligned.
pub fn dump_sql_query(
conn: &rusqlite::Connection, | sql: &str, | random_line_split |
|
sub_files.py | Rrbs.as' % encValData],
('bed', 'enhancerAssay'): ['-type=bed9+1', chromInfo, '-as=%s/as/enhancerAssay.as' % encValData],
('bigBed', 'enhancerAssay'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/enhancerAssay.as' % encValData],
('bed', 'modPepMap'): ['-type=bed9+7', chromInfo, '-as=%s/as/modPepMap.as' % encValData],
('bigBed', 'modPepMap'): ['-type=bigBed9+7', chromInfo, '-as=%s/as/modPepMap.as' % encValData],
('bed', 'pepMap'): ['-type=bed9+7', chromInfo, '-as=%s/as/pepMap.as' % encValData],
('bigBed', 'pepMap'): ['-type=bigBed9+7', chromInfo, '-as=%s/as/pepMap.as' % encValData],
('bed', 'openChromCombinedPeaks'): ['-type=bed9+12', chromInfo, '-as=%s/as/openChromCombinedPeaks.as' % encValData],
('bigBed', 'openChromCombinedPeaks'): ['-type=bigBed9+12', chromInfo, '-as=%s/as/openChromCombinedPeaks.as' % encValData],
('bed', 'peptideMapping'): ['-type=bed6+4', chromInfo, '-as=%s/as/peptideMapping.as' % encValData],
('bigBed', 'peptideMapping'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/peptideMapping.as' % encValData],
('bed', 'shortFrags'): ['-type=bed6+21', chromInfo, '-as=%s/as/shortFrags.as' % encValData],
('bigBed', 'shortFrags'): ['-type=bigBed6+21', chromInfo, '-as=%s/as/shortFrags.as' % encValData],
('rcc', None): ['-type=rcc'],
('idat', None): ['-type=idat'],
('bedpe', None): ['-type=bed3+', chromInfo],
('bedpe', 'mango'): ['-type=bed3+', chromInfo],
('gtf', None): None,
('tar', None): None,
('tsv', None): None,
('csv', None): None,
('2bit', None): None,
('csfasta', None): ['-type=csfasta'],
('csqual', None): ['-type=csqual'],
('CEL', None): None,
('sam', None): None,
('wig', None): None,
('hdf5', None): None,
('gff', None): None
}
validate_args = validate_map.get((file_format, file_format_type))
if validate_args is None:
logger.warning('No rules to validate file_format %s and file_format_type %s' %(file_format, file_format_type))
return False
if (file_format, file_format_type) in [('bed', 'bed3'), ('bed', 'bed3+')] and as_file: #TODO: Update file schema and change to bed3+
validate_args = ['-type=bed3+', chromInfo] #TODO: Update file schema. This is to foce bed3+ for validateFiles but pass bed3 to file_format_type
validate_args.append(as_file)
tokens = ['validateFiles'] + validate_args + [path]
logger.debug('Running: %s' %(tokens))
try:
subprocess.check_output(tokens)
except subprocess.CalledProcessError as e:
logger.error("validateFiles returned %s" %(e.output))
return False
else:
logger.debug("%s: validateFiles passed" %(path))
return True
def post_file(file_metadata, server, keypair, dryrun=False):
local_path = file_metadata.get('submitted_file_name')
if not file_metadata.get('md5sum'):
file_metadata['md5sum'] = md5(local_path)
try:
logger.debug("POST JSON: %s" %(json.dumps(file_metadata)))
except:
pass
if dryrun:
file_obj = copy.copy(file_metadata)
file_obj.update({'accession':None})
return file_obj
else:
url = urlparse.urljoin(server,'/files/')
r = requests.post(url, auth=keypair, headers=POST_HEADERS, data=json.dumps(file_metadata))
try:
r.raise_for_status()
except:
logger.warning('POST failed: %s %s' %(r.status_code, r.reason))
logger.warning(r.text)
return None
else:
return r.json()['@graph'][0]
def upload_file(file_obj, dryrun=False):
if dryrun:
return None
else:
creds = file_obj['upload_credentials']
logger.debug('AWS creds: %s' %(creds))
env = os.environ.copy()
env.update({
'AWS_ACCESS_KEY_ID': creds['access_key'],
'AWS_SECRET_ACCESS_KEY': creds['secret_key'],
'AWS_SECURITY_TOKEN': creds['session_token'],
})
path = file_obj.get('submitted_file_name')
try:
subprocess.check_call(['aws', 's3', 'cp', path, creds['upload_url']], env=env)
except subprocess.CalledProcessError as e:
# The aws command returns a non-zero exit code on error.
logger.error("AWS upload failed with exit code %d" %(e.returncode))
return e.returncode
else:
return 0
def get_asfile(uri_json, server, keypair):
try:
uris = json.loads(uri_json)
except:
logger.error("Could not parse as JSON: %s" %(uri_json))
return None
for uri in uris:
url = server + '/' + uri
r = requests.get(url, headers=GET_HEADERS, auth=keypair)
try:
r.raise_for_status()
except:
logger.error("Failed to get ENCODE object %s" %(uri))
return None
document_obj = r.json()
r = requests.get(urlparse.urljoin(server, document_obj['uuid'] + '/' + document_obj['attachment']['href']), auth=keypair)
try:
r.raise_for_status()
except:
logger.error("Failed to download ENCODE document %s" %(uri))
return None
f = tempfile.NamedTemporaryFile(delete=False)
f.write(r.text)
return f
def process_row(row):
json_payload = {}
for key,value in row.iteritems():
if not key:
continue
try:
json_payload.update({key:json.loads(value)})
except:
try:
json_payload.update({key:json.loads('"%s"' %(value))})
except:
logger.warning('Could not convert field %s value %s to JSON' %(n,key,value))
return None
return json_payload
def main():
args = get_args()
server = args.server
keypair = (args.authid, args.authpw)
if not test_encode_keys(server, keypair):
logger.error("Invalid ENCODE server or keys: server=%s authid=%s authpw=%s" %(args.server,args.authid,args.authpw))
sys.exit(1)
try:
subprocess.check_output('which validateFiles', shell=True)
except:
logger.error("validateFiles is not in path. See http://hgdownload.cse.ucsc.edu/admin/exe/")
sys.exit(1)
input_csv, output_csv = init_csvs(args.infile, args.outfile)
output_csv.writeheader()
for n,row in enumerate(input_csv,start=2): #row 1 is the header
as_file = get_asfile(row.get('file_format_specifications'), server, keypair)
if as_file:
as_file.close() #validateFiles needs a closed file for -as, otherwise it gives a return code of -11
validated = validate_file(row, args.encvaldata, row.get('assembly'), as_file.name)
os.unlink(as_file.name)
else:
validated = validate_file(row, args.encvaldata, row.get('assembly'))
if not validated:
logger.warning('Skipping row %d: file %s failed validation' %(n,row['submitted_file_name']))
continue
json_payload = process_row(row)
if not json_payload:
logger.warning('Skipping row %d: invalid field format for JSON' %(n))
continue
file_object = post_file(json_payload, server, keypair, args.dryrun)
if not file_object:
logger.warning('Skipping row %d: POST file object failed' %(n))
continue
aws_return_code = upload_file(file_object, args.dryrun)
if aws_return_code:
logger.warning('Row %d: Non-zero AWS upload return code %d' %(aws_return_code))
output_row = {}
for key in output_csv.fieldnames:
output_row.update({key:file_object.get(key)})
output_row.update({'aws_return':aws_return_code})
output_csv.writerow(output_row)
if __name__ == '__main__':
| main() | conditional_block |
|
sub_files.py | ():
import argparse
parser = argparse.ArgumentParser(
description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile', help='CSV file metadata to POST', nargs='?', type=argparse.FileType('rU'), default=sys.stdin)
parser.add_argument('--outfile', help='CSV output report', type=argparse.FileType(mode='wb',bufsize=0), default=sys.stdout)
parser.add_argument('--debug', help="Print debug messages", default=False, action='store_true')
parser.add_argument('--server', help="The server to POST to.", default=os.getenv('ENCODE_SERVER',None))
parser.add_argument('--authid', help="The authorization key ID for the server.", default=os.getenv('ENCODE_AUTHID',None))
parser.add_argument('--authpw', help="The authorization key for the server.", default=os.getenv('ENCODE_AUTHPW',None))
parser.add_argument('--dryrun', help="Don't POST to the database, just validate input.", default=False, action='store_true')
parser.add_argument('--encvaldata', help="Directory in which https://github.com/ENCODE-DCC/encValData.git is cloned.", default=os.path.expanduser("~/encValData/"))
args = parser.parse_args()
if args.debug:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
else: #use the defaulf logging level
logging.basicConfig(format='%(levelname)s:%(message)s')
if args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
if not args.server:
logger.error('Server name must be specified on the command line or in environment ENCODE_SERVER')
sys.exit(1)
if not args.authid or not args.authpw:
logger.error('Authorization keypair must be specified on the command line or in environment ENCODE_AUTHID, ENCODE_AUTHPW')
sys.exit(1)
if not os.path.isdir(args.encvaldata):
logger.error('No ENCODE validation data. git clone https://github.com/ENCODE-DCC/encValData.git')
sys.exit(1)
return args
def md5(path):
md5sum = hashlib.md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(1024*1024), b''):
md5sum.update(chunk)
return md5sum.hexdigest()
# This does not depend on hashlib
# if 'md5_command' not in globals():
# global md5_command
# if subprocess.check_output('which md5', shell=True):
# md5_command = 'md5 -q'
# elif subprocess.check_output('which md5sum', shell=True):
# md5_command = 'md5sum'
# else:
# md5_command = ''
# if not md5_command:
# logger.error("No MD5 command found (tried md5 and md5sum)")
# return None
# else:
# try:
# md5_output = subprocess.check_output(' '.join([md5_command, fn]), shell=True)
# except:
# return None
# else:
# return md5_output.partition(' ')[0].rstrip()
def test_encode_keys(server,keypair):
test_URI = "ENCBS000AAA"
url = urlparse.urljoin(server,test_URI)
r = requests.get(url, auth=keypair, headers=GET_HEADERS)
try:
r.raise_for_status()
except:
logger.debug('test_encode_keys got response %s' %(r.text))
return False
else:
return True
def input_csv(fh):
csv_args = CSV_ARGS
input_fieldnames = csv.reader(fh, **csv_args).next()
return csv.DictReader(fh, fieldnames=input_fieldnames, **csv_args)
def output_csv(fh,fieldnames):
csv_args = CSV_ARGS
additional_fields = ['accession','aws_return']
output_fieldnames = [fn for fn in fieldnames if fn] + additional_fields
output = csv.DictWriter(fh, fieldnames=output_fieldnames, **csv_args)
return output
def init_csvs(in_fh,out_fh):
input_reader = input_csv(in_fh)
output_writer = output_csv(out_fh,input_reader.fieldnames)
return input_reader, output_writer
def validate_file(f_obj, encValData, assembly=None, as_path=None):
path = f_obj.get('submitted_file_name')
file_format = f_obj.get('file_format')
file_format_type = f_obj.get('file_format_type')
output_type = f_obj.get('output_type')
gzip_types = [
"CEL",
"bam",
"bed",
"csfasta",
"csqual",
"fasta",
"fastq",
"gff",
"gtf",
"tar",
"sam",
"wig"
]
magic_number = open(path, 'rb').read(2)
is_gzipped = magic_number == b'\x1f\x8b'
if file_format in gzip_types:
if not is_gzipped:
logger.warning('%s: Expect %s format to be gzipped' %(path,file_format))
else:
if is_gzipped:
logger.warning('%s: Expect %s format to be un-gzipped' %(path,file_format))
if assembly:
chromInfo = '-chromInfo=%s/%s/chrom.sizes' % (encValData, assembly)
else:
chromInfo = None
if as_path:
as_file = '-as=%s' %(as_path)
else:
as_file = None
validate_map = {
('fasta', None): ['-type=fasta'],
('fastq', None): ['-type=fastq'],
('bam', None): ['-type=bam', chromInfo],
('bigWig', None): ['-type=bigWig', chromInfo],
('bed', 'bed3'): ['-type=bed3', chromInfo],
('bigBed', 'bed3'): ['-type=bed3', chromInfo],
('bed', 'bed3+'): ['-type=bed3+', chromInfo],
('bigBed', 'bed3+'): ['-type=bed3+', chromInfo],
('bed', 'bed6'): ['-type=bed6+', chromInfo],
('bigBed', 'bed6'): ['-type=bigBed6+', chromInfo],
('bed', 'bedLogR'): ['-type=bed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bigBed', 'bedLogR'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bed', 'bedMethyl'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bigBed', 'bedMethyl'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bed', 'broadPeak'): ['-type=bed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bigBed', 'broadPeak'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bed', 'gappedPeak'): ['-type=bed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bigBed', 'gappedPeak'): ['-type=bigBed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bed', 'narrowPeak'): ['-type=bed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bigBed', 'narrowPeak'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('bigBed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('bed', 'bedExonScore'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedExonScore.as' % encValData],
('bigBed', 'bedExonScore'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/bedExonScore.as' % encValData],
('bed', 'bedRrbs'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedRrbs.as' % encValData],
('bigBed', 'bedRrbs'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedRrbs.as' % encValData],
('bed', 'enhancerAssay'): ['-type=bed9+1', chromInfo, '-as=%s | get_args | identifier_name |
|
sub_files.py | 1024*1024), b''):
md5sum.update(chunk)
return md5sum.hexdigest()
# This does not depend on hashlib
# if 'md5_command' not in globals():
# global md5_command
# if subprocess.check_output('which md5', shell=True):
# md5_command = 'md5 -q'
# elif subprocess.check_output('which md5sum', shell=True):
# md5_command = 'md5sum'
# else:
# md5_command = ''
# if not md5_command:
# logger.error("No MD5 command found (tried md5 and md5sum)")
# return None
# else:
# try:
# md5_output = subprocess.check_output(' '.join([md5_command, fn]), shell=True)
# except:
# return None
# else:
# return md5_output.partition(' ')[0].rstrip()
def test_encode_keys(server,keypair):
|
def input_csv(fh):
csv_args = CSV_ARGS
input_fieldnames = csv.reader(fh, **csv_args).next()
return csv.DictReader(fh, fieldnames=input_fieldnames, **csv_args)
def output_csv(fh,fieldnames):
csv_args = CSV_ARGS
additional_fields = ['accession','aws_return']
output_fieldnames = [fn for fn in fieldnames if fn] + additional_fields
output = csv.DictWriter(fh, fieldnames=output_fieldnames, **csv_args)
return output
def init_csvs(in_fh,out_fh):
input_reader = input_csv(in_fh)
output_writer = output_csv(out_fh,input_reader.fieldnames)
return input_reader, output_writer
def validate_file(f_obj, encValData, assembly=None, as_path=None):
path = f_obj.get('submitted_file_name')
file_format = f_obj.get('file_format')
file_format_type = f_obj.get('file_format_type')
output_type = f_obj.get('output_type')
gzip_types = [
"CEL",
"bam",
"bed",
"csfasta",
"csqual",
"fasta",
"fastq",
"gff",
"gtf",
"tar",
"sam",
"wig"
]
magic_number = open(path, 'rb').read(2)
is_gzipped = magic_number == b'\x1f\x8b'
if file_format in gzip_types:
if not is_gzipped:
logger.warning('%s: Expect %s format to be gzipped' %(path,file_format))
else:
if is_gzipped:
logger.warning('%s: Expect %s format to be un-gzipped' %(path,file_format))
if assembly:
chromInfo = '-chromInfo=%s/%s/chrom.sizes' % (encValData, assembly)
else:
chromInfo = None
if as_path:
as_file = '-as=%s' %(as_path)
else:
as_file = None
validate_map = {
('fasta', None): ['-type=fasta'],
('fastq', None): ['-type=fastq'],
('bam', None): ['-type=bam', chromInfo],
('bigWig', None): ['-type=bigWig', chromInfo],
('bed', 'bed3'): ['-type=bed3', chromInfo],
('bigBed', 'bed3'): ['-type=bed3', chromInfo],
('bed', 'bed3+'): ['-type=bed3+', chromInfo],
('bigBed', 'bed3+'): ['-type=bed3+', chromInfo],
('bed', 'bed6'): ['-type=bed6+', chromInfo],
('bigBed', 'bed6'): ['-type=bigBed6+', chromInfo],
('bed', 'bedLogR'): ['-type=bed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bigBed', 'bedLogR'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bed', 'bedMethyl'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bigBed', 'bedMethyl'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bed', 'broadPeak'): ['-type=bed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bigBed', 'broadPeak'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bed', 'gappedPeak'): ['-type=bed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bigBed', 'gappedPeak'): ['-type=bigBed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bed', 'narrowPeak'): ['-type=bed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bigBed', 'narrowPeak'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('bigBed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('bed', 'bedExonScore'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedExonScore.as' % encValData],
('bigBed', 'bedExonScore'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/bedExonScore.as' % encValData],
('bed', 'bedRrbs'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedRrbs.as' % encValData],
('bigBed', 'bedRrbs'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedRrbs.as' % encValData],
('bed', 'enhancerAssay'): ['-type=bed9+1', chromInfo, '-as=%s/as/enhancerAssay.as' % encValData],
('bigBed', 'enhancerAssay'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/enhancerAssay.as' % encValData],
('bed', 'modPepMap'): ['-type=bed9+7', chromInfo, '-as=%s/as/modPepMap.as' % encValData],
('bigBed', 'modPepMap'): ['-type=bigBed9+7', chromInfo, '-as=%s/as/modPepMap.as' % encValData],
('bed', 'pepMap'): ['-type=bed9+7', chromInfo, '-as=%s/as/pepMap.as' % encValData],
('bigBed', 'pepMap'): ['-type=bigBed9+7', chromInfo, '-as=%s/as/pepMap.as' % encValData],
('bed', 'openChromCombinedPeaks'): ['-type=bed9+12', chromInfo, '-as=%s/as/openChromCombinedPeaks.as' % encValData],
('bigBed', 'openChromCombinedPeaks'): ['-type=bigBed9+12', chromInfo, '-as=%s/as/openChromCombinedPeaks.as' % encValData],
('bed', 'peptideMapping'): ['-type=bed6+4', chromInfo, '-as=%s/as/peptideMapping.as' % encValData],
('bigBed', 'peptideMapping'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/peptideMapping.as' % encValData],
('bed', 'shortFrags'): ['-type=bed6+21', chromInfo, '-as=%s/as/shortFrags.as' % encValData],
('bigBed', 'shortFrags'): ['-type=bigBed6+21', chromInfo, '-as=%s/as/shortFrags.as' % encValData],
('rcc', None): ['-type=rcc'],
('idat', None): ['-type=idat'],
('bedpe', None): ['-type=bed3+', chromInfo],
| test_URI = "ENCBS000AAA"
url = urlparse.urljoin(server,test_URI)
r = requests.get(url, auth=keypair, headers=GET_HEADERS)
try:
r.raise_for_status()
except:
logger.debug('test_encode_keys got response %s' %(r.text))
return False
else:
return True | identifier_body |
sub_files.py | Methyl'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bed', 'broadPeak'): ['-type=bed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bigBed', 'broadPeak'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bed', 'gappedPeak'): ['-type=bed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bigBed', 'gappedPeak'): ['-type=bigBed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bed', 'narrowPeak'): ['-type=bed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bigBed', 'narrowPeak'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('bigBed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('bed', 'bedExonScore'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedExonScore.as' % encValData],
('bigBed', 'bedExonScore'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/bedExonScore.as' % encValData],
('bed', 'bedRrbs'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedRrbs.as' % encValData],
('bigBed', 'bedRrbs'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedRrbs.as' % encValData],
('bed', 'enhancerAssay'): ['-type=bed9+1', chromInfo, '-as=%s/as/enhancerAssay.as' % encValData],
('bigBed', 'enhancerAssay'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/enhancerAssay.as' % encValData],
('bed', 'modPepMap'): ['-type=bed9+7', chromInfo, '-as=%s/as/modPepMap.as' % encValData],
('bigBed', 'modPepMap'): ['-type=bigBed9+7', chromInfo, '-as=%s/as/modPepMap.as' % encValData],
('bed', 'pepMap'): ['-type=bed9+7', chromInfo, '-as=%s/as/pepMap.as' % encValData],
('bigBed', 'pepMap'): ['-type=bigBed9+7', chromInfo, '-as=%s/as/pepMap.as' % encValData],
('bed', 'openChromCombinedPeaks'): ['-type=bed9+12', chromInfo, '-as=%s/as/openChromCombinedPeaks.as' % encValData],
('bigBed', 'openChromCombinedPeaks'): ['-type=bigBed9+12', chromInfo, '-as=%s/as/openChromCombinedPeaks.as' % encValData],
('bed', 'peptideMapping'): ['-type=bed6+4', chromInfo, '-as=%s/as/peptideMapping.as' % encValData],
('bigBed', 'peptideMapping'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/peptideMapping.as' % encValData],
('bed', 'shortFrags'): ['-type=bed6+21', chromInfo, '-as=%s/as/shortFrags.as' % encValData],
('bigBed', 'shortFrags'): ['-type=bigBed6+21', chromInfo, '-as=%s/as/shortFrags.as' % encValData],
('rcc', None): ['-type=rcc'],
('idat', None): ['-type=idat'],
('bedpe', None): ['-type=bed3+', chromInfo],
('bedpe', 'mango'): ['-type=bed3+', chromInfo],
('gtf', None): None,
('tar', None): None,
('tsv', None): None,
('csv', None): None,
('2bit', None): None,
('csfasta', None): ['-type=csfasta'],
('csqual', None): ['-type=csqual'],
('CEL', None): None,
('sam', None): None,
('wig', None): None,
('hdf5', None): None,
('gff', None): None
}
validate_args = validate_map.get((file_format, file_format_type))
if validate_args is None:
logger.warning('No rules to validate file_format %s and file_format_type %s' %(file_format, file_format_type))
return False
if (file_format, file_format_type) in [('bed', 'bed3'), ('bed', 'bed3+')] and as_file: #TODO: Update file schema and change to bed3+
validate_args = ['-type=bed3+', chromInfo] #TODO: Update file schema. This is to foce bed3+ for validateFiles but pass bed3 to file_format_type
validate_args.append(as_file)
tokens = ['validateFiles'] + validate_args + [path]
logger.debug('Running: %s' %(tokens))
try:
subprocess.check_output(tokens)
except subprocess.CalledProcessError as e:
logger.error("validateFiles returned %s" %(e.output))
return False
else:
logger.debug("%s: validateFiles passed" %(path))
return True
def post_file(file_metadata, server, keypair, dryrun=False):
local_path = file_metadata.get('submitted_file_name')
if not file_metadata.get('md5sum'):
file_metadata['md5sum'] = md5(local_path)
try:
logger.debug("POST JSON: %s" %(json.dumps(file_metadata)))
except:
pass
if dryrun:
file_obj = copy.copy(file_metadata)
file_obj.update({'accession':None})
return file_obj
else:
url = urlparse.urljoin(server,'/files/')
r = requests.post(url, auth=keypair, headers=POST_HEADERS, data=json.dumps(file_metadata))
try:
r.raise_for_status()
except:
logger.warning('POST failed: %s %s' %(r.status_code, r.reason))
logger.warning(r.text)
return None
else:
return r.json()['@graph'][0]
def upload_file(file_obj, dryrun=False):
if dryrun:
return None
else:
creds = file_obj['upload_credentials']
logger.debug('AWS creds: %s' %(creds))
env = os.environ.copy()
env.update({
'AWS_ACCESS_KEY_ID': creds['access_key'],
'AWS_SECRET_ACCESS_KEY': creds['secret_key'],
'AWS_SECURITY_TOKEN': creds['session_token'],
})
path = file_obj.get('submitted_file_name')
try:
subprocess.check_call(['aws', 's3', 'cp', path, creds['upload_url']], env=env)
except subprocess.CalledProcessError as e:
# The aws command returns a non-zero exit code on error.
logger.error("AWS upload failed with exit code %d" %(e.returncode))
return e.returncode
else:
return 0
def get_asfile(uri_json, server, keypair):
try:
uris = json.loads(uri_json)
except:
logger.error("Could not parse as JSON: %s" %(uri_json))
return None
for uri in uris:
url = server + '/' + uri
r = requests.get(url, headers=GET_HEADERS, auth=keypair)
try:
r.raise_for_status()
except:
logger.error("Failed to get ENCODE object %s" %(uri))
return None
document_obj = r.json()
r = requests.get(urlparse.urljoin(server, document_obj['uuid'] + '/' + document_obj['attachment']['href']), auth=keypair)
try:
r.raise_for_status()
except:
logger.error("Failed to download ENCODE document %s" %(uri))
return None
f = tempfile.NamedTemporaryFile(delete=False)
f.write(r.text)
return f
def process_row(row):
json_payload = {}
for key,value in row.iteritems():
if not key:
continue
try:
json_payload.update({key:json.loads(value)})
except: | try:
json_payload.update({key:json.loads('"%s"' %(value))})
except:
logger.warning('Could not convert field %s value %s to JSON' %(n,key,value)) | random_line_split |
|
plugin.go | p.ReporterClient != nil {
return p.killGrpcProcess()
}
if isProcessRunning(p) {
defer p.connection.Close()
p.killTimer = time.NewTimer(config.PluginKillTimeout())
err := conn.SendProcessKillMessage(p.connection)
if err != nil {
logger.Warningf(true, "Error while killing plugin %s : %s ", p.descriptor.Name, err.Error())
}
exited := make(chan bool, 1)
go func() {
for {
if isProcessRunning(p) {
time.Sleep(100 * time.Millisecond)
} else {
exited <- true
return
}
}
}()
select {
case <-exited:
if !p.killTimer.Stop() {
<-p.killTimer.C
}
logger.Debugf(true, "Plugin [%s] with pid [%d] has exited", p.descriptor.Name, p.pluginCmd.Process.Pid)
case <-p.killTimer.C:
logger.Warningf(true, "Plugin [%s] with pid [%d] did not exit after %.2f seconds. Forcefully killing it.", p.descriptor.Name, p.pluginCmd.Process.Pid, config.PluginKillTimeout().Seconds())
err := p.pluginCmd.Process.Kill()
if err != nil {
logger.Warningf(true, "Error while killing plugin %s : %s ", p.descriptor.Name, err.Error())
}
return err
}
}
return nil
}
// IsPluginInstalled checks if given plugin with specific version is installed or not.
func IsPluginInstalled(pluginName, pluginVersion string) bool {
pluginsInstallDir, err := common.GetPluginsInstallDir(pluginName)
if err != nil {
return false
}
thisPluginDir := filepath.Join(pluginsInstallDir, pluginName)
if !common.DirExists(thisPluginDir) {
return false
}
if pluginVersion != "" {
return common.FileExists(filepath.Join(thisPluginDir, pluginVersion, common.PluginJSONFile))
}
return true
}
func getPluginJSONPath(pluginName, pluginVersion string) (string, error) {
if !IsPluginInstalled(pluginName, pluginVersion) {
plugin := strings.TrimSpace(fmt.Sprintf("%s %s", pluginName, pluginVersion))
return "", fmt.Errorf("Plugin %s is not installed", plugin)
}
pluginInstallDir, err := GetInstallDir(pluginName, "")
if err != nil {
return "", err
}
return filepath.Join(pluginInstallDir, common.PluginJSONFile), nil
}
// GetPluginDescriptor return the information about the plugin including name, id, commands to start etc.
func GetPluginDescriptor(pluginID, pluginVersion string) (*PluginDescriptor, error) {
pluginJSON, err := getPluginJSONPath(pluginID, pluginVersion)
if err != nil {
return nil, err
}
return GetPluginDescriptorFromJSON(pluginJSON)
}
func GetPluginDescriptorFromJSON(pluginJSON string) (*PluginDescriptor, error) {
pluginJSONContents, err := common.ReadFileContents(pluginJSON)
if err != nil {
return nil, err
}
var pd PluginDescriptor
if err = json.Unmarshal([]byte(pluginJSONContents), &pd); err != nil {
return nil, fmt.Errorf("%s: %s", pluginJSON, err.Error())
}
pd.pluginPath = filepath.Dir(pluginJSON)
return &pd, nil
}
func startPlugin(pd *PluginDescriptor, action pluginScope) (*plugin, error) {
var command []string
switch runtime.GOOS {
case "windows":
command = pd.Command.Windows
case "darwin":
command = pd.Command.Darwin
default:
command = pd.Command.Linux
}
if len(command) == 0 {
return nil, fmt.Errorf("Platform specific command not specified: %s.", runtime.GOOS)
}
if pd.hasCapability(gRPCSupportCapability) {
return startGRPCPlugin(pd, command)
}
return startLegacyPlugin(pd, command)
}
func startGRPCPlugin(pd *PluginDescriptor, command []string) (*plugin, error) {
portChan := make(chan string)
writer := &logger.LogWriter{
Stderr: logger.NewCustomWriter(portChan, os.Stderr, pd.ID, true),
Stdout: logger.NewCustomWriter(portChan, os.Stdout, pd.ID, false),
}
cmd, err := common.ExecuteCommand(command, pd.pluginPath, writer.Stdout, writer.Stderr)
go func() {
err = cmd.Wait()
if err != nil {
logger.Errorf(true, "Error occurred while waiting for plugin process to finish.\nError : %s", err.Error())
}
}()
if err != nil {
return nil, err
}
var port string
select {
case port = <-portChan:
close(portChan)
case <-time.After(config.PluginConnectionTimeout()):
return nil, fmt.Errorf("timed out connecting to %s", pd.ID)
}
logger.Debugf(true, "Attempting to connect to grpc server at port: %s", port)
gRPCConn, err := grpc.Dial(fmt.Sprintf("%s:%s", "127.0.0.1", port),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(1024*1024*1024), grpc.MaxCallRecvMsgSize(1024*1024*1024)),
grpc.WithBlock())
if err != nil {
return nil, err
}
plugin := &plugin{
pluginCmd: cmd,
descriptor: pd,
gRPCConn: gRPCConn,
mutex: &sync.Mutex{},
}
if pd.hasScope(docScope) {
plugin.DocumenterClient = gauge_messages.NewDocumenterClient(gRPCConn)
} else {
plugin.ReporterClient = gauge_messages.NewReporterClient(gRPCConn)
}
logger.Debugf(true, "Successfully made the connection with plugin with port: %s", port)
return plugin, nil
}
func startLegacyPlugin(pd *PluginDescriptor, command []string) (*plugin, error) {
writer := logger.NewLogWriter(pd.ID, true, 0)
cmd, err := common.ExecuteCommand(command, pd.pluginPath, writer.Stdout, writer.Stderr)
if err != nil {
return nil, err
}
var mutex = &sync.Mutex{}
go func() {
pState, _ := cmd.Process.Wait()
mutex.Lock()
cmd.ProcessState = pState
mutex.Unlock()
}()
plugin := &plugin{pluginCmd: cmd, descriptor: pd, mutex: mutex}
return plugin, nil
}
func SetEnvForPlugin(action pluginScope, pd *PluginDescriptor, m *manifest.Manifest, pluginEnvVars map[string]string) error {
pluginEnvVars[fmt.Sprintf("%s_action", pd.ID)] = string(action)
pluginEnvVars["test_language"] = m.Language
return setEnvironmentProperties(pluginEnvVars)
}
func setEnvironmentProperties(properties map[string]string) error {
for k, v := range properties {
if err := common.SetEnvVariable(k, v); err != nil {
return err
}
}
return nil
}
func IsPluginAdded(m *manifest.Manifest, descriptor *PluginDescriptor) bool {
for _, pluginID := range m.Plugins {
if pluginID == descriptor.ID {
return true
}
} | }
func startPluginsForExecution(m *manifest.Manifest) (Handler, []string) {
var warnings []string
handler := &GaugePlugins{}
envProperties := make(map[string]string)
for _, pluginID := range m.Plugins {
pd, err := GetPluginDescriptor(pluginID, "")
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to start plugin %s. %s. To install, run `gauge install %s`.", pluginID, err.Error(), pluginID))
continue
}
compatibilityErr := version.CheckCompatibility(version.CurrentGaugeVersion, &pd.GaugeVersionSupport)
if compatibilityErr != nil {
warnings = append(warnings, fmt.Sprintf("Compatible %s plugin version to current Gauge version %s not found", pd.Name, version.CurrentGaugeVersion))
continue
}
if pd.hasScope(executionScope) {
gaugeConnectionHandler, err := conn.NewGaugeConnectionHandler(0, nil)
if err != nil {
warnings = append(warnings, err.Error())
continue
}
envProperties[pluginConnectionPortEnv] = strconv.Itoa(gaugeConnectionHandler.ConnectionPortNumber())
prop, err := common.GetGaugeConfigurationFor(common.GaugePropertiesFile)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to read Gauge configuration. %s", err.Error()))
continue
}
envProperties["plugin_kill_timeout"] = prop["plugin_kill_timeout"]
err = SetEnvForPlugin(executionScope, pd, m, envProperties)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error setting environment for plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
logger.Debugf(true, "Starting %s plugin", pd.Name)
plugin, err := startPlugin(pd, executionScope)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error starting plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
if plugin.gRPCConn != nil {
handler.addPlugin(pluginID | return false | random_line_split |
plugin.go | port),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(1024*1024*1024), grpc.MaxCallRecvMsgSize(1024*1024*1024)),
grpc.WithBlock())
if err != nil {
return nil, err
}
plugin := &plugin{
pluginCmd: cmd,
descriptor: pd,
gRPCConn: gRPCConn,
mutex: &sync.Mutex{},
}
if pd.hasScope(docScope) {
plugin.DocumenterClient = gauge_messages.NewDocumenterClient(gRPCConn)
} else {
plugin.ReporterClient = gauge_messages.NewReporterClient(gRPCConn)
}
logger.Debugf(true, "Successfully made the connection with plugin with port: %s", port)
return plugin, nil
}
func startLegacyPlugin(pd *PluginDescriptor, command []string) (*plugin, error) {
writer := logger.NewLogWriter(pd.ID, true, 0)
cmd, err := common.ExecuteCommand(command, pd.pluginPath, writer.Stdout, writer.Stderr)
if err != nil {
return nil, err
}
var mutex = &sync.Mutex{}
go func() {
pState, _ := cmd.Process.Wait()
mutex.Lock()
cmd.ProcessState = pState
mutex.Unlock()
}()
plugin := &plugin{pluginCmd: cmd, descriptor: pd, mutex: mutex}
return plugin, nil
}
func SetEnvForPlugin(action pluginScope, pd *PluginDescriptor, m *manifest.Manifest, pluginEnvVars map[string]string) error {
pluginEnvVars[fmt.Sprintf("%s_action", pd.ID)] = string(action)
pluginEnvVars["test_language"] = m.Language
return setEnvironmentProperties(pluginEnvVars)
}
func setEnvironmentProperties(properties map[string]string) error {
for k, v := range properties {
if err := common.SetEnvVariable(k, v); err != nil {
return err
}
}
return nil
}
func IsPluginAdded(m *manifest.Manifest, descriptor *PluginDescriptor) bool {
for _, pluginID := range m.Plugins {
if pluginID == descriptor.ID {
return true
}
}
return false
}
func startPluginsForExecution(m *manifest.Manifest) (Handler, []string) {
var warnings []string
handler := &GaugePlugins{}
envProperties := make(map[string]string)
for _, pluginID := range m.Plugins {
pd, err := GetPluginDescriptor(pluginID, "")
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to start plugin %s. %s. To install, run `gauge install %s`.", pluginID, err.Error(), pluginID))
continue
}
compatibilityErr := version.CheckCompatibility(version.CurrentGaugeVersion, &pd.GaugeVersionSupport)
if compatibilityErr != nil {
warnings = append(warnings, fmt.Sprintf("Compatible %s plugin version to current Gauge version %s not found", pd.Name, version.CurrentGaugeVersion))
continue
}
if pd.hasScope(executionScope) {
gaugeConnectionHandler, err := conn.NewGaugeConnectionHandler(0, nil)
if err != nil {
warnings = append(warnings, err.Error())
continue
}
envProperties[pluginConnectionPortEnv] = strconv.Itoa(gaugeConnectionHandler.ConnectionPortNumber())
prop, err := common.GetGaugeConfigurationFor(common.GaugePropertiesFile)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to read Gauge configuration. %s", err.Error()))
continue
}
envProperties["plugin_kill_timeout"] = prop["plugin_kill_timeout"]
err = SetEnvForPlugin(executionScope, pd, m, envProperties)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error setting environment for plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
logger.Debugf(true, "Starting %s plugin", pd.Name)
plugin, err := startPlugin(pd, executionScope)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error starting plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
if plugin.gRPCConn != nil {
handler.addPlugin(pluginID, plugin)
continue
}
pluginConnection, err := gaugeConnectionHandler.AcceptConnection(config.PluginConnectionTimeout(), make(chan error))
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error starting plugin %s %s. Failed to connect to plugin. %s", pd.Name, pd.Version, err.Error()))
err := plugin.pluginCmd.Process.Kill()
if err != nil {
logger.Errorf(false, "unable to kill plugin %s: %s", plugin.descriptor.Name, err.Error())
}
continue
}
logger.Debugf(true, "Established connection to %s plugin", pd.Name)
plugin.connection = pluginConnection
handler.addPlugin(pluginID, plugin)
}
}
return handler, warnings
}
func GenerateDoc(pluginName string, specDirs []string, startAPIFunc func([]string) int) {
pd, err := GetPluginDescriptor(pluginName, "")
if err != nil {
logger.Fatalf(true, "Error starting plugin %s. Failed to get plugin.json. %s. To install, run `gauge install %s`.", pluginName, err.Error(), pluginName)
}
if err := version.CheckCompatibility(version.CurrentGaugeVersion, &pd.GaugeVersionSupport); err != nil {
logger.Fatalf(true, "Compatible %s plugin version to current Gauge version %s not found", pd.Name, version.CurrentGaugeVersion)
}
if !pd.hasScope(docScope) {
logger.Fatalf(true, "Invalid plugin name: %s, this plugin cannot generate documentation.", pd.Name)
}
var sources []string
for _, src := range specDirs {
path, _ := filepath.Abs(src)
sources = append(sources, path)
}
os.Setenv("GAUGE_SPEC_DIRS", strings.Join(sources, "||"))
os.Setenv("GAUGE_PROJECT_ROOT", config.ProjectRoot)
if pd.hasCapability(gRPCSupportCapability) {
p, err := startPlugin(pd, docScope)
if err != nil {
logger.Fatalf(true, " %s %s. %s", pd.Name, pd.Version, err.Error())
}
_, err = p.DocumenterClient.GenerateDocs(context.Background(), getSpecDetails(specDirs))
grpcErr := p.killGrpcProcess()
if grpcErr != nil {
logger.Errorf(false, "Unable to kill plugin %s : %s", p.descriptor.Name, grpcErr.Error())
}
if err != nil {
logger.Fatalf(true, "Failed to generate docs. %s", err.Error())
}
} else {
port := startAPIFunc(specDirs)
err := os.Setenv(common.APIPortEnvVariableName, strconv.Itoa(port))
if err != nil {
logger.Fatalf(true, "Failed to set env GAUGE_API_PORT. %s", err.Error())
}
p, err := startPlugin(pd, docScope)
if err != nil {
logger.Fatalf(true, " %s %s. %s", pd.Name, pd.Version, err.Error())
}
for isProcessRunning(p) {
}
}
}
func (p *plugin) invokeService(m *gauge_messages.Message) error {
ctx := context.Background()
var err error
switch m.GetMessageType() {
case gauge_messages.Message_SuiteExecutionResult:
_, err = p.ReporterClient.NotifySuiteResult(ctx, m.GetSuiteExecutionResult())
case gauge_messages.Message_ExecutionStarting:
_, err = p.ReporterClient.NotifyExecutionStarting(ctx, m.GetExecutionStartingRequest())
case gauge_messages.Message_ExecutionEnding:
_, err = p.ReporterClient.NotifyExecutionEnding(ctx, m.GetExecutionEndingRequest())
case gauge_messages.Message_SpecExecutionEnding:
_, err = p.ReporterClient.NotifySpecExecutionEnding(ctx, m.GetSpecExecutionEndingRequest())
case gauge_messages.Message_SpecExecutionStarting:
_, err = p.ReporterClient.NotifySpecExecutionStarting(ctx, m.GetSpecExecutionStartingRequest())
case gauge_messages.Message_ScenarioExecutionEnding:
_, err = p.ReporterClient.NotifyScenarioExecutionEnding(ctx, m.GetScenarioExecutionEndingRequest())
case gauge_messages.Message_ScenarioExecutionStarting:
_, err = p.ReporterClient.NotifyScenarioExecutionStarting(ctx, m.GetScenarioExecutionStartingRequest())
case gauge_messages.Message_StepExecutionEnding:
_, err = p.ReporterClient.NotifyStepExecutionEnding(ctx, m.GetStepExecutionEndingRequest())
case gauge_messages.Message_StepExecutionStarting:
_, err = p.ReporterClient.NotifyStepExecutionStarting(ctx, m.GetStepExecutionStartingRequest())
}
return err
}
func (p *plugin) sendMessage(message *gauge_messages.Message) error | {
if p.gRPCConn != nil {
return p.invokeService(message)
}
messageID := common.GetUniqueID()
message.MessageId = messageID
messageBytes, err := proto.Marshal(message)
if err != nil {
return err
}
err = conn.Write(p.connection, messageBytes)
if err != nil {
return fmt.Errorf("[Warning] Failed to send message to plugin: %s %s", p.descriptor.ID, err.Error())
}
return nil
} | identifier_body |
|
plugin.go | : &sync.Mutex{},
}
if pd.hasScope(docScope) {
plugin.DocumenterClient = gauge_messages.NewDocumenterClient(gRPCConn)
} else {
plugin.ReporterClient = gauge_messages.NewReporterClient(gRPCConn)
}
logger.Debugf(true, "Successfully made the connection with plugin with port: %s", port)
return plugin, nil
}
func startLegacyPlugin(pd *PluginDescriptor, command []string) (*plugin, error) {
writer := logger.NewLogWriter(pd.ID, true, 0)
cmd, err := common.ExecuteCommand(command, pd.pluginPath, writer.Stdout, writer.Stderr)
if err != nil {
return nil, err
}
var mutex = &sync.Mutex{}
go func() {
pState, _ := cmd.Process.Wait()
mutex.Lock()
cmd.ProcessState = pState
mutex.Unlock()
}()
plugin := &plugin{pluginCmd: cmd, descriptor: pd, mutex: mutex}
return plugin, nil
}
func SetEnvForPlugin(action pluginScope, pd *PluginDescriptor, m *manifest.Manifest, pluginEnvVars map[string]string) error {
pluginEnvVars[fmt.Sprintf("%s_action", pd.ID)] = string(action)
pluginEnvVars["test_language"] = m.Language
return setEnvironmentProperties(pluginEnvVars)
}
func setEnvironmentProperties(properties map[string]string) error {
for k, v := range properties {
if err := common.SetEnvVariable(k, v); err != nil {
return err
}
}
return nil
}
func IsPluginAdded(m *manifest.Manifest, descriptor *PluginDescriptor) bool {
for _, pluginID := range m.Plugins {
if pluginID == descriptor.ID {
return true
}
}
return false
}
func startPluginsForExecution(m *manifest.Manifest) (Handler, []string) {
var warnings []string
handler := &GaugePlugins{}
envProperties := make(map[string]string)
for _, pluginID := range m.Plugins {
pd, err := GetPluginDescriptor(pluginID, "")
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to start plugin %s. %s. To install, run `gauge install %s`.", pluginID, err.Error(), pluginID))
continue
}
compatibilityErr := version.CheckCompatibility(version.CurrentGaugeVersion, &pd.GaugeVersionSupport)
if compatibilityErr != nil {
warnings = append(warnings, fmt.Sprintf("Compatible %s plugin version to current Gauge version %s not found", pd.Name, version.CurrentGaugeVersion))
continue
}
if pd.hasScope(executionScope) {
gaugeConnectionHandler, err := conn.NewGaugeConnectionHandler(0, nil)
if err != nil {
warnings = append(warnings, err.Error())
continue
}
envProperties[pluginConnectionPortEnv] = strconv.Itoa(gaugeConnectionHandler.ConnectionPortNumber())
prop, err := common.GetGaugeConfigurationFor(common.GaugePropertiesFile)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to read Gauge configuration. %s", err.Error()))
continue
}
envProperties["plugin_kill_timeout"] = prop["plugin_kill_timeout"]
err = SetEnvForPlugin(executionScope, pd, m, envProperties)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error setting environment for plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
logger.Debugf(true, "Starting %s plugin", pd.Name)
plugin, err := startPlugin(pd, executionScope)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error starting plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
if plugin.gRPCConn != nil {
handler.addPlugin(pluginID, plugin)
continue
}
pluginConnection, err := gaugeConnectionHandler.AcceptConnection(config.PluginConnectionTimeout(), make(chan error))
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error starting plugin %s %s. Failed to connect to plugin. %s", pd.Name, pd.Version, err.Error()))
err := plugin.pluginCmd.Process.Kill()
if err != nil {
logger.Errorf(false, "unable to kill plugin %s: %s", plugin.descriptor.Name, err.Error())
}
continue
}
logger.Debugf(true, "Established connection to %s plugin", pd.Name)
plugin.connection = pluginConnection
handler.addPlugin(pluginID, plugin)
}
}
return handler, warnings
}
func GenerateDoc(pluginName string, specDirs []string, startAPIFunc func([]string) int) {
pd, err := GetPluginDescriptor(pluginName, "")
if err != nil {
logger.Fatalf(true, "Error starting plugin %s. Failed to get plugin.json. %s. To install, run `gauge install %s`.", pluginName, err.Error(), pluginName)
}
if err := version.CheckCompatibility(version.CurrentGaugeVersion, &pd.GaugeVersionSupport); err != nil {
logger.Fatalf(true, "Compatible %s plugin version to current Gauge version %s not found", pd.Name, version.CurrentGaugeVersion)
}
if !pd.hasScope(docScope) {
logger.Fatalf(true, "Invalid plugin name: %s, this plugin cannot generate documentation.", pd.Name)
}
var sources []string
for _, src := range specDirs {
path, _ := filepath.Abs(src)
sources = append(sources, path)
}
os.Setenv("GAUGE_SPEC_DIRS", strings.Join(sources, "||"))
os.Setenv("GAUGE_PROJECT_ROOT", config.ProjectRoot)
if pd.hasCapability(gRPCSupportCapability) {
p, err := startPlugin(pd, docScope)
if err != nil {
logger.Fatalf(true, " %s %s. %s", pd.Name, pd.Version, err.Error())
}
_, err = p.DocumenterClient.GenerateDocs(context.Background(), getSpecDetails(specDirs))
grpcErr := p.killGrpcProcess()
if grpcErr != nil {
logger.Errorf(false, "Unable to kill plugin %s : %s", p.descriptor.Name, grpcErr.Error())
}
if err != nil {
logger.Fatalf(true, "Failed to generate docs. %s", err.Error())
}
} else {
port := startAPIFunc(specDirs)
err := os.Setenv(common.APIPortEnvVariableName, strconv.Itoa(port))
if err != nil {
logger.Fatalf(true, "Failed to set env GAUGE_API_PORT. %s", err.Error())
}
p, err := startPlugin(pd, docScope)
if err != nil {
logger.Fatalf(true, " %s %s. %s", pd.Name, pd.Version, err.Error())
}
for isProcessRunning(p) {
}
}
}
func (p *plugin) invokeService(m *gauge_messages.Message) error {
ctx := context.Background()
var err error
switch m.GetMessageType() {
case gauge_messages.Message_SuiteExecutionResult:
_, err = p.ReporterClient.NotifySuiteResult(ctx, m.GetSuiteExecutionResult())
case gauge_messages.Message_ExecutionStarting:
_, err = p.ReporterClient.NotifyExecutionStarting(ctx, m.GetExecutionStartingRequest())
case gauge_messages.Message_ExecutionEnding:
_, err = p.ReporterClient.NotifyExecutionEnding(ctx, m.GetExecutionEndingRequest())
case gauge_messages.Message_SpecExecutionEnding:
_, err = p.ReporterClient.NotifySpecExecutionEnding(ctx, m.GetSpecExecutionEndingRequest())
case gauge_messages.Message_SpecExecutionStarting:
_, err = p.ReporterClient.NotifySpecExecutionStarting(ctx, m.GetSpecExecutionStartingRequest())
case gauge_messages.Message_ScenarioExecutionEnding:
_, err = p.ReporterClient.NotifyScenarioExecutionEnding(ctx, m.GetScenarioExecutionEndingRequest())
case gauge_messages.Message_ScenarioExecutionStarting:
_, err = p.ReporterClient.NotifyScenarioExecutionStarting(ctx, m.GetScenarioExecutionStartingRequest())
case gauge_messages.Message_StepExecutionEnding:
_, err = p.ReporterClient.NotifyStepExecutionEnding(ctx, m.GetStepExecutionEndingRequest())
case gauge_messages.Message_StepExecutionStarting:
_, err = p.ReporterClient.NotifyStepExecutionStarting(ctx, m.GetStepExecutionStartingRequest())
}
return err
}
func (p *plugin) sendMessage(message *gauge_messages.Message) error {
if p.gRPCConn != nil {
return p.invokeService(message)
}
messageID := common.GetUniqueID()
message.MessageId = messageID
messageBytes, err := proto.Marshal(message)
if err != nil {
return err
}
err = conn.Write(p.connection, messageBytes)
if err != nil {
return fmt.Errorf("[Warning] Failed to send message to plugin: %s %s", p.descriptor.ID, err.Error())
}
return nil
}
func StartPlugins(m *manifest.Manifest) Handler {
pluginHandler, warnings := startPluginsForExecution(m)
logger.HandleWarningMessages(true, warnings)
return pluginHandler
}
func PluginsWithoutScope() (infos []pluginInfo.PluginInfo) {
if plugins, err := pluginInfo.GetAllInstalledPluginsWithVersion(); err == nil {
for _, p := range plugins {
pd, err := GetPluginDescriptor(p.Name, p.Version.String())
if err == nil && !pd.hasAnyScope() | {
infos = append(infos, p)
} | conditional_block |
|
plugin.go | p.ReporterClient != nil {
return p.killGrpcProcess()
}
if isProcessRunning(p) {
defer p.connection.Close()
p.killTimer = time.NewTimer(config.PluginKillTimeout())
err := conn.SendProcessKillMessage(p.connection)
if err != nil {
logger.Warningf(true, "Error while killing plugin %s : %s ", p.descriptor.Name, err.Error())
}
exited := make(chan bool, 1)
go func() {
for {
if isProcessRunning(p) {
time.Sleep(100 * time.Millisecond)
} else {
exited <- true
return
}
}
}()
select {
case <-exited:
if !p.killTimer.Stop() {
<-p.killTimer.C
}
logger.Debugf(true, "Plugin [%s] with pid [%d] has exited", p.descriptor.Name, p.pluginCmd.Process.Pid)
case <-p.killTimer.C:
logger.Warningf(true, "Plugin [%s] with pid [%d] did not exit after %.2f seconds. Forcefully killing it.", p.descriptor.Name, p.pluginCmd.Process.Pid, config.PluginKillTimeout().Seconds())
err := p.pluginCmd.Process.Kill()
if err != nil {
logger.Warningf(true, "Error while killing plugin %s : %s ", p.descriptor.Name, err.Error())
}
return err
}
}
return nil
}
// IsPluginInstalled checks if given plugin with specific version is installed or not.
func IsPluginInstalled(pluginName, pluginVersion string) bool {
pluginsInstallDir, err := common.GetPluginsInstallDir(pluginName)
if err != nil {
return false
}
thisPluginDir := filepath.Join(pluginsInstallDir, pluginName)
if !common.DirExists(thisPluginDir) {
return false
}
if pluginVersion != "" {
return common.FileExists(filepath.Join(thisPluginDir, pluginVersion, common.PluginJSONFile))
}
return true
}
func getPluginJSONPath(pluginName, pluginVersion string) (string, error) {
if !IsPluginInstalled(pluginName, pluginVersion) {
plugin := strings.TrimSpace(fmt.Sprintf("%s %s", pluginName, pluginVersion))
return "", fmt.Errorf("Plugin %s is not installed", plugin)
}
pluginInstallDir, err := GetInstallDir(pluginName, "")
if err != nil {
return "", err
}
return filepath.Join(pluginInstallDir, common.PluginJSONFile), nil
}
// GetPluginDescriptor return the information about the plugin including name, id, commands to start etc.
func GetPluginDescriptor(pluginID, pluginVersion string) (*PluginDescriptor, error) {
pluginJSON, err := getPluginJSONPath(pluginID, pluginVersion)
if err != nil {
return nil, err
}
return GetPluginDescriptorFromJSON(pluginJSON)
}
func GetPluginDescriptorFromJSON(pluginJSON string) (*PluginDescriptor, error) {
pluginJSONContents, err := common.ReadFileContents(pluginJSON)
if err != nil {
return nil, err
}
var pd PluginDescriptor
if err = json.Unmarshal([]byte(pluginJSONContents), &pd); err != nil {
return nil, fmt.Errorf("%s: %s", pluginJSON, err.Error())
}
pd.pluginPath = filepath.Dir(pluginJSON)
return &pd, nil
}
func startPlugin(pd *PluginDescriptor, action pluginScope) (*plugin, error) {
var command []string
switch runtime.GOOS {
case "windows":
command = pd.Command.Windows
case "darwin":
command = pd.Command.Darwin
default:
command = pd.Command.Linux
}
if len(command) == 0 {
return nil, fmt.Errorf("Platform specific command not specified: %s.", runtime.GOOS)
}
if pd.hasCapability(gRPCSupportCapability) {
return startGRPCPlugin(pd, command)
}
return startLegacyPlugin(pd, command)
}
func startGRPCPlugin(pd *PluginDescriptor, command []string) (*plugin, error) {
portChan := make(chan string)
writer := &logger.LogWriter{
Stderr: logger.NewCustomWriter(portChan, os.Stderr, pd.ID, true),
Stdout: logger.NewCustomWriter(portChan, os.Stdout, pd.ID, false),
}
cmd, err := common.ExecuteCommand(command, pd.pluginPath, writer.Stdout, writer.Stderr)
go func() {
err = cmd.Wait()
if err != nil {
logger.Errorf(true, "Error occurred while waiting for plugin process to finish.\nError : %s", err.Error())
}
}()
if err != nil {
return nil, err
}
var port string
select {
case port = <-portChan:
close(portChan)
case <-time.After(config.PluginConnectionTimeout()):
return nil, fmt.Errorf("timed out connecting to %s", pd.ID)
}
logger.Debugf(true, "Attempting to connect to grpc server at port: %s", port)
gRPCConn, err := grpc.Dial(fmt.Sprintf("%s:%s", "127.0.0.1", port),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(1024*1024*1024), grpc.MaxCallRecvMsgSize(1024*1024*1024)),
grpc.WithBlock())
if err != nil {
return nil, err
}
plugin := &plugin{
pluginCmd: cmd,
descriptor: pd,
gRPCConn: gRPCConn,
mutex: &sync.Mutex{},
}
if pd.hasScope(docScope) {
plugin.DocumenterClient = gauge_messages.NewDocumenterClient(gRPCConn)
} else {
plugin.ReporterClient = gauge_messages.NewReporterClient(gRPCConn)
}
logger.Debugf(true, "Successfully made the connection with plugin with port: %s", port)
return plugin, nil
}
func startLegacyPlugin(pd *PluginDescriptor, command []string) (*plugin, error) {
writer := logger.NewLogWriter(pd.ID, true, 0)
cmd, err := common.ExecuteCommand(command, pd.pluginPath, writer.Stdout, writer.Stderr)
if err != nil {
return nil, err
}
var mutex = &sync.Mutex{}
go func() {
pState, _ := cmd.Process.Wait()
mutex.Lock()
cmd.ProcessState = pState
mutex.Unlock()
}()
plugin := &plugin{pluginCmd: cmd, descriptor: pd, mutex: mutex}
return plugin, nil
}
func SetEnvForPlugin(action pluginScope, pd *PluginDescriptor, m *manifest.Manifest, pluginEnvVars map[string]string) error {
pluginEnvVars[fmt.Sprintf("%s_action", pd.ID)] = string(action)
pluginEnvVars["test_language"] = m.Language
return setEnvironmentProperties(pluginEnvVars)
}
func setEnvironmentProperties(properties map[string]string) error {
for k, v := range properties {
if err := common.SetEnvVariable(k, v); err != nil {
return err
}
}
return nil
}
func IsPluginAdded(m *manifest.Manifest, descriptor *PluginDescriptor) bool {
for _, pluginID := range m.Plugins {
if pluginID == descriptor.ID {
return true
}
}
return false
}
func | (m *manifest.Manifest) (Handler, []string) {
var warnings []string
handler := &GaugePlugins{}
envProperties := make(map[string]string)
for _, pluginID := range m.Plugins {
pd, err := GetPluginDescriptor(pluginID, "")
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to start plugin %s. %s. To install, run `gauge install %s`.", pluginID, err.Error(), pluginID))
continue
}
compatibilityErr := version.CheckCompatibility(version.CurrentGaugeVersion, &pd.GaugeVersionSupport)
if compatibilityErr != nil {
warnings = append(warnings, fmt.Sprintf("Compatible %s plugin version to current Gauge version %s not found", pd.Name, version.CurrentGaugeVersion))
continue
}
if pd.hasScope(executionScope) {
gaugeConnectionHandler, err := conn.NewGaugeConnectionHandler(0, nil)
if err != nil {
warnings = append(warnings, err.Error())
continue
}
envProperties[pluginConnectionPortEnv] = strconv.Itoa(gaugeConnectionHandler.ConnectionPortNumber())
prop, err := common.GetGaugeConfigurationFor(common.GaugePropertiesFile)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to read Gauge configuration. %s", err.Error()))
continue
}
envProperties["plugin_kill_timeout"] = prop["plugin_kill_timeout"]
err = SetEnvForPlugin(executionScope, pd, m, envProperties)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error setting environment for plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
logger.Debugf(true, "Starting %s plugin", pd.Name)
plugin, err := startPlugin(pd, executionScope)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error starting plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
if plugin.gRPCConn != nil {
handler.addPlugin | startPluginsForExecution | identifier_name |
hkdf.rs | // =8160
///
pub struct Test<'a> {
ikm: &'a [u8],
salt: &'a [u8],
info: &'a [u8],
okm: &'a [u8],
}
/// data taken from sample code in Readme of crates.io page
pub fn basic_test_hkdf<C: CryptoProvider>(_: PhantomData<C>) {
let ikm = hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b");
let salt = hex!("000102030405060708090a0b0c");
let info = hex!("f0f1f2f3f4f5f6f7f8f9");
let hk = C::HkdfSha256::new(Some(&salt[..]), &ikm);
let mut okm = [0u8; 42];
hk.expand(&info, &mut okm)
.expect("42 is a valid length for Sha256 to output");
let expected = hex!(
"
3cb25f25faacd57a90434f64d0362f2a
2d2d0a90cf1a5a4c5db02d56ecc4c5bf
34007208d5b887185865
"
);
assert_eq!(okm, expected);
}
// Test Vectors from https://tools.ietf.org/html/rfc5869.
#[rustfmt::skip]
///
pub fn test_rfc5869_sha256<C: CryptoProvider>(_: PhantomData<C>) {
let tests = [
Test {
// Test Case 1
ikm: &hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"),
salt: &hex!("000102030405060708090a0b0c"),
info: &hex!("f0f1f2f3f4f5f6f7f8f9"),
okm: &hex!("
3cb25f25faacd57a90434f64d0362f2a
2d2d0a90cf1a5a4c5db02d56ecc4c5bf | 34007208d5b887185865
"),
},
Test {
// Test Case 2
ikm: &hex!("
000102030405060708090a0b0c0d0e0f
101112131415161718191a1b1c1d1e1f
202122232425262728292a2b2c2d2e2f
303132333435363738393a3b3c3d3e3f
404142434445464748494a4b4c4d4e4f
"),
salt: &hex!("
606162636465666768696a6b6c6d6e6f
707172737475767778797a7b7c7d7e7f
808182838485868788898a8b8c8d8e8f
909192939495969798999a9b9c9d9e9f
a0a1a2a3a4a5a6a7a8a9aaabacadaeaf
"),
info: &hex!("
b0b1b2b3b4b5b6b7b8b9babbbcbdbebf
c0c1c2c3c4c5c6c7c8c9cacbcccdcecf
d0d1d2d3d4d5d6d7d8d9dadbdcdddedf
e0e1e2e3e4e5e6e7e8e9eaebecedeeef
f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff
"),
okm: &hex!("
b11e398dc80327a1c8e7f78c596a4934
4f012eda2d4efad8a050cc4c19afa97c
59045a99cac7827271cb41c65e590e09
da3275600c2f09b8367793a9aca3db71
cc30c58179ec3e87c14c01d5c1f3434f
1d87
"),
},
Test {
// Test Case 3
ikm: &hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"),
salt: &hex!(""),
info: &hex!(""),
okm: &hex!("
8da4e775a563c18f715f802a063c5a31
b8a11f5c5ee1879ec3454e5f3c738d2d
9d201395faa4b61a96c8
"),
},
];
for Test { ikm, salt, info, okm } in tests.iter() {
let salt = if salt.is_empty() {
None
} else {
Some(&salt[..])
};
let hkdf = C::HkdfSha256::new(salt, ikm);
let mut okm2 = vec![0u8; okm.len()];
assert!(hkdf.expand(&info[..], &mut okm2).is_ok());
assert_eq!(okm2[..], okm[..]);
}
}
///
pub fn test_lengths<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(None, &[]);
let mut longest = vec![0u8; MAX_SHA256_LENGTH];
assert!(hkdf.expand(&[], &mut longest).is_ok());
// Runtime is O(length), so exhaustively testing all legal lengths
// would take too long (at least without --release). Only test a
// subset: the first 500, the last 10, and every 100th in between.
// 0 is an invalid key length for openssl, so start at 1
let lengths = (1..MAX_SHA256_LENGTH + 1)
.filter(|&len| !(500..=MAX_SHA256_LENGTH - 10).contains(&len) || len % 100 == 0);
for length in lengths {
let mut okm = vec![0u8; length];
assert!(hkdf.expand(&[], &mut okm).is_ok());
assert_eq!(okm.len(), length);
assert_eq!(okm[..], longest[..length]);
}
}
///
pub fn test_max_length<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; MAX_SHA256_LENGTH];
assert!(hkdf.expand(&[], &mut okm).is_ok());
}
///
pub fn test_max_length_exceeded<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; MAX_SHA256_LENGTH + 1];
assert!(hkdf.expand(&[], &mut okm). | random_line_split |
|
hkdf.rs | <C: CryptoProvider>(#[case] testcase: CryptoProviderTestCase<C>) {}
const MAX_SHA256_LENGTH: usize = 255 * (256 / 8); // =8160
///
pub struct Test<'a> {
ikm: &'a [u8],
salt: &'a [u8],
info: &'a [u8],
okm: &'a [u8],
}
/// data taken from sample code in Readme of crates.io page
pub fn basic_test_hkdf<C: CryptoProvider>(_: PhantomData<C>) {
let ikm = hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b");
let salt = hex!("000102030405060708090a0b0c");
let info = hex!("f0f1f2f3f4f5f6f7f8f9");
let hk = C::HkdfSha256::new(Some(&salt[..]), &ikm);
let mut okm = [0u8; 42];
hk.expand(&info, &mut okm)
.expect("42 is a valid length for Sha256 to output");
let expected = hex!(
"
3cb25f25faacd57a90434f64d0362f2a
2d2d0a90cf1a5a4c5db02d56ecc4c5bf
34007208d5b887185865
"
);
assert_eq!(okm, expected);
}
// Test Vectors from https://tools.ietf.org/html/rfc5869.
#[rustfmt::skip]
///
pub fn test_rfc5869_sha256<C: CryptoProvider>(_: PhantomData<C>) {
let tests = [
Test {
// Test Case 1
ikm: &hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"),
salt: &hex!("000102030405060708090a0b0c"),
info: &hex!("f0f1f2f3f4f5f6f7f8f9"),
okm: &hex!("
3cb25f25faacd57a90434f64d0362f2a
2d2d0a90cf1a5a4c5db02d56ecc4c5bf
34007208d5b887185865
"),
},
Test {
// Test Case 2
ikm: &hex!("
000102030405060708090a0b0c0d0e0f
101112131415161718191a1b1c1d1e1f
202122232425262728292a2b2c2d2e2f
303132333435363738393a3b3c3d3e3f
404142434445464748494a4b4c4d4e4f
"),
salt: &hex!("
606162636465666768696a6b6c6d6e6f
707172737475767778797a7b7c7d7e7f
808182838485868788898a8b8c8d8e8f
909192939495969798999a9b9c9d9e9f
a0a1a2a3a4a5a6a7a8a9aaabacadaeaf
"),
info: &hex!("
b0b1b2b3b4b5b6b7b8b9babbbcbdbebf
c0c1c2c3c4c5c6c7c8c9cacbcccdcecf
d0d1d2d3d4d5d6d7d8d9dadbdcdddedf
e0e1e2e3e4e5e6e7e8e9eaebecedeeef
f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff
"),
okm: &hex!("
b11e398dc80327a1c8e7f78c596a4934
4f012eda2d4efad8a050cc4c19afa97c
59045a99cac7827271cb41c65e590e09
da3275600c2f09b8367793a9aca3db71
cc30c58179ec3e87c14c01d5c1f3434f
1d87
"),
},
Test {
// Test Case 3
ikm: &hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"),
salt: &hex!(""),
info: &hex!(""),
okm: &hex!("
8da4e775a563c18f715f802a063c5a31
b8a11f5c5ee1879ec3454e5f3c738d2d
9d201395faa4b61a96c8
"),
},
];
for Test { ikm, salt, info, okm } in tests.iter() {
let salt = if salt.is_empty() {
None
} else {
Some(&salt[..])
};
let hkdf = C::HkdfSha256::new(salt, ikm);
let mut okm2 = vec![0u8; okm.len()];
assert!(hkdf.expand(&info[..], &mut okm2).is_ok());
assert_eq!(okm2[..], okm[..]);
}
}
///
pub fn test_lengths<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(None, &[]);
let mut longest = vec![0u8; MAX_SHA256_LENGTH];
assert!(hkdf.expand(&[], &mut longest).is_ok());
// Runtime is O(length), so exhaustively testing all legal lengths
// would take too long (at least without --release). Only test a
// subset: the first 500, the last 10, and every 100th in between.
// 0 is an invalid key length for openssl, so start at 1
let lengths = (1..MAX_SHA256_LENGTH + 1)
.filter(|&len| !(500..=MAX_SHA256_LENGTH - 10).contains(&len) || len % 100 == 0);
for length in lengths {
let mut okm = vec![0u8; length];
assert!(hkdf.expand(&[], &mut okm).is_ok());
assert_eq!(okm.len(), length);
assert_eq!(okm[..], longest[..length]);
}
}
///
pub fn test_max_length<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; MAX_SHA256_LENGTH];
assert!(hkdf.expand(&[], &mut okm).is_ok());
}
///
pub fn test_max_length_exceeded<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha25 | hkdf_test_cases | identifier_name |
|
hkdf.rs | hex!("
b0b1b2b3b4b5b6b7b8b9babbbcbdbebf
c0c1c2c3c4c5c6c7c8c9cacbcccdcecf
d0d1d2d3d4d5d6d7d8d9dadbdcdddedf
e0e1e2e3e4e5e6e7e8e9eaebecedeeef
f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff
"),
okm: &hex!("
b11e398dc80327a1c8e7f78c596a4934
4f012eda2d4efad8a050cc4c19afa97c
59045a99cac7827271cb41c65e590e09
da3275600c2f09b8367793a9aca3db71
cc30c58179ec3e87c14c01d5c1f3434f
1d87
"),
},
Test {
// Test Case 3
ikm: &hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"),
salt: &hex!(""),
info: &hex!(""),
okm: &hex!("
8da4e775a563c18f715f802a063c5a31
b8a11f5c5ee1879ec3454e5f3c738d2d
9d201395faa4b61a96c8
"),
},
];
for Test { ikm, salt, info, okm } in tests.iter() {
let salt = if salt.is_empty() {
None
} else {
Some(&salt[..])
};
let hkdf = C::HkdfSha256::new(salt, ikm);
let mut okm2 = vec![0u8; okm.len()];
assert!(hkdf.expand(&info[..], &mut okm2).is_ok());
assert_eq!(okm2[..], okm[..]);
}
}
///
pub fn test_lengths<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(None, &[]);
let mut longest = vec![0u8; MAX_SHA256_LENGTH];
assert!(hkdf.expand(&[], &mut longest).is_ok());
// Runtime is O(length), so exhaustively testing all legal lengths
// would take too long (at least without --release). Only test a
// subset: the first 500, the last 10, and every 100th in between.
// 0 is an invalid key length for openssl, so start at 1
let lengths = (1..MAX_SHA256_LENGTH + 1)
.filter(|&len| !(500..=MAX_SHA256_LENGTH - 10).contains(&len) || len % 100 == 0);
for length in lengths {
let mut okm = vec![0u8; length];
assert!(hkdf.expand(&[], &mut okm).is_ok());
assert_eq!(okm.len(), length);
assert_eq!(okm[..], longest[..length]);
}
}
///
pub fn test_max_length<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; MAX_SHA256_LENGTH];
assert!(hkdf.expand(&[], &mut okm).is_ok());
}
///
pub fn test_max_length_exceeded<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; MAX_SHA256_LENGTH + 1];
assert!(hkdf.expand(&[], &mut okm).is_err());
}
///
pub fn test_unsupported_length<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; 90000];
assert!(hkdf.expand(&[], &mut okm).is_err());
}
///
pub fn test_expand_multi_info<C: CryptoProvider>(_: PhantomData<C>) {
let info_components = &[
&b"09090909090909090909090909090909090909090909"[..],
&b"8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a"[..],
&b"0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0"[..],
&b"4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4"[..],
&b"1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d"[..],
];
let hkdf = C::HkdfSha256::new(None, b"some ikm here");
// Compute HKDF-Expand on the concatenation of all the info components
let mut oneshot_res = [0u8; 16];
hkdf.expand(&info_components.concat(), &mut oneshot_res)
.unwrap();
// Now iteratively join the components of info_components until it's all 1 component. The value
// of HKDF-Expand should be the same throughout
let mut num_concatted = 0;
let mut info_head = Vec::new();
while num_concatted < info_components.len() {
info_head.extend(info_components[num_concatted]);
// Build the new input to be the info head followed by the remaining components
let input: Vec<&[u8]> = iter::once(info_head.as_slice())
.chain(info_components.iter().cloned().skip(num_concatted + 1))
.collect();
// Compute and compare to the one-shot answer
let mut multipart_res = [0u8; 16];
hkdf.expand_multi_info(&input, &mut multipart_res).unwrap();
assert_eq!(multipart_res, oneshot_res);
num_concatted += 1;
}
}
///
pub fn run_hkdf_sha256_vectors<C: CryptoProvider>(_: PhantomData<C>) {
run_hkdf_test_vectors::<C::HkdfSha256>(HashAlg::Sha256)
}
///
pub fn run_hkdf_sha512_vectors<C: CryptoProvider>(_: PhantomData<C>) {
run_hkdf_test_vectors::<C::HkdfSha512>(HashAlg::Sha512)
}
enum HashAlg {
Sha256,
Sha512,
}
///
fn run_hkdf_test_vectors<K: Hkdf>(hash: HashAlg) {
let test_name = match hash {
HashAlg::Sha256 => wycheproof::hkdf::TestName::HkdfSha256,
HashAlg::Sha512 => wycheproof::hkdf::TestName::HkdfSha512,
};
let test_set =
wycheproof::hkdf::TestSet::load(test_name).expect("should be able to load test set");
for test_group in test_set.test_groups {
for test in test_group.tests {
let ikm = test.ikm;
let salt = test.salt;
let info = test.info;
let okm = test.okm;
let tc_id = test.tc_id;
if let Some(desc) = run_test::<K>(
ikm.as_slice(),
salt.as_slice(),
info.as_slice(),
okm.as_slice(),
) | {
panic!(
"\n\
Failed test {tc_id}: {desc}\n\
ikm:\t{ikm:?}\n\
salt:\t{salt:?}\n\
info:\t{info:?}\n\
okm:\t{okm:?}\n"
);
} | conditional_block |
|
msg.rs | run,
10 => MinType,
i => UserDefined(i),
}
}
}
#[derive(Clone, Copy, Debug)]
enum Flags {
/// It is request message.
Request,
/// Multipart message, terminated by NLMSG_DONE
Multi,
/// Reply with ack, with zero or error code
Ack,
/// Echo this request
Echo,
}
impl Into<u16> for Flags {
fn into(self) -> u16 {
use self::Flags::*;
match self {
Request => 1,
Multi => 2,
Ack => 4,
Echo => 8,
}
}
}
/// Modifiers to GET request
#[derive(Clone, Copy, Debug)]
enum GetFlags {
/// specify tree root
Root,
/// return all matching
Match,
/// atomic GET
Atomic,
/// (Root|Match)
Dump,
}
impl Into<u16> for GetFlags {
fn into(self) -> u16 {
use self::GetFlags::*;
match self {
Root => 0x100,
Match => 0x200,
Atomic => 0x400,
Dump => 0x100 | 0x200,
}
}
}
/// Modifiers to NEW request
#[derive(Clone, Copy, Debug)]
enum NewFlags {
/// Override existing
Replace,
/// Do not touch, if it exists
Excl,
/// Create, if it does not exist
Create,
/// Add to end of list
Append,
}
impl Into<u16> for NewFlags {
fn into(self) -> u16 {
use self::NewFlags::*;
match self {
Replace => 0x100,
Excl => 0x200,
Create => 0x400,
Append => 0x800,
}
}
}
// HEADER FORMAT
// __u32 nlmsg_len; /* Length of message including header. */
// __u16 nlmsg_type; /* Type of message content. */
// __u16 nlmsg_flags; /* Additional flags. */
// __u32 nlmsg_seq; /* Sequence number. */
// __u32 nlmsg_pid; /* Sender port ID. */
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct NlMsgHeader {
msg_length: u32,
nl_type: u16,
flags: u16,
seq: u32,
pid: u32,
}
impl fmt::Debug for NlMsgHeader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f,
"<NlMsgHeader len={} {:?} flags=[ ",
self.msg_length,
MsgType::from(self.nl_type)));
// output readable flags
if self.flags & 1 != 0 {
try!(write!(f, "Request "));
}
if self.flags & 2 != 0 {
try!(write!(f, "Multi "));
}
if self.flags & 4 != 0 {
try!(write!(f, "Ack "));
}
if self.flags & 8 != 0 {
try!(write!(f, "Echo "));
}
if self.flags >> 4 != 0 {
try!(write!(f, "other({:#X})", self.flags));
}
try!(write!(f, "] seq={} pid={}>", self.seq, self.pid));
Ok(())
}
}
impl NlMsgHeader {
pub fn user_defined(t: u16, data_length: u32) -> NlMsgHeader {
let mut h = NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: t,
flags: Flags::Request.into(),
seq: 0,
pid: 0,
};
h.data_length(data_length);
h
}
pub fn request() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: MsgType::Request.into(),
flags: Flags::Request.into(),
seq: 0,
pid: 0,
}
}
pub fn done() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: MsgType::Done.into(),
flags: Flags::Multi.into(),
seq: 0,
pid: 0,
}
}
pub fn error() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_length(nlmsg_header_length() + 4) as u32, // nlmsgerr
nl_type: MsgType::Error.into(),
flags: 0,
seq: 0,
pid: 0,
}
}
pub fn from_bytes(bytes: &[u8]) -> io::Result<(NlMsgHeader, usize)> {
let mut cursor = Cursor::new(bytes);
let len = try!(cursor.read_u32::<NativeEndian>());
let nl_type = try!(cursor.read_u16::<NativeEndian>());
let flags = try!(cursor.read_u16::<NativeEndian>());
let seq = try!(cursor.read_u32::<NativeEndian>());
let pid = try!(cursor.read_u32::<NativeEndian>());
if len < nlmsg_header_length() as u32 {
Err(io::Error::new(ErrorKind::InvalidInput, "length smaller than msg header size"))
} else {
Ok((NlMsgHeader{
msg_length: len,
nl_type: nl_type,
flags: flags,
seq: seq,
pid: pid,
}, cursor.position() as usize))
}
}
pub fn bytes(&self) -> &[u8] {
let size = size_of::<NlMsgHeader>();
unsafe {
let head = self as *const NlMsgHeader as *const u8;
from_raw_parts(head, size)
}
}
pub fn msg_type(&self) -> MsgType {
self.nl_type.into()
}
pub fn msg_length(&self) -> u32 {
self.msg_length
}
/// Set message length
pub fn data_length(&mut self, len: u32) -> &mut NlMsgHeader {
self.msg_length = nlmsg_length(len as usize) as u32;
self
}
/// Multipart message
pub fn multipart(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Multi.into();
self
}
/// Request acknowledgement
pub fn ack(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Ack.into();
self
}
/// Echo message
pub fn echo(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Echo.into();
self
}
/// Set sequence number
pub fn seq(&mut self, n: u32) -> &mut NlMsgHeader {
self.seq = n;
self
}
/// Set PID number
pub fn pid(&mut self, n: u32) -> &mut NlMsgHeader {
self.pid = n;
self
}
/// Override existing
pub fn replace(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Replace.into();
self
}
/// Do not touch, if it exists
pub fn excl(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Excl.into();
self
}
/// Create, if it does not exist
pub fn create(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Create.into();
self
}
/// Add to end of list
pub fn append(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Append.into();
self
}
/// specify tree root
pub fn root(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Root.into();
self
}
/// return all matching
pub fn match_provided(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Match.into();
self
}
/// atomic GET
pub fn atomic(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Atomic.into();
self
}
/// (Root|Match)
pub fn | (&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Dump.into();
self
}
}
/*
http://linux.die.net/include/linux/netlink.h
/* Flags values */
#define NLM_F_REQUEST 1 /* It is request message. */
#define NLM_F_MULTI 2 /* Multipart message, terminated by NLMSG_DONE */
#define NLM_F_ACK 4 /* Reply with ack, with zero or error code */
#define NLM_F_ECHO 8 /* Echo this request */
/* Modifiers to GET request */
#define NLM_F_ROOT | dump | identifier_name |
msg.rs | run,
10 => MinType,
i => UserDefined(i),
}
}
}
#[derive(Clone, Copy, Debug)]
enum Flags {
/// It is request message.
Request,
/// Multipart message, terminated by NLMSG_DONE
Multi,
/// Reply with ack, with zero or error code
Ack,
/// Echo this request
Echo,
}
impl Into<u16> for Flags {
fn into(self) -> u16 {
use self::Flags::*;
match self {
Request => 1,
Multi => 2,
Ack => 4,
Echo => 8,
}
}
}
/// Modifiers to GET request
#[derive(Clone, Copy, Debug)]
enum GetFlags {
/// specify tree root
Root,
/// return all matching
Match,
/// atomic GET
Atomic,
/// (Root|Match)
Dump,
}
impl Into<u16> for GetFlags {
fn into(self) -> u16 {
use self::GetFlags::*;
match self {
Root => 0x100,
Match => 0x200,
Atomic => 0x400,
Dump => 0x100 | 0x200,
}
}
}
/// Modifiers to NEW request
#[derive(Clone, Copy, Debug)]
enum NewFlags {
/// Override existing
Replace,
/// Do not touch, if it exists
Excl,
/// Create, if it does not exist
Create,
/// Add to end of list
Append,
}
impl Into<u16> for NewFlags {
fn into(self) -> u16 {
use self::NewFlags::*;
match self {
Replace => 0x100,
Excl => 0x200,
Create => 0x400,
Append => 0x800,
}
}
}
// HEADER FORMAT
// __u32 nlmsg_len; /* Length of message including header. */
// __u16 nlmsg_type; /* Type of message content. */
// __u16 nlmsg_flags; /* Additional flags. */
// __u32 nlmsg_seq; /* Sequence number. */
// __u32 nlmsg_pid; /* Sender port ID. */
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct NlMsgHeader {
msg_length: u32, | nl_type: u16,
flags: u16,
seq: u32,
pid: u32,
}
impl fmt::Debug for NlMsgHeader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f,
"<NlMsgHeader len={} {:?} flags=[ ",
self.msg_length,
MsgType::from(self.nl_type)));
// output readable flags
if self.flags & 1 != 0 {
try!(write!(f, "Request "));
}
if self.flags & 2 != 0 {
try!(write!(f, "Multi "));
}
if self.flags & 4 != 0 {
try!(write!(f, "Ack "));
}
if self.flags & 8 != 0 {
try!(write!(f, "Echo "));
}
if self.flags >> 4 != 0 {
try!(write!(f, "other({:#X})", self.flags));
}
try!(write!(f, "] seq={} pid={}>", self.seq, self.pid));
Ok(())
}
}
impl NlMsgHeader {
pub fn user_defined(t: u16, data_length: u32) -> NlMsgHeader {
let mut h = NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: t,
flags: Flags::Request.into(),
seq: 0,
pid: 0,
};
h.data_length(data_length);
h
}
pub fn request() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: MsgType::Request.into(),
flags: Flags::Request.into(),
seq: 0,
pid: 0,
}
}
pub fn done() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: MsgType::Done.into(),
flags: Flags::Multi.into(),
seq: 0,
pid: 0,
}
}
pub fn error() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_length(nlmsg_header_length() + 4) as u32, // nlmsgerr
nl_type: MsgType::Error.into(),
flags: 0,
seq: 0,
pid: 0,
}
}
pub fn from_bytes(bytes: &[u8]) -> io::Result<(NlMsgHeader, usize)> {
let mut cursor = Cursor::new(bytes);
let len = try!(cursor.read_u32::<NativeEndian>());
let nl_type = try!(cursor.read_u16::<NativeEndian>());
let flags = try!(cursor.read_u16::<NativeEndian>());
let seq = try!(cursor.read_u32::<NativeEndian>());
let pid = try!(cursor.read_u32::<NativeEndian>());
if len < nlmsg_header_length() as u32 {
Err(io::Error::new(ErrorKind::InvalidInput, "length smaller than msg header size"))
} else {
Ok((NlMsgHeader{
msg_length: len,
nl_type: nl_type,
flags: flags,
seq: seq,
pid: pid,
}, cursor.position() as usize))
}
}
pub fn bytes(&self) -> &[u8] {
let size = size_of::<NlMsgHeader>();
unsafe {
let head = self as *const NlMsgHeader as *const u8;
from_raw_parts(head, size)
}
}
pub fn msg_type(&self) -> MsgType {
self.nl_type.into()
}
pub fn msg_length(&self) -> u32 {
self.msg_length
}
/// Set message length
pub fn data_length(&mut self, len: u32) -> &mut NlMsgHeader {
self.msg_length = nlmsg_length(len as usize) as u32;
self
}
/// Multipart message
pub fn multipart(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Multi.into();
self
}
/// Request acknowledgement
pub fn ack(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Ack.into();
self
}
/// Echo message
pub fn echo(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Echo.into();
self
}
/// Set sequence number
pub fn seq(&mut self, n: u32) -> &mut NlMsgHeader {
self.seq = n;
self
}
/// Set PID number
pub fn pid(&mut self, n: u32) -> &mut NlMsgHeader {
self.pid = n;
self
}
/// Override existing
pub fn replace(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Replace.into();
self
}
/// Do not touch, if it exists
pub fn excl(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Excl.into();
self
}
/// Create, if it does not exist
pub fn create(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Create.into();
self
}
/// Add to end of list
pub fn append(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Append.into();
self
}
/// specify tree root
pub fn root(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Root.into();
self
}
/// return all matching
pub fn match_provided(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Match.into();
self
}
/// atomic GET
pub fn atomic(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Atomic.into();
self
}
/// (Root|Match)
pub fn dump(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Dump.into();
self
}
}
/*
http://linux.die.net/include/linux/netlink.h
/* Flags values */
#define NLM_F_REQUEST 1 /* It is request message. */
#define NLM_F_MULTI 2 /* Multipart message, terminated by NLMSG_DONE */
#define NLM_F_ACK 4 /* Reply with ack, with zero or error code */
#define NLM_F_ECHO 8 /* Echo this request */
/* Modifiers to GET request */
#define NLM_F_ROOT | random_line_split |
|
msg.rs | run,
10 => MinType,
i => UserDefined(i),
}
}
}
#[derive(Clone, Copy, Debug)]
enum Flags {
/// It is request message.
Request,
/// Multipart message, terminated by NLMSG_DONE
Multi,
/// Reply with ack, with zero or error code
Ack,
/// Echo this request
Echo,
}
impl Into<u16> for Flags {
fn into(self) -> u16 {
use self::Flags::*;
match self {
Request => 1,
Multi => 2,
Ack => 4,
Echo => 8,
}
}
}
/// Modifiers to GET request
#[derive(Clone, Copy, Debug)]
enum GetFlags {
/// specify tree root
Root,
/// return all matching
Match,
/// atomic GET
Atomic,
/// (Root|Match)
Dump,
}
impl Into<u16> for GetFlags {
fn into(self) -> u16 {
use self::GetFlags::*;
match self {
Root => 0x100,
Match => 0x200,
Atomic => 0x400,
Dump => 0x100 | 0x200,
}
}
}
/// Modifiers to NEW request
#[derive(Clone, Copy, Debug)]
enum NewFlags {
/// Override existing
Replace,
/// Do not touch, if it exists
Excl,
/// Create, if it does not exist
Create,
/// Add to end of list
Append,
}
impl Into<u16> for NewFlags {
fn into(self) -> u16 {
use self::NewFlags::*;
match self {
Replace => 0x100,
Excl => 0x200,
Create => 0x400,
Append => 0x800,
}
}
}
// HEADER FORMAT
// __u32 nlmsg_len; /* Length of message including header. */
// __u16 nlmsg_type; /* Type of message content. */
// __u16 nlmsg_flags; /* Additional flags. */
// __u32 nlmsg_seq; /* Sequence number. */
// __u32 nlmsg_pid; /* Sender port ID. */
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct NlMsgHeader {
msg_length: u32,
nl_type: u16,
flags: u16,
seq: u32,
pid: u32,
}
impl fmt::Debug for NlMsgHeader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f,
"<NlMsgHeader len={} {:?} flags=[ ",
self.msg_length,
MsgType::from(self.nl_type)));
// output readable flags
if self.flags & 1 != 0 {
try!(write!(f, "Request "));
}
if self.flags & 2 != 0 {
try!(write!(f, "Multi "));
}
if self.flags & 4 != 0 {
try!(write!(f, "Ack "));
}
if self.flags & 8 != 0 {
try!(write!(f, "Echo "));
}
if self.flags >> 4 != 0 {
try!(write!(f, "other({:#X})", self.flags));
}
try!(write!(f, "] seq={} pid={}>", self.seq, self.pid));
Ok(())
}
}
impl NlMsgHeader {
pub fn user_defined(t: u16, data_length: u32) -> NlMsgHeader {
let mut h = NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: t,
flags: Flags::Request.into(),
seq: 0,
pid: 0,
};
h.data_length(data_length);
h
}
pub fn request() -> NlMsgHeader |
pub fn done() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: MsgType::Done.into(),
flags: Flags::Multi.into(),
seq: 0,
pid: 0,
}
}
pub fn error() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_length(nlmsg_header_length() + 4) as u32, // nlmsgerr
nl_type: MsgType::Error.into(),
flags: 0,
seq: 0,
pid: 0,
}
}
pub fn from_bytes(bytes: &[u8]) -> io::Result<(NlMsgHeader, usize)> {
let mut cursor = Cursor::new(bytes);
let len = try!(cursor.read_u32::<NativeEndian>());
let nl_type = try!(cursor.read_u16::<NativeEndian>());
let flags = try!(cursor.read_u16::<NativeEndian>());
let seq = try!(cursor.read_u32::<NativeEndian>());
let pid = try!(cursor.read_u32::<NativeEndian>());
if len < nlmsg_header_length() as u32 {
Err(io::Error::new(ErrorKind::InvalidInput, "length smaller than msg header size"))
} else {
Ok((NlMsgHeader{
msg_length: len,
nl_type: nl_type,
flags: flags,
seq: seq,
pid: pid,
}, cursor.position() as usize))
}
}
pub fn bytes(&self) -> &[u8] {
let size = size_of::<NlMsgHeader>();
unsafe {
let head = self as *const NlMsgHeader as *const u8;
from_raw_parts(head, size)
}
}
pub fn msg_type(&self) -> MsgType {
self.nl_type.into()
}
pub fn msg_length(&self) -> u32 {
self.msg_length
}
/// Set message length
pub fn data_length(&mut self, len: u32) -> &mut NlMsgHeader {
self.msg_length = nlmsg_length(len as usize) as u32;
self
}
/// Multipart message
pub fn multipart(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Multi.into();
self
}
/// Request acknowledgement
pub fn ack(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Ack.into();
self
}
/// Echo message
pub fn echo(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Echo.into();
self
}
/// Set sequence number
pub fn seq(&mut self, n: u32) -> &mut NlMsgHeader {
self.seq = n;
self
}
/// Set PID number
pub fn pid(&mut self, n: u32) -> &mut NlMsgHeader {
self.pid = n;
self
}
/// Override existing
pub fn replace(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Replace.into();
self
}
/// Do not touch, if it exists
pub fn excl(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Excl.into();
self
}
/// Create, if it does not exist
pub fn create(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Create.into();
self
}
/// Add to end of list
pub fn append(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Append.into();
self
}
/// specify tree root
pub fn root(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Root.into();
self
}
/// return all matching
pub fn match_provided(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Match.into();
self
}
/// atomic GET
pub fn atomic(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Atomic.into();
self
}
/// (Root|Match)
pub fn dump(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Dump.into();
self
}
}
/*
http://linux.die.net/include/linux/netlink.h
/* Flags values */
#define NLM_F_REQUEST 1 /* It is request message. */
#define NLM_F_MULTI 2 /* Multipart message, terminated by NLMSG_DONE */
#define NLM_F_ACK 4 /* Reply with ack, with zero or error code */
#define NLM_F_ECHO 8 /* Echo this request */
/* Modifiers to GET request */
#define NLM_F | {
NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: MsgType::Request.into(),
flags: Flags::Request.into(),
seq: 0,
pid: 0,
}
} | identifier_body |
model_infer.py |
sigmoid_loss = FLAGS.sigmoid
identity_dim = FLAGS.identity_dim
class SupervisedGraphsage(object):
"""Implementation | self.activations = []
self.inputs = None
self.outputs = None
self.loss = 0
self.accuracy = 0
self.optimizer = None
self.opt_op = None
# === set aggregator ===
# 增加了两个cross, cross geniepath
if aggregator_type == 'cross':
self.aggregator_cls = CrossAggregator
else:
raise Exception("Unknown aggregator: ", aggregator_type)
self.input_dim = feats_dim
self.output_dim = num_classes # 2
# self.sampler = sampler
# self.adj_info = adj
self.layer_infos = layer_infos
self.concat = concat
self.model_size = model_size
self.sigmoid_loss = sigmoid_loss
self.dims = [(self.input_dim) + identity_dim]
self.dims.extend([layer_infos[i].output_dim for i in range(len(layer_infos))])# 102, 64, 32
self.aggregator_type = aggregator_type
# === get info from placeholders ===
# get info from placeholders...
self.placeholders = self.construct_placeholders(self.input_dim, self.output_dim)
# self.labels = self.placeholders['labels']
# self.batch_nodes = placeholders["batch_nodes"]
self.batch_size = self.placeholders["batch_size"]
# self.support_size = placeholders['support_size']
# self.features = placeholders['features']
sampled_weight = [self.placeholders['sampled_weight_0'],
self.placeholders['sampled_weight_1'],
self.placeholders['sampled_weight_2']]
sampled_column = [self.placeholders['sampled_column_0'],
self.placeholders['sampled_column_1'],
self.placeholders['sampled_column_2']]
sampled_feats = [self.placeholders['sampled_feats_0'],
self.placeholders['sampled_feats_1'],
self.placeholders['sampled_feats_2']]
self.data_sampled = [sampled_feats, sampled_weight, sampled_column]
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
self.var_list = tf.trainable_variables()
self.saver = tf.train.Saver(var_list=self.var_list)
self.sess = tf.Session(config=sess_config)
self.sess.run(tf.global_variables_initializer())
self.load(self.sess)
def construct_placeholders(self, num_classes, feats_dim):
# Define placeholders
# 这里的key 是供 model init 用的
# feed_dict = {placeholders: data}
placeholders = {
# 'features': tf.placeholder(tf.float32, shape=(None, feats_dim)),
# 'labels': tf.placeholder(tf.float32, shape=(None, num_classes), name='labels'),
# 'batch_nodes': tf.placeholder(tf.int32, shape=(None), name='batch_nodes'),
'batch_size': tf.placeholder(tf.int32, name='batch_size'),
'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
'sampled_weight_0': tf.placeholder(tf.float32, name='sampled_weight_0'),
'sampled_column_0': tf.placeholder(tf.int32, name='sampled_column_0'),
'sampled_feats_0': tf.placeholder(tf.float32, name='sampled_feats_0'),
'sampled_weight_1': tf.placeholder(tf.float32, name='sampled_weight_1'),
'sampled_column_1': tf.placeholder(tf.int32, name='sampled_column_1'),
'sampled_feats_1': tf.placeholder(tf.float32, name='sampled_feats_1'),
'sampled_weight_2': tf.placeholder(tf.float32, name='sampled_weight_2'),
'sampled_column_2': tf.placeholder(tf.int32, name='sampled_column_2'),
'sampled_feats_2': tf.placeholder(tf.float32, name='sampled_feats_2')
}
return placeholders
# === build computation graph ===
def build(self):
# data_sampled, support_sizes = self.sample(self.batch_nodes, self.layer_infos)
support_size = 1 # [1, 8, 8*16]
support_sizes = [support_size]
for k in range(len(self.layer_infos)):
t = len(self.layer_infos) - k -1
support_size *= self.layer_infos[t].num_samples
support_sizes.append(support_size)
sample_size = [layer_info.num_samples for layer_info in self.layer_infos] # 16, 8
self.outputs, self.aggregators = self.aggregate(
self.data_sampled, self.dims, sample_size,
support_sizes, concat=self.concat, model_size=self.model_size)
# data_sampled, [self.features], self.dims, num_samples,
# support_sizes, concat=self.concat, model_size=self.model_size)
self.outputs = tf.nn.l2_normalize(self.outputs, 1)
dim_mult = 2 if self.concat else 1
self.node_pred = layers.Dense(dim_mult*self.dims[-1], self.output_dim,
dropout=self.placeholders['dropout'],
act=lambda x : x) # no non-linear activation
# TF graph management
self.node_preds = self.node_pred(self.outputs)
# self._loss()
# 不进行梯度修建
# grads_and_vars = self.optimizer.compute_gradients(self.loss)
# clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
# for grad, var in grads_and_vars]
# self.grad, _ = clipped_grads_and_vars[0]
# self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
# self.opt_op = self.optimizer.minimize(self.loss)
self._predict()
def aggregate(self, data_sampled, dims, num_samples, support_sizes, batch_size=None,
aggregators=None, name='aggregate', concat=False, model_size="small"):
if batch_size is None:
batch_size = self.batch_size
# length: number of layers + 1
# hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples]
feats_hidden = data_sampled[0] # 根据index取feats
weight_hidden = data_sampled[1]
column_hidden = data_sampled[2]
# feats_hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples[0]] # 根据index取feats
# feats_hidden = [feat_samples for feat_samples in data_sampled[0]] # 根据index取feats
# weight_hidden = [weight_samples for weight_samples in data_sampled[1]]
# column_hidden = [column_samples for column_samples in data_sampled[2]]
new_agg = aggregators is None
if new_agg:
aggregators = []
# c_list = [] # 增加
for layer in range(len(num_samples)):
if new_agg:
dim_mult = 2 if concat and (layer != 0) else 1
# aggregator at current layer
if layer == len(num_samples) - 1: # 2*64, 32
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1], act=lambda x : x, # no non-linear activation
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
else: # 这里aggregator.__init__() # 106 -> 64
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1],
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
aggregators.append(aggregator)
else:
aggregator = aggregators[layer]
# hidden representation at current layer for all support nodes that are various hops away
next_hidden = []
# as layer increases, the number of support nodes needed decreases
for hop in range(len(num_samples) - layer):
dim_mult = 2 if concat and (layer != 0) else 1
neigh_dims = [batch_size * support_sizes[hop], # 1, 8; 1
num_samples[len(num_samples) - hop - 1], # 8, 16; 8
dim_mult*dims[layer]] # 106, 106; 2 * 64
weight_neigh_dims = [batch_size * support_sizes[hop],
num | of supervised GraphSAGE."""
def __init__(self, **kwargs):
# === from model.py ===
allowed_kwargs = {'name', 'logging', 'model_size'}
for kwarg in kwargs.keys():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
name = self.__class__.__name__.lower()
self.name = name
# logging = kwargs.get('logging', False)
# self.logging = logging
self.vars = {}
# self.placeholders = {}
self.layers = [] | identifier_body |
model_infer.py | .learning_rate)
self.build()
self.var_list = tf.trainable_variables()
self.saver = tf.train.Saver(var_list=self.var_list)
self.sess = tf.Session(config=sess_config)
self.sess.run(tf.global_variables_initializer())
self.load(self.sess)
def construct_placeholders(self, num_classes, feats_dim):
# Define placeholders
# 这里的key 是供 model init 用的
# feed_dict = {placeholders: data}
placeholders = {
# 'features': tf.placeholder(tf.float32, shape=(None, feats_dim)),
# 'labels': tf.placeholder(tf.float32, shape=(None, num_classes), name='labels'),
# 'batch_nodes': tf.placeholder(tf.int32, shape=(None), name='batch_nodes'),
'batch_size': tf.placeholder(tf.int32, name='batch_size'),
'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
'sampled_weight_0': tf.placeholder(tf.float32, name='sampled_weight_0'),
'sampled_column_0': tf.placeholder(tf.int32, name='sampled_column_0'),
'sampled_feats_0': tf.placeholder(tf.float32, name='sampled_feats_0'),
'sampled_weight_1': tf.placeholder(tf.float32, name='sampled_weight_1'),
'sampled_column_1': tf.placeholder(tf.int32, name='sampled_column_1'),
'sampled_feats_1': tf.placeholder(tf.float32, name='sampled_feats_1'),
'sampled_weight_2': tf.placeholder(tf.float32, name='sampled_weight_2'),
'sampled_column_2': tf.placeholder(tf.int32, name='sampled_column_2'),
'sampled_feats_2': tf.placeholder(tf.float32, name='sampled_feats_2')
}
return placeholders
# === build computation graph ===
def build(self):
# data_sampled, support_sizes = self.sample(self.batch_nodes, self.layer_infos)
support_size = 1 # [1, 8, 8*16]
support_sizes = [support_size]
for k in range(len(self.layer_infos)):
t = len(self.layer_infos) - k -1
support_size *= self.layer_infos[t].num_samples
support_sizes.append(support_size)
sample_size = [layer_info.num_samples for layer_info in self.layer_infos] # 16, 8
self.outputs, self.aggregators = self.aggregate(
self.data_sampled, self.dims, sample_size,
support_sizes, concat=self.concat, model_size=self.model_size)
# data_sampled, [self.features], self.dims, num_samples,
# support_sizes, concat=self.concat, model_size=self.model_size)
self.outputs = tf.nn.l2_normalize(self.outputs, 1)
dim_mult = 2 if self.concat else 1
self.node_pred = layers.Dense(dim_mult*self.dims[-1], self.output_dim,
dropout=self.placeholders['dropout'],
act=lambda x : x) # no non-linear activation
# TF graph management
self.node_preds = self.node_pred(self.outputs)
# self._loss()
# 不进行梯度修建
# grads_and_vars = self.optimizer.compute_gradients(self.loss)
# clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
# for grad, var in grads_and_vars]
# self.grad, _ = clipped_grads_and_vars[0]
# self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
# self.opt_op = self.optimizer.minimize(self.loss)
self._predict()
def aggregate(self, data_sampled, dims, num_samples, support_sizes, batch_size=None,
aggregators=None, name='aggregate', concat=False, model_size="small"):
if batch_size is None:
batch_size = self.batch_size
# length: number of layers + 1
# hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples]
feats_hidden = data_sampled[0] # 根据index取feats
weight_hidden = data_sampled[1]
column_hidden = data_sampled[2]
# feats_hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples[0]] # 根据index取feats
# feats_hidden = [feat_samples for feat_samples in data_sampled[0]] # 根据index取feats
# weight_hidden = [weight_samples for weight_samples in data_sampled[1]]
# column_hidden = [column_samples for column_samples in data_sampled[2]]
new_agg = aggregators is None
if new_agg:
aggregators = []
# c_list = [] # 增加
for layer in range(len(num_samples)):
if new_agg:
dim_mult = 2 if concat and (layer != 0) else 1
# aggregator at current layer
if layer == len(num_samples) - 1: # 2*64, 32
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1], act=lambda x : x, # no non-linear activation
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
else: # 这里aggregator.__init__() # 106 -> 64
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1],
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
aggregators.append(aggregator)
else:
aggregator = aggregators[layer]
# hidden representation at current layer for all support nodes that are various hops away
next_hidden = []
# as layer increases, the number of support nodes needed decreases
for hop in range(len(num_samples) - layer):
dim_mult = 2 if concat and (layer != 0) else 1
neigh_dims = [batch_size * support_sizes[hop], # 1, 8; 1
num_samples[len(num_samples) - hop - 1], # 8, 16; 8
dim_mult*dims[layer]] # 106, 106; 2 * 64
weight_neigh_dims = [batch_size * support_sizes[hop],
num_samples[len(num_samples)- hop -1],
1]
# h = aggregator((hidden[hop],
# tf.reshape(hidden[hop + 1], neigh_dims)))
# call aggregator
# self_vecs, neigh_vecs, neigh_weight, neigh_column
h = aggregator((
feats_hidden[hop],
tf.reshape(feats_hidden[hop + 1], neigh_dims), # [1,8,106], [8, 16, 106], [1, 8, 2*64]
tf.reshape(weight_hidden[hop + 1], weight_neigh_dims),
tf.reshape(column_hidden[hop + 1], weight_neigh_dims)))
next_hidden.append(h)
feats_hidden = next_hidden
#self.hiddenOutput.append(hidden[0])
return feats_hidden[0], aggregators
# def _loss(self):
# # Weight decay loss
# for aggregator in self.aggregators:
# for var in aggregator.vars.values():
# self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# for var in self.node_pred.vars.values():
# self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# # classification loss
# if self.sigmoid_loss:
# self.loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
# logits=self.node_preds,
# labels=self.labels))
# else:
# # 变成v2
# self.loss += tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
# logits=self.node_preds,
# labels=self.labels))
# # tf.summary.scalar('loss', self.loss)
def _predict(self):
if self.sigmoid_loss:
self.preds = tf.nn.sigmoid(self.node_preds)
else:
self.preds = tf.nn.softmax(self.node_preds)
# === 以上是计算图部分 ===
def predict(self, feed_dict):
preds = self.sess.run([self.preds],
feed_dict=feed_dict)
return preds
def close_sess(self):
self.sess.close()
# def save(self, sess=None):
# if not sess:
# raise AttributeError("TensorFlow session not provided.")
# saver = tf.train.Saver(var_list=self.var_list)
# save_path = "./data/model/%s.ckpt" %(self.aggregator_type)
# saver.restore(sess, save_path)
# print("Model saved in file: %s" % save_path)
def load(self, sess=None):
if not sess:
raise AttributeError("TensorFlow session not provided.")
# saver = tf.train.Saver(reshape=True)
| # saver = tf.train.Saver(var_list=self.var_list) | random_line_split |
|
model_infer.py | # self.support_size = placeholders['support_size']
# self.features = placeholders['features']
sampled_weight = [self.placeholders['sampled_weight_0'],
self.placeholders['sampled_weight_1'],
self.placeholders['sampled_weight_2']]
sampled_column = [self.placeholders['sampled_column_0'],
self.placeholders['sampled_column_1'],
self.placeholders['sampled_column_2']]
sampled_feats = [self.placeholders['sampled_feats_0'],
self.placeholders['sampled_feats_1'],
self.placeholders['sampled_feats_2']]
self.data_sampled = [sampled_feats, sampled_weight, sampled_column]
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
self.var_list = tf.trainable_variables()
self.saver = tf.train.Saver(var_list=self.var_list)
self.sess = tf.Session(config=sess_config)
self.sess.run(tf.global_variables_initializer())
self.load(self.sess)
def construct_placeholders(self, num_classes, feats_dim):
# Define placeholders
# 这里的key 是供 model init 用的
# feed_dict = {placeholders: data}
placeholders = {
# 'features': tf.placeholder(tf.float32, shape=(None, feats_dim)),
# 'labels': tf.placeholder(tf.float32, shape=(None, num_classes), name='labels'),
# 'batch_nodes': tf.placeholder(tf.int32, shape=(None), name='batch_nodes'),
'batch_size': tf.placeholder(tf.int32, name='batch_size'),
'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
'sampled_weight_0': tf.placeholder(tf.float32, name='sampled_weight_0'),
'sampled_column_0': tf.placeholder(tf.int32, name='sampled_column_0'),
'sampled_feats_0': tf.placeholder(tf.float32, name='sampled_feats_0'),
'sampled_weight_1': tf.placeholder(tf.float32, name='sampled_weight_1'),
'sampled_column_1': tf.placeholder(tf.int32, name='sampled_column_1'),
'sampled_feats_1': tf.placeholder(tf.float32, name='sampled_feats_1'),
'sampled_weight_2': tf.placeholder(tf.float32, name='sampled_weight_2'),
'sampled_column_2': tf.placeholder(tf.int32, name='sampled_column_2'),
'sampled_feats_2': tf.placeholder(tf.float32, name='sampled_feats_2')
}
return placeholders
# === build computation graph ===
def build(self):
# data_sampled, support_sizes = self.sample(self.batch_nodes, self.layer_infos)
support_size = 1 # [1, 8, 8*16]
support_sizes = [support_size]
for k in range(len(self.layer_infos)):
t = len(self.layer_infos) - k -1
support_size *= self.layer_infos[t].num_samples
support_sizes.append(support_size)
sample_size = [layer_info.num_samples for layer_info in self.layer_infos] # 16, 8
self.outputs, self.aggregators = self.aggregate(
self.data_sampled, self.dims, sample_size,
support_sizes, concat=self.concat, model_size=self.model_size)
# data_sampled, [self.features], self.dims, num_samples,
# support_sizes, concat=self.concat, model_size=self.model_size)
self.outputs = tf.nn.l2_normalize(self.outputs, 1)
dim_mult = 2 if self.concat else 1
self.node_pred = layers.Dense(dim_mult*self.dims[-1], self.output_dim,
dropout=self.placeholders['dropout'],
act=lambda x : x) # no non-linear activation
# TF graph management
self.node_preds = self.node_pred(self.outputs)
# self._loss()
# 不进行梯度修建
# grads_and_vars = self.optimizer.compute_gradients(self.loss)
# clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
# for grad, var in grads_and_vars]
# self.grad, _ = clipped_grads_and_vars[0]
# self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
# self.opt_op = self.optimizer.minimize(self.loss)
self._predict()
def aggregate(self, data_sampled, dims, num_samples, support_sizes, batch_size=None,
aggregators=None, name='aggregate', concat=False, model_size="small"):
if batch_size is None:
batch_size = self.batch_size
# length: number of layers + 1
# hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples]
feats_hidden = data_sampled[0] # 根据index取feats
weight_hidden = data_sampled[1]
column_hidden = data_sampled[2]
# feats_hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples[0]] # 根据index取feats
# feats_hidden = [feat_samples for feat_samples in data_sampled[0]] # 根据index取feats
# weight_hidden = [weight_samples for weight_samples in data_sampled[1]]
# column_hidden = [column_samples for column_samples in data_sampled[2]]
new_agg = aggregators is None
if new_agg:
aggregators = []
# c_list = [] # 增加
for layer in range(len(num_samples)):
if new_agg:
dim_mult = 2 if concat and (layer != 0) else 1
# aggregator at current layer
if layer == len(num_samples) - 1: # 2*64, 32
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1], act=lambda x : x, # no non-linear activation
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
else: # 这里aggregator.__init__() # 106 -> 64
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1],
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
aggregators.append(aggregator)
else:
aggregator = aggregators[layer]
# hidden representation at current layer for all support nodes that are various hops away
next_hidden = []
# as layer increases, the number of support nodes needed decreases
for hop in range(len(num_samples) - layer):
dim_mult = 2 if concat and (layer != 0) else 1
neigh_dims = [batch_size * support_sizes[hop], # 1, 8; 1
num_samples[len(num_samples) - hop - 1], # 8, 16; 8
dim_mult*dims[layer]] # 106, 106; 2 * 64
weight_neigh_dims = [batch_size * support_sizes[hop],
num_samples[len(num_samples)- hop -1],
1]
# h = aggregator((hidden[hop],
# tf.reshape(hidden[hop + 1], neigh_dims)))
# call aggregator
# self_vecs, neigh_vecs, neigh_weight, neigh_column
h = aggregator((
feats_hidden[hop],
tf.reshape(feats_hidden[hop + 1], neigh_dims), # [1,8,106], [8, 16, 106], [1, 8, 2*64]
tf.reshape(weight_hidden[hop + 1], weight_neigh_dims),
tf.reshape(column_hidden[hop + 1], weight_neigh_dims)))
next_hidden.append(h)
feats_hidden = next_hidden
#self.hiddenOutput.append(hidden[0])
return feats_hidden[0], aggregators
# def _loss(self):
# # Weight decay loss
# for aggregator in self.aggregators:
# for var in aggregator.vars.values():
# self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# for var in self.node_pred.vars.values():
# self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# # classification loss
# if self.sigmoid_loss:
# self.loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
# logits=self.node_preds,
# labels=self.labels))
# else:
# # 变成v2
# self.loss += tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
# logits=self.node_preds,
# labels=self.labels))
# # tf.summary.scalar('loss', self.loss)
def _predict(self):
if self.sigmoid_loss:
self.preds = tf.nn.sigmoid(self.node_preds)
else:
self.preds = tf.nn.softmax(self.node_preds)
# === 以上是计算图部分 ===
def predict(s | elf, feed_dict):
preds = self.sess. | conditional_block |
|
model_infer.py |
sigmoid_loss = FLAGS.sigmoid
identity_dim = FLAGS.identity_dim
class SupervisedGraphsage(object):
"""Implementation of supervised GraphSAGE."""
def __init__(self, **kwargs):
# === from model.py ===
allowed_kwargs = {'name', 'logging', 'model_size'}
for kwarg in kwargs.keys():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
name = self.__class__.__name__.lower()
self.name = name
# logging = kwargs.get('logging', False)
# self.logging = logging
self.vars = {}
# self.placeholders = {}
self.layers = []
self.activations = []
self.inputs = None
self.outputs = None
self.loss = 0
self.accuracy = 0
self.optimizer = None
self.opt_op = None
# === set aggregator ===
# 增加了两个cross, cross geniepath
if aggregator_type == 'cross':
self.aggregator_cls = CrossAggregator
else:
raise Exception("Unknown aggregator: ", aggregator_type)
self.input_dim = feats_dim
self.output_dim = num_classes # 2
# self.sampler = sampler
# self.adj_info = adj
self.layer_infos = layer_infos
self.concat = concat
self.model_size = model_size
self.sigmoid_loss = sigmoid_loss
self.dims = [(self.input_dim) + identity_dim]
self.dims.extend([layer_infos[i].output_dim for i in range(len(layer_infos))])# 102, 64, 32
self.aggregator_type = aggregator_type
# === get info from placeholders ===
# get info from placeholders...
self.placeholders = self.construct_placeholders(self.input_dim, self.output_dim)
# self.labels = self.placeholders['labels']
# self.batch_nodes = placeholders["batch_nodes"]
self.batch_size = self.placeholders["batch_size"]
# self.support_size = placeholders['support_size']
# self.features = placeholders['features']
sampled_weight = [self.placeholders['sampled_weight_0'],
self.placeholders['sampled_weight_1'],
self.placeholders['sampled_weight_2']]
sampled_column = [self.placeholders['sampled_column_0'],
self.placeholders['sampled_column_1'],
self.placeholders['sampled_column_2']]
sampled_feats = [self.placeholders['sampled_feats_0'],
self.placeholders['sampled_feats_1'],
self.placeholders['sampled_feats_2']]
self.data_sampled = [sampled_feats, sampled_weight, sampled_column]
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
self.var_list = tf.trainable_variables()
self.saver = tf.train.Saver(var_list=self.var_list)
self.sess = tf.Session(config=sess_config)
self.sess.run(tf.global_variables_initializer())
self.load(self.sess)
def construct_placeholders(self, num_classes, feats_dim):
# Define placeholders
# 这里的key 是供 model init 用的
# feed_dict = {placeholders: data}
placeholders = {
# 'features': tf.placeholder(tf.float32, shape=(None, feats_dim)),
# 'labels': tf.placeholder(tf.float32, shape=(None, num_classes), name='labels'),
# 'batch_nodes': tf.placeholder(tf.int32, shape=(None), name='batch_nodes'),
'batch_size': tf.placeholder(tf.int32, name='batch_size'),
'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
'sampled_weight_0': tf.placeholder(tf.float32, name='sampled_weight_0'),
'sampled_column_0': tf.placeholder(tf.int32, name='sampled_column_0'),
'sampled_feats_0': tf.placeholder(tf.float32, name='sampled_feats_0'),
'sampled_weight_1': tf.placeholder(tf.float32, name='sampled_weight_1'),
'sampled_column_1': tf.placeholder(tf.int32, name='sampled_column_1'),
'sampled_feats_1': tf.placeholder(tf.float32, name='sampled_feats_1'),
'sampled_weight_2': tf.placeholder(tf.float32, name='sampled_weight_2'),
'sampled_column_2': tf.placeholder(tf.int32, name='sampled_column_2'),
'sampled_feats_2': tf.placeholder(tf.float32, name='sampled_feats_2')
}
return placeholders
# === build computation graph ===
def build(self):
# data_sampled, support_sizes = self.sample(self.batch_nodes, self.layer_infos)
support_size = 1 # [1, 8, 8*16]
support_sizes = [support_size]
for k in range(len(self.layer_infos)):
t = len(self.layer_infos) - k -1
support_size *= self.layer_infos[t].num_samples
support_sizes.append(support_size)
sample_size = [layer_info.num_samples for layer_info in self.layer_infos] # 16, 8
self.outputs, self.aggregators = self.aggregate(
self.data_sampled, self.dims, sample_size,
support_sizes, concat=self.concat, model_size=self.model_size)
# data_sampled, [self.features], self.dims, num_samples,
# support_sizes, concat=self.concat, model_size=self.model_size)
self.outputs = tf.nn.l2_normalize(self.outputs, 1)
dim_mult = 2 if self.concat else 1
self.node_pred = layers.Dense(dim_mult*self.dims[-1], self.output_dim,
dropout=self.placeholders['dropout'],
act=lambda x : x) # no non-linear activation
# TF graph management
self.node_preds = self.node_pred(self.outputs)
# self._loss()
# 不进行梯度修建
# grads_and_vars = self.optimizer.compute_gradients(self.loss)
# clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
# for grad, var in grads_and_vars]
# self.grad, _ = clipped_grads_and_vars[0]
# self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
# self.opt_op = self.optimizer.minimize(self.loss)
self._predict()
def aggregate(self, data_sampled, dims, num_samples, support | atch_size=None,
aggregators=None, name='aggregate', concat=False, model_size="small"):
if batch_size is None:
batch_size = self.batch_size
# length: number of layers + 1
# hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples]
feats_hidden = data_sampled[0] # 根据index取feats
weight_hidden = data_sampled[1]
column_hidden = data_sampled[2]
# feats_hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples[0]] # 根据index取feats
# feats_hidden = [feat_samples for feat_samples in data_sampled[0]] # 根据index取feats
# weight_hidden = [weight_samples for weight_samples in data_sampled[1]]
# column_hidden = [column_samples for column_samples in data_sampled[2]]
new_agg = aggregators is None
if new_agg:
aggregators = []
# c_list = [] # 增加
for layer in range(len(num_samples)):
if new_agg:
dim_mult = 2 if concat and (layer != 0) else 1
# aggregator at current layer
if layer == len(num_samples) - 1: # 2*64, 32
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1], act=lambda x : x, # no non-linear activation
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
else: # 这里aggregator.__init__() # 106 -> 64
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1],
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
aggregators.append(aggregator)
else:
aggregator = aggregators[layer]
# hidden representation at current layer for all support nodes that are various hops away
next_hidden = []
# as layer increases, the number of support nodes needed decreases
for hop in range(len(num_samples) - layer):
dim_mult = 2 if concat and (layer != 0) else 1
neigh_dims = [batch_size * support_sizes[hop], # 1, 8; 1
num_samples[len(num_samples) - hop - 1], # 8, 16; 8
dim_mult*dims[layer]] # 106, 106; 2 * 64
weight_neigh_dims = [batch_size * support_sizes[hop],
num | _sizes, b | identifier_name |
plot2DlimitsAll.py | 2HDM}"
if model=="BARY": txt1 = "#bf{Baryonic Z'}"
txt1 += "#bf{, Z' #rightarrow DM + h"
if which=='gg': txt1 += "(#gamma#gamma)}"
if which=='tt': txt1 += "(#tau#tau)} "
if which=='combo': txt1 += "(#gamma#gamma + #tau#tau)}"
if model=="2HDM": txt2 = "#bf{Dirac DM, m_{DM} = 100 GeV}"
if model=="BARY": txt2 = "#bf{Dirac DM, g_{q} = 0.25, g_{DM} = 1.0 }"
if model=="2HDM": txt3 = "#bf{g_{Z'} = 0.8, g_{DM} = 1.0}"
if model=="BARY": txt3 = ""
txt = ROOT.TPaveText(x1,y1+0.15,x2,y2,"NDC")
txt.AddText(txt1)
txt.AddText(txt2)
txt.AddText(txt3)
txt.SetTextAlign(12)
txt.SetTextSize(0.04)
txt.Draw("SAME")
# --- legend
if model=="2HDM": leg = ROOT.TLegend(x1,y1,x2,y1+0.15)
if model=="BARY": leg = ROOT.TLegend(x1,y1+0.05,x2,y1+0.2)
leg.SetBorderSize(0)
leg.SetTextFont(42)
leg.SetTextSize(0.040)
leg.AddEntry(limitPlotObs,"Observed 95% CL","L")
leg.AddEntry(limitPlot,"Expected 95% CL","L")
leg.AddEntry(limitPlotUp,"#pm 1 s.d.","L")
leg.Draw()
canv.cd()
canv.Update()
CMS_lumi(canv,4,0)
canv.RedrawAxis()
canv.Update()
# --- save
outname = outdir+'contours_'
if do90: outname += '90CL'
else: outname += '95CL'
outname += '_'+model+'_'+which+'.root'
outfile = ROOT.TFile(outname,'RECREATE')
outfile.cd()
limitPlot.Write()
limitPlotObs.Write()
if do90:
canv.Print(outdir+'limits2D_'+model+'_'+which+'_90CL.pdf')
canv.Print(outdir+'limits2D_'+model+'_'+which+'_90CL.png')
else:
canv.Print(outdir+'limits2D_'+model+'_'+which+'.pdf')
canv.Print(outdir+'limits2D_'+model+'_'+which+'.png')
def fillAvg(limitPlot,A,Z,doL,doH,doR):
# --- ordering for each option
irange = range(1,limitPlot.GetNbinsY()+1)
jrange = range(1,limitPlot.GetNbinsX()+1)
if doL: jrange = list(reversed(range(1,limitPlot.GetNbinsX()+1)))
if doH: irange = list(reversed(range(1,limitPlot.GetNbinsY()+1)))
# --- average over 4 adjacent bins
for i in irange:
for j in jrange:
aVal = A[i-1]
zVal = Z[j-1]
binVal = str(limitPlot.GetBinContent(j,i))
# --- only if bin is 0 do averaging
if binVal == "0.0" and ((doL and 2*float(aVal) < float(zVal)) or (doH and 2*float(aVal) > float(zVal)) or (doR)):
avg = 0.0
div = 0.0
back = limitPlot.GetBinContent(j-1,i)
forw = limitPlot.GetBinContent(j+1,i)
down = limitPlot.GetBinContent(j,i-1)
abov = limitPlot.GetBinContent(j,i+1)
if back != 0.0 and ((doL and back < 50.) or (doH and back > 50.) or (doR)):
avg += back
div += 1
if forw != 0.0 and ((doL and forw < 50.) or (doH and forw > 50.) or (doR)):
avg += forw
div += 1
if down != 0.0 and ((doL and down < 50.) or (doH and down > 50.) or (doR)):
avg += down
div += 1
if abov != 0.0 and ((doL and abov < 50.) or (doH and abov > 50.) or (doR)):
avg += abov
div += 1
if div != 0:
avg = avg/div
limitPlot.SetBinContent(j,i,avg)
def InvertPalette():
# --- Function to make inverted kBird palette
alpha=1
stops = array('d', ([ 0.0000, 0.1250, 0.2500, 0.3750, 0.5000, 0.6250, 0.7500, 0.8750, 1.0000]) )
red = array('d', ([ 0.2082, 0.0592, 0.0780, 0.0232, 0.1802, 0.5301, 0.8186, 0.9956, 0.9764]) )
green = array('d', ([ 0.1664, 0.3599, 0.5041, 0.6419, 0.7178, 0.7492, 0.7328, 0.7862, 0.9832]) )
blue = array('d', ([ 0.5293, 0.8684, 0.8385, 0.7914, 0.6425, 0.4662, 0.3499, 0.1968, 0.0539]) )
invred = numpy.asarray(list(reversed(red)))
invgreen = numpy.asarray(list(reversed(green)))
invblue = numpy.asarray(list(reversed(blue)))
ROOT.TColor.CreateGradientColorTable(9, stops, invred, invgreen, invblue, 255, alpha);
def scaleXS(model,Z,A):
# --- Function to scale point by 1/xsec
if model=="2HDM": xsRef = open("crosssectionZp2HDM.txt")
if model=="BARY": xsRef = open("crosssectionZpBaryonic.txt")
returnString = "99999"
for line in xsRef:
if (str(line.split(' ')[0]) == str(Z) and str(line.split(' ')[1]) == str(A)):
returnString = str(1./float(line.split(' ')[2]))
print returnString
return returnString
def init():
# options
parser = OptionParser("usage: %prog [options]")
parser.add_option("-O",action="store",dest="outdir",type="string",
default="",help="Output directory [default = %default]"),
parser.add_option("-m",action="store",dest="model",type="string",
default="",help="Which model (2HDM or BARY)"),
parser.add_option("-w",action="store",dest="which",type="string",
default="",help="Which channel (gg, tt, combo)"),
parser.add_option("--dowgt",action="store_true",dest="dowgt",
default=False,help="Weight by 1/xsec (if not already done) [default = %default]"),
parser.add_option("--do90",action="store_true",dest="do90",
default=False,help="Store 90%CL root file [default = %default]"),
parser.add_option("--dosmooth",action="store_true",dest="dosmth",
default=False,help="Smooth TH2 after filling and avg. [default = %default]"),
parser.add_option("--smth",action="store",dest="smthfnc",type="string",
default="k5a",help="Smoothing function to apply [default = %default]"),
(options, args) = parser.parse_args()
if options.model!="2HDM" and options.model!="BARY":
print "Model "+options.model+" is NOT a valid option."
sys.exit()
if options.which!="gg" and options.which!="tt" and options.which!="combo":
print "Channel "+options.which+" is NOT a valid option."
sys.exit()
| random_line_split |
||
plot2DlimitsAll.py | Text("")
white.SetFillColor(0)
white.Draw("SAME")
# --- latex
if model=="2HDM": txt1 = "#bf{Z'-2HDM}"
if model=="BARY": txt1 = "#bf{Baryonic Z'}"
txt1 += "#bf{, Z' #rightarrow DM + h"
if which=='gg': txt1 += "(#gamma#gamma)}"
if which=='tt': txt1 += "(#tau#tau)} "
if which=='combo': txt1 += "(#gamma#gamma + #tau#tau)}"
if model=="2HDM": txt2 = "#bf{Dirac DM, m_{DM} = 100 GeV}"
if model=="BARY": txt2 = "#bf{Dirac DM, g_{q} = 0.25, g_{DM} = 1.0 }"
if model=="2HDM": txt3 = "#bf{g_{Z'} = 0.8, g_{DM} = 1.0}"
if model=="BARY": txt3 = ""
txt = ROOT.TPaveText(x1,y1+0.15,x2,y2,"NDC")
txt.AddText(txt1)
txt.AddText(txt2)
txt.AddText(txt3)
txt.SetTextAlign(12)
txt.SetTextSize(0.04)
txt.Draw("SAME")
# --- legend
if model=="2HDM": leg = ROOT.TLegend(x1,y1,x2,y1+0.15)
if model=="BARY": leg = ROOT.TLegend(x1,y1+0.05,x2,y1+0.2)
leg.SetBorderSize(0)
leg.SetTextFont(42)
leg.SetTextSize(0.040)
leg.AddEntry(limitPlotObs,"Observed 95% CL","L")
leg.AddEntry(limitPlot,"Expected 95% CL","L")
leg.AddEntry(limitPlotUp,"#pm 1 s.d.","L")
leg.Draw()
canv.cd()
canv.Update()
CMS_lumi(canv,4,0)
canv.RedrawAxis()
canv.Update()
# --- save
outname = outdir+'contours_'
if do90: outname += '90CL'
else: outname += '95CL'
outname += '_'+model+'_'+which+'.root'
outfile = ROOT.TFile(outname,'RECREATE')
outfile.cd()
limitPlot.Write()
limitPlotObs.Write()
if do90:
canv.Print(outdir+'limits2D_'+model+'_'+which+'_90CL.pdf')
canv.Print(outdir+'limits2D_'+model+'_'+which+'_90CL.png')
else:
canv.Print(outdir+'limits2D_'+model+'_'+which+'.pdf')
canv.Print(outdir+'limits2D_'+model+'_'+which+'.png')
def fillAvg(limitPlot,A,Z,doL,doH,doR):
# --- ordering for each option
irange = range(1,limitPlot.GetNbinsY()+1)
jrange = range(1,limitPlot.GetNbinsX()+1)
if doL: jrange = list(reversed(range(1,limitPlot.GetNbinsX()+1)))
if doH: irange = list(reversed(range(1,limitPlot.GetNbinsY()+1)))
# --- average over 4 adjacent bins
for i in irange:
for j in jrange:
aVal = A[i-1]
zVal = Z[j-1]
binVal = str(limitPlot.GetBinContent(j,i))
# --- only if bin is 0 do averaging
if binVal == "0.0" and ((doL and 2*float(aVal) < float(zVal)) or (doH and 2*float(aVal) > float(zVal)) or (doR)):
avg = 0.0
div = 0.0
back = limitPlot.GetBinContent(j-1,i)
forw = limitPlot.GetBinContent(j+1,i)
down = limitPlot.GetBinContent(j,i-1)
abov = limitPlot.GetBinContent(j,i+1)
if back != 0.0 and ((doL and back < 50.) or (doH and back > 50.) or (doR)):
avg += back
div += 1
if forw != 0.0 and ((doL and forw < 50.) or (doH and forw > 50.) or (doR)):
avg += forw
div += 1
if down != 0.0 and ((doL and down < 50.) or (doH and down > 50.) or (doR)):
avg += down
div += 1
if abov != 0.0 and ((doL and abov < 50.) or (doH and abov > 50.) or (doR)):
avg += abov
div += 1
if div != 0:
avg = avg/div
limitPlot.SetBinContent(j,i,avg)
def InvertPalette():
# --- Function to make inverted kBird palette
alpha=1
stops = array('d', ([ 0.0000, 0.1250, 0.2500, 0.3750, 0.5000, 0.6250, 0.7500, 0.8750, 1.0000]) )
red = array('d', ([ 0.2082, 0.0592, 0.0780, 0.0232, 0.1802, 0.5301, 0.8186, 0.9956, 0.9764]) )
green = array('d', ([ 0.1664, 0.3599, 0.5041, 0.6419, 0.7178, 0.7492, 0.7328, 0.7862, 0.9832]) )
blue = array('d', ([ 0.5293, 0.8684, 0.8385, 0.7914, 0.6425, 0.4662, 0.3499, 0.1968, 0.0539]) )
invred = numpy.asarray(list(reversed(red)))
invgreen = numpy.asarray(list(reversed(green)))
invblue = numpy.asarray(list(reversed(blue)))
ROOT.TColor.CreateGradientColorTable(9, stops, invred, invgreen, invblue, 255, alpha);
def scaleXS(model,Z,A):
# --- Function to scale point by 1/xsec
if model=="2HDM": xsRef = open("crosssectionZp2HDM.txt")
if model=="BARY": xsRef = open("crosssectionZpBaryonic.txt")
returnString = "99999"
for line in xsRef:
if (str(line.split(' ')[0]) == str(Z) and str(line.split(' ')[1]) == str(A)):
returnString = str(1./float(line.split(' ')[2]))
print returnString
return returnString
def init():
# options
| parser = OptionParser("usage: %prog [options]")
parser.add_option("-O",action="store",dest="outdir",type="string",
default="",help="Output directory [default = %default]"),
parser.add_option("-m",action="store",dest="model",type="string",
default="",help="Which model (2HDM or BARY)"),
parser.add_option("-w",action="store",dest="which",type="string",
default="",help="Which channel (gg, tt, combo)"),
parser.add_option("--dowgt",action="store_true",dest="dowgt",
default=False,help="Weight by 1/xsec (if not already done) [default = %default]"),
parser.add_option("--do90",action="store_true",dest="do90",
default=False,help="Store 90%CL root file [default = %default]"),
parser.add_option("--dosmooth",action="store_true",dest="dosmth",
default=False,help="Smooth TH2 after filling and avg. [default = %default]"),
parser.add_option("--smth",action="store",dest="smthfnc",type="string",
default="k5a",help="Smoothing function to apply [default = %default]"),
(options, args) = parser.parse_args()
if options.model!="2HDM" and options.model!="BARY":
print "Model "+options.model+" is NOT a valid option."
sys.exit() | identifier_body |
|
plot2DlimitsAll.py | (opts):
# --- read in options
model = opts.model
which = opts.which
outdir = opts.outdir
do90 = opts.do90
dowgt = opts.dowgt
dosmth = opts.dosmth
smthfnc = opts.smthfnc
#if dosmth: addtxt = '_smth'
# --- read in files
indir = '/eos/cms/store/group/phys_exotica/MonoHgg/MonoH-COMBO-2016/'+model+'_jsons/'
if dowgt: wfile = ''
else: wfile = '_weighted'
if do90: indir += which+'_'+model+wfile+'_results_90CL/'
else: indir += which+'_'+model+wfile+'_results/'
# --- options for plot averaging
doFillAvgLow = True # do averaging below mMed = 2*mDM line
doFillAvgHigh = True # do averaging above mMed = 2*mDM line
if model=="2HDM": doFillAvgLow = False
if model=="2HDM": doFillAvgHigh = False
doFillAvgRest = True # do averaging at line or average normally
doFillAvgAll = True # do averaging for all plots not just observed
# --- setup general style
ROOT.gROOT.SetBatch(ROOT.kTRUE)
ROOT.gStyle.SetOptStat(0)
plot.ModTDRStyle()
canv = ROOT.TCanvas()
canv.SetLogz()
canv.SetTicks()
canv.SetRightMargin(0.16) # allow enough space for z axis label
canv.cd()
# --- setup palette
ROOT.gStyle.SetPalette(57) # palette normal
InvertPalette() # palette inverted
ROOT.gStyle.SetNumberContours(255)
A=[]; Z=[]
# --- mass points
if model=="2HDM":
A=[300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675]
Z=[450,500,550,600,650,700,750,800,850,900,950,1000,1050,1100,1150,1200,1250,1300,1350,1400,1450,1500,1550,1600,1650,1700,1750,1800,1850,1900,1950]
if model=="BARY":
A=[1,35,100,125,150,175,200,225,250,275,300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675,700,725,750,775,800,825,850,875,900,925,950,975,1000]
Z=[10,50,100,200,250,300,350,400,450,500,550,600,675,750,800,850,900,950,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000]
if model=="BARY" and which=='combo':
A=[1,35,100,125,150,175,200,225,250,275,300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675,700,725,750,775,800,825,850,900,925,950,975,1000]
Z=[10,50,100,200,250,300,350,400,450,500,550,600,650,700,900,950,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000]
# --- binning for BARY model
# Y axis
BinningA = [0.5,1.5]
BinAAxis = [1.0,47.5]
for i in range(1, len(A)-1):
BinningA.append( (A[i] + A[i+1])/2.0 )
BinAAxis.append( (A[i] + A[i+1])/2.0 )
BinningA.append( (A[-1] + A[-1] - ((A[-1] + A[-2])/2.0)) )
BinAAxis.append( (A[-1] + A[-1] - ((A[-1] + A[-2])/2.0)) )
# X axis
BinningZ = [9,11]
BinZAxis = [10,75]
for i in range(1, len(Z)-1):
BinningZ.append( (Z[i] + Z[i+1])/2.0 )
BinZAxis.append( (Z[i] + Z[i+1])/2.0 )
BinningZ.append( (Z[-1] + Z[-1] - ((Z[-1] + Z[-2])/2.0)) )
BinZAxis.append( (Z[-1] + Z[-1] - ((Z[-1] + Z[-2])/2.0)) )
# --- setup histograms (different models have different binning)
if model=="2HDM":
limitPlotAxis = ROOT.TH2F("lplotAxis", "lplotAxis", len(Z), Z[0], Z[-1]+50, len(A), A[0], A[-1]+25)
limitPlot = ROOT.TH2F("lplot", "lplot", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotObs = ROOT.TH2F("lplotObs", "lplotObs", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotUp = ROOT.TH2F("lplotU", "lplotU", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotDown = ROOT.TH2F("lplotDown", "lplotDown", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotUp2 = ROOT.TH2F("lplotU2", "lplotU2", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotDown2 = ROOT.TH2F("lplotDown2", "lplotDown2", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
if model=="BARY": # variable binning
limitPlotAxis = ROOT.TH2F("lplotAxis", "lplotAxis", len(Z)-1, array('d',BinZAxis), len(A)-1, array('d',BinAAxis))
limitPlot = ROOT.TH2F("lplot", | run | identifier_name |
|
plot2DlimitsAll.py |
BinningA = [0.5,1.5]
BinAAxis = [1.0,47.5]
for i in range(1, len(A)-1):
BinningA.append( (A[i] + A[i+1])/2.0 )
BinAAxis.append( (A[i] + A[i+1])/2.0 )
BinningA.append( (A[-1] + A[-1] - ((A[-1] + A[-2])/2.0)) )
BinAAxis.append( (A[-1] + A[-1] - ((A[-1] + A[-2])/2.0)) )
# X axis
BinningZ = [9,11]
BinZAxis = [10,75]
for i in range(1, len(Z)-1):
BinningZ.append( (Z[i] + Z[i+1])/2.0 )
BinZAxis.append( (Z[i] + Z[i+1])/2.0 )
BinningZ.append( (Z[-1] + Z[-1] - ((Z[-1] + Z[-2])/2.0)) )
BinZAxis.append( (Z[-1] + Z[-1] - ((Z[-1] + Z[-2])/2.0)) )
# --- setup histograms (different models have different binning)
if model=="2HDM":
limitPlotAxis = ROOT.TH2F("lplotAxis", "lplotAxis", len(Z), Z[0], Z[-1]+50, len(A), A[0], A[-1]+25)
limitPlot = ROOT.TH2F("lplot", "lplot", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotObs = ROOT.TH2F("lplotObs", "lplotObs", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotUp = ROOT.TH2F("lplotU", "lplotU", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotDown = ROOT.TH2F("lplotDown", "lplotDown", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotUp2 = ROOT.TH2F("lplotU2", "lplotU2", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotDown2 = ROOT.TH2F("lplotDown2", "lplotDown2", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
if model=="BARY": # variable binning
limitPlotAxis = ROOT.TH2F("lplotAxis", "lplotAxis", len(Z)-1, array('d',BinZAxis), len(A)-1, array('d',BinAAxis))
limitPlot = ROOT.TH2F("lplot", "lplot", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotObs = ROOT.TH2F("lplotObs", "lplotObs", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotUp = ROOT.TH2F("lplotU", "lplotU", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotDown = ROOT.TH2F("lplotDown", "lplotDown", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotUp2 = ROOT.TH2F("lplotU2", "lplotU2", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotDown2 = ROOT.TH2F("lplotDown2", "lplotDown2", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
# --- read in json files
for a in A:
|
# --- average plots to make smooth contours
fillAvg(limitPlotObs, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotObs, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotObs, A, Z, False, False, doFillAvgRest)
if doFillAvgAll:
fillAvg(limitPlot, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotUp, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotDown, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotUp2, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotDown2, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlot, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotUp, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotDown, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotUp2, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotDown2, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlot, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotUp, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotDown, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotUp2, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotDown2, A, Z, False, False, doFillAvgRest)
# --- axis labels
limitPlotAxis.GetXaxis().SetTitle("m_{Z'} [GeV]")
limitPlotObs.GetZaxis().SetTitle("#sigma_{95% CL}/#sigma_{th}")
if model=="2HDM": limitPlotAxis.GetYaxis().SetTitle("m_{A} [GeV]")
if model=="BARY": limitPlotAxis.GetYaxis().SetTitle("m_{DM} [GeV]")
# --- clone obs to get contour
limitPlotObsCopy = limitPlotObs.Clone()
# --- set up min and max of z axis
limitPlotObs.SetMaximum(100)
limitPlotObs.SetMinimum(0.3)
# --- set range of x and y axis
if | for z in Z:
data = {}
filename = indir+'Zprime'+str(z)+'A'+str(a)+'.json'
if which=='gg' and model=='BARY': # BARY gg ONLY has DM instead of A in filename
filename = indir+'Zprime'+str(z)+'DM'+str(a)+'.json'
scale = 1.
if dowgt: scale = scaleXS(model,z,a)
if os.path.isfile(filename) and scale != "99999":
with open(filename) as jsonfile:
data = json.load(jsonfile)
for key in data: # fill plots from json
limitPlot.SetBinContent(limitPlot.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp0'])
limitPlotUp.SetBinContent(limitPlotUp.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp+1'])
limitPlotDown.SetBinContent(limitPlotDown.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp-1'])
limitPlotUp2.SetBinContent(limitPlotUp2.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp+2'])
limitPlotDown2.SetBinContent(limitPlotDown2.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp-2'])
limitPlotObs.SetBinContent(limitPlotObs.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'obs']) | conditional_block |
reidtools.py | , optional): resized image width. Default is 128.
height (int, optional): resized image height. Default is 256.
save_dir (str): directory to save output images.
topk (int, optional): denoting top-k images in the rank list to be visualized.
Default is 10.
"""
num_q, num_g = distmat.shape
mkdir_if_missing(save_dir)
print('# query: {}\n# gallery {}'.format(num_q, num_g))
print('Visualizing top-{} ranks ...'.format(topk))
query, gallery = dataset
assert num_q == len(query)
assert num_g == len(gallery)
indices = np.argsort(distmat, axis=1)
def | (src, dst, rank, prefix, matched=False):
"""
Args:
src: image path or tuple (for vidreid)
dst: target directory
rank: int, denoting ranked position, starting from 1
prefix: string
matched: bool
"""
if isinstance(src, tuple) or isinstance(src, list):
if prefix == 'gallery':
suffix = 'TRUE' if matched else 'FALSE'
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3)) + '_' + suffix
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3))
mkdir_if_missing(dst)
for img_path in src:
shutil.copy(img_path, dst)
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3) + '_name_' + osp.basename(src))
shutil.copy(src, dst)
for q_idx in range(num_q):
item = query[q_idx]
qimg_path, qpid, qcamid = item[:3]
# qsegment_path = item[6]
qpid, qcamid = int(qpid), int(qcamid)
num_cols = topk + 1
# grid_img = 255 * np.ones((2*height+GRID_SPACING, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
grid_img = 255 * np.ones((height, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
idx_str = str(qpid) + '\n'
if data_type == 'image':
qimg = cv2.imread(qimg_path)
qimg = Image.fromarray(cv2.cvtColor(qimg, cv2.COLOR_BGR2RGB))
qimg = cv2.cvtColor(np.asarray(qimg), cv2.COLOR_RGB2BGR)
qimg = cv2.resize(qimg, (width, height))
qimg = cv2.copyMakeBorder(qimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
qimg = cv2.resize(qimg, (width, height)) # resize twice to ensure that the border width is consistent across images
# qsegment = cv2.imread(qsegment_path)
# qsegment = Image.fromarray(cv2.cvtColor(qsegment, cv2.COLOR_BGR2RGB))
# qsegment = cv2.cvtColor(np.asarray(qsegment), cv2.COLOR_RGB2BGR)
#
# qsegment = cv2.resize(qsegment, (width, height))
# qsegment = cv2.copyMakeBorder(qsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
# qsegment = cv2.resize(qsegment, (
# width, height)) # resize twice to ensure that the border width is consistent across images
grid_img[:height, :width, :] = qimg
# grid_img[height+GRID_SPACING:, :width, :] = qsegment
else:
qdir = osp.join(save_dir, osp.basename(osp.splitext(qimg_path)[0]))
mkdir_if_missing(qdir)
_cp_img_to(qimg_path, qdir, rank=0, prefix='query')
rank_idx = 1
for g_idx in indices[q_idx, :]:
item = gallery[g_idx]
gimg_path, gpid, gcamid = item[:3]
# gsegment_path = item[6]
gpid, gcamid = int(gpid), int(gcamid)
invalid = (qpid == gpid) & (qcamid == gcamid)
idx_str = idx_str + str(gpid) + ' '
if not invalid:
matched = gpid == qpid
if data_type == 'image':
border_color = GREEN if matched else RED
gimg = cv2.imread(gimg_path)
gimg = Image.fromarray(cv2.cvtColor(gimg, cv2.COLOR_BGR2RGB))
gimg = cv2.cvtColor(np.asarray(gimg), cv2.COLOR_RGB2BGR)
gimg = cv2.resize(gimg, (width, height))
gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, 1, value=border_color)
gimg = cv2.resize(gimg, (width, height))
# gsegment = cv2.imread(gsegment_path)
# gsegment = Image.fromarray(cv2.cvtColor(gsegment, cv2.COLOR_BGR2RGB))
# gsegment = cv2.cvtColor(np.asarray(gsegment), cv2.COLOR_RGB2BGR)
#
# gsegment = cv2.resize(gsegment, (width, height))
# gsegment = cv2.copyMakeBorder(gsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gsegment = cv2.resize(gsegment, (width, height))
start = rank_idx * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
end = (rank_idx + 1) * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
grid_img[:height, start:end, :] = gimg
# grid_img[height+GRID_SPACING:, start:end, :] = gsegment
else:
_cp_img_to(gimg_path, qdir, rank=rank_idx, prefix='gallery', matched=matched)
rank_idx += 1
if rank_idx > topk:
break
# if rank_idx > topk-1:
# break
relpath = qimg_path.split('/rgb/')[-1]
imname = osp.basename(osp.splitext(relpath)[0])
dirname = osp.dirname(relpath)
dir_path = osp.join(save_dir, dirname)
if not osp.exists(dir_path):
os.makedirs(dir_path)
cv2.imwrite(osp.join(dir_path, imname + '.jpg'), grid_img)
with open(osp.join(dir_path, imname + '.txt'), 'w') as fp:
fp.write(idx_str)
# imname = osp.basename(osp.splitext(qimg_path)[0])
# cv2.imwrite(osp.join(save_dir, imname+'.jpg'), grid_img)
if (q_idx + 1) % 100 == 0:
print('- done {}/{}'.format(q_idx + 1, num_q))
print('Done. Images have been saved to "{}" ...'.format(save_dir))
@torch.no_grad()
def visactmap(testloader, model, save_dir, width, height, print_freq, use_gpu, **kwargs):
"""Visualizes CNN activation maps to see where the CNN focuses on to extract features.
This function takes as input the query images of target datasets
Reference:
- Zagoruyko and Komodakis. Paying more attention to attention: Improving the
performance of convolutional neural networks via attention transfer. ICLR, 2017
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
"""
model.eval()
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
# original images and activation maps are saved individually
actmap_dir = osp.join(save_dir, 'actmap_layer2')
mkdir_if_missing(actmap_dir)
print('Visualizing activation maps ...')
for batch_idx, data in enumerate(testloader):
# imgs, paths = data[0], data[3]
# imgs, paths = data[0], data[3]
imgs, contours, paths = data[0], data[1], data[4]
if use_gpu:
imgs = imgs.cuda()
contours = contours.cuda()
# forward to get convolutional feature maps
try:
# outputs = model(segments, imgs, return_featuremaps=True)
outputs = model(imgs, contours, return_featuremaps=True)
except TypeError:
raise TypeError('forward() got unexpected keyword argument "return_featuremaps". ' \
'Please add return_featuremaps as an input argument to forward(). When ' \
'return_featuremaps=True, return feature maps only.')
| _cp_img_to | identifier_name |
reidtools.py | , optional): resized image width. Default is 128.
height (int, optional): resized image height. Default is 256.
save_dir (str): directory to save output images.
topk (int, optional): denoting top-k images in the rank list to be visualized.
Default is 10.
"""
num_q, num_g = distmat.shape
mkdir_if_missing(save_dir)
print('# query: {}\n# gallery {}'.format(num_q, num_g))
print('Visualizing top-{} ranks ...'.format(topk))
query, gallery = dataset
assert num_q == len(query)
assert num_g == len(gallery)
indices = np.argsort(distmat, axis=1)
def _cp_img_to(src, dst, rank, prefix, matched=False):
|
for q_idx in range(num_q):
item = query[q_idx]
qimg_path, qpid, qcamid = item[:3]
# qsegment_path = item[6]
qpid, qcamid = int(qpid), int(qcamid)
num_cols = topk + 1
# grid_img = 255 * np.ones((2*height+GRID_SPACING, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
grid_img = 255 * np.ones((height, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
idx_str = str(qpid) + '\n'
if data_type == 'image':
qimg = cv2.imread(qimg_path)
qimg = Image.fromarray(cv2.cvtColor(qimg, cv2.COLOR_BGR2RGB))
qimg = cv2.cvtColor(np.asarray(qimg), cv2.COLOR_RGB2BGR)
qimg = cv2.resize(qimg, (width, height))
qimg = cv2.copyMakeBorder(qimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
qimg = cv2.resize(qimg, (width, height)) # resize twice to ensure that the border width is consistent across images
# qsegment = cv2.imread(qsegment_path)
# qsegment = Image.fromarray(cv2.cvtColor(qsegment, cv2.COLOR_BGR2RGB))
# qsegment = cv2.cvtColor(np.asarray(qsegment), cv2.COLOR_RGB2BGR)
#
# qsegment = cv2.resize(qsegment, (width, height))
# qsegment = cv2.copyMakeBorder(qsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
# qsegment = cv2.resize(qsegment, (
# width, height)) # resize twice to ensure that the border width is consistent across images
grid_img[:height, :width, :] = qimg
# grid_img[height+GRID_SPACING:, :width, :] = qsegment
else:
qdir = osp.join(save_dir, osp.basename(osp.splitext(qimg_path)[0]))
mkdir_if_missing(qdir)
_cp_img_to(qimg_path, qdir, rank=0, prefix='query')
rank_idx = 1
for g_idx in indices[q_idx, :]:
item = gallery[g_idx]
gimg_path, gpid, gcamid = item[:3]
# gsegment_path = item[6]
gpid, gcamid = int(gpid), int(gcamid)
invalid = (qpid == gpid) & (qcamid == gcamid)
idx_str = idx_str + str(gpid) + ' '
if not invalid:
matched = gpid == qpid
if data_type == 'image':
border_color = GREEN if matched else RED
gimg = cv2.imread(gimg_path)
gimg = Image.fromarray(cv2.cvtColor(gimg, cv2.COLOR_BGR2RGB))
gimg = cv2.cvtColor(np.asarray(gimg), cv2.COLOR_RGB2BGR)
gimg = cv2.resize(gimg, (width, height))
gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, 1, value=border_color)
gimg = cv2.resize(gimg, (width, height))
# gsegment = cv2.imread(gsegment_path)
# gsegment = Image.fromarray(cv2.cvtColor(gsegment, cv2.COLOR_BGR2RGB))
# gsegment = cv2.cvtColor(np.asarray(gsegment), cv2.COLOR_RGB2BGR)
#
# gsegment = cv2.resize(gsegment, (width, height))
# gsegment = cv2.copyMakeBorder(gsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gsegment = cv2.resize(gsegment, (width, height))
start = rank_idx * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
end = (rank_idx + 1) * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
grid_img[:height, start:end, :] = gimg
# grid_img[height+GRID_SPACING:, start:end, :] = gsegment
else:
_cp_img_to(gimg_path, qdir, rank=rank_idx, prefix='gallery', matched=matched)
rank_idx += 1
if rank_idx > topk:
break
# if rank_idx > topk-1:
# break
relpath = qimg_path.split('/rgb/')[-1]
imname = osp.basename(osp.splitext(relpath)[0])
dirname = osp.dirname(relpath)
dir_path = osp.join(save_dir, dirname)
if not osp.exists(dir_path):
os.makedirs(dir_path)
cv2.imwrite(osp.join(dir_path, imname + '.jpg'), grid_img)
with open(osp.join(dir_path, imname + '.txt'), 'w') as fp:
fp.write(idx_str)
# imname = osp.basename(osp.splitext(qimg_path)[0])
# cv2.imwrite(osp.join(save_dir, imname+'.jpg'), grid_img)
if (q_idx + 1) % 100 == 0:
print('- done {}/{}'.format(q_idx + 1, num_q))
print('Done. Images have been saved to "{}" ...'.format(save_dir))
@torch.no_grad()
def visactmap(testloader, model, save_dir, width, height, print_freq, use_gpu, **kwargs):
"""Visualizes CNN activation maps to see where the CNN focuses on to extract features.
This function takes as input the query images of target datasets
Reference:
- Zagoruyko and Komodakis. Paying more attention to attention: Improving the
performance of convolutional neural networks via attention transfer. ICLR, 2017
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
"""
model.eval()
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
# original images and activation maps are saved individually
actmap_dir = osp.join(save_dir, 'actmap_layer2')
mkdir_if_missing(actmap_dir)
print('Visualizing activation maps ...')
for batch_idx, data in enumerate(testloader):
# imgs, paths = data[0], data[3]
# imgs, paths = data[0], data[3]
imgs, contours, paths = data[0], data[1], data[4]
if use_gpu:
imgs = imgs.cuda()
contours = contours.cuda()
# forward to get convolutional feature maps
try:
# outputs = model(segments, imgs, return_featuremaps=True)
outputs = model(imgs, contours, return_featuremaps=True)
except TypeError:
raise TypeError('forward() got unexpected keyword argument "return_featuremaps". ' \
'Please add return_featuremaps as an input argument to forward(). When ' \
'return_featuremaps=True, return feature maps only | """
Args:
src: image path or tuple (for vidreid)
dst: target directory
rank: int, denoting ranked position, starting from 1
prefix: string
matched: bool
"""
if isinstance(src, tuple) or isinstance(src, list):
if prefix == 'gallery':
suffix = 'TRUE' if matched else 'FALSE'
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3)) + '_' + suffix
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3))
mkdir_if_missing(dst)
for img_path in src:
shutil.copy(img_path, dst)
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3) + '_name_' + osp.basename(src))
shutil.copy(src, dst) | identifier_body |
reidtools.py | , optional): resized image width. Default is 128.
height (int, optional): resized image height. Default is 256.
save_dir (str): directory to save output images.
topk (int, optional): denoting top-k images in the rank list to be visualized.
Default is 10.
"""
num_q, num_g = distmat.shape
mkdir_if_missing(save_dir)
print('# query: {}\n# gallery {}'.format(num_q, num_g))
print('Visualizing top-{} ranks ...'.format(topk))
query, gallery = dataset
assert num_q == len(query)
assert num_g == len(gallery)
indices = np.argsort(distmat, axis=1)
def _cp_img_to(src, dst, rank, prefix, matched=False):
"""
Args:
src: image path or tuple (for vidreid)
dst: target directory
rank: int, denoting ranked position, starting from 1
prefix: string
matched: bool
"""
if isinstance(src, tuple) or isinstance(src, list):
if prefix == 'gallery':
suffix = 'TRUE' if matched else 'FALSE'
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3)) + '_' + suffix
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3))
mkdir_if_missing(dst)
for img_path in src:
shutil.copy(img_path, dst)
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3) + '_name_' + osp.basename(src))
shutil.copy(src, dst)
for q_idx in range(num_q):
item = query[q_idx]
qimg_path, qpid, qcamid = item[:3]
# qsegment_path = item[6]
qpid, qcamid = int(qpid), int(qcamid)
num_cols = topk + 1
# grid_img = 255 * np.ones((2*height+GRID_SPACING, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
grid_img = 255 * np.ones((height, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
idx_str = str(qpid) + '\n'
if data_type == 'image':
qimg = cv2.imread(qimg_path)
qimg = Image.fromarray(cv2.cvtColor(qimg, cv2.COLOR_BGR2RGB))
qimg = cv2.cvtColor(np.asarray(qimg), cv2.COLOR_RGB2BGR)
qimg = cv2.resize(qimg, (width, height))
qimg = cv2.copyMakeBorder(qimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
qimg = cv2.resize(qimg, (width, height)) # resize twice to ensure that the border width is consistent across images
# qsegment = cv2.imread(qsegment_path)
# qsegment = Image.fromarray(cv2.cvtColor(qsegment, cv2.COLOR_BGR2RGB))
# qsegment = cv2.cvtColor(np.asarray(qsegment), cv2.COLOR_RGB2BGR)
#
# qsegment = cv2.resize(qsegment, (width, height))
# qsegment = cv2.copyMakeBorder(qsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
# qsegment = cv2.resize(qsegment, (
# width, height)) # resize twice to ensure that the border width is consistent across images
grid_img[:height, :width, :] = qimg
# grid_img[height+GRID_SPACING:, :width, :] = qsegment
else:
qdir = osp.join(save_dir, osp.basename(osp.splitext(qimg_path)[0]))
mkdir_if_missing(qdir)
_cp_img_to(qimg_path, qdir, rank=0, prefix='query')
rank_idx = 1
for g_idx in indices[q_idx, :]:
item = gallery[g_idx]
gimg_path, gpid, gcamid = item[:3]
# gsegment_path = item[6]
gpid, gcamid = int(gpid), int(gcamid)
invalid = (qpid == gpid) & (qcamid == gcamid)
idx_str = idx_str + str(gpid) + ' '
if not invalid:
matched = gpid == qpid
if data_type == 'image':
border_color = GREEN if matched else RED
gimg = cv2.imread(gimg_path)
gimg = Image.fromarray(cv2.cvtColor(gimg, cv2.COLOR_BGR2RGB))
gimg = cv2.cvtColor(np.asarray(gimg), cv2.COLOR_RGB2BGR)
gimg = cv2.resize(gimg, (width, height))
gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, 1, value=border_color)
gimg = cv2.resize(gimg, (width, height))
# gsegment = cv2.imread(gsegment_path)
# gsegment = Image.fromarray(cv2.cvtColor(gsegment, cv2.COLOR_BGR2RGB))
# gsegment = cv2.cvtColor(np.asarray(gsegment), cv2.COLOR_RGB2BGR)
#
# gsegment = cv2.resize(gsegment, (width, height))
# gsegment = cv2.copyMakeBorder(gsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gsegment = cv2.resize(gsegment, (width, height))
start = rank_idx * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
end = (rank_idx + 1) * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
grid_img[:height, start:end, :] = gimg
# grid_img[height+GRID_SPACING:, start:end, :] = gsegment
else:
_cp_img_to(gimg_path, qdir, rank=rank_idx, prefix='gallery', matched=matched)
rank_idx += 1
if rank_idx > topk:
break
# if rank_idx > topk-1:
# break
relpath = qimg_path.split('/rgb/')[-1]
imname = osp.basename(osp.splitext(relpath)[0])
dirname = osp.dirname(relpath)
dir_path = osp.join(save_dir, dirname)
if not osp.exists(dir_path):
os.makedirs(dir_path)
cv2.imwrite(osp.join(dir_path, imname + '.jpg'), grid_img)
with open(osp.join(dir_path, imname + '.txt'), 'w') as fp:
fp.write(idx_str)
# imname = osp.basename(osp.splitext(qimg_path)[0])
# cv2.imwrite(osp.join(save_dir, imname+'.jpg'), grid_img)
if (q_idx + 1) % 100 == 0:
print('- done {}/{}'.format(q_idx + 1, num_q))
print('Done. Images have been saved to "{}" ...'.format(save_dir))
@torch.no_grad()
def visactmap(testloader, model, save_dir, width, height, print_freq, use_gpu, **kwargs):
"""Visualizes CNN activation maps to see where the CNN focuses on to extract features.
This function takes as input the query images of target datasets
Reference:
- Zagoruyko and Komodakis. Paying more attention to attention: Improving the
performance of convolutional neural networks via attention transfer. ICLR, 2017
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
"""
model.eval()
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
# original images and activation maps are saved individually
actmap_dir = osp.join(save_dir, 'actmap_layer2')
mkdir_if_missing(actmap_dir)
print('Visualizing activation maps ...')
for batch_idx, data in enumerate(testloader):
# imgs, paths = data[0], data[3]
# imgs, paths = data[0], data[3]
imgs, contours, paths = data[0], data[1], data[4]
if use_gpu:
|
# forward to get convolutional feature maps
try:
# outputs = model(segments, imgs, return_featuremaps=True)
outputs = model(imgs, contours, return_featuremaps=True)
except TypeError:
raise TypeError('forward() got unexpected keyword argument "return_featuremaps". ' \
'Please add return_featuremaps as an input argument to forward(). When ' \
'return_featuremaps=True, return feature maps only | imgs = imgs.cuda()
contours = contours.cuda() | conditional_block |
reidtools.py | distmat.shape
mkdir_if_missing(save_dir)
print('# query: {}\n# gallery {}'.format(num_q, num_g))
print('Visualizing top-{} ranks ...'.format(topk))
query, gallery = dataset
assert num_q == len(query)
assert num_g == len(gallery)
indices = np.argsort(distmat, axis=1)
def _cp_img_to(src, dst, rank, prefix, matched=False):
"""
Args:
src: image path or tuple (for vidreid)
dst: target directory
rank: int, denoting ranked position, starting from 1
prefix: string
matched: bool
"""
if isinstance(src, tuple) or isinstance(src, list):
if prefix == 'gallery':
suffix = 'TRUE' if matched else 'FALSE'
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3)) + '_' + suffix
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3))
mkdir_if_missing(dst)
for img_path in src:
shutil.copy(img_path, dst)
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3) + '_name_' + osp.basename(src))
shutil.copy(src, dst)
for q_idx in range(num_q):
item = query[q_idx]
qimg_path, qpid, qcamid = item[:3]
# qsegment_path = item[6]
qpid, qcamid = int(qpid), int(qcamid)
num_cols = topk + 1
# grid_img = 255 * np.ones((2*height+GRID_SPACING, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
grid_img = 255 * np.ones((height, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
idx_str = str(qpid) + '\n'
if data_type == 'image':
qimg = cv2.imread(qimg_path)
qimg = Image.fromarray(cv2.cvtColor(qimg, cv2.COLOR_BGR2RGB))
qimg = cv2.cvtColor(np.asarray(qimg), cv2.COLOR_RGB2BGR)
qimg = cv2.resize(qimg, (width, height))
qimg = cv2.copyMakeBorder(qimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
qimg = cv2.resize(qimg, (width, height)) # resize twice to ensure that the border width is consistent across images
# qsegment = cv2.imread(qsegment_path)
# qsegment = Image.fromarray(cv2.cvtColor(qsegment, cv2.COLOR_BGR2RGB))
# qsegment = cv2.cvtColor(np.asarray(qsegment), cv2.COLOR_RGB2BGR)
#
# qsegment = cv2.resize(qsegment, (width, height))
# qsegment = cv2.copyMakeBorder(qsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
# qsegment = cv2.resize(qsegment, (
# width, height)) # resize twice to ensure that the border width is consistent across images
grid_img[:height, :width, :] = qimg
# grid_img[height+GRID_SPACING:, :width, :] = qsegment
else:
qdir = osp.join(save_dir, osp.basename(osp.splitext(qimg_path)[0]))
mkdir_if_missing(qdir)
_cp_img_to(qimg_path, qdir, rank=0, prefix='query')
rank_idx = 1
for g_idx in indices[q_idx, :]:
item = gallery[g_idx]
gimg_path, gpid, gcamid = item[:3]
# gsegment_path = item[6]
gpid, gcamid = int(gpid), int(gcamid)
invalid = (qpid == gpid) & (qcamid == gcamid)
idx_str = idx_str + str(gpid) + ' '
if not invalid:
matched = gpid == qpid
if data_type == 'image':
border_color = GREEN if matched else RED
gimg = cv2.imread(gimg_path)
gimg = Image.fromarray(cv2.cvtColor(gimg, cv2.COLOR_BGR2RGB))
gimg = cv2.cvtColor(np.asarray(gimg), cv2.COLOR_RGB2BGR)
gimg = cv2.resize(gimg, (width, height))
gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, 1, value=border_color)
gimg = cv2.resize(gimg, (width, height))
# gsegment = cv2.imread(gsegment_path)
# gsegment = Image.fromarray(cv2.cvtColor(gsegment, cv2.COLOR_BGR2RGB))
# gsegment = cv2.cvtColor(np.asarray(gsegment), cv2.COLOR_RGB2BGR)
#
# gsegment = cv2.resize(gsegment, (width, height))
# gsegment = cv2.copyMakeBorder(gsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gsegment = cv2.resize(gsegment, (width, height))
start = rank_idx * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
end = (rank_idx + 1) * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
grid_img[:height, start:end, :] = gimg
# grid_img[height+GRID_SPACING:, start:end, :] = gsegment
else:
_cp_img_to(gimg_path, qdir, rank=rank_idx, prefix='gallery', matched=matched)
rank_idx += 1
if rank_idx > topk:
break
# if rank_idx > topk-1:
# break
relpath = qimg_path.split('/rgb/')[-1]
imname = osp.basename(osp.splitext(relpath)[0])
dirname = osp.dirname(relpath)
dir_path = osp.join(save_dir, dirname)
if not osp.exists(dir_path):
os.makedirs(dir_path)
cv2.imwrite(osp.join(dir_path, imname + '.jpg'), grid_img)
with open(osp.join(dir_path, imname + '.txt'), 'w') as fp:
fp.write(idx_str)
# imname = osp.basename(osp.splitext(qimg_path)[0])
# cv2.imwrite(osp.join(save_dir, imname+'.jpg'), grid_img)
if (q_idx + 1) % 100 == 0:
print('- done {}/{}'.format(q_idx + 1, num_q))
print('Done. Images have been saved to "{}" ...'.format(save_dir))
@torch.no_grad()
def visactmap(testloader, model, save_dir, width, height, print_freq, use_gpu, **kwargs):
"""Visualizes CNN activation maps to see where the CNN focuses on to extract features.
This function takes as input the query images of target datasets
Reference:
- Zagoruyko and Komodakis. Paying more attention to attention: Improving the
performance of convolutional neural networks via attention transfer. ICLR, 2017
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
"""
model.eval()
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
# original images and activation maps are saved individually
actmap_dir = osp.join(save_dir, 'actmap_layer2')
mkdir_if_missing(actmap_dir)
print('Visualizing activation maps ...')
for batch_idx, data in enumerate(testloader):
# imgs, paths = data[0], data[3]
# imgs, paths = data[0], data[3]
imgs, contours, paths = data[0], data[1], data[4]
if use_gpu:
imgs = imgs.cuda()
contours = contours.cuda()
# forward to get convolutional feature maps
try:
# outputs = model(segments, imgs, return_featuremaps=True)
outputs = model(imgs, contours, return_featuremaps=True)
except TypeError:
raise TypeError('forward() got unexpected keyword argument "return_featuremaps". ' \
'Please add return_featuremaps as an input argument to forward(). When ' \
'return_featuremaps=True, return feature maps only.')
if outputs.dim() != 4:
raise ValueError('The model output is supposed to have ' \
'shape of (b, c, h, w), i.e. 4 dimensions, but got {} dimensions. '
'Please make sure you set the model output at eval mode '
'to be the last convolutional feature maps'.format(outputs.dim()))
| # compute activation maps | random_line_split |
|
mqtt.go | params.DeviceName
iot.ProductKey = params.ProductKey
iot.Host = params.ProductKey + ".iot-as-mqtt.cn-shanghai.aliyuncs.com:1883"
opts := mqtt.NewClientOptions().AddBroker(iot.Host).SetClientID(iot.ClientId).SetUsername(iot.Username).SetPassword(iot.Password)
opts.SetPingTimeout(5 * time.Second)
opts.SetKeepAlive(30 * time.Second)
opts.SetCleanSession(false)
opts.SetAutoReconnect(true)
opts.SetConnectionLostHandler(params.ConnectionLostHandler)
opts.SetDefaultPublishHandler(params.DefaultHandel)
opts.SetOnConnectHandler(params.OnConnectHandler)
opts.SetMaxReconnectInterval(2 * time.Minute)
iot.App = params.App
iot.logOut = params.Logger
iot.Conn = mqtt.NewClient(opts)
return
}
func (this *Iot) Connect() (err error) {
c := this.Conn
if token := c.Connect(); token.Wait() && token.Error() != nil {
return token.Error()
}
return nil
}
func (this *Iot) Close() {
if this.Conn.IsConnected() {
this.Conn.Disconnect(256)
}
}
func (this *Iot) Publish(topic string, qos byte, retained bool, payload interface{}) error {
this.Conn.Publish(topic, qos, retained, payload)
return nil
}
func (this *Iot) Subscribe(topic string, qos byte, callback mqtt.MessageHandler) error {
if token := this.Conn.Subscribe(topic, qos, func(client mqtt.Client, message mqtt.Message) {
callback(client, message)
}); token.Wait() && token.Error() != nil {
this.writeLog("error", "Token publish: "+token.Error().Error())
return token.Error()
}
return nil
}
func (this *Iot) SubscribeAndCheck(topic string, qos byte) error {
if token := this.Conn.Subscribe(topic, qos, this.SubscribeGetCallback); token.Wait() && token.Error() != nil {
this.writeLog("error", "Token publish: "+token.Error().Error())
return token.Error()
}
return nil
}
func (this *Iot) SubscribeGetCallback(client mqtt.Client, message mqtt.Message) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/get"
msg := &AliMsg{}
err := json.Unmarshal(message.Payload(), msg)
if err != nil | .Check(msg.MsgId) {
this.writeLog("warning", "msgId "+msg.MsgId+" Topic"+topic+" 重复消息")
return
}
event := common_types.NewEvent(msg.MsgId ,message.Payload())
this.App.Pub(event)
}
/***
* 订阅get消息
*/
func (this *Iot) SubscribeGet() {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/get"
this.SubscribeAndCheck(topic, 1)
}
/***
* 子设备
*/
func (this *Iot) SubscribeSubGet(subProductKey, subDeviceName string) {
topic := "/" + subProductKey + "/" + subDeviceName + "/get"
this.SubscribeAndCheck(topic, 0)
}
/***
* 子设备注册
*/
func (this *Iot) PublishSubRegister(subProductKey, subDeviceName string) {
data := "{'id': '%s', 'version':'1.0','params':[{'deviceName':'%s','productKey':'%s'}],'method':'thing.sub.register'}"
data = fmt.Sprintf(data, this.getMsgId(), subDeviceName, subProductKey)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/sub/register"
this.Publish(topic, 0, false, []byte(data))
}
func (this *Iot) SubscribeSubRegisterReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/sub/register_reply"
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "SubRegister_reply json内容解析失败 "+string(message.Payload()))
return
}
if msg.Code != 200 {
this.writeLog("error", "SubRegister_reply 子设备注册失败 "+msg.Message)
return
}
v, ok := msg.Data.([]interface{})
if !ok {
this.writeLog("error", "SubRegister_reply json内容解析失败->data解析失败 "+string(message.Payload()))
return
}
for _, deviceData := range v {
deviceInfo, ok := deviceData.(map[string]interface{})
if !ok {
this.writeLog("error", "SubRegister_reply json内容解析失败->data解析失败->不能转为map"+string(message.Payload()))
continue
}
deviceSecret, _ := deviceInfo["deviceSecret"].(string)
productKey, _ := deviceInfo["productKey"].(string)
deviceName, _ := deviceInfo["deviceName"].(string)
this.writeLog("info", "SubRegister_reply 注册成功: "+deviceName)
go this.SubDeviceLogin(productKey, deviceName, deviceSecret)
}
})
}
func (this *Iot) SubDeviceLogin(productKey, deviceName, deviceSecret string) {
this.AppendSubDevice(productKey, deviceName, deviceSecret)
this.PublishSubAdd(productKey, deviceName, deviceSecret)
time.Sleep(time.Second * 4)
this.PublishSubLogin(productKey, deviceName, deviceSecret)
}
/***
* 添加子设备
*/
func (this *Iot) PublishSubAdd(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","version":"1.0","params":[{"productKey" : "%s","deviceName" : "%s","clientId":"%s","sign":"%s","signmethod":"hmacSha1","timestamp":"%s"}],"method":"thing.topo.add"}`
data = fmt.Sprintf(data, this.getMsgId(), subProductKey, subDeviceName, subDeviceName, sign, timestamp)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add"
this.SubscribeSubAddReply()
this.Publish(topic, 0, true, []byte(data))
}
func (this *Iot) SubscribeSubAddReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add_reply"
this.Subscribe(topic_reply, 0, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "PublishSubAdd "+"JSON解析失败")
return
}
if msg.Code != 200 {
this.writeLog("error", "PublishSubAdd "+msg.Message)
}
this.writeLog("info", "PublishSubAdd 子设备拓扑添加成功")
return
})
}
/***
step [1, 100] -1:代表升级失败 -2:代表下载失败 -3:代表校验失败 -4:代表烧写失败
desc 进度信息
*/
func (this *Iot) PublishProgress(step int8, desc string) {
topic := "/ota/device/progress/" + this.ProductKey + "/" + this.DeviceName
data := `{ "id": "%s", "params": {"step":"%d", "desc":" %s"}}`
data = fmt.Sprintf(data, this.getMsgId(), step, desc)
this.Publish(topic, 1, false, []byte(data))
}
func (this *Iot) SubscribeUpgrade() {
topic := "/ota/device/upgrade/" + this.ProductKey + "/" + this.DeviceName
this.Subscribe(topic, 1, this.SubscribeUpgradeCallback)
}
func (this *Iot) SubscribeUpgradeCallback(client mqtt.Client, message mqtt.Message) {
//fmt.Println("SubscribeUpgradeCallback", message.Topic(), string(message.Payload()))
update := common_types.UpdateResponse{}
err := json.Unmarshal(message.Payload(), &update)
if err != nil {
this.writeLog("error", "SubscribeUpgrade"+"Json fail "+err.Error())
return
}
if update.Message != "success" {
this.writeLog("error", "SubscribeUpgrade "+update.Message)
return
}
this.SyncUpgradeFile(update)
}
/***
* 上报设备版本信息
*/
func (this *Iot) PublishInform(version string) {
data := `{"id": "%s","params": {"version": "%s"}}`
data = fmt.Sprintf(data, this.getMsgId(), version)
topic := "/ota/device/inform/" + this.ProductKey + "/" + this.DeviceName
this.Publish(topic, 0, false, []byte(data))
}
/***
* 子设备登陆 这个函数会一直执行 ,所以在运行时要加上 go iot.PublishSubLogin
*/
func (this *Iot) PublishSubLogin(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","params":{"productKey":"%s","deviceName":"%s","clientId":"%s","sign":"%s","timestamp":"%s","signMethod":"h | {
this.writeLog("error", "Topic "+topic+" 消息解密失败 "+err.Error()+" Payload: "+string(message.Payload()))
return
}
if !msgIds | conditional_block |
mqtt.go | ProductKey string
ClientId string
Username string
Password string
Sign string
Conn mqtt.Client
logOut interfaces.ModuleLogger
App interfaces.App
SubDevices []Device
}
type Params struct {
ProductKey string
DeviceName string
DeviceSecret string
OnConnectHandler mqtt.OnConnectHandler
ConnectionLostHandler mqtt.ConnectionLostHandler
Logger interfaces.ModuleLogger
App interfaces.App
DefaultHandel mqtt.MessageHandler
}
func NewIot(params Params) (iot *Iot) {
iot = new(Iot)
iot.SubDevices = make([]Device, 0)
sign, timestamp := iot.GetSign(params.ProductKey, params.DeviceName, params.DeviceSecret)
iot.Password = sign
iot.ClientId = params.DeviceName + "|securemode=3,signmethod=hmacsha1,timestamp=" + timestamp + "|"
iot.Username = params.DeviceName + "&" + params.ProductKey
iot.DeviceName = params.DeviceName
iot.ProductKey = params.ProductKey
iot.Host = params.ProductKey + ".iot-as-mqtt.cn-shanghai.aliyuncs.com:1883"
opts := mqtt.NewClientOptions().AddBroker(iot.Host).SetClientID(iot.ClientId).SetUsername(iot.Username).SetPassword(iot.Password)
opts.SetPingTimeout(5 * time.Second)
opts.SetKeepAlive(30 * time.Second)
opts.SetCleanSession(false)
opts.SetAutoReconnect(true)
opts.SetConnectionLostHandler(params.ConnectionLostHandler)
opts.SetDefaultPublishHandler(params.DefaultHandel)
opts.SetOnConnectHandler(params.OnConnectHandler)
opts.SetMaxReconnectInterval(2 * time.Minute)
iot.App = params.App
iot.logOut = params.Logger
iot.Conn = mqtt.NewClient(opts)
return
}
func (this *Iot) Connect() (err error) {
c := this.Conn
if token := c.Connect(); token.Wait() && token.Error() != nil {
return token.Error()
}
return nil
}
func (this *Iot) Close() {
if this.Conn.IsConnected() {
this.Conn.Disconnect(256)
}
}
func (this *Iot) Publish(topic string, qos byte, retained bool, payload interface{}) error {
this.Conn.Publish(topic, qos, retained, payload)
return nil
}
func (this *Iot) Subscribe(topic string, qos byte, callback mqtt.MessageHandler) error {
if token := this.Conn.Subscribe(topic, qos, func(client mqtt.Client, message mqtt.Message) {
callback(client, message)
}); token.Wait() && token.Error() != nil {
this.writeLog("error", "Token publish: "+token.Error().Error())
return token.Error()
}
return nil
}
func (this *Iot) SubscribeAndCheck(topic string, qos byte) error {
if token := this.Conn.Subscribe(topic, qos, this.SubscribeGetCallback); token.Wait() && token.Error() != nil {
this.writeLog("error", "Token publish: "+token.Error().Error())
return token.Error()
}
return nil
}
func (this *Iot) SubscribeGetCallback(client mqtt.Client, message mqtt.Message) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/get"
msg := &AliMsg{}
err := json.Unmarshal(message.Payload(), msg)
if err != nil {
this.writeLog("error", "Topic "+topic+" 消息解密失败 "+err.Error()+" Payload: "+string(message.Payload()))
return
}
if !msgIds.Check(msg.MsgId) {
this.writeLog("warning", "msgId "+msg.MsgId+" Topic"+topic+" 重复消息")
return
}
event := common_types.NewEvent(msg.MsgId ,message.Payload())
this.App.Pub(event)
}
/***
* 订阅get消息
*/
func (this *Iot) SubscribeGet() {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/get"
this.SubscribeAndCheck(topic, 1)
}
/***
* 子设备
*/
func (this *Iot) SubscribeSubGet(subProductKey, subDeviceName string) {
topic := "/" + subProductKey + "/" + subDeviceName + "/get"
this.SubscribeAndCheck(topic, 0)
}
/***
* 子设备注册
*/
func (this *Iot) PublishSubRegister(subProductKey, subDeviceName string) {
data := "{'id': '%s', 'version':'1.0','params':[{'deviceName':'%s','productKey':'%s'}],'method':'thing.sub.register'}"
data = fmt.Sprintf(data, this.getMsgId(), subDeviceName, subProductKey)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/sub/register"
this.Publish(topic, 0, false, []byte(data))
}
func (this *Iot) SubscribeSubRegisterReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/sub/register_reply"
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "SubRegister_reply json内容解析失败 "+string(message.Payload()))
return
}
if msg.Code != 200 {
this.writeLog("error", "SubRegister_reply 子设备注册失败 "+msg.Message)
return
}
v, ok := msg.Data.([]interface{})
if !ok {
this.writeLog("error", "SubRegister_reply json内容解析失败->data解析失败 "+string(message.Payload()))
return
}
for _, deviceData := range v {
deviceInfo, ok := deviceData.(map[string]interface{})
if !ok {
this.writeLog("error", "SubRegister_reply json内容解析失败->data解析失败->不能转为map"+string(message.Payload()))
continue
}
deviceSecret, _ := deviceInfo["deviceSecret"].(string)
productKey, _ := deviceInfo["productKey"].(string)
deviceName, _ := deviceInfo["deviceName"].(string)
this.writeLog("info", "SubRegister_reply 注册成功: "+deviceName)
go this.SubDeviceLogin(productKey, deviceName, deviceSecret)
}
})
}
func (this *Iot) SubDeviceLogin(productKey, deviceName, deviceSecret string) {
this.AppendSubDevice(productKey, deviceName, deviceSecret)
this.PublishSubAdd(productKey, deviceName, deviceSecret)
time.Sleep(time.Second * 4)
this.PublishSubLogin(productKey, deviceName, deviceSecret)
}
/***
* 添加子设备
*/
func (this *Iot) PublishSubAdd(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","version":"1.0","params":[{"productKey" : "%s","deviceName" : "%s","clientId":"%s","sign":"%s","signmethod":"hmacSha1","timestamp":"%s"}],"method":"thing.topo.add"}`
data = fmt.Sprintf(data, this.getMsgId(), subProductKey, subDeviceName, subDeviceName, sign, timestamp)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add"
this.SubscribeSubAddReply()
this.Publish(topic, 0, true, []byte(data))
}
func (this *Iot) SubscribeSubAddReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add_reply"
this.Subscribe(topic_reply, 0, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "PublishSubAdd "+"JSON解析失败")
return
}
if msg.Code != 200 {
this.writeLog("error", "PublishSubAdd "+msg.Message)
}
this.writeLog("info", "PublishSubAdd 子设备拓扑添加成功")
return
})
}
/***
step [1, 100] -1:代表升级失败 -2:代表下载失败 -3:代表校验失败 -4:代表烧写失败
desc 进度信息
*/
func (this *Iot) PublishProgress(step int8, desc string) {
topic := "/ota/device/progress/" + this.ProductKey + "/" + this.DeviceName
data := `{ "id": "%s", "params": {"step":"%d", "desc":" %s"}}`
data = fmt.Sprintf(data, this.getMsgId(), step, desc)
this.Publish(topic, 1, false, []byte(data))
}
func (this *Iot) SubscribeUpgrade() {
topic := "/ota/device/upgrade/" + this.ProductKey + "/" + this.DeviceName
this.Subscribe(topic, 1, this.SubscribeUpgradeCallback)
}
func (this *Iot) SubscribeUpgradeCallback(client mqtt.Client, message mqtt.Message) {
//fmt.Println("SubscribeUpgradeCallback", message.Topic(), string(message.Payload()))
update := common_types.UpdateResponse{}
err := json.Unmarshal(message.Payload(), &update)
if err != nil {
this.writeLog("error", "SubscribeUpgrade"+"Json fail "+err.Error())
return
}
if update.Message != "success" {
this.writeLog("error", | DeviceName string | random_line_split |
|
mqtt.go | productKey, _ := deviceInfo["productKey"].(string)
deviceName, _ := deviceInfo["deviceName"].(string)
this.writeLog("info", "SubRegister_reply 注册成功: "+deviceName)
go this.SubDeviceLogin(productKey, deviceName, deviceSecret)
}
})
}
func (this *Iot) SubDeviceLogin(productKey, deviceName, deviceSecret string) {
this.AppendSubDevice(productKey, deviceName, deviceSecret)
this.PublishSubAdd(productKey, deviceName, deviceSecret)
time.Sleep(time.Second * 4)
this.PublishSubLogin(productKey, deviceName, deviceSecret)
}
/***
* 添加子设备
*/
func (this *Iot) PublishSubAdd(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","version":"1.0","params":[{"productKey" : "%s","deviceName" : "%s","clientId":"%s","sign":"%s","signmethod":"hmacSha1","timestamp":"%s"}],"method":"thing.topo.add"}`
data = fmt.Sprintf(data, this.getMsgId(), subProductKey, subDeviceName, subDeviceName, sign, timestamp)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add"
this.SubscribeSubAddReply()
this.Publish(topic, 0, true, []byte(data))
}
func (this *Iot) SubscribeSubAddReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add_reply"
this.Subscribe(topic_reply, 0, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "PublishSubAdd "+"JSON解析失败")
return
}
if msg.Code != 200 {
this.writeLog("error", "PublishSubAdd "+msg.Message)
}
this.writeLog("info", "PublishSubAdd 子设备拓扑添加成功")
return
})
}
/***
step [1, 100] -1:代表升级失败 -2:代表下载失败 -3:代表校验失败 -4:代表烧写失败
desc 进度信息
*/
func (this *Iot) PublishProgress(step int8, desc string) {
topic := "/ota/device/progress/" + this.ProductKey + "/" + this.DeviceName
data := `{ "id": "%s", "params": {"step":"%d", "desc":" %s"}}`
data = fmt.Sprintf(data, this.getMsgId(), step, desc)
this.Publish(topic, 1, false, []byte(data))
}
func (this *Iot) SubscribeUpgrade() {
topic := "/ota/device/upgrade/" + this.ProductKey + "/" + this.DeviceName
this.Subscribe(topic, 1, this.SubscribeUpgradeCallback)
}
func (this *Iot) SubscribeUpgradeCallback(client mqtt.Client, message mqtt.Message) {
//fmt.Println("SubscribeUpgradeCallback", message.Topic(), string(message.Payload()))
update := common_types.UpdateResponse{}
err := json.Unmarshal(message.Payload(), &update)
if err != nil {
this.writeLog("error", "SubscribeUpgrade"+"Json fail "+err.Error())
return
}
if update.Message != "success" {
this.writeLog("error", "SubscribeUpgrade "+update.Message)
return
}
this.SyncUpgradeFile(update)
}
/***
* 上报设备版本信息
*/
func (this *Iot) PublishInform(version string) {
data := `{"id": "%s","params": {"version": "%s"}}`
data = fmt.Sprintf(data, this.getMsgId(), version)
topic := "/ota/device/inform/" + this.ProductKey + "/" + this.DeviceName
this.Publish(topic, 0, false, []byte(data))
}
/***
* 子设备登陆 这个函数会一直执行 ,所以在运行时要加上 go iot.PublishSubLogin
*/
func (this *Iot) PublishSubLogin(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","params":{"productKey":"%s","deviceName":"%s","clientId":"%s","sign":"%s","timestamp":"%s","signMethod":"hmacSha1","cleanSession":"false"}}`
data = fmt.Sprintf(data, "ababab", subProductKey, subDeviceName, subDeviceName, sign, timestamp)
topic := "/ext/session/" + this.ProductKey + "/" + this.DeviceName + "/combine/login"
this.Publish(topic, 1, true, []byte(data))
}
/***
* 子设备登陆回调函数
*/
func (this *Iot) SubscribeSubLoginReply() {
topic_reply := "/ext/session/" + this.ProductKey + "/" + this.DeviceName + "/combine/login_reply"
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg := common_types.LoginResponse{}
err := json.Unmarshal(message.Payload(), &msg)
if err != nil {
this.writeLog("error", "SubLogin_reply Json 解析失败"+string(message.Payload()))
return
}
if msg.Code != 200 {
this.writeLog("error", "SubLogin_reply Json 登陆失败"+msg.Message)
return
}
this.writeLog("info", "SubLogin_reply "+msg.Data.DeviceName+" 登陆成功"+msg.Message)
/*订阅主题*/
this.SubscribeSubGet(msg.Data.ProductKey, msg.Data.DeviceName)
})
}
/***
* 子设备下线
*/
func (this *Iot) PublishSubLoginOut(subProductKey, subDeviceName string) {
data := `{"id":"%s","params":{"productKey":"%s","deviceName":"%s",}}`
data = fmt.Sprintf(data, this.getMsgId(), subProductKey, subDeviceName)
topci := "/" + this.ProductKey + "/" + this.DeviceName + "/combine/logout"
topci_reply := "/" + this.ProductKey + "/" + this.DeviceName + "/combine/logout_reply"
this.Publish(topci, 1, false, []byte(data))
this.Subscribe(topci_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "SubLoginOut :"+subDeviceName+" "+err.Error())
return
}
if msg.Code != 200 {
this.writeLog("error", "SubLoginOut :"+subDeviceName+" "+msg.Message)
return
}
})
}
/***
* 计算签名
*/
func (this *Iot) GetSign(productKey, deviceName, deviceSecret string) (string, string) {
timestamp := strconv.Itoa(int(time.Now().Unix()))
str := "clientId" + deviceName + "deviceName" + deviceName + "productKey" + productKey + "timestamp" + timestamp;
key := []byte(deviceSecret)
hmacHandel := hmac.New(sha1.New, key)
hmacHandel.Write([]byte(str))
res := hmacHandel.Sum(nil)
return hex.EncodeToString(res), timestamp
}
/***
* 获取一个唯一的消息Id
*/
func (this *Iot) getMsgId() string {
return strconv.Itoa(int(time.Now().UnixNano()))
}
func (this *Iot) SetLogOutPut(writer interfaces.ModuleLogger) {
this.logOut = writer
}
func (this *Iot) writeLog(logType, Content string) {
switch logType {
case "warning":
this.logOut.Warning(Content)
break;
case "info":
this.logOut.Info(Content)
break
case "error":
this.logOut.Error(Content)
break;
case "debug":
this.logOut.Debug(Content)
break
default:
this.logOut.Info(Content)
break
}
}
func (this *Iot) Write(data []byte) (int, error) {
if this.Conn.IsConnected() {
this.PublishRaw(data)
}
return 0, nil
}
var appendSubDevicesMutex = sync.Mutex{}
/***
* 添加子设备
*/
func (this *Iot) AppendSubDevice(subProductKey, subDeviceName, subDeviceSecret string) (Device) {
subDevice := Device{}
subDevice.ProductKey = subProductKey
subDevice.DeviceName = subDeviceName
subDevice.DeviceSecret = subDeviceSecret
appendSubDevicesMutex.Lock()
this.SubDevices = append(this.SubDevices, subDevice)
appendSubDevicesMutex.Unlock()
return subDevice
}
func (this *Iot) PublishRaw(data []byte) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/update"
this.Publish(topic, 1, false, data)
}
func (this *Iot) PublishLog(log []byte) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/update"
type Log struct {
Timestamp int64 `json:"timestamp"`
Event string `json:"event"`
Data string `json:"data"`
}
logData := Log{}
logData.Timestamp = time.Now().Unix()
logData.Data = string(log)
logData.Event = "log"
data, err := json.Marshal(logData)
if err != nil {
| return
} | identifier_name |
|
mqtt.go | params.DeviceName
iot.ProductKey = params.ProductKey
iot.Host = params.ProductKey + ".iot-as-mqtt.cn-shanghai.aliyuncs.com:1883"
opts := mqtt.NewClientOptions().AddBroker(iot.Host).SetClientID(iot.ClientId).SetUsername(iot.Username).SetPassword(iot.Password)
opts.SetPingTimeout(5 * time.Second)
opts.SetKeepAlive(30 * time.Second)
opts.SetCleanSession(false)
opts.SetAutoReconnect(true)
opts.SetConnectionLostHandler(params.ConnectionLostHandler)
opts.SetDefaultPublishHandler(params.DefaultHandel)
opts.SetOnConnectHandler(params.OnConnectHandler)
opts.SetMaxReconnectInterval(2 * time.Minute)
iot.App = params.App
iot.logOut = params.Logger
iot.Conn = mqtt.NewClient(opts)
return
}
func (this *Iot) Connect() (err error) {
c := this.Conn
if token := c.Connect(); token.Wait() && token.Error() != nil {
return token.Error()
}
return nil
}
func (this *Iot) Close() {
if this.Conn.IsConnected() {
this.Conn.Disconnect(256)
}
}
func (this *Iot) Publish(topic string, qos byte, retained bool, payload interface{}) error {
this.Conn.Publish(topic, qos, retained, payload)
return nil
}
func (this *Iot) Subscribe(topic string, qos byte, callback mqtt.MessageHandler) error {
if token := this.Conn.Subscribe(topic, qos, func(client mqtt.Client, message mqtt.Message) {
callback(client, message)
}); token.Wait() && token.Error() != nil {
this.writeLog("error", "Token publish: "+token.Error().Error())
return token.Error()
}
return nil
}
func (this *Iot) SubscribeAndCheck(topic string, qos byte) error {
if token := this.Conn.Subscribe(topic, qos, this.SubscribeGetCallback); token.Wait() && token.Error() != nil {
this.writeLog("error", "Token publish: "+token.Error().Error())
return token.Error()
}
return nil
}
func (this *Iot) SubscribeGetCallback(client mqtt.Client, message mqtt.Message) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/get"
msg := &AliMsg{}
err := json.Unmarshal(message.Payload(), msg)
if err != nil {
this.writeLog("error", "Topic "+topic+" 消息解密失败 "+err.Error()+" Payload: "+string(message.Payload()))
return
}
if !msgIds.Check(msg.MsgId) {
this.writeLog("warning", "msgId "+msg.MsgId+" Topic"+topic+" 重复消息")
return
}
event := common_types.NewEvent(msg.MsgId ,message.Payload())
this.App.Pub(event)
}
/***
* 订阅get消息
*/
func (this *Iot) SubscribeGet() {
topic := "/" + this.Produ | *Iot) SubscribeSubGet(subProductKey, subDeviceName string) {
topic := "/" + subProductKey + "/" + subDeviceName + "/get"
this.SubscribeAndCheck(topic, 0)
}
/***
* 子设备注册
*/
func (this *Iot) PublishSubRegister(subProductKey, subDeviceName string) {
data := "{'id': '%s', 'version':'1.0','params':[{'deviceName':'%s','productKey':'%s'}],'method':'thing.sub.register'}"
data = fmt.Sprintf(data, this.getMsgId(), subDeviceName, subProductKey)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/sub/register"
this.Publish(topic, 0, false, []byte(data))
}
func (this *Iot) SubscribeSubRegisterReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/sub/register_reply"
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "SubRegister_reply json内容解析失败 "+string(message.Payload()))
return
}
if msg.Code != 200 {
this.writeLog("error", "SubRegister_reply 子设备注册失败 "+msg.Message)
return
}
v, ok := msg.Data.([]interface{})
if !ok {
this.writeLog("error", "SubRegister_reply json内容解析失败->data解析失败 "+string(message.Payload()))
return
}
for _, deviceData := range v {
deviceInfo, ok := deviceData.(map[string]interface{})
if !ok {
this.writeLog("error", "SubRegister_reply json内容解析失败->data解析失败->不能转为map"+string(message.Payload()))
continue
}
deviceSecret, _ := deviceInfo["deviceSecret"].(string)
productKey, _ := deviceInfo["productKey"].(string)
deviceName, _ := deviceInfo["deviceName"].(string)
this.writeLog("info", "SubRegister_reply 注册成功: "+deviceName)
go this.SubDeviceLogin(productKey, deviceName, deviceSecret)
}
})
}
func (this *Iot) SubDeviceLogin(productKey, deviceName, deviceSecret string) {
this.AppendSubDevice(productKey, deviceName, deviceSecret)
this.PublishSubAdd(productKey, deviceName, deviceSecret)
time.Sleep(time.Second * 4)
this.PublishSubLogin(productKey, deviceName, deviceSecret)
}
/***
* 添加子设备
*/
func (this *Iot) PublishSubAdd(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","version":"1.0","params":[{"productKey" : "%s","deviceName" : "%s","clientId":"%s","sign":"%s","signmethod":"hmacSha1","timestamp":"%s"}],"method":"thing.topo.add"}`
data = fmt.Sprintf(data, this.getMsgId(), subProductKey, subDeviceName, subDeviceName, sign, timestamp)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add"
this.SubscribeSubAddReply()
this.Publish(topic, 0, true, []byte(data))
}
func (this *Iot) SubscribeSubAddReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add_reply"
this.Subscribe(topic_reply, 0, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "PublishSubAdd "+"JSON解析失败")
return
}
if msg.Code != 200 {
this.writeLog("error", "PublishSubAdd "+msg.Message)
}
this.writeLog("info", "PublishSubAdd 子设备拓扑添加成功")
return
})
}
/***
step [1, 100] -1:代表升级失败 -2:代表下载失败 -3:代表校验失败 -4:代表烧写失败
desc 进度信息
*/
func (this *Iot) PublishProgress(step int8, desc string) {
topic := "/ota/device/progress/" + this.ProductKey + "/" + this.DeviceName
data := `{ "id": "%s", "params": {"step":"%d", "desc":" %s"}}`
data = fmt.Sprintf(data, this.getMsgId(), step, desc)
this.Publish(topic, 1, false, []byte(data))
}
func (this *Iot) SubscribeUpgrade() {
topic := "/ota/device/upgrade/" + this.ProductKey + "/" + this.DeviceName
this.Subscribe(topic, 1, this.SubscribeUpgradeCallback)
}
func (this *Iot) SubscribeUpgradeCallback(client mqtt.Client, message mqtt.Message) {
//fmt.Println("SubscribeUpgradeCallback", message.Topic(), string(message.Payload()))
update := common_types.UpdateResponse{}
err := json.Unmarshal(message.Payload(), &update)
if err != nil {
this.writeLog("error", "SubscribeUpgrade"+"Json fail "+err.Error())
return
}
if update.Message != "success" {
this.writeLog("error", "SubscribeUpgrade "+update.Message)
return
}
this.SyncUpgradeFile(update)
}
/***
* 上报设备版本信息
*/
func (this *Iot) PublishInform(version string) {
data := `{"id": "%s","params": {"version": "%s"}}`
data = fmt.Sprintf(data, this.getMsgId(), version)
topic := "/ota/device/inform/" + this.ProductKey + "/" + this.DeviceName
this.Publish(topic, 0, false, []byte(data))
}
/***
* 子设备登陆 这个函数会一直执行 ,所以在运行时要加上 go iot.PublishSubLogin
*/
func (this *Iot) PublishSubLogin(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","params":{"productKey":"%s","deviceName":"%s","clientId":"%s","sign":"%s","timestamp":"%s","signMethod":"h | ctKey + "/" + this.DeviceName + "/get"
this.SubscribeAndCheck(topic, 1)
}
/***
* 子设备
*/
func (this | identifier_body |
MapTilingScheme.ts | createFromContentId(stringId: string) {
const idParts = stringId.split("_");
if (3 !== idParts.length) {
assert(false, "Invalid quad tree ID");
return new QuadId(-1, -1, -1);
}
return new QuadId(parseInt(idParts[0], 10), parseInt(idParts[1], 10), parseInt(idParts[2], 10));
}
public get contentId(): string { return this.level + "_" + this.column + "_" + this.row; }
public constructor(level: number, column: number, row: number) {
this.level = level;
this.column = column;
this.row = row;
}
// Not used in display - used only to tell whether this tile overlaps the range provided by a tile provider for attribution.
public getLatLongRange(mapTilingScheme: MapTilingScheme): Range2d {
const range = Range2d.createNull();
mapTilingScheme.tileXYToCartographic(this.column, this.row, this.level, QuadId._scratchCartographic);
range.extendXY(QuadId._scratchCartographic.longitude * Angle.degreesPerRadian, QuadId._scratchCartographic.latitude * Angle.degreesPerRadian);
mapTilingScheme.tileXYToCartographic(this.column + 1, this.row + 1, this.level, QuadId._scratchCartographic);
range.extendXY(QuadId._scratchCartographic.longitude * Angle.degreesPerRadian, QuadId._scratchCartographic.latitude * Angle.degreesPerRadian);
return range;
}
}
/** @internal */
export class MapTileRectangle extends Range2d {
public constructor(west = 0, south = 0, east = 0, north = 0) {
super(west, south, east, north);
}
public static create(west = 0, south = 0, east = 0, north = 0, result?: MapTileRectangle): MapTileRectangle {
if (!result)
return new MapTileRectangle(west, south, east, north);
result.init(west, south, east, north);
return result;
}
public get west() { return this.low.x; }
public set west(x: number) { this.low.x = x; }
public get south() { return this.low.y; }
public set south(y: number) { this.low.y = y; }
public get east() { return this.high.x; }
public set east(x: number) { this.high.x = x; }
public get north() { return this.high.y; }
public set north(y: number) { this.high.y = y; }
public init(west = 0, south = 0, east = 0, north = 0) {
this.west = west;
this.south = south;
this.east = east;
this.north = north;
}
public containsCartographic(carto: Cartographic) { return this.containsXY(carto.longitude, carto.latitude); }
public getCenter(result?: Cartographic): Cartographic {
return Cartographic.fromRadians((this.west + this.east) / 2, (this.north + this.south) / 2, 0, result);
}
}
/** @internal */
export abstract class MapTilingScheme {
private _scratchFraction = Point2d.createZero();
/**
* @param longitude in radians (-pi to pi)
*/
public longitudeToXFraction(longitude: number) {
return longitude / Angle.pi2Radians + .5;
}
/**
* Return longitude in radians (-pi to pi from fraction).
* @param xFraction
*/
public xFractionToLongitude(xFraction: number) {
return Angle.pi2Radians * (xFraction - .5);
}
public abstract yFractionToLatitude(yFraction: number): number;
public abstract latitudeToYFraction(latitude: number): number;
protected constructor(public readonly numberOfLevelZeroTilesX: number, public readonly numberOfLevelZeroTilesY: number, private _rowZeroAtTop: boolean) { }
/**
* Gets the total number of tiles in the X direction at a specified level-of-detail.
*
* @param {Number} level The level-of-detail. Level 0 is the root tile.
* @returns {Number} The number of tiles in the X direction at the given level.
*/
public getNumberOfXTilesAtLevel(level: number) {
return 0 === level ? 1 : this.numberOfLevelZeroTilesX << (level - 1);
}
/**
* Gets the total number of tiles in the Y direction at a specified level-of-detail.
*
*
* @param {Number} level The level-of-detail. Level 0 is the root tile.
* @returns {Number} The number of tiles in the Y direction at the given level.
*/
public getNumberOfYTilesAtLevel(level: number): number {
return (0 === level) ? 1 : this.numberOfLevelZeroTilesY << (level - 1);
}
|
public tileYToFraction(y: number, level: number): number {
let yFraction = y / this.getNumberOfYTilesAtLevel(level);
if (this._rowZeroAtTop)
yFraction = 1.0 - yFraction;
return yFraction;
}
public tileXToLongitude(x: number, level: number) {
return this.xFractionToLongitude(this.tileXToFraction(x, level));
}
public tileYToLatitude(y: number, level: number) {
return this.yFractionToLatitude(this.tileYToFraction(y, level));
}
/**
* Gets the fraction of the normalized (0-1) coordinates with at left, bottom.
*
* @param x column
* @param y row
* @param level depth
* @param result result (0-1 from left, bottom
*/
public tileXYToFraction(x: number, y: number, level: number, result?: Point2d): Point2d {
if (undefined === result)
result = Point2d.createZero();
result.x = this.tileXToFraction(x, level);
result.y = this.tileYToFraction(y, level);
return result;
}
/**
*
* @param x column
* @param y row
* @param level depth
* @param result result longitude, latitude.
* @param height height (optional)
*/
public tileXYToCartographic(x: number, y: number, level: number, result: Cartographic, height?: number): Cartographic {
this.tileXYToFraction(x, y, level, this._scratchFraction);
return this.fractionToCartographic(this._scratchFraction.x, this._scratchFraction.y, result, height);
}
public tileXYToRectangle(x: number, y: number, level: number, result?: MapTileRectangle) {
return MapTileRectangle.create(this.tileXToLongitude(x, level), this.tileYToLatitude(y, level), this.tileXToLongitude(x + 1, level), this.tileYToLatitude(y + 1, level), result);
}
/**
*
* @param xFraction
* @param yFraction
* @param result
* @param height
*/
public fractionToCartographic(xFraction: number, yFraction: number, result: Cartographic, height?: number): Cartographic {
result.longitude = this.xFractionToLongitude(xFraction);
result.latitude = this.yFractionToLatitude(yFraction);
result.height = undefined === height ? 0.0 : height;
return result;
}
public cartographicToFraction(latitudeRadians: number, longitudeRadians: number, result: Point2d): Point2d {
result.x = this.longitudeToXFraction(longitudeRadians);
result.y = this.latitudeToYFraction(latitudeRadians);
return result;
}
// gets the longitude and latitude into a point with coordinates between 0 and 1
public ecefToPixelFraction(point: Point3d): Point3d {
const cartoGraphic = Cartographic.fromEcef(point)!;
return Point3d.create(this.longitudeToXFraction(cartoGraphic.longitude), this.latitudeToYFraction(cartoGraphic.latitude), 0.0);
}
public computeMercatorFractionToDb(iModel: IModelConnection, groundBias: number): Transform {
const ecefLocation: EcefLocation = iModel.ecefLocation!;
const dbToEcef = ecefLocation.getTransform();
const projectCenter = Point3d.create(iModel.projectExtents.center.x, iModel.projectExtents.center.y, groundBias);
const projectEast = Point3d.create(projectCenter.x + 1.0, projectCenter.y, groundBias);
const projectNorth = Point3d.create(projectCenter.x, projectCenter.y + 1.0, groundBias);
const mercatorOrigin = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectCenter));
const mercatorX = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectEast));
const mercatorY = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectNorth));
const | public tileXToFraction(x: number, level: number): number {
return x / this.getNumberOfXTilesAtLevel(level);
}
| random_line_split |
MapTilingScheme.ts | () { return this.level >= 0; }
private static _scratchCartographic = new Cartographic();
public static createFromContentId(stringId: string) {
const idParts = stringId.split("_");
if (3 !== idParts.length) {
assert(false, "Invalid quad tree ID");
return new QuadId(-1, -1, -1);
}
return new QuadId(parseInt(idParts[0], 10), parseInt(idParts[1], 10), parseInt(idParts[2], 10));
}
public get contentId(): string { return this.level + "_" + this.column + "_" + this.row; }
public constructor(level: number, column: number, row: number) {
this.level = level;
this.column = column;
this.row = row;
}
// Not used in display - used only to tell whether this tile overlaps the range provided by a tile provider for attribution.
public getLatLongRange(mapTilingScheme: MapTilingScheme): Range2d {
const range = Range2d.createNull();
mapTilingScheme.tileXYToCartographic(this.column, this.row, this.level, QuadId._scratchCartographic);
range.extendXY(QuadId._scratchCartographic.longitude * Angle.degreesPerRadian, QuadId._scratchCartographic.latitude * Angle.degreesPerRadian);
mapTilingScheme.tileXYToCartographic(this.column + 1, this.row + 1, this.level, QuadId._scratchCartographic);
range.extendXY(QuadId._scratchCartographic.longitude * Angle.degreesPerRadian, QuadId._scratchCartographic.latitude * Angle.degreesPerRadian);
return range;
}
}
/** @internal */
export class MapTileRectangle extends Range2d {
public constructor(west = 0, south = 0, east = 0, north = 0) {
super(west, south, east, north);
}
public static create(west = 0, south = 0, east = 0, north = 0, result?: MapTileRectangle): MapTileRectangle {
if (!result)
return new MapTileRectangle(west, south, east, north);
result.init(west, south, east, north);
return result;
}
public get west() { return this.low.x; }
public set west(x: number) { this.low.x = x; }
public get south() { return this.low.y; }
public set south(y: number) { this.low.y = y; }
public get east() { return this.high.x; }
public set east(x: number) { this.high.x = x; }
public get north() { return this.high.y; }
public set north(y: number) { this.high.y = y; }
public init(west = 0, south = 0, east = 0, north = 0) {
this.west = west;
this.south = south;
this.east = east;
this.north = north;
}
public containsCartographic(carto: Cartographic) { return this.containsXY(carto.longitude, carto.latitude); }
public getCenter(result?: Cartographic): Cartographic {
return Cartographic.fromRadians((this.west + this.east) / 2, (this.north + this.south) / 2, 0, result);
}
}
/** @internal */
export abstract class MapTilingScheme {
private _scratchFraction = Point2d.createZero();
/**
* @param longitude in radians (-pi to pi)
*/
public longitudeToXFraction(longitude: number) {
return longitude / Angle.pi2Radians + .5;
}
/**
* Return longitude in radians (-pi to pi from fraction).
* @param xFraction
*/
public xFractionToLongitude(xFraction: number) {
return Angle.pi2Radians * (xFraction - .5);
}
public abstract yFractionToLatitude(yFraction: number): number;
public abstract latitudeToYFraction(latitude: number): number;
protected constructor(public readonly numberOfLevelZeroTilesX: number, public readonly numberOfLevelZeroTilesY: number, private _rowZeroAtTop: boolean) { }
/**
* Gets the total number of tiles in the X direction at a specified level-of-detail.
*
* @param {Number} level The level-of-detail. Level 0 is the root tile.
* @returns {Number} The number of tiles in the X direction at the given level.
*/
public getNumberOfXTilesAtLevel(level: number) {
return 0 === level ? 1 : this.numberOfLevelZeroTilesX << (level - 1);
}
/**
* Gets the total number of tiles in the Y direction at a specified level-of-detail.
*
*
* @param {Number} level The level-of-detail. Level 0 is the root tile.
* @returns {Number} The number of tiles in the Y direction at the given level.
*/
public getNumberOfYTilesAtLevel(level: number): number {
return (0 === level) ? 1 : this.numberOfLevelZeroTilesY << (level - 1);
}
public tileXToFraction(x: number, level: number): number {
return x / this.getNumberOfXTilesAtLevel(level);
}
public tileYToFraction(y: number, level: number): number {
let yFraction = y / this.getNumberOfYTilesAtLevel(level);
if (this._rowZeroAtTop)
yFraction = 1.0 - yFraction;
return yFraction;
}
public tileXToLongitude(x: number, level: number) {
return this.xFractionToLongitude(this.tileXToFraction(x, level));
}
public tileYToLatitude(y: number, level: number) {
return this.yFractionToLatitude(this.tileYToFraction(y, level));
}
/**
* Gets the fraction of the normalized (0-1) coordinates with at left, bottom.
*
* @param x column
* @param y row
* @param level depth
* @param result result (0-1 from left, bottom
*/
public tileXYToFraction(x: number, y: number, level: number, result?: Point2d): Point2d {
if (undefined === result)
result = Point2d.createZero();
result.x = this.tileXToFraction(x, level);
result.y = this.tileYToFraction(y, level);
return result;
}
/**
*
* @param x column
* @param y row
* @param level depth
* @param result result longitude, latitude.
* @param height height (optional)
*/
public tileXYToCartographic(x: number, y: number, level: number, result: Cartographic, height?: number): Cartographic {
this.tileXYToFraction(x, y, level, this._scratchFraction);
return this.fractionToCartographic(this._scratchFraction.x, this._scratchFraction.y, result, height);
}
public tileXYToRectangle(x: number, y: number, level: number, result?: MapTileRectangle) {
return MapTileRectangle.create(this.tileXToLongitude(x, level), this.tileYToLatitude(y, level), this.tileXToLongitude(x + 1, level), this.tileYToLatitude(y + 1, level), result);
}
/**
*
* @param xFraction
* @param yFraction
* @param result
* @param height
*/
public fractionToCartographic(xFraction: number, yFraction: number, result: Cartographic, height?: number): Cartographic {
result.longitude = this.xFractionToLongitude(xFraction);
result.latitude = this.yFractionToLatitude(yFraction);
result.height = undefined === height ? 0.0 : height;
return result;
}
public cartographicToFraction(latitudeRadians: number, longitudeRadians: number, result: Point2d): Point2d {
result.x = this.longitudeToXFraction(longitudeRadians);
result.y = this.latitudeToYFraction(latitudeRadians);
return result;
}
// gets the longitude and latitude into a point with coordinates between 0 and 1
public ecefToPixelFraction(point: Point3d): Point3d {
const cartoGraphic = Cartographic.fromEcef(point)!;
return Point3d.create(this.longitudeToXFraction(cartoGraphic.longitude), this.latitudeToYFraction(cartoGraphic.latitude), 0.0);
}
public computeMercatorFractionToDb(iModel: IModelConnection, groundBias: number): Transform {
const ecefLocation: EcefLocation = iModel.ecefLocation!;
const dbToEcef = ecefLocation.getTransform();
const projectCenter = Point3d.create(iModel.projectExtents.center.x, iModel.projectExtents.center.y, groundBias);
const projectEast = Point3d.create(projectCenter.x + 1.0, projectCenter.y, groundBias);
const projectNorth = Point3d.create(projectCenter.x, projectCenter.y + 1.0, groundBias);
const mercatorOrigin = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectCenter));
const mercatorX = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectEast));
const | isValid | identifier_name |
|
MapTilingScheme.ts | * Angle.degreesPerRadian, QuadId._scratchCartographic.latitude * Angle.degreesPerRadian);
return range;
}
}
/** @internal */
export class MapTileRectangle extends Range2d {
public constructor(west = 0, south = 0, east = 0, north = 0) {
super(west, south, east, north);
}
public static create(west = 0, south = 0, east = 0, north = 0, result?: MapTileRectangle): MapTileRectangle {
if (!result)
return new MapTileRectangle(west, south, east, north);
result.init(west, south, east, north);
return result;
}
public get west() { return this.low.x; }
public set west(x: number) { this.low.x = x; }
public get south() { return this.low.y; }
public set south(y: number) { this.low.y = y; }
public get east() { return this.high.x; }
public set east(x: number) { this.high.x = x; }
public get north() { return this.high.y; }
public set north(y: number) { this.high.y = y; }
public init(west = 0, south = 0, east = 0, north = 0) {
this.west = west;
this.south = south;
this.east = east;
this.north = north;
}
public containsCartographic(carto: Cartographic) { return this.containsXY(carto.longitude, carto.latitude); }
public getCenter(result?: Cartographic): Cartographic {
return Cartographic.fromRadians((this.west + this.east) / 2, (this.north + this.south) / 2, 0, result);
}
}
/** @internal */
export abstract class MapTilingScheme {
private _scratchFraction = Point2d.createZero();
/**
* @param longitude in radians (-pi to pi)
*/
public longitudeToXFraction(longitude: number) {
return longitude / Angle.pi2Radians + .5;
}
/**
* Return longitude in radians (-pi to pi from fraction).
* @param xFraction
*/
public xFractionToLongitude(xFraction: number) {
return Angle.pi2Radians * (xFraction - .5);
}
public abstract yFractionToLatitude(yFraction: number): number;
public abstract latitudeToYFraction(latitude: number): number;
protected constructor(public readonly numberOfLevelZeroTilesX: number, public readonly numberOfLevelZeroTilesY: number, private _rowZeroAtTop: boolean) { }
/**
* Gets the total number of tiles in the X direction at a specified level-of-detail.
*
* @param {Number} level The level-of-detail. Level 0 is the root tile.
* @returns {Number} The number of tiles in the X direction at the given level.
*/
public getNumberOfXTilesAtLevel(level: number) {
return 0 === level ? 1 : this.numberOfLevelZeroTilesX << (level - 1);
}
/**
* Gets the total number of tiles in the Y direction at a specified level-of-detail.
*
*
* @param {Number} level The level-of-detail. Level 0 is the root tile.
* @returns {Number} The number of tiles in the Y direction at the given level.
*/
public getNumberOfYTilesAtLevel(level: number): number {
return (0 === level) ? 1 : this.numberOfLevelZeroTilesY << (level - 1);
}
public tileXToFraction(x: number, level: number): number {
return x / this.getNumberOfXTilesAtLevel(level);
}
public tileYToFraction(y: number, level: number): number {
let yFraction = y / this.getNumberOfYTilesAtLevel(level);
if (this._rowZeroAtTop)
yFraction = 1.0 - yFraction;
return yFraction;
}
public tileXToLongitude(x: number, level: number) {
return this.xFractionToLongitude(this.tileXToFraction(x, level));
}
public tileYToLatitude(y: number, level: number) {
return this.yFractionToLatitude(this.tileYToFraction(y, level));
}
/**
* Gets the fraction of the normalized (0-1) coordinates with at left, bottom.
*
* @param x column
* @param y row
* @param level depth
* @param result result (0-1 from left, bottom
*/
public tileXYToFraction(x: number, y: number, level: number, result?: Point2d): Point2d {
if (undefined === result)
result = Point2d.createZero();
result.x = this.tileXToFraction(x, level);
result.y = this.tileYToFraction(y, level);
return result;
}
/**
*
* @param x column
* @param y row
* @param level depth
* @param result result longitude, latitude.
* @param height height (optional)
*/
public tileXYToCartographic(x: number, y: number, level: number, result: Cartographic, height?: number): Cartographic {
this.tileXYToFraction(x, y, level, this._scratchFraction);
return this.fractionToCartographic(this._scratchFraction.x, this._scratchFraction.y, result, height);
}
public tileXYToRectangle(x: number, y: number, level: number, result?: MapTileRectangle) {
return MapTileRectangle.create(this.tileXToLongitude(x, level), this.tileYToLatitude(y, level), this.tileXToLongitude(x + 1, level), this.tileYToLatitude(y + 1, level), result);
}
/**
*
* @param xFraction
* @param yFraction
* @param result
* @param height
*/
public fractionToCartographic(xFraction: number, yFraction: number, result: Cartographic, height?: number): Cartographic {
result.longitude = this.xFractionToLongitude(xFraction);
result.latitude = this.yFractionToLatitude(yFraction);
result.height = undefined === height ? 0.0 : height;
return result;
}
public cartographicToFraction(latitudeRadians: number, longitudeRadians: number, result: Point2d): Point2d {
result.x = this.longitudeToXFraction(longitudeRadians);
result.y = this.latitudeToYFraction(latitudeRadians);
return result;
}
// gets the longitude and latitude into a point with coordinates between 0 and 1
public ecefToPixelFraction(point: Point3d): Point3d {
const cartoGraphic = Cartographic.fromEcef(point)!;
return Point3d.create(this.longitudeToXFraction(cartoGraphic.longitude), this.latitudeToYFraction(cartoGraphic.latitude), 0.0);
}
public computeMercatorFractionToDb(iModel: IModelConnection, groundBias: number): Transform {
const ecefLocation: EcefLocation = iModel.ecefLocation!;
const dbToEcef = ecefLocation.getTransform();
const projectCenter = Point3d.create(iModel.projectExtents.center.x, iModel.projectExtents.center.y, groundBias);
const projectEast = Point3d.create(projectCenter.x + 1.0, projectCenter.y, groundBias);
const projectNorth = Point3d.create(projectCenter.x, projectCenter.y + 1.0, groundBias);
const mercatorOrigin = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectCenter));
const mercatorX = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectEast));
const mercatorY = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectNorth));
const deltaX = Vector3d.createStartEnd(mercatorOrigin, mercatorX);
const deltaY = Vector3d.createStartEnd(mercatorOrigin, mercatorY);
const dbToMercator = Transform.createOriginAndMatrixColumns(mercatorOrigin, deltaX, deltaY, Vector3d.create(0.0, 0.0, 1.0)).multiplyTransformTransform(Transform.createTranslationXYZ(-projectCenter.x, -projectCenter.y, -groundBias));
return dbToMercator.inverse() as Transform;
}
}
/** @internal */
export class GeographicTilingScheme extends MapTilingScheme {
public constructor(numberOfLevelZeroTilesX: number = 2, numberOfLevelZeroTilesY: number = 1, rowZeroAtTop: boolean = false) {
super(numberOfLevelZeroTilesX, numberOfLevelZeroTilesY, rowZeroAtTop);
}
public yFractionToLatitude(yFraction: number): number {
return Math.PI * (yFraction - .5);
}
public latitudeToYFraction(latitude: number): number {
return .5 + latitude / Math.PI;
}
}
/** @internal */
export class WebMercatorTilingScheme extends MapTilingScheme {
public constructor(numberOfLevelZeroTilesX: number = 2, numberOfLevelZeroTilesY: number = 2, rowZeroAtTop: boolean = false) | {
super(numberOfLevelZeroTilesX, numberOfLevelZeroTilesY, rowZeroAtTop);
} | identifier_body |
|
exceptions.py | MODULE_CODE = ErrorCode.BKDATA_AUTH
class AuthCode:
# 错误码映射表,目前以三元组定义,包含了错误码、描述和处理方式(可选)
SDK_PERMISSION_DENIED_ERR = ("001", _("资源访问权限不足"))
SDK_PARAM_MISS_ERR = ("002", _("认证参数缺失"), _("请查看API请求文档"))
SDK_AUTHENTICATION_ERR = ("003", _("认证不通过,请提供合法的 BKData 认证信息"), _("请查看API请求文档"))
SDK_INVALID_SECRET_ERR = (
"004",
_("内部模块调用请传递准确的 bk_app_code 和 bk_app_secret"),
_("传递给的变量需要与 dataapi_settings.py 保持一致"),
)
SDK_INVALID_TOKEN_ERR = ("005", _("数据平台授权码不正确"), _("前往平台授权码页面查看授权码"))
SDK_NO_INDENTITY_ERR = ("007", _("未检测到有效的认证信息"), _("请查看API请求文档"))
SDK_WRONG_INDENTIRY_ERR = ("008", _("错误的认证方式"), _("请查看API请求文档"))
SDK_JWT_VERIFY_ERR = ("009", _("ESB 传递的 JWT 字符串解析失败"))
# 单据异常
NO_PERMISSION_ERR = ("101", _("当前用户无单据权限"))
TICKET_STATE_HAS_BEEN_OPERATED_ERR = ("102", _("当前单据状态已审批"))
APPROVAL_RANGE_ERR = ("103", _("审批范围错误"))
NOT_IN_APPROVAL_PROCESS_ERR = ("104", _("未在可审批阶段"))
NO_UPDATE_PERMISSION_ERR = ("105", _("无更新权限"))
OBJECT_CLASS_ERR = ("106", _("object类型错误"))
SUBJECT_CHECK_ERR = ("107", _("主体校验失败"))
SCOPE_CHECK_ERR = ("108", _("申请范围校验失败"))
HAS_ALREADY_EXISTS_ERR = ("109", _("已提交过此权限申请,请勿重复提交"))
OBJECT_NOT_EXIST_ERR = ("110", _("object不存在"))
ACTION_CHECK_ERR = ("111", _("action校验错误"))
NO_PROCESSOR_ERR = ("112", _("没有审批人"))
UNEXPECTED_TICKET_TYPE = ("113", _("单据类型校验错误"))
REPEAT_PERMISSION_INFO = ("114", _("重复的权限信息"))
ROLE_AT_LEAST_ONE_ERR = ("115", _("该角色成员不能为空"))
PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR = ("116", _("申请的权限对象不存在"))
PROJECT_DATA_VALID_ERR = ("117", _("项目所申请的业务数据的区域标签不合法"))
NO_MATCHED_TICKET_TYPE_PROCESS_ERR = ("118", _("没有匹配的单据类型的审批流程"))
TICKET_CALLBACK_ERR = ("119", _("通用单据回调失败"))
NOTICE_APPROVE_CALLBACK_ERR = ("120", _("单据通知审核回调异常"))
NOT_EXIST_ERR = ("120", _("实例不存在"))
NOT_OBJECT_CLASS_SAME_WITH_META = ("121", _("不存在与 Meta 映射的对象类型"))
# data token 异常
TOKEN_NOT_EXIST_ERR = ("201", _("授权码不存在"))
TOKEN_NOT_AUTHORIZED = ("202", _("授权码未完全通过审批"))
TOKEN_EXPIRED_ERR = ("203", _("授权码已过期"))
DATA_SCOPE_FORMAT_ERR = ("204", _("数据范围格式错误"))
DATA_SCOPE_VALID_ERR = ("205", _("不合法的对象范围"))
TOKEN_DISABLED_ERR = ("206", _("授权码已被禁止使用"))
# 资源关系异常
QUERY_TOO_MANY_RESOURCE_ERR = ("401", _("查询太多资源,拒绝返回内容"))
INVALID_PARENT_RESOURCE_TYPE_ERR = ("402", _("非法的父级资源类型"))
INVALID_RESOURCE_ATTR_ERR = ("403", _("非法的资源属性"))
FILTER_NOT_TO_SCOPE_ERR = ("404", _("资源过滤器无法装换为属性范围"))
# IAM 关联异常
BKIAM_NOT_SUPPOR_ATTR_AUTHO = ("501", _("BKIAM 不支持通过接口进行属性授权"))
BKIAM_CONFIGURE_RESTRICTION = ("502", _("BKIAM 配置限制"))
BKIAM_SYNC_AUTHO_ERR = ("503", _("BKIAM 同步失败"))
BKIAM_POLICIES_COUNT_LIMIT_ERR = ("504", _("BKIAM 策略数上限溢出"))
# BaseModel 异常
CORE_BASE_MODEL_NO_PK = ("601", _("基础模型配置不存在PK"))
CORE_BASE_MODEL_INSTANCE_NOT_EXIT = ("602", _("基础模型实例不存在"))
# Others
PERMISSION_DENIED_ERR = ("705", _("角色访问权限不足"))
NO_FUNCTION_ERR = ("706", _("功能还未实现"))
APP_NOT_MATCH_ERR = ("707", _("非法APP发起访问,请检查 APP_CODE"))
OUTER_MODEL_ATTR_ERR = ("708", _("存在不符合预期的 Model 数据"))
PARAM_ERR = ("709", _("参数校验错误"))
REDIS_CONNECT_ERR = ("710", _("无法连接REDIS服务"), _("检查AuthAPI依赖redis服务是否正常"))
OBJECT_SERIALIZER_NO_PK_ERR = ("711", _("ObjectClass 类型定义缺少对主键定义"))
UPDATE_ROLE_ERR = ("712", _("更新角色成员列表错误"))
# ITSM
TICKET_CREATE_ERROR = ("801", _("当前有未完成的单据"))
CALL_BACK_ERROR = ("802", _("回调第三方模块发生错误"))
ITSM_CATALOGS_NOT_EXIST = ("803", _("未在itsm找到相关的目录"))
class PermissionDeniedError(AuthAPIError):
CODE = AuthCode.PERMISSION_DENIED_ERR[0]
MESSAGE = AuthCode.PERMISSION_DENIED_ERR[1]
class NoPermissionError(AuthAPIError):
CODE = AuthCode.NO_PERMISSION_ERR[0]
MESSAGE = AuthCode.NO_PERMISSION_ERR[1]
class TicketStateHasBeenOperatedError(AuthAPIError):
CODE = AuthCode.TICKET_STATE_HAS_BEEN_OPERATED_ERR[0]
MESSAGE = AuthCode.TICKET_STATE_HAS_BEEN_OPERATED_ERR[1]
class ApprovalRangeError(AuthAPIError):
CODE = AuthCode.APPROVAL_RANGE_ERR[0]
MESSAGE = AuthCode.APPROVAL_RANGE_ERR[1]
class NotInApprovalProcessError(AuthAPIError):
CODE = AuthCode.NOT_IN_APPROVAL_PROCESS_ERR[0]
MESSAGE = AuthCode.NOT_IN_APPROVAL_PROCESS_ERR[1]
class NoUpdatePermissionError(AuthAPIError):
CODE = AuthCode.NO_UPDATE_PERMISSION_ERR[0]
MESSAGE = AuthCode.NO_UPDATE_PERMISSION_ERR[1]
class ObjectClassError(AuthAPIError):
CODE = AuthCode.OBJECT_CLASS_ERR[0]
MESSAGE = AuthCode.OBJECT_CLASS_ERR[1]
class SubjectCheckErr(AuthAPIError):
CODE = AuthCode.SUBJECT_CHECK_ERR[0]
MESSAGE = AuthCode.SUBJECT_CHECK_ERR[1]
class UnexpectedTicketTypeErr(AuthAPIError):
CODE = AuthCode.UNEXPECTED_TICKET_TYPE[0]
MESSAGE = AuthCode.UNEXPECTED_TICKET_TYPE[1]
class ScopeCheckErr(AuthAPIError):
CODE = AuthCode.SCOPE_CHECK_ERR[0]
MESSAGE = AuthCode.SCOPE_CHECK_ERR[1]
class NoFunctionErr(AuthAPIError):
CODE = AuthCode.NO_FUNCTION_ERR[0]
MESSAGE = AuthCode.NO_FUNCTION_ERR[1]
class HasAlreadyExistsErr(AuthAPIError):
CODE = AuthCode.HAS_ALREADY_EXISTS_ERR[0]
MESSAGE = AuthCode.HAS_ALREADY_EXISTS_ERR[1]
class ObjectNotExistsErr(AuthAPIError):
CODE = AuthCode.OBJECT_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.OBJECT_NOT_EXIST_ERR[1]
class ActionCheckErr(AuthAPIError):
CODE = AuthCode.ACTION_CHECK_ERR[0]
MESSAGE = AuthCode.ACTION_CHECK_ERR[1]
class NoProcessorErr(AuthAPIError):
CODE = AuthCode.NO_PROCESSOR_ERR[0]
MESSAGE = AuthCode.NO_PROCESSOR_ERR[1]
class RepeatPermissionErr(AuthAPIError):
CODE = AuthCode.REPEAT_PERMISSION_INFO[0]
MESSAGE = AuthCode.REPEAT_PERMISSION_INFO[1]
class RoleAtLeastOneErr(AuthAPIError):
CODE = AuthCode.ROLE_AT_LEAST_ONE_ERR[0]
MESSAGE = AuthCode.ROLE_AT_LEAST_ONE_ERR[1]
class TokenNotExistErr(AuthAPIError):
CODE = AuthCode.TOKEN_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.TOKEN_NOT_EXIST_ERR[1]
class TokenNotAuthorizedErr(AuthAPIError):
CODE = AuthCode.TOKEN_NOT_AUTHORIZED[0]
MESSAGE = AuthCode.TOKEN_NOT_AUTHORIZED[1]
class TokenExpiredErr(AuthAPIError):
CODE = AuthCode.TOKEN_EXPIRED_ERR[0]
MESSAGE = AuthCode.TOKEN_EXPIRED_ERR[1]
class DataScopeFormatErr(AuthAPIError):
CODE = AuthCode.DATA_SCOPE_FORMAT_ERR[0]
MESSAGE = AuthCode.DATA_SCOPE_FORMAT_ERR[1]
class DataScopeValidErr(AuthAPIError):
CODE = AuthCode.DATA_SCOPE_VALID_ERR[0]
MESSAGE = AuthCode.DATA_SCOPE_VALID_ERR[1]
class AppNotMatchErr(AuthAPIError):
CODE = AuthCode.APP_NOT_MATCH_ERR[0]
MESSAGE | class AuthAPIError(BaseAPIError): | random_line_split |
|
exceptions.py | 无更新权限"))
OBJECT_CLASS_ERR = ("106", _("object类型错误"))
SUBJECT_CHECK_ERR = ("107", _("主体校验失败"))
SCOPE_CHECK_ERR = ("108", _("申请范围校验失败"))
HAS_ALREADY_EXISTS_ERR = ("109", _("已提交过此权限申请,请勿重复提交"))
OBJECT_NOT_EXIST_ERR = ("110", _("object不存在"))
ACTION_CHECK_ERR = ("111", _("action校验错误"))
NO_PROCESSOR_ERR = ("112", _("没有审批人"))
UNEXPECTED_TICKET_TYPE = ("113", _("单据类型校验错误"))
REPEAT_PERMISSION_INFO = ("114", _("重复的权限信息"))
ROLE_AT_LEAST_ONE_ERR = ("115", _("该角色成员不能为空"))
PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR = ("116", _("申请的权限对象不存在"))
PROJECT_DATA_VALID_ERR = ("117", _("项目所申请的业务数据的区域标签不合法"))
NO_MATCHED_TICKET_TYPE_PROCESS_ERR = ("118", _("没有匹配的单据类型的审批流程"))
TICKET_CALLBACK_ERR = ("119", _("通用单据回调失败"))
NOTICE_APPROVE_CALLBACK_ERR = ("120", _("单据通知审核回调异常"))
NOT_EXIST_ERR = ("120", _("实例不存在"))
NOT_OBJECT_CLASS_SAME_WITH_META = ("121", _("不存在与 Meta 映射的对象类型"))
# data token 异常
TOKEN_NOT_EXIST_ERR = ("201", _("授权码不存在"))
TOKEN_NOT_AUTHORIZED = ("202", _("授权码未完全通过审批"))
TOKEN_EXPIRED_ERR = ("203", _("授权码已过期"))
DATA_SCOPE_FORMAT_ERR = ("204", _("数据范围格式错误"))
DATA_SCOPE_VALID_ERR = ("205", _("不合法的对象范围"))
TOKEN_DISABLED_ERR = ("206", _("授权码已被禁止使用"))
# 资源关系异常
QUERY_TOO_MANY_RESOURCE_ERR = ("401", _("查询太多资源,拒绝返回内容"))
INVALID_PARENT_RESOURCE_TYPE_ERR = ("402", _("非法的父级资源类型"))
INVALID_RESOURCE_ATTR_ERR = ("403", _("非法的资源属性"))
FILTER_NOT_TO_SCOPE_ERR = ("404", _("资源过滤器无法装换为属性范围"))
# IAM 关联异常
BKIAM_NOT_SUPPOR_ATTR_AUTHO = ("501", _("BKIAM 不支持通过接口进行属性授权"))
BKIAM_CONFIGURE_RESTRICTION = ("502", _("BKIAM 配置限制"))
BKIAM_SYNC_AUTHO_ERR = ("503", _("BKIAM 同步失败"))
BKIAM_POLICIES_COUNT_LIMIT_ERR = ("504", _("BKIAM 策略数上限溢出"))
# BaseModel 异常
CORE_BASE_MODEL_NO_PK = ("601", _("基础模型配置不存在PK"))
CORE_BASE_MODEL_INSTANCE_NOT_EXIT = ("602", _("基础模型实例不存在"))
# Others
PERMISSION_DENIED_ERR = ("705", _("角色访问权限不足"))
NO_FUNCTION_ERR = ("706", _("功能还未实现"))
APP_NOT_MATCH_ERR = ("707", _("非法APP发起访问,请检查 APP_CODE"))
OUTER_MODEL_ATTR_ERR = ("708", _("存在不符合预期的 Model 数据"))
PARAM_ERR = ("709", _("参数校验错误"))
REDIS_CONNECT_ERR = ("710", _("无法连接REDIS服务"), _("检查AuthAPI依赖redis服务是否正常"))
OBJECT_SERIALIZER_NO_PK_ERR = ("711", _("ObjectClass 类型定义缺少对主键定义"))
UPDATE_ROLE_ERR = ("712", _("更新角色成员列表错误"))
# ITSM
TICKET_CREATE_ERROR = ("801", _("当前有未完成的单据"))
CALL_BACK_ERROR = ("802", _("回调第三方模块发生错误"))
ITSM_CATALOGS_NOT_EXIST = ("803", _("未在itsm找到相关的目录"))
class PermissionDeniedError(AuthAPIError):
CODE = AuthCode.PERMISSION_DENIED_ERR[0]
MESSAGE = AuthCode.PERMISSION_DENIED_ERR[1]
class NoPermissionError(AuthAPIError):
CODE = AuthCode.NO_PERMISSION_ERR[0]
MESSAGE = AuthCode.NO_PERMISSION_ERR[1]
class TicketStateHasBeenOperatedError(AuthAPIError):
CODE = AuthCode.TICKET_STATE_HAS_BEEN_OPERATED_ERR[0]
MESSAGE = AuthCode.TICKET_STATE_HAS_BEEN_OPERATED_ERR[1]
class ApprovalRangeError(AuthAPIError):
CODE = AuthCode.APPROVAL_RANGE_ERR[0]
MESSAGE = AuthCode.APPROVAL_RANGE_ERR[1]
class NotInApprovalProcessError(AuthAPIError):
CODE = AuthCode.NOT_IN_APPROVAL_PROCESS_ERR[0]
MESSAGE = AuthCode.NOT_IN_APPROVAL_PROCESS_ERR[1]
class NoUpdatePermissionError(AuthAPIError):
CODE = AuthCode.NO_UPDATE_PERMISSION_ERR[0]
MESSAGE = AuthCode.NO_UPDATE_PERMISSION_ERR[1]
class ObjectClassError(AuthAPIError):
CODE = AuthCode.OBJECT_CLASS_ERR[0]
MESSAGE = AuthCode.OBJECT_CLASS_ERR[1]
class SubjectCheckErr(AuthAPIError):
CODE = AuthCode.SUBJECT_CHECK_ERR[0]
MESSAGE = AuthCode.SUBJECT_CHECK_ERR[1]
class UnexpectedTicketTypeErr(AuthAPIError):
CODE = AuthCode.UNEXPECTED_TICKET_TYPE[0]
MESSAGE = AuthCode.UNEXPECTED_TICKET_TYPE[1]
class ScopeCheckErr(AuthAPIError):
CODE = AuthCode.SCOPE_CHECK_ERR[0]
MESSAGE = AuthCode.SCOPE_CHECK_ERR[1]
class NoFunctionErr(AuthAPIError):
CODE = AuthCode.NO_FUNCTION_ERR[0]
MESSAGE = AuthCode.NO_FUNCTION_ERR[1]
class HasAlreadyExistsErr(AuthAPIError):
CODE = AuthCode.HAS_ALREADY_EXISTS_ERR[0]
MESSAGE = AuthCode.HAS_ALREADY_EXISTS_ERR[1]
class ObjectNotExistsErr(AuthAPIError):
CODE = AuthCode.OBJECT_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.OBJECT_NOT_EXIST_ERR[1]
class ActionCheckErr(AuthAPIError):
CODE = AuthCode.ACTION_CHECK_ERR[0]
MESSAGE = AuthCode.ACTION_CHECK_ERR[1]
class NoProcessorErr(AuthAPIError):
CODE = AuthCode.NO_PROCESSOR_ERR[0]
MESSAGE = AuthCode.NO_PROCESSOR_ERR[1]
class RepeatPermissionErr(AuthAPIError):
CODE = AuthCode.REPEAT_PERMISSION_INFO[0]
MESSAGE = AuthCode.REPEAT_PERMISSION_INFO[1]
class RoleAtLeastOneErr(AuthAPIError):
CODE = AuthCode.ROLE_AT_LEAST_ONE_ERR[0]
MESSAGE = AuthCode.ROLE_AT_LEAST_ONE_ERR[1]
class TokenNotExistErr(AuthAPIError):
CODE = AuthCode.TOKEN_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.TOKEN_NOT_EXIST_ERR[1]
class TokenNotAuthorizedErr(AuthAPIError):
CODE = AuthCode.TOKEN_NOT_AUTHORIZED[0]
MESSAGE = AuthCode.TOKEN_NOT_AUTHORIZED[1]
class TokenExpiredErr(AuthAPIError):
CODE = AuthCode.TOKEN_EXPIRED_ERR[0]
MESSAGE = AuthCode.TOKEN_EXPIRED_ERR[1]
class DataScopeFormatErr(AuthAPIError):
CODE = AuthCode.DATA_SCOPE_FORMAT_ERR[0]
MESSAGE = AuthCode.DATA_SCOPE_FORMAT_ERR[1]
class DataScopeValidErr(AuthAPIError):
CODE = AuthCode.DATA_SCOPE_VALID_ERR[0]
MESSAGE = AuthCode.DATA_SCOPE_VALID_ERR[1]
class AppNotMatchErr(AuthAPIError):
CODE = AuthCode.APP_NOT_MATCH_ERR[0]
MESSAGE = AuthCode.APP_NOT_MATCH_ERR[1]
class PermissionObjectDoseNotExistError(AuthAPIError):
CODE = AuthCode.PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR[1]
class TokenDisabledErr(AuthAPIError):
CODE = AuthCode.TOKEN_DISABLED_ERR[0]
MESSAGE = AuthCode.TOKEN_DISABLED_ERR[1]
class OuterModelAttrErr(AuthAPIError):
CODE = AuthCode.OUTER_MODEL_ATTR_ERR[0]
MESSAGE = AuthCode.OUTER_MODEL_ATTR_ERR[1]
class ProjectDataTagValidErr(AuthAPIError):
CODE = AuthCode.PROJECT_DATA_VALID_ERR[0]
MESSAGE = AuthCode.PROJECT_DATA_VALID_ERR[1]
class NoMatchedTicketTypeProcessErr(AuthAPIError):
CODE = AuthCode.NO_MATCHED_TICKET_TYPE_PROCESS_ERR[0]
MESSAGE = AuthCode.NO_MATCHED_TICKET_TYPE_PROCESS_ERR[1]
class TicketCallbackErr(AuthAPIError):
CODE = AuthCode.TICKET_CALLBACK_ERR[0]
MESSAGE = AuthCode.TICKET_CALLBACK_ERR[1]
class NoticeApproveCallbackErr(AuthAPIError):
CODE = AuthCode.NOTICE_APPROVE_CALLBACK_ERR[0]
MESSAGE = AuthCode.NOTICE_APPROVE_CALLBACK_ERR[1]
class NotExistErr(AuthAPIError):
CODE = AuthCode.NOT_EXIST_ERR[0]
MESSAGE = AuthCode.NOT_EXIST_ERR[1]
class NotObjectClassSameWithMeta(AuthAPIError):
CODE = AuthCode.NOT_OBJECT_CLASS_SAME_WITH_META[0]
MESSAGE = AuthCode.NOT_OBJECT_CLASS_SAME_WITH_META[1]
class ParameterErr(AuthAPIError):
CODE = AuthCode.PARAM_ERR[0]
MESSAGE = AuthCode.PARAM_ERR[1]
class RedisConnectError(AuthAPI | Error):
CODE | identifier_name |
|
exceptions.py | = ("002", _("认证参数缺失"), _("请查看API请求文档"))
SDK_AUTHENTICATION_ERR = ("003", _("认证不通过,请提供合法的 BKData 认证信息"), _("请查看API请求文档"))
SDK_INVALID_SECRET_ERR = (
"004",
_("内部模块调用请传递准确的 bk_app_code 和 bk_app_secret"),
_("传递给的变量需要与 dataapi_settings.py 保持一致"),
)
SDK_INVALID_TOKEN_ERR = ("005", _("数据平台授权码不正确"), _("前往平台授权码页面查看授权码"))
SDK_NO_INDENTITY_ERR = ("007", _("未检测到有效的认证信息"), _("请查看API请求文档"))
SDK_WRONG_INDENTIRY_ERR = ("008", _("错误的认证方式"), _("请查看API请求文档"))
SDK_JWT_VERIFY_ERR = ("009", _("ESB 传递的 JWT 字符串解析失败"))
# 单据异常
NO_PERMISSION_ERR = ("101", _("当前用户无单据权限"))
TICKET_STATE_HAS_BEEN_OPERATED_ERR = ("102", _("当前单据状态已审批"))
APPROVAL_RANGE_ERR = ("103", _("审批范围错误"))
NOT_IN_APPROVAL_PROCESS_ERR = ("104", _("未在可审批阶段"))
NO_UPDATE_PERMISSION_ERR = ("105", _("无更新权限"))
OBJECT_CLASS_ERR = ("106", _("object类型错误"))
SUBJECT_CHECK_ERR = ("107", _("主体校验失败"))
SCOPE_CHECK_ERR = ("108", _("申请范围校验失败"))
HAS_ALREADY_EXISTS_ERR = ("109", _("已提交过此权限申请,请勿重复提交"))
OBJECT_NOT_EXIST_ERR = ("110", _("object不存在"))
ACTION_CHECK_ERR = ("111", _("action校验错误"))
NO_PROCESSOR_ERR = ("112", _("没有审批人"))
UNEXPECTED_TICKET_TYPE = ("113", _("单据类型校验错误"))
REPEAT_PERMISSION_INFO = ("114", _("重复的权限信息"))
ROLE_AT_LEAST_ONE_ERR = ("115", _("该角色成员不能为空"))
PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR = ("116", _("申请的权限对象不存在"))
PROJECT_DATA_VALID_ERR = ("117", _("项目所申请的业务数据的区域标签不合法"))
NO_MATCHED_TICKET_TYPE_PROCESS_ERR = ("118", _("没有匹配的单据类型的审批流程"))
TICKET_CALLBACK_ERR = ("119", _("通用单据回调失败"))
NOTICE_APPROVE_CALLBACK_ERR = ("120", _("单据通知审核回调异常"))
NOT_EXIST_ERR = ("120", _("实例不存在"))
NOT_OBJECT_CLASS_SAME_WITH_META = ("121", _("不存在与 Meta 映射的对象类型"))
# data token 异常
TOKEN_NOT_EXIST_ERR = ("201", _("授权码不存在"))
TOKEN_NOT_AUTHORIZED = ("202", _("授权码未完全通过审批"))
TOKEN_EXPIRED_ERR = ("203", _("授权码已过期"))
DATA_SCOPE_FORMAT_ERR = ("204", _("数据范围格式错误"))
DATA_SCOPE_VALID_ERR = ("205", _("不合法的对象范围"))
TOKEN_DISABLED_ERR = ("206", _("授权码已被禁止使用"))
# 资源关系异常
QUERY_TOO_MANY_RESOURCE_ERR = ("401", _("查询太多资源,拒绝返回内容"))
INVALID_PARENT_RESOURCE_TYPE_ERR = ("402", _("非法的父级资源类型"))
INVALID_RESOURCE_ATTR_ERR = ("403", _("非法的资源属性"))
FILTER_NOT_TO_SCOPE_ERR = ("404", _("资源过滤器无法装换为属性范围"))
# IAM 关联异常
BKIAM_NOT_SUPPOR_ATTR_AUTHO = ("501", _("BKIAM 不支持通过接口进行属性授权"))
BKIAM_CONFIGURE_RESTRICTION = ("502", _("BKIAM 配置限制"))
BKIAM_SYNC_AUTHO_ERR = ("503", _("BKIAM 同步失败"))
BKIAM_POLICIES_COUNT_LIMIT_ERR = ("504", _("BKIAM 策略数上限溢出"))
# BaseModel 异常
CORE_BASE_MODEL_NO_PK = ("601", _("基础模型配置不存在PK"))
CORE_BASE_MODEL_INSTANCE_NOT_EXIT = ("602", _("基础模型实例不存在"))
# Others
PERMISSION_DENIED_ERR = ("705", _("角色访问权限不足"))
NO_FUNCTION_ERR = ("706", _("功能还未实现"))
APP_NOT_MATCH_ERR = ("707", _("非法APP发起访问,请检查 APP_CODE"))
OUTER_MODEL_ATTR_ERR = ("708", _("存在不符合预期的 Model 数据"))
PARAM_ERR = ("709", _("参数校验错误"))
REDIS_CONNECT_ERR = ("710", _("无法连接REDIS服务"), _("检查AuthAPI依赖redis服务是否正常"))
OBJECT_SERIALIZER_NO_PK_ERR = ("711", _("ObjectClass 类型定义缺少对主键定义"))
UPDATE_ROLE_ERR = ("712", _("更新角色成员列表错误"))
# ITSM
TICKET_CREATE_ERROR = ("801", _("当前有未完成的单据"))
CALL_BACK_ERROR = ("802", _("回调第三方模块发生错误"))
ITSM_CATALOGS_NOT_EXIST = ("803", _("未在itsm找到相关的目录"))
class PermissionDeniedError(AuthAPIError):
CODE = AuthCode.PERMISSION_DENIED_ERR[0]
MESSAGE = AuthCode.PERMISSION_DENIED_ERR[1]
class NoPermissionError(AuthAPIError):
CODE = AuthCode.NO_PERMISSION_ERR[0]
MESSAGE = AuthCode.NO_PERMISSION_ERR[1]
class TicketStateHasBeenOperatedError(AuthAPIError):
CODE = AuthCode.TICKET_STATE_HAS_BEEN_OPERATED_ERR[0]
MESSAGE = AuthCode.TICKET_STATE_HAS_BEEN_OPERATED_ERR[1]
class ApprovalRangeError(AuthAPIError):
CODE = AuthCode.APPROVAL_RANGE_ERR[0]
MESSAGE = AuthCode.APPROVAL_RANGE_ERR[1]
class NotInApprovalProcessError(AuthAPIError):
CODE = AuthCode.NOT_IN_APPROVAL_PROCESS_ERR[0]
MESSAGE = AuthCode.NOT_IN_APPROVAL_PROCESS_ERR[1]
class NoUpdatePermissionError(AuthAPIError):
CODE = AuthCode.NO_UPDATE_PERMISSION_ERR[0]
MESSAGE = AuthCode.NO_UPDATE_PERMISSION_ERR[1]
class ObjectClassError(AuthAPIError):
CODE = AuthCode.OBJECT_CLASS_ERR[0]
MESSAGE = AuthCode.OBJECT_CLASS_ERR[1]
class SubjectCheckErr(AuthAPIError):
CODE = AuthCode.SUBJECT_CHECK_ERR[0]
MESSAGE = AuthCode.SUBJECT_CHECK_ERR[1]
class UnexpectedTicketTypeErr(AuthAPIError):
CODE = AuthCode.UNEXPECTED_TICKET_TYPE[0]
MESSAGE = AuthCode.UNEXPECTED_TICKET_TYPE[1]
class ScopeCheckErr(AuthAPIError):
CODE = AuthCode.SCOPE_CHECK_ERR[0]
MESSAGE = AuthCode.SCOPE_CHECK_ERR[1]
class NoFunctionErr(AuthAPIError):
CODE = AuthCode.NO_FUNCTION_ERR[0]
MESSAGE = AuthCode.NO_FUNCTION_ERR[1]
class HasAlreadyExistsErr(AuthAPIError):
CODE = AuthCode.HAS_ALREADY_EXISTS_ERR[0]
MESSAGE = AuthCode.HAS_ALREADY_EXISTS_ERR[1]
class ObjectNotExistsErr(AuthAPIError):
CODE = AuthCode.OBJECT_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.OBJECT_NOT_EXIST_ERR[1]
class ActionCheckErr(AuthAPIError):
CODE = AuthCode.ACTION_CHECK_ERR[0]
MESSAGE = AuthCode.ACTION_CHECK_ERR[1]
class NoProcessorErr(AuthAPIError):
CODE = AuthCode.NO_PROCESSOR_ERR[0]
MESSAGE = AuthCode.NO_PROCESSOR_ERR[1]
class RepeatPermissionErr(AuthAPIError):
CODE = AuthCode.REPEAT_PERMISSION_INFO[0]
MESSAGE = AuthCode.REPEAT_PERMISSION_INFO[1]
class RoleAtLeastOneErr(AuthAPIError):
CODE = AuthCode.ROLE_AT_LEAST_ONE_ERR[0]
MESSAGE = AuthCode.ROLE_AT_LEAST_ONE_ERR[1]
class TokenNotExistErr(AuthAPIError):
CODE = AuthCode.TOKEN_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.TOKEN_NOT_EXIST_ERR[1]
class TokenNotAuthorizedErr(AuthAPIError):
CODE = AuthCode.TOKEN_NOT_AUTHORIZED[0]
MESSAGE = AuthCode.TOKEN_NOT_AUTHORIZED[1]
class TokenExpiredErr(AuthAPIError):
CODE = AuthCode.TOKEN_EXPIRED_ERR[0]
MESSAGE = AuthCode.TOKEN_EXPIRED_ERR[1]
class DataScopeFormatErr(AuthAPIError):
CODE = AuthCode.DATA_SCOPE_FORMAT_ERR[0]
MESSAGE = AuthCode.DATA_SCOPE_FORMAT_ERR[1 | MESSAGE = AuthCode.DATA_SCOPE_VALID_ERR[1]
class AppNotMatchErr(AuthAPIError):
CODE = AuthCode.APP_NOT_MATCH_ERR[0]
MESSAGE = AuthCode.APP_NOT_MATCH_ERR[1]
class PermissionObjectDoseNotExistError(AuthAPIError):
CODE = AuthCode.PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR[1]
class TokenDisabledErr(AuthAPIError):
CODE = AuthCode.TOKEN_DISABLED_ERR[0]
MESSAGE | ]
class DataScopeValidErr(AuthAPIError):
CODE = AuthCode.DATA_SCOPE_VALID_ERR[0]
| identifier_body |
main.rs | err_write!("bkp: Destination '{}' already exists", name);
std::process::exit(1);
}
// parse the target URL
let url = Url::parse(&url)
.unwrap_or_fail("Cannot parse given URL");
// build the new target
let tgt = config::BackupTarget {
name: name.to_owned(),
url: url,
user: user.map(String::from),
password: password.map(String::from),
key_file: None,
options: config::TargetOptions {
reliable: true,
upload_cost: 1,
download_cost: 1
}
};
opts.cfg.targets.push(tgt);
opts.cfg.save().unwrap_or_fail("Failed to save config file");
},
(s, _) if (s == "list") || s.is_empty() => { // list destinations
let max_left_col = opts.cfg.targets.iter()
.map(|ref x| x.name.len())
.max().unwrap_or(0);
for t in opts.cfg.targets.iter() {
println!("{1:0$} {2}", max_left_col, t.name, t.url.as_str());
}
},
("remove", Some(m)) => { // remove destinations
unimplemented!()
},
("test", Some(m)) => { // test destination connectivity
let mut has_errs = false;
let max_col = m.values_of("name").unwrap()
.map(|ref x| x.len()).max().unwrap_or(0);
for name in m.values_of("name").unwrap() {
let tgt = connect_backend(name.to_owned(), &opts);
match tgt {
Ok(_) => println!("{1:0$}: successful", max_col, name),
Err(e) => {
println!("{1:0$}: {2}", max_col, name, e);
has_errs = true;
}
}
}
if has_errs {
std::process::exit(1);
}
},
(_, _) => panic!("No subcommand handler found")
}
}
fn do_test(args: &clap::ArgMatches, opts: &GlobalOptions) {
let profile = match args.value_of("profile").unwrap() {
"quick" => history::IntegrityTestMode::Quick,
"normal" => history::IntegrityTestMode::Normal,
"slow" => history::IntegrityTestMode::Slow,
"exhaustive" => history::IntegrityTestMode::Exhaustive,
_ => panic!("unexpected test mode string")
};
let names = opts.cfg.targets.iter().map(|x| {x.name.clone()})
.chain(opts.cfg.target_groups.iter().map(|x| {x.name.clone()}));
for t in names {
let b = connect_backend(t.clone(), opts);
if let Err(e) = b {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
}
// construct a history object
let mut b = b.unwrap();
let hist = history::History::new(&mut b);
if let Err(e) = hist {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
}
// run the check
match hist.unwrap().check(profile) {
Err(e) => {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
},
Ok(true) => println!("{}: okay", t),
Ok(false) => println!("{}: failed", t),
}
}
}
fn | (args: &clap::ArgMatches, opts: &GlobalOptions) {
unimplemented!()
}
fn do_clean(args: &clap::ArgMatches, opts: &GlobalOptions) {
unimplemented!()
}
fn do_snap(args: &clap::ArgMatches, opts: &GlobalOptions) {
let remote = args.value_of("remote").unwrap().to_owned();
let snap_paths: Vec<&str> = args.values_of("local").unwrap().collect();
let mut remote = connect_backend(remote, opts)
.unwrap_or_fail("backend connection failed");
// construct a history object
let mut history = history::History::new(&mut remote)
.unwrap_or_fail("failed to configure history layer");
// update paths
let new_tree = history.update_paths(snap_paths)
.unwrap_or_fail("failed to write modified trees");
// build a new snapshot
let snap = history.new_snapshot(new_tree)
.unwrap_or_fail("failed to create snapshot");
println!("snapshot created.");
}
fn do_restore(args: &clap::ArgMatches, opts: &GlobalOptions) {
let remote = args.value_of("remote").unwrap().to_owned();
// TODO: avoid specifying remote by searching for all remotes with a file
let objects: Vec<&Path> = args.values_of("local").unwrap()
.map(Path::new).collect();
let mut remote = connect_backend(remote, opts)
.unwrap_or_fail("backend connection failed");
let mut history = history::History::new(&mut remote)
.unwrap_or_fail("failed to configure history layer");
// TODO: figure out the target time, if any
// find the requested snapshot
// TODO: add command for recovering backups with broken head snapshot
let mut snapshot = history.get_snapshot()
.unwrap_or_fail("failed to read root snapshot");
if snapshot.is_none() {
eprintln!("bkp: cannot restore from empty target");
std::process::exit(1);
}
let snapshot = loop {
match snapshot {
None => {
eprintln!("bkp: no matching snapshot");
// TODO: show most recent one?
std::process::exit(1);
},
Some(snap) => {
// TODO: Add target time check here
if true {
break snap;
}
snapshot = snap.parent()
.unwrap_or_fail("failed to read snapshot");
}
}
};
// retrieve the objects we're interested in
let objects: history::Result<Vec<_>> = objects.into_iter()
.map(|obj| snapshot.get(&obj).map(|r| (obj, r)))
.collect();
let objects = objects.unwrap_or_fail("cannot read stored objects");
// warn about missing files, if any
if objects.iter().any(|x| x.1.is_none()) {
println!("The following paths could not be found:");
for p in objects.iter().filter(|x| x.1.is_none()) {
println!("\t{}", p.0.to_str().unwrap_or("<unprintable path>"));
}
println!("");
use std::ascii::AsciiExt;
let abort = loop {
print!("Do you want to continue restoring? (y/n) ");
std::io::stdout().flush().unwrap();
let mut response = String::new();
std::io::stdin().read_line(&mut response).unwrap();
match response.chars().next().map(|x| x.to_ascii_lowercase()) {
Some('y') => break false, // no abort
Some('n') => break true, // abort
_ => {}, // ask again
}
};
if abort {
println!("aborted");
return;
}
}
let objects: Vec<_> = objects.into_iter()
.filter_map(|(p,o)| o.map(|v| (p, v)))
.collect();
// actually reconstruct them
let base_path = Path::new(args.value_of("into").unwrap_or("/"));
let options = history::RestoreOptions::new()
.overwrite(args.is_present("overwrite"))
.ignore_permissions(args.is_present("no_perms"));
for (path, obj) in objects {
match obj.restore(&base_path, &options) {
Ok(()) => {},
Err(history::Error::InvalidArgument) => {
eprintln!("bkp: possible integrity violation found!");
eprintln!(" invalid object type at path: {}",
path.to_str().unwrap_or("<unprintable>"));
},
Err(e) => fail_error("cannot restore object", e)
}
}
}
fn load_config(pth: &Path) -> config::Config {
let cfg = config::Config::load(&pth);
if let Err(e) = cfg {
if let config::ConfigErr::IOError(ref err) = e {
if err.kind() == std::io::ErrorKind::NotFound {
err_write!("Creating new configuration file");
// try to create a new config
let cfg = config::Config::default();
cfg.save().ok().unwrap_or(());
return cfg
}
}
let errstr = match e {
config::ConfigErr::ParseError(x) => x,
config::ConfigErr::IOError(x) => String::from(x.description())
};
writeln!(std::io::stderr(),
"bkp: Cannot load config file: {}", errstr).unwrap();
std::process::exit(1);
}
return cfg.unwrap();
}
fn main() {
let opt_matches = clap_app!(bkp =>
(version: "0.1")
(author: "Noah Zentzis <[email protected]>")
(about: "Automated system backup utility")
(@arg CONFIG: -c --config +takes_value "Specifies a config file to use")
(@arg DATADIR: - | do_stat | identifier_name |
main.rs | }
};
if abort {
println!("aborted");
return;
}
}
let objects: Vec<_> = objects.into_iter()
.filter_map(|(p,o)| o.map(|v| (p, v)))
.collect();
// actually reconstruct them
let base_path = Path::new(args.value_of("into").unwrap_or("/"));
let options = history::RestoreOptions::new()
.overwrite(args.is_present("overwrite"))
.ignore_permissions(args.is_present("no_perms"));
for (path, obj) in objects {
match obj.restore(&base_path, &options) {
Ok(()) => {},
Err(history::Error::InvalidArgument) => {
eprintln!("bkp: possible integrity violation found!");
eprintln!(" invalid object type at path: {}",
path.to_str().unwrap_or("<unprintable>"));
},
Err(e) => fail_error("cannot restore object", e)
}
}
}
fn load_config(pth: &Path) -> config::Config {
let cfg = config::Config::load(&pth);
if let Err(e) = cfg {
if let config::ConfigErr::IOError(ref err) = e {
if err.kind() == std::io::ErrorKind::NotFound {
err_write!("Creating new configuration file");
// try to create a new config
let cfg = config::Config::default();
cfg.save().ok().unwrap_or(());
return cfg
}
}
let errstr = match e {
config::ConfigErr::ParseError(x) => x,
config::ConfigErr::IOError(x) => String::from(x.description())
};
writeln!(std::io::stderr(),
"bkp: Cannot load config file: {}", errstr).unwrap();
std::process::exit(1);
}
return cfg.unwrap();
}
fn main() {
let opt_matches = clap_app!(bkp =>
(version: "0.1")
(author: "Noah Zentzis <[email protected]>")
(about: "Automated system backup utility")
(@arg CONFIG: -c --config +takes_value "Specifies a config file to use")
(@arg DATADIR: -D --data-dir +takes_value "Specify the local data path")
(@arg BACKEND: -t --target +takes_value
"Override the default destination")
(@arg VERBOSE: -v --verbose "Enable verbose terminal output")
(@arg QUIET: -q --quiet "Silence non-error terminal output")
(@subcommand dest =>
(about: "Query and modify available backup destinations")
(@subcommand add =>
(about: "Create a new destination")
(@arg name: +required "The name of the new destination")
(@arg url: +required {|s| {Url::parse(&s).map(|_| ())
.map_err(|_| String::from("Not a valid URL"))}}
"The new destination's URL" )
(@arg user: -u --user +takes_value "Set the associated username")
(@arg password: -p --password +takes_value
"Set the associated password"))
(@subcommand list =>
(about: "List the available destinations")
(@arg no_groups: -n --("no-groups")
"Don't show grouped destinations"))
(@subcommand remove =>
(about: "Remove an existing destination")
(@arg name: +required "The destination name to remove")
(@arg scrub: -S --scrub "Remove existing backups from the target"))
(@subcommand test =>
(about: "Test connectivity to a destination")
(@arg name: +required * "The destination to test")))
(@subcommand test =>
(about: "Test integrity of existing backups")
(@arg profile: +takes_value
possible_values(&["quick", "normal", "slow", "exhaustive"])
default_value("normal")
"The test profile to run")
(@arg all: -a --all
"Test backups from all machines rather than just this one"))
(@subcommand stat =>
(about: "Show backup statistics")
(@arg dest: +takes_value ...
"Only show data about the given destinations")
(@arg remote: -r --remote
"Query remote servers, bypassing local caches"))
(@subcommand clean =>
(about: "Remove backup data matching specific criteria. \
All given predicates must match in order for data to be removed.")
(@arg dest: +takes_value ...
"Only remove data from the given destinations")
(@arg dry_run: -n --("dry-run")
"Don't remove anything, just show what would be done")
(@group predicates =>
(@attributes +multiple +required)
(@arg snap_type: -t --type +takes_value
possible_values(&["diff", "full"])
"Match data in snapshots with type")
(@arg older_than: -o --("older-than") +takes_value
"Match data older than a certain age")
(@arg newer_than: -N --("newer-than") +takes_value
"Match data newer than a certain age")
(@arg exists: -e --exists +takes_value
possible_values(&["yes", "no"])
"Match data based on whether it exists on the host")))
(@subcommand snap =>
(about: "Take a snapshot of local files")
(@arg remote: +takes_value "Remote to store data in")
(@arg local: +takes_value ... "Files or directories to snapshot")
(@arg no_trust_mtime: -T --("no-trust-mtime")
"Use content hashes to check for file changes rather than FS's mtime"))
(@subcommand restore =>
(about: "Restore local files from backup")
(@arg remote: +required "Remote to restore from")
(@arg local: ... min_values(1) "Files or directories to restore")
(@arg as_of: -t --time +takes_value
"Restore to most recent snapshot before given date/time")
(@arg overwrite: -o --overwrite "Overwrite existing local files")
(@arg from: -f --from +takes_value "Restore data from another machine")
(@arg no_perms: -p --("no-perms")
"Don't restore filesystem permissions")
(@arg no_attrs: -a --("no-attrs") "Don't restore file metadata")
(@arg into: -i --into conflicts_with[overwrite] +takes_value
"Restore to a given path")
)
).get_matches();
// load a config file
let config_path = opt_matches
.value_of("CONFIG")
.map(Path::new)
.map(Path::to_path_buf)
.unwrap_or(std::env::home_dir().unwrap().join(".bkprc"));
let cfg = load_config(&config_path);
// create the data dir if needed
let data_dir = opt_matches.value_of("DATADIR").map(Path::new)
.map(Path::to_path_buf)
.unwrap_or(std::env::home_dir().unwrap().join(".bkp"));
if let Err(e) = fs::metadata(&data_dir) {
if e.kind() == std::io::ErrorKind::NotFound {
if fs::create_dir(&data_dir).is_err() {
writeln!(std::io::stderr(), "bkp: Cannot create directory: {}",
data_dir.display()).unwrap();
std::process::exit(1);
}
} else {
writeln!(std::io::stderr(), "bkp: Cannot access directory: {}",
data_dir.display()).unwrap();
std::process::exit(1);
}
}
// open the key store
let kspath = data_dir.join("keystore");
let ks = match fs::metadata(&kspath) {
Ok(_) => match keys::Keystore::open(&kspath) {
Ok(k) => k,
Err(e) => {
err_write!("bkp: Cannot open keystore: {}", e.description());
std::process::exit(1);
}
},
Err(e) => if e.kind() == std::io::ErrorKind::NotFound {
match keys::Keystore::create(&kspath) {
Ok(k) => k,
Err(e) => {
err_write!("bkp: Cannot create keystore: {}", e.description());
std::process::exit(1);
}
}
} else {
writeln!(std::io::stderr(), "bkp: Cannot access keystore: {}",
kspath.display()).unwrap();
std::process::exit(1);
}
};
// parse global flags
let mut global_flags = GlobalOptions {
cfg: cfg,
verbose: opt_matches.is_present("VERBOSE"),
quiet: opt_matches.is_present("QUIET"),
data_dir: data_dir,
keystore: ks
};
// figure out what to do
match opt_matches.subcommand() {
("", _) => { println!("bkp: No subcommand specified"); },
("dest", Some(m)) => do_dest(m, &mut global_flags),
("test", Some(m)) => do_test(m, &global_flags),
("stat", Some(m)) => do_stat(m, &global_flags),
("clean", Some(m)) => do_clean(m, &global_flags),
("snap", Some(m)) => do_snap(m, &global_flags), | random_line_split |
||
main.rs | destinations
let max_left_col = opts.cfg.targets.iter()
.map(|ref x| x.name.len())
.max().unwrap_or(0);
for t in opts.cfg.targets.iter() {
println!("{1:0$} {2}", max_left_col, t.name, t.url.as_str());
}
},
("remove", Some(m)) => { // remove destinations
unimplemented!()
},
("test", Some(m)) => { // test destination connectivity
let mut has_errs = false;
let max_col = m.values_of("name").unwrap()
.map(|ref x| x.len()).max().unwrap_or(0);
for name in m.values_of("name").unwrap() {
let tgt = connect_backend(name.to_owned(), &opts);
match tgt {
Ok(_) => println!("{1:0$}: successful", max_col, name),
Err(e) => {
println!("{1:0$}: {2}", max_col, name, e);
has_errs = true;
}
}
}
if has_errs {
std::process::exit(1);
}
},
(_, _) => panic!("No subcommand handler found")
}
}
fn do_test(args: &clap::ArgMatches, opts: &GlobalOptions) {
let profile = match args.value_of("profile").unwrap() {
"quick" => history::IntegrityTestMode::Quick,
"normal" => history::IntegrityTestMode::Normal,
"slow" => history::IntegrityTestMode::Slow,
"exhaustive" => history::IntegrityTestMode::Exhaustive,
_ => panic!("unexpected test mode string")
};
let names = opts.cfg.targets.iter().map(|x| {x.name.clone()})
.chain(opts.cfg.target_groups.iter().map(|x| {x.name.clone()}));
for t in names {
let b = connect_backend(t.clone(), opts);
if let Err(e) = b {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
}
// construct a history object
let mut b = b.unwrap();
let hist = history::History::new(&mut b);
if let Err(e) = hist {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
}
// run the check
match hist.unwrap().check(profile) {
Err(e) => {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
},
Ok(true) => println!("{}: okay", t),
Ok(false) => println!("{}: failed", t),
}
}
}
fn do_stat(args: &clap::ArgMatches, opts: &GlobalOptions) {
unimplemented!()
}
fn do_clean(args: &clap::ArgMatches, opts: &GlobalOptions) {
unimplemented!()
}
fn do_snap(args: &clap::ArgMatches, opts: &GlobalOptions) {
let remote = args.value_of("remote").unwrap().to_owned();
let snap_paths: Vec<&str> = args.values_of("local").unwrap().collect();
let mut remote = connect_backend(remote, opts)
.unwrap_or_fail("backend connection failed");
// construct a history object
let mut history = history::History::new(&mut remote)
.unwrap_or_fail("failed to configure history layer");
// update paths
let new_tree = history.update_paths(snap_paths)
.unwrap_or_fail("failed to write modified trees");
// build a new snapshot
let snap = history.new_snapshot(new_tree)
.unwrap_or_fail("failed to create snapshot");
println!("snapshot created.");
}
fn do_restore(args: &clap::ArgMatches, opts: &GlobalOptions) {
let remote = args.value_of("remote").unwrap().to_owned();
// TODO: avoid specifying remote by searching for all remotes with a file
let objects: Vec<&Path> = args.values_of("local").unwrap()
.map(Path::new).collect();
let mut remote = connect_backend(remote, opts)
.unwrap_or_fail("backend connection failed");
let mut history = history::History::new(&mut remote)
.unwrap_or_fail("failed to configure history layer");
// TODO: figure out the target time, if any
// find the requested snapshot
// TODO: add command for recovering backups with broken head snapshot
let mut snapshot = history.get_snapshot()
.unwrap_or_fail("failed to read root snapshot");
if snapshot.is_none() {
eprintln!("bkp: cannot restore from empty target");
std::process::exit(1);
}
let snapshot = loop {
match snapshot {
None => {
eprintln!("bkp: no matching snapshot");
// TODO: show most recent one?
std::process::exit(1);
},
Some(snap) => {
// TODO: Add target time check here
if true {
break snap;
}
snapshot = snap.parent()
.unwrap_or_fail("failed to read snapshot");
}
}
};
// retrieve the objects we're interested in
let objects: history::Result<Vec<_>> = objects.into_iter()
.map(|obj| snapshot.get(&obj).map(|r| (obj, r)))
.collect();
let objects = objects.unwrap_or_fail("cannot read stored objects");
// warn about missing files, if any
if objects.iter().any(|x| x.1.is_none()) {
println!("The following paths could not be found:");
for p in objects.iter().filter(|x| x.1.is_none()) {
println!("\t{}", p.0.to_str().unwrap_or("<unprintable path>"));
}
println!("");
use std::ascii::AsciiExt;
let abort = loop {
print!("Do you want to continue restoring? (y/n) ");
std::io::stdout().flush().unwrap();
let mut response = String::new();
std::io::stdin().read_line(&mut response).unwrap();
match response.chars().next().map(|x| x.to_ascii_lowercase()) {
Some('y') => break false, // no abort
Some('n') => break true, // abort
_ => {}, // ask again
}
};
if abort {
println!("aborted");
return;
}
}
let objects: Vec<_> = objects.into_iter()
.filter_map(|(p,o)| o.map(|v| (p, v)))
.collect();
// actually reconstruct them
let base_path = Path::new(args.value_of("into").unwrap_or("/"));
let options = history::RestoreOptions::new()
.overwrite(args.is_present("overwrite"))
.ignore_permissions(args.is_present("no_perms"));
for (path, obj) in objects {
match obj.restore(&base_path, &options) {
Ok(()) => {},
Err(history::Error::InvalidArgument) => {
eprintln!("bkp: possible integrity violation found!");
eprintln!(" invalid object type at path: {}",
path.to_str().unwrap_or("<unprintable>"));
},
Err(e) => fail_error("cannot restore object", e)
}
}
}
fn load_config(pth: &Path) -> config::Config {
let cfg = config::Config::load(&pth);
if let Err(e) = cfg {
if let config::ConfigErr::IOError(ref err) = e {
if err.kind() == std::io::ErrorKind::NotFound {
err_write!("Creating new configuration file");
// try to create a new config
let cfg = config::Config::default();
cfg.save().ok().unwrap_or(());
return cfg
}
}
let errstr = match e {
config::ConfigErr::ParseError(x) => x,
config::ConfigErr::IOError(x) => String::from(x.description())
};
writeln!(std::io::stderr(),
"bkp: Cannot load config file: {}", errstr).unwrap();
std::process::exit(1);
}
return cfg.unwrap();
}
fn main() | {
let opt_matches = clap_app!(bkp =>
(version: "0.1")
(author: "Noah Zentzis <[email protected]>")
(about: "Automated system backup utility")
(@arg CONFIG: -c --config +takes_value "Specifies a config file to use")
(@arg DATADIR: -D --data-dir +takes_value "Specify the local data path")
(@arg BACKEND: -t --target +takes_value
"Override the default destination")
(@arg VERBOSE: -v --verbose "Enable verbose terminal output")
(@arg QUIET: -q --quiet "Silence non-error terminal output")
(@subcommand dest =>
(about: "Query and modify available backup destinations")
(@subcommand add =>
(about: "Create a new destination")
(@arg name: +required "The name of the new destination")
(@arg url: +required {|s| {Url::parse(&s).map(|_| ())
.map_err(|_| String::from("Not a valid URL"))}}
"The new destination's URL" )
(@arg user: -u --user +takes_value "Set the associated username") | identifier_body |
|
show_solution.py | figure_utils.configure_latex_fonts_latex()
data_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)
print("using the last results")
record = data_vns_sop[-1]
print("record", record)
problem_type = ProblemType.UNKNOWN
PROBLEM_FILE = record['PROBLEM_FILE']
PROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)
if "datasets/sop/" in PROBLEM_FILE:
print("showing SOP")
problem_type = ProblemType.SOP
SAVE_TO_FIGURE = "solution_sop.png"
elif "datasets/dop_sop_dataset/" in PROBLEM_FILE:
print("showing DOP")
problem_type = ProblemType.DOP
SAVE_TO_FIGURE = "solution_dop.png"
elif "datasets/opn_sop_dataset/" in PROBLEM_FILE:
print("showing OPN")
problem_type = ProblemType.OPN
SAVE_TO_FIGURE = "solution_opn.png"
else:
error("can not decide problem type based on problem file location")
problem_type = ProblemType.UNKNOWN
op = orienteering_utils.SetOrienteeringProblemDefinition()
op.load_problem_file(PROBLEM_FILE)
nodes = op.nodes
sets_prices = op.get_sets_prices()
sets = op.get_sets()
original_nodes = op.get_set_centers()
result_target_ids = record['RESULT_TARGET_IDS']
result_cluster_ids = record['RESULT_CLUSTER_IDS']
result_rewards = record['REWARDS']
print("problem loaded")
print("result_target_ids:", result_target_ids)
print("result_cluster_ids:", result_cluster_ids)
print("result_rewards", result_rewards)
print("sets_prices", sets_prices)
print("sets", sets)
print("nodes", nodes)
# for the DOP only
result_head_angs = []
sampling_heading = len(sets[0])
calc_reward = 0
for clust_idx in range(len(result_cluster_ids)):
clust = result_cluster_ids[clust_idx]
node = result_target_ids[clust_idx]
if problem_type == ProblemType.DOP:
node_inside_cluster = node - sets[clust][0]
# result_node_inside_cluster.append(node_inside_cluster)
head_ang = math.pi + (2 * math.pi * node_inside_cluster) / sampling_heading
result_head_angs.append(head_ang)
calc_reward += sets_prices[clust]
if node not in sets[clust]:
print("what the hell, it is not good")
print("calc_reward", calc_reward)
mycmap = plt.cm.get_cmap('RdYlBu_r')
maxx, maxy = -sys.float_info.max,-sys.float_info.max
minx, miny = sys.float_info.max,sys.float_info.max
circle_radiuses = np.ones([len(nodes), 1])
circle_radiuses1 = np.multiply(2.0, circle_radiuses)
nodes_w_rewards = np.zeros((len(nodes), 3))
if problem_type == ProblemType.DOP:
xses = [i[0] for i in original_nodes]
yses = [i[1] for i in original_nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(original_nodes), 3))
for nidx in range(len(original_nodes)):
nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]
nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]
nodes_w_rewards[nidx, 2] = sets_prices[nidx]
elif problem_type == ProblemType.OPN :
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
else:
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
minrew = min(nodes_w_rewards[:, 2])
maxrew = max(nodes_w_rewards[:, 2])
cNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew))
mycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)
fig_width = FIG_HEIGHT*(maxx-minx)/(maxy-miny)
figsize = (fig_width*0.9,FIG_HEIGHT)
print(figsize)
fig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
circles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], circle_radiuses1, c=nodes_w_rewards[:, 2] , alpha=0.05, edgecolor='black', linewidth=0.9, linestyle=':')
sc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], c=nodes_w_rewards[:, 2], cmap=mycmap , alpha=1.0, s=1, facecolor='black', lw=0.5)
plt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)
# print(nodes_w_rewards[:, 2])
if problem_type == ProblemType.DOP:
for nidx1 in range(len(nodes_w_rewards)):
points = []
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for hind in range(sampling_heading):
head_ang = math.pi + (2 * math.pi * hind) / sampling_heading
arrow_len = 30
arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), arrow_len * math.sin(head_ang))
set_rew = nodes_w_rewards[nidx1, 2]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)
elif problem_type == ProblemType.OPN:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
|
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
else:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if(nidx1 != nidx2):
node2 = nodes_w_rewards[nidx2, :]
# plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
for node_idx in range(1, len(result_target_ids)):
if problem_type == ProblemType.DOP:
step_size = 20
turning_radius = op.dubins_radius
node = result_cluster_ids[node_idx]
node_prew = result_cluster_ids[node_idx - 1]
q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1], result_head | node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if(nidx1 != nidx2):
node2 = nodes_w_rewards[nidx2, :]
# plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2) | conditional_block |
show_solution.py | figure_utils.configure_latex_fonts_latex()
data_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)
print("using the last results")
record = data_vns_sop[-1]
print("record", record)
problem_type = ProblemType.UNKNOWN
PROBLEM_FILE = record['PROBLEM_FILE']
PROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)
if "datasets/sop/" in PROBLEM_FILE: |
elif "datasets/dop_sop_dataset/" in PROBLEM_FILE:
print("showing DOP")
problem_type = ProblemType.DOP
SAVE_TO_FIGURE = "solution_dop.png"
elif "datasets/opn_sop_dataset/" in PROBLEM_FILE:
print("showing OPN")
problem_type = ProblemType.OPN
SAVE_TO_FIGURE = "solution_opn.png"
else:
error("can not decide problem type based on problem file location")
problem_type = ProblemType.UNKNOWN
op = orienteering_utils.SetOrienteeringProblemDefinition()
op.load_problem_file(PROBLEM_FILE)
nodes = op.nodes
sets_prices = op.get_sets_prices()
sets = op.get_sets()
original_nodes = op.get_set_centers()
result_target_ids = record['RESULT_TARGET_IDS']
result_cluster_ids = record['RESULT_CLUSTER_IDS']
result_rewards = record['REWARDS']
print("problem loaded")
print("result_target_ids:", result_target_ids)
print("result_cluster_ids:", result_cluster_ids)
print("result_rewards", result_rewards)
print("sets_prices", sets_prices)
print("sets", sets)
print("nodes", nodes)
# for the DOP only
result_head_angs = []
sampling_heading = len(sets[0])
calc_reward = 0
for clust_idx in range(len(result_cluster_ids)):
clust = result_cluster_ids[clust_idx]
node = result_target_ids[clust_idx]
if problem_type == ProblemType.DOP:
node_inside_cluster = node - sets[clust][0]
# result_node_inside_cluster.append(node_inside_cluster)
head_ang = math.pi + (2 * math.pi * node_inside_cluster) / sampling_heading
result_head_angs.append(head_ang)
calc_reward += sets_prices[clust]
if node not in sets[clust]:
print("what the hell, it is not good")
print("calc_reward", calc_reward)
mycmap = plt.cm.get_cmap('RdYlBu_r')
maxx, maxy = -sys.float_info.max,-sys.float_info.max
minx, miny = sys.float_info.max,sys.float_info.max
circle_radiuses = np.ones([len(nodes), 1])
circle_radiuses1 = np.multiply(2.0, circle_radiuses)
nodes_w_rewards = np.zeros((len(nodes), 3))
if problem_type == ProblemType.DOP:
xses = [i[0] for i in original_nodes]
yses = [i[1] for i in original_nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(original_nodes), 3))
for nidx in range(len(original_nodes)):
nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]
nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]
nodes_w_rewards[nidx, 2] = sets_prices[nidx]
elif problem_type == ProblemType.OPN :
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
else:
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
minrew = min(nodes_w_rewards[:, 2])
maxrew = max(nodes_w_rewards[:, 2])
cNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew))
mycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)
fig_width = FIG_HEIGHT*(maxx-minx)/(maxy-miny)
figsize = (fig_width*0.9,FIG_HEIGHT)
print(figsize)
fig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
circles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], circle_radiuses1, c=nodes_w_rewards[:, 2] , alpha=0.05, edgecolor='black', linewidth=0.9, linestyle=':')
sc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], c=nodes_w_rewards[:, 2], cmap=mycmap , alpha=1.0, s=1, facecolor='black', lw=0.5)
plt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)
# print(nodes_w_rewards[:, 2])
if problem_type == ProblemType.DOP:
for nidx1 in range(len(nodes_w_rewards)):
points = []
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for hind in range(sampling_heading):
head_ang = math.pi + (2 * math.pi * hind) / sampling_heading
arrow_len = 30
arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), arrow_len * math.sin(head_ang))
set_rew = nodes_w_rewards[nidx1, 2]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)
elif problem_type == ProblemType.OPN:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if(nidx1 != nidx2):
node2 = nodes_w_rewards[nidx2, :]
# plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
else:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if(nidx1 != nidx2):
node2 = nodes_w_rewards[nidx2, :]
# plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
for node_idx in range(1, len(result_target_ids)):
if problem_type == ProblemType.DOP:
step_size = 20
turning_radius = op.dubins_radius
node = result_cluster_ids[node_idx]
node_prew = result_cluster_ids[node_idx - 1]
q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1], result | print("showing SOP")
problem_type = ProblemType.SOP
SAVE_TO_FIGURE = "solution_sop.png" | random_line_split |
pager.py | out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
return out
def Info(self):
print(self._InfoString())
def MailInfo(self, sender, receiver):
import smtplib
# sending html email described here:
# http://stackoverflow.com/questions/882712/sending-html-email-in-python
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
msg = MIMEMultipart('alternative')
msg['Subject'] = "Pager rotation notification"
msg['From'] = sender
msg['To'] = receiver
html = """\
<html>
<head></head>
<body>
<p>""" + re.sub("\n", "<br>", self._InfoString()) + """</p>
<p>Pager rotation happens every monday at 00:00.</p>
<p>If the primary / backup contacts are unavailable, please
<ol>
<li>modify pager_config.txt</li>
<li>test your changes via ./pager.py --info</li>
<li>check them in and git push changes to be reflected</li>
<li>run ./pager.py --mail_info to broadcast the new info</li>
</ol>
</p>
</body>
</html>"""
# Record the MIME types of both parts - text/plain and text/html.
#part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
#msg.attach(part1)
msg.attach(part2)
smtp = smtplib.SMTP('localhost')
smtp.sendmail(sender, receiver, msg.as_string());
def SendAlert(self, msg, index = 0):
list = self._active_list
n = (self._GetPrimary() + index) % len(list)
print("Paging " + list[n][1] + " with message: " + msg)
if not self.dry_run: self._SendAlertToPhone(list[n][0], msg)
def _PrintStatus(self):
if len(self._status) > 0: print("Status : Message ID")
for key, value in list(self._status.items()):
print(value["status"] + " : " + key)
def _ProcessStatus(self):
more = "\nSee https://docs.google.com/a/room77.com/document/d/1YMrE5nM4aTG65ah6B3G_TxOG3vEKs4E84yKc4iAxH38/edit"
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 0)
if value["status"] == self.Status.OLD:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 1)
# print value["status"] + " : " + key
remove_list = [k for (k,v) in list(self._status.items()) if v["status"] == self.Status.OLD or v["status"] == self.Status.REPLIED ]
for key in remove_list: del(self._status[key])
def _ReadStatusFile(self):
if not os.path.exists(self.status_file): return
data = open(self.status_file).read()
if (data == None or data == ""): return
self._status = json.loads(data)
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW: self._status[key]["status"] = self.Status.OLD
def _WriteStatusFile(self):
if self.dry_run == 1: self._status.clear()
data = open(self.status_file, "w")
json.dump(self._status, data)
data.close()
def _ProcessMail(self, mid, rid, date, sender, subject, body):
if rid in self._status:
# Existing alert.
self._status[rid]["status"] = self.Status.REPLIED
elif mid not in self._status:
# New alert.
self._status[mid] = dict()
self._status[mid]["status"] = self.Status.NEW
self._status[mid]["date"] = date
self._status[mid]["subject"] = subject
self._status[mid]["body"] = body
if re.match(self.reply_match_pattern, subject):
print("At least one reply found muting current alarms")
self.dry_run = 1
def _FetchMails(self):
import poplib
mail = poplib.POP3_SSL("pop.gmail.com")
mail.user(self.monitor_email)
mail.pass_(self.monitor_pass)
n = len(mail.list()[1])
print("Found " + str(n) + " new emails.")
for i in range(n):
mid = rid = date = sender = subject = body = ""
body_started = 0
for line in mail.retr(i+1)[1]:
if body_started :
body = body + line + '\n'
else:
if re.match("^Message-ID: ", line, re.IGNORECASE) : mid = line[line.find(" ")+1:]
if re.match("^In-Reply-To: ", line, re.IGNORECASE) : rid = line[line.find(" ")+1:]
if re.match("^Subject: ", line, re.IGNORECASE) : subject = line[line.find(" ")+1:]
if re.match("^Date:", line, re.IGNORECASE) : date = line[line.find(" ")+1:]
if re.match("^From: ", line, re.IGNORECASE) : sender = line[line.find(" ")+1:]
if not body_started and re.match("^$", line) : body_started = 1
if re.match(self.alert_match_pattern, subject) : self._ProcessMail(mid, rid, date, sender, subject, body)
mail.dele(i+1)
mail.quit()
# self._PrintStatus()
self._ProcessStatus()
self._WriteStatusFile()
self._PrintStatus()
def _GetPrimary(self, offset_days = 0) :
# If offset_days is 0, number of days from some monday. mod 7 of this number
# is 0 if it is a monday. Rotation occurs at 00:00, every monday.
days = (datetime.datetime.today() - datetime.datetime.utcfromtimestamp(0)).days + 3 + offset_days
return days // 7 % len(self._active_list)
def _SendAlertToPhone(self, phone, msg):
if not self._sent_one:
import code
from googlevoice import Voice
voice = Voice()
voice.login(self.monitor_email, self.monitor_pass)
voice.send_sms(phone, msg) # Send an SMS.
voice.call(phone, self.monitor_phone, 3) # Call the person as well.
self._sent_one = 1
def Run(self):
if not os.path.exists(self.status_dir):
os.makedirs(self.status_dir)
self._ReadStatusFile()
return self._FetchMails()
def main():
# Fill in the default values relevant for you to avoid adding the flags for each run.
parser = argparse.ArgumentParser(description="Handle pager rotations and alerts.");
parser.add_argument("--info", action = "store_true", help="Outputs active pager duty list and current primary and backup.");
parser.add_argument("--mail_info", action = "store_true", help="Mails active pager duty list and current primary and backup to the specified email address.");
parser.add_argument("--dry_run", action = "store_true", help = "Do not wake people up. Output information to console instead.");
parser.add_argument("--call", type = int, help = "Offset of person on pager duty to call. 0 means primary, 1 means backup, 2 means secondary backup etc.");
parser.add_argument("--sender", type = str, default = "", help="In mail_info mode, send the email from this address.");
parser.add_argument("--receiver", type = str, default = "", help="In mail_info mode, send the email to this address.");
parser.add_argument("--offset_days", type = int, default = 0, help = "Offset to add to current time. This can be used to compute primary / backup at a future / past time.");
parser.add_argument("--msg", type = str, default = "An alert has been issued. Please check your email.", help = "Message to send (for SMS portion)");
parser.add_argument("--monitor_email", type = str, default = "", help = "Email address to monitor for the alarm pattern. Needs to be a google account with gvoice.");
parser.add_argument("--monitor_pass", type = str, default = "", help = "Password for the monitor email address.");
parser.add_argument("--monitor_phone", type = str, default = "", help = "Google voice phone number associated with the account. Example: 15551231234");
args = parser.parse_args()
pager = Pager() | pager.Init()
pager.offset_days = args.offset_days
pager.monitor_email = args.monitor_email | random_line_split |
|
pager.py | status_file = status_dir + "pager_status.json"
monitor_email = ""
monitor_pass = ""
monitor_phone = ""
alert_match_pattern = ".*ALARM.*"
reply_match_pattern = "^R[eE]:.*ALARM.*"
dry_run = 0
offset_days = 0
_status = dict()
_active_list = []
_sent_one = 0
class Status:
NEW = "new"
OLD = "old"
REPLIED = "replied"
def Init(self):
data = open(self.config_file)
self._active_list = []
for line in data.readlines() :
if re.match("^#", line) : continue
line = line.strip()
m = re.match(r"(?P<phone>[\d]{3}-[\d]{3}-[\d]{4})[\t](?P<email>\w+[@][\w\.]+)", line)
self._active_list += [(m.group("phone").translate(None, "-+"), m.group("email"))]
def _InfoString(self):
out = ""
list = self._active_list
out += "Pager rotation contains " + str(len(list)) + " contacts:\n"
for p in list:
out += p[0] + "\t" + p[1] +"\n"
primary = self._GetPrimary();
backup = (primary + 1) % len(list)
out += "\nCurrent contacts:"
out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
out += "\n\nPlease turn your phones to MAX volume to ensure you receive pages."
if self.offset_days != 0:
primary = self._GetPrimary(self.offset_days);
backup = (primary + 1) % len(list)
out += "\n\nContacts " + str(self.offset_days) + " days from now:"
out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
return out
def Info(self):
print(self._InfoString())
def MailInfo(self, sender, receiver):
import smtplib
# sending html email described here:
# http://stackoverflow.com/questions/882712/sending-html-email-in-python
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
msg = MIMEMultipart('alternative')
msg['Subject'] = "Pager rotation notification"
msg['From'] = sender
msg['To'] = receiver
html = """\
<html>
<head></head>
<body>
<p>""" + re.sub("\n", "<br>", self._InfoString()) + """</p>
<p>Pager rotation happens every monday at 00:00.</p>
<p>If the primary / backup contacts are unavailable, please
<ol>
<li>modify pager_config.txt</li>
<li>test your changes via ./pager.py --info</li>
<li>check them in and git push changes to be reflected</li>
<li>run ./pager.py --mail_info to broadcast the new info</li>
</ol>
</p>
</body>
</html>"""
# Record the MIME types of both parts - text/plain and text/html.
#part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
#msg.attach(part1)
msg.attach(part2)
smtp = smtplib.SMTP('localhost')
smtp.sendmail(sender, receiver, msg.as_string());
def SendAlert(self, msg, index = 0):
list = self._active_list
n = (self._GetPrimary() + index) % len(list)
print("Paging " + list[n][1] + " with message: " + msg)
if not self.dry_run: self._SendAlertToPhone(list[n][0], msg)
def _PrintStatus(self):
if len(self._status) > 0: print("Status : Message ID")
for key, value in list(self._status.items()):
print(value["status"] + " : " + key)
def _ProcessStatus(self):
more = "\nSee https://docs.google.com/a/room77.com/document/d/1YMrE5nM4aTG65ah6B3G_TxOG3vEKs4E84yKc4iAxH38/edit"
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 0)
if value["status"] == self.Status.OLD:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 1)
# print value["status"] + " : " + key
remove_list = [k for (k,v) in list(self._status.items()) if v["status"] == self.Status.OLD or v["status"] == self.Status.REPLIED ]
for key in remove_list: del(self._status[key])
def _ReadStatusFile(self):
if not os.path.exists(self.status_file): return
data = open(self.status_file).read()
if (data == None or data == ""): return
self._status = json.loads(data)
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW: self._status[key]["status"] = self.Status.OLD
def _WriteStatusFile(self):
if self.dry_run == 1: self._status.clear()
data = open(self.status_file, "w")
json.dump(self._status, data)
data.close()
def _ProcessMail(self, mid, rid, date, sender, subject, body):
if rid in self._status:
# Existing alert.
self._status[rid]["status"] = self.Status.REPLIED
elif mid not in self._status:
# New alert.
self._status[mid] = dict()
self._status[mid]["status"] = self.Status.NEW
self._status[mid]["date"] = date
self._status[mid]["subject"] = subject
self._status[mid]["body"] = body
if re.match(self.reply_match_pattern, subject):
print("At least one reply found muting current alarms")
self.dry_run = 1
def _FetchMails(self):
import poplib
mail = poplib.POP3_SSL("pop.gmail.com")
mail.user(self.monitor_email)
mail.pass_(self.monitor_pass)
n = len(mail.list()[1])
print("Found " + str(n) + " new emails.")
for i in range(n):
mid = rid = date = sender = subject = body = ""
body_started = 0
for line in mail.retr(i+1)[1]:
if body_started :
body = body + line + '\n'
else:
if re.match("^Message-ID: ", line, re.IGNORECASE) : mid = line[line.find(" ")+1:]
if re.match("^In-Reply-To: ", line, re.IGNORECASE) : rid = line[line.find(" ")+1:]
if re.match("^Subject: ", line, re.IGNORECASE) : subject = line[line.find(" ")+1:]
if re.match("^Date:", line, re.IGNORECASE) : date = line[line.find(" ")+1:]
if re.match("^From: ", line, re.IGNORECASE) : sender = line[line.find(" ")+1:]
if not body_started and re.match("^$", line) : body_started = 1
if re.match(self.alert_match_pattern, subject) : self._ProcessMail(mid, rid, date, sender, subject, body)
mail.dele(i+1)
mail.quit()
# self._PrintStatus()
self._ProcessStatus()
self._WriteStatusFile()
self._PrintStatus()
def _GetPrimary(self, offset_days = 0) :
# If offset_days is 0, number of days from some monday. mod 7 of this number
# is 0 if it is a monday. Rotation occurs at 00:00, every monday.
days = (datetime.datetime.today() - datetime.datetime.utcfromtimestamp(0)).days + 3 + offset_days
return days // 7 % len(self._active_list)
def _SendAlertToPhone(self, phone, msg):
if not self._sent_one:
|
def Run(self):
if not os.path.exists(self.status_dir):
os.makedirs(self.status_dir)
self._ReadStatusFile()
return self._FetchMails()
def main():
# Fill in the default values relevant for you to avoid adding the flags for each run.
parser | import code
from googlevoice import Voice
voice = Voice()
voice.login(self.monitor_email, self.monitor_pass)
voice.send_sms(phone, msg) # Send an SMS.
voice.call(phone, self.monitor_phone, 3) # Call the person as well.
self._sent_one = 1 | conditional_block |
pager.py | status_file = status_dir + "pager_status.json"
monitor_email = ""
monitor_pass = ""
monitor_phone = ""
alert_match_pattern = ".*ALARM.*"
reply_match_pattern = "^R[eE]:.*ALARM.*"
dry_run = 0
offset_days = 0
_status = dict()
_active_list = []
_sent_one = 0
class Status:
NEW = "new"
OLD = "old"
REPLIED = "replied"
def Init(self):
data = open(self.config_file)
self._active_list = []
for line in data.readlines() :
if re.match("^#", line) : continue
line = line.strip()
m = re.match(r"(?P<phone>[\d]{3}-[\d]{3}-[\d]{4})[\t](?P<email>\w+[@][\w\.]+)", line)
self._active_list += [(m.group("phone").translate(None, "-+"), m.group("email"))]
def _InfoString(self):
out = ""
list = self._active_list
out += "Pager rotation contains " + str(len(list)) + " contacts:\n"
for p in list:
out += p[0] + "\t" + p[1] +"\n"
primary = self._GetPrimary();
backup = (primary + 1) % len(list)
out += "\nCurrent contacts:"
out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
out += "\n\nPlease turn your phones to MAX volume to ensure you receive pages."
if self.offset_days != 0:
primary = self._GetPrimary(self.offset_days);
backup = (primary + 1) % len(list)
out += "\n\nContacts " + str(self.offset_days) + " days from now:"
out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
return out
def Info(self):
print(self._InfoString())
def MailInfo(self, sender, receiver):
import smtplib
# sending html email described here:
# http://stackoverflow.com/questions/882712/sending-html-email-in-python
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
msg = MIMEMultipart('alternative')
msg['Subject'] = "Pager rotation notification"
msg['From'] = sender
msg['To'] = receiver
html = """\
<html>
<head></head>
<body>
<p>""" + re.sub("\n", "<br>", self._InfoString()) + """</p>
<p>Pager rotation happens every monday at 00:00.</p>
<p>If the primary / backup contacts are unavailable, please
<ol>
<li>modify pager_config.txt</li>
<li>test your changes via ./pager.py --info</li>
<li>check them in and git push changes to be reflected</li>
<li>run ./pager.py --mail_info to broadcast the new info</li>
</ol>
</p>
</body>
</html>"""
# Record the MIME types of both parts - text/plain and text/html.
#part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
#msg.attach(part1)
msg.attach(part2)
smtp = smtplib.SMTP('localhost')
smtp.sendmail(sender, receiver, msg.as_string());
def SendAlert(self, msg, index = 0):
list = self._active_list
n = (self._GetPrimary() + index) % len(list)
print("Paging " + list[n][1] + " with message: " + msg)
if not self.dry_run: self._SendAlertToPhone(list[n][0], msg)
def _PrintStatus(self):
if len(self._status) > 0: print("Status : Message ID")
for key, value in list(self._status.items()):
print(value["status"] + " : " + key)
def _ProcessStatus(self):
more = "\nSee https://docs.google.com/a/room77.com/document/d/1YMrE5nM4aTG65ah6B3G_TxOG3vEKs4E84yKc4iAxH38/edit"
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 0)
if value["status"] == self.Status.OLD:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 1)
# print value["status"] + " : " + key
remove_list = [k for (k,v) in list(self._status.items()) if v["status"] == self.Status.OLD or v["status"] == self.Status.REPLIED ]
for key in remove_list: del(self._status[key])
def _ReadStatusFile(self):
if not os.path.exists(self.status_file): return
data = open(self.status_file).read()
if (data == None or data == ""): return
self._status = json.loads(data)
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW: self._status[key]["status"] = self.Status.OLD
def _WriteStatusFile(self):
if self.dry_run == 1: self._status.clear()
data = open(self.status_file, "w")
json.dump(self._status, data)
data.close()
def _ProcessMail(self, mid, rid, date, sender, subject, body):
if rid in self._status:
# Existing alert.
self._status[rid]["status"] = self.Status.REPLIED
elif mid not in self._status:
# New alert.
self._status[mid] = dict()
self._status[mid]["status"] = self.Status.NEW
self._status[mid]["date"] = date
self._status[mid]["subject"] = subject
self._status[mid]["body"] = body
if re.match(self.reply_match_pattern, subject):
print("At least one reply found muting current alarms")
self.dry_run = 1
def _FetchMails(self):
import poplib
mail = poplib.POP3_SSL("pop.gmail.com")
mail.user(self.monitor_email)
mail.pass_(self.monitor_pass)
n = len(mail.list()[1])
print("Found " + str(n) + " new emails.")
for i in range(n):
mid = rid = date = sender = subject = body = ""
body_started = 0
for line in mail.retr(i+1)[1]:
if body_started :
body = body + line + '\n'
else:
if re.match("^Message-ID: ", line, re.IGNORECASE) : mid = line[line.find(" ")+1:]
if re.match("^In-Reply-To: ", line, re.IGNORECASE) : rid = line[line.find(" ")+1:]
if re.match("^Subject: ", line, re.IGNORECASE) : subject = line[line.find(" ")+1:]
if re.match("^Date:", line, re.IGNORECASE) : date = line[line.find(" ")+1:]
if re.match("^From: ", line, re.IGNORECASE) : sender = line[line.find(" ")+1:]
if not body_started and re.match("^$", line) : body_started = 1
if re.match(self.alert_match_pattern, subject) : self._ProcessMail(mid, rid, date, sender, subject, body)
mail.dele(i+1)
mail.quit()
# self._PrintStatus()
self._ProcessStatus()
self._WriteStatusFile()
self._PrintStatus()
def _GetPrimary(self, offset_days = 0) :
# If offset_days is 0, number of days from some monday. mod 7 of this number
# is 0 if it is a monday. Rotation occurs at 00:00, every monday.
|
def _SendAlertToPhone(self, phone, msg):
if not self._sent_one:
import code
from googlevoice import Voice
voice = Voice()
voice.login(self.monitor_email, self.monitor_pass)
voice.send_sms(phone, msg) # Send an SMS.
voice.call(phone, self.monitor_phone, 3) # Call the person as well.
self._sent_one = 1
def Run(self):
if not os.path.exists(self.status_dir):
os.makedirs(self.status_dir)
self._ReadStatusFile()
return self._FetchMails()
def main():
# Fill in the default values relevant for you to avoid adding the flags for each run.
| days = (datetime.datetime.today() - datetime.datetime.utcfromtimestamp(0)).days + 3 + offset_days
return days // 7 % len(self._active_list) | identifier_body |
pager.py | status_file = status_dir + "pager_status.json"
monitor_email = ""
monitor_pass = ""
monitor_phone = ""
alert_match_pattern = ".*ALARM.*"
reply_match_pattern = "^R[eE]:.*ALARM.*"
dry_run = 0
offset_days = 0
_status = dict()
_active_list = []
_sent_one = 0
class Status:
NEW = "new"
OLD = "old"
REPLIED = "replied"
def Init(self):
data = open(self.config_file)
self._active_list = []
for line in data.readlines() :
if re.match("^#", line) : continue
line = line.strip()
m = re.match(r"(?P<phone>[\d]{3}-[\d]{3}-[\d]{4})[\t](?P<email>\w+[@][\w\.]+)", line)
self._active_list += [(m.group("phone").translate(None, "-+"), m.group("email"))]
def _InfoString(self):
out = ""
list = self._active_list
out += "Pager rotation contains " + str(len(list)) + " contacts:\n"
for p in list:
out += p[0] + "\t" + p[1] +"\n"
primary = self._GetPrimary();
backup = (primary + 1) % len(list)
out += "\nCurrent contacts:"
out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
out += "\n\nPlease turn your phones to MAX volume to ensure you receive pages."
if self.offset_days != 0:
primary = self._GetPrimary(self.offset_days);
backup = (primary + 1) % len(list)
out += "\n\nContacts " + str(self.offset_days) + " days from now:"
out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
return out
def Info(self):
print(self._InfoString())
def MailInfo(self, sender, receiver):
import smtplib
# sending html email described here:
# http://stackoverflow.com/questions/882712/sending-html-email-in-python
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
msg = MIMEMultipart('alternative')
msg['Subject'] = "Pager rotation notification"
msg['From'] = sender
msg['To'] = receiver
html = """\
<html>
<head></head>
<body>
<p>""" + re.sub("\n", "<br>", self._InfoString()) + """</p>
<p>Pager rotation happens every monday at 00:00.</p>
<p>If the primary / backup contacts are unavailable, please
<ol>
<li>modify pager_config.txt</li>
<li>test your changes via ./pager.py --info</li>
<li>check them in and git push changes to be reflected</li>
<li>run ./pager.py --mail_info to broadcast the new info</li>
</ol>
</p>
</body>
</html>"""
# Record the MIME types of both parts - text/plain and text/html.
#part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
#msg.attach(part1)
msg.attach(part2)
smtp = smtplib.SMTP('localhost')
smtp.sendmail(sender, receiver, msg.as_string());
def SendAlert(self, msg, index = 0):
list = self._active_list
n = (self._GetPrimary() + index) % len(list)
print("Paging " + list[n][1] + " with message: " + msg)
if not self.dry_run: self._SendAlertToPhone(list[n][0], msg)
def _PrintStatus(self):
if len(self._status) > 0: print("Status : Message ID")
for key, value in list(self._status.items()):
print(value["status"] + " : " + key)
def _ProcessStatus(self):
more = "\nSee https://docs.google.com/a/room77.com/document/d/1YMrE5nM4aTG65ah6B3G_TxOG3vEKs4E84yKc4iAxH38/edit"
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 0)
if value["status"] == self.Status.OLD:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 1)
# print value["status"] + " : " + key
remove_list = [k for (k,v) in list(self._status.items()) if v["status"] == self.Status.OLD or v["status"] == self.Status.REPLIED ]
for key in remove_list: del(self._status[key])
def _ReadStatusFile(self):
if not os.path.exists(self.status_file): return
data = open(self.status_file).read()
if (data == None or data == ""): return
self._status = json.loads(data)
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW: self._status[key]["status"] = self.Status.OLD
def _WriteStatusFile(self):
if self.dry_run == 1: self._status.clear()
data = open(self.status_file, "w")
json.dump(self._status, data)
data.close()
def _ProcessMail(self, mid, rid, date, sender, subject, body):
if rid in self._status:
# Existing alert.
self._status[rid]["status"] = self.Status.REPLIED
elif mid not in self._status:
# New alert.
self._status[mid] = dict()
self._status[mid]["status"] = self.Status.NEW
self._status[mid]["date"] = date
self._status[mid]["subject"] = subject
self._status[mid]["body"] = body
if re.match(self.reply_match_pattern, subject):
print("At least one reply found muting current alarms")
self.dry_run = 1
def _FetchMails(self):
import poplib
mail = poplib.POP3_SSL("pop.gmail.com")
mail.user(self.monitor_email)
mail.pass_(self.monitor_pass)
n = len(mail.list()[1])
print("Found " + str(n) + " new emails.")
for i in range(n):
mid = rid = date = sender = subject = body = ""
body_started = 0
for line in mail.retr(i+1)[1]:
if body_started :
body = body + line + '\n'
else:
if re.match("^Message-ID: ", line, re.IGNORECASE) : mid = line[line.find(" ")+1:]
if re.match("^In-Reply-To: ", line, re.IGNORECASE) : rid = line[line.find(" ")+1:]
if re.match("^Subject: ", line, re.IGNORECASE) : subject = line[line.find(" ")+1:]
if re.match("^Date:", line, re.IGNORECASE) : date = line[line.find(" ")+1:]
if re.match("^From: ", line, re.IGNORECASE) : sender = line[line.find(" ")+1:]
if not body_started and re.match("^$", line) : body_started = 1
if re.match(self.alert_match_pattern, subject) : self._ProcessMail(mid, rid, date, sender, subject, body)
mail.dele(i+1)
mail.quit()
# self._PrintStatus()
self._ProcessStatus()
self._WriteStatusFile()
self._PrintStatus()
def _GetPrimary(self, offset_days = 0) :
# If offset_days is 0, number of days from some monday. mod 7 of this number
# is 0 if it is a monday. Rotation occurs at 00:00, every monday.
days = (datetime.datetime.today() - datetime.datetime.utcfromtimestamp(0)).days + 3 + offset_days
return days // 7 % len(self._active_list)
def _SendAlertToPhone(self, phone, msg):
if not self._sent_one:
import code
from googlevoice import Voice
voice = Voice()
voice.login(self.monitor_email, self.monitor_pass)
voice.send_sms(phone, msg) # Send an SMS.
voice.call(phone, self.monitor_phone, 3) # Call the person as well.
self._sent_one = 1
def | (self):
if not os.path.exists(self.status_dir):
os.makedirs(self.status_dir)
self._ReadStatusFile()
return self._FetchMails()
def main():
# Fill in the default values relevant for you to avoid adding the flags for each run.
| Run | identifier_name |
main.go | 13/pflag"
"github.com/optakt/flow-dps/codec/zbor"
"github.com/optakt/flow-dps/metrics/output"
"github.com/optakt/flow-dps/metrics/rcrowley"
"github.com/optakt/flow-dps/models/dps"
"github.com/optakt/flow-dps/service/chain"
"github.com/optakt/flow-dps/service/feeder"
"github.com/optakt/flow-dps/service/forest"
"github.com/optakt/flow-dps/service/index"
"github.com/optakt/flow-dps/service/loader"
"github.com/optakt/flow-dps/service/mapper"
"github.com/optakt/flow-dps/service/metrics"
"github.com/optakt/flow-dps/service/storage"
)
const (
success = 0
failure = 1
)
func m | ) {
os.Exit(run())
}
func run() int {
// Signal catching for clean shutdown.
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt)
// Command line parameter initialization.
var (
flagCheckpoint string
flagData string
flagForce bool
flagIndex string
flagIndexAll bool
flagIndexCollections bool
flagIndexGuarantees bool
flagIndexCommit bool
flagIndexEvents bool
flagIndexHeader bool
flagIndexPayloads bool
flagIndexResults bool
flagIndexTransactions bool
flagIndexSeals bool
flagLevel string
flagMetrics bool
flagMetricsInterval time.Duration
flagSkipBootstrap bool
flagTrie string
)
pflag.StringVarP(&flagCheckpoint, "checkpoint", "c", "", "checkpoint file for state trie")
pflag.StringVarP(&flagData, "data", "d", "", "database directory for protocol data")
pflag.BoolVarP(&flagForce, "force", "f", false, "overwrite existing index database")
pflag.StringVarP(&flagIndex, "index", "i", "index", "database directory for state index")
pflag.BoolVarP(&flagIndexAll, "index-all", "a", false, "index everything")
pflag.BoolVar(&flagIndexCollections, "index-collections", false, "index collections")
pflag.BoolVar(&flagIndexGuarantees, "index-guarantees", false, "index collection guarantees")
pflag.BoolVar(&flagIndexCommit, "index-commits", false, "index commits")
pflag.BoolVar(&flagIndexEvents, "index-events", false, "index events")
pflag.BoolVar(&flagIndexHeader, "index-headers", false, "index headers")
pflag.BoolVar(&flagIndexPayloads, "index-payloads", false, "index payloads")
pflag.BoolVar(&flagIndexResults, "index-results", false, "index transaction results")
pflag.BoolVar(&flagIndexTransactions, "index-transactions", false, "index transactions")
pflag.BoolVar(&flagIndexSeals, "index-seals", false, "index seals")
pflag.StringVarP(&flagLevel, "level", "l", "info", "log output level")
pflag.BoolVarP(&flagMetrics, "metrics", "m", false, "enable metrics collection and output")
pflag.DurationVar(&flagMetricsInterval, "metrics-interval", 5*time.Minute, "defines the interval of metrics output to log")
pflag.BoolVar(&flagSkipBootstrap, "skip-bootstrap", false, "enable skipping checkpoint register payloads indexing")
pflag.StringVarP(&flagTrie, "trie", "t", "", "data directory for state ledger")
pflag.Parse()
// Increase the GOMAXPROCS value in order to use the full IOPS available, see:
// https://groups.google.com/g/golang-nuts/c/jPb_h3TvlKE
_ = runtime.GOMAXPROCS(128)
// Logger initialization.
zerolog.TimestampFunc = func() time.Time { return time.Now().UTC() }
log := zerolog.New(os.Stderr).With().Timestamp().Logger().Level(zerolog.DebugLevel)
level, err := zerolog.ParseLevel(flagLevel)
if err != nil {
log.Error().Str("level", flagLevel).Err(err).Msg("could not parse log level")
return failure
}
log = log.Level(level)
// Ensure that at least one index is specified.
if !flagIndexAll && !flagIndexCommit && !flagIndexHeader && !flagIndexPayloads && !flagIndexCollections &&
!flagIndexGuarantees && !flagIndexTransactions && !flagIndexResults && !flagIndexEvents && !flagIndexSeals {
log.Error().Str("level", flagLevel).Msg("no indexing option specified, use -a/--all to build all indexes")
pflag.Usage()
return failure
}
// Fail if IndexAll is specified along with other index flags, as this would most likely mean that the user does
// not understand what they are doing.
if flagIndexAll && (flagIndexCommit || flagIndexHeader || flagIndexPayloads || flagIndexGuarantees ||
flagIndexCollections || flagIndexTransactions || flagIndexResults || flagIndexEvents || flagIndexSeals) {
log.Error().Str("level", flagLevel).Msg("-a/--all is mutually exclusive with specific indexing flags")
pflag.Usage()
return failure
}
// Open index database.
db, err := badger.Open(dps.DefaultOptions(flagIndex))
if err != nil {
log.Error().Str("index", flagIndex).Err(err).Msg("could not open index DB")
return failure
}
defer db.Close()
// Open protocol state database.
data, err := badger.Open(dps.DefaultOptions(flagData))
if err != nil {
log.Error().Err(err).Msg("could not open blockchain database")
return failure
}
defer data.Close()
// We initialize a metrics logger regardless of whether metrics are enabled;
// it will just do nothing if there are no registered metrics.
mout := output.New(log, flagMetricsInterval)
// The storage library is initialized with a codec and provides functions to
// interact with a Badger database while encoding and compressing
// transparently.
var codec dps.Codec
codec, err = zbor.NewCodec()
if err != nil {
log.Error().Err(err).Msg("could not initialize storage codec")
return failure
}
if flagMetrics {
size := rcrowley.NewSize("store")
mout.Register(size)
codec = metrics.NewCodec(codec, size)
}
storage := storage.New(codec)
// Check if index already exists.
_, err = index.NewReader(db, storage).First()
indexExists := err == nil
if indexExists && !flagForce {
log.Error().Err(err).Msg("index already exists, manually delete it or use (-f, --force) to overwrite it")
return failure
}
// The loader component is responsible for loading and decoding the checkpoint.
load := loader.New(
loader.WithCheckpointPath(flagCheckpoint),
)
// The chain is responsible for reading blockchain data from the protocol state.
var disk dps.Chain
disk = chain.FromDisk(data)
if flagMetrics {
time := rcrowley.NewTime("read")
mout.Register(time)
disk = metrics.NewChain(disk, time)
}
// Feeder is responsible for reading the write-ahead log of the execution state.
segments, err := wal.NewSegmentsReader(flagTrie)
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not open segments reader")
return failure
}
feed, err := feeder.FromDisk(wal.NewReader(segments))
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not initialize feeder")
return failure
}
// Writer is responsible for writing the index data to the index database.
index := index.NewWriter(db, storage)
defer func() {
err := index.Close()
if err != nil {
log.Error().Err(err).Msg("could not close index")
}
}()
write := dps.Writer(index)
if flagMetrics {
time := rcrowley.NewTime("write")
mout.Register(time)
write = metrics.NewWriter(write, time)
}
// Initialize the transitions with the dependencies and add them to the FSM.
transitions := mapper.NewTransitions(log, load, disk, feed, write,
mapper.WithIndexCommit(flagIndexAll || flagIndexCommit),
mapper.WithIndexHeader(flagIndexAll || flagIndexHeader),
mapper.WithIndexCollections(flagIndexAll || flagIndexCollections),
mapper.WithIndexGuarantees(flagIndexAll || flagIndexGuarantees),
mapper.WithIndexTransactions(flagIndexAll || flagIndexTransactions),
mapper.WithIndexResults(flagIndexAll || flagIndexResults),
mapper.WithIndexEvents(flagIndexAll || flagIndexEvents),
mapper.WithIndexPayloads(flagIndexAll || flagIndexPayloads),
mapper.WithIndexSeals(flagIndexAll || flagIndexSeals),
mapper.WithSkipBootstrap(flagSkipBootstrap),
)
forest := forest.New()
state := mapper.EmptyState(forest)
fsm := mapper.NewFSM(state,
mapper.WithTransition(mapper.StatusEmpty, transitions.BootstrapState),
mapper.WithTransition(mapper.StatusUpdating, transitions.UpdateTree),
mapper.WithTransition(mapper.StatusMatched, transitions.Collect | ain( | identifier_name |
main.go | f13/pflag"
"github.com/optakt/flow-dps/codec/zbor"
"github.com/optakt/flow-dps/metrics/output"
"github.com/optakt/flow-dps/metrics/rcrowley"
"github.com/optakt/flow-dps/models/dps"
"github.com/optakt/flow-dps/service/chain"
"github.com/optakt/flow-dps/service/feeder"
"github.com/optakt/flow-dps/service/forest"
"github.com/optakt/flow-dps/service/index"
"github.com/optakt/flow-dps/service/loader"
"github.com/optakt/flow-dps/service/mapper"
"github.com/optakt/flow-dps/service/metrics"
"github.com/optakt/flow-dps/service/storage"
)
const (
success = 0
failure = 1
)
func main() {
os.Exit(run())
}
func run() int {
// Signal catching for clean shutdown.
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt)
// Command line parameter initialization.
var (
flagCheckpoint string
flagData string
flagForce bool
flagIndex string
flagIndexAll bool
flagIndexCollections bool
flagIndexGuarantees bool
flagIndexCommit bool
flagIndexEvents bool
flagIndexHeader bool
flagIndexPayloads bool
flagIndexResults bool
flagIndexTransactions bool
flagIndexSeals bool
flagLevel string
flagMetrics bool
flagMetricsInterval time.Duration
flagSkipBootstrap bool
flagTrie string
)
pflag.StringVarP(&flagCheckpoint, "checkpoint", "c", "", "checkpoint file for state trie")
pflag.StringVarP(&flagData, "data", "d", "", "database directory for protocol data")
pflag.BoolVarP(&flagForce, "force", "f", false, "overwrite existing index database")
pflag.StringVarP(&flagIndex, "index", "i", "index", "database directory for state index")
pflag.BoolVarP(&flagIndexAll, "index-all", "a", false, "index everything")
pflag.BoolVar(&flagIndexCollections, "index-collections", false, "index collections")
pflag.BoolVar(&flagIndexGuarantees, "index-guarantees", false, "index collection guarantees")
pflag.BoolVar(&flagIndexCommit, "index-commits", false, "index commits")
pflag.BoolVar(&flagIndexEvents, "index-events", false, "index events")
pflag.BoolVar(&flagIndexHeader, "index-headers", false, "index headers")
pflag.BoolVar(&flagIndexPayloads, "index-payloads", false, "index payloads")
pflag.BoolVar(&flagIndexResults, "index-results", false, "index transaction results")
pflag.BoolVar(&flagIndexTransactions, "index-transactions", false, "index transactions")
pflag.BoolVar(&flagIndexSeals, "index-seals", false, "index seals")
pflag.StringVarP(&flagLevel, "level", "l", "info", "log output level")
pflag.BoolVarP(&flagMetrics, "metrics", "m", false, "enable metrics collection and output")
pflag.DurationVar(&flagMetricsInterval, "metrics-interval", 5*time.Minute, "defines the interval of metrics output to log")
pflag.BoolVar(&flagSkipBootstrap, "skip-bootstrap", false, "enable skipping checkpoint register payloads indexing")
pflag.StringVarP(&flagTrie, "trie", "t", "", "data directory for state ledger")
pflag.Parse()
// Increase the GOMAXPROCS value in order to use the full IOPS available, see:
// https://groups.google.com/g/golang-nuts/c/jPb_h3TvlKE
_ = runtime.GOMAXPROCS(128)
// Logger initialization.
zerolog.TimestampFunc = func() time.Time { return time.Now().UTC() }
log := zerolog.New(os.Stderr).With().Timestamp().Logger().Level(zerolog.DebugLevel)
level, err := zerolog.ParseLevel(flagLevel)
if err != nil {
log.Error().Str("level", flagLevel).Err(err).Msg("could not parse log level")
return failure
}
log = log.Level(level)
// Ensure that at least one index is specified.
if !flagIndexAll && !flagIndexCommit && !flagIndexHeader && !flagIndexPayloads && !flagIndexCollections &&
!flagIndexGuarantees && !flagIndexTransactions && !flagIndexResults && !flagIndexEvents && !flagIndexSeals {
log.Error().Str("level", flagLevel).Msg("no indexing option specified, use -a/--all to build all indexes")
pflag.Usage()
return failure
}
// Fail if IndexAll is specified along with other index flags, as this would most likely mean that the user does
// not understand what they are doing.
if flagIndexAll && (flagIndexCommit || flagIndexHeader || flagIndexPayloads || flagIndexGuarantees ||
flagIndexCollections || flagIndexTransactions || flagIndexResults || flagIndexEvents || flagIndexSeals) {
log.Error().Str("level", flagLevel).Msg("-a/--all is mutually exclusive with specific indexing flags")
pflag.Usage()
return failure
}
// Open index database.
db, err := badger.Open(dps.DefaultOptions(flagIndex))
if err != nil {
log.Error().Str("index", flagIndex).Err(err).Msg("could not open index DB")
return failure
}
defer db.Close()
// Open protocol state database.
data, err := badger.Open(dps.DefaultOptions(flagData))
if err != nil {
log.Error().Err(err).Msg("could not open blockchain database")
return failure
}
defer data.Close()
// We initialize a metrics logger regardless of whether metrics are enabled;
// it will just do nothing if there are no registered metrics.
mout := output.New(log, flagMetricsInterval)
// The storage library is initialized with a codec and provides functions to
// interact with a Badger database while encoding and compressing
// transparently.
var codec dps.Codec
codec, err = zbor.NewCodec()
if err != nil {
log.Error().Err(err).Msg("could not initialize storage codec")
return failure
}
if flagMetrics { | storage := storage.New(codec)
// Check if index already exists.
_, err = index.NewReader(db, storage).First()
indexExists := err == nil
if indexExists && !flagForce {
log.Error().Err(err).Msg("index already exists, manually delete it or use (-f, --force) to overwrite it")
return failure
}
// The loader component is responsible for loading and decoding the checkpoint.
load := loader.New(
loader.WithCheckpointPath(flagCheckpoint),
)
// The chain is responsible for reading blockchain data from the protocol state.
var disk dps.Chain
disk = chain.FromDisk(data)
if flagMetrics {
time := rcrowley.NewTime("read")
mout.Register(time)
disk = metrics.NewChain(disk, time)
}
// Feeder is responsible for reading the write-ahead log of the execution state.
segments, err := wal.NewSegmentsReader(flagTrie)
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not open segments reader")
return failure
}
feed, err := feeder.FromDisk(wal.NewReader(segments))
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not initialize feeder")
return failure
}
// Writer is responsible for writing the index data to the index database.
index := index.NewWriter(db, storage)
defer func() {
err := index.Close()
if err != nil {
log.Error().Err(err).Msg("could not close index")
}
}()
write := dps.Writer(index)
if flagMetrics {
time := rcrowley.NewTime("write")
mout.Register(time)
write = metrics.NewWriter(write, time)
}
// Initialize the transitions with the dependencies and add them to the FSM.
transitions := mapper.NewTransitions(log, load, disk, feed, write,
mapper.WithIndexCommit(flagIndexAll || flagIndexCommit),
mapper.WithIndexHeader(flagIndexAll || flagIndexHeader),
mapper.WithIndexCollections(flagIndexAll || flagIndexCollections),
mapper.WithIndexGuarantees(flagIndexAll || flagIndexGuarantees),
mapper.WithIndexTransactions(flagIndexAll || flagIndexTransactions),
mapper.WithIndexResults(flagIndexAll || flagIndexResults),
mapper.WithIndexEvents(flagIndexAll || flagIndexEvents),
mapper.WithIndexPayloads(flagIndexAll || flagIndexPayloads),
mapper.WithIndexSeals(flagIndexAll || flagIndexSeals),
mapper.WithSkipBootstrap(flagSkipBootstrap),
)
forest := forest.New()
state := mapper.EmptyState(forest)
fsm := mapper.NewFSM(state,
mapper.WithTransition(mapper.StatusEmpty, transitions.BootstrapState),
mapper.WithTransition(mapper.StatusUpdating, transitions.UpdateTree),
mapper.WithTransition(mapper.StatusMatched, transitions.Collect |
size := rcrowley.NewSize("store")
mout.Register(size)
codec = metrics.NewCodec(codec, size)
}
| conditional_block |
main.go | 13/pflag"
"github.com/optakt/flow-dps/codec/zbor"
"github.com/optakt/flow-dps/metrics/output"
"github.com/optakt/flow-dps/metrics/rcrowley"
"github.com/optakt/flow-dps/models/dps"
"github.com/optakt/flow-dps/service/chain"
"github.com/optakt/flow-dps/service/feeder"
"github.com/optakt/flow-dps/service/forest"
"github.com/optakt/flow-dps/service/index"
"github.com/optakt/flow-dps/service/loader"
"github.com/optakt/flow-dps/service/mapper"
"github.com/optakt/flow-dps/service/metrics"
"github.com/optakt/flow-dps/service/storage"
)
const (
success = 0
failure = 1
)
func main() { |
func run() int {
// Signal catching for clean shutdown.
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt)
// Command line parameter initialization.
var (
flagCheckpoint string
flagData string
flagForce bool
flagIndex string
flagIndexAll bool
flagIndexCollections bool
flagIndexGuarantees bool
flagIndexCommit bool
flagIndexEvents bool
flagIndexHeader bool
flagIndexPayloads bool
flagIndexResults bool
flagIndexTransactions bool
flagIndexSeals bool
flagLevel string
flagMetrics bool
flagMetricsInterval time.Duration
flagSkipBootstrap bool
flagTrie string
)
pflag.StringVarP(&flagCheckpoint, "checkpoint", "c", "", "checkpoint file for state trie")
pflag.StringVarP(&flagData, "data", "d", "", "database directory for protocol data")
pflag.BoolVarP(&flagForce, "force", "f", false, "overwrite existing index database")
pflag.StringVarP(&flagIndex, "index", "i", "index", "database directory for state index")
pflag.BoolVarP(&flagIndexAll, "index-all", "a", false, "index everything")
pflag.BoolVar(&flagIndexCollections, "index-collections", false, "index collections")
pflag.BoolVar(&flagIndexGuarantees, "index-guarantees", false, "index collection guarantees")
pflag.BoolVar(&flagIndexCommit, "index-commits", false, "index commits")
pflag.BoolVar(&flagIndexEvents, "index-events", false, "index events")
pflag.BoolVar(&flagIndexHeader, "index-headers", false, "index headers")
pflag.BoolVar(&flagIndexPayloads, "index-payloads", false, "index payloads")
pflag.BoolVar(&flagIndexResults, "index-results", false, "index transaction results")
pflag.BoolVar(&flagIndexTransactions, "index-transactions", false, "index transactions")
pflag.BoolVar(&flagIndexSeals, "index-seals", false, "index seals")
pflag.StringVarP(&flagLevel, "level", "l", "info", "log output level")
pflag.BoolVarP(&flagMetrics, "metrics", "m", false, "enable metrics collection and output")
pflag.DurationVar(&flagMetricsInterval, "metrics-interval", 5*time.Minute, "defines the interval of metrics output to log")
pflag.BoolVar(&flagSkipBootstrap, "skip-bootstrap", false, "enable skipping checkpoint register payloads indexing")
pflag.StringVarP(&flagTrie, "trie", "t", "", "data directory for state ledger")
pflag.Parse()
// Increase the GOMAXPROCS value in order to use the full IOPS available, see:
// https://groups.google.com/g/golang-nuts/c/jPb_h3TvlKE
_ = runtime.GOMAXPROCS(128)
// Logger initialization.
zerolog.TimestampFunc = func() time.Time { return time.Now().UTC() }
log := zerolog.New(os.Stderr).With().Timestamp().Logger().Level(zerolog.DebugLevel)
level, err := zerolog.ParseLevel(flagLevel)
if err != nil {
log.Error().Str("level", flagLevel).Err(err).Msg("could not parse log level")
return failure
}
log = log.Level(level)
// Ensure that at least one index is specified.
if !flagIndexAll && !flagIndexCommit && !flagIndexHeader && !flagIndexPayloads && !flagIndexCollections &&
!flagIndexGuarantees && !flagIndexTransactions && !flagIndexResults && !flagIndexEvents && !flagIndexSeals {
log.Error().Str("level", flagLevel).Msg("no indexing option specified, use -a/--all to build all indexes")
pflag.Usage()
return failure
}
// Fail if IndexAll is specified along with other index flags, as this would most likely mean that the user does
// not understand what they are doing.
if flagIndexAll && (flagIndexCommit || flagIndexHeader || flagIndexPayloads || flagIndexGuarantees ||
flagIndexCollections || flagIndexTransactions || flagIndexResults || flagIndexEvents || flagIndexSeals) {
log.Error().Str("level", flagLevel).Msg("-a/--all is mutually exclusive with specific indexing flags")
pflag.Usage()
return failure
}
// Open index database.
db, err := badger.Open(dps.DefaultOptions(flagIndex))
if err != nil {
log.Error().Str("index", flagIndex).Err(err).Msg("could not open index DB")
return failure
}
defer db.Close()
// Open protocol state database.
data, err := badger.Open(dps.DefaultOptions(flagData))
if err != nil {
log.Error().Err(err).Msg("could not open blockchain database")
return failure
}
defer data.Close()
// We initialize a metrics logger regardless of whether metrics are enabled;
// it will just do nothing if there are no registered metrics.
mout := output.New(log, flagMetricsInterval)
// The storage library is initialized with a codec and provides functions to
// interact with a Badger database while encoding and compressing
// transparently.
var codec dps.Codec
codec, err = zbor.NewCodec()
if err != nil {
log.Error().Err(err).Msg("could not initialize storage codec")
return failure
}
if flagMetrics {
size := rcrowley.NewSize("store")
mout.Register(size)
codec = metrics.NewCodec(codec, size)
}
storage := storage.New(codec)
// Check if index already exists.
_, err = index.NewReader(db, storage).First()
indexExists := err == nil
if indexExists && !flagForce {
log.Error().Err(err).Msg("index already exists, manually delete it or use (-f, --force) to overwrite it")
return failure
}
// The loader component is responsible for loading and decoding the checkpoint.
load := loader.New(
loader.WithCheckpointPath(flagCheckpoint),
)
// The chain is responsible for reading blockchain data from the protocol state.
var disk dps.Chain
disk = chain.FromDisk(data)
if flagMetrics {
time := rcrowley.NewTime("read")
mout.Register(time)
disk = metrics.NewChain(disk, time)
}
// Feeder is responsible for reading the write-ahead log of the execution state.
segments, err := wal.NewSegmentsReader(flagTrie)
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not open segments reader")
return failure
}
feed, err := feeder.FromDisk(wal.NewReader(segments))
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not initialize feeder")
return failure
}
// Writer is responsible for writing the index data to the index database.
index := index.NewWriter(db, storage)
defer func() {
err := index.Close()
if err != nil {
log.Error().Err(err).Msg("could not close index")
}
}()
write := dps.Writer(index)
if flagMetrics {
time := rcrowley.NewTime("write")
mout.Register(time)
write = metrics.NewWriter(write, time)
}
// Initialize the transitions with the dependencies and add them to the FSM.
transitions := mapper.NewTransitions(log, load, disk, feed, write,
mapper.WithIndexCommit(flagIndexAll || flagIndexCommit),
mapper.WithIndexHeader(flagIndexAll || flagIndexHeader),
mapper.WithIndexCollections(flagIndexAll || flagIndexCollections),
mapper.WithIndexGuarantees(flagIndexAll || flagIndexGuarantees),
mapper.WithIndexTransactions(flagIndexAll || flagIndexTransactions),
mapper.WithIndexResults(flagIndexAll || flagIndexResults),
mapper.WithIndexEvents(flagIndexAll || flagIndexEvents),
mapper.WithIndexPayloads(flagIndexAll || flagIndexPayloads),
mapper.WithIndexSeals(flagIndexAll || flagIndexSeals),
mapper.WithSkipBootstrap(flagSkipBootstrap),
)
forest := forest.New()
state := mapper.EmptyState(forest)
fsm := mapper.NewFSM(state,
mapper.WithTransition(mapper.StatusEmpty, transitions.BootstrapState),
mapper.WithTransition(mapper.StatusUpdating, transitions.UpdateTree),
mapper.WithTransition(mapper.StatusMatched, transitions.Collect |
os.Exit(run())
}
| identifier_body |
main.go | "
"github.com/optakt/flow-dps/service/chain"
"github.com/optakt/flow-dps/service/feeder"
"github.com/optakt/flow-dps/service/forest"
"github.com/optakt/flow-dps/service/index"
"github.com/optakt/flow-dps/service/loader"
"github.com/optakt/flow-dps/service/mapper"
"github.com/optakt/flow-dps/service/metrics"
"github.com/optakt/flow-dps/service/storage"
)
const (
success = 0
failure = 1
)
func main() {
os.Exit(run())
}
func run() int {
// Signal catching for clean shutdown.
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt)
// Command line parameter initialization.
var (
flagCheckpoint string
flagData string
flagForce bool
flagIndex string
flagIndexAll bool
flagIndexCollections bool
flagIndexGuarantees bool
flagIndexCommit bool
flagIndexEvents bool
flagIndexHeader bool
flagIndexPayloads bool
flagIndexResults bool
flagIndexTransactions bool
flagIndexSeals bool
flagLevel string
flagMetrics bool
flagMetricsInterval time.Duration
flagSkipBootstrap bool
flagTrie string
)
pflag.StringVarP(&flagCheckpoint, "checkpoint", "c", "", "checkpoint file for state trie")
pflag.StringVarP(&flagData, "data", "d", "", "database directory for protocol data")
pflag.BoolVarP(&flagForce, "force", "f", false, "overwrite existing index database")
pflag.StringVarP(&flagIndex, "index", "i", "index", "database directory for state index")
pflag.BoolVarP(&flagIndexAll, "index-all", "a", false, "index everything")
pflag.BoolVar(&flagIndexCollections, "index-collections", false, "index collections")
pflag.BoolVar(&flagIndexGuarantees, "index-guarantees", false, "index collection guarantees")
pflag.BoolVar(&flagIndexCommit, "index-commits", false, "index commits")
pflag.BoolVar(&flagIndexEvents, "index-events", false, "index events")
pflag.BoolVar(&flagIndexHeader, "index-headers", false, "index headers")
pflag.BoolVar(&flagIndexPayloads, "index-payloads", false, "index payloads")
pflag.BoolVar(&flagIndexResults, "index-results", false, "index transaction results")
pflag.BoolVar(&flagIndexTransactions, "index-transactions", false, "index transactions")
pflag.BoolVar(&flagIndexSeals, "index-seals", false, "index seals")
pflag.StringVarP(&flagLevel, "level", "l", "info", "log output level")
pflag.BoolVarP(&flagMetrics, "metrics", "m", false, "enable metrics collection and output")
pflag.DurationVar(&flagMetricsInterval, "metrics-interval", 5*time.Minute, "defines the interval of metrics output to log")
pflag.BoolVar(&flagSkipBootstrap, "skip-bootstrap", false, "enable skipping checkpoint register payloads indexing")
pflag.StringVarP(&flagTrie, "trie", "t", "", "data directory for state ledger")
pflag.Parse()
// Increase the GOMAXPROCS value in order to use the full IOPS available, see:
// https://groups.google.com/g/golang-nuts/c/jPb_h3TvlKE
_ = runtime.GOMAXPROCS(128)
// Logger initialization.
zerolog.TimestampFunc = func() time.Time { return time.Now().UTC() }
log := zerolog.New(os.Stderr).With().Timestamp().Logger().Level(zerolog.DebugLevel)
level, err := zerolog.ParseLevel(flagLevel)
if err != nil {
log.Error().Str("level", flagLevel).Err(err).Msg("could not parse log level")
return failure
}
log = log.Level(level)
// Ensure that at least one index is specified.
if !flagIndexAll && !flagIndexCommit && !flagIndexHeader && !flagIndexPayloads && !flagIndexCollections &&
!flagIndexGuarantees && !flagIndexTransactions && !flagIndexResults && !flagIndexEvents && !flagIndexSeals {
log.Error().Str("level", flagLevel).Msg("no indexing option specified, use -a/--all to build all indexes")
pflag.Usage()
return failure
}
// Fail if IndexAll is specified along with other index flags, as this would most likely mean that the user does
// not understand what they are doing.
if flagIndexAll && (flagIndexCommit || flagIndexHeader || flagIndexPayloads || flagIndexGuarantees ||
flagIndexCollections || flagIndexTransactions || flagIndexResults || flagIndexEvents || flagIndexSeals) {
log.Error().Str("level", flagLevel).Msg("-a/--all is mutually exclusive with specific indexing flags")
pflag.Usage()
return failure
}
// Open index database.
db, err := badger.Open(dps.DefaultOptions(flagIndex))
if err != nil {
log.Error().Str("index", flagIndex).Err(err).Msg("could not open index DB")
return failure
}
defer db.Close()
// Open protocol state database.
data, err := badger.Open(dps.DefaultOptions(flagData))
if err != nil {
log.Error().Err(err).Msg("could not open blockchain database")
return failure
}
defer data.Close()
// We initialize a metrics logger regardless of whether metrics are enabled;
// it will just do nothing if there are no registered metrics.
mout := output.New(log, flagMetricsInterval)
// The storage library is initialized with a codec and provides functions to
// interact with a Badger database while encoding and compressing
// transparently.
var codec dps.Codec
codec, err = zbor.NewCodec()
if err != nil {
log.Error().Err(err).Msg("could not initialize storage codec")
return failure
}
if flagMetrics {
size := rcrowley.NewSize("store")
mout.Register(size)
codec = metrics.NewCodec(codec, size)
}
storage := storage.New(codec)
// Check if index already exists.
_, err = index.NewReader(db, storage).First()
indexExists := err == nil
if indexExists && !flagForce {
log.Error().Err(err).Msg("index already exists, manually delete it or use (-f, --force) to overwrite it")
return failure
}
// The loader component is responsible for loading and decoding the checkpoint.
load := loader.New(
loader.WithCheckpointPath(flagCheckpoint),
)
// The chain is responsible for reading blockchain data from the protocol state.
var disk dps.Chain
disk = chain.FromDisk(data)
if flagMetrics {
time := rcrowley.NewTime("read")
mout.Register(time)
disk = metrics.NewChain(disk, time)
}
// Feeder is responsible for reading the write-ahead log of the execution state.
segments, err := wal.NewSegmentsReader(flagTrie)
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not open segments reader")
return failure
}
feed, err := feeder.FromDisk(wal.NewReader(segments))
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not initialize feeder")
return failure
}
// Writer is responsible for writing the index data to the index database.
index := index.NewWriter(db, storage)
defer func() {
err := index.Close()
if err != nil {
log.Error().Err(err).Msg("could not close index")
}
}()
write := dps.Writer(index)
if flagMetrics {
time := rcrowley.NewTime("write")
mout.Register(time)
write = metrics.NewWriter(write, time)
}
// Initialize the transitions with the dependencies and add them to the FSM.
transitions := mapper.NewTransitions(log, load, disk, feed, write,
mapper.WithIndexCommit(flagIndexAll || flagIndexCommit),
mapper.WithIndexHeader(flagIndexAll || flagIndexHeader),
mapper.WithIndexCollections(flagIndexAll || flagIndexCollections),
mapper.WithIndexGuarantees(flagIndexAll || flagIndexGuarantees),
mapper.WithIndexTransactions(flagIndexAll || flagIndexTransactions),
mapper.WithIndexResults(flagIndexAll || flagIndexResults),
mapper.WithIndexEvents(flagIndexAll || flagIndexEvents),
mapper.WithIndexPayloads(flagIndexAll || flagIndexPayloads),
mapper.WithIndexSeals(flagIndexAll || flagIndexSeals),
mapper.WithSkipBootstrap(flagSkipBootstrap),
)
forest := forest.New()
state := mapper.EmptyState(forest)
fsm := mapper.NewFSM(state,
mapper.WithTransition(mapper.StatusEmpty, transitions.BootstrapState),
mapper.WithTransition(mapper.StatusUpdating, transitions.UpdateTree),
mapper.WithTransition(mapper.StatusMatched, transitions.CollectRegisters),
mapper.WithTransition(mapper.StatusCollected, transitions.IndexRegisters),
mapper.WithTransition(mapper.StatusIndexed, transitions.ForwardHeight),
mapper.WithTransition(mapper.StatusForwarded, transitions.IndexChain), | )
// This section launches the main executing components in their own | random_line_split |
|
fixtures.go | success {
tb.Fail()
exportLogs(tb, testData, "afterSetupTest", true)
}
}()
tb.Logf("Creating '%s' K8s Namespace", testNamespace)
if err := ensureAntreaRunning(testData); err != nil {
return nil, err
}
if err := testData.createTestNamespace(); err != nil {
return nil, err
}
success = true
return testData, nil
}
func setupTestWithIPFIXCollector(tb testing.TB) (*TestData, bool, bool, error) {
v4Enabled := clusterInfo.podV4NetworkCIDR != ""
v6Enabled := clusterInfo.podV6NetworkCIDR != ""
testData, err := setupTest(tb)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
// Create pod using ipfix collector image
if err = testData.createPodOnNode("ipfix-collector", testNamespace, "", ipfixCollectorImage, nil, nil, nil, nil, true, nil); err != nil {
tb.Errorf("Error when creating the ipfix collector Pod: %v", err)
}
ipfixCollectorIP, err := testData.podWaitForIPs(defaultTimeout, "ipfix-collector", testNamespace)
if err != nil || len(ipfixCollectorIP.ipStrings) == 0 {
tb.Errorf("Error when waiting to get ipfix collector Pod IP: %v", err)
return nil, v4Enabled, v6Enabled, err
}
var ipStr string
if v6Enabled && ipfixCollectorIP.ipv6 != nil {
ipStr = ipfixCollectorIP.ipv6.String()
} else {
ipStr = ipfixCollectorIP.ipv4.String()
}
ipfixCollectorAddr := fmt.Sprintf("%s:tcp", net.JoinHostPort(ipStr, ipfixCollectorPort))
faClusterIPAddr := ""
tb.Logf("Applying flow aggregator YAML with ipfix collector address: %s", ipfixCollectorAddr)
faClusterIP, err := testData.deployFlowAggregator(ipfixCollectorAddr)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
if testOptions.providerName == "kind" {
// In Kind cluster, there are issues with DNS name resolution on worker nodes.
// Please note that CoreDNS services are forced on to control-plane Node.
faClusterIPAddr = fmt.Sprintf("%s:%s:tls", faClusterIP, ipfixCollectorPort)
}
tb.Logf("Deploying flow exporter with collector address: %s", faClusterIPAddr)
if err = testData.deployAntreaFlowExporter(faClusterIPAddr); err != nil {
return testData, v4Enabled, v6Enabled, err
}
tb.Logf("Checking CoreDNS deployment")
if err = testData.checkCoreDNSPods(defaultTimeout); err != nil {
return testData, v4Enabled, v6Enabled, err
}
return testData, v4Enabled, v6Enabled, nil
}
func exportLogs(tb testing.TB, data *TestData, logsSubDir string, writeNodeLogs bool) {
if tb.Skipped() {
return
}
// if test was successful and --logs-export-on-success was not provided, we do not export
// any logs.
if !tb.Failed() && !testOptions.logsExportOnSuccess {
return
}
const timeFormat = "Jan02-15-04-05"
timeStamp := time.Now().Format(timeFormat)
logsDir := filepath.Join(data.logsDirForTestCase, fmt.Sprintf("%s.%s", logsSubDir, timeStamp))
err := createDirectory(logsDir)
if err != nil {
tb.Errorf("Error when creating logs directory '%s': %v", logsDir, err)
return
}
tb.Logf("Exporting test logs to '%s'", logsDir)
// for now we just retrieve the logs for the Antrea Pods, but maybe we can find a good way to
// retrieve the logs for the test Pods in the future (before deleting them) if it is useful
// for debugging.
// getPodWriter creates the file with name nodeName-podName-suffix. It returns nil if the
// file cannot be created. File must be closed by the caller.
getPodWriter := func(nodeName, podName, suffix string) *os.File {
logFile := filepath.Join(logsDir, fmt.Sprintf("%s-%s-%s", nodeName, podName, suffix))
f, err := os.Create(logFile)
if err != nil {
tb.Errorf("Error when creating log file '%s': '%v'", logFile, err)
return nil
}
return f
}
// runKubectl runs the provided kubectl command on the control-plane Node and returns the
// output. It returns an empty string in case of error.
runKubectl := func(cmd string) string {
rc, stdout, _, err := RunCommandOnNode(controlPlaneNodeName(), cmd)
if err != nil || rc != 0 {
tb.Errorf("Error when running this kubectl command on control-plane Node: %s", cmd)
return ""
}
return stdout
}
// dump the logs for Antrea Pods to disk.
writePodLogs := func(nodeName, podName, nsName string) error {
w := getPodWriter(nodeName, podName, "logs")
if w == nil {
return nil
}
defer w.Close()
cmd := fmt.Sprintf("kubectl -n %s logs --all-containers %s", nsName, podName)
stdout := runKubectl(cmd)
if stdout == "" {
return nil
}
w.WriteString(stdout)
return nil
}
data.forAllMatchingPodsInNamespace("k8s-app=kube-proxy", kubeNamespace, writePodLogs)
data.forAllMatchingPodsInNamespace("app=antrea", antreaNamespace, writePodLogs)
// dump the logs for monitoring Pods to disk.
data.forAllMatchingPodsInNamespace("", monitoringNamespace, writePodLogs)
// dump the logs for flow-aggregator Pods to disk.
data.forAllMatchingPodsInNamespace("", flowAggregatorNamespace, writePodLogs)
// dump the output of "kubectl describe" for Antrea pods to disk.
data.forAllMatchingPodsInNamespace("app=antrea", antreaNamespace, func(nodeName, podName, nsName string) error {
w := getPodWriter(nodeName, podName, "describe")
if w == nil {
return nil
}
defer w.Close()
cmd := fmt.Sprintf("kubectl -n %s describe pod %s", nsName, podName)
stdout := runKubectl(cmd)
if stdout == "" {
return nil
}
w.WriteString(stdout)
return nil
})
if !writeNodeLogs {
return
}
// getNodeWriter creates the file with name nodeName-suffix. It returns nil if the file
// cannot be created. File must be closed by the caller.
getNodeWriter := func(nodeName, suffix string) *os.File {
logFile := filepath.Join(logsDir, fmt.Sprintf("%s-%s", nodeName, suffix))
f, err := os.Create(logFile)
if err != nil {
tb.Errorf("Error when creating log file '%s': '%v'", logFile, err)
return nil
}
return f
}
// export kubelet logs with journalctl for each Node. If the Nodes do not use journalctl we
// print a log message. If kubelet is not run with systemd, the log file will be empty.
if err := forAllNodes(func(nodeName string) error {
const numLines = 100
// --no-pager ensures the command does not hang.
cmd := fmt.Sprintf("journalctl -u kubelet -n %d --no-pager", numLines)
if clusterInfo.nodesOS[nodeName] == "windows" {
cmd = "Get-EventLog -LogName \"System\" -Source \"Service Control Manager\" | grep kubelet ; Get-EventLog -LogName \"Application\" -Source \"nssm\" | grep kubelet"
}
rc, stdout, _, err := RunCommandOnNode(nodeName, cmd)
if err != nil || rc != 0 {
// return an error and skip subsequent Nodes
return fmt.Errorf("error when running journalctl on Node '%s', is it available? Error: %v", nodeName, err)
}
w := getNodeWriter(nodeName, "kubelet")
if w == nil {
// move on to the next Node
return nil
}
defer w.Close()
w.WriteString(stdout)
return nil
}); err != nil {
tb.Logf("Error when exporting kubelet logs: %v", err)
}
}
func teardownFlowAggregator(tb testing.TB, data *TestData) {
if testOptions.enableCoverage {
if err := testData.gracefulExitFlowAggregator(testOptions.coverageDir); err != nil {
tb.Fatalf("Error when gracefully exiting Flow Aggregator: %v", err)
}
}
tb.Logf("Deleting '%s' K8s Namespace", flowAggregatorNamespace)
if err := data.deleteNamespace(flowAggregatorNamespace, defaultTimeout); err != nil {
tb.Logf("Error when tearing down flow aggregator: %v", err)
}
}
func | teardownTest | identifier_name |
|
fixtures.go | Collector(tb testing.TB) (*TestData, bool, bool, error) {
v4Enabled := clusterInfo.podV4NetworkCIDR != ""
v6Enabled := clusterInfo.podV6NetworkCIDR != ""
testData, err := setupTest(tb)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
// Create pod using ipfix collector image
if err = testData.createPodOnNode("ipfix-collector", testNamespace, "", ipfixCollectorImage, nil, nil, nil, nil, true, nil); err != nil {
tb.Errorf("Error when creating the ipfix collector Pod: %v", err)
}
ipfixCollectorIP, err := testData.podWaitForIPs(defaultTimeout, "ipfix-collector", testNamespace)
if err != nil || len(ipfixCollectorIP.ipStrings) == 0 {
tb.Errorf("Error when waiting to get ipfix collector Pod IP: %v", err)
return nil, v4Enabled, v6Enabled, err
}
var ipStr string
if v6Enabled && ipfixCollectorIP.ipv6 != nil {
ipStr = ipfixCollectorIP.ipv6.String()
} else {
ipStr = ipfixCollectorIP.ipv4.String()
}
ipfixCollectorAddr := fmt.Sprintf("%s:tcp", net.JoinHostPort(ipStr, ipfixCollectorPort))
faClusterIPAddr := ""
tb.Logf("Applying flow aggregator YAML with ipfix collector address: %s", ipfixCollectorAddr)
faClusterIP, err := testData.deployFlowAggregator(ipfixCollectorAddr)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
if testOptions.providerName == "kind" {
// In Kind cluster, there are issues with DNS name resolution on worker nodes.
// Please note that CoreDNS services are forced on to control-plane Node.
faClusterIPAddr = fmt.Sprintf("%s:%s:tls", faClusterIP, ipfixCollectorPort)
}
tb.Logf("Deploying flow exporter with collector address: %s", faClusterIPAddr)
if err = testData.deployAntreaFlowExporter(faClusterIPAddr); err != nil {
return testData, v4Enabled, v6Enabled, err
}
tb.Logf("Checking CoreDNS deployment")
if err = testData.checkCoreDNSPods(defaultTimeout); err != nil {
return testData, v4Enabled, v6Enabled, err
}
return testData, v4Enabled, v6Enabled, nil
}
func exportLogs(tb testing.TB, data *TestData, logsSubDir string, writeNodeLogs bool) {
if tb.Skipped() {
return
}
// if test was successful and --logs-export-on-success was not provided, we do not export
// any logs.
if !tb.Failed() && !testOptions.logsExportOnSuccess {
return
}
const timeFormat = "Jan02-15-04-05"
timeStamp := time.Now().Format(timeFormat)
logsDir := filepath.Join(data.logsDirForTestCase, fmt.Sprintf("%s.%s", logsSubDir, timeStamp))
err := createDirectory(logsDir)
if err != nil {
tb.Errorf("Error when creating logs directory '%s': %v", logsDir, err)
return
}
tb.Logf("Exporting test logs to '%s'", logsDir)
// for now we just retrieve the logs for the Antrea Pods, but maybe we can find a good way to
// retrieve the logs for the test Pods in the future (before deleting them) if it is useful
// for debugging.
// getPodWriter creates the file with name nodeName-podName-suffix. It returns nil if the
// file cannot be created. File must be closed by the caller.
getPodWriter := func(nodeName, podName, suffix string) *os.File {
logFile := filepath.Join(logsDir, fmt.Sprintf("%s-%s-%s", nodeName, podName, suffix))
f, err := os.Create(logFile)
if err != nil {
tb.Errorf("Error when creating log file '%s': '%v'", logFile, err)
return nil
}
return f
}
// runKubectl runs the provided kubectl command on the control-plane Node and returns the
// output. It returns an empty string in case of error.
runKubectl := func(cmd string) string {
rc, stdout, _, err := RunCommandOnNode(controlPlaneNodeName(), cmd)
if err != nil || rc != 0 {
tb.Errorf("Error when running this kubectl command on control-plane Node: %s", cmd)
return ""
}
return stdout
}
// dump the logs for Antrea Pods to disk.
writePodLogs := func(nodeName, podName, nsName string) error {
w := getPodWriter(nodeName, podName, "logs")
if w == nil {
return nil
}
defer w.Close()
cmd := fmt.Sprintf("kubectl -n %s logs --all-containers %s", nsName, podName)
stdout := runKubectl(cmd)
if stdout == "" {
return nil
}
w.WriteString(stdout)
return nil
}
data.forAllMatchingPodsInNamespace("k8s-app=kube-proxy", kubeNamespace, writePodLogs)
data.forAllMatchingPodsInNamespace("app=antrea", antreaNamespace, writePodLogs)
// dump the logs for monitoring Pods to disk.
data.forAllMatchingPodsInNamespace("", monitoringNamespace, writePodLogs)
// dump the logs for flow-aggregator Pods to disk.
data.forAllMatchingPodsInNamespace("", flowAggregatorNamespace, writePodLogs)
// dump the output of "kubectl describe" for Antrea pods to disk.
data.forAllMatchingPodsInNamespace("app=antrea", antreaNamespace, func(nodeName, podName, nsName string) error {
w := getPodWriter(nodeName, podName, "describe")
if w == nil {
return nil
}
defer w.Close()
cmd := fmt.Sprintf("kubectl -n %s describe pod %s", nsName, podName)
stdout := runKubectl(cmd)
if stdout == "" {
return nil
}
w.WriteString(stdout)
return nil
})
if !writeNodeLogs {
return
}
// getNodeWriter creates the file with name nodeName-suffix. It returns nil if the file
// cannot be created. File must be closed by the caller.
getNodeWriter := func(nodeName, suffix string) *os.File {
logFile := filepath.Join(logsDir, fmt.Sprintf("%s-%s", nodeName, suffix))
f, err := os.Create(logFile)
if err != nil {
tb.Errorf("Error when creating log file '%s': '%v'", logFile, err)
return nil
}
return f
}
// export kubelet logs with journalctl for each Node. If the Nodes do not use journalctl we
// print a log message. If kubelet is not run with systemd, the log file will be empty.
if err := forAllNodes(func(nodeName string) error {
const numLines = 100
// --no-pager ensures the command does not hang.
cmd := fmt.Sprintf("journalctl -u kubelet -n %d --no-pager", numLines)
if clusterInfo.nodesOS[nodeName] == "windows" {
cmd = "Get-EventLog -LogName \"System\" -Source \"Service Control Manager\" | grep kubelet ; Get-EventLog -LogName \"Application\" -Source \"nssm\" | grep kubelet"
}
rc, stdout, _, err := RunCommandOnNode(nodeName, cmd)
if err != nil || rc != 0 {
// return an error and skip subsequent Nodes
return fmt.Errorf("error when running journalctl on Node '%s', is it available? Error: %v", nodeName, err)
}
w := getNodeWriter(nodeName, "kubelet")
if w == nil {
// move on to the next Node
return nil
}
defer w.Close()
w.WriteString(stdout)
return nil
}); err != nil {
tb.Logf("Error when exporting kubelet logs: %v", err)
}
}
func teardownFlowAggregator(tb testing.TB, data *TestData) {
if testOptions.enableCoverage {
if err := testData.gracefulExitFlowAggregator(testOptions.coverageDir); err != nil {
tb.Fatalf("Error when gracefully exiting Flow Aggregator: %v", err)
}
}
tb.Logf("Deleting '%s' K8s Namespace", flowAggregatorNamespace)
if err := data.deleteNamespace(flowAggregatorNamespace, defaultTimeout); err != nil {
tb.Logf("Error when tearing down flow aggregator: %v", err)
}
}
func teardownTest(tb testing.TB, data *TestData) {
exportLogs(tb, data, "beforeTeardown", true)
if empty, _ := IsDirEmpty(data.logsDirForTestCase); empty {
_ = os.Remove(data.logsDirForTestCase)
}
tb.Logf("Deleting '%s' K8s Namespace", testNamespace)
if err := data.deleteTestNamespace(defaultTimeout); err != nil {
tb.Logf("Error when tearing down test: %v", err)
}
}
| random_line_split |
||
fixtures.go | range requiredModules {
// modprobe with "--dry-run" does not require root privileges
cmd := fmt.Sprintf("modprobe --dry-run %s", module)
rc, stdout, stderr, err := RunCommandOnNode(nodeName, cmd)
if err != nil {
tb.Skipf("Skipping test as modprobe could not be run to confirm the presence of module '%s': %v", module, err)
}
if rc != 0 {
tb.Skipf("Skipping test as modprobe exited with an error when trying to confirm the presence of module '%s' - stdout: %s - stderr: %s", module, stdout, stderr)
}
}
tb.Logf("The following modules have been found on Node '%s': %v", nodeName, requiredModules)
}
func skipIfEncapModeIsNot(tb testing.TB, data *TestData, encapMode config.TrafficEncapModeType) {
currentEncapMode, err := data.GetEncapMode()
if err != nil {
tb.Fatalf("Failed to get encap mode: %v", err)
}
if currentEncapMode != encapMode {
tb.Skipf("Skipping test for encap mode '%s', test requires '%s'", currentEncapMode.String(), encapMode.String())
}
}
func skipIfHasWindowsNodes(tb testing.TB) {
if len(clusterInfo.windowsNodes) != 0 {
tb.Skipf("Skipping test as the cluster has Windows Nodes")
}
}
func skipIfNoWindowsNodes(tb testing.TB) {
if len(clusterInfo.windowsNodes) == 0 {
tb.Skipf("Skipping test as the cluster has no Windows Nodes")
}
}
func skipIfFeatureDisabled(tb testing.TB, feature featuregate.Feature, checkAgent bool, checkController bool) {
if checkAgent {
if featureGate, err := GetAgentFeatures(); err != nil {
tb.Fatalf("Cannot determine if %s is enabled in the Agent: %v", feature, err)
} else if !featureGate.Enabled(feature) {
tb.Skipf("Skipping test because %s is not enabled in the Agent", feature)
}
}
if checkController {
if featureGate, err := GetControllerFeatures(); err != nil {
tb.Fatalf("Cannot determine if %s is enabled in the Controller: %v", feature, err)
} else if !featureGate.Enabled(feature) {
tb.Skipf("Skipping test because %s is not enabled in the Controller", feature)
}
}
}
func ensureAntreaRunning(data *TestData) error {
log.Println("Applying Antrea YAML")
if err := data.deployAntrea(deployAntreaDefault); err != nil {
return err
}
log.Println("Waiting for all Antrea DaemonSet Pods")
if err := data.waitForAntreaDaemonSetPods(defaultTimeout); err != nil {
return err
}
log.Println("Checking CoreDNS deployment")
if err := data.checkCoreDNSPods(defaultTimeout); err != nil {
return err
}
return nil
}
func createDirectory(path string) error {
return os.Mkdir(path, 0700)
}
func (data *TestData) setupLogDirectoryForTest(testName string) error {
path := filepath.Join(testOptions.logsExportDir, testName)
// remove directory if it already exists. This ensures that we start with an empty
// directory
_ = os.RemoveAll(path)
err := createDirectory(path)
if err != nil {
return err
}
data.logsDirForTestCase = path
return nil
}
func setupTest(tb testing.TB) (*TestData, error) {
if err := testData.setupLogDirectoryForTest(tb.Name()); err != nil {
tb.Errorf("Error creating logs directory '%s': %v", testData.logsDirForTestCase, err)
return nil, err
}
success := false
defer func() {
if !success {
tb.Fail()
exportLogs(tb, testData, "afterSetupTest", true)
}
}()
tb.Logf("Creating '%s' K8s Namespace", testNamespace)
if err := ensureAntreaRunning(testData); err != nil {
return nil, err
}
if err := testData.createTestNamespace(); err != nil {
return nil, err
}
success = true
return testData, nil
}
func setupTestWithIPFIXCollector(tb testing.TB) (*TestData, bool, bool, error) {
v4Enabled := clusterInfo.podV4NetworkCIDR != ""
v6Enabled := clusterInfo.podV6NetworkCIDR != ""
testData, err := setupTest(tb)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
// Create pod using ipfix collector image
if err = testData.createPodOnNode("ipfix-collector", testNamespace, "", ipfixCollectorImage, nil, nil, nil, nil, true, nil); err != nil {
tb.Errorf("Error when creating the ipfix collector Pod: %v", err)
}
ipfixCollectorIP, err := testData.podWaitForIPs(defaultTimeout, "ipfix-collector", testNamespace)
if err != nil || len(ipfixCollectorIP.ipStrings) == 0 {
tb.Errorf("Error when waiting to get ipfix collector Pod IP: %v", err)
return nil, v4Enabled, v6Enabled, err
}
var ipStr string
if v6Enabled && ipfixCollectorIP.ipv6 != nil {
ipStr = ipfixCollectorIP.ipv6.String()
} else {
ipStr = ipfixCollectorIP.ipv4.String()
}
ipfixCollectorAddr := fmt.Sprintf("%s:tcp", net.JoinHostPort(ipStr, ipfixCollectorPort))
faClusterIPAddr := ""
tb.Logf("Applying flow aggregator YAML with ipfix collector address: %s", ipfixCollectorAddr)
faClusterIP, err := testData.deployFlowAggregator(ipfixCollectorAddr)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
if testOptions.providerName == "kind" {
// In Kind cluster, there are issues with DNS name resolution on worker nodes.
// Please note that CoreDNS services are forced on to control-plane Node.
faClusterIPAddr = fmt.Sprintf("%s:%s:tls", faClusterIP, ipfixCollectorPort)
}
tb.Logf("Deploying flow exporter with collector address: %s", faClusterIPAddr)
if err = testData.deployAntreaFlowExporter(faClusterIPAddr); err != nil |
tb.Logf("Checking CoreDNS deployment")
if err = testData.checkCoreDNSPods(defaultTimeout); err != nil {
return testData, v4Enabled, v6Enabled, err
}
return testData, v4Enabled, v6Enabled, nil
}
func exportLogs(tb testing.TB, data *TestData, logsSubDir string, writeNodeLogs bool) {
if tb.Skipped() {
return
}
// if test was successful and --logs-export-on-success was not provided, we do not export
// any logs.
if !tb.Failed() && !testOptions.logsExportOnSuccess {
return
}
const timeFormat = "Jan02-15-04-05"
timeStamp := time.Now().Format(timeFormat)
logsDir := filepath.Join(data.logsDirForTestCase, fmt.Sprintf("%s.%s", logsSubDir, timeStamp))
err := createDirectory(logsDir)
if err != nil {
tb.Errorf("Error when creating logs directory '%s': %v", logsDir, err)
return
}
tb.Logf("Exporting test logs to '%s'", logsDir)
// for now we just retrieve the logs for the Antrea Pods, but maybe we can find a good way to
// retrieve the logs for the test Pods in the future (before deleting them) if it is useful
// for debugging.
// getPodWriter creates the file with name nodeName-podName-suffix. It returns nil if the
// file cannot be created. File must be closed by the caller.
getPodWriter := func(nodeName, podName, suffix string) *os.File {
logFile := filepath.Join(logsDir, fmt.Sprintf("%s-%s-%s", nodeName, podName, suffix))
f, err := os.Create(logFile)
if err != nil {
tb.Errorf("Error when creating log file '%s': '%v'", logFile, err)
return nil
}
return f
}
// runKubectl runs the provided kubectl command on the control-plane Node and returns the
// output. It returns an empty string in case of error.
runKubectl := func(cmd string) string {
rc, stdout, _, err := RunCommandOnNode(controlPlaneNodeName(), cmd)
if err != nil || rc != 0 {
tb.Errorf("Error when running this kubectl command on control-plane Node: %s", cmd)
return ""
}
return stdout
}
// dump the logs for Antrea Pods to disk.
writePodLogs := func(nodeName, podName, nsName string) error {
w := getPodWriter(nodeName, podName, "logs")
if w == nil {
return nil
}
defer w.Close()
cmd := fmt.Sprintf("kubectl -n %s logs --all-containers %s", nsName, podName)
stdout := runKubectl(cmd)
| {
return testData, v4Enabled, v6Enabled, err
} | conditional_block |
fixtures.go |
func skipIfNotIPv6Cluster(tb testing.TB) {
if clusterInfo.podV6NetworkCIDR == "" {
tb.Skipf("Skipping test as it requires IPv6 addresses but the IPv6 network CIDR is not set")
}
}
func skipIfMissingKernelModule(tb testing.TB, nodeName string, requiredModules []string) {
for _, module := range requiredModules {
// modprobe with "--dry-run" does not require root privileges
cmd := fmt.Sprintf("modprobe --dry-run %s", module)
rc, stdout, stderr, err := RunCommandOnNode(nodeName, cmd)
if err != nil {
tb.Skipf("Skipping test as modprobe could not be run to confirm the presence of module '%s': %v", module, err)
}
if rc != 0 {
tb.Skipf("Skipping test as modprobe exited with an error when trying to confirm the presence of module '%s' - stdout: %s - stderr: %s", module, stdout, stderr)
}
}
tb.Logf("The following modules have been found on Node '%s': %v", nodeName, requiredModules)
}
func skipIfEncapModeIsNot(tb testing.TB, data *TestData, encapMode config.TrafficEncapModeType) {
currentEncapMode, err := data.GetEncapMode()
if err != nil {
tb.Fatalf("Failed to get encap mode: %v", err)
}
if currentEncapMode != encapMode {
tb.Skipf("Skipping test for encap mode '%s', test requires '%s'", currentEncapMode.String(), encapMode.String())
}
}
func skipIfHasWindowsNodes(tb testing.TB) {
if len(clusterInfo.windowsNodes) != 0 {
tb.Skipf("Skipping test as the cluster has Windows Nodes")
}
}
func skipIfNoWindowsNodes(tb testing.TB) {
if len(clusterInfo.windowsNodes) == 0 {
tb.Skipf("Skipping test as the cluster has no Windows Nodes")
}
}
func skipIfFeatureDisabled(tb testing.TB, feature featuregate.Feature, checkAgent bool, checkController bool) {
if checkAgent {
if featureGate, err := GetAgentFeatures(); err != nil {
tb.Fatalf("Cannot determine if %s is enabled in the Agent: %v", feature, err)
} else if !featureGate.Enabled(feature) {
tb.Skipf("Skipping test because %s is not enabled in the Agent", feature)
}
}
if checkController {
if featureGate, err := GetControllerFeatures(); err != nil {
tb.Fatalf("Cannot determine if %s is enabled in the Controller: %v", feature, err)
} else if !featureGate.Enabled(feature) {
tb.Skipf("Skipping test because %s is not enabled in the Controller", feature)
}
}
}
func ensureAntreaRunning(data *TestData) error {
log.Println("Applying Antrea YAML")
if err := data.deployAntrea(deployAntreaDefault); err != nil {
return err
}
log.Println("Waiting for all Antrea DaemonSet Pods")
if err := data.waitForAntreaDaemonSetPods(defaultTimeout); err != nil {
return err
}
log.Println("Checking CoreDNS deployment")
if err := data.checkCoreDNSPods(defaultTimeout); err != nil {
return err
}
return nil
}
func createDirectory(path string) error {
return os.Mkdir(path, 0700)
}
func (data *TestData) setupLogDirectoryForTest(testName string) error {
path := filepath.Join(testOptions.logsExportDir, testName)
// remove directory if it already exists. This ensures that we start with an empty
// directory
_ = os.RemoveAll(path)
err := createDirectory(path)
if err != nil {
return err
}
data.logsDirForTestCase = path
return nil
}
func setupTest(tb testing.TB) (*TestData, error) {
if err := testData.setupLogDirectoryForTest(tb.Name()); err != nil {
tb.Errorf("Error creating logs directory '%s': %v", testData.logsDirForTestCase, err)
return nil, err
}
success := false
defer func() {
if !success {
tb.Fail()
exportLogs(tb, testData, "afterSetupTest", true)
}
}()
tb.Logf("Creating '%s' K8s Namespace", testNamespace)
if err := ensureAntreaRunning(testData); err != nil {
return nil, err
}
if err := testData.createTestNamespace(); err != nil {
return nil, err
}
success = true
return testData, nil
}
func setupTestWithIPFIXCollector(tb testing.TB) (*TestData, bool, bool, error) {
v4Enabled := clusterInfo.podV4NetworkCIDR != ""
v6Enabled := clusterInfo.podV6NetworkCIDR != ""
testData, err := setupTest(tb)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
// Create pod using ipfix collector image
if err = testData.createPodOnNode("ipfix-collector", testNamespace, "", ipfixCollectorImage, nil, nil, nil, nil, true, nil); err != nil {
tb.Errorf("Error when creating the ipfix collector Pod: %v", err)
}
ipfixCollectorIP, err := testData.podWaitForIPs(defaultTimeout, "ipfix-collector", testNamespace)
if err != nil || len(ipfixCollectorIP.ipStrings) == 0 {
tb.Errorf("Error when waiting to get ipfix collector Pod IP: %v", err)
return nil, v4Enabled, v6Enabled, err
}
var ipStr string
if v6Enabled && ipfixCollectorIP.ipv6 != nil {
ipStr = ipfixCollectorIP.ipv6.String()
} else {
ipStr = ipfixCollectorIP.ipv4.String()
}
ipfixCollectorAddr := fmt.Sprintf("%s:tcp", net.JoinHostPort(ipStr, ipfixCollectorPort))
faClusterIPAddr := ""
tb.Logf("Applying flow aggregator YAML with ipfix collector address: %s", ipfixCollectorAddr)
faClusterIP, err := testData.deployFlowAggregator(ipfixCollectorAddr)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
if testOptions.providerName == "kind" {
// In Kind cluster, there are issues with DNS name resolution on worker nodes.
// Please note that CoreDNS services are forced on to control-plane Node.
faClusterIPAddr = fmt.Sprintf("%s:%s:tls", faClusterIP, ipfixCollectorPort)
}
tb.Logf("Deploying flow exporter with collector address: %s", faClusterIPAddr)
if err = testData.deployAntreaFlowExporter(faClusterIPAddr); err != nil {
return testData, v4Enabled, v6Enabled, err
}
tb.Logf("Checking CoreDNS deployment")
if err = testData.checkCoreDNSPods(defaultTimeout); err != nil {
return testData, v4Enabled, v6Enabled, err
}
return testData, v4Enabled, v6Enabled, nil
}
func exportLogs(tb testing.TB, data *TestData, logsSubDir string, writeNodeLogs bool) {
if tb.Skipped() {
return
}
// if test was successful and --logs-export-on-success was not provided, we do not export
// any logs.
if !tb.Failed() && !testOptions.logsExportOnSuccess {
return
}
const timeFormat = "Jan02-15-04-05"
timeStamp := time.Now().Format(timeFormat)
logsDir := filepath.Join(data.logsDirForTestCase, fmt.Sprintf("%s.%s", logsSubDir, timeStamp))
err := createDirectory(logsDir)
if err != nil {
tb.Errorf("Error when creating logs directory '%s': %v", logsDir, err)
return
}
tb.Logf("Exporting test logs to '%s'", logsDir)
// for now we just retrieve the logs for the Antrea Pods, but maybe we can find a good way to
// retrieve the logs for the test Pods in the future (before deleting them) if it is useful
// for debugging.
// getPodWriter creates the file with name nodeName-podName-suffix. It returns nil if the
// file cannot be created. File must be closed by the caller.
getPodWriter := func(nodeName, podName, suffix string) *os.File {
logFile := filepath.Join(logsDir, fmt.Sprintf("%s-%s-%s", nodeName, podName, suffix))
f, err := os.Create(logFile)
if err != nil {
tb.Errorf("Error when creating log file '%s': '%v'", logFile, err)
return nil
}
return f
}
// runKubectl runs the provided kubectl command on the control-plane Node and returns the
// output. It returns an empty string in case of error.
runKubectl := func(cmd string) string {
rc, stdout, _, err := RunCommandOnNode(controlPlaneNodeName(), cmd)
if err != nil || rc != 0 {
tb.Errorf("Error when running this kubectl command on control-plane Node: %s", cmd | {
if clusterInfo.podV6NetworkCIDR != "" {
tb.Skipf("Skipping test as it is not supported in IPv6 cluster")
}
} | identifier_body |
|
lineChart.component.ts | total?:number;
}
@Component({
selector: 'line-chart',
template: `<div id="{{controlUid}}"></div>`
})
export class LineChartComponent extends ViewComponent implements OnInit, AfterViewInit, OnDestroy, HandleDataFunc {
@Input() dataList: DataListItem[];
/** First element is Name of the Field a string
* Followed by the other elements being a number.
* This fits in with how C3 charts work.
*
* [ ["nameOfField_1", number, number, number, number],
* ["nameOfField_2", number, number, number, number]
* ]
*/
@Input() series: any[][];
@Input() legends: string[];
@Input() type: string;
@Input() divideBy: number;
@Input() chartFormatter: ChartFormatter;
@Input() minY?: number = 0;
@Input() maxY?: number = 100;
@Input() labelY?: string = 'Label Y';
@Input() interactive: boolean;
public chart:any;
public conf:any;
public columns:any;
public linechartData:any;
public units: string = '';
public showLegendValues: boolean = false;
public legendEvents: BehaviorSubject<any>;
public legendLabels: BehaviorSubject<any>;
public legendAnalytics: BehaviorSubject<any>;
data: LineChartData = {
labels: [],
series: [],
//meta: {}
};
colorPattern = ["#2196f3", "#009688", "#ffc107", "#9c27b0", "#607d8b", "#00bcd4", "#8bc34a", "#ffeb3b", "#e91e63", "#3f51b5"];
timeFormat: string = "%H:%M";
culling:number = 6;
controlUid: string;
constructor(private core:CoreService, private _lineChartService: LineChartService) {
super();
this.legendEvents = new BehaviorSubject(false);
this.legendLabels = new BehaviorSubject([]);
this.legendAnalytics = new BehaviorSubject([]);
}
handleDataFunc(linechartData: LineChartData) {
//console.log(linechartData);
this.data.labels.splice(0, this.data.labels.length);
this.data.series.splice(0, this.data.series.length);
if(linechartData.meta){
this.units = linechartData.meta.units;
}
linechartData.labels.forEach((label) => {this.data.labels.push(new Date(label))});
linechartData.series.forEach((dataSeriesArray) => {
const newArray = [];
if(!linechartData.meta)console.log(linechartData);
if (typeof (this.divideBy) !== 'undefined' || linechartData.meta.conversion) {
dataSeriesArray.forEach((numberVal) => {
if(linechartData.meta.conversion){
newArray.push(this.convertTo(numberVal, linechartData.meta.conversion));
} else if (numberVal > 0) {
newArray.push((numberVal / this.divideBy).toFixed(2));
} else {
newArray.push(numberVal);
}
});
dataSeriesArray = newArray;
} else {
dataSeriesArray.forEach((numberVal) => {
if(numberVal > 0){
newArray.push(numberVal.toFixed(2));
} else {
newArray.push(numberVal);
}
});
dataSeriesArray = newArray;
}
this.data.series.push(dataSeriesArray);
});
const columns: any[][] = [];
let legendLabels: string[] = [];
// xColumn
const xValues: any[] = [];
xValues.push('xValues');
this.data.labels.forEach((label) => {
xValues.push(label);
});
columns.push(xValues);
// For C3.. Put the name of the series as the first element of each series array
for (let i = 0; i < this.legends.length && this.data.series.length; ++i) {
let legend: string;
if(linechartData.meta.removePrefix){
legend = this.legends[i].replace(linechartData.meta.removePrefix, "");
} else {
legend = this.legends[i];
}
legendLabels.push(legend);
let series: any[] = this.data.series[i];
if( typeof(series) !== 'undefined' && series.length > 0 ) {
series.unshift(legend);
} else {
series = [legend];
}
columns.push(series);
}
this.columns = columns;
this.linechartData = linechartData;
this.legendLabels.next(legendLabels);
this.analyze(columns);
this.render();
}
public render(conf?:any){
if(!conf){
conf = this.makeConfig();
}
let colors = this.colorsFromTheme();
const color = {
pattern: colors
}
conf.color = color;
this.chart = c3.generate(conf);
}
//this.chart = c3.generate({
public makeConfig(){
let conf = {
interaction: {
enabled:this.interactive
},
bindto: '#' + this.controlUid,
/*color: {
pattern: this.colorPattern
},*/
data: {
columns: this.columns,
//colors: this.createColorObject(),
x: 'xValues',
//xFormat: '%H:%M',
type: 'line',
onmouseout: (d) => {
this.showLegendValues = false;
}
},
axis: {
x: {
type: 'timeseries',
tick: {
//format: '%H:%M:%S',
format: this.timeFormat,
fit: true,
//values: ['01:10', '03:10', '06:10']
culling: {
max: this.culling
}
}
},
y:{
tick: {
format: (y) => { return y + this.linechartData.meta.units}
},
label: {
text:this.linechartData.meta. labelY,
position: 'outer-middle',
}
//default: [this.minY,this.maxY],
/*min: this.minY,
max: this.maxY*/
}
},
grid:{
x:{
show: true
},
y:{
show: true
}
},
subchart: {
show: false
},
legend: {
show:false
},
zoom: {
enabled: false
},
tooltip: {
show: true,
contents: (raw, defaultTitleFormat, defaultValueFormat, color) => {
if(!this.showLegendValues){
this.showLegendValues = true;
}
if(raw.value == Number(0)){
raw.value == raw.value.toString()
}
this.legendEvents.next(raw);
return '<div style="display:none">' + raw[0].x + '</div>';
}
}
}
return conf;
}
/*private setupPiechart() {
const chart = c3.generate({
bindto: '#' + this.controlUid,
data: {
columns: this.series,
type: 'pie'
},
pie: {
label: {
format: this.chartFormatter.format
}
}
});
}*/
private processThemeColors(theme):string[]{
let colors: string[] = [];
theme.accentColors.map((color) => {
colors.push(theme[color]);
});
return colors;
}
private createColorObject(){
let obj = {};
this.legends.forEach((item, index)=>{
obj[item] = this.colorPattern[index]
})
return obj;
}
public fetchData(rrdOptions, timeformat?: string, culling?:number){
if(timeformat){
this.timeFormat = timeformat;
}
if(culling){
this.culling = culling;
}
// Convert from milliseconds to seconds for epoch time
rrdOptions.start = Math.floor(rrdOptions.start / 1000);
if(rrdOptions.end) |
// This is the time portion of the API call.
this._lineChartService.getData(this, this.dataList, rrdOptions);
}
public convertTo(value, conversion){
let result;
switch(conversion){
case 'bytesToGigabytes':
result = value / 1073741824;
break;
case 'percentFloatToInteger':
result = value * 100;
break;
}
return result.toFixed(2);
}
// Analytics
analyze(columns){
let allColumns: Analytics[] = [];
let cols = Object.assign([], columns);
// Remove X axis
cols.shift(columns[0]);
for(let i = 0; i < cols.length; i++){
// Middleware provides data as strings
// so we store the label (first item)
// and convert the rest to numbers
let colStrings = cols[i];
let label = colStrings[0];
let col = colStrings.map(x => Number(x));
col.shift(col[0]);
let total = col.length > 0 ? col.reduce((accumulator, currentValue) => Number(accumulator) + Number(currentValue)) : "N/A";
| {
rrdOptions.end = Math.floor(rrdOptions.end / 1000);
} | conditional_block |
lineChart.component.ts | total?:number;
}
@Component({
selector: 'line-chart',
template: `<div id="{{controlUid}}"></div>`
})
export class LineChartComponent extends ViewComponent implements OnInit, AfterViewInit, OnDestroy, HandleDataFunc {
@Input() dataList: DataListItem[];
/** First element is Name of the Field a string
* Followed by the other elements being a number.
* This fits in with how C3 charts work.
*
* [ ["nameOfField_1", number, number, number, number],
* ["nameOfField_2", number, number, number, number]
* ]
*/
@Input() series: any[][];
@Input() legends: string[];
@Input() type: string;
@Input() divideBy: number;
@Input() chartFormatter: ChartFormatter;
@Input() minY?: number = 0;
@Input() maxY?: number = 100;
@Input() labelY?: string = 'Label Y';
@Input() interactive: boolean;
public chart:any;
public conf:any;
public columns:any;
public linechartData:any;
public units: string = '';
public showLegendValues: boolean = false;
public legendEvents: BehaviorSubject<any>;
public legendLabels: BehaviorSubject<any>;
public legendAnalytics: BehaviorSubject<any>;
data: LineChartData = {
labels: [],
series: [],
//meta: {}
};
colorPattern = ["#2196f3", "#009688", "#ffc107", "#9c27b0", "#607d8b", "#00bcd4", "#8bc34a", "#ffeb3b", "#e91e63", "#3f51b5"];
timeFormat: string = "%H:%M";
culling:number = 6;
controlUid: string;
constructor(private core:CoreService, private _lineChartService: LineChartService) {
super();
this.legendEvents = new BehaviorSubject(false);
this.legendLabels = new BehaviorSubject([]);
this.legendAnalytics = new BehaviorSubject([]);
}
handleDataFunc(linechartData: LineChartData) {
//console.log(linechartData);
this.data.labels.splice(0, this.data.labels.length);
this.data.series.splice(0, this.data.series.length);
if(linechartData.meta){
this.units = linechartData.meta.units;
}
linechartData.labels.forEach((label) => {this.data.labels.push(new Date(label))});
linechartData.series.forEach((dataSeriesArray) => {
const newArray = [];
if(!linechartData.meta)console.log(linechartData);
if (typeof (this.divideBy) !== 'undefined' || linechartData.meta.conversion) {
dataSeriesArray.forEach((numberVal) => {
if(linechartData.meta.conversion){
newArray.push(this.convertTo(numberVal, linechartData.meta.conversion));
} else if (numberVal > 0) {
newArray.push((numberVal / this.divideBy).toFixed(2));
} else {
newArray.push(numberVal);
}
});
dataSeriesArray = newArray;
} else {
dataSeriesArray.forEach((numberVal) => {
if(numberVal > 0){
newArray.push(numberVal.toFixed(2));
} else {
newArray.push(numberVal);
}
});
dataSeriesArray = newArray;
}
this.data.series.push(dataSeriesArray);
});
const columns: any[][] = [];
let legendLabels: string[] = [];
// xColumn
const xValues: any[] = [];
xValues.push('xValues');
this.data.labels.forEach((label) => {
xValues.push(label);
});
columns.push(xValues);
// For C3.. Put the name of the series as the first element of each series array
for (let i = 0; i < this.legends.length && this.data.series.length; ++i) {
let legend: string;
if(linechartData.meta.removePrefix){
legend = this.legends[i].replace(linechartData.meta.removePrefix, "");
} else {
legend = this.legends[i];
}
legendLabels.push(legend);
let series: any[] = this.data.series[i];
if( typeof(series) !== 'undefined' && series.length > 0 ) {
series.unshift(legend);
} else {
series = [legend];
}
columns.push(series);
}
this.columns = columns;
this.linechartData = linechartData;
this.legendLabels.next(legendLabels);
this.analyze(columns);
this.render();
}
public render(conf?:any){
if(!conf){
conf = this.makeConfig();
}
let colors = this.colorsFromTheme();
const color = {
pattern: colors
}
conf.color = color;
this.chart = c3.generate(conf);
}
//this.chart = c3.generate({
public | (){
let conf = {
interaction: {
enabled:this.interactive
},
bindto: '#' + this.controlUid,
/*color: {
pattern: this.colorPattern
},*/
data: {
columns: this.columns,
//colors: this.createColorObject(),
x: 'xValues',
//xFormat: '%H:%M',
type: 'line',
onmouseout: (d) => {
this.showLegendValues = false;
}
},
axis: {
x: {
type: 'timeseries',
tick: {
//format: '%H:%M:%S',
format: this.timeFormat,
fit: true,
//values: ['01:10', '03:10', '06:10']
culling: {
max: this.culling
}
}
},
y:{
tick: {
format: (y) => { return y + this.linechartData.meta.units}
},
label: {
text:this.linechartData.meta. labelY,
position: 'outer-middle',
}
//default: [this.minY,this.maxY],
/*min: this.minY,
max: this.maxY*/
}
},
grid:{
x:{
show: true
},
y:{
show: true
}
},
subchart: {
show: false
},
legend: {
show:false
},
zoom: {
enabled: false
},
tooltip: {
show: true,
contents: (raw, defaultTitleFormat, defaultValueFormat, color) => {
if(!this.showLegendValues){
this.showLegendValues = true;
}
if(raw.value == Number(0)){
raw.value == raw.value.toString()
}
this.legendEvents.next(raw);
return '<div style="display:none">' + raw[0].x + '</div>';
}
}
}
return conf;
}
/*private setupPiechart() {
const chart = c3.generate({
bindto: '#' + this.controlUid,
data: {
columns: this.series,
type: 'pie'
},
pie: {
label: {
format: this.chartFormatter.format
}
}
});
}*/
private processThemeColors(theme):string[]{
let colors: string[] = [];
theme.accentColors.map((color) => {
colors.push(theme[color]);
});
return colors;
}
private createColorObject(){
let obj = {};
this.legends.forEach((item, index)=>{
obj[item] = this.colorPattern[index]
})
return obj;
}
public fetchData(rrdOptions, timeformat?: string, culling?:number){
if(timeformat){
this.timeFormat = timeformat;
}
if(culling){
this.culling = culling;
}
// Convert from milliseconds to seconds for epoch time
rrdOptions.start = Math.floor(rrdOptions.start / 1000);
if(rrdOptions.end){
rrdOptions.end = Math.floor(rrdOptions.end / 1000);
}
// This is the time portion of the API call.
this._lineChartService.getData(this, this.dataList, rrdOptions);
}
public convertTo(value, conversion){
let result;
switch(conversion){
case 'bytesToGigabytes':
result = value / 1073741824;
break;
case 'percentFloatToInteger':
result = value * 100;
break;
}
return result.toFixed(2);
}
// Analytics
analyze(columns){
let allColumns: Analytics[] = [];
let cols = Object.assign([], columns);
// Remove X axis
cols.shift(columns[0]);
for(let i = 0; i < cols.length; i++){
// Middleware provides data as strings
// so we store the label (first item)
// and convert the rest to numbers
let colStrings = cols[i];
let label = colStrings[0];
let col = colStrings.map(x => Number(x));
col.shift(col[0]);
let total = col.length > 0 ? col.reduce((accumulator, currentValue) => Number(accumulator) + Number(currentValue)) : "N/A";
| makeConfig | identifier_name |
lineChart.component.ts | = 'Label Y';
@Input() interactive: boolean;
public chart:any;
public conf:any;
public columns:any;
public linechartData:any;
public units: string = '';
public showLegendValues: boolean = false;
public legendEvents: BehaviorSubject<any>;
public legendLabels: BehaviorSubject<any>;
public legendAnalytics: BehaviorSubject<any>;
data: LineChartData = {
labels: [],
series: [],
//meta: {}
};
colorPattern = ["#2196f3", "#009688", "#ffc107", "#9c27b0", "#607d8b", "#00bcd4", "#8bc34a", "#ffeb3b", "#e91e63", "#3f51b5"];
timeFormat: string = "%H:%M";
culling:number = 6;
controlUid: string;
constructor(private core:CoreService, private _lineChartService: LineChartService) {
super();
this.legendEvents = new BehaviorSubject(false);
this.legendLabels = new BehaviorSubject([]);
this.legendAnalytics = new BehaviorSubject([]);
}
handleDataFunc(linechartData: LineChartData) {
//console.log(linechartData);
this.data.labels.splice(0, this.data.labels.length);
this.data.series.splice(0, this.data.series.length);
if(linechartData.meta){
this.units = linechartData.meta.units;
}
linechartData.labels.forEach((label) => {this.data.labels.push(new Date(label))});
linechartData.series.forEach((dataSeriesArray) => {
const newArray = [];
if(!linechartData.meta)console.log(linechartData);
if (typeof (this.divideBy) !== 'undefined' || linechartData.meta.conversion) {
dataSeriesArray.forEach((numberVal) => {
if(linechartData.meta.conversion){
newArray.push(this.convertTo(numberVal, linechartData.meta.conversion));
} else if (numberVal > 0) {
newArray.push((numberVal / this.divideBy).toFixed(2));
} else {
newArray.push(numberVal);
}
});
dataSeriesArray = newArray;
} else {
dataSeriesArray.forEach((numberVal) => {
if(numberVal > 0){
newArray.push(numberVal.toFixed(2));
} else {
newArray.push(numberVal);
}
});
dataSeriesArray = newArray;
}
this.data.series.push(dataSeriesArray);
});
const columns: any[][] = [];
let legendLabels: string[] = [];
// xColumn
const xValues: any[] = [];
xValues.push('xValues');
this.data.labels.forEach((label) => {
xValues.push(label);
});
columns.push(xValues);
// For C3.. Put the name of the series as the first element of each series array
for (let i = 0; i < this.legends.length && this.data.series.length; ++i) {
let legend: string;
if(linechartData.meta.removePrefix){
legend = this.legends[i].replace(linechartData.meta.removePrefix, "");
} else {
legend = this.legends[i];
}
legendLabels.push(legend);
let series: any[] = this.data.series[i];
if( typeof(series) !== 'undefined' && series.length > 0 ) {
series.unshift(legend);
} else {
series = [legend];
}
columns.push(series);
}
this.columns = columns;
this.linechartData = linechartData;
this.legendLabels.next(legendLabels);
this.analyze(columns);
this.render();
}
public render(conf?:any){
if(!conf){
conf = this.makeConfig();
}
let colors = this.colorsFromTheme();
const color = {
pattern: colors
}
conf.color = color;
this.chart = c3.generate(conf);
}
//this.chart = c3.generate({
public makeConfig(){
let conf = {
interaction: {
enabled:this.interactive
},
bindto: '#' + this.controlUid,
/*color: {
pattern: this.colorPattern
},*/
data: {
columns: this.columns,
//colors: this.createColorObject(),
x: 'xValues',
//xFormat: '%H:%M',
type: 'line',
onmouseout: (d) => {
this.showLegendValues = false;
}
},
axis: {
x: {
type: 'timeseries',
tick: {
//format: '%H:%M:%S',
format: this.timeFormat,
fit: true,
//values: ['01:10', '03:10', '06:10']
culling: {
max: this.culling
}
}
},
y:{
tick: {
format: (y) => { return y + this.linechartData.meta.units}
},
label: {
text:this.linechartData.meta. labelY,
position: 'outer-middle',
}
//default: [this.minY,this.maxY],
/*min: this.minY,
max: this.maxY*/
}
},
grid:{
x:{
show: true
},
y:{
show: true
}
},
subchart: {
show: false
},
legend: {
show:false
},
zoom: {
enabled: false
},
tooltip: {
show: true,
contents: (raw, defaultTitleFormat, defaultValueFormat, color) => {
if(!this.showLegendValues){
this.showLegendValues = true;
}
if(raw.value == Number(0)){
raw.value == raw.value.toString()
}
this.legendEvents.next(raw);
return '<div style="display:none">' + raw[0].x + '</div>';
}
}
}
return conf;
}
/*private setupPiechart() {
const chart = c3.generate({
bindto: '#' + this.controlUid,
data: {
columns: this.series,
type: 'pie'
},
pie: {
label: {
format: this.chartFormatter.format
}
}
});
}*/
private processThemeColors(theme):string[]{
let colors: string[] = [];
theme.accentColors.map((color) => {
colors.push(theme[color]);
});
return colors;
}
private createColorObject(){
let obj = {};
this.legends.forEach((item, index)=>{
obj[item] = this.colorPattern[index]
})
return obj;
}
public fetchData(rrdOptions, timeformat?: string, culling?:number){
if(timeformat){
this.timeFormat = timeformat;
}
if(culling){
this.culling = culling;
}
// Convert from milliseconds to seconds for epoch time
rrdOptions.start = Math.floor(rrdOptions.start / 1000);
if(rrdOptions.end){
rrdOptions.end = Math.floor(rrdOptions.end / 1000);
}
// This is the time portion of the API call.
this._lineChartService.getData(this, this.dataList, rrdOptions);
}
public convertTo(value, conversion){
let result;
switch(conversion){
case 'bytesToGigabytes':
result = value / 1073741824;
break;
case 'percentFloatToInteger':
result = value * 100;
break;
}
return result.toFixed(2);
}
// Analytics
analyze(columns){
let allColumns: Analytics[] = [];
let cols = Object.assign([], columns);
// Remove X axis
cols.shift(columns[0]);
for(let i = 0; i < cols.length; i++){
// Middleware provides data as strings
// so we store the label (first item)
// and convert the rest to numbers
let colStrings = cols[i];
let label = colStrings[0];
let col = colStrings.map(x => Number(x));
col.shift(col[0]);
let total = col.length > 0 ? col.reduce((accumulator, currentValue) => Number(accumulator) + Number(currentValue)) : "N/A";
let avg = total !== "N/A" ? Number((total / col.length).toFixed(2)) : total;
//console.log("Total type: " + typeof col.length)
let myResult:Analytics = {
label:label,
min: total !== "N/A" ? this.getMin(col) : total ,//.toFixed(2),
max: total !== "N/A" ? this.getMax(col) : total,//.toFixed(2),
avg: avg,
last: total !== "N/A" ? Number(col[col.length - 1].toFixed(2)) : total,
total: total !== "N/A" ? Number(total.toFixed(2)) : total
}
allColumns.push(myResult);
}
this.legendAnalytics.next(allColumns);
}
getMin(arr:any[]){
return Math.min(...arr);
}
getMax(arr:any[]){
return Math.max(...arr);
}
getAvg(arr:any[]){
return 1;
}
getLast(arr:any[]) | {
return 1
} | identifier_body |
|
lineChart.component.ts | total?:number;
}
@Component({
selector: 'line-chart',
template: `<div id="{{controlUid}}"></div>`
})
export class LineChartComponent extends ViewComponent implements OnInit, AfterViewInit, OnDestroy, HandleDataFunc {
@Input() dataList: DataListItem[];
/** First element is Name of the Field a string
* Followed by the other elements being a number.
* This fits in with how C3 charts work.
*
* [ ["nameOfField_1", number, number, number, number],
* ["nameOfField_2", number, number, number, number]
* ]
*/
@Input() series: any[][];
@Input() legends: string[];
@Input() type: string;
@Input() divideBy: number;
@Input() chartFormatter: ChartFormatter;
@Input() minY?: number = 0;
@Input() maxY?: number = 100;
@Input() labelY?: string = 'Label Y';
@Input() interactive: boolean;
public chart:any;
public conf:any;
public columns:any;
public linechartData:any;
public units: string = '';
public showLegendValues: boolean = false;
public legendEvents: BehaviorSubject<any>;
public legendLabels: BehaviorSubject<any>;
public legendAnalytics: BehaviorSubject<any>;
data: LineChartData = {
labels: [],
series: [],
//meta: {}
};
colorPattern = ["#2196f3", "#009688", "#ffc107", "#9c27b0", "#607d8b", "#00bcd4", "#8bc34a", "#ffeb3b", "#e91e63", "#3f51b5"];
timeFormat: string = "%H:%M";
culling:number = 6;
controlUid: string;
constructor(private core:CoreService, private _lineChartService: LineChartService) {
super();
this.legendEvents = new BehaviorSubject(false);
this.legendLabels = new BehaviorSubject([]);
this.legendAnalytics = new BehaviorSubject([]);
}
handleDataFunc(linechartData: LineChartData) {
//console.log(linechartData);
this.data.labels.splice(0, this.data.labels.length);
this.data.series.splice(0, this.data.series.length);
if(linechartData.meta){
this.units = linechartData.meta.units;
}
linechartData.labels.forEach((label) => {this.data.labels.push(new Date(label))});
linechartData.series.forEach((dataSeriesArray) => {
const newArray = [];
if(!linechartData.meta)console.log(linechartData);
if (typeof (this.divideBy) !== 'undefined' || linechartData.meta.conversion) {
dataSeriesArray.forEach((numberVal) => {
if(linechartData.meta.conversion){
newArray.push(this.convertTo(numberVal, linechartData.meta.conversion));
} else if (numberVal > 0) {
newArray.push((numberVal / this.divideBy).toFixed(2));
} else {
newArray.push(numberVal);
}
});
dataSeriesArray = newArray;
} else {
dataSeriesArray.forEach((numberVal) => {
if(numberVal > 0){
newArray.push(numberVal.toFixed(2));
} else {
newArray.push(numberVal);
} | dataSeriesArray = newArray;
}
this.data.series.push(dataSeriesArray);
});
const columns: any[][] = [];
let legendLabels: string[] = [];
// xColumn
const xValues: any[] = [];
xValues.push('xValues');
this.data.labels.forEach((label) => {
xValues.push(label);
});
columns.push(xValues);
// For C3.. Put the name of the series as the first element of each series array
for (let i = 0; i < this.legends.length && this.data.series.length; ++i) {
let legend: string;
if(linechartData.meta.removePrefix){
legend = this.legends[i].replace(linechartData.meta.removePrefix, "");
} else {
legend = this.legends[i];
}
legendLabels.push(legend);
let series: any[] = this.data.series[i];
if( typeof(series) !== 'undefined' && series.length > 0 ) {
series.unshift(legend);
} else {
series = [legend];
}
columns.push(series);
}
this.columns = columns;
this.linechartData = linechartData;
this.legendLabels.next(legendLabels);
this.analyze(columns);
this.render();
}
public render(conf?:any){
if(!conf){
conf = this.makeConfig();
}
let colors = this.colorsFromTheme();
const color = {
pattern: colors
}
conf.color = color;
this.chart = c3.generate(conf);
}
//this.chart = c3.generate({
public makeConfig(){
let conf = {
interaction: {
enabled:this.interactive
},
bindto: '#' + this.controlUid,
/*color: {
pattern: this.colorPattern
},*/
data: {
columns: this.columns,
//colors: this.createColorObject(),
x: 'xValues',
//xFormat: '%H:%M',
type: 'line',
onmouseout: (d) => {
this.showLegendValues = false;
}
},
axis: {
x: {
type: 'timeseries',
tick: {
//format: '%H:%M:%S',
format: this.timeFormat,
fit: true,
//values: ['01:10', '03:10', '06:10']
culling: {
max: this.culling
}
}
},
y:{
tick: {
format: (y) => { return y + this.linechartData.meta.units}
},
label: {
text:this.linechartData.meta. labelY,
position: 'outer-middle',
}
//default: [this.minY,this.maxY],
/*min: this.minY,
max: this.maxY*/
}
},
grid:{
x:{
show: true
},
y:{
show: true
}
},
subchart: {
show: false
},
legend: {
show:false
},
zoom: {
enabled: false
},
tooltip: {
show: true,
contents: (raw, defaultTitleFormat, defaultValueFormat, color) => {
if(!this.showLegendValues){
this.showLegendValues = true;
}
if(raw.value == Number(0)){
raw.value == raw.value.toString()
}
this.legendEvents.next(raw);
return '<div style="display:none">' + raw[0].x + '</div>';
}
}
}
return conf;
}
/*private setupPiechart() {
const chart = c3.generate({
bindto: '#' + this.controlUid,
data: {
columns: this.series,
type: 'pie'
},
pie: {
label: {
format: this.chartFormatter.format
}
}
});
}*/
private processThemeColors(theme):string[]{
let colors: string[] = [];
theme.accentColors.map((color) => {
colors.push(theme[color]);
});
return colors;
}
private createColorObject(){
let obj = {};
this.legends.forEach((item, index)=>{
obj[item] = this.colorPattern[index]
})
return obj;
}
public fetchData(rrdOptions, timeformat?: string, culling?:number){
if(timeformat){
this.timeFormat = timeformat;
}
if(culling){
this.culling = culling;
}
// Convert from milliseconds to seconds for epoch time
rrdOptions.start = Math.floor(rrdOptions.start / 1000);
if(rrdOptions.end){
rrdOptions.end = Math.floor(rrdOptions.end / 1000);
}
// This is the time portion of the API call.
this._lineChartService.getData(this, this.dataList, rrdOptions);
}
public convertTo(value, conversion){
let result;
switch(conversion){
case 'bytesToGigabytes':
result = value / 1073741824;
break;
case 'percentFloatToInteger':
result = value * 100;
break;
}
return result.toFixed(2);
}
// Analytics
analyze(columns){
let allColumns: Analytics[] = [];
let cols = Object.assign([], columns);
// Remove X axis
cols.shift(columns[0]);
for(let i = 0; i < cols.length; i++){
// Middleware provides data as strings
// so we store the label (first item)
// and convert the rest to numbers
let colStrings = cols[i];
let label = colStrings[0];
let col = colStrings.map(x => Number(x));
col.shift(col[0]);
let total = col.length > 0 ? col.reduce((accumulator, currentValue) => Number(accumulator) + Number(currentValue)) : "N/A";
| }); | random_line_split |
micro_updater.py | Sending end "END" message')
message = self.mqtt_client.publish(self.topic, payload='END', retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
self.mqtt_client.reconnect()
message = self.mqtt_client.publish(self.topic, payload='END', retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
logger.error(f'[{self.identity()}]: Message not sent')
return
logger.debug(f'[{self.identity()}]: Message sent')
def send_new_release(self, release_json):
logger.debug(f"[{self.identity()}]: Sending {release_json} on {self.topic}...")
message = self.mqtt_client.publish(self.topic, payload='', retain=True)
self.mqtt_wait_publish(message)
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
self.mqtt_client.reconnect()
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
logger.error(f'[{self.identity()}]: Message not sent')
return
logger.debug('Message sent')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
start = time()
t_out = timeout * 1000
while not message.is_published() and time()-start < t_out:
sleep(0.1)
if message.is_published():
return True
return False
def send_file(self, file: RepoFile) -> bool:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((self.ip, self.port))
with open(file.path, 'rb') as f:
content = f.read()
if s.sendall(content) is None:
logger.debug(f'[{self.identity()}]: {file.name} sent.')
return True
else:
logger.error(f'[{self.identity()}]: {file.name} not sent.')
return False
except socket.timeout as stout:
logger.error(f"[{self.identity()}]: Timeout connecting to remote socket; {stout}")
return False
except Exception as e:
logger.error(f"[{self.identity()}]: Error reading file '{file.path}'; {e}")
return False
class FileDownloader(Thread):
def __init__(self, file: RepoFile, download_path: str, trusted: bool):
super().__init__()
self.file = file
self.download_path = download_path
self.response = None
self.trusted = trusted
def run(self) -> None:
try:
if not self._download_file(self.file, self.download_path) or not self._verify_file(self.file):
self.response = False
self.response = True
except SystemExit:
self.response = False
def _download_file(self, file: RepoFile, download_path) -> bool:
logger.debug(f'Downloading {file.name} into {download_path}...')
try:
file_path = os.path.join(download_path, file.name)
wget.download(file.download_link, out=file_path)
file.path = file_path
return True
except Exception as e:
logger.error(f"Can't download {file.name} from {file.download_link}")
return False
def _verify_file(self, file: RepoFile) -> bool: | with open(file.path, 'r') as file_opened:
content = file_opened.read()
size = os.stat(file.path)
hasher = hashlib.sha1(f'blob {len(content)}\x00{content}'.encode("utf-8"))
hash = hasher.hexdigest()
if hash == file.sha:
logger.debug('File integrity OK')
return True
else:
logger.error(f'File hash mismatch\n'
f'Calculated:\t{hash}\n'
f'Correct:\t{file.sha}')
return False
except Exception as e:
logger.error(f"Can't verify {file.name} integrity; {e}")
return False
def kill_thread(self):
thread_id = self.get_id()
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, ctypes.py_object(SystemExit))
class MicroUpdater:
def __init__(self, config_path=DEFAULT_COMPLETE_CONFIG_PATH):
log_format = '<green>{time: YYYY-MM-DD HH:mm:ss.SSS}</green> <level>{level}: {message}</level>'
logger.remove()
logger.add(sys.stdout, format=log_format, colorize=True)
logger.info('Starting MicroUpdater...')
self.config = configparser.ConfigParser()
self.read_configuration(config_path)
logger.add(os.path.join(self.config['logging']['logs_path'], 'log_{time: YYYY-MM-DD}.log'), format=log_format, colorize=True, compression='zip', rotation='00:00')
self.github_client = None
self.repo_obj = None
self.cached_release = None
self.github_init()
self.mqtt_client = MQTTClient(client_id=self.config['mqtt']['id'])
self.mqtt_init()
self.threads = {} # ip: thread
self.server_thread = None
self.server_loop = False
def loop(self):
while True:
try:
while True:
next(thread for thread in self.threads.values() if thread.is_alive())
sleep(1)
except StopIteration:
pass
tag, files = self.check_repo()
files = [file for file in files if ".mpy" in file] # Download compiled files only
if tag is not None:
self._download_files(files)
update_json = self._update_json(files=files, tag=tag)
self.start_server(files, update_json)
sleep(int(self.config['github']['check_rate']))
def server(self, files, update_json):
while self.server_loop:
logger.debug('Server waiting for installed tag...')
topic = self.config['mqtt']['installed_tags_topic']
broker = self.config['mqtt']['broker']
message = subscribe(topic, hostname=broker)
payload = message.payload
msg_str = payload.decode("utf-8")
try:
installed_tag_json = json.loads(msg_str)
if 'ip' not in installed_tag_json or 'tag' not in installed_tag_json:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
except:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
logger.debug(f'New update installed tag from {installed_tag_json["ip"]}')
if installed_tag_json['tag'] != update_json['tag']:
logger.debug(f"Probe out of date: installed {installed_tag_json['tag']}, latest {update_json['tag']}")
self.spawn_update_thread(installed_tag_json['ip'], files, update_json)
def spawn_update_thread(self, ip: str, files, update_json):
logger.debug(f'Spawning new thread for {ip} update...')
broker = self.config['mqtt']['broker']
topic = self.config['mqtt']['installed_tags_topic']
port = self.config['updates']['port']
th = DeviceUpdater(ip, port=port, files=files, broker=broker, installed_tags_topic=topic, release_json=update_json, mqtt_client=self.mqtt_client)
th.start()
self.threads[ip] = th
logger.debug(f'Thread spawned and registered.')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
start = time()
t_out = timeout * 1000
while not message.is_published() and time()-start < t_out:
sleep(0.1)
if message.is_published():
return True
return False
def start_server(self, files, update_json):
logger.debug('Starting update server...')
self.server_loop = False
if self.server_thread is not None:
self.server_thread.join()
self.server_loop = True
self.server_thread = Thread(target=self.server, args=(files, update_json))
self.server_thread.start()
logger.debug('Update server started.')
def mqtt_init(self):
self.mqtt_client.on_connect = self.mqtt_on_connect
self.mqtt_client.on_disconnect = self.mqtt_on_disconnect
self.mqtt_connect()
self.mqtt_client.loop_start()
def mqtt_connect(self):
broker = self.config['mqtt']['broker']
self.mqtt_client.connect(broker)
def mqtt_on_connect(self, client, userdata, flags, rc) -> bool:
if rc == 0:
logger.debug(f'MQTT client connected to {self.config["mqtt"]["broker"]}')
return True
else:
logger.error(f'Connection to the broker failed, response: {rc}')
return False
def mqtt_on_disconnect(self, *args):
logger.warning(f'MQTT client disconnect from the broker')
self.mqtt_client.reconnect()
def read_configuration(self, config_path):
logger.debug(f'Reading configuration file "{config_path}"')
try:
self.config.read(config_path)
except Exception as e:
logger.critical(f'Error reading configuration file; {e}')
logger.critical('Closing...')
exit(1)
try:
sections = self.config.sections()
for section in CONFIGURATION | logger.debug(f'Verifying {file.name} integrity...')
if self.trusted:
logger.warning(f'Skipping {file.name} integrity check')
return True
try: | random_line_split |
micro_updater.py | self.identity()}]: Message not sent')
return
logger.debug(f'[{self.identity()}]: Message sent')
def send_new_release(self, release_json):
logger.debug(f"[{self.identity()}]: Sending {release_json} on {self.topic}...")
message = self.mqtt_client.publish(self.topic, payload='', retain=True)
self.mqtt_wait_publish(message)
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
self.mqtt_client.reconnect()
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
logger.error(f'[{self.identity()}]: Message not sent')
return
logger.debug('Message sent')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
start = time()
t_out = timeout * 1000
while not message.is_published() and time()-start < t_out:
sleep(0.1)
if message.is_published():
return True
return False
def send_file(self, file: RepoFile) -> bool:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((self.ip, self.port))
with open(file.path, 'rb') as f:
content = f.read()
if s.sendall(content) is None:
logger.debug(f'[{self.identity()}]: {file.name} sent.')
return True
else:
logger.error(f'[{self.identity()}]: {file.name} not sent.')
return False
except socket.timeout as stout:
logger.error(f"[{self.identity()}]: Timeout connecting to remote socket; {stout}")
return False
except Exception as e:
logger.error(f"[{self.identity()}]: Error reading file '{file.path}'; {e}")
return False
class FileDownloader(Thread):
def __init__(self, file: RepoFile, download_path: str, trusted: bool):
super().__init__()
self.file = file
self.download_path = download_path
self.response = None
self.trusted = trusted
def run(self) -> None:
try:
if not self._download_file(self.file, self.download_path) or not self._verify_file(self.file):
self.response = False
self.response = True
except SystemExit:
self.response = False
def _download_file(self, file: RepoFile, download_path) -> bool:
logger.debug(f'Downloading {file.name} into {download_path}...')
try:
file_path = os.path.join(download_path, file.name)
wget.download(file.download_link, out=file_path)
file.path = file_path
return True
except Exception as e:
logger.error(f"Can't download {file.name} from {file.download_link}")
return False
def _verify_file(self, file: RepoFile) -> bool:
logger.debug(f'Verifying {file.name} integrity...')
if self.trusted:
logger.warning(f'Skipping {file.name} integrity check')
return True
try:
with open(file.path, 'r') as file_opened:
content = file_opened.read()
size = os.stat(file.path)
hasher = hashlib.sha1(f'blob {len(content)}\x00{content}'.encode("utf-8"))
hash = hasher.hexdigest()
if hash == file.sha:
logger.debug('File integrity OK')
return True
else:
logger.error(f'File hash mismatch\n'
f'Calculated:\t{hash}\n'
f'Correct:\t{file.sha}')
return False
except Exception as e:
logger.error(f"Can't verify {file.name} integrity; {e}")
return False
def kill_thread(self):
thread_id = self.get_id()
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, ctypes.py_object(SystemExit))
class MicroUpdater:
def __init__(self, config_path=DEFAULT_COMPLETE_CONFIG_PATH):
log_format = '<green>{time: YYYY-MM-DD HH:mm:ss.SSS}</green> <level>{level}: {message}</level>'
logger.remove()
logger.add(sys.stdout, format=log_format, colorize=True)
logger.info('Starting MicroUpdater...')
self.config = configparser.ConfigParser()
self.read_configuration(config_path)
logger.add(os.path.join(self.config['logging']['logs_path'], 'log_{time: YYYY-MM-DD}.log'), format=log_format, colorize=True, compression='zip', rotation='00:00')
self.github_client = None
self.repo_obj = None
self.cached_release = None
self.github_init()
self.mqtt_client = MQTTClient(client_id=self.config['mqtt']['id'])
self.mqtt_init()
self.threads = {} # ip: thread
self.server_thread = None
self.server_loop = False
def loop(self):
while True:
try:
while True:
next(thread for thread in self.threads.values() if thread.is_alive())
sleep(1)
except StopIteration:
pass
tag, files = self.check_repo()
files = [file for file in files if ".mpy" in file] # Download compiled files only
if tag is not None:
self._download_files(files)
update_json = self._update_json(files=files, tag=tag)
self.start_server(files, update_json)
sleep(int(self.config['github']['check_rate']))
def server(self, files, update_json):
while self.server_loop:
logger.debug('Server waiting for installed tag...')
topic = self.config['mqtt']['installed_tags_topic']
broker = self.config['mqtt']['broker']
message = subscribe(topic, hostname=broker)
payload = message.payload
msg_str = payload.decode("utf-8")
try:
installed_tag_json = json.loads(msg_str)
if 'ip' not in installed_tag_json or 'tag' not in installed_tag_json:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
except:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
logger.debug(f'New update installed tag from {installed_tag_json["ip"]}')
if installed_tag_json['tag'] != update_json['tag']:
logger.debug(f"Probe out of date: installed {installed_tag_json['tag']}, latest {update_json['tag']}")
self.spawn_update_thread(installed_tag_json['ip'], files, update_json)
def spawn_update_thread(self, ip: str, files, update_json):
logger.debug(f'Spawning new thread for {ip} update...')
broker = self.config['mqtt']['broker']
topic = self.config['mqtt']['installed_tags_topic']
port = self.config['updates']['port']
th = DeviceUpdater(ip, port=port, files=files, broker=broker, installed_tags_topic=topic, release_json=update_json, mqtt_client=self.mqtt_client)
th.start()
self.threads[ip] = th
logger.debug(f'Thread spawned and registered.')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
start = time()
t_out = timeout * 1000
while not message.is_published() and time()-start < t_out:
sleep(0.1)
if message.is_published():
return True
return False
def start_server(self, files, update_json):
logger.debug('Starting update server...')
self.server_loop = False
if self.server_thread is not None:
self.server_thread.join()
self.server_loop = True
self.server_thread = Thread(target=self.server, args=(files, update_json))
self.server_thread.start()
logger.debug('Update server started.')
def mqtt_init(self):
self.mqtt_client.on_connect = self.mqtt_on_connect
self.mqtt_client.on_disconnect = self.mqtt_on_disconnect
self.mqtt_connect()
self.mqtt_client.loop_start()
def mqtt_connect(self):
broker = self.config['mqtt']['broker']
self.mqtt_client.connect(broker)
def mqtt_on_connect(self, client, userdata, flags, rc) -> bool:
if rc == 0:
logger.debug(f'MQTT client connected to {self.config["mqtt"]["broker"]}')
return True
else:
logger.error(f'Connection to the broker failed, response: {rc}')
return False
def mqtt_on_disconnect(self, *args):
logger.warning(f'MQTT client disconnect from the broker')
self.mqtt_client.reconnect()
def read_configuration(self, config_path):
logger.debug(f'Reading configuration file "{config_path}"')
try:
self.config.read(config_path)
except Exception as e:
logger.critical(f'Error reading configuration file; {e}')
logger.critical('Closing...')
exit(1)
try:
sections = self.config.sections()
for section in CONFIGURATION_LAYOUT:
assert section in sections
for key in CONFIGURATION_LAYOUT[section]:
assert key in self.config[section]
except AssertionError:
logger.critical(f'Configuration file malformed, creating sample as "{DEFAULT_COMPLETE_CONFIG_PATH}"...')
for section in CONFIGURATION_LAYOUT:
| self.config[section] = {}
for key in CONFIGURATION_LAYOUT[section]:
self.config[section][key] = f'<{key}>' | conditional_block |
|
micro_updater.py | ]: Message not sent')
return
logger.debug(f'[{self.identity()}]: Message sent')
def send_new_release(self, release_json):
logger.debug(f"[{self.identity()}]: Sending {release_json} on {self.topic}...")
message = self.mqtt_client.publish(self.topic, payload='', retain=True)
self.mqtt_wait_publish(message)
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
self.mqtt_client.reconnect()
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
logger.error(f'[{self.identity()}]: Message not sent')
return
logger.debug('Message sent')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
start = time()
t_out = timeout * 1000
while not message.is_published() and time()-start < t_out:
sleep(0.1)
if message.is_published():
return True
return False
def send_file(self, file: RepoFile) -> bool:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((self.ip, self.port))
with open(file.path, 'rb') as f:
content = f.read()
if s.sendall(content) is None:
logger.debug(f'[{self.identity()}]: {file.name} sent.')
return True
else:
logger.error(f'[{self.identity()}]: {file.name} not sent.')
return False
except socket.timeout as stout:
logger.error(f"[{self.identity()}]: Timeout connecting to remote socket; {stout}")
return False
except Exception as e:
logger.error(f"[{self.identity()}]: Error reading file '{file.path}'; {e}")
return False
class FileDownloader(Thread):
def __init__(self, file: RepoFile, download_path: str, trusted: bool):
super().__init__()
self.file = file
self.download_path = download_path
self.response = None
self.trusted = trusted
def run(self) -> None:
try:
if not self._download_file(self.file, self.download_path) or not self._verify_file(self.file):
self.response = False
self.response = True
except SystemExit:
self.response = False
def _download_file(self, file: RepoFile, download_path) -> bool:
logger.debug(f'Downloading {file.name} into {download_path}...')
try:
file_path = os.path.join(download_path, file.name)
wget.download(file.download_link, out=file_path)
file.path = file_path
return True
except Exception as e:
logger.error(f"Can't download {file.name} from {file.download_link}")
return False
def _verify_file(self, file: RepoFile) -> bool:
logger.debug(f'Verifying {file.name} integrity...')
if self.trusted:
logger.warning(f'Skipping {file.name} integrity check')
return True
try:
with open(file.path, 'r') as file_opened:
content = file_opened.read()
size = os.stat(file.path)
hasher = hashlib.sha1(f'blob {len(content)}\x00{content}'.encode("utf-8"))
hash = hasher.hexdigest()
if hash == file.sha:
logger.debug('File integrity OK')
return True
else:
logger.error(f'File hash mismatch\n'
f'Calculated:\t{hash}\n'
f'Correct:\t{file.sha}')
return False
except Exception as e:
logger.error(f"Can't verify {file.name} integrity; {e}")
return False
def kill_thread(self):
thread_id = self.get_id()
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, ctypes.py_object(SystemExit))
class MicroUpdater:
def __init__(self, config_path=DEFAULT_COMPLETE_CONFIG_PATH):
log_format = '<green>{time: YYYY-MM-DD HH:mm:ss.SSS}</green> <level>{level}: {message}</level>'
logger.remove()
logger.add(sys.stdout, format=log_format, colorize=True)
logger.info('Starting MicroUpdater...')
self.config = configparser.ConfigParser()
self.read_configuration(config_path)
logger.add(os.path.join(self.config['logging']['logs_path'], 'log_{time: YYYY-MM-DD}.log'), format=log_format, colorize=True, compression='zip', rotation='00:00')
self.github_client = None
self.repo_obj = None
self.cached_release = None
self.github_init()
self.mqtt_client = MQTTClient(client_id=self.config['mqtt']['id'])
self.mqtt_init()
self.threads = {} # ip: thread
self.server_thread = None
self.server_loop = False
def loop(self):
while True:
try:
while True:
next(thread for thread in self.threads.values() if thread.is_alive())
sleep(1)
except StopIteration:
pass
tag, files = self.check_repo()
files = [file for file in files if ".mpy" in file] # Download compiled files only
if tag is not None:
self._download_files(files)
update_json = self._update_json(files=files, tag=tag)
self.start_server(files, update_json)
sleep(int(self.config['github']['check_rate']))
def server(self, files, update_json):
while self.server_loop:
logger.debug('Server waiting for installed tag...')
topic = self.config['mqtt']['installed_tags_topic']
broker = self.config['mqtt']['broker']
message = subscribe(topic, hostname=broker)
payload = message.payload
msg_str = payload.decode("utf-8")
try:
installed_tag_json = json.loads(msg_str)
if 'ip' not in installed_tag_json or 'tag' not in installed_tag_json:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
except:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
logger.debug(f'New update installed tag from {installed_tag_json["ip"]}')
if installed_tag_json['tag'] != update_json['tag']:
logger.debug(f"Probe out of date: installed {installed_tag_json['tag']}, latest {update_json['tag']}")
self.spawn_update_thread(installed_tag_json['ip'], files, update_json)
def spawn_update_thread(self, ip: str, files, update_json):
logger.debug(f'Spawning new thread for {ip} update...')
broker = self.config['mqtt']['broker']
topic = self.config['mqtt']['installed_tags_topic']
port = self.config['updates']['port']
th = DeviceUpdater(ip, port=port, files=files, broker=broker, installed_tags_topic=topic, release_json=update_json, mqtt_client=self.mqtt_client)
th.start()
self.threads[ip] = th
logger.debug(f'Thread spawned and registered.')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
start = time()
t_out = timeout * 1000
while not message.is_published() and time()-start < t_out:
sleep(0.1)
if message.is_published():
return True
return False
def start_server(self, files, update_json):
logger.debug('Starting update server...')
self.server_loop = False
if self.server_thread is not None:
self.server_thread.join()
self.server_loop = True
self.server_thread = Thread(target=self.server, args=(files, update_json))
self.server_thread.start()
logger.debug('Update server started.')
def mqtt_init(self):
self.mqtt_client.on_connect = self.mqtt_on_connect
self.mqtt_client.on_disconnect = self.mqtt_on_disconnect
self.mqtt_connect()
self.mqtt_client.loop_start()
def mqtt_connect(self):
broker = self.config['mqtt']['broker']
self.mqtt_client.connect(broker)
def mqtt_on_connect(self, client, userdata, flags, rc) -> bool:
if rc == 0:
logger.debug(f'MQTT client connected to {self.config["mqtt"]["broker"]}')
return True
else:
logger.error(f'Connection to the broker failed, response: {rc}')
return False
def mqtt_on_disconnect(self, *args):
logger.warning(f'MQTT client disconnect from the broker')
self.mqtt_client.reconnect()
def read_configuration(self, config_path):
| logger.debug(f'Reading configuration file "{config_path}"')
try:
self.config.read(config_path)
except Exception as e:
logger.critical(f'Error reading configuration file; {e}')
logger.critical('Closing...')
exit(1)
try:
sections = self.config.sections()
for section in CONFIGURATION_LAYOUT:
assert section in sections
for key in CONFIGURATION_LAYOUT[section]:
assert key in self.config[section]
except AssertionError:
logger.critical(f'Configuration file malformed, creating sample as "{DEFAULT_COMPLETE_CONFIG_PATH}"...')
for section in CONFIGURATION_LAYOUT:
self.config[section] = {}
for key in CONFIGURATION_LAYOUT[section]:
self.config[section][key] = f'<{key}>'
try: | identifier_body |
|
micro_updater.py | Sending end "END" message')
message = self.mqtt_client.publish(self.topic, payload='END', retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
self.mqtt_client.reconnect()
message = self.mqtt_client.publish(self.topic, payload='END', retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
logger.error(f'[{self.identity()}]: Message not sent')
return
logger.debug(f'[{self.identity()}]: Message sent')
def send_new_release(self, release_json):
logger.debug(f"[{self.identity()}]: Sending {release_json} on {self.topic}...")
message = self.mqtt_client.publish(self.topic, payload='', retain=True)
self.mqtt_wait_publish(message)
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
self.mqtt_client.reconnect()
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
logger.error(f'[{self.identity()}]: Message not sent')
return
logger.debug('Message sent')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
start = time()
t_out = timeout * 1000
while not message.is_published() and time()-start < t_out:
sleep(0.1)
if message.is_published():
return True
return False
def send_file(self, file: RepoFile) -> bool:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((self.ip, self.port))
with open(file.path, 'rb') as f:
content = f.read()
if s.sendall(content) is None:
logger.debug(f'[{self.identity()}]: {file.name} sent.')
return True
else:
logger.error(f'[{self.identity()}]: {file.name} not sent.')
return False
except socket.timeout as stout:
logger.error(f"[{self.identity()}]: Timeout connecting to remote socket; {stout}")
return False
except Exception as e:
logger.error(f"[{self.identity()}]: Error reading file '{file.path}'; {e}")
return False
class FileDownloader(Thread):
def __init__(self, file: RepoFile, download_path: str, trusted: bool):
super().__init__()
self.file = file
self.download_path = download_path
self.response = None
self.trusted = trusted
def run(self) -> None:
try:
if not self._download_file(self.file, self.download_path) or not self._verify_file(self.file):
self.response = False
self.response = True
except SystemExit:
self.response = False
def | (self, file: RepoFile, download_path) -> bool:
logger.debug(f'Downloading {file.name} into {download_path}...')
try:
file_path = os.path.join(download_path, file.name)
wget.download(file.download_link, out=file_path)
file.path = file_path
return True
except Exception as e:
logger.error(f"Can't download {file.name} from {file.download_link}")
return False
def _verify_file(self, file: RepoFile) -> bool:
logger.debug(f'Verifying {file.name} integrity...')
if self.trusted:
logger.warning(f'Skipping {file.name} integrity check')
return True
try:
with open(file.path, 'r') as file_opened:
content = file_opened.read()
size = os.stat(file.path)
hasher = hashlib.sha1(f'blob {len(content)}\x00{content}'.encode("utf-8"))
hash = hasher.hexdigest()
if hash == file.sha:
logger.debug('File integrity OK')
return True
else:
logger.error(f'File hash mismatch\n'
f'Calculated:\t{hash}\n'
f'Correct:\t{file.sha}')
return False
except Exception as e:
logger.error(f"Can't verify {file.name} integrity; {e}")
return False
def kill_thread(self):
thread_id = self.get_id()
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, ctypes.py_object(SystemExit))
class MicroUpdater:
def __init__(self, config_path=DEFAULT_COMPLETE_CONFIG_PATH):
log_format = '<green>{time: YYYY-MM-DD HH:mm:ss.SSS}</green> <level>{level}: {message}</level>'
logger.remove()
logger.add(sys.stdout, format=log_format, colorize=True)
logger.info('Starting MicroUpdater...')
self.config = configparser.ConfigParser()
self.read_configuration(config_path)
logger.add(os.path.join(self.config['logging']['logs_path'], 'log_{time: YYYY-MM-DD}.log'), format=log_format, colorize=True, compression='zip', rotation='00:00')
self.github_client = None
self.repo_obj = None
self.cached_release = None
self.github_init()
self.mqtt_client = MQTTClient(client_id=self.config['mqtt']['id'])
self.mqtt_init()
self.threads = {} # ip: thread
self.server_thread = None
self.server_loop = False
def loop(self):
while True:
try:
while True:
next(thread for thread in self.threads.values() if thread.is_alive())
sleep(1)
except StopIteration:
pass
tag, files = self.check_repo()
files = [file for file in files if ".mpy" in file] # Download compiled files only
if tag is not None:
self._download_files(files)
update_json = self._update_json(files=files, tag=tag)
self.start_server(files, update_json)
sleep(int(self.config['github']['check_rate']))
def server(self, files, update_json):
while self.server_loop:
logger.debug('Server waiting for installed tag...')
topic = self.config['mqtt']['installed_tags_topic']
broker = self.config['mqtt']['broker']
message = subscribe(topic, hostname=broker)
payload = message.payload
msg_str = payload.decode("utf-8")
try:
installed_tag_json = json.loads(msg_str)
if 'ip' not in installed_tag_json or 'tag' not in installed_tag_json:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
except:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
logger.debug(f'New update installed tag from {installed_tag_json["ip"]}')
if installed_tag_json['tag'] != update_json['tag']:
logger.debug(f"Probe out of date: installed {installed_tag_json['tag']}, latest {update_json['tag']}")
self.spawn_update_thread(installed_tag_json['ip'], files, update_json)
def spawn_update_thread(self, ip: str, files, update_json):
logger.debug(f'Spawning new thread for {ip} update...')
broker = self.config['mqtt']['broker']
topic = self.config['mqtt']['installed_tags_topic']
port = self.config['updates']['port']
th = DeviceUpdater(ip, port=port, files=files, broker=broker, installed_tags_topic=topic, release_json=update_json, mqtt_client=self.mqtt_client)
th.start()
self.threads[ip] = th
logger.debug(f'Thread spawned and registered.')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
start = time()
t_out = timeout * 1000
while not message.is_published() and time()-start < t_out:
sleep(0.1)
if message.is_published():
return True
return False
def start_server(self, files, update_json):
logger.debug('Starting update server...')
self.server_loop = False
if self.server_thread is not None:
self.server_thread.join()
self.server_loop = True
self.server_thread = Thread(target=self.server, args=(files, update_json))
self.server_thread.start()
logger.debug('Update server started.')
def mqtt_init(self):
self.mqtt_client.on_connect = self.mqtt_on_connect
self.mqtt_client.on_disconnect = self.mqtt_on_disconnect
self.mqtt_connect()
self.mqtt_client.loop_start()
def mqtt_connect(self):
broker = self.config['mqtt']['broker']
self.mqtt_client.connect(broker)
def mqtt_on_connect(self, client, userdata, flags, rc) -> bool:
if rc == 0:
logger.debug(f'MQTT client connected to {self.config["mqtt"]["broker"]}')
return True
else:
logger.error(f'Connection to the broker failed, response: {rc}')
return False
def mqtt_on_disconnect(self, *args):
logger.warning(f'MQTT client disconnect from the broker')
self.mqtt_client.reconnect()
def read_configuration(self, config_path):
logger.debug(f'Reading configuration file "{config_path}"')
try:
self.config.read(config_path)
except Exception as e:
logger.critical(f'Error reading configuration file; {e}')
logger.critical('Closing...')
exit(1)
try:
sections = self.config.sections()
for section in CONFIGURATION | _download_file | identifier_name |
WaterHeaterClass.py | CarbonPrice=20): #$20/ton is the default rate for Carbon...if not specified when calling the func
result = {}
for i in range(vint, vint+self.lt):
if self.hasRefrigerant == True:
result[i] = UnitCarbonPrice * (self.AnnualEmissions(i) )
else:
result[i] = UnitCarbonPrice * (self.AnnEmissions(i) )
return result
def averageCarbonCost(self, vint, UnitCarbonPrice=20):
result = {}
result = self.annualCarbonCost(vint, UnitCarbonPrice)
return sum(result.values())/self.lt
def NPVEmissions_Refrigerant(self, yr):
if self.hasRefrigerant == True:
result = 0
RefLeek = self.RefLeaks(yr)
for i in range(yr, yr+self.lt+1):
result = result + RefLeek[i]/(1+DiscRate)**(i-yr+1)
else:
result = 0
return result
def NPVEmissions_Indirect(self, yr):
result = 0
for i in range(yr, yr+self.lt+1):
result = result + self.AnnEmissions(i)/(1+DiscRate)**(i-yr+1)
return result
def NPVEmissions(self, yr): #NPV OF EMISSIONS USED FOR COMPUTING NPV OF CARBONCOST
NPVEm = self.NPVEmissions_Indirect(yr)+ self. NPVEmissions_Refrigerant(yr)
return NPVEm
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
def totalCapex(self): #total cost of the stock of vintage yr
return self.OrigNum * self.IC
def NPVCost(self,yr):
NPV = self.IC
for I in range(yr, self.lt+yr):
NPV = NPV + (self.OM[I-yr])/(1+DiscRate)**(I-yr+1)
return NPV
def NPVEngCost(self,yr):
NPV = 0
for I in range(yr, self.lt+yr):
NPV = NPV + (self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def NPVCC(self,vint, CarbonCost= 21): #NPV of carbon cost
return self.NPVEmissions(vint)*CarbonCost
def calcNPV_Capex(self, yr, Capex): #changing capex
NPV = Capex
for I in range(yr,self.lt +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV_LifeTime(self, yr, lifetime): #changing can specify a difff lifetime other than self.lt
NPV = self.IC
for I in range(yr,lifetime +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV(self,yr):#initial fixed capex
NPV = self.IC
for I in range(yr, self.lt+ yr):
# print I, self.OM[I-ThisYear], self.AnnualEngCost(I)
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def annualizedNPV(self,yr):
return self.calcNPV(yr)/self.lt
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
def Annuallcc(self, yr, UnitCarbonPrice =20 ): #levelized
|
def payback(self, WHx,yr):
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
#print "#", X, Y, "#"
if self == WHx:
return 0
elif (X>=0 and Y<=0):
return max(self.lt, WHx.lt)
elif (X<0 and Y>=0):
return 0
else:
return (min(self.lt, WHx.lt, X/(Y) ))
def payback1(self, WHx,yr):
N= 1
maxN = max(self.lt, WHx.lt)
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
# print '\n test', X, Y
# if X <= 0 and Y <=0:
# return 0
# else:
while N < maxN and abs(X/Y) >1 :
Y = Y + (self.OM[N] + self.AnnualEngCost(yr+N)) - (WHx.OM[N] + WHx.AnnualEngCost(yr+N))
N = N +1
if N == maxN and X/Y > 1:
return maxN
else:
return N
def AnnualEngUsage(self):
return self.dailyVol* self.IncTemp*self.fuel.unitEng * 365/self.ef
def AnnualEngCost(self, yr):
# if self.fuel == NG:
# print yr, self.fuel.UnitEngCost[inf][yr]
return self.AnnualEngUsage() * self.fuel.UnitEngCost[yr]
def compareEngUsage(self, WH2):
return (self.AnnualEngUsage() - WH2.AnnualEngUsage())
def compareEmissions(self,WH2):
return(self.AnnualEmissions() - WH2.AnnualEmissions())
def CCBreakEven(self, WH2, yr): #breakeven carbon cost
breakeven = (self.calcNPV(yr)/self.lt- WH2.calcNPV(yr)/WH2.lt)/( WH2.NPVEmissions(yr)/WH2.lt - self.NPVEmissions(yr)/self.lt )
return breakeven
def weib(self):
x = range(0, self.lt+UltimYr+1)
w = weibull_min.cdf(x,3,loc=2.5,scale = self.lt) * self.OrigNum
#print w
return(w)
def deadsofar(self, yr):
if yr > self.vintage and yr < self.vintage+ self.lt + UltimYr:
# print yr, self.vintage
return self.weib()[yr-self.vintage]
elif yr >= self.vintage + self.lt + UltimYr:
return self.OrigNum
else:
return 0
def numAlive(self,yr):
return (self.OrigNum - self.deadsofar(yr))
def age(self, yr):
return (yr - self.vintage)
def annualreplacement(self,yr):
# if yr> self.vintage + (self.lt + UltimYr) or yr < self.vintage:
# return 0
# else:
return (max(self.deadsofar(yr)- self.deadsofar(yr-1),0))
class FuelType:
def __init__(self, name,unitEng,UnitEngCost, UnitEmissions):
self.name = name
self.unitEng = unitEng
self.UnitEngCost = UnitEngCost
self.UnitEmissions= UnitEmissions
NG = FuelType("NG", UnitNG , NGCostYrly, NGEmisYrly)
Elec = FuelType("Elec", UnitElec, ElecCostYrly, ElecEmisYrly)
Prop = FuelType("Prop", UnitProp, PropCostYrly, PropEmisYrly)
#for yr in range(ThisYear, EndYear+1):
# print yr, "NGCOST", NGCostYrly['MED'][yr], ElecCostYrly['LOW'][yr], PropCostYrly['LOW'][yr]
#this class is to track the annual 'living' stock of WHs of a particular type, their annual energy and emissions for each
#WH in | return ( (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) ) /self.lt) | identifier_body |
WaterHeaterClass.py | CarbonPrice=20): #$20/ton is the default rate for Carbon...if not specified when calling the func
result = {}
for i in range(vint, vint+self.lt):
if self.hasRefrigerant == True:
result[i] = UnitCarbonPrice * (self.AnnualEmissions(i) )
else:
result[i] = UnitCarbonPrice * (self.AnnEmissions(i) )
return result
def averageCarbonCost(self, vint, UnitCarbonPrice=20):
result = {}
result = self.annualCarbonCost(vint, UnitCarbonPrice)
return sum(result.values())/self.lt
def NPVEmissions_Refrigerant(self, yr):
if self.hasRefrigerant == True:
result = 0
RefLeek = self.RefLeaks(yr)
for i in range(yr, yr+self.lt+1):
result = result + RefLeek[i]/(1+DiscRate)**(i-yr+1)
else:
result = 0
return result
def NPVEmissions_Indirect(self, yr):
result = 0
for i in range(yr, yr+self.lt+1):
result = result + self.AnnEmissions(i)/(1+DiscRate)**(i-yr+1)
return result
def NPVEmissions(self, yr): #NPV OF EMISSIONS USED FOR COMPUTING NPV OF CARBONCOST
NPVEm = self.NPVEmissions_Indirect(yr)+ self. NPVEmissions_Refrigerant(yr)
return NPVEm
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
def totalCapex(self): #total cost of the stock of vintage yr
return self.OrigNum * self.IC
def NPVCost(self,yr):
NPV = self.IC
for I in range(yr, self.lt+yr):
NPV = NPV + (self.OM[I-yr])/(1+DiscRate)**(I-yr+1)
return NPV
def NPVEngCost(self,yr):
NPV = 0
for I in range(yr, self.lt+yr):
NPV = NPV + (self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def NPVCC(self,vint, CarbonCost= 21): #NPV of carbon cost
return self.NPVEmissions(vint)*CarbonCost
def calcNPV_Capex(self, yr, Capex): #changing capex
NPV = Capex
for I in range(yr,self.lt +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV_LifeTime(self, yr, lifetime): #changing can specify a difff lifetime other than self.lt
NPV = self.IC
for I in range(yr,lifetime +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV(self,yr):#initial fixed capex
NPV = self.IC
for I in range(yr, self.lt+ yr):
# print I, self.OM[I-ThisYear], self.AnnualEngCost(I)
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def annualizedNPV(self,yr):
return self.calcNPV(yr)/self.lt
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
def Annuallcc(self, yr, UnitCarbonPrice =20 ): #levelized
return ( (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) ) /self.lt)
def payback(self, WHx,yr):
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
#print "#", X, Y, "#"
if self == WHx:
return 0
elif (X>=0 and Y<=0):
return max(self.lt, WHx.lt)
elif (X<0 and Y>=0):
return 0
else:
return (min(self.lt, WHx.lt, X/(Y) ))
def payback1(self, WHx,yr):
N= 1
maxN = max(self.lt, WHx.lt)
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
# print '\n test', X, Y
# if X <= 0 and Y <=0:
# return 0
# else:
while N < maxN and abs(X/Y) >1 :
Y = Y + (self.OM[N] + self.AnnualEngCost(yr+N)) - (WHx.OM[N] + WHx.AnnualEngCost(yr+N))
N = N +1
if N == maxN and X/Y > 1:
return maxN
else:
return N
def AnnualEngUsage(self):
return self.dailyVol* self.IncTemp*self.fuel.unitEng * 365/self.ef
def AnnualEngCost(self, yr):
# if self.fuel == NG:
# print yr, self.fuel.UnitEngCost[inf][yr]
return self.AnnualEngUsage() * self.fuel.UnitEngCost[yr]
def compareEngUsage(self, WH2):
return (self.AnnualEngUsage() - WH2.AnnualEngUsage())
def compareEmissions(self,WH2):
return(self.AnnualEmissions() - WH2.AnnualEmissions())
def CCBreakEven(self, WH2, yr): #breakeven carbon cost
breakeven = (self.calcNPV(yr)/self.lt- WH2.calcNPV(yr)/WH2.lt)/( WH2.NPVEmissions(yr)/WH2.lt - self.NPVEmissions(yr)/self.lt )
return breakeven
def weib(self):
x = range(0, self.lt+UltimYr+1)
w = weibull_min.cdf(x,3,loc=2.5,scale = self.lt) * self.OrigNum
#print w
return(w)
def deadsofar(self, yr):
if yr > self.vintage and yr < self.vintage+ self.lt + UltimYr:
# print yr, self.vintage
|
elif yr >= self.vintage + self.lt + UltimYr:
return self.OrigNum
else:
return 0
def numAlive(self,yr):
return (self.OrigNum - self.deadsofar(yr))
def age(self, yr):
return (yr - self.vintage)
def annualreplacement(self,yr):
# if yr> self.vintage + (self.lt + UltimYr) or yr < self.vintage:
# return 0
# else:
return (max(self.deadsofar(yr)- self.deadsofar(yr-1),0))
class FuelType:
def __init__(self, name,unitEng,UnitEngCost, UnitEmissions):
self.name = name
self.unitEng = unitEng
self.UnitEngCost = UnitEngCost
self.UnitEmissions= UnitEmissions
NG = FuelType("NG", UnitNG , NGCostYrly, NGEmisYrly)
Elec = FuelType("Elec", UnitElec, ElecCostYrly, ElecEmisYrly)
Prop = FuelType("Prop", UnitProp, PropCostYrly, PropEmisYrly)
#for yr in range(ThisYear, EndYear+1):
# print yr, "NGCOST", NGCostYrly['MED'][yr], ElecCostYrly['LOW'][yr], PropCostYrly['LOW'][yr]
#this class is to track the annual 'living' stock of WHs of a particular type, their annual energy and emissions for each
#WH | return self.weib()[yr-self.vintage] | conditional_block |
WaterHeaterClass.py | CarbonPrice=20): #$20/ton is the default rate for Carbon...if not specified when calling the func
result = {}
for i in range(vint, vint+self.lt):
if self.hasRefrigerant == True:
result[i] = UnitCarbonPrice * (self.AnnualEmissions(i) )
else:
result[i] = UnitCarbonPrice * (self.AnnEmissions(i) )
return result
def averageCarbonCost(self, vint, UnitCarbonPrice=20):
result = {}
result = self.annualCarbonCost(vint, UnitCarbonPrice)
return sum(result.values())/self.lt
def NPVEmissions_Refrigerant(self, yr):
if self.hasRefrigerant == True:
result = 0
RefLeek = self.RefLeaks(yr)
for i in range(yr, yr+self.lt+1):
result = result + RefLeek[i]/(1+DiscRate)**(i-yr+1)
else:
result = 0
return result
def NPVEmissions_Indirect(self, yr):
result = 0
for i in range(yr, yr+self.lt+1):
result = result + self.AnnEmissions(i)/(1+DiscRate)**(i-yr+1)
return result
def NPVEmissions(self, yr): #NPV OF EMISSIONS USED FOR COMPUTING NPV OF CARBONCOST
NPVEm = self.NPVEmissions_Indirect(yr)+ self. NPVEmissions_Refrigerant(yr)
return NPVEm
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
def totalCapex(self): #total cost of the stock of vintage yr
return self.OrigNum * self.IC
def NPVCost(self,yr):
NPV = self.IC
for I in range(yr, self.lt+yr):
NPV = NPV + (self.OM[I-yr])/(1+DiscRate)**(I-yr+1)
return NPV
def NPVEngCost(self,yr):
NPV = 0
for I in range(yr, self.lt+yr):
NPV = NPV + (self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def NPVCC(self,vint, CarbonCost= 21): #NPV of carbon cost
return self.NPVEmissions(vint)*CarbonCost
def calcNPV_Capex(self, yr, Capex): #changing capex
NPV = Capex
for I in range(yr,self.lt +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV_LifeTime(self, yr, lifetime): #changing can specify a difff lifetime other than self.lt
NPV = self.IC
for I in range(yr,lifetime +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV(self,yr):#initial fixed capex
NPV = self.IC
for I in range(yr, self.lt+ yr):
# print I, self.OM[I-ThisYear], self.AnnualEngCost(I)
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def annualizedNPV(self,yr):
return self.calcNPV(yr)/self.lt
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
def Annuallcc(self, yr, UnitCarbonPrice =20 ): #levelized
return ( (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) ) /self.lt)
def payback(self, WHx,yr):
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
#print "#", X, Y, "#"
if self == WHx:
return 0
elif (X>=0 and Y<=0):
return max(self.lt, WHx.lt)
elif (X<0 and Y>=0):
return 0
else:
return (min(self.lt, WHx.lt, X/(Y) ))
def payback1(self, WHx,yr):
N= 1
maxN = max(self.lt, WHx.lt)
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
# print '\n test', X, Y
# if X <= 0 and Y <=0:
# return 0
# else:
while N < maxN and abs(X/Y) >1 :
Y = Y + (self.OM[N] + self.AnnualEngCost(yr+N)) - (WHx.OM[N] + WHx.AnnualEngCost(yr+N))
N = N +1
if N == maxN and X/Y > 1:
return maxN
else:
return N
def AnnualEngUsage(self):
return self.dailyVol* self.IncTemp*self.fuel.unitEng * 365/self.ef
def AnnualEngCost(self, yr):
# if self.fuel == NG:
# print yr, self.fuel.UnitEngCost[inf][yr]
return self.AnnualEngUsage() * self.fuel.UnitEngCost[yr]
def compareEngUsage(self, WH2):
return (self.AnnualEngUsage() - WH2.AnnualEngUsage())
def compareEmissions(self,WH2):
return(self.AnnualEmissions() - WH2.AnnualEmissions())
| def weib(self):
x = range(0, self.lt+UltimYr+1)
w = weibull_min.cdf(x,3,loc=2.5,scale = self.lt) * self.OrigNum
#print w
return(w)
def deadsofar(self, yr):
if yr > self.vintage and yr < self.vintage+ self.lt + UltimYr:
# print yr, self.vintage
return self.weib()[yr-self.vintage]
elif yr >= self.vintage + self.lt + UltimYr:
return self.OrigNum
else:
return 0
def numAlive(self,yr):
return (self.OrigNum - self.deadsofar(yr))
def age(self, yr):
return (yr - self.vintage)
def annualreplacement(self,yr):
# if yr> self.vintage + (self.lt + UltimYr) or yr < self.vintage:
# return 0
# else:
return (max(self.deadsofar(yr)- self.deadsofar(yr-1),0))
class FuelType:
def __init__(self, name,unitEng,UnitEngCost, UnitEmissions):
self.name = name
self.unitEng = unitEng
self.UnitEngCost = UnitEngCost
self.UnitEmissions= UnitEmissions
NG = FuelType("NG", UnitNG , NGCostYrly, NGEmisYrly)
Elec = FuelType("Elec", UnitElec, ElecCostYrly, ElecEmisYrly)
Prop = FuelType("Prop", UnitProp, PropCostYrly, PropEmisYrly)
#for yr in range(ThisYear, EndYear+1):
# print yr, "NGCOST", NGCostYrly['MED'][yr], ElecCostYrly['LOW'][yr], PropCostYrly['LOW'][yr]
#this class is to track the annual 'living' stock of WHs of a particular type, their annual energy and emissions for each
# | def CCBreakEven(self, WH2, yr): #breakeven carbon cost
breakeven = (self.calcNPV(yr)/self.lt- WH2.calcNPV(yr)/WH2.lt)/( WH2.NPVEmissions(yr)/WH2.lt - self.NPVEmissions(yr)/self.lt )
return breakeven
| random_line_split |
WaterHeaterClass.py | (self,yr): #in tons of CO2 eq
result = {}
avgleak = 0
if (self.hasRefrigerant == True):
result = self.RefLeaks(yr)
#for i in range(vint, vint+ self.lt):
# avgleak = avgleak + result[i]/(1+CCDiscRate)**(i-vint+1)
avgleak = sum(result.values())/self.lt
else:
avgleak = 0
return avgleak
def AnnEmissions(self,yr): #in tons with NO REFRIGERANT
return self.AnnualEngUsage() * self.fuel.UnitEmissions[yr]/1000
def AnnualEmissions(self,yr): #in tons with REFRIGERANTS
if self.hasRefrigerant == False:
return self.AnnEmissions(yr)
else:
return ( self.AnnEmissions(yr)+ self.AvgRefLeaks(yr) )
def annualizedEmissions(self, vint): #in tons (THIS IS THE AVERAGE EMISSIONS..NOT DISCOUNTED
result = {}
# result1 = {}
for i in range(vint, vint+self.lt):
result[i] = self.AnnualEmissions(i) #INCLUDING DIRECT AND INDIRECT
# result1[yr] = self.AnnEmissions(yr)
annEmis = sum(result.values())/self.lt
return (annEmis)
def MarginalAnnualEmissions(self, WH2, yr):
return (self.AnnualEmissions(yr) - WH2.AnnualEmissions(yr) )
def annualCarbonCost(self, vint, UnitCarbonPrice=20): #$20/ton is the default rate for Carbon...if not specified when calling the func
result = {}
for i in range(vint, vint+self.lt):
if self.hasRefrigerant == True:
result[i] = UnitCarbonPrice * (self.AnnualEmissions(i) )
else:
result[i] = UnitCarbonPrice * (self.AnnEmissions(i) )
return result
def averageCarbonCost(self, vint, UnitCarbonPrice=20):
result = {}
result = self.annualCarbonCost(vint, UnitCarbonPrice)
return sum(result.values())/self.lt
def NPVEmissions_Refrigerant(self, yr):
if self.hasRefrigerant == True:
result = 0
RefLeek = self.RefLeaks(yr)
for i in range(yr, yr+self.lt+1):
result = result + RefLeek[i]/(1+DiscRate)**(i-yr+1)
else:
result = 0
return result
def NPVEmissions_Indirect(self, yr):
result = 0
for i in range(yr, yr+self.lt+1):
result = result + self.AnnEmissions(i)/(1+DiscRate)**(i-yr+1)
return result
def NPVEmissions(self, yr): #NPV OF EMISSIONS USED FOR COMPUTING NPV OF CARBONCOST
NPVEm = self.NPVEmissions_Indirect(yr)+ self. NPVEmissions_Refrigerant(yr)
return NPVEm
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
def totalCapex(self): #total cost of the stock of vintage yr
return self.OrigNum * self.IC
def NPVCost(self,yr):
NPV = self.IC
for I in range(yr, self.lt+yr):
NPV = NPV + (self.OM[I-yr])/(1+DiscRate)**(I-yr+1)
return NPV
def NPVEngCost(self,yr):
NPV = 0
for I in range(yr, self.lt+yr):
NPV = NPV + (self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def NPVCC(self,vint, CarbonCost= 21): #NPV of carbon cost
return self.NPVEmissions(vint)*CarbonCost
def calcNPV_Capex(self, yr, Capex): #changing capex
NPV = Capex
for I in range(yr,self.lt +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV_LifeTime(self, yr, lifetime): #changing can specify a difff lifetime other than self.lt
NPV = self.IC
for I in range(yr,lifetime +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV(self,yr):#initial fixed capex
NPV = self.IC
for I in range(yr, self.lt+ yr):
# print I, self.OM[I-ThisYear], self.AnnualEngCost(I)
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def annualizedNPV(self,yr):
return self.calcNPV(yr)/self.lt
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
def Annuallcc(self, yr, UnitCarbonPrice =20 ): #levelized
return ( (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) ) /self.lt)
def payback(self, WHx,yr):
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
#print "#", X, Y, "#"
if self == WHx:
return 0
elif (X>=0 and Y<=0):
return max(self.lt, WHx.lt)
elif (X<0 and Y>=0):
return 0
else:
return (min(self.lt, WHx.lt, X/(Y) ))
def payback1(self, WHx,yr):
N= 1
maxN = max(self.lt, WHx.lt)
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
# print '\n test', X, Y
# if X <= 0 and Y <=0:
# return 0
# else:
while N < maxN and abs(X/Y) >1 :
Y = Y + (self.OM[N] + self.AnnualEngCost(yr+N)) - (WHx.OM[N] + WHx.AnnualEngCost(yr+N))
N = N +1
if N == maxN and X/Y > 1:
return maxN
else:
return N
def AnnualEngUsage(self):
return self.dailyVol* self.IncTemp*self.fuel.unitEng * 365/self.ef
def AnnualEngCost(self, yr):
# if self.fuel == NG:
# print yr, self.fuel.UnitEngCost[inf][yr]
return self.AnnualEngUsage() * self.fuel.UnitEngCost[yr]
def compareEngUsage(self, WH2):
return (self.AnnualEngUsage() - WH2.AnnualEngUsage())
def compareEmissions(self,WH2):
return(self.AnnualEmissions() - WH2.AnnualEmissions())
def CCBreakEven(self, WH2, yr): #breakeven carbon cost
breakeven = (self.calcNPV(yr)/self.lt- WH2.calcNPV(yr)/WH2.lt)/( WH2.NPVEmissions(yr)/WH2.lt - self.NPVEmissions(yr)/self.lt )
return breakeven
def weib(self):
x = range(0, self.lt+UltimYr+1)
w = weibull_min.cdf(x,3,loc=2.5,scale = self.lt) * self.OrigNum
#print w
return(w)
def deadsofar(self, yr):
if yr | AvgRefLeaks | identifier_name |
|
indexed_set.rs | _of::<Word>() * 8;
impl<T: Idx> fmt::Debug for IdxSet<T> {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
w.debug_list()
.entries(self.iter())
.finish()
}
}
impl<T: Idx> IdxSet<T> {
fn new(init: Word, domain_size: usize) -> Self {
let num_words = (domain_size + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
IdxSet {
_pd: Default::default(),
bits: vec![init; num_words],
}
}
/// Creates set holding every element whose index falls in range 0..domain_size.
pub fn new_filled(domain_size: usize) -> Self {
let mut result = Self::new(!0, domain_size);
result.trim_to(domain_size);
result
}
/// Creates set holding no elements.
pub fn new_empty(domain_size: usize) -> Self {
Self::new(0, domain_size)
}
/// Duplicates as a hybrid set.
pub fn to_hybrid(&self) -> HybridIdxSet<T> {
// This domain_size may be slightly larger than the one specified
// upon creation, due to rounding up to a whole word. That's ok.
let domain_size = self.bits.len() * BITS_PER_WORD;
// Note: we currently don't bother trying to make a Sparse set.
HybridIdxSet::Dense(self.to_owned(), domain_size)
}
/// Removes all elements
pub fn clear(&mut self) {
for b in &mut self.bits {
*b = 0;
}
}
/// Sets all elements up to `domain_size`
pub fn set_up_to(&mut self, domain_size: usize) {
for b in &mut self.bits {
*b = !0;
}
self.trim_to(domain_size);
}
/// Clear all elements above `domain_size`.
fn trim_to(&mut self, domain_size: usize) {
// `trim_block` is the first block where some bits have
// to be cleared.
let trim_block = domain_size / BITS_PER_WORD;
// all the blocks above it have to be completely cleared.
if trim_block < self.bits.len() {
for b in &mut self.bits[trim_block+1..] {
*b = 0;
}
// at that block, the `domain_size % BITS_PER_WORD` LSBs
// should remain.
let remaining_bits = domain_size % BITS_PER_WORD;
let mask = (1<<remaining_bits)-1;
self.bits[trim_block] &= mask;
}
}
/// Removes `elem` from the set `self`; returns true iff this changed `self`.
pub fn remove(&mut self, elem: &T) -> bool {
self.bits.clear_bit(elem.index())
}
/// Adds `elem` to the set `self`; returns true iff this changed `self`.
pub fn add(&mut self, elem: &T) -> bool {
self.bits.set_bit(elem.index())
}
/// Returns true iff set `self` contains `elem`.
pub fn contains(&self, elem: &T) -> bool {
self.bits.get_bit(elem.index())
}
pub fn words(&self) -> &[Word] {
&self.bits
}
pub fn words_mut(&mut self) -> &mut [Word] {
&mut self.bits
}
/// Efficiently overwrite `self` with `other`. Panics if `self` and `other`
/// don't have the same length.
pub fn overwrite(&mut self, other: &IdxSet<T>) {
self.words_mut().clone_from_slice(other.words());
}
/// Set `self = self | other` and return true if `self` changed
/// (i.e., if new bits were added).
pub fn union(&mut self, other: &impl UnionIntoIdxSet<T>) -> bool {
other.union_into(self)
}
/// Set `self = self - other` and return true if `self` changed.
/// (i.e., if any bits were removed).
pub fn subtract(&mut self, other: &impl SubtractFromIdxSet<T>) -> bool {
other.subtract_from(self)
}
/// Set `self = self & other` and return true if `self` changed.
/// (i.e., if any bits were removed).
pub fn intersect(&mut self, other: &IdxSet<T>) -> bool {
bitwise(self.words_mut(), other.words(), &Intersect)
}
pub fn iter(&self) -> Iter<T> {
Iter {
cur: None,
iter: self.words().iter().enumerate(),
_pd: PhantomData,
}
}
}
impl<T: Idx> UnionIntoIdxSet<T> for IdxSet<T> {
fn union_into(&self, other: &mut IdxSet<T>) -> bool {
bitwise(other.words_mut(), self.words(), &Union)
}
}
impl<T: Idx> SubtractFromIdxSet<T> for IdxSet<T> {
fn subtract_from(&self, other: &mut IdxSet<T>) -> bool {
bitwise(other.words_mut(), self.words(), &Subtract)
}
}
pub struct Iter<'a, T: Idx> {
cur: Option<(Word, usize)>,
iter: iter::Enumerate<slice::Iter<'a, Word>>,
_pd: PhantomData<fn(&T)>,
}
impl<'a, T: Idx> Iterator for Iter<'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
loop {
if let Some((ref mut word, offset)) = self.cur {
let bit_pos = word.trailing_zeros() as usize;
if bit_pos != BITS_PER_WORD {
let bit = 1 << bit_pos;
*word ^= bit;
return Some(T::new(bit_pos + offset))
}
}
let (i, word) = self.iter.next()?;
self.cur = Some((*word, BITS_PER_WORD * i));
}
}
}
const SPARSE_MAX: usize = 8;
/// A sparse index set with a maximum of SPARSE_MAX elements. Used by
/// HybridIdxSet; do not use directly.
///
/// The elements are stored as an unsorted vector with no duplicates.
#[derive(Clone, Debug)]
pub struct SparseIdxSet<T: Idx>(ArrayVec<[T; SPARSE_MAX]>);
impl<T: Idx> SparseIdxSet<T> {
fn new() -> Self {
SparseIdxSet(ArrayVec::new())
}
fn len(&self) -> usize {
self.0.len()
}
fn contains(&self, elem: &T) -> bool {
self.0.contains(elem)
}
fn add(&mut self, elem: &T) -> bool {
// Ensure there are no duplicates.
if self.0.contains(elem) {
false
} else {
self.0.push(*elem);
true
}
}
fn remove(&mut self, elem: &T) -> bool {
if let Some(i) = self.0.iter().position(|e| e == elem) {
// Swap the found element to the end, then pop it.
let len = self.0.len();
self.0.swap(i, len - 1);
self.0.pop();
true
} else {
false
}
}
fn to_dense(&self, domain_size: usize) -> IdxSet<T> {
let mut dense = IdxSet::new_empty(domain_size);
for elem in self.0.iter() {
dense.add(elem);
}
dense
}
fn iter(&self) -> SparseIter<T> {
SparseIter {
iter: self.0.iter(),
}
}
}
impl<T: Idx> UnionIntoIdxSet<T> for SparseIdxSet<T> {
fn union_into(&self, other: &mut IdxSet<T>) -> bool {
let mut changed = false;
for elem in self.iter() {
changed |= other.add(&elem);
}
changed
}
}
impl<T: Idx> SubtractFromIdxSet<T> for SparseIdxSet<T> {
fn subtract_from(&self, other: &mut IdxSet<T>) -> bool {
let mut changed = false;
for elem in self.iter() {
changed |= other.remove(&elem);
}
changed
}
}
pub struct SparseIter<'a, T: Idx> {
iter: slice::Iter<'a, T>,
}
impl<'a, T: Idx> Iterator for SparseIter<'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
self.iter.next().map(|e| *e)
}
}
/// Like IdxSet, but with a hybrid representation: sparse when there are few
/// elements in the set, but dense when there are many. It's especially
/// efficient for sets that typically have a small number of elements, but a
/// large `domain_size`, and are cleared frequently.
#[derive(Clone, Debug)]
pub enum HybridIdxSet<T: Idx> {
Sparse(SparseIdxSet<T>, usize),
Dense(IdxSet<T>, usize),
}
impl<T: Idx> HybridIdxSet<T> {
pub fn | new_empty | identifier_name |
|
indexed_set.rs | a, T: Idx> {
cur: Option<(Word, usize)>,
iter: iter::Enumerate<slice::Iter<'a, Word>>,
_pd: PhantomData<fn(&T)>,
}
impl<'a, T: Idx> Iterator for Iter<'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
loop {
if let Some((ref mut word, offset)) = self.cur {
let bit_pos = word.trailing_zeros() as usize;
if bit_pos != BITS_PER_WORD {
let bit = 1 << bit_pos;
*word ^= bit;
return Some(T::new(bit_pos + offset))
}
}
let (i, word) = self.iter.next()?;
self.cur = Some((*word, BITS_PER_WORD * i));
}
}
}
const SPARSE_MAX: usize = 8;
/// A sparse index set with a maximum of SPARSE_MAX elements. Used by
/// HybridIdxSet; do not use directly.
///
/// The elements are stored as an unsorted vector with no duplicates.
#[derive(Clone, Debug)]
pub struct SparseIdxSet<T: Idx>(ArrayVec<[T; SPARSE_MAX]>);
impl<T: Idx> SparseIdxSet<T> {
fn new() -> Self {
SparseIdxSet(ArrayVec::new())
}
fn len(&self) -> usize {
self.0.len()
}
fn contains(&self, elem: &T) -> bool {
self.0.contains(elem)
}
fn add(&mut self, elem: &T) -> bool {
// Ensure there are no duplicates.
if self.0.contains(elem) {
false
} else {
self.0.push(*elem);
true
}
}
fn remove(&mut self, elem: &T) -> bool {
if let Some(i) = self.0.iter().position(|e| e == elem) {
// Swap the found element to the end, then pop it.
let len = self.0.len();
self.0.swap(i, len - 1);
self.0.pop();
true
} else {
false
}
}
fn to_dense(&self, domain_size: usize) -> IdxSet<T> {
let mut dense = IdxSet::new_empty(domain_size);
for elem in self.0.iter() {
dense.add(elem);
}
dense
}
fn iter(&self) -> SparseIter<T> {
SparseIter {
iter: self.0.iter(),
}
}
}
impl<T: Idx> UnionIntoIdxSet<T> for SparseIdxSet<T> {
fn union_into(&self, other: &mut IdxSet<T>) -> bool {
let mut changed = false;
for elem in self.iter() {
changed |= other.add(&elem);
}
changed
}
}
impl<T: Idx> SubtractFromIdxSet<T> for SparseIdxSet<T> {
fn subtract_from(&self, other: &mut IdxSet<T>) -> bool {
let mut changed = false;
for elem in self.iter() {
changed |= other.remove(&elem);
}
changed
}
}
pub struct SparseIter<'a, T: Idx> {
iter: slice::Iter<'a, T>,
}
impl<'a, T: Idx> Iterator for SparseIter<'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
self.iter.next().map(|e| *e)
}
}
/// Like IdxSet, but with a hybrid representation: sparse when there are few
/// elements in the set, but dense when there are many. It's especially
/// efficient for sets that typically have a small number of elements, but a
/// large `domain_size`, and are cleared frequently.
#[derive(Clone, Debug)]
pub enum HybridIdxSet<T: Idx> {
Sparse(SparseIdxSet<T>, usize),
Dense(IdxSet<T>, usize),
}
impl<T: Idx> HybridIdxSet<T> {
pub fn new_empty(domain_size: usize) -> Self {
HybridIdxSet::Sparse(SparseIdxSet::new(), domain_size)
}
pub fn clear(&mut self) {
let domain_size = match *self {
HybridIdxSet::Sparse(_, size) => size,
HybridIdxSet::Dense(_, size) => size,
};
*self = HybridIdxSet::new_empty(domain_size);
}
/// Returns true iff set `self` contains `elem`.
pub fn contains(&self, elem: &T) -> bool {
match self {
HybridIdxSet::Sparse(sparse, _) => sparse.contains(elem),
HybridIdxSet::Dense(dense, _) => dense.contains(elem),
}
}
/// Adds `elem` to the set `self`.
pub fn add(&mut self, elem: &T) -> bool {
match self {
HybridIdxSet::Sparse(sparse, _) if sparse.len() < SPARSE_MAX => {
// The set is sparse and has space for `elem`.
sparse.add(elem)
}
HybridIdxSet::Sparse(sparse, _) if sparse.contains(elem) => {
// The set is sparse and does not have space for `elem`, but
// that doesn't matter because `elem` is already present.
false
}
HybridIdxSet::Sparse(_, _) => {
// The set is sparse and full. Convert to a dense set.
//
// FIXME: This code is awful, but I can't work out how else to
// appease the borrow checker.
let dummy = HybridIdxSet::Sparse(SparseIdxSet::new(), 0);
match mem::replace(self, dummy) {
HybridIdxSet::Sparse(sparse, domain_size) => {
let mut dense = sparse.to_dense(domain_size);
let changed = dense.add(elem);
assert!(changed);
mem::replace(self, HybridIdxSet::Dense(dense, domain_size));
changed
}
_ => panic!("impossible"),
}
}
HybridIdxSet::Dense(dense, _) => dense.add(elem),
}
}
/// Removes `elem` from the set `self`.
pub fn remove(&mut self, elem: &T) -> bool {
// Note: we currently don't bother going from Dense back to Sparse.
match self {
HybridIdxSet::Sparse(sparse, _) => sparse.remove(elem),
HybridIdxSet::Dense(dense, _) => dense.remove(elem),
}
}
/// Converts to a dense set, consuming itself in the process.
pub fn to_dense(self) -> IdxSet<T> {
match self {
HybridIdxSet::Sparse(sparse, domain_size) => sparse.to_dense(domain_size),
HybridIdxSet::Dense(dense, _) => dense,
}
}
/// Iteration order is unspecified.
pub fn iter(&self) -> HybridIter<T> {
match self {
HybridIdxSet::Sparse(sparse, _) => HybridIter::Sparse(sparse.iter()),
HybridIdxSet::Dense(dense, _) => HybridIter::Dense(dense.iter()),
}
}
}
impl<T: Idx> UnionIntoIdxSet<T> for HybridIdxSet<T> {
fn union_into(&self, other: &mut IdxSet<T>) -> bool {
match self {
HybridIdxSet::Sparse(sparse, _) => sparse.union_into(other),
HybridIdxSet::Dense(dense, _) => dense.union_into(other),
}
}
}
impl<T: Idx> SubtractFromIdxSet<T> for HybridIdxSet<T> {
fn subtract_from(&self, other: &mut IdxSet<T>) -> bool {
match self {
HybridIdxSet::Sparse(sparse, _) => sparse.subtract_from(other),
HybridIdxSet::Dense(dense, _) => dense.subtract_from(other),
}
}
}
pub enum HybridIter<'a, T: Idx> {
Sparse(SparseIter<'a, T>),
Dense(Iter<'a, T>),
}
impl<'a, T: Idx> Iterator for HybridIter<'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
match self {
HybridIter::Sparse(sparse) => sparse.next(),
HybridIter::Dense(dense) => dense.next(),
}
}
}
#[test]
fn test_trim_to() {
use std::cmp;
for i in 0..256 {
let mut idx_buf: IdxSet<usize> = IdxSet::new_filled(128);
idx_buf.trim_to(i);
let elems: Vec<usize> = idx_buf.iter().collect();
let expected: Vec<usize> = (0..cmp::min(i, 128)).collect();
assert_eq!(elems, expected);
}
}
#[test]
fn test_set_up_to() {
for i in 0..128 {
for mut idx_buf in
vec![IdxSet::new_empty(128), IdxSet::new_filled(128)]
.into_iter()
{
idx_buf.set_up_to(i);
let elems: Vec<usize> = idx_buf.iter().collect();
let expected: Vec<usize> = (0..i).collect(); | assert_eq!(elems, expected);
}
}
} | random_line_split |
|
source_contributions.rs | #[fail(display = "No more retries.")]
NoRetriesLeft,
}
impl From<std::io::Error> for AppError {
fn from(err: std::io::Error) -> AppError {
AppError::IOError(err)
}
}
impl From<std::env::VarError> for AppError {
fn from(err: std::env::VarError) -> AppError {
AppError::GithubTokenNotFound(err)
}
}
impl From<reqwest::Error> for AppError {
fn from(err: reqwest::Error) -> AppError {
AppError::GithubAPIRequestFailed(err)
}
}
impl From<reqwest::UrlError> for AppError {
fn from(err: reqwest::UrlError) -> AppError {
AppError::GithubUrlParsingFailed(err)
}
}
// The order of the fields must be the same of the input file.
#[derive(Debug)]
struct Project<'a> {
id: u32,
platform: &'a str,
project_name: &'a str,
repository_url: &'a str,
repository_fork: bool,
repository_display_name: &'a str,
}
struct Retries {
retries_num: u8,
}
impl Retries {
fn new(retries_num: u8) -> Self {
Retries { retries_num }
}
}
#[derive(Debug, Deserialize)]
struct GithubContribution {
total: u64,
author: GithubUser,
weeks: Vec<GithubWeek>,
}
#[derive(Debug, Deserialize)]
struct GithubWeek {
// Unix timestamp of the beginning of this week.
w: u64,
}
#[derive(Debug, Deserialize)]
struct GithubUser {
login: String,
id: u64,
}
type UniqueProjects = HashSet<String>;
/// Calls the Github API using the given HttpMethod and url_path. Due to the
/// fact some endpoints like the statistics one use cached information and
/// might return a 202 with an empty JSON as the stats are computed, we need
/// to wait a little bit and retry, up to a certain number of times.
fn call_github<T>(
http_client: &Client,
http_method: HttpMethod,
token: &str,
url_path: &str,
retries: Retries,
) -> Result<T, AppError>
where
T: DeserializeOwned,
{
let retries_left = retries.retries_num;
if retries_left == 0 {
Err(AppError::NoRetriesLeft)
} else {
let bearer = format!("Bearer {}", token);
match http_method {
HttpMethod::Get => {
let url: Url = format!("{}{}", GITHUB_BASE_URL, url_path)
.as_str()
.parse()?;
let mut res = http_client
.get(url)
.header(reqwest::header::AUTHORIZATION, bearer.as_str())
.send()?;
match res.status() {
reqwest::StatusCode::OK => res
.json()
.or_else(|e| Err(AppError::DeserialisationFailure(e))),
// Github needs a bit more time to compute the stats.
// We retry.
reqwest::StatusCode::ACCEPTED => {
println!("Retrying, only {} retries left ...", retries_left);
thread::sleep(time::Duration::from_secs(1));
call_github(
http_client,
http_method,
token,
url_path,
Retries::new(retries_left - 1),
)
}
err => {
let body = res.text()?;
Err(AppError::GithubAPINotOK(err, body))
}
}
}
}
}
}
fn deserialise_project(sr: &StringRecord) -> Option<Project> {
if let Some(Ok(pid)) = sr.get(0).map(|s: &str| s.parse::<u32>()) {
let platform = sr.get(1);
let project_name = sr.get(2);
let repository_url = sr.get(9);
let repository_fork = sr.get(24).and_then(|s: &str| match s {
"0" => Some(false),
"1" => Some(true),
"t" => Some(true),
"f" => Some(false),
"true" => Some(true),
"false" => Some(false),
_ => None,
});
let repository_display_name = sr.get(54);
match (
platform,
project_name,
repository_url,
repository_fork,
repository_display_name,
) {
(Some(pl), Some(pn), Some(ru), Some(rf), Some(dn)) => Some(Project {
id: pid,
platform: pl,
project_name: pn,
repository_url: ru,
repository_fork: rf,
repository_display_name: dn,
}),
_ => None,
}
} else {
None
}
}
fn source_contributors(
github_token: &str,
path: &str,
platform: &str,
resume_from: Option<&str>,
) -> Result<(), AppError> {
let projects_file = File::open(path)?;
// Build the CSV reader and iterate over each record.
let mut rdr = csv::ReaderBuilder::new()
.flexible(true)
.from_reader(projects_file);
let mut contributions = OpenOptions::new()
.append(resume_from.is_some())
.write(resume_from.is_none())
.create_new(resume_from.is_none()) // Allow re-opening if we need to resume.
.open(format!("data/{}_contributions.csv", platform.to_lowercase()).as_str())?;
let mut unique_projects = HashSet::new();
let http_client = reqwest::Client::new();
let mut skip_resumed_record = resume_from.is_some();
//Write the header (if we are not resuming)
if resume_from.is_none() {
contributions.write_all(b"ID,MAINTAINER,REPO,CONTRIBUTIONS,NAME\n")?;
}
for result in rdr
.records()
.filter_map(|e| e.ok())
.filter(by_platform(platform))
.skip_while(resumes(resume_from))
{
// As we cannot know which is the /next/ element we need to process
// and we are resuming from the last (known) one, we need to skip it
// in order to not create a dupe.
if skip_resumed_record {
skip_resumed_record = false;
continue;
}
if let Some(project) = deserialise_project(&result) {
extract_contribution(
&http_client,
&mut contributions,
&mut unique_projects,
project,
github_token,
)?;
}
}
Ok(())
}
fn | (repo_url: &str) -> Result<(&str, &str), AppError> {
match repo_url.split('/').collect::<Vec<&str>>().as_slice() {
[_, "", "github.com", owner, repo] => Ok((owner, repo)),
_ => Err(AppError::MetadataExtractionFailed {
repo_url: repo_url.to_string(),
}),
}
}
// Extract the contribution relative to this project. For now only GitHub is
// supported.
fn extract_contribution(
http_client: &Client,
contributions: &mut File,
unique_projects: &mut UniqueProjects,
project: Project,
auth_token: &str,
) -> Result<(), AppError> {
// If this is an authentic project and not a fork, proceed.
if !project.repository_fork && unique_projects.get(project.project_name) == None {
match extract_github_owner_and_repo(project.repository_url) {
Err(err) => {
println!("Skipping {} due to {:#?}", &project.repository_url, err);
Ok(())
}
Ok((owner, name)) => {
unique_projects.insert(String::from(project.project_name));
println!("Processing {} ({}/{})", project.project_name, owner, name);
let res: Result<Vec<GithubContribution>, AppError> = call_github(
&http_client,
HttpMethod::Get,
auth_token,
format!("/repos/{}/{}/stats/contributors", owner, name).as_str(),
Retries::new(5),
);
match res {
Err(err) => {
println!("Skipping {} due to {:#?}", &project.repository_url, err);
}
Ok(stats) => {
let stats_len = stats.len();
for contribution in stats {
if is_maintainer(&owner, &contribution, stats_len) {
contributions.write_all(
format!(
"{},github@{},{},{},{}\n",
project.id,
contribution.author.login,
project.repository_url,
contribution.total,
project.project_name
)
.as_bytes(),
)?;
}
}
}
}
// Wait 800 ms to not overload Github and not hit the quota limit.
// GH allows us 5000 requests per hour. If we wait 800ms, we
// aim for the theoretical limit, while preserving a certain
// slack.
let delay = time::Duration::from_millis(800);
thread::sleep(delay);
Ok(())
}
}
} else {
Ok(())
}
}
// FIXME(adn) Totally arbitrary choice: consider a maintainer
// for a project a user that has been contributed for more
// than 6 months. Furthermore, it needs to have a somewhat | extract_github_owner_and_repo | identifier_name |
source_contributions.rs | #[fail(display = "No more retries.")]
NoRetriesLeft,
}
impl From<std::io::Error> for AppError {
fn from(err: std::io::Error) -> AppError {
AppError::IOError(err)
}
}
impl From<std::env::VarError> for AppError {
fn from(err: std::env::VarError) -> AppError {
AppError::GithubTokenNotFound(err)
}
}
impl From<reqwest::Error> for AppError {
fn from(err: reqwest::Error) -> AppError {
AppError::GithubAPIRequestFailed(err)
}
}
impl From<reqwest::UrlError> for AppError {
fn from(err: reqwest::UrlError) -> AppError {
AppError::GithubUrlParsingFailed(err)
}
}
// The order of the fields must be the same of the input file.
#[derive(Debug)]
struct Project<'a> {
id: u32,
platform: &'a str,
project_name: &'a str,
repository_url: &'a str,
repository_fork: bool,
repository_display_name: &'a str,
}
struct Retries {
retries_num: u8,
}
impl Retries {
fn new(retries_num: u8) -> Self {
Retries { retries_num }
}
}
#[derive(Debug, Deserialize)]
struct GithubContribution {
total: u64,
author: GithubUser,
weeks: Vec<GithubWeek>,
}
#[derive(Debug, Deserialize)]
struct GithubWeek {
// Unix timestamp of the beginning of this week.
w: u64,
}
#[derive(Debug, Deserialize)]
struct GithubUser {
login: String,
id: u64,
}
type UniqueProjects = HashSet<String>;
/// Calls the Github API using the given HttpMethod and url_path. Due to the
/// fact some endpoints like the statistics one use cached information and
/// might return a 202 with an empty JSON as the stats are computed, we need
/// to wait a little bit and retry, up to a certain number of times.
fn call_github<T>(
http_client: &Client,
http_method: HttpMethod,
token: &str,
url_path: &str,
retries: Retries,
) -> Result<T, AppError>
where
T: DeserializeOwned,
| // We retry.
reqwest::StatusCode::ACCEPTED => {
println!("Retrying, only {} retries left ...", retries_left);
thread::sleep(time::Duration::from_secs(1));
call_github(
http_client,
http_method,
token,
url_path,
Retries::new(retries_left - 1),
)
}
err => {
let body = res.text()?;
Err(AppError::GithubAPINotOK(err, body))
}
}
}
}
}
}
fn deserialise_project(sr: &StringRecord) -> Option<Project> {
if let Some(Ok(pid)) = sr.get(0).map(|s: &str| s.parse::<u32>()) {
let platform = sr.get(1);
let project_name = sr.get(2);
let repository_url = sr.get(9);
let repository_fork = sr.get(24).and_then(|s: &str| match s {
"0" => Some(false),
"1" => Some(true),
"t" => Some(true),
"f" => Some(false),
"true" => Some(true),
"false" => Some(false),
_ => None,
});
let repository_display_name = sr.get(54);
match (
platform,
project_name,
repository_url,
repository_fork,
repository_display_name,
) {
(Some(pl), Some(pn), Some(ru), Some(rf), Some(dn)) => Some(Project {
id: pid,
platform: pl,
project_name: pn,
repository_url: ru,
repository_fork: rf,
repository_display_name: dn,
}),
_ => None,
}
} else {
None
}
}
fn source_contributors(
github_token: &str,
path: &str,
platform: &str,
resume_from: Option<&str>,
) -> Result<(), AppError> {
let projects_file = File::open(path)?;
// Build the CSV reader and iterate over each record.
let mut rdr = csv::ReaderBuilder::new()
.flexible(true)
.from_reader(projects_file);
let mut contributions = OpenOptions::new()
.append(resume_from.is_some())
.write(resume_from.is_none())
.create_new(resume_from.is_none()) // Allow re-opening if we need to resume.
.open(format!("data/{}_contributions.csv", platform.to_lowercase()).as_str())?;
let mut unique_projects = HashSet::new();
let http_client = reqwest::Client::new();
let mut skip_resumed_record = resume_from.is_some();
//Write the header (if we are not resuming)
if resume_from.is_none() {
contributions.write_all(b"ID,MAINTAINER,REPO,CONTRIBUTIONS,NAME\n")?;
}
for result in rdr
.records()
.filter_map(|e| e.ok())
.filter(by_platform(platform))
.skip_while(resumes(resume_from))
{
// As we cannot know which is the /next/ element we need to process
// and we are resuming from the last (known) one, we need to skip it
// in order to not create a dupe.
if skip_resumed_record {
skip_resumed_record = false;
continue;
}
if let Some(project) = deserialise_project(&result) {
extract_contribution(
&http_client,
&mut contributions,
&mut unique_projects,
project,
github_token,
)?;
}
}
Ok(())
}
fn extract_github_owner_and_repo(repo_url: &str) -> Result<(&str, &str), AppError> {
match repo_url.split('/').collect::<Vec<&str>>().as_slice() {
[_, "", "github.com", owner, repo] => Ok((owner, repo)),
_ => Err(AppError::MetadataExtractionFailed {
repo_url: repo_url.to_string(),
}),
}
}
// Extract the contribution relative to this project. For now only GitHub is
// supported.
fn extract_contribution(
http_client: &Client,
contributions: &mut File,
unique_projects: &mut UniqueProjects,
project: Project,
auth_token: &str,
) -> Result<(), AppError> {
// If this is an authentic project and not a fork, proceed.
if !project.repository_fork && unique_projects.get(project.project_name) == None {
match extract_github_owner_and_repo(project.repository_url) {
Err(err) => {
println!("Skipping {} due to {:#?}", &project.repository_url, err);
Ok(())
}
Ok((owner, name)) => {
unique_projects.insert(String::from(project.project_name));
println!("Processing {} ({}/{})", project.project_name, owner, name);
let res: Result<Vec<GithubContribution>, AppError> = call_github(
&http_client,
HttpMethod::Get,
auth_token,
format!("/repos/{}/{}/stats/contributors", owner, name).as_str(),
Retries::new(5),
);
match res {
Err(err) => {
println!("Skipping {} due to {:#?}", &project.repository_url, err);
}
Ok(stats) => {
let stats_len = stats.len();
for contribution in stats {
if is_maintainer(&owner, &contribution, stats_len) {
contributions.write_all(
format!(
"{},github@{},{},{},{}\n",
project.id,
contribution.author.login,
project.repository_url,
contribution.total,
project.project_name
)
.as_bytes(),
)?;
}
}
}
}
// Wait 800 ms to not overload Github and not hit the quota limit.
// GH allows us 5000 requests per hour. If we wait 800ms, we
// aim for the theoretical limit, while preserving a certain
// slack.
let delay = time::Duration::from_millis(800);
thread::sleep(delay);
Ok(())
}
}
} else {
Ok(())
}
}
// FIXME(adn) Totally arbitrary choice: consider a maintainer
// for a project a user that has been contributed for more
// than 6 months. Furthermore, it needs to have a somewhat steady | {
let retries_left = retries.retries_num;
if retries_left == 0 {
Err(AppError::NoRetriesLeft)
} else {
let bearer = format!("Bearer {}", token);
match http_method {
HttpMethod::Get => {
let url: Url = format!("{}{}", GITHUB_BASE_URL, url_path)
.as_str()
.parse()?;
let mut res = http_client
.get(url)
.header(reqwest::header::AUTHORIZATION, bearer.as_str())
.send()?;
match res.status() {
reqwest::StatusCode::OK => res
.json()
.or_else(|e| Err(AppError::DeserialisationFailure(e))),
// Github needs a bit more time to compute the stats. | identifier_body |
source_contributions.rs | #[fail(display = "No more retries.")]
NoRetriesLeft,
}
impl From<std::io::Error> for AppError {
fn from(err: std::io::Error) -> AppError {
AppError::IOError(err)
}
}
impl From<std::env::VarError> for AppError {
fn from(err: std::env::VarError) -> AppError {
AppError::GithubTokenNotFound(err)
}
}
impl From<reqwest::Error> for AppError {
fn from(err: reqwest::Error) -> AppError {
AppError::GithubAPIRequestFailed(err)
}
}
impl From<reqwest::UrlError> for AppError {
fn from(err: reqwest::UrlError) -> AppError {
AppError::GithubUrlParsingFailed(err)
}
}
// The order of the fields must be the same of the input file.
#[derive(Debug)]
struct Project<'a> {
id: u32,
platform: &'a str,
project_name: &'a str,
repository_url: &'a str,
repository_fork: bool,
repository_display_name: &'a str,
}
struct Retries {
retries_num: u8,
}
impl Retries {
fn new(retries_num: u8) -> Self {
Retries { retries_num }
}
}
#[derive(Debug, Deserialize)]
struct GithubContribution {
total: u64,
author: GithubUser,
weeks: Vec<GithubWeek>,
}
#[derive(Debug, Deserialize)]
struct GithubWeek {
// Unix timestamp of the beginning of this week.
w: u64,
}
#[derive(Debug, Deserialize)]
struct GithubUser {
login: String,
id: u64,
}
type UniqueProjects = HashSet<String>;
/// Calls the Github API using the given HttpMethod and url_path. Due to the
/// fact some endpoints like the statistics one use cached information and
/// might return a 202 with an empty JSON as the stats are computed, we need
/// to wait a little bit and retry, up to a certain number of times.
fn call_github<T>(
http_client: &Client,
http_method: HttpMethod,
token: &str,
url_path: &str,
retries: Retries,
) -> Result<T, AppError>
where
T: DeserializeOwned,
{
let retries_left = retries.retries_num;
if retries_left == 0 {
Err(AppError::NoRetriesLeft)
} else {
let bearer = format!("Bearer {}", token);
match http_method {
HttpMethod::Get => {
let url: Url = format!("{}{}", GITHUB_BASE_URL, url_path)
.as_str()
.parse()?;
let mut res = http_client
.get(url)
.header(reqwest::header::AUTHORIZATION, bearer.as_str())
.send()?;
match res.status() {
reqwest::StatusCode::OK => res
.json()
.or_else(|e| Err(AppError::DeserialisationFailure(e))),
// Github needs a bit more time to compute the stats.
// We retry.
reqwest::StatusCode::ACCEPTED => {
println!("Retrying, only {} retries left ...", retries_left);
thread::sleep(time::Duration::from_secs(1));
call_github(
http_client,
http_method,
token,
url_path,
Retries::new(retries_left - 1),
)
}
err => {
let body = res.text()?;
Err(AppError::GithubAPINotOK(err, body))
}
}
}
}
}
}
fn deserialise_project(sr: &StringRecord) -> Option<Project> {
if let Some(Ok(pid)) = sr.get(0).map(|s: &str| s.parse::<u32>()) {
let platform = sr.get(1);
let project_name = sr.get(2);
let repository_url = sr.get(9);
let repository_fork = sr.get(24).and_then(|s: &str| match s {
"0" => Some(false),
"1" => Some(true),
"t" => Some(true),
"f" => Some(false),
"true" => Some(true),
"false" => Some(false),
_ => None,
});
let repository_display_name = sr.get(54);
match (
platform,
project_name,
repository_url,
repository_fork,
repository_display_name,
) {
(Some(pl), Some(pn), Some(ru), Some(rf), Some(dn)) => Some(Project {
id: pid,
platform: pl,
project_name: pn,
repository_url: ru,
repository_fork: rf,
repository_display_name: dn,
}),
_ => None,
}
} else {
None
}
}
fn source_contributors(
github_token: &str,
path: &str,
platform: &str,
resume_from: Option<&str>,
) -> Result<(), AppError> {
let projects_file = File::open(path)?;
// Build the CSV reader and iterate over each record.
let mut rdr = csv::ReaderBuilder::new()
.flexible(true)
.from_reader(projects_file);
let mut contributions = OpenOptions::new()
.append(resume_from.is_some())
.write(resume_from.is_none())
.create_new(resume_from.is_none()) // Allow re-opening if we need to resume.
.open(format!("data/{}_contributions.csv", platform.to_lowercase()).as_str())?;
let mut unique_projects = HashSet::new();
let http_client = reqwest::Client::new();
let mut skip_resumed_record = resume_from.is_some();
//Write the header (if we are not resuming)
if resume_from.is_none() {
contributions.write_all(b"ID,MAINTAINER,REPO,CONTRIBUTIONS,NAME\n")?;
}
for result in rdr
.records()
.filter_map(|e| e.ok())
.filter(by_platform(platform))
.skip_while(resumes(resume_from))
{
// As we cannot know which is the /next/ element we need to process
// and we are resuming from the last (known) one, we need to skip it
// in order to not create a dupe.
if skip_resumed_record {
skip_resumed_record = false;
continue;
}
if let Some(project) = deserialise_project(&result) {
extract_contribution(
&http_client,
&mut contributions,
&mut unique_projects,
project,
github_token,
)?;
}
}
Ok(())
}
| match repo_url.split('/').collect::<Vec<&str>>().as_slice() {
[_, "", "github.com", owner, repo] => Ok((owner, repo)),
_ => Err(AppError::MetadataExtractionFailed {
repo_url: repo_url.to_string(),
}),
}
}
// Extract the contribution relative to this project. For now only GitHub is
// supported.
fn extract_contribution(
http_client: &Client,
contributions: &mut File,
unique_projects: &mut UniqueProjects,
project: Project,
auth_token: &str,
) -> Result<(), AppError> {
// If this is an authentic project and not a fork, proceed.
if !project.repository_fork && unique_projects.get(project.project_name) == None {
match extract_github_owner_and_repo(project.repository_url) {
Err(err) => {
println!("Skipping {} due to {:#?}", &project.repository_url, err);
Ok(())
}
Ok((owner, name)) => {
unique_projects.insert(String::from(project.project_name));
println!("Processing {} ({}/{})", project.project_name, owner, name);
let res: Result<Vec<GithubContribution>, AppError> = call_github(
&http_client,
HttpMethod::Get,
auth_token,
format!("/repos/{}/{}/stats/contributors", owner, name).as_str(),
Retries::new(5),
);
match res {
Err(err) => {
println!("Skipping {} due to {:#?}", &project.repository_url, err);
}
Ok(stats) => {
let stats_len = stats.len();
for contribution in stats {
if is_maintainer(&owner, &contribution, stats_len) {
contributions.write_all(
format!(
"{},github@{},{},{},{}\n",
project.id,
contribution.author.login,
project.repository_url,
contribution.total,
project.project_name
)
.as_bytes(),
)?;
}
}
}
}
// Wait 800 ms to not overload Github and not hit the quota limit.
// GH allows us 5000 requests per hour. If we wait 800ms, we
// aim for the theoretical limit, while preserving a certain
// slack.
let delay = time::Duration::from_millis(800);
thread::sleep(delay);
Ok(())
}
}
} else {
Ok(())
}
}
// FIXME(adn) Totally arbitrary choice: consider a maintainer
// for a project a user that has been contributed for more
// than 6 months. Furthermore, it needs to have a somewhat steady | fn extract_github_owner_and_repo(repo_url: &str) -> Result<(&str, &str), AppError> { | random_line_split |
shanten_improve.go | // } else if j < 7 {
// needCheck34[idx-2] = true
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// needCheck34[idx+2] = true
// } else if j == 7 {
// needCheck34[idx-2] = true
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// } else {
// needCheck34[i | heck34[idx] = true
// }
// }
//}
//for i := 27; i < 34; i++ {
// if tiles34[i] > 0 {
// needCheck34[i] = true
// }
//}
waits = Waits{}
for i := 0; i < 34; i++ {
//if !needCheck34[i] {
// continue
//}
if tiles34[i] == 4 {
// 无法摸到这张牌
continue
}
// 摸牌
tiles34[i]++
if newShanten := CalculateShanten(tiles34); newShanten < shanten {
// 向听前进了,则换的这张牌为进张,进张数即剩余枚数
// 有可能为 0,但这对于判断振听是有帮助的,所以记录
waits[i] = leftTiles34[i]
}
tiles34[i]--
}
return
}
// 1/4/7/10/13 张牌,计算向听数、进张、改良等(考虑了剩余枚数)
func CalculateShantenWithImproves13(playerInfo *model.PlayerInfo) (r *WaitsWithImproves13) {
if len(playerInfo.LeftTiles34) == 0 {
playerInfo.FillLeftTiles34()
}
tiles34 := playerInfo.HandTiles34
leftTiles34 := playerInfo.LeftTiles34
shanten13, waits := CalculateShantenAndWaits13(tiles34, leftTiles34)
waitsCount := waits.AllCount()
nextShantenWaitsCountMap := map[int]int{} // map[进张牌]听多少张牌
improves := Improves{}
improveWayCount := 0
// 对于每张牌,摸到之后的手牌进张数(如果摸到的是 waits 中的牌,则进张数视作 waitsCount)
maxImproveWaitsCount34 := make([]int, 34)
for i := 0; i < 34; i++ {
maxImproveWaitsCount34[i] = waitsCount // 初始化成基本进张
}
avgAgariRate := 0.0
avgRonPoint := 0.0
ronPointWeight := 0
avgRiichiRonPoint := 0.0
canYaku := make([]bool, maxYakuType)
if len(playerInfo.Melds) == 0 && CountPairsOfTiles34(tiles34)+shanten13 == 6 {
// 对于三向听,除非进张很差才会考虑七对子
if shanten13 == 3 {
if waitsCount <= 21 {
canYaku[YakuChiitoi] = true
}
} else if shanten13 == 1 || shanten13 == 2 {
// 一向听和两向听考虑七对子
canYaku[YakuChiitoi] = true
}
}
fillYakuTypes := func(_shanten13 int, _waits Waits) {
if _shanten13 != 0 {
return
}
// 听牌
for tile, left := range _waits {
if left == 0 {
continue
}
tiles34[tile]++
playerInfo.WinTile = tile
_yakuTypes := FindAllYakuTypes(playerInfo)
for _, t := range _yakuTypes {
canYaku[t] = true
}
tiles34[tile]--
}
}
// 计算可能的役种
fillYakuTypes(shanten13, waits)
for i := 0; i < 34; i++ {
// 从剩余牌中摸牌
if leftTiles34[i] == 0 {
continue
}
leftTiles34[i]--
tiles34[i]++
if _, ok := waits[i]; ok { // 摸到的是进张
maxAgariRate := 0.0 // 摸到此进张后的和率
maxAvgRonPoint := 0.0 // 平均打点
maxAvgRiichiRonPoint := 0.0
for j := 0; j < 34; j++ {
if tiles34[j] == 0 || j == i {
continue
}
// 切牌,然后分析 3k+1 张牌下的手牌情况
// 若这张是5,在只有赤5的情况下才会切赤5(TODO: 考虑赤5骗37)
_isRedFive := playerInfo.IsOnlyRedFive(j)
playerInfo.DiscardTile(j, _isRedFive)
// 向听前进才是正确的切牌
if newShanten13, newWaits := CalculateShantenAndWaits13(tiles34, leftTiles34); newShanten13 < shanten13 {
// 切牌一般切进张最多的
if waitsCount := newWaits.AllCount(); waitsCount > nextShantenWaitsCountMap[i] {
nextShantenWaitsCountMap[i] = waitsCount
}
// 听牌了
if newShanten13 == 0 {
// 听牌一般切和率最高的,TODO: 除非打点更高,比如说听到 dora 上,或者有三色等
_agariRate := CalculateAvgAgariRate(newWaits, playerInfo.DiscardTiles)
if _agariRate >= maxAgariRate {
maxAgariRate = _agariRate
// 计算荣和点数
// TODO: 这里简化了,和率优先,需要更加精细的考量
// TODO: maxAvgRonPoint = CalcAvgRonPoint(playerInfo, newWaits)
// TODO: maxAvgRiichiRonPoint = CalcAvgRiichiRonPoint(playerInfo, newWaits)
}
// 计算可能的役种
fillYakuTypes(newShanten13, newWaits)
}
}
playerInfo.UndoDiscardTile(j, _isRedFive)
}
// 加权:进张牌的剩余枚数*和率
w := leftTiles34[i] + 1
avgAgariRate += maxAgariRate * float64(w)
if maxAvgRonPoint > 0 {
avgRonPoint += maxAvgRonPoint * float64(w)
ronPointWeight += w
}
//fmt.Println(i, maxAvgRiichiRonPoint)
avgRiichiRonPoint += maxAvgRiichiRonPoint * float64(w)
} else { // 摸到的不是进张,但可能有改良
for j := 0; j < 34; j++ {
if tiles34[j] == 0 || j == i {
continue
}
// 切牌,然后分析 3k+1 张牌下的手牌情况
// 若这张是5,在只有赤5的情况下才会切赤5(TODO: 考虑赤5骗37)
_isRedFive := playerInfo.IsOnlyRedFive(j)
playerInfo.DiscardTile(j, _isRedFive)
// 正确的切牌
if newShanten13, improveWaits := CalculateShantenAndWaits13(tiles34, leftTiles34); newShanten13 == shanten13 {
// 若进张数变多,则为改良
if improveWaitsCount := improveWaits.AllCount(); improveWaitsCount > waitsCount {
improveWayCount++
if improveWaitsCount > maxImproveWaitsCount34[i] {
maxImproveWaitsCount34[i] = improveWaitsCount
// improves 选的是进张数最大的改良
improves[i | dx-2] = true
// needCheck34[idx-1] = true
// needC | conditional_block |
shanten_improve.go |
// map[进张牌]向听前进后的进张数(这里让向听前进的切牌选择的是使「向听前进后的进张数最大」的切牌)
NextShantenWaitsCountMap map[int]int
// 向听前进后的进张数的加权均值
AvgNextShantenWaitsCount float64
// 综合了进张与向听前进后进张的评分
MixedWaitsScore float64
// 改良:摸到这张牌虽不能让向听数前进,但可以让进张变多
// len(Improves) 即为改良的牌的种数
Improves Improves
// 改良情况数,这里计算的是有多少种使进张增加的切牌方式
ImproveWayCount int
// 在没有摸到进张时的改良后进张数的加权均值(计算时,对于既不是进张也不是改良的牌,其进张数为 Waits.AllCount())
// 这里只考虑一巡的改良均值
// TODO: 在考虑改良的情况下,如何计算向听前进所需要的摸牌次数的期望值?
AvgImproveWaitsCount float64
// 向听前进后,若听牌,其最大和率的加权均值
// 若已听牌,则该值为当前手牌和率
AvgAgariRate float64
// 振听可能率(一向听和听牌时)
FuritenRate float64
// 役种
YakuTypes []int
// 宝牌个数(手牌+副露)
DoraCount int
// 无立直时的荣和打点期望
RonPoint float64
// 立直时的荣和打点期望
RiichiRonPoint float64
// 自摸打点期望
TsumoPoint float64
// TODO: 赤牌改良提醒
}
// 进张和向听前进后进张的评分
// 这里粗略地近似为向听前进两次的概率
func (r *WaitsWithImproves13) mixedWaitsScore() float64 {
if r.Waits.AllCount() == 0 || r.AvgNextShantenWaitsCount == 0 {
return 0
}
leftCount := float64(CountOfTiles34(r.LeftTiles34))
p2 := float64(r.Waits.AllCount()) / leftCount
//p2 := r.AvgImproveWaitsCount / leftCount
p1 := r.AvgNextShantenWaitsCount / leftCount
//if r.AvgAgariRate > 0 {
// p1 = r.AvgAgariRate / 100
//}
p2_, p1_ := 1-p2, 1-p1
const leftTurns = 10.0 // math.Max(5.0, leftCount/4)
sumP2 := p2_ * (1 - math.Pow(p2_, leftTurns)) / p2
sumP1 := p1_ * (1 - math.Pow(p1_, leftTurns)) / p1
result := p2 * p1 * (sumP2 - sumP1) / (p2_ - p1_)
return result * 100
}
// 调试用
func (r *WaitsWithImproves13) String() string {
s := fmt.Sprintf("%d 进张 %s\n%.2f 改良进张 [%d(%d) 种]",
r.Waits.AllCount(),
//r.Waits.AllCount()+r.MeldWaits.AllCount(),
TilesToStrWithBracket(r.Waits.indexes()),
r.AvgImproveWaitsCount,
len(r.Improves),
r.ImproveWayCount,
)
if r.Shanten >= 1 {
mixedScore := r.MixedWaitsScore
//for i := 2; i <= r.Shanten; i++ {
// mixedScore /= 4
//}
s += fmt.Sprintf(" %.2f %s进张(%.2f 综合分)",
r.AvgNextShantenWaitsCount,
NumberToChineseShanten(r.Shanten-1),
mixedScore,
)
}
if r.Shanten >= 0 && r.Shanten <= 1 {
s += fmt.Sprintf("(%.2f%% 参考和率)", r.AvgAgariRate)
if r.FuritenRate > 0 {
if r.FuritenRate < 1 {
s += "[可能振听]"
} else {
s += "[振听]"
}
}
s += YakuTypesWithDoraToStr(r.YakuTypes, r.DoraCount)
}
if r.RonPoint > 0 {
s += fmt.Sprintf("[(默听)荣和%d]", int(math.Round(r.RonPoint)))
}
if r.RiichiRonPoint > 0 {
s += fmt.Sprintf("[立直荣和%d]", int(math.Round(r.RiichiRonPoint)))
}
if r.TsumoPoint > 0 {
s += fmt.Sprintf("[自摸%d]", int(math.Round(r.TsumoPoint)))
}
return s
}
// 1/4/7/10/13 张牌,计算向听数、进张(考虑了剩余枚数)
func CalculateShantenAndWaits13(tiles34 []int, leftTiles34 []int) (shanten int, waits Waits) {
if len(leftTiles34) == 0 {
leftTiles34 = InitLeftTiles34WithTiles34(tiles34)
}
shanten = CalculateShanten(tiles34)
// 剪枝:检测非浮牌,在不考虑国士无双的情况下,这种牌是不可能让向听数前进的(但有改良的可能,不过 CalculateShantenAndWaits13 函数不考虑这个)
// 此处优化提升了约 30% 的性能
//needCheck34 := make([]bool, 34)
//idx := -1
//for i := 0; i < 3; i++ {
// for j := 0; j < 9; j++ {
// idx++
// if tiles34[idx] == 0 {
// continue
// }
// if j == 0 {
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// needCheck34[idx+2] = true
// } else if j == 1 {
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// needCheck34[idx+2] = true
// } else if j < 7 {
// needCheck34[idx-2] = true
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// needCheck34[idx+2] = true
// } else if j == 7 {
// needCheck34[idx-2] = true
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// } else {
// needCheck34[idx-2] = true
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// }
// }
//}
//for i := 27; i < 34; i++ {
// if tiles34[i] > 0 {
// needCheck34[i] = true
// }
//}
waits = Waits{}
for i := 0; i < 34; i++ {
//if !needCheck34[i] {
// continue
//}
if tiles34[i] == 4 {
// 无法摸到这张牌
continue
}
// 摸牌
tiles34[i]++
if newShanten := CalculateShanten(tiles34); newShanten < shanten {
// 向听前进了,则换的这张牌为进张,进张数即剩余枚数
// 有可能为 0,但这对于判断振听是有帮助的, | // 若某个进张牌 4 枚都可见,则该进张的 value 值为 0
Waits Waits
// TODO: 鸣牌进张:他家打出这张牌,可以鸣牌,且能让向听数前进
//MeldWaits Waits | random_line_split |
|
shanten_improve.go | // } else if j < 7 {
// needCheck34[idx-2] = true
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// needCheck34[idx+2] = true
// } else if j == 7 {
// needCheck34[idx-2] = true
// needC | needCheck34[idx] = true
// needCheck34[idx+1] = true
// } else {
// needCheck34[idx-2] = true
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// }
// }
//}
//for i := 27; i < 34; i++ {
// if tiles34[i] > 0 {
// needCheck34[i] = true
// }
//}
waits = Waits{}
for i := 0; i < 34; i++ {
//if !needCheck34[i] {
// continue
//}
if tiles34[i] == 4 {
// 无法摸到这张牌
continue
}
// 摸牌
tiles34[i]++
if newShanten := CalculateShanten(tiles34); newShanten < shanten {
// 向听前进了,则换的这张牌为进张,进张数即剩余枚数
// 有可能为 0,但这对于判断振听是有帮助的,所以记录
waits[i] = leftTiles34[i]
}
tiles34[i]--
}
return
}
// 1/4/7/10/13 张牌,计算向听数、进张、改良等(考虑了剩余枚数)
func CalculateShantenWithImproves13(playerInfo *model.PlayerInfo) (r *WaitsWithImproves13) {
if len(playerInfo.LeftTiles34) == 0 {
playerInfo.FillLeftTiles34()
}
tiles34 := playerInfo.HandTiles34
leftTiles34 := playerInfo.LeftTiles34
shanten13, waits := CalculateShantenAndWaits13(tiles34, leftTiles34)
waitsCount := waits.AllCount()
nextShantenWaitsCountMap := map[int]int{} // map[进张牌]听多少张牌
improves := Improves{}
improveWayCount := 0
// 对于每张牌,摸到之后的手牌进张数(如果摸到的是 waits 中的牌,则进张数视作 waitsCount)
maxImproveWaitsCount34 := make([]int, 34)
for i := 0; i < 34; i++ {
maxImproveWaitsCount34[i] = waitsCount // 初始化成基本进张
}
avgAgariRate := 0.0
avgRonPoint := 0.0
ronPointWeight := 0
avgRiichiRonPoint := 0.0
canYaku := make([]bool, maxYakuType)
if len(playerInfo.Melds) == 0 && CountPairsOfTiles34(tiles34)+shanten13 == 6 {
// 对于三向听,除非进张很差才会考虑七对子
if shanten13 == 3 {
if waitsCount <= 21 {
canYaku[YakuChiitoi] = true
}
} else if shanten13 == 1 || shanten13 == 2 {
// 一向听和两向听考虑七对子
canYaku[YakuChiitoi] = true
}
}
fillYakuTypes := func(_shanten13 int, _waits Waits) {
if _shanten13 != 0 {
return
}
// 听牌
for tile, left := range _waits {
if left == 0 {
continue
}
tiles34[tile]++
playerInfo.WinTile = tile
_yakuTypes := FindAllYakuTypes(playerInfo)
for _, t := range _yakuTypes {
canYaku[t] = true
}
tiles34[tile]--
}
}
// 计算可能的役种
fillYakuTypes(shanten13, waits)
for i := 0; i < 34; i++ {
// 从剩余牌中摸牌
if leftTiles34[i] == 0 {
continue
}
leftTiles34[i]--
tiles34[i]++
if _, ok := waits[i]; ok { // 摸到的是进张
maxAgariRate := 0.0 // 摸到此进张后的和率
maxAvgRonPoint := 0.0 // 平均打点
maxAvgRiichiRonPoint := 0.0
for j := 0; j < 34; j++ {
if tiles34[j] == 0 || j == i {
continue
}
// 切牌,然后分析 3k+1 张牌下的手牌情况
// 若这张是5,在只有赤5的情况下才会切赤5(TODO: 考虑赤5骗37)
_isRedFive := playerInfo.IsOnlyRedFive(j)
playerInfo.DiscardTile(j, _isRedFive)
// 向听前进才是正确的切牌
if newShanten13, newWaits := CalculateShantenAndWaits13(tiles34, leftTiles34); newShanten13 < shanten13 {
// 切牌一般切进张最多的
if waitsCount := newWaits.AllCount(); waitsCount > nextShantenWaitsCountMap[i] {
nextShantenWaitsCountMap[i] = waitsCount
}
// 听牌了
if newShanten13 == 0 {
// 听牌一般切和率最高的,TODO: 除非打点更高,比如说听到 dora 上,或者有三色等
_agariRate := CalculateAvgAgariRate(newWaits, playerInfo.DiscardTiles)
if _agariRate >= maxAgariRate {
maxAgariRate = _agariRate
// 计算荣和点数
// TODO: 这里简化了,和率优先,需要更加精细的考量
// TODO: maxAvgRonPoint = CalcAvgRonPoint(playerInfo, newWaits)
// TODO: maxAvgRiichiRonPoint = CalcAvgRiichiRonPoint(playerInfo, newWaits)
}
// 计算可能的役种
fillYakuTypes(newShanten13, newWaits)
}
}
playerInfo.UndoDiscardTile(j, _isRedFive)
}
// 加权:进张牌的剩余枚数*和率
w := leftTiles34[i] + 1
avgAgariRate += maxAgariRate * float64(w)
if maxAvgRonPoint > 0 {
avgRonPoint += maxAvgRonPoint * float64(w)
ronPointWeight += w
}
//fmt.Println(i, maxAvgRiichiRonPoint)
avgRiichiRonPoint += maxAvgRiichiRonPoint * float64(w)
} else { // 摸到的不是进张,但可能有改良
for j := 0; j < 34; j++ {
if tiles34[j] == 0 || j == i {
continue
}
// 切牌,然后分析 3k+1 张牌下的手牌情况
// 若这张是5,在只有赤5的情况下才会切赤5(TODO: 考虑赤5骗37)
_isRedFive := playerInfo.IsOnlyRedFive(j)
playerInfo.DiscardTile(j, _isRedFive)
// 正确的切牌
if newShanten13, improveWaits := CalculateShantenAndWaits13(tiles34, leftTiles34); newShanten13 == shanten13 {
// 若进张数变多,则为改良
if improveWaitsCount := improveWaits.AllCount(); improveWaitsCount > waitsCount {
improveWayCount++
if improveWaitsCount > maxImproveWaitsCount34[i] {
maxImproveWaitsCount34[i] = improveWaitsCount
// improves 选的是进张数最大的改良
improves[i] | heck34[idx-1] = true
// | identifier_name |
shanten_improve.go | len(rj.Improves)
//}
//if ri.ImproveWayCount != rj.ImproveWayCount {
// return ri.ImproveWayCount > rj.ImproveWayCount
//}
})
}
func (l *WaitsWithImproves14List) filterOutDiscard(cantDiscardTile int) {
newResults := WaitsWithImproves14List{}
for _, r := range *l {
if r.DiscardTile != cantDiscardTile {
newResults = append(newResults, r)
}
}
*l = newResults
}
func (l WaitsWithImproves14List) addOpenTile(openTiles []int) {
for _, r := range l {
r.OpenTiles = openTiles
}
}
// 2/5/8/11/14 张牌,计算向听数、进张、改良、向听倒退等
func CalculateShantenWithImproves14(playerInfo *model.PlayerInfo) (shanten int, waitsWithImproves WaitsWithImproves14List, incShantenResults WaitsWithImproves14List) {
if len(playerInfo.LeftTiles34) == 0 {
playerInfo.FillLeftTiles34()
}
tiles34 := playerInfo.HandTiles34
shanten = CalculateShanten(tiles34)
for i := 0; i < 34; i++ {
if tiles34[i] == 0 {
continue
}
isRedFive := playerInfo.IsOnlyRedFive(i)
// 切牌,然后分析 3k+1 张牌下的手牌情况
// 若这张是5,在只有赤5的情况下才会切赤5(TODO: 考虑赤5骗37)
playerInfo.DiscardTile(i, isRedFive)
result13 := CalculateShantenWithImproves13(playerInfo)
// 记录切牌后的分析结果
r := &WaitsWithImproves14{
DiscardTile: i,
Result13: result13,
}
if result13.Shanten == shanten {
waitsWithImproves = append(waitsWithImproves, r)
} else {
// 向听倒退
incShantenResults = append(incShantenResults, r)
}
playerInfo.UndoDiscardTile(i, isRedFive)
}
needImprove := func(l []*WaitsWithImproves14) bool {
if len(l) == 0 {
return false
}
shanten := l[0].Result13.Shanten
// 一向听及以下进张优先,改良其次
if shanten <= 1 {
return false
}
maxWaitsCount := 0
for _, r := range l {
maxWaitsCount = MaxInt(maxWaitsCount, r.Result13.Waits.AllCount())
}
// 两向听及以上的七对子考虑改良
return maxWaitsCount <= 6*shanten+3
}
ni := needImprove(waitsWithImproves)
waitsWithImproves.Sort(ni)
ni = needImprove(incShantenResults)
incShantenResults.Sort(ni)
return
}
// 计算最小向听数,鸣牌方式
func calculateMeldShanten(tiles34 []int, calledTile int, isRedFive bool, allowChi bool) (minShanten int, meldCombinations []model.Meld) {
// 是否能碰
if tiles34[calledTile] >= 2 {
meldCombinations = append(meldCombinations, model.Meld{
MeldType: model.MeldTypePon,
Tiles: []int{calledTile, calledTile, calledTile},
SelfTiles: []int{calledTile, calledTile},
CalledTile: calledTile,
RedFiveFromOthers: isRedFive,
})
}
// 是否能吃
if allowChi && calledTile < 27 {
checkChi := func(tileA, tileB int) {
if tiles34[tileA] > 0 && tiles34[tileB] > 0 {
_tiles := []int{tileA, tileB, calledTile}
sort.Ints(_tiles)
meldCombinations = append(meldCombinations, model.Meld{
MeldType: model.MeldTypeChi,
Tiles: _tiles,
SelfTiles: []int{tileA, tileB},
CalledTile: calledTile,
RedFiveFromOthers: isRedFive,
})
}
}
t9 := calledTile % 9
if t9 >= 2 {
checkChi(calledTile-2, calledTile-1)
}
if t9 >= 1 && t9 <= 7 {
checkChi(calledTile-1, calledTile+1)
}
if t9 <= 6 {
checkChi(calledTile+1, calledTile+2)
}
}
// 计算所有鸣牌下的最小向听数
minShanten = 99
for _, c := range meldCombinations {
tiles34[c.SelfTiles[0]]--
tiles34[c.SelfTiles[1]]--
minShanten = MinInt(minShanten, CalculateShanten(tiles34))
tiles34[c.SelfTiles[0]]++
tiles34[c.SelfTiles[1]]++
}
return
}
// TODO 鸣牌的情况判断(待重构)
// 编程时注意他家切掉的这张牌是否算到剩余数中
//if isOpen {
//if newShanten, combinations, shantens := calculateMeldShanten(tiles34, i, true); newShanten < shanten {
// // 向听前进了,说明鸣牌成功,则换的这张牌为鸣牌进张
// // 计算进张数:若能碰则 =剩余数*3,否则 =剩余数
// meldWaits[i] = leftTile - tiles34[i]
// for i, comb := range combinations {
// if comb[0] == comb[1] && shantens[i] == newShanten {
// meldWaits[i] *= 3
// break
// }
// }
//}
//}
// 计算鸣牌下的何切分析
// calledTile 他家出的牌,尝试鸣这张牌
// isRedFive 这张牌是否为赤5
// allowChi 是否允许吃这张牌
func CalculateMeld(playerInfo *model.PlayerInfo, calledTile int, isRedFive bool, allowChi bool) (minShanten int, waitsWithImproves WaitsWithImproves14List, incShantenResults WaitsWithImproves14List) {
if len(playerInfo.LeftTiles34) == 0 {
playerInfo.FillLeftTiles34()
}
minShanten, meldCombinations := calculateMeldShanten(playerInfo.HandTiles34, calledTile, isRedFive, allowChi)
for _, c := range meldCombinations {
// 尝试鸣这张牌
playerInfo.AddMeld(c)
_shanten, _waitsWithImproves, _incShantenResults := CalculateShantenWithImproves14(playerInfo)
playerInfo.UndoAddMeld()
// 去掉现物食替的情况
_waitsWithImproves.filterOutDiscard(calledTile)
_incShantenResults.filterOutDiscard(calledTile)
// 去掉筋食替的情况
if c.MeldType == model.MeldTypeChi {
cannotDiscardTile := -1
if c.SelfTiles[0] < calledTile && c.SelfTiles[1] < calledTile && calledTile%9 >= 3 {
cannotDiscardTile = calledTile - 3
} else if c.SelfTiles[0] > calledTile && c.SelfTiles[1] > calledTile && calledTile%9 <= 5 {
cannotDiscardTile = calledTile + 3
}
if cannotDiscardTile != -1 {
_waitsWithImproves.filterOutDiscard(cannotDiscardTile)
_incShantenResults.f | ilterOutDiscard(cannotDiscardTile)
}
}
// 添加副露信息,用于输出
_waitsWithImproves.addOpenTile(c.SelfTiles)
_incShantenResults.addOpenTile(c.SelfTiles)
// 整理副露结果
if _shanten == minShanten {
waitsWithImproves = append(waitsWithImproves, _waitsWithImproves...)
incShantenResults = append(incShantenResults, _incShantenResults...)
} else if _shanten == minShanten+1 {
incShantenResults = append(incShantenResults, _waitsWithImproves...)
}
}
waitsWithImproves.Sort(false)
incShantenResults.Sort(false)
| identifier_body |
|
pod_driver.go |
// checkAnnotations
// 1. check refs in mount pod annotation
// 2. delete ref that target pod is not found
func (p *PodDriver) checkAnnotations(ctx context.Context, pod *corev1.Pod) error {
// check refs in mount pod, the corresponding pod exists or not
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
delAnnotations := []string{}
var existTargets int
for k, target := range pod.Annotations {
if k == util.GetReferenceKey(target) {
_, exists := p.mit.deletedPods[getPodUid(target)]
if !exists { // only it is not in pod lists can be seen as deleted
// target pod is deleted
delAnnotations = append(delAnnotations, k)
continue
}
existTargets++
}
}
if existTargets != 0 && pod.Annotations[config.DeleteDelayAtKey] != "" {
delAnnotations = append(delAnnotations, config.DeleteDelayAtKey)
}
if len(delAnnotations) != 0 {
// check mount pod reference key, if it is not the latest, return conflict
newPod, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err != nil {
return err
}
if len(util.GetAllRefKeys(*newPod)) != len(util.GetAllRefKeys(*pod)) {
return apierrors.NewConflict(schema.GroupResource{
Group: pod.GroupVersionKind().Group,
Resource: pod.GroupVersionKind().Kind,
}, pod.Name, fmt.Errorf("can not patch pod"))
}
if err := util.DelPodAnnotation(ctx, p.Client, pod, delAnnotations); err != nil {
return err
}
}
if existTargets == 0 && pod.DeletionTimestamp == nil {
var shouldDelay bool
shouldDelay, err := util.ShouldDelay(ctx, pod, p.Client)
if err != nil {
return err
}
if !shouldDelay {
// check mount pod resourceVersion, if it is not the latest, return conflict
newPod, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err != nil {
return err
}
// check mount pod reference key, if it is not none, return conflict
if len(util.GetAllRefKeys(*newPod)) != 0 {
return apierrors.NewConflict(schema.GroupResource{
Group: pod.GroupVersionKind().Group,
Resource: pod.GroupVersionKind().Kind,
}, pod.Name, fmt.Errorf("can not delete pod"))
}
// if there are no refs or after delay time, delete it
klog.V(5).Infof("There are no refs in pod %s annotation, delete it", pod.Name)
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("Delete pod %s error: %v", pod.Name, err)
return err
}
// delete related secret
secretName := pod.Name + "-secret"
klog.V(6).Infof("delete related secret of pod: %s", secretName)
if err := p.Client.DeleteSecret(ctx, secretName, pod.Namespace); err != nil {
klog.V(5).Infof("Delete secret %s error: %v", secretName, err)
}
}
}
return nil
}
// podErrorHandler handles mount pod error status
func (p *PodDriver) podErrorHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// check resource err
if util.IsPodResourceError(pod) {
klog.V(5).Infof("waitUtilMount: Pod %s failed because of resource.", pod.Name)
if util.IsPodHasResource(*pod) {
// if pod is failed because of resource, delete resource and deploy pod again.
_ = util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer)
klog.V(5).Infof("Delete it and deploy again with no resource.")
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("delete po:%s err:%v", pod.Name, err)
return nil
}
isDeleted := false
// wait pod delete for 1min
for {
_, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil {
klog.V(6).Infof("pod %s %s still exists wait.", pod.Name, pod.Namespace)
time.Sleep(time.Microsecond * 500)
continue
}
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded {
break
}
klog.Errorf("get mountPod err:%v", err)
}
if !isDeleted {
klog.Errorf("Old pod %s %s deleting timeout", pod.Name, config.Namespace)
return nil
}
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: pod.Annotations,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
util.DeleteResourceOfPod(newPod)
_, err := p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("create pod:%s err:%v", pod.Name, err)
}
} else {
klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.")
}
}
return nil
}
// podDeletedHandler handles mount pod that will be deleted
func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
klog.Errorf("get nil pod")
return nil
}
klog.V(5).Infof("Pod %s in namespace %s is to be deleted.", pod.Name, pod.Namespace)
// pod with no finalizer
if !util.ContainsString(pod.GetFinalizers(), config.Finalizer) {
// do nothing
return nil
}
// remove finalizer of pod
if err := util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer); err != nil {
klog.Errorf("remove pod finalizer err:%v", err)
return err
}
// pod with resource error
if util.IsPodResourceError(pod) {
klog.V(6).Infof("The pod is PodResourceError, podDeletedHandler skip delete the pod:%s", pod.Name)
return nil
}
// get mount point
sourcePath, _, err := util.GetMountPathOfPod(*pod)
if err != nil {
klog.Error(err)
return nil
}
// check if it needs to create new one
klog.V(6).Infof("Annotations:%v", pod.Annotations)
if pod.Annotations == nil {
return nil
}
annotation := pod.Annotations
existTargets := make(map[string]string)
for k, v := range pod.Annotations {
// annotation is checked in beginning, don't double-check here
if k == util.GetReferenceKey(v) {
existTargets[k] = v
}
}
if len(existTargets) == 0 {
// do not need to create new one, umount
util.UmountPath(ctx, sourcePath)
// clean mount point
err = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
klog.V(5).Infof("Clean mount point : %s", sourcePath)
return mount.CleanupMountPoint(sourcePath, p.SafeFormatAndMount.Interface, false)
})
if err != nil {
klog.Errorf("Clean mount point %s error: %v", sourcePath, err)
}
// cleanup cache should always complete, don't set timeout
go p.CleanUpCache(context.TODO(), pod)
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// create
klog.V(5).Infof("pod targetPath not empty, need create pod:%s", pod.Name)
// check pod delete
for {
po, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil && po.DeletionTimestamp != nil {
klog.V(6).Infof("pod %s %s is being deleted, waiting", pod.Name, pod.Namespace)
time.Sleep(time.Millisecond * 500)
continue
}
if err != nil {
if apierrors.IsTimeout(err | {
if pod == nil {
return podError
}
if pod.DeletionTimestamp != nil {
return podDeleted
}
if util.IsPodError(pod) {
return podError
}
if util.IsPodReady(pod) {
return podReady
}
return podPending
} | identifier_body |
|
pod_driver.go | if err != nil {
klog.Errorf("create pod:%s err:%v", pod.Name, err)
}
} else {
klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.")
}
}
return nil
}
// podDeletedHandler handles mount pod that will be deleted
func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
klog.Errorf("get nil pod")
return nil
}
klog.V(5).Infof("Pod %s in namespace %s is to be deleted.", pod.Name, pod.Namespace)
// pod with no finalizer
if !util.ContainsString(pod.GetFinalizers(), config.Finalizer) {
// do nothing
return nil
}
// remove finalizer of pod
if err := util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer); err != nil {
klog.Errorf("remove pod finalizer err:%v", err)
return err
}
// pod with resource error
if util.IsPodResourceError(pod) {
klog.V(6).Infof("The pod is PodResourceError, podDeletedHandler skip delete the pod:%s", pod.Name)
return nil
}
// get mount point
sourcePath, _, err := util.GetMountPathOfPod(*pod)
if err != nil {
klog.Error(err)
return nil
}
// check if it needs to create new one
klog.V(6).Infof("Annotations:%v", pod.Annotations)
if pod.Annotations == nil {
return nil
}
annotation := pod.Annotations
existTargets := make(map[string]string)
for k, v := range pod.Annotations {
// annotation is checked in beginning, don't double-check here
if k == util.GetReferenceKey(v) {
existTargets[k] = v
}
}
if len(existTargets) == 0 {
// do not need to create new one, umount
util.UmountPath(ctx, sourcePath)
// clean mount point
err = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
klog.V(5).Infof("Clean mount point : %s", sourcePath)
return mount.CleanupMountPoint(sourcePath, p.SafeFormatAndMount.Interface, false)
})
if err != nil {
klog.Errorf("Clean mount point %s error: %v", sourcePath, err)
}
// cleanup cache should always complete, don't set timeout
go p.CleanUpCache(context.TODO(), pod)
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// create
klog.V(5).Infof("pod targetPath not empty, need create pod:%s", pod.Name)
// check pod delete
for {
po, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil && po.DeletionTimestamp != nil {
klog.V(6).Infof("pod %s %s is being deleted, waiting", pod.Name, pod.Namespace)
time.Sleep(time.Millisecond * 500)
continue
}
if err != nil {
if apierrors.IsTimeout(err) {
break
}
if apierrors.IsNotFound(err) {
// umount mount point before recreate mount pod
err := util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
exist, _ := mount.PathExists(sourcePath)
if !exist {
return fmt.Errorf("%s not exist", sourcePath)
}
return nil
})
if err == nil {
klog.Infof("start to umount: %s", sourcePath)
util.UmountPath(ctx, sourcePath)
}
// create pod
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: annotation,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
klog.Infof("Need to create pod %s %s", pod.Name, pod.Namespace)
_, err = p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("Create pod:%s err:%v", pod.Name, err)
}
return nil
}
klog.Errorf("Get pod err:%v", err)
return nil
}
// pod is created elsewhere
if po.Annotations == nil {
po.Annotations = make(map[string]string)
}
for k, v := range existTargets {
// add exist target in annotation
po.Annotations[k] = v
}
if err := util.ReplacePodAnnotation(ctx, p.Client, pod, po.Annotations); err != nil {
klog.Errorf("Update pod %s %s error: %v", po.Name, po.Namespace, err)
}
return err
}
err = fmt.Errorf("old pod %s %s deleting timeout", pod.Name, config.Namespace)
klog.V(5).Infof(err.Error())
return err
}
// podPendingHandler handles mount pod that is pending
func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// check resource err
if util.IsPodResourceError(pod) {
klog.V(5).Infof("waitUtilMount: Pod %s failed because of resource.", pod.Name)
if util.IsPodHasResource(*pod) {
// if pod is failed because of resource, delete resource and deploy pod again.
_ = util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer)
klog.V(5).Infof("Delete it and deploy again with no resource.")
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("delete po:%s err:%v", pod.Name, err)
return nil
}
isDeleted := false
// wait pod delete for 1min
for {
_, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil {
klog.V(6).Infof("pod %s %s still exists wait.", pod.Name, pod.Namespace)
time.Sleep(time.Microsecond * 500)
continue
}
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded {
break
}
klog.Errorf("get mountPod err:%v", err)
}
if !isDeleted {
klog.Errorf("Old pod %s %s deleting timeout", pod.Name, config.Namespace)
return nil
}
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: pod.Annotations,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
util.DeleteResourceOfPod(newPod)
_, err := p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("create pod:%s err:%v", pod.Name, err)
}
} else {
klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.")
}
}
return nil
}
// podReadyHandler handles mount pod that is ready
func (p *PodDriver) podReadyHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
klog.Errorf("[podReadyHandler] get nil pod")
return nil
}
if pod.Annotations == nil {
return nil
}
// get mount point
mntPath, _, err := util.GetMountPathOfPod(*pod)
if err != nil {
klog.Error(err)
return nil
}
e := util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
_, e := os.Stat(mntPath)
return e
})
if e != nil {
klog.Errorf("[podReadyHandler] stat mntPath: %s err: %v, don't do recovery", mntPath, e)
return nil
}
// recovery for each target
for k, target := range pod.Annotations {
if k == util.GetReferenceKey(target) {
mi := p.mit.resolveTarget(target)
if mi == nil {
klog.Errorf("pod %s target %s resolve fail", pod.Name, target)
continue
}
p.recoverTarget(pod.Name, mntPath, mi.baseTarget, mi)
for _, ti := range mi.subPathTarget {
p.recoverTarget(pod.Name, mntPath, ti, mi) | }
}
}
| random_line_split |
|
pod_driver.go | (client *k8sclient.K8sClient, mounter mount.SafeFormatAndMount) *PodDriver {
driver := &PodDriver{
Client: client,
handlers: map[podStatus]podHandler{},
SafeFormatAndMount: mounter,
}
driver.handlers[podReady] = driver.podReadyHandler
driver.handlers[podError] = driver.podErrorHandler
driver.handlers[podPending] = driver.podPendingHandler
driver.handlers[podDeleted] = driver.podDeletedHandler
return driver
}
type podHandler func(ctx context.Context, pod *corev1.Pod) error
type podStatus string
const (
podReady podStatus = "podReady"
podError podStatus = "podError"
podDeleted podStatus = "podDeleted"
podPending podStatus = "podPending"
)
func (p *PodDriver) SetMountInfo(mit mountInfoTable) {
p.mit = mit
}
func (p *PodDriver) Run(ctx context.Context, current *corev1.Pod) error {
// check refs in mount pod annotation first, delete ref that target pod is not found
err := p.checkAnnotations(ctx, current)
if err != nil {
return err
}
podStatus := p.getPodStatus(current)
if podStatus != podError && podStatus != podDeleted {
return p.handlers[podStatus](ctx, current)
}
// resourceVersion of kubelet may be different from apiserver
// so we need get latest pod resourceVersion from apiserver
pod, err := p.Client.GetPod(ctx, current.Name, current.Namespace)
if err != nil {
return err
}
// set mount pod status in mit again, maybe deleted
p.mit.setPodStatus(pod)
return p.handlers[p.getPodStatus(pod)](ctx, pod)
}
// getPodStatus get pod status
func (p *PodDriver) getPodStatus(pod *corev1.Pod) podStatus {
if pod == nil {
return podError
}
if pod.DeletionTimestamp != nil {
return podDeleted
}
if util.IsPodError(pod) {
return podError
}
if util.IsPodReady(pod) {
return podReady
}
return podPending
}
// checkAnnotations
// 1. check refs in mount pod annotation
// 2. delete ref that target pod is not found
func (p *PodDriver) checkAnnotations(ctx context.Context, pod *corev1.Pod) error {
// check refs in mount pod, the corresponding pod exists or not
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
delAnnotations := []string{}
var existTargets int
for k, target := range pod.Annotations {
if k == util.GetReferenceKey(target) {
_, exists := p.mit.deletedPods[getPodUid(target)]
if !exists { // only it is not in pod lists can be seen as deleted
// target pod is deleted
delAnnotations = append(delAnnotations, k)
continue
}
existTargets++
}
}
if existTargets != 0 && pod.Annotations[config.DeleteDelayAtKey] != "" {
delAnnotations = append(delAnnotations, config.DeleteDelayAtKey)
}
if len(delAnnotations) != 0 {
// check mount pod reference key, if it is not the latest, return conflict
newPod, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err != nil {
return err
}
if len(util.GetAllRefKeys(*newPod)) != len(util.GetAllRefKeys(*pod)) {
return apierrors.NewConflict(schema.GroupResource{
Group: pod.GroupVersionKind().Group,
Resource: pod.GroupVersionKind().Kind,
}, pod.Name, fmt.Errorf("can not patch pod"))
}
if err := util.DelPodAnnotation(ctx, p.Client, pod, delAnnotations); err != nil {
return err
}
}
if existTargets == 0 && pod.DeletionTimestamp == nil {
var shouldDelay bool
shouldDelay, err := util.ShouldDelay(ctx, pod, p.Client)
if err != nil {
return err
}
if !shouldDelay {
// check mount pod resourceVersion, if it is not the latest, return conflict
newPod, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err != nil {
return err
}
// check mount pod reference key, if it is not none, return conflict
if len(util.GetAllRefKeys(*newPod)) != 0 {
return apierrors.NewConflict(schema.GroupResource{
Group: pod.GroupVersionKind().Group,
Resource: pod.GroupVersionKind().Kind,
}, pod.Name, fmt.Errorf("can not delete pod"))
}
// if there are no refs or after delay time, delete it
klog.V(5).Infof("There are no refs in pod %s annotation, delete it", pod.Name)
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("Delete pod %s error: %v", pod.Name, err)
return err
}
// delete related secret
secretName := pod.Name + "-secret"
klog.V(6).Infof("delete related secret of pod: %s", secretName)
if err := p.Client.DeleteSecret(ctx, secretName, pod.Namespace); err != nil {
klog.V(5).Infof("Delete secret %s error: %v", secretName, err)
}
}
}
return nil
}
// podErrorHandler handles mount pod error status
func (p *PodDriver) podErrorHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// check resource err
if util.IsPodResourceError(pod) {
klog.V(5).Infof("waitUtilMount: Pod %s failed because of resource.", pod.Name)
if util.IsPodHasResource(*pod) {
// if pod is failed because of resource, delete resource and deploy pod again.
_ = util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer)
klog.V(5).Infof("Delete it and deploy again with no resource.")
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("delete po:%s err:%v", pod.Name, err)
return nil
}
isDeleted := false
// wait pod delete for 1min
for {
_, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil {
klog.V(6).Infof("pod %s %s still exists wait.", pod.Name, pod.Namespace)
time.Sleep(time.Microsecond * 500)
continue
}
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded {
break
}
klog.Errorf("get mountPod err:%v", err)
}
if !isDeleted {
klog.Errorf("Old pod %s %s deleting timeout", pod.Name, config.Namespace)
return nil
}
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: pod.Annotations,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
util.DeleteResourceOfPod(newPod)
_, err := p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("create pod:%s err:%v", pod.Name, err)
}
} else {
klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.")
}
}
return nil
}
// podDeletedHandler handles mount pod that will be deleted
func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
klog.Errorf("get nil pod")
return nil
}
klog.V(5).Infof("Pod %s in namespace %s is to be deleted.", pod.Name, pod.Namespace)
// pod with no finalizer
if !util.ContainsString(pod.GetFinalizers(), config.Finalizer) {
// do nothing
return nil
}
// remove finalizer of pod
if err := util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer); err != nil {
klog.Errorf("remove pod finalizer err:%v", err)
return err
}
// pod with resource error
if util.IsPodResourceError(pod) {
klog.V(6).Infof("The pod is PodResourceError, podDeletedHandler skip delete the pod:%s", pod.Name)
return nil
}
// get mount point
sourcePath, _, err := util.GetMountPathOfPod(*pod | newPodDriver | identifier_name |
|
pod_driver.go | if err != nil {
klog.Errorf("create pod:%s err:%v", pod.Name, err)
}
} else {
klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.")
}
}
return nil
}
// podDeletedHandler handles mount pod that will be deleted
func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
klog.Errorf("get nil pod")
return nil
}
klog.V(5).Infof("Pod %s in namespace %s is to be deleted.", pod.Name, pod.Namespace)
// pod with no finalizer
if !util.ContainsString(pod.GetFinalizers(), config.Finalizer) {
// do nothing
return nil
}
// remove finalizer of pod
if err := util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer); err != nil {
klog.Errorf("remove pod finalizer err:%v", err)
return err
}
// pod with resource error
if util.IsPodResourceError(pod) {
klog.V(6).Infof("The pod is PodResourceError, podDeletedHandler skip delete the pod:%s", pod.Name)
return nil
}
// get mount point
sourcePath, _, err := util.GetMountPathOfPod(*pod)
if err != nil {
klog.Error(err)
return nil
}
// check if it needs to create new one
klog.V(6).Infof("Annotations:%v", pod.Annotations)
if pod.Annotations == nil {
return nil
}
annotation := pod.Annotations
existTargets := make(map[string]string)
for k, v := range pod.Annotations {
// annotation is checked in beginning, don't double-check here
if k == util.GetReferenceKey(v) {
existTargets[k] = v
}
}
if len(existTargets) == 0 {
// do not need to create new one, umount
util.UmountPath(ctx, sourcePath)
// clean mount point
err = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
klog.V(5).Infof("Clean mount point : %s", sourcePath)
return mount.CleanupMountPoint(sourcePath, p.SafeFormatAndMount.Interface, false)
})
if err != nil {
klog.Errorf("Clean mount point %s error: %v", sourcePath, err)
}
// cleanup cache should always complete, don't set timeout
go p.CleanUpCache(context.TODO(), pod)
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// create
klog.V(5).Infof("pod targetPath not empty, need create pod:%s", pod.Name)
// check pod delete
for {
po, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil && po.DeletionTimestamp != nil {
klog.V(6).Infof("pod %s %s is being deleted, waiting", pod.Name, pod.Namespace)
time.Sleep(time.Millisecond * 500)
continue
}
if err != nil {
if apierrors.IsTimeout(err) {
break
}
if apierrors.IsNotFound(err) {
// umount mount point before recreate mount pod
err := util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
exist, _ := mount.PathExists(sourcePath)
if !exist {
return fmt.Errorf("%s not exist", sourcePath)
}
return nil
})
if err == nil {
klog.Infof("start to umount: %s", sourcePath)
util.UmountPath(ctx, sourcePath)
}
// create pod
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: annotation,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
klog.Infof("Need to create pod %s %s", pod.Name, pod.Namespace)
_, err = p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("Create pod:%s err:%v", pod.Name, err)
}
return nil
}
klog.Errorf("Get pod err:%v", err)
return nil
}
// pod is created elsewhere
if po.Annotations == nil {
po.Annotations = make(map[string]string)
}
for k, v := range existTargets {
// add exist target in annotation
po.Annotations[k] = v
}
if err := util.ReplacePodAnnotation(ctx, p.Client, pod, po.Annotations); err != nil {
klog.Errorf("Update pod %s %s error: %v", po.Name, po.Namespace, err)
}
return err
}
err = fmt.Errorf("old pod %s %s deleting timeout", pod.Name, config.Namespace)
klog.V(5).Infof(err.Error())
return err
}
// podPendingHandler handles mount pod that is pending
func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// check resource err
if util.IsPodResourceError(pod) {
klog.V(5).Infof("waitUtilMount: Pod %s failed because of resource.", pod.Name)
if util.IsPodHasResource(*pod) {
// if pod is failed because of resource, delete resource and deploy pod again.
_ = util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer)
klog.V(5).Infof("Delete it and deploy again with no resource.")
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("delete po:%s err:%v", pod.Name, err)
return nil
}
isDeleted := false
// wait pod delete for 1min
for {
_, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil {
klog.V(6).Infof("pod %s %s still exists wait.", pod.Name, pod.Namespace)
time.Sleep(time.Microsecond * 500)
continue
}
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded {
break
}
klog.Errorf("get mountPod err:%v", err)
}
if !isDeleted {
klog.Errorf("Old pod %s %s deleting timeout", pod.Name, config.Namespace)
return nil
}
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: pod.Annotations,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
util.DeleteResourceOfPod(newPod)
_, err := p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("create pod:%s err:%v", pod.Name, err)
}
} else {
klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.")
}
}
return nil
}
// podReadyHandler handles mount pod that is ready
func (p *PodDriver) podReadyHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
klog.Errorf("[podReadyHandler] get nil pod")
return nil
}
if pod.Annotations == nil {
return nil
}
// get mount point
mntPath, _, err := util.GetMountPathOfPod(*pod)
if err != nil {
klog.Error(err)
return nil
}
e := util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
_, e := os.Stat(mntPath)
return e
})
if e != nil {
klog.Errorf("[podReadyHandler] stat mntPath: %s err: %v, don't do recovery", mntPath, e)
return nil
}
// recovery for each target
for k, target := range pod.Annotations {
if k == util.GetReferenceKey(target) | {
mi := p.mit.resolveTarget(target)
if mi == nil {
klog.Errorf("pod %s target %s resolve fail", pod.Name, target)
continue
}
p.recoverTarget(pod.Name, mntPath, mi.baseTarget, mi)
for _, ti := range mi.subPathTarget {
p.recoverTarget(pod.Name, mntPath, ti, mi)
}
} | conditional_block |
|
kitti_sem_data_loader.py | self.get_dataset()
self.K = self.dataset.calib.K_cam0
self.load_extrinsics()
self.get_GroundTruth()
if object_label_status == 'tracklet_label':
self.load_tracklet()
elif object_label_status == 'detection_label':
self.load_detection()
else:
return
# generate path to store groundtruth 3D bounding box
self.generate_gt_bbox_path()
# generate path to store the object 3D IOU results
self.generate_object_eval_path()
def get_GroundTruth(self):
"""
load gt position and orientation
"""
# set first pose to identity
# first_pose = self.dataset.oxts[0].T_w_imu
# first_pose_inv = src.se3.inversePose(first_pose)
# do not correct the orientation
# first_pose_inv[:3, :3] = np.eye(3)
# do not set first pose to identity
first_pose_inv = np.eye(4)
for o in self.dataset.oxts:
normalized_pose_original = first_pose_inv @ o.T_w_imu
self.poses_gt.append(normalized_pose_original)
# gt pose is from I to G
for i, pose in enumerate(self.poses_gt):
# get gt position
gt_position = np.reshape(pose[0:3, 3], (-1, 1))
self.gt_position.append(gt_position)
# get gt orientation
R_wIMU = pose[0:3, 0:3]
self.gt_orientation.append(R_wIMU)
def get_dataset(self):
"""
load kitti dataset using pykitti
"""
self.dataset = pykitti.raw(self.kitti_dataset_path, self.kitti_date, self.kitti_drive, frames = range(self.start_index, self.end_index, 1))
LOGGER.info('Drive: ' + str(self.dataset.drive))
LOGGER.info('Frame range: ' + str(self.dataset.frames))
def load_extrinsics(self):
# cam to imu T
T_camvelo = self.dataset.calib.T_cam0_velo
T_veloimu = self.dataset.calib.T_velo_imu
# T_cam0_imu Take a vector from IMU frame to the cam0 frame.
# refer to https://github.com/utiasSTARS/pykitti
# point_velo = np.array([0,0,0,1])
# point_cam0 = data.calib.T_cam0_velo.dot(point_velo)
T_cam0_imu = np.matmul(T_camvelo, T_veloimu)
self.oTi = T_cam0_imu
self.iTo = se3.inversePose(self.oTi)
# add vel to imu transformation
self.iTv = se3.inversePose(self.dataset.calib.T_velo_imu)
self.o2Tv = self.dataset.calib.T_cam2_velo
def generate_gt_bbox_path(self):
|
def generate_object_eval_path(self):
self.pr_table_dir = self.cache_path + self.kitti_dir + '/evaluation/'
if not os.path.exists(self.pr_table_dir):
os.makedirs(self.pr_table_dir)
def load_tracklet(self):
"""
# load tracklet
# to show 3D bounding box
# need to use the groundtruth trajectory
# P.S. here we average all the tracklets for one object,
# if it is moving then it is not accurate
"""
tracklet_all = parseTrackletXML.parseXML(self.tracklet_xml_path)
for i, tracklet in enumerate(tracklet_all):
h, w, l = tracklet.size
if tracklet.objectType not in ["Car", "Van", "Truck"]:
continue
trackletBox = np.array([
[-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2],
[w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2],
[0, 0, 0, 0, h, h, h, h]])
corner_sublist = []
for translation, rotation, state, occlusion, truncation, amtOcclusion, \
amtBorders, absoluteFrameNumber in tracklet:
# determine if object is in the image; otherwise continue
if truncation not in (parseTrackletXML.TRUNC_IN_IMAGE, parseTrackletXML.TRUNC_TRUNCATED):
continue
# print("translation {}".format(translation))
# re-create 3D bounding box in velodyne coordinate system
yaw = rotation[2] # other rotations are supposedly 0
assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!'
# transform from camera frame to world frame
FN = absoluteFrameNumber
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
# object to velodyne transform
vTq = np.array([[np.cos(yaw), -np.sin(yaw), 0.0, translation[0]],
[np.sin(yaw), np.cos(yaw), 0.0, translation[1]],
[0.0, 0.0, 1.0, translation[2]],
[0.0, 0.0, 0.0, 1.0]])
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from G to I
wTi[:3, :3] = wRi
wTi[:3, 3] = np.squeeze(self.gt_position[FN])
wTq = wTi @ self.iTv @ vTq
# force only yaw and x,y translation
wTq = utils.poseSE32SE2(wTq)
cornerPosInVelo = wTq[:3, :3] @ trackletBox + np.tile(wTq[:3, 3], (8, 1)).T
corner_sublist.append(cornerPosInVelo)
cornerInVelo = vTq[:3, :3] @ trackletBox + np.tile(vTq[:3, 3], (8, 1)).T
cornerInCam2 = self.o2Tv @ np.vstack((cornerInVelo, np.ones((1, 8))))
cornerInCam2 = np.eye(3) @ cornerInCam2[:3, :]
# used for per frame IOU evaluation
if FN not in self.local_cuboid_dict.keys():
self.local_cuboid_dict[FN] = [cornerInCam2.T]
self.local_volume_dict[FN] = [h * w * l]
self.local_yaw_dict[FN] = [yaw]
self.local_hwl_dict[FN] = [[h, w, l]]
else:
self.local_cuboid_dict[FN].append(cornerInCam2.T)
self.local_volume_dict[FN].append(h * w * l)
self.local_yaw_dict[FN].append(yaw)
self.local_hwl_dict[FN].append([h, w, l])
if len(corner_sublist) > 0:
# for plotting
corner_sublist = np.concatenate([corner_sublist], axis=0)
corner_sub = np.mean(corner_sublist, axis=0)
self.corner_list.append(corner_sub)
# for 3D IOU eval
# for global cuboids
self.cuboid_list.append(np.mean(np.array(corner_sublist), axis=0).T)
self.volume_list.append(h * w * l)
self.cuboid_list = np.array(self.cuboid_list)
self.volume_list = np.array(self.volume_list)
def load_detection(self):
"""
load object bounding box labels in detection benchmark
"""
root_dir = self.kitti_dataset_path + 'object/'
kitti_det_loader = kitti_detection_helper.KittiDataset(root_dir,
self.kitti_date, self.kitti_drive)
type_list = ['Car', 'Van', 'Truck']
# some of the bbox are the same one
# need to compute average bbox
for id, object_3d_list in enumerate(kitti_det_loader.all_object_3d):
for object_3d in object_3d_list:
corner_sublist = []
if object_3d.cls_type not in type_list:
continue
trackletBox, oTq, yaw = object_3d.generate_corners3d()
FN = kitti_det_loader.img_idx_list[id]
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from | self.gt_bbox_results_path = self.cache_path + self.kitti_dir + '/gt_bboxes_results/'
if not os.path.exists(self.gt_bbox_results_path):
os.makedirs(self.gt_bbox_results_path) | identifier_body |
kitti_sem_data_loader.py | self.get_dataset()
self.K = self.dataset.calib.K_cam0
self.load_extrinsics()
self.get_GroundTruth()
if object_label_status == 'tracklet_label':
self.load_tracklet()
elif object_label_status == 'detection_label':
self.load_detection()
else:
return
# generate path to store groundtruth 3D bounding box
self.generate_gt_bbox_path()
# generate path to store the object 3D IOU results
self.generate_object_eval_path()
def get_GroundTruth(self):
"""
load gt position and orientation
"""
# set first pose to identity
# first_pose = self.dataset.oxts[0].T_w_imu
# first_pose_inv = src.se3.inversePose(first_pose)
# do not correct the orientation
# first_pose_inv[:3, :3] = np.eye(3)
# do not set first pose to identity
first_pose_inv = np.eye(4)
for o in self.dataset.oxts:
normalized_pose_original = first_pose_inv @ o.T_w_imu
self.poses_gt.append(normalized_pose_original)
# gt pose is from I to G
for i, pose in enumerate(self.poses_gt):
# get gt position
gt_position = np.reshape(pose[0:3, 3], (-1, 1))
self.gt_position.append(gt_position)
# get gt orientation
R_wIMU = pose[0:3, 0:3]
self.gt_orientation.append(R_wIMU)
def get_dataset(self):
"""
load kitti dataset using pykitti
"""
self.dataset = pykitti.raw(self.kitti_dataset_path, self.kitti_date, self.kitti_drive, frames = range(self.start_index, self.end_index, 1))
LOGGER.info('Drive: ' + str(self.dataset.drive))
LOGGER.info('Frame range: ' + str(self.dataset.frames))
def load_extrinsics(self):
# cam to imu T
T_camvelo = self.dataset.calib.T_cam0_velo
T_veloimu = self.dataset.calib.T_velo_imu
# T_cam0_imu Take a vector from IMU frame to the cam0 frame.
# refer to https://github.com/utiasSTARS/pykitti
# point_velo = np.array([0,0,0,1])
# point_cam0 = data.calib.T_cam0_velo.dot(point_velo)
T_cam0_imu = np.matmul(T_camvelo, T_veloimu)
self.oTi = T_cam0_imu
self.iTo = se3.inversePose(self.oTi)
# add vel to imu transformation
self.iTv = se3.inversePose(self.dataset.calib.T_velo_imu)
self.o2Tv = self.dataset.calib.T_cam2_velo
def generate_gt_bbox_path(self):
self.gt_bbox_results_path = self.cache_path + self.kitti_dir + '/gt_bboxes_results/'
if not os.path.exists(self.gt_bbox_results_path):
os.makedirs(self.gt_bbox_results_path)
def | (self):
self.pr_table_dir = self.cache_path + self.kitti_dir + '/evaluation/'
if not os.path.exists(self.pr_table_dir):
os.makedirs(self.pr_table_dir)
def load_tracklet(self):
"""
# load tracklet
# to show 3D bounding box
# need to use the groundtruth trajectory
# P.S. here we average all the tracklets for one object,
# if it is moving then it is not accurate
"""
tracklet_all = parseTrackletXML.parseXML(self.tracklet_xml_path)
for i, tracklet in enumerate(tracklet_all):
h, w, l = tracklet.size
if tracklet.objectType not in ["Car", "Van", "Truck"]:
continue
trackletBox = np.array([
[-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2],
[w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2],
[0, 0, 0, 0, h, h, h, h]])
corner_sublist = []
for translation, rotation, state, occlusion, truncation, amtOcclusion, \
amtBorders, absoluteFrameNumber in tracklet:
# determine if object is in the image; otherwise continue
if truncation not in (parseTrackletXML.TRUNC_IN_IMAGE, parseTrackletXML.TRUNC_TRUNCATED):
continue
# print("translation {}".format(translation))
# re-create 3D bounding box in velodyne coordinate system
yaw = rotation[2] # other rotations are supposedly 0
assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!'
# transform from camera frame to world frame
FN = absoluteFrameNumber
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
# object to velodyne transform
vTq = np.array([[np.cos(yaw), -np.sin(yaw), 0.0, translation[0]],
[np.sin(yaw), np.cos(yaw), 0.0, translation[1]],
[0.0, 0.0, 1.0, translation[2]],
[0.0, 0.0, 0.0, 1.0]])
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from G to I
wTi[:3, :3] = wRi
wTi[:3, 3] = np.squeeze(self.gt_position[FN])
wTq = wTi @ self.iTv @ vTq
# force only yaw and x,y translation
wTq = utils.poseSE32SE2(wTq)
cornerPosInVelo = wTq[:3, :3] @ trackletBox + np.tile(wTq[:3, 3], (8, 1)).T
corner_sublist.append(cornerPosInVelo)
cornerInVelo = vTq[:3, :3] @ trackletBox + np.tile(vTq[:3, 3], (8, 1)).T
cornerInCam2 = self.o2Tv @ np.vstack((cornerInVelo, np.ones((1, 8))))
cornerInCam2 = np.eye(3) @ cornerInCam2[:3, :]
# used for per frame IOU evaluation
if FN not in self.local_cuboid_dict.keys():
self.local_cuboid_dict[FN] = [cornerInCam2.T]
self.local_volume_dict[FN] = [h * w * l]
self.local_yaw_dict[FN] = [yaw]
self.local_hwl_dict[FN] = [[h, w, l]]
else:
self.local_cuboid_dict[FN].append(cornerInCam2.T)
self.local_volume_dict[FN].append(h * w * l)
self.local_yaw_dict[FN].append(yaw)
self.local_hwl_dict[FN].append([h, w, l])
if len(corner_sublist) > 0:
# for plotting
corner_sublist = np.concatenate([corner_sublist], axis=0)
corner_sub = np.mean(corner_sublist, axis=0)
self.corner_list.append(corner_sub)
# for 3D IOU eval
# for global cuboids
self.cuboid_list.append(np.mean(np.array(corner_sublist), axis=0).T)
self.volume_list.append(h * w * l)
self.cuboid_list = np.array(self.cuboid_list)
self.volume_list = np.array(self.volume_list)
def load_detection(self):
"""
load object bounding box labels in detection benchmark
"""
root_dir = self.kitti_dataset_path + 'object/'
kitti_det_loader = kitti_detection_helper.KittiDataset(root_dir,
self.kitti_date, self.kitti_drive)
type_list = ['Car', 'Van', 'Truck']
# some of the bbox are the same one
# need to compute average bbox
for id, object_3d_list in enumerate(kitti_det_loader.all_object_3d):
for object_3d in object_3d_list:
corner_sublist = []
if object_3d.cls_type not in type_list:
continue
trackletBox, oTq, yaw = object_3d.generate_corners3d()
FN = kitti_det_loader.img_idx_list[id]
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from | generate_object_eval_path | identifier_name |
kitti_sem_data_loader.py | self.get_dataset()
self.K = self.dataset.calib.K_cam0
self.load_extrinsics()
self.get_GroundTruth()
if object_label_status == 'tracklet_label':
self.load_tracklet()
elif object_label_status == 'detection_label':
self.load_detection()
else:
return
# generate path to store groundtruth 3D bounding box
self.generate_gt_bbox_path()
# generate path to store the object 3D IOU results
self.generate_object_eval_path()
def get_GroundTruth(self):
"""
load gt position and orientation
""" | # first_pose_inv = src.se3.inversePose(first_pose)
# do not correct the orientation
# first_pose_inv[:3, :3] = np.eye(3)
# do not set first pose to identity
first_pose_inv = np.eye(4)
for o in self.dataset.oxts:
normalized_pose_original = first_pose_inv @ o.T_w_imu
self.poses_gt.append(normalized_pose_original)
# gt pose is from I to G
for i, pose in enumerate(self.poses_gt):
# get gt position
gt_position = np.reshape(pose[0:3, 3], (-1, 1))
self.gt_position.append(gt_position)
# get gt orientation
R_wIMU = pose[0:3, 0:3]
self.gt_orientation.append(R_wIMU)
def get_dataset(self):
"""
load kitti dataset using pykitti
"""
self.dataset = pykitti.raw(self.kitti_dataset_path, self.kitti_date, self.kitti_drive, frames = range(self.start_index, self.end_index, 1))
LOGGER.info('Drive: ' + str(self.dataset.drive))
LOGGER.info('Frame range: ' + str(self.dataset.frames))
def load_extrinsics(self):
# cam to imu T
T_camvelo = self.dataset.calib.T_cam0_velo
T_veloimu = self.dataset.calib.T_velo_imu
# T_cam0_imu Take a vector from IMU frame to the cam0 frame.
# refer to https://github.com/utiasSTARS/pykitti
# point_velo = np.array([0,0,0,1])
# point_cam0 = data.calib.T_cam0_velo.dot(point_velo)
T_cam0_imu = np.matmul(T_camvelo, T_veloimu)
self.oTi = T_cam0_imu
self.iTo = se3.inversePose(self.oTi)
# add vel to imu transformation
self.iTv = se3.inversePose(self.dataset.calib.T_velo_imu)
self.o2Tv = self.dataset.calib.T_cam2_velo
def generate_gt_bbox_path(self):
self.gt_bbox_results_path = self.cache_path + self.kitti_dir + '/gt_bboxes_results/'
if not os.path.exists(self.gt_bbox_results_path):
os.makedirs(self.gt_bbox_results_path)
def generate_object_eval_path(self):
self.pr_table_dir = self.cache_path + self.kitti_dir + '/evaluation/'
if not os.path.exists(self.pr_table_dir):
os.makedirs(self.pr_table_dir)
def load_tracklet(self):
"""
# load tracklet
# to show 3D bounding box
# need to use the groundtruth trajectory
# P.S. here we average all the tracklets for one object,
# if it is moving then it is not accurate
"""
tracklet_all = parseTrackletXML.parseXML(self.tracklet_xml_path)
for i, tracklet in enumerate(tracklet_all):
h, w, l = tracklet.size
if tracklet.objectType not in ["Car", "Van", "Truck"]:
continue
trackletBox = np.array([
[-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2],
[w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2],
[0, 0, 0, 0, h, h, h, h]])
corner_sublist = []
for translation, rotation, state, occlusion, truncation, amtOcclusion, \
amtBorders, absoluteFrameNumber in tracklet:
# determine if object is in the image; otherwise continue
if truncation not in (parseTrackletXML.TRUNC_IN_IMAGE, parseTrackletXML.TRUNC_TRUNCATED):
continue
# print("translation {}".format(translation))
# re-create 3D bounding box in velodyne coordinate system
yaw = rotation[2] # other rotations are supposedly 0
assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!'
# transform from camera frame to world frame
FN = absoluteFrameNumber
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
# object to velodyne transform
vTq = np.array([[np.cos(yaw), -np.sin(yaw), 0.0, translation[0]],
[np.sin(yaw), np.cos(yaw), 0.0, translation[1]],
[0.0, 0.0, 1.0, translation[2]],
[0.0, 0.0, 0.0, 1.0]])
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from G to I
wTi[:3, :3] = wRi
wTi[:3, 3] = np.squeeze(self.gt_position[FN])
wTq = wTi @ self.iTv @ vTq
# force only yaw and x,y translation
wTq = utils.poseSE32SE2(wTq)
cornerPosInVelo = wTq[:3, :3] @ trackletBox + np.tile(wTq[:3, 3], (8, 1)).T
corner_sublist.append(cornerPosInVelo)
cornerInVelo = vTq[:3, :3] @ trackletBox + np.tile(vTq[:3, 3], (8, 1)).T
cornerInCam2 = self.o2Tv @ np.vstack((cornerInVelo, np.ones((1, 8))))
cornerInCam2 = np.eye(3) @ cornerInCam2[:3, :]
# used for per frame IOU evaluation
if FN not in self.local_cuboid_dict.keys():
self.local_cuboid_dict[FN] = [cornerInCam2.T]
self.local_volume_dict[FN] = [h * w * l]
self.local_yaw_dict[FN] = [yaw]
self.local_hwl_dict[FN] = [[h, w, l]]
else:
self.local_cuboid_dict[FN].append(cornerInCam2.T)
self.local_volume_dict[FN].append(h * w * l)
self.local_yaw_dict[FN].append(yaw)
self.local_hwl_dict[FN].append([h, w, l])
if len(corner_sublist) > 0:
# for plotting
corner_sublist = np.concatenate([corner_sublist], axis=0)
corner_sub = np.mean(corner_sublist, axis=0)
self.corner_list.append(corner_sub)
# for 3D IOU eval
# for global cuboids
self.cuboid_list.append(np.mean(np.array(corner_sublist), axis=0).T)
self.volume_list.append(h * w * l)
self.cuboid_list = np.array(self.cuboid_list)
self.volume_list = np.array(self.volume_list)
def load_detection(self):
"""
load object bounding box labels in detection benchmark
"""
root_dir = self.kitti_dataset_path + 'object/'
kitti_det_loader = kitti_detection_helper.KittiDataset(root_dir,
self.kitti_date, self.kitti_drive)
type_list = ['Car', 'Van', 'Truck']
# some of the bbox are the same one
# need to compute average bbox
for id, object_3d_list in enumerate(kitti_det_loader.all_object_3d):
for object_3d in object_3d_list:
corner_sublist = []
if object_3d.cls_type not in type_list:
continue
trackletBox, oTq, yaw = object_3d.generate_corners3d()
FN = kitti_det_loader.img_idx_list[id]
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from |
# set first pose to identity
# first_pose = self.dataset.oxts[0].T_w_imu | random_line_split |
kitti_sem_data_loader.py | self.get_dataset()
self.K = self.dataset.calib.K_cam0
self.load_extrinsics()
self.get_GroundTruth()
if object_label_status == 'tracklet_label':
self.load_tracklet()
elif object_label_status == 'detection_label':
self.load_detection()
else:
return
# generate path to store groundtruth 3D bounding box
self.generate_gt_bbox_path()
# generate path to store the object 3D IOU results
self.generate_object_eval_path()
def get_GroundTruth(self):
"""
load gt position and orientation
"""
# set first pose to identity
# first_pose = self.dataset.oxts[0].T_w_imu
# first_pose_inv = src.se3.inversePose(first_pose)
# do not correct the orientation
# first_pose_inv[:3, :3] = np.eye(3)
# do not set first pose to identity
first_pose_inv = np.eye(4)
for o in self.dataset.oxts:
normalized_pose_original = first_pose_inv @ o.T_w_imu
self.poses_gt.append(normalized_pose_original)
# gt pose is from I to G
for i, pose in enumerate(self.poses_gt):
# get gt position
gt_position = np.reshape(pose[0:3, 3], (-1, 1))
self.gt_position.append(gt_position)
# get gt orientation
R_wIMU = pose[0:3, 0:3]
self.gt_orientation.append(R_wIMU)
def get_dataset(self):
"""
load kitti dataset using pykitti
"""
self.dataset = pykitti.raw(self.kitti_dataset_path, self.kitti_date, self.kitti_drive, frames = range(self.start_index, self.end_index, 1))
LOGGER.info('Drive: ' + str(self.dataset.drive))
LOGGER.info('Frame range: ' + str(self.dataset.frames))
def load_extrinsics(self):
# cam to imu T
T_camvelo = self.dataset.calib.T_cam0_velo
T_veloimu = self.dataset.calib.T_velo_imu
# T_cam0_imu Take a vector from IMU frame to the cam0 frame.
# refer to https://github.com/utiasSTARS/pykitti
# point_velo = np.array([0,0,0,1])
# point_cam0 = data.calib.T_cam0_velo.dot(point_velo)
T_cam0_imu = np.matmul(T_camvelo, T_veloimu)
self.oTi = T_cam0_imu
self.iTo = se3.inversePose(self.oTi)
# add vel to imu transformation
self.iTv = se3.inversePose(self.dataset.calib.T_velo_imu)
self.o2Tv = self.dataset.calib.T_cam2_velo
def generate_gt_bbox_path(self):
self.gt_bbox_results_path = self.cache_path + self.kitti_dir + '/gt_bboxes_results/'
if not os.path.exists(self.gt_bbox_results_path):
|
def generate_object_eval_path(self):
self.pr_table_dir = self.cache_path + self.kitti_dir + '/evaluation/'
if not os.path.exists(self.pr_table_dir):
os.makedirs(self.pr_table_dir)
def load_tracklet(self):
"""
# load tracklet
# to show 3D bounding box
# need to use the groundtruth trajectory
# P.S. here we average all the tracklets for one object,
# if it is moving then it is not accurate
"""
tracklet_all = parseTrackletXML.parseXML(self.tracklet_xml_path)
for i, tracklet in enumerate(tracklet_all):
h, w, l = tracklet.size
if tracklet.objectType not in ["Car", "Van", "Truck"]:
continue
trackletBox = np.array([
[-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2],
[w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2],
[0, 0, 0, 0, h, h, h, h]])
corner_sublist = []
for translation, rotation, state, occlusion, truncation, amtOcclusion, \
amtBorders, absoluteFrameNumber in tracklet:
# determine if object is in the image; otherwise continue
if truncation not in (parseTrackletXML.TRUNC_IN_IMAGE, parseTrackletXML.TRUNC_TRUNCATED):
continue
# print("translation {}".format(translation))
# re-create 3D bounding box in velodyne coordinate system
yaw = rotation[2] # other rotations are supposedly 0
assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!'
# transform from camera frame to world frame
FN = absoluteFrameNumber
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
# object to velodyne transform
vTq = np.array([[np.cos(yaw), -np.sin(yaw), 0.0, translation[0]],
[np.sin(yaw), np.cos(yaw), 0.0, translation[1]],
[0.0, 0.0, 1.0, translation[2]],
[0.0, 0.0, 0.0, 1.0]])
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from G to I
wTi[:3, :3] = wRi
wTi[:3, 3] = np.squeeze(self.gt_position[FN])
wTq = wTi @ self.iTv @ vTq
# force only yaw and x,y translation
wTq = utils.poseSE32SE2(wTq)
cornerPosInVelo = wTq[:3, :3] @ trackletBox + np.tile(wTq[:3, 3], (8, 1)).T
corner_sublist.append(cornerPosInVelo)
cornerInVelo = vTq[:3, :3] @ trackletBox + np.tile(vTq[:3, 3], (8, 1)).T
cornerInCam2 = self.o2Tv @ np.vstack((cornerInVelo, np.ones((1, 8))))
cornerInCam2 = np.eye(3) @ cornerInCam2[:3, :]
# used for per frame IOU evaluation
if FN not in self.local_cuboid_dict.keys():
self.local_cuboid_dict[FN] = [cornerInCam2.T]
self.local_volume_dict[FN] = [h * w * l]
self.local_yaw_dict[FN] = [yaw]
self.local_hwl_dict[FN] = [[h, w, l]]
else:
self.local_cuboid_dict[FN].append(cornerInCam2.T)
self.local_volume_dict[FN].append(h * w * l)
self.local_yaw_dict[FN].append(yaw)
self.local_hwl_dict[FN].append([h, w, l])
if len(corner_sublist) > 0:
# for plotting
corner_sublist = np.concatenate([corner_sublist], axis=0)
corner_sub = np.mean(corner_sublist, axis=0)
self.corner_list.append(corner_sub)
# for 3D IOU eval
# for global cuboids
self.cuboid_list.append(np.mean(np.array(corner_sublist), axis=0).T)
self.volume_list.append(h * w * l)
self.cuboid_list = np.array(self.cuboid_list)
self.volume_list = np.array(self.volume_list)
def load_detection(self):
"""
load object bounding box labels in detection benchmark
"""
root_dir = self.kitti_dataset_path + 'object/'
kitti_det_loader = kitti_detection_helper.KittiDataset(root_dir,
self.kitti_date, self.kitti_drive)
type_list = ['Car', 'Van', 'Truck']
# some of the bbox are the same one
# need to compute average bbox
for id, object_3d_list in enumerate(kitti_det_loader.all_object_3d):
for object_3d in object_3d_list:
corner_sublist = []
if object_3d.cls_type not in type_list:
continue
trackletBox, oTq, yaw = object_3d.generate_corners3d()
FN = kitti_det_loader.img_idx_list[id]
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from | os.makedirs(self.gt_bbox_results_path) | conditional_block |
vspheremachine_controller.go | Reconcile ensures the back-end state reflects the Kubernetes resource state intent.
func (r machineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
// Get the VSphereMachine resource for this request.
vsphereMachine := &infrav1.VSphereMachine{}
if err := r.Client.Get(r, req.NamespacedName, vsphereMachine); err != nil {
if apierrors.IsNotFound(err) {
r.Logger.Info("VSphereMachine not found, won't reconcile", "key", req.NamespacedName)
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the CAPI Machine.
machine, err := clusterutilv1.GetOwnerMachine(r, r.Client, vsphereMachine.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if machine == nil {
r.Logger.Info("Waiting for Machine Controller to set OwnerRef on VSphereMachine")
return reconcile.Result{}, nil
}
// Fetch the CAPI Cluster.
cluster, err := clusterutilv1.GetClusterFromMetadata(r, r.Client, machine.ObjectMeta)
if err != nil {
r.Logger.Info("Machine is missing cluster label or cluster does not exist")
return reconcile.Result{}, nil
}
// Fetch the VSphereCluster
vsphereCluster := &infrav1.VSphereCluster{}
vsphereClusterName := client.ObjectKey{
Namespace: vsphereMachine.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(r, vsphereClusterName, vsphereCluster); err != nil {
r.Logger.Info("Waiting for VSphereCluster")
return reconcile.Result{}, nil
}
// Get or create an authenticated session to the vSphere endpoint.
authSession, err := session.GetOrCreate(r.Context,
vsphereCluster.Spec.Server, vsphereMachine.Spec.Datacenter,
r.ControllerManagerContext.Username, r.ControllerManagerContext.Password)
if err != nil {
return reconcile.Result{}, errors.Wrap(err, "failed to create vSphere session")
}
// Create the patch helper.
patchHelper, err := patch.NewHelper(vsphereMachine, r.Client)
if err != nil {
return reconcile.Result{}, errors.Wrapf(
err,
"failed to init patch helper for %s %s/%s",
vsphereMachine.GroupVersionKind(),
vsphereMachine.Namespace,
vsphereMachine.Name)
}
// Create the machine context for this request.
machineContext := &context.MachineContext{
ClusterContext: &context.ClusterContext{
ControllerContext: r.ControllerContext,
Cluster: cluster,
VSphereCluster: vsphereCluster,
},
Machine: machine,
VSphereMachine: vsphereMachine,
Session: authSession,
Logger: r.Logger.WithName(req.Namespace).WithName(req.Name),
PatchHelper: patchHelper,
}
// Print the task-ref upon entry and upon exit.
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnEntry",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
defer func() {
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnExit",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
}()
// Always issue a patch when exiting this function so changes to the
// resource are patched back to the API server.
defer func() {
// Patch the VSphereMachine resource.
if err := machineContext.Patch(); err != nil {
if reterr == nil {
reterr = err
}
machineContext.Logger.Error(err, "patch failed", "machine", machineContext.String())
}
// localObj is a deep copy of the VSphereMachine resource that was
// fetched at the top of this Reconcile function.
localObj := machineContext.VSphereMachine.DeepCopy()
// Fetch the up-to-date VSphereMachine resource into remoteObj until the
// fetched resource has a a different ResourceVersion than the local
// object.
//
// FYI - resource versions are opaque, numeric strings and should not
// be compared with < or >, only for equality -
// https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions.
//
// Since CAPV is currently deployed with a single replica, and this
// controller has a max concurrency of one, the only agent updating the
// VSphereMachine resource should be this controller.
//
// So if the remote resource's ResourceVersion is different than the
// ResourceVersion of the resource fetched at the beginning of this
// reconcile request, then that means the remote resource should be
// newer than the local resource.
wait.PollImmediateInfinite(time.Second*1, func() (bool, error) {
// remoteObj refererences the same VSphereMachine resource as it exists
// on the API server post the patch operation above. In a perfect world,
// the Status for localObj and remoteObj should be the same.
remoteObj := &infrav1.VSphereMachine{}
if err := machineContext.Client.Get(machineContext, req.NamespacedName, remoteObj); err != nil {
if apierrors.IsNotFound(err) {
// It's possible that the remote resource cannot be found
// because it has been removed. Do not error, just exit.
return true, nil
}
// There was an issue getting the remote resource. Sleep for a
// second and try again.
machineContext.Logger.Error(err, "failed to get VSphereMachine while exiting reconcile")
return false, nil
}
// If the remote resource version is not the same as the local
// resource version, then it means we were able to get a resource
// newer than the one we already had.
if localObj.ResourceVersion != remoteObj.ResourceVersion {
machineContext.Logger.Info(
"resource is patched",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return true, nil
}
// If the resources are the same resource version, then a previous
// patch may not have resulted in any changes. Check to see if the
// remote status is the same as the local status.
if cmp.Equal(localObj.Status, remoteObj.Status, cmpopts.EquateEmpty()) {
machineContext.Logger.Info(
"resource patch was not required",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return true, nil
}
// The remote resource version is the same as the local resource
// version, which means the local cache is not yet up-to-date.
machineContext.Logger.Info(
"resource is not patched",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return false, nil
})
}()
// Handle deleted machines
if !vsphereMachine.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machineContext)
}
// Handle non-deleted machines
return r.reconcileNormal(machineContext)
}
func (r machineReconciler) reconcileDelete(ctx *context.MachineContext) (reconcile.Result, error) {
ctx.Logger.Info("Handling deleted VSphereMachine")
// TODO(akutz) Implement selection of VM service based on vSphere version
var vmService services.VirtualMachineService = &govmomi.VMService{}
vm, err := vmService.DestroyVM(ctx)
if err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to destroy VM")
}
// Requeue the operation until the VM is "notfound".
if vm.State != infrav1.VirtualMachineStateNotFound {
ctx.Logger.Info("vm state is not reconciled", "expected-vm-state", infrav1.VirtualMachineStateNotFound, "actual-vm-state", vm.State)
return reconcile.Result{}, nil
}
// The VM is deleted so remove the finalizer.
ctx.VSphereMachine.Finalizers = clusterutilv1.Filter(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer)
return reconcile.Result{}, nil
}
func (r machineReconciler) reconcileNormal(ctx *context.MachineContext) (reconcile.Result, error) | {
// If the VSphereMachine is in an error state, return early.
if ctx.VSphereMachine.Status.ErrorReason != nil || ctx.VSphereMachine.Status.ErrorMessage != nil {
ctx.Logger.Info("Error state detected, skipping reconciliation")
return reconcile.Result{}, nil
}
// If the VSphereMachine doesn't have our finalizer, add it.
if !clusterutilv1.Contains(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer) {
ctx.VSphereMachine.Finalizers = append(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer)
}
if !ctx.Cluster.Status.InfrastructureReady {
ctx.Logger.Info("Cluster infrastructure is not ready yet")
return reconcile.Result{}, nil
}
// Make sure bootstrap data is available and populated.
// Make sure bootstrap data is available and populated.
if ctx.Machine.Spec.Bootstrap.DataSecretName == nil { | identifier_body |
|
vspheremachine_controller.go | (controlledTypeName)
controllerNameShort = fmt.Sprintf("%s-controller", strings.ToLower(controlledTypeName))
controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, controllerNameShort)
)
// Build the controller context.
controllerContext := &context.ControllerContext{
ControllerManagerContext: ctx,
Name: controllerNameShort,
Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)),
Logger: ctx.Logger.WithName(controllerNameShort),
}
return ctrl.NewControllerManagedBy(mgr).
// Watch the controlled, infrastructure resource.
For(controlledType).
// Watch the CAPI resource that owns this infrastructure resource.
Watches(
&source.Kind{Type: &clusterv1.Machine{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: clusterutilv1.MachineToInfrastructureMapFunc(controlledTypeGVK),
},
).
// Watch a GenericEvent channel for the controlled resource.
//
// This is useful when there are events outside of Kubernetes that
// should cause a resource to be synchronized, such as a goroutine
// waiting on some asynchronous, external task to complete.
Watches(
&source.Channel{Source: ctx.GetGenericEventChannelFor(controlledTypeGVK)},
&handler.EnqueueRequestForObject{},
).
Complete(machineReconciler{ControllerContext: controllerContext})
}
type machineReconciler struct {
*context.ControllerContext
}
// Reconcile ensures the back-end state reflects the Kubernetes resource state intent.
func (r machineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
// Get the VSphereMachine resource for this request.
vsphereMachine := &infrav1.VSphereMachine{}
if err := r.Client.Get(r, req.NamespacedName, vsphereMachine); err != nil {
if apierrors.IsNotFound(err) {
r.Logger.Info("VSphereMachine not found, won't reconcile", "key", req.NamespacedName)
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the CAPI Machine.
machine, err := clusterutilv1.GetOwnerMachine(r, r.Client, vsphereMachine.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if machine == nil {
r.Logger.Info("Waiting for Machine Controller to set OwnerRef on VSphereMachine")
return reconcile.Result{}, nil
}
// Fetch the CAPI Cluster.
cluster, err := clusterutilv1.GetClusterFromMetadata(r, r.Client, machine.ObjectMeta)
if err != nil {
r.Logger.Info("Machine is missing cluster label or cluster does not exist")
return reconcile.Result{}, nil
}
// Fetch the VSphereCluster
vsphereCluster := &infrav1.VSphereCluster{}
vsphereClusterName := client.ObjectKey{
Namespace: vsphereMachine.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(r, vsphereClusterName, vsphereCluster); err != nil {
r.Logger.Info("Waiting for VSphereCluster")
return reconcile.Result{}, nil
}
// Get or create an authenticated session to the vSphere endpoint.
authSession, err := session.GetOrCreate(r.Context,
vsphereCluster.Spec.Server, vsphereMachine.Spec.Datacenter,
r.ControllerManagerContext.Username, r.ControllerManagerContext.Password)
if err != nil {
return reconcile.Result{}, errors.Wrap(err, "failed to create vSphere session")
}
// Create the patch helper.
patchHelper, err := patch.NewHelper(vsphereMachine, r.Client)
if err != nil {
return reconcile.Result{}, errors.Wrapf(
err,
"failed to init patch helper for %s %s/%s",
vsphereMachine.GroupVersionKind(),
vsphereMachine.Namespace,
vsphereMachine.Name)
}
// Create the machine context for this request.
machineContext := &context.MachineContext{
ClusterContext: &context.ClusterContext{
ControllerContext: r.ControllerContext,
Cluster: cluster,
VSphereCluster: vsphereCluster,
},
Machine: machine,
VSphereMachine: vsphereMachine,
Session: authSession,
Logger: r.Logger.WithName(req.Namespace).WithName(req.Name),
PatchHelper: patchHelper,
}
// Print the task-ref upon entry and upon exit.
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnEntry",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
defer func() {
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnExit",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
}()
// Always issue a patch when exiting this function so changes to the
// resource are patched back to the API server.
defer func() {
// Patch the VSphereMachine resource.
if err := machineContext.Patch(); err != nil {
if reterr == nil {
reterr = err
}
machineContext.Logger.Error(err, "patch failed", "machine", machineContext.String())
}
// localObj is a deep copy of the VSphereMachine resource that was
// fetched at the top of this Reconcile function.
localObj := machineContext.VSphereMachine.DeepCopy()
// Fetch the up-to-date VSphereMachine resource into remoteObj until the
// fetched resource has a a different ResourceVersion than the local
// object.
//
// FYI - resource versions are opaque, numeric strings and should not
// be compared with < or >, only for equality -
// https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions.
//
// Since CAPV is currently deployed with a single replica, and this
// controller has a max concurrency of one, the only agent updating the
// VSphereMachine resource should be this controller.
//
// So if the remote resource's ResourceVersion is different than the
// ResourceVersion of the resource fetched at the beginning of this
// reconcile request, then that means the remote resource should be
// newer than the local resource.
wait.PollImmediateInfinite(time.Second*1, func() (bool, error) {
// remoteObj refererences the same VSphereMachine resource as it exists
// on the API server post the patch operation above. In a perfect world,
// the Status for localObj and remoteObj should be the same.
remoteObj := &infrav1.VSphereMachine{}
if err := machineContext.Client.Get(machineContext, req.NamespacedName, remoteObj); err != nil {
if apierrors.IsNotFound(err) {
// It's possible that the remote resource cannot be found
// because it has been removed. Do not error, just exit.
return true, nil
}
// There was an issue getting the remote resource. Sleep for a
// second and try again.
machineContext.Logger.Error(err, "failed to get VSphereMachine while exiting reconcile")
return false, nil
}
// If the remote resource version is not the same as the local
// resource version, then it means we were able to get a resource
// newer than the one we already had.
if localObj.ResourceVersion != remoteObj.ResourceVersion {
machineContext.Logger.Info(
"resource is patched",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return true, nil
}
// If the resources are the same resource version, then a previous
// patch may not have resulted in any changes. Check to see if the
// remote status is the same as the local status.
if cmp.Equal(localObj.Status, remoteObj.Status, cmpopts.EquateEmpty()) {
machineContext.Logger.Info(
"resource patch was not required",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return true, nil
}
// The remote resource version is the same as the local resource
// version, which means the local cache is not yet up-to-date.
machineContext.Logger.Info(
"resource is not patched",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return false, nil
})
}()
// Handle deleted machines
if !vsphereMachine.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machineContext)
}
// Handle non-deleted machines
return r.reconcileNormal(machineContext)
}
func (r machineReconciler) reconcileDelete(ctx *context.MachineContext) (reconcile.Result, error) {
ctx.Logger.Info("Handling deleted VSphereMachine")
// TODO(akutz) Implement selection of VM service based on vSphere version
var vmService services.VirtualMachineService = &govmomi.VMService{}
vm, err := vmService.DestroyVM(ctx)
if err != nil | {
return reconcile.Result{}, errors.Wrapf(err, "failed to destroy VM")
} | conditional_block |
|
vspheremachine_controller.go | corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/wait"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
clusterutilv1 "sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1alpha3"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/record"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/session"
infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
)
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachines,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachines/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
// AddMachineControllerToManager adds the machine controller to the provided
// manager.
func AddMachineControllerToManager(ctx *context.ControllerManagerContext, mgr manager.Manager) error {
var (
controlledType = &infrav1.VSphereMachine{}
controlledTypeName = reflect.TypeOf(controlledType).Elem().Name()
controlledTypeGVK = infrav1.GroupVersion.WithKind(controlledTypeName)
controllerNameShort = fmt.Sprintf("%s-controller", strings.ToLower(controlledTypeName))
controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, controllerNameShort)
)
// Build the controller context.
controllerContext := &context.ControllerContext{
ControllerManagerContext: ctx,
Name: controllerNameShort,
Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)),
Logger: ctx.Logger.WithName(controllerNameShort),
}
return ctrl.NewControllerManagedBy(mgr).
// Watch the controlled, infrastructure resource.
For(controlledType).
// Watch the CAPI resource that owns this infrastructure resource.
Watches(
&source.Kind{Type: &clusterv1.Machine{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: clusterutilv1.MachineToInfrastructureMapFunc(controlledTypeGVK),
},
).
// Watch a GenericEvent channel for the controlled resource.
//
// This is useful when there are events outside of Kubernetes that
// should cause a resource to be synchronized, such as a goroutine
// waiting on some asynchronous, external task to complete.
Watches(
&source.Channel{Source: ctx.GetGenericEventChannelFor(controlledTypeGVK)},
&handler.EnqueueRequestForObject{},
).
Complete(machineReconciler{ControllerContext: controllerContext})
}
type machineReconciler struct {
*context.ControllerContext
}
// Reconcile ensures the back-end state reflects the Kubernetes resource state intent.
func (r machineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
// Get the VSphereMachine resource for this request.
vsphereMachine := &infrav1.VSphereMachine{}
if err := r.Client.Get(r, req.NamespacedName, vsphereMachine); err != nil {
if apierrors.IsNotFound(err) {
r.Logger.Info("VSphereMachine not found, won't reconcile", "key", req.NamespacedName)
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the CAPI Machine.
machine, err := clusterutilv1.GetOwnerMachine(r, r.Client, vsphereMachine.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if machine == nil {
r.Logger.Info("Waiting for Machine Controller to set OwnerRef on VSphereMachine")
return reconcile.Result{}, nil
}
// Fetch the CAPI Cluster.
cluster, err := clusterutilv1.GetClusterFromMetadata(r, r.Client, machine.ObjectMeta)
if err != nil {
r.Logger.Info("Machine is missing cluster label or cluster does not exist")
return reconcile.Result{}, nil
}
// Fetch the VSphereCluster
vsphereCluster := &infrav1.VSphereCluster{}
vsphereClusterName := client.ObjectKey{
Namespace: vsphereMachine.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(r, vsphereClusterName, vsphereCluster); err != nil {
r.Logger.Info("Waiting for VSphereCluster")
return reconcile.Result{}, nil
}
// Get or create an authenticated session to the vSphere endpoint.
authSession, err := session.GetOrCreate(r.Context,
vsphereCluster.Spec.Server, vsphereMachine.Spec.Datacenter,
r.ControllerManagerContext.Username, r.ControllerManagerContext.Password)
if err != nil {
return reconcile.Result{}, errors.Wrap(err, "failed to create vSphere session")
}
// Create the patch helper.
patchHelper, err := patch.NewHelper(vsphereMachine, r.Client)
if err != nil {
return reconcile.Result{}, errors.Wrapf(
err,
"failed to init patch helper for %s %s/%s",
vsphereMachine.GroupVersionKind(),
vsphereMachine.Namespace,
vsphereMachine.Name)
}
// Create the machine context for this request.
machineContext := &context.MachineContext{
ClusterContext: &context.ClusterContext{
ControllerContext: r.ControllerContext,
Cluster: cluster,
VSphereCluster: vsphereCluster,
},
Machine: machine,
VSphereMachine: vsphereMachine,
Session: authSession,
Logger: r.Logger.WithName(req.Namespace).WithName(req.Name),
PatchHelper: patchHelper,
}
// Print the task-ref upon entry and upon exit.
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnEntry",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
defer func() {
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnExit",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
}()
// Always issue a patch when exiting this function so changes to the
// resource are patched back to the API server.
defer func() {
// Patch the VSphereMachine resource.
if err := machineContext.Patch(); err != nil {
if reterr == nil {
reterr = err
}
machineContext.Logger.Error(err, "patch failed", "machine", machineContext.String())
}
// localObj is a deep copy of the VSphereMachine resource that was
// fetched at the top of this Reconcile function.
localObj := machineContext.VSphereMachine.DeepCopy()
// Fetch the up-to-date VSphereMachine resource into remoteObj until the
// fetched resource has a a different ResourceVersion than the local
// object.
//
// FYI - resource versions are opaque, numeric strings and should not
// be compared with < or >, only for equality -
// https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions.
//
// Since CAPV is currently deployed with a single replica, and this
// controller has a max concurrency of one, the only agent updating the
// VSphereMachine resource should be this controller.
//
// So if the remote resource's ResourceVersion is different than the
// ResourceVersion of the resource fetched at the beginning of this
// reconcile request, then that means the remote resource should be
// newer than the local resource.
wait.PollImmediateInfinite(time.Second*1, func() (bool, error) {
// remoteObj refererences the same VSphereMachine resource as it exists
// on the API server post the patch operation above. In a perfect world,
// the Status for localObj and remoteObj should be the same.
remoteObj := &infrav1.VSphereMachine{}
if err := machineContext.Client.Get(machineContext, req.NamespacedName, remoteObj); err != nil {
if apierrors.IsNotFound(err) {
// It's possible that the remote | "github.com/google/go-cmp/cmp/cmpopts"
"github.com/pkg/errors" | random_line_split |
|
vspheremachine_controller.go | For(controlledType).
// Watch the CAPI resource that owns this infrastructure resource.
Watches(
&source.Kind{Type: &clusterv1.Machine{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: clusterutilv1.MachineToInfrastructureMapFunc(controlledTypeGVK),
},
).
// Watch a GenericEvent channel for the controlled resource.
//
// This is useful when there are events outside of Kubernetes that
// should cause a resource to be synchronized, such as a goroutine
// waiting on some asynchronous, external task to complete.
Watches(
&source.Channel{Source: ctx.GetGenericEventChannelFor(controlledTypeGVK)},
&handler.EnqueueRequestForObject{},
).
Complete(machineReconciler{ControllerContext: controllerContext})
}
type machineReconciler struct {
*context.ControllerContext
}
// Reconcile ensures the back-end state reflects the Kubernetes resource state intent.
func (r machineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
// Get the VSphereMachine resource for this request.
vsphereMachine := &infrav1.VSphereMachine{}
if err := r.Client.Get(r, req.NamespacedName, vsphereMachine); err != nil {
if apierrors.IsNotFound(err) {
r.Logger.Info("VSphereMachine not found, won't reconcile", "key", req.NamespacedName)
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the CAPI Machine.
machine, err := clusterutilv1.GetOwnerMachine(r, r.Client, vsphereMachine.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if machine == nil {
r.Logger.Info("Waiting for Machine Controller to set OwnerRef on VSphereMachine")
return reconcile.Result{}, nil
}
// Fetch the CAPI Cluster.
cluster, err := clusterutilv1.GetClusterFromMetadata(r, r.Client, machine.ObjectMeta)
if err != nil {
r.Logger.Info("Machine is missing cluster label or cluster does not exist")
return reconcile.Result{}, nil
}
// Fetch the VSphereCluster
vsphereCluster := &infrav1.VSphereCluster{}
vsphereClusterName := client.ObjectKey{
Namespace: vsphereMachine.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(r, vsphereClusterName, vsphereCluster); err != nil {
r.Logger.Info("Waiting for VSphereCluster")
return reconcile.Result{}, nil
}
// Get or create an authenticated session to the vSphere endpoint.
authSession, err := session.GetOrCreate(r.Context,
vsphereCluster.Spec.Server, vsphereMachine.Spec.Datacenter,
r.ControllerManagerContext.Username, r.ControllerManagerContext.Password)
if err != nil {
return reconcile.Result{}, errors.Wrap(err, "failed to create vSphere session")
}
// Create the patch helper.
patchHelper, err := patch.NewHelper(vsphereMachine, r.Client)
if err != nil {
return reconcile.Result{}, errors.Wrapf(
err,
"failed to init patch helper for %s %s/%s",
vsphereMachine.GroupVersionKind(),
vsphereMachine.Namespace,
vsphereMachine.Name)
}
// Create the machine context for this request.
machineContext := &context.MachineContext{
ClusterContext: &context.ClusterContext{
ControllerContext: r.ControllerContext,
Cluster: cluster,
VSphereCluster: vsphereCluster,
},
Machine: machine,
VSphereMachine: vsphereMachine,
Session: authSession,
Logger: r.Logger.WithName(req.Namespace).WithName(req.Name),
PatchHelper: patchHelper,
}
// Print the task-ref upon entry and upon exit.
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnEntry",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
defer func() {
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnExit",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
}()
// Always issue a patch when exiting this function so changes to the
// resource are patched back to the API server.
defer func() {
// Patch the VSphereMachine resource.
if err := machineContext.Patch(); err != nil {
if reterr == nil {
reterr = err
}
machineContext.Logger.Error(err, "patch failed", "machine", machineContext.String())
}
// localObj is a deep copy of the VSphereMachine resource that was
// fetched at the top of this Reconcile function.
localObj := machineContext.VSphereMachine.DeepCopy()
// Fetch the up-to-date VSphereMachine resource into remoteObj until the
// fetched resource has a a different ResourceVersion than the local
// object.
//
// FYI - resource versions are opaque, numeric strings and should not
// be compared with < or >, only for equality -
// https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions.
//
// Since CAPV is currently deployed with a single replica, and this
// controller has a max concurrency of one, the only agent updating the
// VSphereMachine resource should be this controller.
//
// So if the remote resource's ResourceVersion is different than the
// ResourceVersion of the resource fetched at the beginning of this
// reconcile request, then that means the remote resource should be
// newer than the local resource.
wait.PollImmediateInfinite(time.Second*1, func() (bool, error) {
// remoteObj refererences the same VSphereMachine resource as it exists
// on the API server post the patch operation above. In a perfect world,
// the Status for localObj and remoteObj should be the same.
remoteObj := &infrav1.VSphereMachine{}
if err := machineContext.Client.Get(machineContext, req.NamespacedName, remoteObj); err != nil {
if apierrors.IsNotFound(err) {
// It's possible that the remote resource cannot be found
// because it has been removed. Do not error, just exit.
return true, nil
}
// There was an issue getting the remote resource. Sleep for a
// second and try again.
machineContext.Logger.Error(err, "failed to get VSphereMachine while exiting reconcile")
return false, nil
}
// If the remote resource version is not the same as the local
// resource version, then it means we were able to get a resource
// newer than the one we already had.
if localObj.ResourceVersion != remoteObj.ResourceVersion {
machineContext.Logger.Info(
"resource is patched",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return true, nil
}
// If the resources are the same resource version, then a previous
// patch may not have resulted in any changes. Check to see if the
// remote status is the same as the local status.
if cmp.Equal(localObj.Status, remoteObj.Status, cmpopts.EquateEmpty()) {
machineContext.Logger.Info(
"resource patch was not required",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return true, nil
}
// The remote resource version is the same as the local resource
// version, which means the local cache is not yet up-to-date.
machineContext.Logger.Info(
"resource is not patched",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return false, nil
})
}()
// Handle deleted machines
if !vsphereMachine.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machineContext)
}
// Handle non-deleted machines
return r.reconcileNormal(machineContext)
}
func (r machineReconciler) reconcileDelete(ctx *context.MachineContext) (reconcile.Result, error) {
ctx.Logger.Info("Handling deleted VSphereMachine")
// TODO(akutz) Implement selection of VM service based on vSphere version
var vmService services.VirtualMachineService = &govmomi.VMService{}
vm, err := vmService.DestroyVM(ctx)
if err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to destroy VM")
}
// Requeue the operation until the VM is "notfound".
if vm.State != infrav1.VirtualMachineStateNotFound {
ctx.Logger.Info("vm state is not reconciled", "expected-vm-state", infrav1.VirtualMachineStateNotFound, "actual-vm-state", vm.State)
return reconcile.Result{}, nil
}
// The VM is deleted so remove the finalizer.
ctx.VSphereMachine.Finalizers = clusterutilv1.Filter(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer)
return reconcile.Result{}, nil
}
func (r machineReconciler) | reconcileNormal | identifier_name |
|
scrape.py | _TODAY_HEADER = ["district"]
ACTIVE_TODAY_HEADER.extend(CASE_HEADER)
CSV_HEADER = copy.deepcopy(QUARANTINE_HEADER)
CSV_HEADER.extend(CASE_HEADER)
TESTING_HEADER = [
"date",
"total_sent",
"sent_on_date",
"processed_in_one_day",
"total_positive",
"new_positive",
]
TEST_DATA_JSON = "./testData.json"
DATA_INDEX_JSON = "./dataIndexJSON.json"
DATA_INDEX_JS = "./dataIndex.js"
INIT_URL = "https://dashboard.kerala.gov.in"
QAR_REQ_URL = "https://dashboard.kerala.gov.in/quarantined-datewise.php"
ACTIVE_REQ_URL = (
"https://dashboard.kerala.gov.in/dailyreporting-view-public-districtwise.php"
)
TESTING_REQ_URL = "https://dashboard.kerala.gov.in/testing-view-public.php"
DATE_REQ_STRING = "rep_date3"
HEADERS = {
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-User": "?1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
"Origin": "https://dashboard.kerala.gov.in",
"Referer": "https://dashboard.kerala.gov.in/quarantined-datewise.php",
}
def get_date(datearg):
dsplit = [int(i) for i in re.split(r"[-/]\s*", datearg)]
return date(day=dsplit[0], month=dsplit[1], year=dsplit[2])
def getint(string):
try:
return int(string)
except (TypeError, ValueError):
return int("".join(filter(str.isdigit, string)))
def csv_writer(filepath, data):
with open(filepath, "w") as f:
w = csv.DictWriter(f, fieldnames=set(CSV_HEADER))
w.writeheader()
w.writerows(data)
def init_sess():
try:
# warm up the cookies
sess = requests.Session()
sess.get(INIT_URL, headers=HEADERS)
except requests.exceptions.RequestException as e:
print(e)
else:
return sess
def run_req(sess, url, data, headers, method="GET"):
kwargs = {}
if method == "GET":
req_func = sess.get
elif method == "POST":
req_func = sess.post
kwargs["data"] = data
kwargs["headers"] = headers
try:
response = req_func(url, **kwargs)
except requests.exceptions.RequestException as e:
print(e)
else:
return response
def get_quarantine_details(date):
sess = init_sess()
date_str = date.strftime("%d/%m/%Y")
payload = {DATE_REQ_STRING: date_str, "lw": "View"}
csv_data = []
response = run_req(sess, QAR_REQ_URL, payload, HEADERS, "POST")
if not response:
return
soup = BeautifulSoup(response.content, "html.parser")
table = soup.find_all("table")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
district = cols[0]
data_dict = dict(zip(QUARANTINE_HEADER, cols))
data_dict["dist_code"] = district
data_dict["district"] = district_map[district]
csv_data.append(data_dict)
return csv_data
def extract_datewise_active(soup, district):
datalist = []
# assumes last table is the datewise table
table = soup.find_all("table")[-1]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_HEADER, cols))
data_dict["date"] = get_date(data_dict["date"])
data_dict["district"] = district
datalist.append(data_dict)
return datalist
def get_active_details_today():
active_data = defaultdict(list)
sess = init_sess()
response = run_req(sess, ACTIVE_REQ_URL, None, HEADERS, "GET")
if not response:
print("response failed for today's active case details")
return
if len(response.content) < 1000:
print(len(response.content), " Actual response not recieved")
return
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
last_date = soup.find_all(text=re.compile("updated", re.I))[0].split()[1]
last_date = get_date(last_date)
# assumes second last table is the summary table
table = soup.find_all("table")[-2]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_TODAY_HEADER, cols))
data_dict["date"] = last_date
district = data_dict["district"]
active_data[district].append(data_dict)
# are there 14 district data + total data
assert len(active_data) == 15
# get kerala datewise details table as well
kerala_data = {i["date"]: i for i in extract_datewise_active(soup, "KERALA")}
return (kerala_data, active_data, last_date)
def get_bulk_active_details():
active_data = {}
time.sleep(5)
for code, district in district_code_map.items():
# new session for each district data
sess = init_sess()
print("Processing: {}".format(district))
payload = {"district": code}
response = run_req(sess, ACTIVE_REQ_URL, payload, HEADERS, "POST")
if not response:
print("response failed for district {}".format(district))
break
if len(response.content) < 1000:
print(len(response.content), " Actual response not recieved")
break
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
# assumes last table is the datewise table
active_data[district] = extract_datewise_active(soup, district)
time.sleep(5)
return active_data
def | ():
testing_data = {}
sess = init_sess()
response = run_req(sess, TESTING_REQ_URL, None, HEADERS, "GET")
if not response:
print("response failed for today's active case details")
return
if len(response.content) < 1000:
print(len(response.content), " Actual response not recieved")
return
print("Processing testing data")
soup = BeautifulSoup(response.content, "html.parser")
case_report = soup.find_all(text=re.compile("Daily Case Reports from", re.I))[0]
table = case_report.parent.parent.parent.find_all("table")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(TESTING_HEADER, cols))
test_date = get_date(data_dict["date"])
data_dict["date"] = test_date
testing_data[test_date] = data_dict
return testing_data
def active_detail_pivot(active_data, get_only_curr=False):
pivot_data = defaultdict(dict)
for district, values in active_data.items():
count = {"district": district, "confirmed": 0, "deaths": 0, "recovered": 0}
for value in values:
v_date = value["date"]
for key in ["confirmed", "deaths", "recovered"]:
count[key] += getint(value[key])
count["active"] = getint(value["active"])
pivot_data[v_date][district] = copy.deepcopy(count)
# set total if get_only_curr is False
if not get_only_curr:
for d, dist_dict in pivot_data.items():
total = {
"confirmed": 0,
"deaths": 0,
"recovered": 0,
"active": 0,
}
for district, value in dist_dict.items():
for key in total.keys():
total[key] += getint(value[key])
total["district"] = "TOTAL"
dist_dict["TOTAL"] = total
return pivot_data
def edit_data_index(date_list, totals_data, testing_data, kerala_data):
di_data = None
# write test_data into testdatajson
with open(TEST_DATA_JSON, "w") as test_json:
td = {}
for d, v in testing_data.items():
strdate = "{}-{}-{}".format(d.day, d.month, d.year)
td[strdate] = {key: val for key, val in v.items() if key != "date"}
json.dump(td, test_json)
print("Wrote testing data to: {}".format(TEST_DATA_JSON))
with open(DATA_INDEX_JSON, "r") as json_file:
di_data = json.load(json_file)
| get_testing_details | identifier_name |
scrape.py | ")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
district = cols[0]
data_dict = dict(zip(QUARANTINE_HEADER, cols))
data_dict["dist_code"] = district
data_dict["district"] = district_map[district]
csv_data.append(data_dict)
return csv_data
def extract_datewise_active(soup, district):
datalist = []
# assumes last table is the datewise table
table = soup.find_all("table")[-1]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_HEADER, cols))
data_dict["date"] = get_date(data_dict["date"])
data_dict["district"] = district
datalist.append(data_dict)
return datalist
def get_active_details_today():
active_data = defaultdict(list)
sess = init_sess()
response = run_req(sess, ACTIVE_REQ_URL, None, HEADERS, "GET")
if not response:
print("response failed for today's active case details")
return
if len(response.content) < 1000:
print(len(response.content), " Actual response not recieved")
return
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
last_date = soup.find_all(text=re.compile("updated", re.I))[0].split()[1]
last_date = get_date(last_date)
# assumes second last table is the summary table
table = soup.find_all("table")[-2]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_TODAY_HEADER, cols))
data_dict["date"] = last_date
district = data_dict["district"]
active_data[district].append(data_dict)
# are there 14 district data + total data
assert len(active_data) == 15
# get kerala datewise details table as well
kerala_data = {i["date"]: i for i in extract_datewise_active(soup, "KERALA")}
return (kerala_data, active_data, last_date)
def get_bulk_active_details():
active_data = {}
time.sleep(5)
for code, district in district_code_map.items():
# new session for each district data
sess = init_sess()
print("Processing: {}".format(district))
payload = {"district": code}
response = run_req(sess, ACTIVE_REQ_URL, payload, HEADERS, "POST")
if not response:
print("response failed for district {}".format(district))
break
if len(response.content) < 1000:
print(len(response.content), " Actual response not recieved")
break
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
# assumes last table is the datewise table
active_data[district] = extract_datewise_active(soup, district)
time.sleep(5)
return active_data
def get_testing_details():
testing_data = {}
sess = init_sess()
response = run_req(sess, TESTING_REQ_URL, None, HEADERS, "GET")
if not response:
print("response failed for today's active case details")
return
if len(response.content) < 1000:
print(len(response.content), " Actual response not recieved")
return
print("Processing testing data")
soup = BeautifulSoup(response.content, "html.parser")
case_report = soup.find_all(text=re.compile("Daily Case Reports from", re.I))[0]
table = case_report.parent.parent.parent.find_all("table")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(TESTING_HEADER, cols))
test_date = get_date(data_dict["date"])
data_dict["date"] = test_date
testing_data[test_date] = data_dict
return testing_data
def active_detail_pivot(active_data, get_only_curr=False):
pivot_data = defaultdict(dict)
for district, values in active_data.items():
count = {"district": district, "confirmed": 0, "deaths": 0, "recovered": 0}
for value in values:
v_date = value["date"]
for key in ["confirmed", "deaths", "recovered"]:
count[key] += getint(value[key])
count["active"] = getint(value["active"])
pivot_data[v_date][district] = copy.deepcopy(count)
# set total if get_only_curr is False
if not get_only_curr:
for d, dist_dict in pivot_data.items():
total = {
"confirmed": 0,
"deaths": 0,
"recovered": 0,
"active": 0,
}
for district, value in dist_dict.items():
for key in total.keys():
total[key] += getint(value[key])
total["district"] = "TOTAL"
dist_dict["TOTAL"] = total
return pivot_data
def edit_data_index(date_list, totals_data, testing_data, kerala_data):
di_data = None
# write test_data into testdatajson
with open(TEST_DATA_JSON, "w") as test_json:
td = {}
for d, v in testing_data.items():
strdate = "{}-{}-{}".format(d.day, d.month, d.year)
td[strdate] = {key: val for key, val in v.items() if key != "date"}
json.dump(td, test_json)
print("Wrote testing data to: {}".format(TEST_DATA_JSON))
with open(DATA_INDEX_JSON, "r") as json_file:
di_data = json.load(json_file)
# redundant code for updating testing details on dataIndex for every run
print("updating testing in dataIndex")
for d, v in testing_data.items():
datestr = "{}-{}-{}".format(d.day, d.month, d.year)
try:
testing_entry = di_data["daily_bulletin"][datestr]
except KeyError:
testing_entry = {}
testing_day = v
testing_entry["sample_sent"] = getint(testing_day["total_sent"])
testing_entry["sample_sent_today"] = getint(testing_day["sent_on_date"])
di_data["daily_bulletin"][datestr] = testing_entry
print("updating other details in dataIndex")
for d in date_list:
datestr = "{}-{}-{}".format(d.day, d.month, d.year)
try:
entry = di_data["daily_bulletin"][datestr]
except KeyError:
entry = {}
total_day = totals_data[d]
try:
testing_day = testing_data[d]
except KeyError:
print("data not available for {}".format(d))
available = False
prevday = d
while not available:
prevday -= timedelta(days=1)
print("trying for {}".format(prevday))
# if data isn't available for current day, show previous day data
try:
testing_day = testing_data[prevday]
except KeyError:
print("not available for {}".format(prevday))
else:
available = True
kd = kerala_data[d]
entry["total_active"] = getint(total_day["active"])
entry["total_positive"] = getint(total_day["confirmed"])
entry["deaths"] = getint(total_day["deaths"])
entry["positive_today"] = getint(kd["confirmed"])
if "sample_sent" not in entry:
entry["sample_sent"] = getint(testing_day["total_sent"])
entry["sample_sent_today"] = getint(testing_day["sent_on_date"])
entry["total_passengers"] = 0
filename = "data_{}_{}_{}.csv".format(d.day, d.month, d.year)
entry["file"] = filename
di_data["daily_bulletin"][datestr] = entry
with open(DATA_INDEX_JSON, "w") as json_file:
json.dump(di_data, json_file)
print("Wrote index data to: {}".format(DATA_INDEX_JSON))
with open(DATA_INDEX_JS, "w") as js_file:
write_str = "var dataIndex = " + str(di_data)
js_file.write(write_str)
print("Wrote index data to: {}".format(DATA_INDEX_JS))
def get_data_for_date(dates=[], get_only_curr=False):
kd, active_today, updated_date = get_active_details_today()
if not get_only_curr:
active_data = get_bulk_active_details()
else:
assert dates[0] == updated_date, "Date mismatch. Site updated till {}".format(
updated_date
)
active_data = active_today
time.sleep(2)
testing_data = get_testing_details()
time.sleep(3)
active_data_pivot = active_detail_pivot(active_data, get_only_curr)
totals_data = {}
for d in dates:
qar_data = get_quarantine_details(d) | active = active_data_pivot[d] | random_line_split |
|
scrape.py | _all("td")]
district = cols[0]
data_dict = dict(zip(QUARANTINE_HEADER, cols))
data_dict["dist_code"] = district
data_dict["district"] = district_map[district]
csv_data.append(data_dict)
return csv_data
def extract_datewise_active(soup, district):
datalist = []
# assumes last table is the datewise table
table = soup.find_all("table")[-1]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_HEADER, cols))
data_dict["date"] = get_date(data_dict["date"])
data_dict["district"] = district
datalist.append(data_dict)
return datalist
def get_active_details_today():
active_data = defaultdict(list)
sess = init_sess()
response = run_req(sess, ACTIVE_REQ_URL, None, HEADERS, "GET")
if not response:
print("response failed for today's active case details")
return
if len(response.content) < 1000:
print(len(response.content), " Actual response not recieved")
return
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
last_date = soup.find_all(text=re.compile("updated", re.I))[0].split()[1]
last_date = get_date(last_date)
# assumes second last table is the summary table
table = soup.find_all("table")[-2]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_TODAY_HEADER, cols))
data_dict["date"] = last_date
district = data_dict["district"]
active_data[district].append(data_dict)
# are there 14 district data + total data
assert len(active_data) == 15
# get kerala datewise details table as well
kerala_data = {i["date"]: i for i in extract_datewise_active(soup, "KERALA")}
return (kerala_data, active_data, last_date)
def get_bulk_active_details():
active_data = {}
time.sleep(5)
for code, district in district_code_map.items():
# new session for each district data
sess = init_sess()
print("Processing: {}".format(district))
payload = {"district": code}
response = run_req(sess, ACTIVE_REQ_URL, payload, HEADERS, "POST")
if not response:
print("response failed for district {}".format(district))
break
if len(response.content) < 1000:
print(len(response.content), " Actual response not recieved")
break
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
# assumes last table is the datewise table
active_data[district] = extract_datewise_active(soup, district)
time.sleep(5)
return active_data
def get_testing_details():
testing_data = {}
sess = init_sess()
response = run_req(sess, TESTING_REQ_URL, None, HEADERS, "GET")
if not response:
print("response failed for today's active case details")
return
if len(response.content) < 1000:
print(len(response.content), " Actual response not recieved")
return
print("Processing testing data")
soup = BeautifulSoup(response.content, "html.parser")
case_report = soup.find_all(text=re.compile("Daily Case Reports from", re.I))[0]
table = case_report.parent.parent.parent.find_all("table")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(TESTING_HEADER, cols))
test_date = get_date(data_dict["date"])
data_dict["date"] = test_date
testing_data[test_date] = data_dict
return testing_data
def active_detail_pivot(active_data, get_only_curr=False):
pivot_data = defaultdict(dict)
for district, values in active_data.items():
count = {"district": district, "confirmed": 0, "deaths": 0, "recovered": 0}
for value in values:
v_date = value["date"]
for key in ["confirmed", "deaths", "recovered"]:
count[key] += getint(value[key])
count["active"] = getint(value["active"])
pivot_data[v_date][district] = copy.deepcopy(count)
# set total if get_only_curr is False
if not get_only_curr:
for d, dist_dict in pivot_data.items():
total = {
"confirmed": 0,
"deaths": 0,
"recovered": 0,
"active": 0,
}
for district, value in dist_dict.items():
for key in total.keys():
total[key] += getint(value[key])
total["district"] = "TOTAL"
dist_dict["TOTAL"] = total
return pivot_data
def edit_data_index(date_list, totals_data, testing_data, kerala_data):
di_data = None
# write test_data into testdatajson
with open(TEST_DATA_JSON, "w") as test_json:
td = {}
for d, v in testing_data.items():
strdate = "{}-{}-{}".format(d.day, d.month, d.year)
td[strdate] = {key: val for key, val in v.items() if key != "date"}
json.dump(td, test_json)
print("Wrote testing data to: {}".format(TEST_DATA_JSON))
with open(DATA_INDEX_JSON, "r") as json_file:
di_data = json.load(json_file)
# redundant code for updating testing details on dataIndex for every run
print("updating testing in dataIndex")
for d, v in testing_data.items():
datestr = "{}-{}-{}".format(d.day, d.month, d.year)
try:
testing_entry = di_data["daily_bulletin"][datestr]
except KeyError:
testing_entry = {}
testing_day = v
testing_entry["sample_sent"] = getint(testing_day["total_sent"])
testing_entry["sample_sent_today"] = getint(testing_day["sent_on_date"])
di_data["daily_bulletin"][datestr] = testing_entry
print("updating other details in dataIndex")
for d in date_list:
datestr = "{}-{}-{}".format(d.day, d.month, d.year)
try:
entry = di_data["daily_bulletin"][datestr]
except KeyError:
entry = {}
total_day = totals_data[d]
try:
testing_day = testing_data[d]
except KeyError:
print("data not available for {}".format(d))
available = False
prevday = d
while not available:
prevday -= timedelta(days=1)
print("trying for {}".format(prevday))
# if data isn't available for current day, show previous day data
try:
testing_day = testing_data[prevday]
except KeyError:
print("not available for {}".format(prevday))
else:
available = True
kd = kerala_data[d]
entry["total_active"] = getint(total_day["active"])
entry["total_positive"] = getint(total_day["confirmed"])
entry["deaths"] = getint(total_day["deaths"])
entry["positive_today"] = getint(kd["confirmed"])
if "sample_sent" not in entry:
entry["sample_sent"] = getint(testing_day["total_sent"])
entry["sample_sent_today"] = getint(testing_day["sent_on_date"])
entry["total_passengers"] = 0
filename = "data_{}_{}_{}.csv".format(d.day, d.month, d.year)
entry["file"] = filename
di_data["daily_bulletin"][datestr] = entry
with open(DATA_INDEX_JSON, "w") as json_file:
json.dump(di_data, json_file)
print("Wrote index data to: {}".format(DATA_INDEX_JSON))
with open(DATA_INDEX_JS, "w") as js_file:
write_str = "var dataIndex = " + str(di_data)
js_file.write(write_str)
print("Wrote index data to: {}".format(DATA_INDEX_JS))
def get_data_for_date(dates=[], get_only_curr=False):
| kd, active_today, updated_date = get_active_details_today()
if not get_only_curr:
active_data = get_bulk_active_details()
else:
assert dates[0] == updated_date, "Date mismatch. Site updated till {}".format(
updated_date
)
active_data = active_today
time.sleep(2)
testing_data = get_testing_details()
time.sleep(3)
active_data_pivot = active_detail_pivot(active_data, get_only_curr)
totals_data = {}
for d in dates:
qar_data = get_quarantine_details(d)
active = active_data_pivot[d]
if not active:
print("Active details empty for date: {}".format(d))
continue
csv_data = [] | identifier_body |
|
scrape.py | _TODAY_HEADER = ["district"]
ACTIVE_TODAY_HEADER.extend(CASE_HEADER)
CSV_HEADER = copy.deepcopy(QUARANTINE_HEADER)
CSV_HEADER.extend(CASE_HEADER)
TESTING_HEADER = [
"date",
"total_sent",
"sent_on_date",
"processed_in_one_day",
"total_positive",
"new_positive",
]
TEST_DATA_JSON = "./testData.json"
DATA_INDEX_JSON = "./dataIndexJSON.json"
DATA_INDEX_JS = "./dataIndex.js"
INIT_URL = "https://dashboard.kerala.gov.in"
QAR_REQ_URL = "https://dashboard.kerala.gov.in/quarantined-datewise.php"
ACTIVE_REQ_URL = (
"https://dashboard.kerala.gov.in/dailyreporting-view-public-districtwise.php"
)
TESTING_REQ_URL = "https://dashboard.kerala.gov.in/testing-view-public.php"
DATE_REQ_STRING = "rep_date3"
HEADERS = {
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-User": "?1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
"Origin": "https://dashboard.kerala.gov.in",
"Referer": "https://dashboard.kerala.gov.in/quarantined-datewise.php",
}
def get_date(datearg):
dsplit = [int(i) for i in re.split(r"[-/]\s*", datearg)]
return date(day=dsplit[0], month=dsplit[1], year=dsplit[2])
def getint(string):
try:
return int(string)
except (TypeError, ValueError):
return int("".join(filter(str.isdigit, string)))
def csv_writer(filepath, data):
with open(filepath, "w") as f:
w = csv.DictWriter(f, fieldnames=set(CSV_HEADER))
w.writeheader()
w.writerows(data)
def init_sess():
try:
# warm up the cookies
sess = requests.Session()
sess.get(INIT_URL, headers=HEADERS)
except requests.exceptions.RequestException as e:
print(e)
else:
return sess
def run_req(sess, url, data, headers, method="GET"):
kwargs = {}
if method == "GET":
req_func = sess.get
elif method == "POST":
req_func = sess.post
kwargs["data"] = data
kwargs["headers"] = headers
try:
response = req_func(url, **kwargs)
except requests.exceptions.RequestException as e:
print(e)
else:
return response
def get_quarantine_details(date):
sess = init_sess()
date_str = date.strftime("%d/%m/%Y")
payload = {DATE_REQ_STRING: date_str, "lw": "View"}
csv_data = []
response = run_req(sess, QAR_REQ_URL, payload, HEADERS, "POST")
if not response:
return
soup = BeautifulSoup(response.content, "html.parser")
table = soup.find_all("table")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
district = cols[0]
data_dict = dict(zip(QUARANTINE_HEADER, cols))
data_dict["dist_code"] = district
data_dict["district"] = district_map[district]
csv_data.append(data_dict)
return csv_data
def extract_datewise_active(soup, district):
datalist = []
# assumes last table is the datewise table
table = soup.find_all("table")[-1]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_HEADER, cols))
data_dict["date"] = get_date(data_dict["date"])
data_dict["district"] = district
datalist.append(data_dict)
return datalist
def get_active_details_today():
active_data = defaultdict(list)
sess = init_sess()
response = run_req(sess, ACTIVE_REQ_URL, None, HEADERS, "GET")
if not response:
print("response failed for today's active case details")
return
if len(response.content) < 1000:
print(len(response.content), " Actual response not recieved")
return
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
last_date = soup.find_all(text=re.compile("updated", re.I))[0].split()[1]
last_date = get_date(last_date)
# assumes second last table is the summary table
table = soup.find_all("table")[-2]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_TODAY_HEADER, cols))
data_dict["date"] = last_date
district = data_dict["district"]
active_data[district].append(data_dict)
# are there 14 district data + total data
assert len(active_data) == 15
# get kerala datewise details table as well
kerala_data = {i["date"]: i for i in extract_datewise_active(soup, "KERALA")}
return (kerala_data, active_data, last_date)
def get_bulk_active_details():
active_data = {}
time.sleep(5)
for code, district in district_code_map.items():
# new session for each district data
sess = init_sess()
print("Processing: {}".format(district))
payload = {"district": code}
response = run_req(sess, ACTIVE_REQ_URL, payload, HEADERS, "POST")
if not response:
print("response failed for district {}".format(district))
break
if len(response.content) < 1000:
print(len(response.content), " Actual response not recieved")
break
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
# assumes last table is the datewise table
active_data[district] = extract_datewise_active(soup, district)
time.sleep(5)
return active_data
def get_testing_details():
testing_data = {}
sess = init_sess()
response = run_req(sess, TESTING_REQ_URL, None, HEADERS, "GET")
if not response:
|
if len(response.content) < 1000:
print(len(response.content), " Actual response not recieved")
return
print("Processing testing data")
soup = BeautifulSoup(response.content, "html.parser")
case_report = soup.find_all(text=re.compile("Daily Case Reports from", re.I))[0]
table = case_report.parent.parent.parent.find_all("table")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(TESTING_HEADER, cols))
test_date = get_date(data_dict["date"])
data_dict["date"] = test_date
testing_data[test_date] = data_dict
return testing_data
def active_detail_pivot(active_data, get_only_curr=False):
pivot_data = defaultdict(dict)
for district, values in active_data.items():
count = {"district": district, "confirmed": 0, "deaths": 0, "recovered": 0}
for value in values:
v_date = value["date"]
for key in ["confirmed", "deaths", "recovered"]:
count[key] += getint(value[key])
count["active"] = getint(value["active"])
pivot_data[v_date][district] = copy.deepcopy(count)
# set total if get_only_curr is False
if not get_only_curr:
for d, dist_dict in pivot_data.items():
total = {
"confirmed": 0,
"deaths": 0,
"recovered": 0,
"active": 0,
}
for district, value in dist_dict.items():
for key in total.keys():
total[key] += getint(value[key])
total["district"] = "TOTAL"
dist_dict["TOTAL"] = total
return pivot_data
def edit_data_index(date_list, totals_data, testing_data, kerala_data):
di_data = None
# write test_data into testdatajson
with open(TEST_DATA_JSON, "w") as test_json:
td = {}
for d, v in testing_data.items():
strdate = "{}-{}-{}".format(d.day, d.month, d.year)
td[strdate] = {key: val for key, val in v.items() if key != "date"}
json.dump(td, test_json)
print("Wrote testing data to: {}".format(TEST_DATA_JSON))
with open(DATA_INDEX_JSON, "r") as json_file:
di_data = json.load(json_file)
| print("response failed for today's active case details")
return | conditional_block |
vck.py | their pixel-by-pixel AND, OR or
# XOR as appropriate.
def AND(*args): return boolean(lambda a,b:a&b, args)
def OR(*args): return boolean(lambda a,b:a|b, args)
def XOR(*args): return boolean(lambda a,b:a^b, args)
def NOT(bmp):
"""Take a bitmap and return its negative (obtained by swopping white
and black at each pixel)."""
maxX, maxY = size = bmp.size()
result = bitmap(size)
for x in range(maxX):
for y in range(maxY):
result.set(x,y, not bmp.get(x,y))
return result
def randomBitmap(size):
"""Take a size (2-tuple of x and y) and return a bitmap of that size
filled with random pixels. WARNING! THE CODE HERE IS ONLY FOR
DEMONSTRATION PURPOSES, SINCE IT CALLS THE STANDARD PYTHON RANDOM
NUMBER GENERATOR, which is fine for statistics but not good enough for
crypto. For real use, substitute this with really random data from an
external source, or at least with a properly seeded cryptographically
strong RNG."""
b = bitmap(size)
xmax, ymax = size
for x in xrange(xmax):
for y in xrange(ymax):
b.set(x, y, random.randint(0,1))
return b
class _viewer:
"""A toplevel window with a canvas."""
def __init__(self, root, width, height, title="Unnamed VCK image"):
self.__width = width
self.__height = height
self._t = Tkinter.Toplevel(root)
Tkinter.Wm.title(self._t, title)
self._c = Tkinter.Canvas(self._t, width=width, height=height,
border=0, highlightthickness=0,
background="White")
self._c.pack()
self._t.update()
def psprint(self, filename):
"""Write a postscript representation of the canvas to the specified
file."""
# The portrait A4 page is, in mm, WxH=210x297. Let's have a safety
# margin of 7mm all around it, and the usable area becomes 196x283.
W = 196.0
H = 283.0
x1, y1, x2, y2 = self._c.bbox("all")
options = {
"pageanchor": "sw",
"x": "%fp" % x1,
"y": "%fp" % y1,
"height": "%fp" % (y2-y1),
"width": "%fp" % (x2-x1),
"pagex": "0",
"pagey": "0",
"file": filename,
"colormode": "mono",
}
# ??? I think I'm doing all this viewport math sensibly, BUT I
# still get a weird asymmetric margin around the thing, and I
# haven't got a clue how to get rid of it.
yscale = (y2-y1) / H
xscale = (x2-x1) / W
# The direction with the greater scaling factor is the limiting one
if xscale > yscale:
options["pagewidth"] = "%fm" % W
else:
options["pageheight"] ="%fm" % H
self._c.update()
apply(self._c.postscript, (), options)
def canvas(self):
"""Return the canvas."""
return self._c
def __del__(self):
self._t.destroy()
class _bitmapViewer(_viewer):
"""A viewer for bitmaps."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.BitmapImage(
image, background="Black", foreground="White")
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
def encrypt(rawPlaintext, rawPad = None):
"""Take a plaintext bitmap and, optionally, a supposedly random pad of
the same size (one will be made up on the spot if not supplied). Return
a 2-tuple containing the large pixelcoded versions of ciphertext and
pad."""
# The raw versions are the same size as the original rawPlaintext
if not rawPad:
rawPad = randomBitmap(rawPlaintext.size())
rawCiphertext = XOR(rawPlaintext, rawPad)
# The final versions are linearly twice as big due to pixelcoding
ciphertext = rawCiphertext.pixelcode()
pad = rawPad.pixelcode()
return ciphertext, pad
def decrypt(ciphertext, pad):
"""Actually the decription ought to be performed without a computer
(the whole point of visual cryptography), by just superimposing the
transparencies of the ciphertext and pad. This is a simulation of this
process."""
return OR(ciphertext, pad)
def mainApp(function):
"""Execute the supplied function. The function may create new windows
by calling bitmap.view() or by making instances of viewer, but it must
return a list of any such windows it makes. The point of this wrapper
is merely to shield the caller away from the quirks of initialising
Tkinter, running its main loop and ensuring that windows don't
disappear unexpectedly."""
root = Tkinter.Tk()
quit = Tkinter.Button(root, text="Quit", command=root.quit)
quit.pack()
Tkinter.Wm.title(root, "VCK main")
windows = function(root)
root.update()
root.mainloop()
# --------------------------------------------------------------
# Analog (greyscale) version
class | (_viewer):
"""A toplevel window with a canvas, suitable for viewing a moonfield."""
R = 9 # default radius
def __init__(self, root, mf, title="Unnamed moonfield", radius=R):
"""Precondition: the moonfield mf must be filled."""
xmax, ymax = mf.size()
_viewer.__init__(self, root, xmax*2*radius, ymax*2*radius, title)
mf.renderOnCanvas(self._c, radius)
self._t.update()
class photoViewer(_viewer):
"""A viewer for greyscale images."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.PhotoImage(
image)
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
class moonfield:
"""A 2d array of angles. Items in the array are indexed by integers in
0..xmax, 0..ymax, with 0,0 being the NW corner. Each angle specifies
the phase (rotation) of a black halfmoon around its centre (determined
by its place in the array) and is represented by an integer in the
range 0..509"""
# Why that strange range? Well, since we are going to use two rotated
# halfmoons to display a luminosity, and since the luminosity of the
# gap between the two halfmoons ranges from 255 (white) when they're 0
# radians apart (i.e. superimposed, leaving a half-moon of white) to 0
# (black) when they're pi radians apart (i.e. non-overlapping, covering
# the whole disc with black), this means that there are 255 discrete
# steps in pi (not 256, because the 256th step is already "the first of
# the other half"), and 2*255 in 2*pi. So the integers in a moonfield
# range from 0 to 2*255-1 = 509. And we use arithmetic modulo 510 on
# them.
discretePi = 255
mod = discretePi*2
i2d = 360.0 / mod # integer to degree conversion factor
def __init__(self, size, filler=None):
"""Make a moonfield of the specified size. If a filler function is
specified, fill it with it, otherwise leave the data
uninitialised."""
self.__data = {}
self.__xmax, self.__ymax = size
if filler:
self.fill(filler)
def size(self):
"""Return a 2-tuple with the dimensions of the moonfield."""
return self.__xmax, self.__ymax
def fill(self, filler):
"""Take a function f(x,y) that accepts a position in the moonfield
and returns an integer value. Fill every cell in the moonfield with
the value returned by the filler (taken modulo mod)."""
for x in range(self.__xmax):
for y in range(self.__ymax):
self.__data[(x,y)] = filler(x,y) % self.mod
def randomFill(self, | moonfieldViewer | identifier_name |
vck.py | postscript representation of the canvas to the specified
file."""
# The portrait A4 page is, in mm, WxH=210x297. Let's have a safety
# margin of 7mm all around it, and the usable area becomes 196x283.
W = 196.0
H = 283.0
x1, y1, x2, y2 = self._c.bbox("all")
options = {
"pageanchor": "sw",
"x": "%fp" % x1,
"y": "%fp" % y1,
"height": "%fp" % (y2-y1),
"width": "%fp" % (x2-x1),
"pagex": "0",
"pagey": "0",
"file": filename,
"colormode": "mono",
}
# ??? I think I'm doing all this viewport math sensibly, BUT I
# still get a weird asymmetric margin around the thing, and I
# haven't got a clue how to get rid of it.
yscale = (y2-y1) / H
xscale = (x2-x1) / W
# The direction with the greater scaling factor is the limiting one
if xscale > yscale:
options["pagewidth"] = "%fm" % W
else:
options["pageheight"] ="%fm" % H
self._c.update()
apply(self._c.postscript, (), options)
def canvas(self):
"""Return the canvas."""
return self._c
def __del__(self):
self._t.destroy()
class _bitmapViewer(_viewer):
"""A viewer for bitmaps."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.BitmapImage(
image, background="Black", foreground="White")
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
def encrypt(rawPlaintext, rawPad = None):
"""Take a plaintext bitmap and, optionally, a supposedly random pad of
the same size (one will be made up on the spot if not supplied). Return
a 2-tuple containing the large pixelcoded versions of ciphertext and
pad."""
# The raw versions are the same size as the original rawPlaintext
if not rawPad:
rawPad = randomBitmap(rawPlaintext.size())
rawCiphertext = XOR(rawPlaintext, rawPad)
# The final versions are linearly twice as big due to pixelcoding
ciphertext = rawCiphertext.pixelcode()
pad = rawPad.pixelcode()
return ciphertext, pad
def decrypt(ciphertext, pad):
"""Actually the decription ought to be performed without a computer
(the whole point of visual cryptography), by just superimposing the
transparencies of the ciphertext and pad. This is a simulation of this
process."""
return OR(ciphertext, pad)
def mainApp(function):
"""Execute the supplied function. The function may create new windows
by calling bitmap.view() or by making instances of viewer, but it must
return a list of any such windows it makes. The point of this wrapper
is merely to shield the caller away from the quirks of initialising
Tkinter, running its main loop and ensuring that windows don't
disappear unexpectedly."""
root = Tkinter.Tk()
quit = Tkinter.Button(root, text="Quit", command=root.quit)
quit.pack()
Tkinter.Wm.title(root, "VCK main")
windows = function(root)
root.update()
root.mainloop()
# --------------------------------------------------------------
# Analog (greyscale) version
class moonfieldViewer(_viewer):
"""A toplevel window with a canvas, suitable for viewing a moonfield."""
R = 9 # default radius
def __init__(self, root, mf, title="Unnamed moonfield", radius=R):
"""Precondition: the moonfield mf must be filled."""
xmax, ymax = mf.size()
_viewer.__init__(self, root, xmax*2*radius, ymax*2*radius, title)
mf.renderOnCanvas(self._c, radius)
self._t.update()
class photoViewer(_viewer):
"""A viewer for greyscale images."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.PhotoImage(
image)
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
class moonfield:
"""A 2d array of angles. Items in the array are indexed by integers in
0..xmax, 0..ymax, with 0,0 being the NW corner. Each angle specifies
the phase (rotation) of a black halfmoon around its centre (determined
by its place in the array) and is represented by an integer in the
range 0..509"""
# Why that strange range? Well, since we are going to use two rotated
# halfmoons to display a luminosity, and since the luminosity of the
# gap between the two halfmoons ranges from 255 (white) when they're 0
# radians apart (i.e. superimposed, leaving a half-moon of white) to 0
# (black) when they're pi radians apart (i.e. non-overlapping, covering
# the whole disc with black), this means that there are 255 discrete
# steps in pi (not 256, because the 256th step is already "the first of
# the other half"), and 2*255 in 2*pi. So the integers in a moonfield
# range from 0 to 2*255-1 = 509. And we use arithmetic modulo 510 on
# them.
discretePi = 255
mod = discretePi*2
i2d = 360.0 / mod # integer to degree conversion factor
def __init__(self, size, filler=None):
"""Make a moonfield of the specified size. If a filler function is
specified, fill it with it, otherwise leave the data
uninitialised."""
self.__data = {}
self.__xmax, self.__ymax = size
if filler:
self.fill(filler)
def size(self):
"""Return a 2-tuple with the dimensions of the moonfield."""
return self.__xmax, self.__ymax
def fill(self, filler):
"""Take a function f(x,y) that accepts a position in the moonfield
and returns an integer value. Fill every cell in the moonfield with
the value returned by the filler (taken modulo mod)."""
for x in range(self.__xmax):
for y in range(self.__ymax):
self.__data[(x,y)] = filler(x,y) % self.mod
def randomFill(self, low=0, high=mod-1):
"""Fill the moonfield with random values in the range min..max
inclusive. WARNING: NOT GOOD FOR REAL CRYPTO USE. Use a
cryptographically strong RNG instead of the library's unless you're
just playing around."""
def randomFiller(x,y, low=low, high=high):
return random.randint(low, high)
self.fill(randomFiller)
def imageComplement(self, img):
"""Precondition: self must have been filled already. Take a
greyscale image (PIL type "L"), which must have the same size as
self. Return a new moonfield such that, if that new moonfield and
the current one were superimposed, one would "see" the supplied
image. NB: if the supplied image parameter is a string, an attempt
is made to open the file of that name."""
if type(img) == type(""):
img = Image.open(img).convert("L")
assert self.size() == img.size
result = moonfield(size=(self.__xmax, self.__ymax))
def filler(x,y,i=img, d=self.__data, pi=self.discretePi, m=self.mod):
return (d[(x,y)] - (pi - i.getpixel((x,y)))) % m
result.fill(filler)
return result
def renderOnCanvas(self, canvas, radius=moonfieldViewer.R):
"""Take a canvas and render the moonfield on it. The radius of the
halfmoons must be specified in canvas units."""
for x in range(self.__xmax):
for y in range(self.__ymax):
# Make the halfmoon at x,y | canvas.create_arc(
radius*2*x, radius*2*y, radius*2*(x+1)-1, radius*2*(y+1)-1,
start = self.__data[(x,y)] * self.i2d, extent = 180.0, | random_line_split |
|
vck.py |
return result
# Doc string for the following three functions:
# Take an arbitrary number (>=1) of bitmap arguments, all of the same size,
# and return another bitmap resulting from their pixel-by-pixel AND, OR or
# XOR as appropriate.
def AND(*args): return boolean(lambda a,b:a&b, args)
def OR(*args): return boolean(lambda a,b:a|b, args)
def XOR(*args): return boolean(lambda a,b:a^b, args)
def NOT(bmp):
"""Take a bitmap and return its negative (obtained by swopping white
and black at each pixel)."""
maxX, maxY = size = bmp.size()
result = bitmap(size)
for x in range(maxX):
for y in range(maxY):
result.set(x,y, not bmp.get(x,y))
return result
def randomBitmap(size):
"""Take a size (2-tuple of x and y) and return a bitmap of that size
filled with random pixels. WARNING! THE CODE HERE IS ONLY FOR
DEMONSTRATION PURPOSES, SINCE IT CALLS THE STANDARD PYTHON RANDOM
NUMBER GENERATOR, which is fine for statistics but not good enough for
crypto. For real use, substitute this with really random data from an
external source, or at least with a properly seeded cryptographically
strong RNG."""
b = bitmap(size)
xmax, ymax = size
for x in xrange(xmax):
for y in xrange(ymax):
b.set(x, y, random.randint(0,1))
return b
class _viewer:
"""A toplevel window with a canvas."""
def __init__(self, root, width, height, title="Unnamed VCK image"):
self.__width = width
self.__height = height
self._t = Tkinter.Toplevel(root)
Tkinter.Wm.title(self._t, title)
self._c = Tkinter.Canvas(self._t, width=width, height=height,
border=0, highlightthickness=0,
background="White")
self._c.pack()
self._t.update()
def psprint(self, filename):
"""Write a postscript representation of the canvas to the specified
file."""
# The portrait A4 page is, in mm, WxH=210x297. Let's have a safety
# margin of 7mm all around it, and the usable area becomes 196x283.
W = 196.0
H = 283.0
x1, y1, x2, y2 = self._c.bbox("all")
options = {
"pageanchor": "sw",
"x": "%fp" % x1,
"y": "%fp" % y1,
"height": "%fp" % (y2-y1),
"width": "%fp" % (x2-x1),
"pagex": "0",
"pagey": "0",
"file": filename,
"colormode": "mono",
}
# ??? I think I'm doing all this viewport math sensibly, BUT I
# still get a weird asymmetric margin around the thing, and I
# haven't got a clue how to get rid of it.
yscale = (y2-y1) / H
xscale = (x2-x1) / W
# The direction with the greater scaling factor is the limiting one
if xscale > yscale:
options["pagewidth"] = "%fm" % W
else:
options["pageheight"] ="%fm" % H
self._c.update()
apply(self._c.postscript, (), options)
def canvas(self):
"""Return the canvas."""
return self._c
def __del__(self):
self._t.destroy()
class _bitmapViewer(_viewer):
"""A viewer for bitmaps."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.BitmapImage(
image, background="Black", foreground="White")
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
def encrypt(rawPlaintext, rawPad = None):
"""Take a plaintext bitmap and, optionally, a supposedly random pad of
the same size (one will be made up on the spot if not supplied). Return
a 2-tuple containing the large pixelcoded versions of ciphertext and
pad."""
# The raw versions are the same size as the original rawPlaintext
if not rawPad:
rawPad = randomBitmap(rawPlaintext.size())
rawCiphertext = XOR(rawPlaintext, rawPad)
# The final versions are linearly twice as big due to pixelcoding
ciphertext = rawCiphertext.pixelcode()
pad = rawPad.pixelcode()
return ciphertext, pad
def decrypt(ciphertext, pad):
"""Actually the decription ought to be performed without a computer
(the whole point of visual cryptography), by just superimposing the
transparencies of the ciphertext and pad. This is a simulation of this
process."""
return OR(ciphertext, pad)
def mainApp(function):
"""Execute the supplied function. The function may create new windows
by calling bitmap.view() or by making instances of viewer, but it must
return a list of any such windows it makes. The point of this wrapper
is merely to shield the caller away from the quirks of initialising
Tkinter, running its main loop and ensuring that windows don't
disappear unexpectedly."""
root = Tkinter.Tk()
quit = Tkinter.Button(root, text="Quit", command=root.quit)
quit.pack()
Tkinter.Wm.title(root, "VCK main")
windows = function(root)
root.update()
root.mainloop()
# --------------------------------------------------------------
# Analog (greyscale) version
class moonfieldViewer(_viewer):
"""A toplevel window with a canvas, suitable for viewing a moonfield."""
R = 9 # default radius
def __init__(self, root, mf, title="Unnamed moonfield", radius=R):
"""Precondition: the moonfield mf must be filled."""
xmax, ymax = mf.size()
_viewer.__init__(self, root, xmax*2*radius, ymax*2*radius, title)
mf.renderOnCanvas(self._c, radius)
self._t.update()
class photoViewer(_viewer):
"""A viewer for greyscale images."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.PhotoImage(
image)
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
class moonfield:
"""A 2d array of angles. Items in the array are indexed by integers in
0..xmax, 0..ymax, with 0,0 being the NW corner. Each angle specifies
the phase (rotation) of a black halfmoon around its centre (determined
by its place in the array) and is represented by an integer in the
range 0..509"""
# Why that strange range? Well, since we are going to use two rotated
# halfmoons to display a luminosity, and since the luminosity of the
# gap between the two halfmoons ranges from 255 (white) when they're 0
# radians apart (i.e. superimposed, leaving a half-moon of white) to 0
# (black) when they're pi radians apart (i.e. non-overlapping, covering
# the whole disc with black), this means that there are 255 discrete
# steps in pi (not 256, because the 256th step is already "the first of
# the other half"), and 2*255 in 2*pi. So the integers in a moonfield
# range from 0 to 2*255-1 = 509. And we use arithmetic modulo 510 on
# them.
discretePi = 255
mod = discretePi*2
i2d = 360.0 / mod # integer to degree conversion factor
def __init__(self, size, filler=None):
"""Make a moonfield of the specified size. If a filler function is
specified, fill it with it, otherwise leave the data
uninitialised."""
self.__data = {}
self.__xmax, self.__ymax = size
if filler:
self.fill(filler)
def size(self):
"""Return a 2-tuple with the dimensions of the moonfield."""
return self.__xmax, self.__ymax
def fill(self, filler | for y in range(maxY):
pixel = bitmaps[0].get(x,y)
for b in bitmaps[1:]:
pixel = apply(operation, (pixel, b.get(x,y)))
result.set(x,y,pixel) | conditional_block |
|
vck.py | their pixel-by-pixel AND, OR or
# XOR as appropriate.
def AND(*args): return boolean(lambda a,b:a&b, args)
def OR(*args): return boolean(lambda a,b:a|b, args)
def XOR(*args): return boolean(lambda a,b:a^b, args)
def NOT(bmp):
"""Take a bitmap and return its negative (obtained by swopping white
and black at each pixel)."""
maxX, maxY = size = bmp.size()
result = bitmap(size)
for x in range(maxX):
for y in range(maxY):
result.set(x,y, not bmp.get(x,y))
return result
def randomBitmap(size):
"""Take a size (2-tuple of x and y) and return a bitmap of that size
filled with random pixels. WARNING! THE CODE HERE IS ONLY FOR
DEMONSTRATION PURPOSES, SINCE IT CALLS THE STANDARD PYTHON RANDOM
NUMBER GENERATOR, which is fine for statistics but not good enough for
crypto. For real use, substitute this with really random data from an
external source, or at least with a properly seeded cryptographically
strong RNG."""
b = bitmap(size)
xmax, ymax = size
for x in xrange(xmax):
for y in xrange(ymax):
b.set(x, y, random.randint(0,1))
return b
class _viewer:
"""A toplevel window with a canvas."""
def __init__(self, root, width, height, title="Unnamed VCK image"):
self.__width = width
self.__height = height
self._t = Tkinter.Toplevel(root)
Tkinter.Wm.title(self._t, title)
self._c = Tkinter.Canvas(self._t, width=width, height=height,
border=0, highlightthickness=0,
background="White")
self._c.pack()
self._t.update()
def psprint(self, filename):
"""Write a postscript representation of the canvas to the specified
file."""
# The portrait A4 page is, in mm, WxH=210x297. Let's have a safety
# margin of 7mm all around it, and the usable area becomes 196x283.
W = 196.0
H = 283.0
x1, y1, x2, y2 = self._c.bbox("all")
options = {
"pageanchor": "sw",
"x": "%fp" % x1,
"y": "%fp" % y1,
"height": "%fp" % (y2-y1),
"width": "%fp" % (x2-x1),
"pagex": "0",
"pagey": "0",
"file": filename,
"colormode": "mono",
}
# ??? I think I'm doing all this viewport math sensibly, BUT I
# still get a weird asymmetric margin around the thing, and I
# haven't got a clue how to get rid of it.
yscale = (y2-y1) / H
xscale = (x2-x1) / W
# The direction with the greater scaling factor is the limiting one
if xscale > yscale:
options["pagewidth"] = "%fm" % W
else:
options["pageheight"] ="%fm" % H
self._c.update()
apply(self._c.postscript, (), options)
def canvas(self):
"""Return the canvas."""
return self._c
def __del__(self):
self._t.destroy()
class _bitmapViewer(_viewer):
"""A viewer for bitmaps."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.BitmapImage(
image, background="Black", foreground="White")
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
def encrypt(rawPlaintext, rawPad = None):
"""Take a plaintext bitmap and, optionally, a supposedly random pad of
the same size (one will be made up on the spot if not supplied). Return
a 2-tuple containing the large pixelcoded versions of ciphertext and
pad."""
# The raw versions are the same size as the original rawPlaintext
if not rawPad:
rawPad = randomBitmap(rawPlaintext.size())
rawCiphertext = XOR(rawPlaintext, rawPad)
# The final versions are linearly twice as big due to pixelcoding
ciphertext = rawCiphertext.pixelcode()
pad = rawPad.pixelcode()
return ciphertext, pad
def decrypt(ciphertext, pad):
"""Actually the decription ought to be performed without a computer
(the whole point of visual cryptography), by just superimposing the
transparencies of the ciphertext and pad. This is a simulation of this
process."""
return OR(ciphertext, pad)
def mainApp(function):
"""Execute the supplied function. The function may create new windows
by calling bitmap.view() or by making instances of viewer, but it must
return a list of any such windows it makes. The point of this wrapper
is merely to shield the caller away from the quirks of initialising
Tkinter, running its main loop and ensuring that windows don't
disappear unexpectedly."""
root = Tkinter.Tk()
quit = Tkinter.Button(root, text="Quit", command=root.quit)
quit.pack()
Tkinter.Wm.title(root, "VCK main")
windows = function(root)
root.update()
root.mainloop()
# --------------------------------------------------------------
# Analog (greyscale) version
class moonfieldViewer(_viewer):
"""A toplevel window with a canvas, suitable for viewing a moonfield."""
R = 9 # default radius
def __init__(self, root, mf, title="Unnamed moonfield", radius=R):
"""Precondition: the moonfield mf must be filled."""
xmax, ymax = mf.size()
_viewer.__init__(self, root, xmax*2*radius, ymax*2*radius, title)
mf.renderOnCanvas(self._c, radius)
self._t.update()
class photoViewer(_viewer):
"""A viewer for greyscale images."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.PhotoImage(
image)
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
class moonfield:
|
def __init__(self, size, filler=None):
"""Make a moonfield of the specified size. If a filler function is
specified, fill it with it, otherwise leave the data
uninitialised."""
self.__data = {}
self.__xmax, self.__ymax = size
if filler:
self.fill(filler)
def size(self):
"""Return a 2-tuple with the dimensions of the moonfield."""
return self.__xmax, self.__ymax
def fill(self, filler):
"""Take a function f(x,y) that accepts a position in the moonfield
and returns an integer value. Fill every cell in the moonfield with
the value returned by the filler (taken modulo mod)."""
for x in range(self.__xmax):
for y in range(self.__ymax):
self.__data[(x,y)] = filler(x,y) % self.mod
def randomFill(self, low | """A 2d array of angles. Items in the array are indexed by integers in
0..xmax, 0..ymax, with 0,0 being the NW corner. Each angle specifies
the phase (rotation) of a black halfmoon around its centre (determined
by its place in the array) and is represented by an integer in the
range 0..509"""
# Why that strange range? Well, since we are going to use two rotated
# halfmoons to display a luminosity, and since the luminosity of the
# gap between the two halfmoons ranges from 255 (white) when they're 0
# radians apart (i.e. superimposed, leaving a half-moon of white) to 0
# (black) when they're pi radians apart (i.e. non-overlapping, covering
# the whole disc with black), this means that there are 255 discrete
# steps in pi (not 256, because the 256th step is already "the first of
# the other half"), and 2*255 in 2*pi. So the integers in a moonfield
# range from 0 to 2*255-1 = 509. And we use arithmetic modulo 510 on
# them.
discretePi = 255
mod = discretePi*2
i2d = 360.0 / mod # integer to degree conversion factor | identifier_body |
lib.rs | } }
/**
Forces the parser to interpret this macro's argument as an item, even in the presence of `tt` substitutions.
See [TLBoRM: AST Coercion](https://danielkeep.github.io/tlborm/book/blk-ast-coercion.html).
## Examples
```rust
# #[macro_use(as_item, tlborm_util)] extern crate tlborm;
macro_rules! enoom {
($name:ident { $($body:tt)* }) => {
as_item! {
// The `tt` substitution breaks regular parsing.
enum $name { $($body)* }
}
}
}
enoom! {
Dash { Solid, Dash, Dot }
}
# fn main() {}
```
*/
#[macro_export]
macro_rules! as_item { ($i:item) => {$i} }
/**
Forces the parser to interpret this macro's argument as a pattern, even in the presence of `tt` substitutions.
See [TLBoRM: AST Coercion](https://danielkeep.github.io/tlborm/book/blk-ast-coercion.html).
## Examples
```rust
# #[macro_use(as_pat, tlborm_util)] extern crate tlborm;
# fn main() {
macro_rules! tuple_pat {
($($names:tt)*) => {
// The `tt` substitution breaks regular parsing.
as_pat!( ( $($names,)* ) )
}
}
match (1, 2, 3) {
tuple_pat!(a b c) => assert_eq!((a, b, c), (1, 2, 3))
}
# }
```
*/
#[macro_export]
macro_rules! as_pat { ($p:pat) => {$p} }
/**
Forces the parser to interpret this macro's argument as a statement, even in the presence of `tt` substitutions.
See [TLBoRM: AST Coercion](https://danielkeep.github.io/tlborm/book/blk-ast-coercion.html).
## Examples
```rust
# #[macro_use(as_stmt, tlborm_util)] extern crate tlborm;
# fn main() {
macro_rules! let_stmt {
($name:tt = $($init:tt)*) => {
// The `tt` substitution breaks regular parsing.
as_stmt!(let $name = $($init)*);
}
}
let_stmt!(x = 42);
assert_eq!(x, 42);
# }
```
*/
#[macro_export]
macro_rules! as_stmt { ($s:stmt) => {$s} }
/**
Expands to the number of identifiers provided. The expansion is suitable for use in a constant expression, and is of type `u32`.
The identifiers provided **must** be mutually unique; *i.e.* there cannot be any repeated identifiers. In addition, the identifier `__CountIdentsLast` **must not** be used in the invocation. This macro should be usable for even very large numbers of identifiers.
See [TLBoRM: Counting (Enum counting)](https://danielkeep.github.io/tlborm/book/blk-counting.html#enum-counting).
## Examples
```rust
# #[macro_use(count_idents_enum, tlborm_util)] extern crate tlborm;
# fn main() {
const NUM: u32 = count_idents_enum!(Silly swingers get your feeling under spell);
assert_eq!(NUM, 7);
# }
*/
#[macro_export]
macro_rules! count_idents_enum {
($($idents:ident)*) => {tlborm_util!(@count_idents_enum $($idents)*)};
}
/**
Expands to the number of token trees provided. The expansion is suitable for use in a constant expression, and is of type `usize`.
This macro is limited to input of approximately 500 tokens, but efficiently expands in a single pass. This makes it useful in recursion-limited contexts, or when you want fast expansion of small inputs.
See [TLBoRM: Counting (Repetition with replacement)](https://danielkeep.github.io/tlborm/book/blk-counting.html#repetition-with-replacement).
## Examples
```rust
# #[macro_use(count_tts_flat, tlborm_util)] extern crate tlborm;
# fn main() {
const NUM: usize = count_tts_flat!(Everybody's rhythm mad (and I love that rhythm too!));
assert_eq!(NUM, 5);
# }
*/
#[macro_export]
macro_rules! count_tts_flat {
($($tts:tt)*) => {tlborm_util!(@count_tts_flat $($tts)*)};
}
/**
Expands to the number of token trees provided. The expansion is suitable for use in a constant expression, and is of type `usize`.
This macro is limited to input of approximately 1,200 tokens, but requires multiple recursive expansion passes. This macro is useful when you need to count a large number of things *and* you need the result to be a compile-time constant.
See [TLBoRM: Counting (Recursion)](https://danielkeep.github.io/tlborm/book/blk-counting.html#recursion).
## Examples
```rust
# #[macro_use(count_tts_recur, tlborm_util)] extern crate tlborm;
# fn main() {
const NUM: usize = count_tts_recur!(De l'enfer au paradis!);
assert_eq!(NUM, 6);
# }
*/
#[macro_export]
macro_rules! count_tts_recur {
($($tts:tt)*) => {tlborm_util!(@count_tts_recur $($tts)*)};
}
/**
Expands to the number of token trees provided. The expansion is **not** suitable for use in a constant expression, though it should be optimised to a simple integer constant in release builds.
This macro is has no practical limit (and has been tested to over 10,000 tokens).
See [TLBoRM: Counting (Slice length)](https://danielkeep.github.io/tlborm/book/blk-counting.html#slice-length).
## Examples
```rust
# #[macro_use(count_tts_slice, tlborm_util)] extern crate tlborm;
# fn main() {
let num = count_tts_slice!(You have no idea how tedious this is! #examplesrhard);
assert_eq!(num, 11);
# }
*/
#[macro_export]
macro_rules! count_tts_slice {
($($tts:tt)*) => {tlborm_util!(@count_tts_slice $($tts)*)};
}
/**
Expands to an invocation of the `$callback` macro, with a list of the unitary variant names of the provided enum separated by commas. The invocation's argument will be prefixed by the contents of `$arg`.
If `$arg` is of the form `{…}`, then the expansion will be parsed as one or more items. If it is of the form `(…)`, the expansion will be parsed as an expression.
See [TLBoRM: Enum Parsing](https://danielkeep.github.io/tlborm/book/blk-enum-parsing.html).
## Examples
```rust
# #[macro_use(parse_unitary_variants, tlborm_util)] extern crate tlborm;
# fn main() {
macro_rules! variant_list {
(sep: $sep:tt, ($($var:ident),*)) => {
concat!($(stringify!($var), $sep,)*)
}
}
const LIST: &'static str = parse_unitary_variants!(
enum Currency { Trenni, Phiring, Ryut, FakeMarinne, Faram, SoManyCoins }
=> variant_list(sep: ", ", )
);
assert_eq!(LIST, "Trenni, Phiring, Ryut, FakeMarinne, Faram, SoManyCoins, ");
# }
*/
#[macro_export]
macro_rules! parse_unitary_variants {
(
enum $name:ident {$($body:tt)*} => $callback:ident $arg:tt
) => {
tlborm_util! {
@parse_unitary_variants
enum $name {$($body)*} => $callback $arg
}
};
}
|
This is typically used to replace elements of an arbitrary token sequence with some fixed expression.
See [TLBoRM: Repetition replacement](https://danielkeep.github.io/tlborm/book/pat-repetition-replacement.html).
## Examples
```rust
# #[macro_use(replace_expr, tlborm_util)] extern crate tlborm;
# fn main() {
macro_rules! tts_to_zeroes {
($($tts:tt)*) => {
[$(replace_expr!($tts 0)),*]
}
}
assert_eq!(tts_to_zeroes!(pub const unsafe impl), [0, 0, 0, 0]);
# }
```
*/
#[macro_export]
macro_rules! replace_expr {
($_t:tt $sub:expr) => {tlborm_util!(@replace_expr $_t $sub)};
}
#[doc(hidden)]
#[macro_export]
macro_rules! tlborm_util {
(@as_expr $e:expr) => {$e};
(@as_item $($i:item)+) => {$($i)+};
(@as_pat $p:pat) => {$p};
(@as_stmt $s:stmt) => {$s};
(@count_idents_enum $($idents:ident)*) => {
{
#[allow(dead_code, non_camel_case_types)]
enum Idents { $($ | /**
Utility macro that takes a token tree and an expression, expanding to the expression. | random_line_split |
athena_cli.py | csv.QUOTE_ALL)
if self.format == 'CSV_HEADER':
csv_writer.writerow(headers)
csv_writer.writerows([[text.encode("utf-8") for text in row] for row in self.athena.yield_rows(results, headers)])
elif self.format == 'TSV':
print(tabulate([row for row in self.athena.yield_rows(results, headers)], tablefmt='tsv'))
elif self.format == 'TSV_HEADER':
print(tabulate([row for row in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='tsv'))
elif self.format == 'VERTICAL':
for num, row in enumerate(self.athena.yield_rows(results, headers)):
print('--[RECORD {}]--'.format(num+1))
print(tabulate(zip(*[headers, row]), tablefmt='presto'))
else: # ALIGNED
print(tabulate([x for x in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='presto'))
if status == 'FAILED':
print(stats['QueryExecution']['Status']['StateChangeReason'])
try:
del cmd.Cmd.do_show # "show" is an Athena command
except AttributeError:
# "show" was removed from Cmd2 0.8.0
pass
class AthenaShell(cmd.Cmd, object):
multilineCommands = ['WITH', 'SELECT', 'ALTER', 'CREATE', 'DESCRIBE', 'DROP', 'MSCK', 'SHOW', 'USE', 'VALUES']
allow_cli_args = False
def __init__(self, athena, db=None):
cmd.Cmd.__init__(self)
self.athena = athena
self.dbname = db
self.execution_id = None
self.row_count = 0
self.set_prompt()
self.pager = os.environ.get('ATHENA_CLI_PAGER', LESS).split(' ')
self.hist_file = os.path.join(os.path.expanduser("~"), ".athena_history")
self.init_history()
def set_prompt(self):
self.prompt = 'athena:%s> ' % self.dbname if self.dbname else 'athena> '
def cmdloop_with_cancel(self, intro=None):
try:
self.cmdloop(intro)
except KeyboardInterrupt:
if self.execution_id:
self.athena.stop_query_execution(self.execution_id)
print('\n\n%s' % self.athena.console_link(self.execution_id))
print('\nQuery aborted by user')
else:
print('\r')
self.cmdloop_with_cancel(intro)
def preloop(self):
if os.path.exists(self.hist_file):
readline.read_history_file(self.hist_file)
def postloop(self):
self.save_history()
def init_history(self):
try:
readline.read_history_file(self.hist_file)
readline.set_history_length(HISTORY_FILE_SIZE)
readline.write_history_file(self.hist_file)
except IOError:
readline.write_history_file(self.hist_file)
atexit.register(self.save_history)
def save_history(self):
try:
readline.write_history_file(self.hist_file)
except IOError:
pass
def do_help(self, arg):
help_output = """
Supported commands:
QUIT
SELECT
ALTER DATABASE <schema>
ALTER TABLE <table>
CREATE DATABASE <schema>
CREATE TABLE <table>
DESCRIBE <table>
DROP DATABASE <schema>
DROP TABLE <table>
MSCK REPAIR TABLE <table>
SHOW COLUMNS FROM <table>
SHOW CREATE TABLE <table>
SHOW DATABASES [LIKE <pattern>]
SHOW PARTITIONS <table>
SHOW TABLES [IN <schema>] [<pattern>]
SHOW TBLPROPERTIES <table>
USE [<catalog>.]<schema>
VALUES row [, ...]
See http://docs.aws.amazon.com/athena/latest/ug/language-reference.html
"""
print(help_output)
def do_quit(self, arg):
print()
return -1
def do_EOF(self, arg):
return self.do_quit(arg)
def do_use(self, schema):
self.dbname = schema.rstrip(';')
self.set_prompt()
def do_set(self, arg):
try:
statement, param_name, val = arg.parsed.raw.split(None, 2)
val = val.strip()
param_name = param_name.strip().lower()
if param_name == 'debug':
self.athena.debug = cmd.cast(True, val)
except (ValueError, AttributeError):
self.do_show(arg)
super(AthenaShell, self).do_set(arg)
def default(self, line):
self.execution_id = self.athena.start_query_execution(self.dbname, line.full_parsed_statement())
if not self.execution_id:
return
while True:
stats = self.athena.get_query_execution(self.execution_id)
status = stats['QueryExecution']['Status']['State']
status_line = 'Query {0}, {1:9}'.format(self.execution_id, status)
sys.stdout.write('\r' + status_line)
sys.stdout.flush()
if status in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
break
time.sleep(0.2) # 200ms
sys.stdout.write('\r' + ' ' * len(status_line) + '\r') # delete query status line
sys.stdout.flush()
if status == 'SUCCEEDED':
results = self.athena.get_query_results(self.execution_id)
headers = [h['Name'] for h in results['ResultSet']['ResultSetMetadata']['ColumnInfo']]
row_count = len(results['ResultSet']['Rows'])
if headers and len(results['ResultSet']['Rows']) and results['ResultSet']['Rows'][0]['Data'][0].get('VarCharValue', None) == headers[0]:
row_count -= 1 # don't count header
process = subprocess.Popen(self.pager, stdin=subprocess.PIPE)
process.stdin.write(tabulate([x for x in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='presto').encode('utf-8'))
process.communicate()
print('(%s rows)\n' % row_count)
print('Query {0}, {1}'.format(self.execution_id, status))
if status == 'FAILED':
print(stats['QueryExecution']['Status']['StateChangeReason'])
print(self.athena.console_link(self.execution_id))
submission_date = stats['QueryExecution']['Status']['SubmissionDateTime']
completion_date = stats['QueryExecution']['Status']['CompletionDateTime']
execution_time = stats['QueryExecution']['Statistics']['EngineExecutionTimeInMillis']
data_scanned = stats['QueryExecution']['Statistics']['DataScannedInBytes']
query_cost = data_scanned / 1000000000000.0 * 5.0
print('Time: {}, CPU Time: {}ms total, Data Scanned: {}, Cost: ${:,.2f}\n'.format(
str(completion_date - submission_date).split('.')[0],
execution_time,
human_readable(data_scanned),
query_cost
))
class Athena(object):
def __init__(self, profile, region=None, bucket=None, debug=False, encryption=False):
self.session = boto3.Session(profile_name=profile, region_name=region)
self.athena = self.session.client('athena')
self.region = region or os.environ.get('AWS_DEFAULT_REGION', None) or self.session.region_name
self.bucket = bucket or self.default_bucket
self.debug = debug
self.encryption = encryption
@property
def default_bucket(self):
account_id = self.session.client('sts').get_caller_identity().get('Account')
return 's3://{}-query-results-{}-{}'.format(self.session.profile_name or 'aws-athena', account_id, self.region)
def start_query_execution(self, db, query):
try: | }
if self.encryption:
result_configuration['EncryptionConfiguration'] = {
'EncryptionOption': 'SSE_S3'
}
return self.athena.start_query_execution(
QueryString=query,
ClientRequestToken=str(uuid.uuid4()),
QueryExecutionContext={
'Database': db
},
ResultConfiguration=result_configuration
)['QueryExecutionId']
except (ClientError, ParamValidationError, ValueError) as e:
print(e)
return
def get_query_execution(self, execution_id):
try:
return self.athena.get_query_execution(
QueryExecutionId=execution_id
)
except ClientError as e:
print(e)
def get_query_results(self, execution_id):
try:
results = None
paginator = self.athena.get_paginator('get_query_results')
page_iterator = paginator.paginate(
QueryExecutionId=execution_id
)
for page in page_iterator:
if results is None:
results = page
else:
results['ResultSet']['Rows'].extend(page['ResultSet']['Rows'])
except ClientError as e:
sys.exit(e)
if self.debug:
print(json.dumps(results, indent=2))
return results
def stop_query_execution(self, execution_id):
try:
return self.athena.stop_query_execution(
QueryExecutionId=execution_id
)
except ClientError as e:
sys.exit(e)
@staticmethod
def yield_rows(results, headers):
for row in results['ResultSet']['Rows']:
# https:// | if not db:
raise ValueError('Schema must be specified when session schema is not set')
result_configuration = {
'OutputLocation': self.bucket, | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.