file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
main.rs |
/// easily remove all the neighbors of a vertex from a state very efficiently.
neighbors: Vec<BitSet>,
/// For each vertex 'i', the value of 'weight[i]' denotes the weight associated
/// with vertex i in the problem instance. The goal of MISP is to select nodes
/// from the underlying graph such that the resulting set is an independent set
/// whose total selected weight is maximum.
weight: Vec<isize>,
}
/// A constant to mean take the node in the independent set.
const YES: isize = 1;
/// A constant to mean leave the node out of the independent set.
const NO: isize = 0;
/// The Misp class implements the 'Problem' trait. This means Misp is the definition
/// of the DP model. That DP model is pretty straightforward, but you might still want
/// to check the implementation of the branching heuristic (the next_variable method)
/// since it does something interesting.
impl Problem for Misp {
type State = BitSet;
fn nb_variables(&self) -> usize {
self.nb_vars
}
fn initial_state(&self) -> Self::State {
(0..self.nb_variables()).collect()
}
fn initial_value(&self) -> isize {
0
}
fn transition(&self, state: &Self::State, decision: Decision) -> Self::State {
let mut res = state.clone();
res.remove(decision.variable.id());
if decision.value == YES {
// intersect with complement of the neighbors for fast set difference
res.intersect_with(&self.neighbors[decision.variable.id()]);
}
res
}
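// Added illustration (not part of the original code): a worked example of `transition`
// on a hypothetical 4-vertex instance where vertex 0 is adjacent to vertices 1 and 2,
// so `neighbors[0]` stores the complement set {0, 3} used for the fast set difference.
//
//   state    = {0, 1, 2, 3}
//   decision = (variable 0, YES)
//   remove vertex 0              => {1, 2, 3}
//   intersect with neighbors[0]  => {3}
//
// Taking vertex 0 thus discards its neighbours 1 and 2 with a single bitset AND.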
fn transition_cost(&self, _: &Self::State, decision: Decision) -> isize {
if decision.value == NO {
0
} else {
self.weight[decision.variable.id()]
}
}
fn | (&self, variable: Variable, state: &Self::State, f: &mut dyn DecisionCallback) {
if state.contains(variable.id()) {
f.apply(Decision{variable, value: YES});
f.apply(Decision{variable, value: NO });
} else {
f.apply(Decision{variable, value: NO });
}
}
/// This method is (apparently) a bit more hairy. It simply decides to branch on the
/// variable that occurs in the fewest states of the next layer. The intuition is to
/// keep the maximum width as small as possible while developing the layers, since all
/// nodes that are not impacted by the change on the selected vertex are simply copied
/// over to the next layer.
fn next_variable(&self, _: usize, next_layer: &mut dyn Iterator<Item = &Self::State>) -> Option<Variable> {
// The thread-local block is possibly one of the most surprising bits of this code. It declares
// a static variable called VAR_HEURISTIC storing the occurrence count of each vertex in the
// next layer. Because it is static, it is not re-created (reallocated) on each pass; because
// it is declared in a thread_local! block, each thread gets its own instance with mutable
// access.
thread_local! {
static VAR_HEURISTIC: RefCell<Vec<usize>> = RefCell::new(vec![]);
}
VAR_HEURISTIC.with(|heu| {
let mut heu = heu.borrow_mut();
let heu: &mut Vec<usize> = heu.as_mut();
// initialize
heu.reserve_exact(self.nb_variables());
if heu.is_empty() {
for _ in 0..self.nb_variables() { heu.push(0); }
} else {
heu.iter_mut().for_each(|i| *i = 0);
}
// count the occurrence of each var
for s in next_layer {
for sit in s.iter() {
heu[sit] += 1;
}
}
// take the one occurring the least often
heu.iter().copied().enumerate()
.filter(|(_, v)| *v > 0)
.min_by_key(|(_, v)| *v)
.map(|(x, _)| Variable(x))
})
}
fn is_impacted_by(&self, var: Variable, state: &Self::State) -> bool {
state.contains(var.id())
}
}
/// In addition to a dynamic programming (DP) model of the problem you want to solve,
/// the branch-and-bound with MDD algorithm (and thus ddo) requires that you provide
/// a relaxation, which is what lets you control the maximum amount of space used by
/// the decision diagrams that are compiled.
///
/// That relaxation requires two operations: the first merges several nodes into one
/// merged node that acts as an over-approximation of the nodes it replaces. The second
/// may offset onto the arcs entering the newly created merged node some weight that
/// would otherwise be lost in the merge.
///
/// The role of this simple structure is to provide an implementation of that relaxation.
///
/// # Note:
/// In addition to the two operations above, the MispRelax structure implements an
/// optional `fast_upper_bound` method, which provides a cheap bound used to prune
/// portions of the state space while the decision diagrams are compiled
/// (aka rough upper-bound pruning).
pub struct MispRelax<'a>{pb: &'a Misp}
impl Relaxation for MispRelax<'_> {
type State = BitSet;
fn merge(&self, states: &mut dyn Iterator<Item = &Self::State>) -> Self::State {
let mut state = BitSet::with_capacity(self.pb.nb_variables());
for s in states {
state.union_with(s);
}
state
}
fn relax(
&self,
_source: &Self::State,
_dest: &Self::State,
_new: &Self::State,
_decision: Decision,
cost: isize,
) -> isize {
cost
}
fn fast_upper_bound(&self, state: &Self::State) -> isize {
state.iter().map(|x| self.pb.weight[x]).sum()
}
}
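// Added note (illustration only): for a state {1, 3, 4}, `fast_upper_bound` returns
// weight[1] + weight[3] + weight[4], i.e. it optimistically assumes every remaining
// vertex can still be taken. For the usual non-negative MISP weights this upper-bounds
// the value of any independent set that can still be built from the state.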
/// The last bit of information we need to provide when implementing a ddo-based
/// solver is a `StateRanking`. This is a heuristic used to rank the most and least
/// promising nodes so that only the *least* promising ones are deleted/merged
/// when compiling restricted and relaxed DDs.
pub struct MispRanking;
impl StateRanking for MispRanking {
type State = BitSet;
fn compare(&self, a: &Self::State, b: &Self::State) -> std::cmp::Ordering {
a.len().cmp(&b.len())
.then_with(|| a.cmp(b))
}
}
// #########################################################################################
// # THE INFORMATION BEYOND THIS LINE IS NOT DIRECTLY RELATED TO THE IMPLEMENTATION OF #
// # A SOLVER BASED ON DDO. INSTEAD, THAT PORTION OF THE CODE CONTAINS GENERIC FUNCTION #
// # THAT ARE USED TO READ AN INSTANCE FROM FILE, PROCESS COMMAND LINE ARGUMENTS, AND #
// # THE MAIN FUNCTION. THESE ARE THUS NOT REQUIRED 'PER-SE', BUT I BELIEVE IT IS USEFUL #
// # TO SHOW HOW IT CAN BE DONE IN AN EXAMPLE. #
// #########################################################################################
/// This structure uses `clap-derive` annotations and defines the arguments that can
/// be passed to the executable solver.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// The path to the instance file
fname: String,
/// The number of concurrent threads
#[clap(short, long, default_value = "8")]
threads: usize,
/// The maximum amount of time you would like this solver to run
#[clap(short, long)]
duration: Option<u64>,
/// The maximum number of nodes per layer
#[clap(short, long)]
width: Option<usize>,
}
/// This enumeration simply groups the kinds of errors that might occur when parsing a
/// misp instance from a file. There can be I/O errors (file unavailable?), format errors
/// (e.g. the file is not an instance but contains the text of your next paper),
/// or parse-int errors (which are really a variant of the format error, since they tell
/// you that the parser expected an integer but got ... something else).
#[derive(Debug, thiserror::Error)]
enum Error {
/// There was an io related error
#[error("io error {0}")]
Io(#[from] std::io::Error),
/// The parser expected to read something that was an integer but got some garbage
#[error("parse int {0}")]
ParseInt(#[from] ParseIntError),
/// The file was not properly formatted.
#[error("ill formed instance")]
Format,
}
/// This function reads a misp instance from a file. It returns either a misp
/// instance if everything went well or an error describing the problem.
fn read_instance<P: AsRef<Path>>(fname: P) -> Result<Misp, Error> {
let f = File::open(fname)?;
let f = BufReader::new(f);
let comment = Regex::new | for_each_in_domain | identifier_name |
main.rs | .", | }
}
writeln_report!();
writeln_report!(r"その上で$a \leqq c, c > 0$となるような$a, c$を求める.");
writeln_report!(r"\begin{{itemize}}");
// Find a and c satisfying the conditions.
let mut res = Vec::new();
for b in bs {
let do_report = b >= 0;
if do_report {
writeln_report!(
r"\item $b = {}{}$のとき \\",
if b != 0 { r"\pm " } else { "" },
b
);
}
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
if do_report {
writeln_report!(r"$4ac = {}$となり,これは整数解を持たない.", ac4);
}
continue;
}
let ac = ac4 / 4;
if do_report {
writeln_report!(r"$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!(r"よって$(a, c) = $");
}
let mut first = true;
for a in -ac..=ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
if a <= c && c > 0 {
if do_report {
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
}
res.push((a, b, c));
}
}
if do_report {
writeln_report!(".");
}
}
writeln_report!(r"\end{{itemize}}");
res.sort();
res.dedup();
res.sort_by_key(|&(a, b, c)| (a.abs(), b.abs(), c.abs()));
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// Check condition (B)
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = -a < b && b <= a && a < c;
let right = 0 <= b && b <= a && a == c;
if left {
writeln_report!(
r"これは左側の不等式${} < {} \leqq {} < {}$を満たす.",
-a,
b,
a,
c
);
return true;
}
if right {
writeln_report!(
r"これは右側の不等式$0 \leqq {} \leqq {} = {}$満たす.",
b,
a,
c
);
return true;
}
let left_failure = if !(-a < b) {
format!(r"$-a < b$について${} \not< {}$", -a, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a < c) {
format!(r"$a < c$について${} \not< {}$", a, c)
} else {
unreachable!()
};
let right_failure = if !(0 <= b) {
format!(r"$0 \leqq b$について${} \not\leqq {}$", 0, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a == c) {
format!(r"$a = c$について${} \neq {}$", a, c)
} else {
unreachable!()
};
writeln_report!("この組は左側の不等式では{}であり,右側の不等式では{}であるから,両方の不等式を満たさない.", left_failure, right_failure);
false
}
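// Added note: the two inequality chains checked above (-a < b <= a < c, or
// 0 <= b <= a = c) are the classical reduction conditions for positive-definite
// binary quadratic forms, so each equivalence class is counted exactly once.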
writeln_report!(r"\begin{{itemize}}");
res.retain(cond);
writeln_report!(r"\end{{itemize}}");
writeln_report!(
"以上により,全ての条件を満たす$(a, b, c)$の組は${:?}$となる.",
res.iter().format("$, $")
);
Ok(res)
}
#[allow(clippy::cognitive_complexity, clippy::nonminimal_bool)]
fn calc_positive(disc: i64) -> Result<Vec<(i64, i64, i64)>, String> {
assert!(disc > 0);
// Check condition (A)
// Get the candidate values for b (exclusive)
writeln_report!("まず,条件を満たす$b$の候補を計算する.$b$の範囲は");
let minb = {
let sqrt = (disc as f64).sqrt();
// In principle the square root is only an integer for d = 1, so this case barely matters.
let is_int = is_int(sqrt);
// We want 1.3 -> -2, -2.9 -> -3, 4.0 -> -4.
let minb = if is_int { -sqrt.round() } else { -sqrt.ceil() } as i64;
writeln_report!(
r"\[ 0 > b > -\sqrt{{ {disc} }} {op} {minb}. \]",
disc = disc,
op = if is_int { "=" } else { ">" },
minb = if is_int { minb - 1 } else { minb },
);
minb
};
writeln_report!(
r"$4ac = b^2 - {}$より$b$は{}であるから,",
disc,
if disc % 2 == 0 { "偶数" } else { "奇数" }
);
let bs = ((minb + 1)..0).filter(|x| x.abs() % 2 == disc % 2);
if bs.clone().collect_vec().is_empty() {
writeln_report!(r"条件を満たす$b$はない.");
return Err("no cands".to_string());
}
writeln_report!(r"条件を満たす$b$は$b = $ ${}$.", bs.clone().format("$, $"));
// Find a and c
writeln_report!();
writeln_report!("その上で$a > 0, c < 0$となる$a, c$を求める.");
let mut res = Vec::new();
writeln_report!(r"\begin{{itemize}}");
for b in bs {
writeln_report!(r"\item $b = {}$のとき \\", b);
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
writeln_report!("$4ac = {}$となり,これは整数解を持たない.", ac4);
continue;
}
let ac = ac4 / 4;
writeln_report!("$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!("よって$(a, c) = $");
let mut first = true;
for a in 0..=-ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
assert!(c < 0);
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
res.push((a, b, c));
}
writeln_report!(".");
}
writeln_report!(r"\end{{itemize}}");
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// Check condition (B)
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = a + b + c < 0;
let leftopnot = if ! | if has_zero { "0$, " } else { "$" },
nonzero.format(r"$, $\pm ")
); | random_line_split |
main.rs | .",
if has_zero { "0$, " } else { "$" },
nonzero.format(r"$, $\pm ")
);
}
}
writeln_report!();
writeln_report!(r"その上で$a \leqq c, c > 0$となるような$a, c$を求める.");
writeln_report!(r"\begin{{itemize}}");
// Find a and c satisfying the conditions.
let mut res = Vec::new();
for b in bs {
let do_report = b >= 0;
if do_report {
writeln_report!(
r"\item $b = {}{}$のとき \\",
if b != 0 { r"\pm " } else { "" },
b
);
}
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
if do_report {
writeln_report!(r"$4ac = {}$となり,これは整数解を持たない.", ac4);
}
continue;
}
let ac = ac4 / 4;
if do_report {
writeln_report!(r"$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!(r"よって$(a, c) = $");
}
let mut first = true;
for a in -ac..=ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
if a <= c && c > 0 {
if do_report {
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
}
res.push((a, b, c));
}
}
if do_report {
writeln_report!(".");
}
}
writeln_report!(r"\end{{itemize}}");
res.sort();
res.dedup();
res.sort_by_key(|&(a, b, c)| (a.abs(), b.abs(), c.abs()));
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// Check condition (B)
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = -a < b && b <= a && a < c;
let right = 0 <= b && b <= a && a == c;
if left {
writeln_report!(
r"これは左側の不等式${} < {} \leqq {} < {}$を満たす.",
-a,
b,
a,
c
);
return true;
}
if right {
| a,
c
);
return true;
}
let left_failure = if !(-a < b) {
format!(r"$-a < b$について${} \not< {}$", -a, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a < c) {
format!(r"$a < c$について${} \not< {}$", a, c)
} else {
unreachable!()
};
let right_failure = if !(0 <= b) {
format!(r"$0 \leqq b$について${} \not\leqq {}$", 0, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a == c) {
format!(r"$a = c$について${} \neq {}$", a, c)
} else {
unreachable!()
};
writeln_report!("この組は左側の不等式では{}であり,右側の不等式では{}であるから,両方の不等式を満たさない.", left_failure, right_failure);
false
}
writeln_report!(r"\begin{{itemize}}");
res.retain(cond);
writeln_report!(r"\end{{itemize}}");
writeln_report!(
"以上により,全ての条件を満たす$(a, b, c)$の組は${:?}$となる.",
res.iter().format("$, $")
);
Ok(res)
}
#[allow(clippy::cognitive_complexity, clippy::nonminimal_bool)]
fn calc_positive(disc: i64) -> Result<Vec<(i64, i64, i64)>, String> {
assert!(disc > 0);
// Check condition (A)
// Get the candidate values for b (exclusive)
writeln_report!("まず,条件を満たす$b$の候補を計算する.$b$の範囲は");
let minb = {
let sqrt = (disc as f64).sqrt();
// In principle the square root is only an integer for d = 1, so this case barely matters.
let is_int = is_int(sqrt);
// We want 1.3 -> -2, -2.9 -> -3, 4.0 -> -4.
let minb = if is_int { -sqrt.round() } else { -sqrt.ceil() } as i64;
writeln_report!(
r"\[ 0 > b > -\sqrt{{ {disc} }} {op} {minb}. \]",
disc = disc,
op = if is_int { "=" } else { ">" },
minb = if is_int { minb - 1 } else { minb },
);
minb
};
writeln_report!(
r"$4ac = b^2 - {}$より$b$は{}であるから,",
disc,
if disc % 2 == 0 { "偶数" } else { "奇数" }
);
let bs = ((minb + 1)..0).filter(|x| x.abs() % 2 == disc % 2);
if bs.clone().collect_vec().is_empty() {
writeln_report!(r"条件を満たす$b$はない.");
return Err("no cands".to_string());
}
writeln_report!(r"条件を満たす$b$は$b = $ ${}$.", bs.clone().format("$, $"));
// Find a and c
writeln_report!();
writeln_report!("その上で$a > 0, c < 0$となる$a, c$を求める.");
let mut res = Vec::new();
writeln_report!(r"\begin{{itemize}}");
for b in bs {
writeln_report!(r"\item $b = {}$のとき \\", b);
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
writeln_report!("$4ac = {}$となり,これは整数解を持たない.", ac4);
continue;
}
let ac = ac4 / 4;
writeln_report!("$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!("よって$(a, c) = $");
let mut first = true;
for a in 0..=-ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
assert!(c < 0);
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
res.push((a, b, c));
}
writeln_report!(".");
}
writeln_report!(r"\end{{itemize}}");
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// Check condition (B)
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = a + b + c < 0;
let leftopnot = if | writeln_report!(
r"これは右側の不等式$0 \leqq {} \leqq {} = {}$満たす.",
b,
| conditional_block |
main.rs | b, c) = $ ${:?}$.", res.iter().format("$, $"));
// Check condition (B)
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = a + b + c < 0;
let leftopnot = if !left { r"\not" } else { "" };
let leftend = if left {
"を満たす."
} else {
"となるので不適."
};
let right = a - b + c > 0;
let rightopnot = if !right { r"\not" } else { "" };
let rightstart = if left && right {
"また"
} else {
"このとき"
};
let rightend = if right {
"を満たす."
} else {
"となるので不適."
};
if !left || (left && right) {
writeln_report!(
r"このとき$a + b + c = {} {:+} {:+} = {} {}< 0${}",
a,
b,
c,
a + b + c,
leftopnot,
leftend
);
}
if !right || (left && right) {
writeln_report!(
r"{}$a - b + c = {} {:+} {:+} = {} {}> 0${}",
rightstart,
a,
-b,
c,
a - b + c,
rightopnot,
rightend
);
}
left && right
}
writeln_report!(r"\begin{{itemize}}");
res.retain(cond);
writeln_report!(r"\end{{itemize}}");
// Check condition (C)
let res = remove_same_repeat(disc, &res);
writeln_report!();
writeln_report!(
"以上により,全ての条件を満たす$(a, b, c)$の組は${:?}$となる.",
res.iter().format("$, $")
);
Ok(res)
}
fn remove_same_repeat(disc: i64, cands: &[(i64, i64, i64)]) -> Vec<(i64, i64, i64)> {
writeln_report!("");
writeln_report!("ここまでで得られた$(a, b, c)$の組は,");
writeln_report!(r"${:?}$.", cands.iter().format("$, $"));
writeln_report!(r"これを連分数展開し,循環節が同じものを除く.");
writeln_report!(r"連分数展開の途中に現れた分数を全て除けば良い.");
let cand_fracs = cands
.iter()
.map(|&(a, b, _)| Frac::from_abd(a, b, disc))
.collect_vec();
let map: HashMap<_, _> = cand_fracs
.iter()
.copied()
.zip(cands.iter().copied())
.collect();
let mut notfound: HashSet<_> = map.keys().collect();
let mut res = Vec::new();
for mut frac in cand_fracs {
if !notfound.contains(&frac) {
continue;
}
writeln_report!();
writeln_report!("${:?}$に対応する${}$を連分数展開する.", map[&frac], frac);
res.push(map[&frac]);
notfound.remove(&frac);
let mut obtained = HashSet::new();
while obtained.insert(frac) && !notfound.is_empty() {
write_report!(r"\[ {} = ", frac);
let int = frac.integer_part();
frac = frac.sub_int(int);
write_report!(r"{} + \left({}\right) = ", int, frac);
frac = frac.invert();
writeln_report!(r"{} + \frac{{ 1 }}{{ {} }}. \]", int, frac);
if notfound.contains(&frac) {
writeln_report!(
"${}$は${:?}$に対応するので,${:?}$は除く.",
frac,
map[&frac],
map[&frac]
);
notfound.remove(&frac);
}
}
if !notfound.is_empty() && obtained.contains(&frac) {
writeln_report!(
"ここで${}$は一度現れたので,この連分数はここから循環する.",
frac
);
}
}
res
}
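// Added note: as the report text above explains, two forms are treated as equivalent
// when the continued-fraction expansions of their associated quadratic irrationals
// share the same repeating cycle, so only the first representative of each cycle is
// kept in `res`.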
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
struct Frac {
num: i64,
coeff: i64,
root: i64,
denom: i64,
}
impl fmt::Display for Frac {
fn fmt(&self, b: &mut fmt::Formatter) -> fmt::Result {
let coeff = if self.coeff == 1 {
"+".to_string()
} else if self.coeff == -1 {
"-".to_string()
} else {
format!("{:+}", self.coeff)
};
let num = format!(r"{} {}\sqrt{{ {} }}", self.num, coeff, self.root);
let frac = if self.denom == 1 {
num
} else {
format!(r"\frac{{ {} }}{{ {} }}", num, self.denom)
};
write!(b, "{}", frac)
}
}
impl Frac {
pub fn from_abd(a: i64, b: i64, disc: i64) -> Frac {
Frac::new(-b, 1, disc, 2 * a)
}
pub fn new(num: i64, coeff: i64, root: i64, denom: i64) -> Frac {
assert!(root > 0);
let mut f = Frac {
num,
coeff,
root,
denom,
};
f.normalize();
f
}
pub fn normalize(&mut self) {
self.normalize_root();
self.reduce();
if self.denom < 0 {
self.denom *= -1;
self.num *= -1;
self.coeff *= -1;
}
}
pub fn invert(self) -> Frac {
let denom = self.num * self.num - self.coeff * self.coeff * self.root;
let num = self.denom * self.num;
let coeff = -self.denom * self.coeff;
let root = self.root;
let mut res = Frac {
denom,
num,
coeff,
root,
};
res.normalize();
res
}
pub fn integer_part(self) -> i64 {
let num = self.num as f64 + self.coeff as f64 * (self.root as f64).sqrt();
let denom = self.denom as f64;
let float = num / denom;
if is_int(float) {
float.round() as i64
} else {
float.floor() as i64
}
}
pub fn sub_int(mut self, int: i64) -> Frac {
self.num -= int * self.denom;
self.normalize();
self
}
fn normalize_root(&mut self) {
while let Err(d) = has_square_factor(self.root) {
self.root /= d * d;
self.coeff *= d;
}
}
fn reduce(&mut self) {
let g = gcd(gcd(self.num, self.coeff), self.denom);
self.num /= g;
self.coeff /= g;
self.denom /= g;
}
}
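// Added worked example (illustration only): one step of the continued-fraction
// expansion performed in `remove_same_repeat`, for the form (a, b, c) = (1, -1, -1)
// with discriminant 5, i.e. the value (1 + sqrt(5)) / 2.
//
//   let f    = Frac::from_abd(1, -1, 5); // (1 + sqrt(5)) / 2 ≈ 1.618
//   let int  = f.integer_part();         // 1
//   let rest = f.sub_int(int);           // (-1 + sqrt(5)) / 2 ≈ 0.618
//   let next = rest.invert();            // back to (1 + sqrt(5)) / 2
//
// The expansion immediately revisits its starting value, so the repeating cycle has length 1.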
#[allow(clippy::collapsible_if)]
fn do_main(d: i64) -> Result<(), String> {
// if d.abs() > 999 {
// return Err(format!("input too large: {}", d));
// }
if d == 0 {
writeln_report!("$d = 0$ のときは考えない.");
return Err("d is zero".to_string());
}
if let Err(f) = has_square_factor(d) {
writeln_report!("$d = {}$は平方因子${}$を持つため,考えない.", d, f);
return Err(format!("{} has square factor: {}", d, f));
}
writeln_report!(r"このとき$d = {}$である.", d);
let disc = discriminant(d);
writeln_report!();
let res = if d < 0 {
calc_negative(disc)?
} else {
calc_positive(disc)?
};
if !MAKE_REPORT.load(Ordering::SeqCst) {
println!("d = {}: {} ({:?})", d, res.len(), res);
}
if MAKE_REPORT.load(Ordering::SeqCst) {
writeln_report!("したがって,$h_K = {}$.", res.len());
writeln_report!();
writeln_report!("イデアル類群の代表元は,");
let mut first = t | rue;
| identifier_name |
|
main.rs | ",
if has_zero { "0$, " } else { "$" },
nonzero.format(r"$, $\pm ")
);
}
}
writeln_report!();
writeln_report!(r"その上で$a \leqq c, c > 0$となるような$a, c$を求める.");
writeln_report!(r"\begin{{itemize}}");
// Find a and c satisfying the conditions.
let mut res = Vec::new();
for b in bs {
let do_report = b >= 0;
if do_report {
writeln_report!(
r"\item $b = {}{}$のとき \\",
if b != 0 { r"\pm " } else { "" },
b
);
}
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
if do_report {
writeln_report!(r"$4ac = {}$となり,これは整数解を持たない.", ac4);
}
continue;
}
let ac = ac4 / 4;
if do_report {
writeln_report!(r"$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!(r"よって$(a, c) = $");
}
let mut first = true;
for a in -ac..=ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
if a <= c && c > 0 {
if do_report {
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
}
res.push((a, b, c));
}
}
if do_report {
writeln_report!(".");
}
}
writeln_report!(r"\end{{itemize}}");
res.sort();
res.dedup();
res.sort_by_key(|&(a, b, c)| (a.abs(), b.abs(), c.abs()));
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// Check condition (B)
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = -a < b && b <= a && a < c;
let right = 0 <= b && b <= a && a == c;
if left {
writeln_report!(
r"これは左側の不等式${} < {} \leqq {} < {}$を満たす.",
-a,
b,
a,
c
);
return true;
}
if right {
writeln_report!(
r"これは右側の不等式$0 \leqq {} \leqq {} = {}$満たす.",
b,
a,
c
);
return true;
}
let left_failure = if !(-a < b) {
format!(r"$-a < b$について${} \not< {}$", -a, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a < c) {
format!(r"$a < c$について${} \not< {}$", a, c)
} else {
unreachable!()
};
let right_failure = if !(0 <= b) {
format!(r"$0 \leqq b$について${} \not\leqq {}$", 0, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a == c) {
format!(r"$a = c$について${} \neq {}$", a, c)
} else {
unreachable!()
};
writeln_report!("この組は左側の不等式では{}であり,右側の不等式では{}であるから,両方の不等式を満たさない.", left_failure, right_failure);
false
}
writeln_report!(r"\begin{{itemize}}");
res.retain(cond);
writeln_report!(r"\end{{itemize}}");
writeln_report!(
"以上により,全ての条件を満たす$(a, b, c)$の組は${:?}$となる.",
res.iter().format("$, $")
);
Ok(res)
}
#[allow(clippy::cognitive_complexity, clippy::nonminimal_bool)]
fn calc_positive(disc: i64) -> Result<Vec<(i64, i64, i64)>, String> {
assert!(disc > 0);
// Check condition (A)
// Get the candidate values for b (exclusive)
writeln_report!("まず,条件を満たす$b$の候補を計算する.$b$の範囲は");
let minb = {
let sqrt = (disc as f64).sqrt();
// In principle the square root is only an integer for d = 1, so this case barely matters.
let is_int = is_int(sqrt);
// We want 1.3 -> -2, -2.9 -> -3, 4.0 -> -4.
let minb = if is_int { -sqrt.round() } else { -sqrt.ceil() } as i64;
writeln_report!(
r"\[ 0 > b > -\sqrt{{ {disc} }} {op} {minb}. \]",
disc = disc,
op = if is_int { "=" } else { ">" },
minb = if is_int { minb - 1 } else { minb },
);
minb
};
writeln_report!(
r"$4ac = b^2 - {}$より$ | let ac4 = b * b - disc;
if ac4 % 4 != 0 {
writeln_report!("$4ac = {}$となり,これは整数解を持たない.", ac4);
continue;
}
let ac = ac4 / 4;
writeln_report!("$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!("よって$(a, c) = $");
let mut first = true;
for a in 0..=-ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
assert!(c < 0);
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
res.push((a, b, c));
}
writeln_report!(".");
}
writeln_report!(r"\end{{itemize}}");
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// 条件 (B) を確認する
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = a + b + c < 0;
let leftopnot = if ! | b$は{}であるから,",
disc,
if disc % 2 == 0 { "偶数" } else { "奇数" }
);
let bs = ((minb + 1)..0).filter(|x| x.abs() % 2 == disc % 2);
if bs.clone().collect_vec().is_empty() {
writeln_report!(r"条件を満たす$b$はない.");
return Err("no cands".to_string());
}
writeln_report!(r"条件を満たす$b$は$b = $ ${}$.", bs.clone().format("$, $"));
// Find a and c
writeln_report!();
writeln_report!("その上で$a > 0, c < 0$となる$a, c$を求める.");
let mut res = Vec::new();
writeln_report!(r"\begin{{itemize}}");
for b in bs {
writeln_report!(r"\item $b = {}$のとき \\", b); | identifier_body |
csr.rs | -> usize {
self.e
}
pub fn get_offsets(&self) -> &Vec<usize> {
&self.offsets
}
pub fn get_neighbs(&self) -> &[usize] {
&self.neighbs
}
/// Build a random edge list
/// This method returns the generated edge list; `el.len()` is the number of edges.
pub fn random_el(numv: usize, maxe: usize) -> Vec<(usize, usize)> {
let mut rng = rand::thread_rng();
let mut el: Vec<(usize, usize)> = Vec::new();
for i in 0..numv {
/*edges per vertex*/
let num_e: usize = rng.gen_range(0, maxe) as usize;
for _ in 0..num_e {
let edge = (i as usize, rng.gen_range(0, numv) as usize);
el.push(edge);
}
}
el
}
/// Build an edge list from a file containing text describing one.
/// The file format is line oriented and human readable:
/// v0,v1
/// v0,v2
/// v0,v3
/// v0,v3
/// v1,v2
/// v1,v2
/// v2,v3
/// v3,v1
/// ...
///
/// This method returns a tuple of the number of vertices seen and the edge list
/// el.len() is the number of edges.
pub fn el_from_file(path: &str) -> (usize, Vec<(usize, usize)>) {
let mut el: Vec<(usize, usize)> = Vec::new();
let mut maxv: usize = 0;
let f = File::open(path);
match f {
Ok(file) => {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.from_reader(file);
for result in rdr.records() {
match result {
Ok(p) => {
let v0 = p.get(0).unwrap().parse::<usize>().unwrap();
let v1 = p.get(1).unwrap().parse::<usize>().unwrap();
if v0 > maxv {
maxv = v0
}
if v1 > maxv {
maxv = v1
}
el.push((v0, v1));
}
_ => {
eprintln!("Failed to parse file");
}
}
}
}
_ => {
eprintln!("Failed to open file {}", path);
}
}
(maxv + 1, el)
}
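// Usage sketch (added for illustration; `edges.csv` is a hypothetical path):
//
//   let (numv, el) = CSR::el_from_file("edges.csv");
//   let g = CSR::new(numv, el);
//   g.read_only_scan(|u, v| println!("edge {} -> {}", u, v));
//
// Note that `el_from_file` reports `maxv + 1` vertices, which assumes the file uses
// the dense vertex ids 0..=maxv.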
pub fn new_from_el_mmap(v: usize, f: String) -> CSR {
let path = PathBuf::from(f);
let file = OpenOptions::new()
.read(true)
.open(&path)
.unwrap();
let mmap = unsafe { Mmap::map(&file).unwrap() };
let el = mmap[..]
.as_slice_of::<usize>()
.unwrap();
let mut ncnt = Vec::with_capacity(v);
for _ in 0..v {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.chunks(2).par_bridge().for_each(|e| {
ncnt[ e[0] ].fetch_add(1, Ordering::SeqCst);
});
let mut work_offsets = Vec::with_capacity(v);
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: v,
e: el.chunks(2).len(),
vtxprop: vec![0f64; v],
offsets: vec![0; v],
neighbs: vec![0; el.chunks(2).len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::with_capacity(el.chunks(2).len());
for _ in 0..el.chunks(2).len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.chunks(2).par_bridge().for_each(|e| {
let cur_ind = work_offsets[e[0]].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(e[1], Ordering::Relaxed);
});
g.neighbs.par_iter_mut().enumerate().for_each(|(i,e)| {
*e = nbs[i].load(Ordering::Relaxed);
}); |
}
/// Take an edge list in and produce a CSR out
/// (u,v)
pub fn new(numv: usize, ref el: Vec<(usize, usize)>) -> CSR {
const NUMCHUNKS: usize = 16;
let chunksz: usize = if numv > NUMCHUNKS {
numv / NUMCHUNKS
} else {
1
};
/*TODO: Parameter*/
let numbins = 16;
let mut ncnt = Vec::new();
for _ in 0..numv {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.par_chunks(chunksz).for_each(|cnk| {
/*Per-thread bin structure*/
let mut bins = Vec::new();
for _ in 0..numbins {
bins.push(Vec::<&(usize, usize)>::new());
}
/*iterate over chunk, push edges to bins*/
cnk.iter().for_each(|e| {
bins[(e).0 % 16].push(e);
});
bins.iter().for_each(|b| {
b.iter().for_each(|e| {
ncnt[(e).0].fetch_add(1, Ordering::SeqCst);
});
});
});
let mut work_offsets = Vec::new();
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: numv,
e: el.len(),
vtxprop: vec![0f64; numv],
offsets: vec![0; numv],
neighbs: vec![0; el.len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::new();
for _ in 0..el.len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.par_chunks(chunksz).for_each(|cnk| {
cnk.iter().for_each(|edge| match *edge {
(v0, v1) => {
let cur_ind = work_offsets[v0].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(v1, Ordering::Relaxed);
}
});
});
g.neighbs
.par_chunks_mut(chunksz)
.enumerate()
.for_each(|(chunkbase, cnk)| {
cnk.iter_mut().enumerate().for_each(|(i, e)| {
*e = nbs[chunkbase + i].load(Ordering::Relaxed);
});
});
/*return the graph, g*/
g
}
/// Get the range of offsets into the neighbs array that hold the neighbors
/// of vertex v
pub fn vtx_offset_range(&self, v: usize) -> (usize, usize) {
(
self.offsets[v],
match v {
v if v == self.v - 1 => self.e,
_ => self.offsets[v + 1],
},
)
}
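// Added illustration: with the example layout shown above (offsets |0,3,5,6,9|),
// vtx_offset_range(1) returns (3, 5), i.e. vertex 1's neighbours live in neighbs[3..5];
// for the last vertex the upper bound falls back to `self.e`.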
/// read_only_scan is a read only scan of all edges in the entire CSR
/// that accepts a FnMut(usize, usize) -> () closure to apply to each edge
pub fn read_only_scan(&self, mut f: impl FnMut(usize, usize) -> ()) {
/*Iterate over the vertices in the offsets array*/
let len = self.offsets.len();
for i in 0..len {
/*A |
/*return the graph, g*/
g | random_line_split |
csr.rs | {
v: usize,
e: usize,
vtxprop: Vec<f64>,
offsets: Vec<usize>,
neighbs: Vec<usize>,
}
impl CSR {
pub fn get_vtxprop(&self) -> &[f64] {
&self.vtxprop
}
pub fn get_mut_vtxprop(&mut self) -> &mut [f64] {
&mut self.vtxprop
}
pub fn get_v(&self) -> usize {
self.v
}
pub fn get_e(&self) -> usize {
self.e
}
pub fn get_offsets(&self) -> &Vec<usize> {
&self.offsets
}
pub fn get_neighbs(&self) -> &[usize] {
&self.neighbs
}
/// Build a random edge list
/// This method returns the generated edge list; `el.len()` is the number of edges.
pub fn random_el(numv: usize, maxe: usize) -> Vec<(usize, usize)> {
let mut rng = rand::thread_rng();
let mut el: Vec<(usize, usize)> = Vec::new();
for i in 0..numv {
/*edges per vertex*/
let num_e: usize = rng.gen_range(0, maxe) as usize;
for _ in 0..num_e {
let edge = (i as usize, rng.gen_range(0, numv) as usize);
el.push(edge);
}
}
el
}
/// Build an edge list from a file containing text describing one.
/// The file format is line oriented and human readable:
/// v0,v1
/// v0,v2
/// v0,v3
/// v0,v3
/// v1,v2
/// v1,v2
/// v2,v3
/// v3,v1
/// ...
///
/// This method returns a tuple of the number of vertices seen and the edge list
/// el.len() is the number of edges.
pub fn el_from_file(path: &str) -> (usize, Vec<(usize, usize)>) {
let mut el: Vec<(usize, usize)> = Vec::new();
let mut maxv: usize = 0;
let f = File::open(path);
match f {
Ok(file) => {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.from_reader(file);
for result in rdr.records() {
match result {
Ok(p) => {
let v0 = p.get(0).unwrap().parse::<usize>().unwrap();
let v1 = p.get(1).unwrap().parse::<usize>().unwrap();
if v0 > maxv {
maxv = v0
}
if v1 > maxv {
maxv = v1
}
el.push((v0, v1));
}
_ => {
eprintln!("Failed to parse file");
}
}
}
}
_ => {
eprintln!("Failed to open file {}", path);
}
}
(maxv + 1, el)
}
pub fn new_from_el_mmap(v: usize, f: String) -> CSR {
let path = PathBuf::from(f);
let file = OpenOptions::new()
.read(true)
.open(&path)
.unwrap();
let mmap = unsafe { Mmap::map(&file).unwrap() };
let el = mmap[..]
.as_slice_of::<usize>()
.unwrap();
let mut ncnt = Vec::with_capacity(v);
for _ in 0..v {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.chunks(2).par_bridge().for_each(|e| {
ncnt[ e[0] ].fetch_add(1, Ordering::SeqCst);
});
let mut work_offsets = Vec::with_capacity(v);
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: v,
e: el.chunks(2).len(),
vtxprop: vec![0f64; v],
offsets: vec![0; v],
neighbs: vec![0; el.chunks(2).len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::with_capacity(el.chunks(2).len());
for _ in 0..el.chunks(2).len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.chunks(2).par_bridge().for_each(|e| {
let cur_ind = work_offsets[e[0]].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(e[1], Ordering::Relaxed);
});
g.neighbs.par_iter_mut().enumerate().for_each(|(i,e)| {
*e = nbs[i].load(Ordering::Relaxed);
});
/*return the graph, g*/
g
}
/// Take an edge list in and produce a CSR out
/// (u,v)
pub fn new(numv: usize, ref el: Vec<(usize, usize)>) -> CSR {
const NUMCHUNKS: usize = 16;
let chunksz: usize = if numv > NUMCHUNKS {
numv / NUMCHUNKS
} else {
1
};
/*TODO: Parameter*/
let numbins = 16;
let mut ncnt = Vec::new();
for _ in 0..numv {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.par_chunks(chunksz).for_each(|cnk| {
/*Per-thread bin structure*/
let mut bins = Vec::new();
for _ in 0..numbins {
bins.push(Vec::<&(usize, usize)>::new());
}
/*iterate over chunk, push edges to bins*/
cnk.iter().for_each(|e| {
bins[(e).0 % 16].push(e);
});
bins.iter().for_each(|b| {
b.iter().for_each(|e| {
ncnt[(e).0].fetch_add(1, Ordering::SeqCst);
});
});
});
let mut work_offsets = Vec::new();
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: numv,
e: el.len(),
vtxprop: vec![0f64; numv],
offsets: vec![0; numv],
neighbs: vec![0; el.len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::new();
for _ in 0..el.len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.par_chunks(chunksz).for_each(|cnk| {
cnk.iter().for_each(|edge| match *edge {
(v0, v1) => {
let cur_ind = work_offsets[v0].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(v1, Ordering::Relaxed);
}
});
});
g.neighbs
.par_chunks_mut(chunksz)
.enumerate()
.for_each(|(chunkbase, cnk)| {
cnk.iter_mut().enumerate().for_each(|(i, e)| {
*e = nbs[chunkbase + i].load(Ordering::Relaxed);
});
});
/*return the graph, g*/
g
}
/// Get the range of offsets into the neighbs array that hold the neighbors
/// of vertex v
pub fn vtx_offset_range(&self, v: usize) -> (usize, usize) {
(
self.offsets[v],
match v {
v if v == self.v - | CSR | identifier_name |
|
csr.rs |
pub fn get_v(&self) -> usize {
self.v
}
pub fn get_e(&self) -> usize {
self.e
}
pub fn get_offsets(&self) -> &Vec<usize> {
&self.offsets
}
pub fn get_neighbs(&self) -> &[usize] {
&self.neighbs
}
/// Build a random edge list
/// This method returns the generated edge list; `el.len()` is the number of edges.
pub fn random_el(numv: usize, maxe: usize) -> Vec<(usize, usize)> {
let mut rng = rand::thread_rng();
let mut el: Vec<(usize, usize)> = Vec::new();
for i in 0..numv {
/*edges per vertex*/
let num_e: usize = rng.gen_range(0, maxe) as usize;
for _ in 0..num_e {
let edge = (i as usize, rng.gen_range(0, numv) as usize);
el.push(edge);
}
}
el
}
/// Build an edge list from a file containing text describing one.
/// The file format is line oriented and human readable:
/// v0,v1
/// v0,v2
/// v0,v3
/// v0,v3
/// v1,v2
/// v1,v2
/// v2,v3
/// v3,v1
/// ...
///
/// This method returns a tuple of the number of vertices seen and the edge list
/// el.len() is the number of edges.
pub fn el_from_file(path: &str) -> (usize, Vec<(usize, usize)>) {
let mut el: Vec<(usize, usize)> = Vec::new();
let mut maxv: usize = 0;
let f = File::open(path);
match f {
Ok(file) => {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.from_reader(file);
for result in rdr.records() {
match result {
Ok(p) => {
let v0 = p.get(0).unwrap().parse::<usize>().unwrap();
let v1 = p.get(1).unwrap().parse::<usize>().unwrap();
if v0 > maxv {
maxv = v0
}
if v1 > maxv {
maxv = v1
}
el.push((v0, v1));
}
_ => {
eprintln!("Failed to parse file");
}
}
}
}
_ => {
eprintln!("Failed to open file {}", path);
}
}
(maxv + 1, el)
}
pub fn new_from_el_mmap(v: usize, f: String) -> CSR {
let path = PathBuf::from(f);
let file = OpenOptions::new()
.read(true)
.open(&path)
.unwrap();
let mmap = unsafe { Mmap::map(&file).unwrap() };
let el = mmap[..]
.as_slice_of::<usize>()
.unwrap();
let mut ncnt = Vec::with_capacity(v);
for _ in 0..v {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.chunks(2).par_bridge().for_each(|e| {
ncnt[ e[0] ].fetch_add(1, Ordering::SeqCst);
});
let mut work_offsets = Vec::with_capacity(v);
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: v,
e: el.chunks(2).len(),
vtxprop: vec![0f64; v],
offsets: vec![0; v],
neighbs: vec![0; el.chunks(2).len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::with_capacity(el.chunks(2).len());
for _ in 0..el.chunks(2).len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.chunks(2).par_bridge().for_each(|e| {
let cur_ind = work_offsets[e[0]].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(e[1], Ordering::Relaxed);
});
g.neighbs.par_iter_mut().enumerate().for_each(|(i,e)| {
*e = nbs[i].load(Ordering::Relaxed);
});
/*return the graph, g*/
g
}
/// Take an edge list in and produce a CSR out
/// (u,v)
pub fn new(numv: usize, ref el: Vec<(usize, usize)>) -> CSR {
const NUMCHUNKS: usize = 16;
let chunksz: usize = if numv > NUMCHUNKS {
numv / NUMCHUNKS
} else {
1
};
/*TODO: Parameter*/
let numbins = 16;
let mut ncnt = Vec::new();
for _ in 0..numv {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.par_chunks(chunksz).for_each(|cnk| {
/*Per-thread bin structure*/
let mut bins = Vec::new();
for _ in 0..numbins {
bins.push(Vec::<&(usize, usize)>::new());
}
/*iterate over chunk, push edges to bins*/
cnk.iter().for_each(|e| {
bins[(e).0 % 16].push(e);
});
bins.iter().for_each(|b| {
b.iter().for_each(|e| {
ncnt[(e).0].fetch_add(1, Ordering::SeqCst);
});
});
});
let mut work_offsets = Vec::new();
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: numv,
e: el.len(),
vtxprop: vec![0f64; numv],
offsets: vec![0; numv],
neighbs: vec![0; el.len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::new();
for _ in 0..el.len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.par_chunks(chunksz).for_each(|cnk| {
cnk.iter().for_each(|edge| match *edge {
(v0, v1) => {
let cur_ind = work_offsets[v0].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(v1, Ordering::Relaxed);
}
});
});
g.neighbs
.par_chunks_mut(chunksz)
.enumerate()
.for_each(|(chunkbase, cnk)| {
cnk.iter_mut().enumerate().for_each(|(i, e)| {
*e = nbs[chunkbase + i].load(Ordering::Relaxed);
});
});
/*return the graph, g*/
g
}
/// Get the range of offsets into the neighbs array that hold the neighbors
/// of vertex v
pub fn vtx_offset_range(&self, v: usize) -> (usize, usize) {
(
self.offsets[v],
match v {
v if v == self.v - 1 => self.e,
_ => self.offsets[v + 1],
},
)
}
/// read_only_scan is a read only scan of all edges in the entire CSR
/// that accepts a FnMut(usize, usize) -> () closure to apply to each edge
pub fn read_only_scan(&self, mut f: impl FnMut(usize, usize) | {
&mut self.vtxprop
} | identifier_body |
|
main.rs |
null_separator: bool,
/// The maximum search depth, or `None` if no maximum search depth should be set.
///
/// A depth of `1` includes all files under the current directory, a depth of `2` also includes
/// all files under subdirectories of the current directory, etc.
max_depth: Option<usize>,
/// The number of threads to use.
threads: usize,
/// Time to buffer results internally before streaming to the console. This is useful to
/// provide a sorted output, in case the total execution time is shorter than
/// `max_buffer_time`.
max_buffer_time: Option<time::Duration>,
/// Display results as relative or absolute path.
path_display: PathDisplay,
/// `None` if the output should not be colorized. Otherwise, a `LsColors` instance that defines
/// how to style different filetypes.
ls_colors: Option<LsColors>,
/// The type of file to search for. All files other than the specified type will be ignored.
file_type: FileType,
/// The extension to search for. Only entries matching the extension will be included.
///
/// The value (if present) will be a lowercase string without leading dots.
extension: Option<String>,
}
/// The receiver thread can either be buffering results or directly streaming to the console.
enum ReceiverMode {
/// Receiver is still buffering in order to sort the results, if the search finishes fast
/// enough.
Buffering,
/// Receiver is directly printing results to the output.
Streaming,
}
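// Added note: results arrive from the parallel walker threads in arbitrary order. The
// receiver therefore starts in `Buffering` mode; if the whole search finishes within
// `max_buffer_time`, the buffer is sorted before printing, otherwise the receiver
// switches to `Streaming` and prints results in arrival order (see the receiver thread
// spawned in `scan` below).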
/// Root directory
#[cfg(unix)]
static ROOT_DIR: &'static str = "/";
#[cfg(windows)]
static ROOT_DIR: &'static str = "";
/// Print a search result to the console.
fn print_entry(base: &Path, entry: &PathBuf, config: &FdOptions) {
let path_full = base.join(entry);
let path_str = entry.to_string_lossy();
#[cfg(unix)]
let is_executable = |p: Option<&std::fs::Metadata>| {
p.map(|f| f.permissions().mode() & 0o111 != 0)
.unwrap_or(false)
};
#[cfg(windows)]
let is_executable = |_: Option<&std::fs::Metadata>| false;
let stdout = std::io::stdout();
let mut handle = stdout.lock();
if let Some(ref ls_colors) = config.ls_colors {
let default_style = ansi_term::Style::default();
let mut component_path = base.to_path_buf();
if config.path_display == PathDisplay::Absolute {
print!("{}", ls_colors.directory.paint(ROOT_DIR));
}
// Traverse the path and colorize each component
for component in entry.components() {
let comp_str = component.as_os_str().to_string_lossy();
component_path.push(Path::new(comp_str.deref()));
let metadata = component_path.metadata().ok();
let is_directory = metadata.as_ref().map(|md| md.is_dir()).unwrap_or(false);
let style =
if component_path.symlink_metadata()
.map(|md| md.file_type().is_symlink())
.unwrap_or(false) {
&ls_colors.symlink
} else if is_directory {
&ls_colors.directory
} else if is_executable(metadata.as_ref()) {
&ls_colors.executable
} else {
// Look up file name
let o_style =
component_path.file_name()
.and_then(|n| n.to_str())
.and_then(|n| ls_colors.filenames.get(n));
match o_style {
Some(s) => s,
None =>
// Look up file extension
component_path.extension()
.and_then(|e| e.to_str())
.and_then(|e| ls_colors.extensions.get(e))
.unwrap_or(&default_style)
}
};
write!(handle, "{}", style.paint(comp_str)).ok();
if is_directory && component_path != path_full {
let sep = std::path::MAIN_SEPARATOR.to_string();
write!(handle, "{}", style.paint(sep)).ok();
}
}
let r = if config.null_separator {
write!(handle, "\0")
} else {
writeln!(handle, "")
};
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
} else {
// Uncolorized output
let prefix = if config.path_display == PathDisplay::Absolute { ROOT_DIR } else { "" };
let separator = if config.null_separator { "\0" } else { "\n" }; |
let r = write!(&mut std::io::stdout(), "{}{}{}", prefix, path_str, separator);
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
}
}
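// Added note: when colorized output is enabled, the style of each path component is
// resolved in priority order: symlink, then directory, then executable, then an exact
// file-name match, then the extension, and finally the default style.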
/// Recursively scan the given search path and search for files / pathnames matching the pattern.
fn scan(root: &Path, pattern: Arc<Regex>, base: &Path, config: Arc<FdOptions>) {
let (tx, rx) = channel();
let walker = WalkBuilder::new(root)
.hidden(config.ignore_hidden)
.ignore(config.read_ignore)
.git_ignore(config.read_ignore)
.parents(config.read_ignore)
.git_global(config.read_ignore)
.git_exclude(config.read_ignore)
.follow_links(config.follow_links)
.max_depth(config.max_depth)
.threads(config.threads)
.build_parallel();
// Spawn the thread that receives all results through the channel.
let rx_config = Arc::clone(&config);
let rx_base = base.to_owned();
let receiver_thread = thread::spawn(move || {
let start = time::Instant::now();
let mut buffer = vec![];
// Start in buffering mode
let mut mode = ReceiverMode::Buffering;
// Maximum time to wait before we start streaming to the console.
let max_buffer_time = rx_config.max_buffer_time
.unwrap_or_else(|| time::Duration::from_millis(100));
for value in rx {
match mode {
ReceiverMode::Buffering => {
buffer.push(value);
// Have we reached the maximum time?
if time::Instant::now() - start > max_buffer_time {
// Flush the buffer
for v in &buffer {
print_entry(&rx_base, v, &rx_config);
}
buffer.clear();
// Start streaming
mode = ReceiverMode::Streaming;
}
},
ReceiverMode::Streaming => {
print_entry(&rx_base, &value, &rx_config);
}
}
}
// If we have finished fast enough (faster than max_buffer_time), we haven't streamed
// anything to the console yet. In this case, sort the results and print them:
if !buffer.is_empty() {
buffer.sort();
for value in buffer {
print_entry(&rx_base, &value, &rx_config);
}
}
});
// Spawn the sender threads.
walker.run(|| {
let base = base.to_owned();
let config = Arc::clone(&config);
let pattern = Arc::clone(&pattern);
let tx_thread = tx.clone();
Box::new(move |entry_o| {
let entry = match entry_o {
Ok(e) => e,
Err(_) => return ignore::WalkState::Continue,
};
// Filter out unwanted file types.
match config.file_type {
FileType::Any => (),
FileType::RegularFile => if entry.file_type().map_or(false, |ft| !ft.is_file()) {
return ignore::WalkState::Continue;
},
FileType::Directory => if entry.file_type().map_or(false, |ft| !ft.is_dir()) {
return ignore::WalkState::Continue;
},
FileType::SymLink => if entry.file_type().map_or(false, |ft| !ft.is_symlink()) {
return ignore::WalkState::Continue;
},
}
// Filter out unwanted extensions.
if let Some(ref filter_ext) = config.extension {
let entry_ext = entry.path().extension().map(|e| e.to_string_lossy().to_lowercase());
if entry_ext.map_or(true, |ext| ext != *filter_ext) {
return ignore::WalkState::Continue;
}
}
let path_rel_buf = match fshelper::path_relative_from(entry.path(), &*base) {
Some(p) => p,
None => error("Error: could not get relative path for directory entry.")
};
let path_rel = path_rel_buf.as_path();
let search_str_o =
if config.search_full_path {
Some(path_rel.to_string_lossy())
} else {
path_rel.file_name()
.map(|f| f.to_string_lossy())
};
if let Some(search_str) = search_str_o {
// TODO: take care of the unwrap call
pattern.find(&*search_str)
.map(|_| tx_thread.send(path_rel_buf.to_owned()).unwrap());
}
ignore::WalkState::Continue
})
});
// Drop the initial sender. If we don't do this, the receiver will block even
// if all threads have finished, since there is still one sender around.
drop(tx);
// Wait for the receiver thread to print out all results.
receiver_thread.join().unwrap();
}
/// Print error message to stderr and exit with status `1`.
fn error(message: &str | random_line_split |
|
main.rs |
null_separator: bool,
/// The maximum search depth, or `None` if no maximum search depth should be set.
///
/// A depth of `1` includes all files under the current directory, a depth of `2` also includes
/// all files under subdirectories of the current directory, etc.
max_depth: Option<usize>,
/// The number of threads to use.
threads: usize,
/// Time to buffer results internally before streaming to the console. This is useful to
/// provide a sorted output, in case the total execution time is shorter than
/// `max_buffer_time`.
max_buffer_time: Option<time::Duration>,
/// Display results as relative or absolute path.
path_display: PathDisplay,
/// `None` if the output should not be colorized. Otherwise, a `LsColors` instance that defines
/// how to style different filetypes.
ls_colors: Option<LsColors>,
/// The type of file to search for. All files other than the specified type will be ignored.
file_type: FileType,
/// The extension to search for. Only entries matching the extension will be included.
///
/// The value (if present) will be a lowercase string without leading dots.
extension: Option<String>,
}
/// The receiver thread can either be buffering results or directly streaming to the console.
enum ReceiverMode {
/// Receiver is still buffering in order to sort the results, if the search finishes fast
/// enough.
Buffering,
/// Receiver is directly printing results to the output.
Streaming,
}
/// Root directory
#[cfg(unix)]
static ROOT_DIR: &'static str = "/";
#[cfg(windows)]
static ROOT_DIR: &'static str = "";
/// Print a search result to the console.
fn print_entry(base: &Path, entry: &PathBuf, config: &FdOptions) {
let path_full = base.join(entry);
let path_str = entry.to_string_lossy();
#[cfg(unix)]
let is_executable = |p: Option<&std::fs::Metadata>| {
p.map(|f| f.permissions().mode() & 0o111 != 0)
.unwrap_or(false)
};
#[cfg(windows)]
let is_executable = |_: Option<&std::fs::Metadata>| false;
let stdout = std::io::stdout();
let mut handle = stdout.lock();
if let Some(ref ls_colors) = config.ls_colors {
let default_style = ansi_term::Style::default();
let mut component_path = base.to_path_buf();
if config.path_display == PathDisplay::Absolute {
print!("{}", ls_colors.directory.paint(ROOT_DIR));
}
// Traverse the path and colorize each component
for component in entry.components() {
let comp_str = component.as_os_str().to_string_lossy();
component_path.push(Path::new(comp_str.deref()));
let metadata = component_path.metadata().ok();
let is_directory = metadata.as_ref().map(|md| md.is_dir()).unwrap_or(false);
let style =
if component_path.symlink_metadata()
.map(|md| md.file_type().is_symlink())
.unwrap_or(false) | else if is_directory {
&ls_colors.directory
} else if is_executable(metadata.as_ref()) {
&ls_colors.executable
} else {
// Look up file name
let o_style =
component_path.file_name()
.and_then(|n| n.to_str())
.and_then(|n| ls_colors.filenames.get(n));
match o_style {
Some(s) => s,
None =>
// Look up file extension
component_path.extension()
.and_then(|e| e.to_str())
.and_then(|e| ls_colors.extensions.get(e))
.unwrap_or(&default_style)
}
};
write!(handle, "{}", style.paint(comp_str)).ok();
if is_directory && component_path != path_full {
let sep = std::path::MAIN_SEPARATOR.to_string();
write!(handle, "{}", style.paint(sep)).ok();
}
}
let r = if config.null_separator {
write!(handle, "\0")
} else {
writeln!(handle, "")
};
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
} else {
// Uncolorized output
let prefix = if config.path_display == PathDisplay::Absolute { ROOT_DIR } else { "" };
let separator = if config.null_separator { "\0" } else { "\n" };
let r = write!(&mut std::io::stdout(), "{}{}{}", prefix, path_str, separator);
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
}
}
/// Recursively scan the given search path and search for files / pathnames matching the pattern.
fn scan(root: &Path, pattern: Arc<Regex>, base: &Path, config: Arc<FdOptions>) {
let (tx, rx) = channel();
let walker = WalkBuilder::new(root)
.hidden(config.ignore_hidden)
.ignore(config.read_ignore)
.git_ignore(config.read_ignore)
.parents(config.read_ignore)
.git_global(config.read_ignore)
.git_exclude(config.read_ignore)
.follow_links(config.follow_links)
.max_depth(config.max_depth)
.threads(config.threads)
.build_parallel();
// Spawn the thread that receives all results through the channel.
let rx_config = Arc::clone(&config);
let rx_base = base.to_owned();
let receiver_thread = thread::spawn(move || {
let start = time::Instant::now();
let mut buffer = vec![];
// Start in buffering mode
let mut mode = ReceiverMode::Buffering;
// Maximum time to wait before we start streaming to the console.
let max_buffer_time = rx_config.max_buffer_time
.unwrap_or_else(|| time::Duration::from_millis(100));
for value in rx {
match mode {
ReceiverMode::Buffering => {
buffer.push(value);
// Have we reached the maximum time?
if time::Instant::now() - start > max_buffer_time {
// Flush the buffer
for v in &buffer {
print_entry(&rx_base, v, &rx_config);
}
buffer.clear();
// Start streaming
mode = ReceiverMode::Streaming;
}
},
ReceiverMode::Streaming => {
print_entry(&rx_base, &value, &rx_config);
}
}
}
// If we have finished fast enough (faster than max_buffer_time), we haven't streamed
// anything to the console yet. In this case, sort the results and print them:
if !buffer.is_empty() {
buffer.sort();
for value in buffer {
print_entry(&rx_base, &value, &rx_config);
}
}
});
// Spawn the sender threads.
walker.run(|| {
let base = base.to_owned();
let config = Arc::clone(&config);
let pattern = Arc::clone(&pattern);
let tx_thread = tx.clone();
Box::new(move |entry_o| {
let entry = match entry_o {
Ok(e) => e,
Err(_) => return ignore::WalkState::Continue,
};
// Filter out unwanted file types.
match config.file_type {
FileType::Any => (),
FileType::RegularFile => if entry.file_type().map_or(false, |ft| !ft.is_file()) {
return ignore::WalkState::Continue;
},
FileType::Directory => if entry.file_type().map_or(false, |ft| !ft.is_dir()) {
return ignore::WalkState::Continue;
},
FileType::SymLink => if entry.file_type().map_or(false, |ft| !ft.is_symlink()) {
return ignore::WalkState::Continue;
},
}
// Filter out unwanted extensions.
if let Some(ref filter_ext) = config.extension {
let entry_ext = entry.path().extension().map(|e| e.to_string_lossy().to_lowercase());
if entry_ext.map_or(true, |ext| ext != *filter_ext) {
return ignore::WalkState::Continue;
}
}
let path_rel_buf = match fshelper::path_relative_from(entry.path(), &*base) {
Some(p) => p,
None => error("Error: could not get relative path for directory entry.")
};
let path_rel = path_rel_buf.as_path();
let search_str_o =
if config.search_full_path {
Some(path_rel.to_string_lossy())
} else {
path_rel.file_name()
.map(|f| f.to_string_lossy())
};
if let Some(search_str) = search_str_o {
// TODO: take care of the unwrap call
pattern.find(&*search_str)
.map(|_| tx_thread.send(path_rel_buf.to_owned()).unwrap());
}
ignore::WalkState::Continue
})
});
// Drop the initial sender. If we don't do this, the receiver will block even
// if all threads have finished, since there is still one sender around.
drop(tx);
// Wait for the receiver thread to print out all results.
receiver_thread.join().unwrap();
}
/// Print error message to stderr and exit with status `1`.
fn error(message: &str | {
&ls_colors.symlink
} | conditional_block |
main.rs |
null_separator: bool,
/// The maximum search depth, or `None` if no maximum search depth should be set.
///
/// A depth of `1` includes all files under the current directory, a depth of `2` also includes
/// all files under subdirectories of the current directory, etc.
max_depth: Option<usize>,
/// The number of threads to use.
threads: usize,
/// Time to buffer results internally before streaming to the console. This is useful to
/// provide a sorted output, in case the total execution time is shorter than
/// `max_buffer_time`.
max_buffer_time: Option<time::Duration>,
/// Display results as relative or absolute path.
path_display: PathDisplay,
/// `None` if the output should not be colorized. Otherwise, a `LsColors` instance that defines
/// how to style different filetypes.
ls_colors: Option<LsColors>,
/// The type of file to search for. All files other than the specified type will be ignored.
file_type: FileType,
/// The extension to search for. Only entries matching the extension will be included.
///
/// The value (if present) will be a lowercase string without leading dots.
extension: Option<String>,
}
/// The receiver thread can either be buffering results or directly streaming to the console.
enum ReceiverMode {
/// Receiver is still buffering in order to sort the results, if the search finishes fast
/// enough.
Buffering,
/// Receiver is directly printing results to the output.
Streaming,
}
/// Root directory
#[cfg(unix)]
static ROOT_DIR: &'static str = "/";
#[cfg(windows)]
static ROOT_DIR: &'static str = "";
/// Print a search result to the console.
fn print_entry(base: &Path, entry: &PathBuf, config: &FdOptions) {
let path_full = base.join(entry);
let path_str = entry.to_string_lossy();
#[cfg(unix)]
let is_executable = |p: Option<&std::fs::Metadata>| {
p.map(|f| f.permissions().mode() & 0o111 != 0)
.unwrap_or(false)
};
#[cfg(windows)]
let is_executable = |_: Option<&std::fs::Metadata>| false;
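// On Windows there is no Unix permission bit to inspect, so executables are never
// highlighted; this closure simply returns false for every entry.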
let stdout = std::io::stdout();
let mut handle = stdout.lock();
if let Some(ref ls_colors) = config.ls_colors {
let default_style = ansi_term::Style::default();
let mut component_path = base.to_path_buf();
if config.path_display == PathDisplay::Absolute {
print!("{}", ls_colors.directory.paint(ROOT_DIR));
}
// Traverse the path and colorize each component
for component in entry.components() {
let comp_str = component.as_os_str().to_string_lossy();
component_path.push(Path::new(comp_str.deref()));
let metadata = component_path.metadata().ok();
let is_directory = metadata.as_ref().map(|md| md.is_dir()).unwrap_or(false);
let style =
if component_path.symlink_metadata()
.map(|md| md.file_type().is_symlink())
.unwrap_or(false) {
&ls_colors.symlink
} else if is_directory {
&ls_colors.directory
} else if is_executable(metadata.as_ref()) {
&ls_colors.executable
} else {
// Look up file name
let o_style =
component_path.file_name()
.and_then(|n| n.to_str())
.and_then(|n| ls_colors.filenames.get(n));
match o_style {
Some(s) => s,
None =>
// Look up file extension
component_path.extension()
.and_then(|e| e.to_str())
.and_then(|e| ls_colors.extensions.get(e))
.unwrap_or(&default_style)
}
};
write!(handle, "{}", style.paint(comp_str)).ok();
if is_directory && component_path != path_full {
let sep = std::path::MAIN_SEPARATOR.to_string();
write!(handle, "{}", style.paint(sep)).ok();
}
}
let r = if config.null_separator {
write!(handle, "\0")
} else {
writeln!(handle, "")
};
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
} else {
// Uncolorized output
let prefix = if config.path_display == PathDisplay::Absolute { ROOT_DIR } else { "" };
let separator = if config.null_separator { "\0" } else { "\n" };
let r = write!(&mut std::io::stdout(), "{}{}{}", prefix, path_str, separator);
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
}
}
/// Recursively scan the given search path and search for files / pathnames matching the pattern.
fn | (root: &Path, pattern: Arc<Regex>, base: &Path, config: Arc<FdOptions>) {
let (tx, rx) = channel();
let walker = WalkBuilder::new(root)
.hidden(config.ignore_hidden)
.ignore(config.read_ignore)
.git_ignore(config.read_ignore)
.parents(config.read_ignore)
.git_global(config.read_ignore)
.git_exclude(config.read_ignore)
.follow_links(config.follow_links)
.max_depth(config.max_depth)
.threads(config.threads)
.build_parallel();
// Spawn the thread that receives all results through the channel.
let rx_config = Arc::clone(&config);
let rx_base = base.to_owned();
let receiver_thread = thread::spawn(move || {
let start = time::Instant::now();
let mut buffer = vec![];
// Start in buffering mode
let mut mode = ReceiverMode::Buffering;
// Maximum time to wait before we start streaming to the console.
let max_buffer_time = rx_config.max_buffer_time
.unwrap_or_else(|| time::Duration::from_millis(100));
for value in rx {
match mode {
ReceiverMode::Buffering => {
buffer.push(value);
// Have we reached the maximum time?
if time::Instant::now() - start > max_buffer_time {
// Flush the buffer
for v in &buffer {
print_entry(&rx_base, v, &rx_config);
}
buffer.clear();
// Start streaming
mode = ReceiverMode::Streaming;
}
},
ReceiverMode::Streaming => {
print_entry(&rx_base, &value, &rx_config);
}
}
}
// If we have finished fast enough (faster than max_buffer_time), we haven't streamed
// anything to the console, yet. In this case, sort the results and print them:
if !buffer.is_empty() {
buffer.sort();
for value in buffer {
print_entry(&rx_base, &value, &rx_config);
}
}
});
// Spawn the sender threads.
walker.run(|| {
let base = base.to_owned();
let config = Arc::clone(&config);
let pattern = Arc::clone(&pattern);
let tx_thread = tx.clone();
Box::new(move |entry_o| {
let entry = match entry_o {
Ok(e) => e,
Err(_) => return ignore::WalkState::Continue,
};
// Filter out unwanted file types.
match config.file_type {
FileType::Any => (),
FileType::RegularFile => if entry.file_type().map_or(false, |ft| !ft.is_file()) {
return ignore::WalkState::Continue;
},
FileType::Directory => if entry.file_type().map_or(false, |ft| !ft.is_dir()) {
return ignore::WalkState::Continue;
},
FileType::SymLink => if entry.file_type().map_or(false, |ft| !ft.is_symlink()) {
return ignore::WalkState::Continue;
},
}
// Filter out unwanted extensions.
if let Some(ref filter_ext) = config.extension {
let entry_ext = entry.path().extension().map(|e| e.to_string_lossy().to_lowercase());
if entry_ext.map_or(true, |ext| ext != *filter_ext) {
return ignore::WalkState::Continue;
}
}
let path_rel_buf = match fshelper::path_relative_from(entry.path(), &*base) {
Some(p) => p,
None => error("Error: could not get relative path for directory entry.")
};
let path_rel = path_rel_buf.as_path();
let search_str_o =
if config.search_full_path {
Some(path_rel.to_string_lossy())
} else {
path_rel.file_name()
.map(|f| f.to_string_lossy())
};
if let Some(search_str) = search_str_o {
// TODO: take care of the unwrap call
pattern.find(&*search_str)
.map(|_| tx_thread.send(path_rel_buf.to_owned()).unwrap());
}
ignore::WalkState::Continue
})
});
// Drop the initial sender. If we don't do this, the receiver will block even
// if all threads have finished, since there is still one sender around.
drop(tx);
// Wait for the receiver thread to print out all results.
receiver_thread.join().unwrap();
}
/// Print error message to stderr and exit with status `1`.
fn error(message: &str | scan | identifier_name |
doc_upsert.rs | (&self) -> Signature {
Signature::build("doc upsert")
.optional("id", SyntaxShape::String, "the document id")
.optional("content", SyntaxShape::Any, "the document content")
.named(
"id-column",
SyntaxShape::String,
"the name of the id column if used with an input stream",
None,
)
.named(
"bucket",
SyntaxShape::String,
"the name of the bucket",
None,
)
.named(
"content-column",
SyntaxShape::String,
"the name of the content column if used with an input stream",
None,
)
.named(
"expiry",
SyntaxShape::Number,
"the expiry for the documents in seconds, or absolute",
None,
)
.named("scope", SyntaxShape::String, "the name of the scope", None)
.named(
"collection",
SyntaxShape::String,
"the name of the collection",
None,
)
.named(
"databases",
SyntaxShape::String,
"the databases which should be contacted",
None,
)
.named(
"batch-size",
SyntaxShape::Number,
"the maximum number of items to batch send at a time",
None,
)
.switch("halt-on-error", "halt on any errors", Some('e'))
.category(Category::Custom("couchbase".to_string()))
}
fn usage(&self) -> &str {
"Upsert (insert or override) a document through the data service"
}
fn run(
&self,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
run_upsert(self.state.clone(), engine_state, stack, call, input)
}
}
fn build_req(key: String, value: Vec<u8>, expiry: u32) -> KeyValueRequest {
KeyValueRequest::Set { key, value, expiry }
}
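// build_req only wraps the key/value/expiry triple into a KeyValueRequest::Set; passing it
// around as a function pointer lets run_kv_store_ops stay generic over the kind of KV mutation.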
fn run_upsert(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
let results = run_kv_store_ops(state, engine_state, stack, call, input, build_req)?;
Ok(Value::List {
vals: results,
span: call.head,
}
.into_pipeline_data())
}
pub(crate) fn run_kv_store_ops(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let span = call.head;
let id_column = call
.get_flag(engine_state, stack, "id-column")?
.unwrap_or_else(|| String::from("id"));
let content_column = call
.get_flag(engine_state, stack, "content-column")?
.unwrap_or_else(|| String::from("content"));
let input_args = if let Some(id) = call.opt::<String>(engine_state, stack, 0)? {
if let Some(v) = call.opt::<Value>(engine_state, stack, 1)? {
let content = convert_nu_value_to_json_value(&v, span)?;
vec![(id, content)]
} else {
vec![]
}
} else {
vec![]
};
let filtered = input.into_iter().filter_map(move |i| {
let id_column = id_column.clone();
let content_column = content_column.clone();
if let Value::Record { cols, vals, .. } = i {
let mut id = None;
let mut content = None;
for (k, v) in cols.iter().zip(vals) {
if k.clone() == id_column {
id = v.as_string().ok();
}
if k.clone() == content_column {
content = convert_nu_value_to_json_value(&v, span).ok();
} | }
if let Some(i) = id {
if let Some(c) = content {
return Some((i, c));
}
}
}
None
});
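// The filter_map above turns each incoming record row into an (id, JSON content) pair,
// using the configured id/content column names and silently dropping rows that lack either.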
let mut all_items = vec![];
for item in filtered.chain(input_args) {
let value =
serde_json::to_vec(&item.1).map_err(|e| serialize_error(e.to_string(), span))?;
all_items.push((item.0, value));
}
run_kv_mutations(
state,
engine_state,
stack,
call,
span,
all_items,
req_builder,
)
}
pub fn run_kv_mutations(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
span: Span,
all_items: Vec<(String, Vec<u8>)>,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let ctrl_c = engine_state.ctrlc.as_ref().unwrap().clone();
let expiry: i64 = call.get_flag(engine_state, stack, "expiry")?.unwrap_or(0);
let batch_size: Option<i64> = call.get_flag(engine_state, stack, "batch-size")?;
let bucket_flag = call.get_flag(engine_state, stack, "bucket")?;
let scope_flag = call.get_flag(engine_state, stack, "scope")?;
let collection_flag = call.get_flag(engine_state, stack, "collection")?;
let halt_on_error = call.has_flag("halt-on-error");
let cluster_identifiers = cluster_identifiers_from(engine_state, stack, &state, call, true)?;
let guard = state.lock().unwrap();
let mut all_values = vec![];
if let Some(size) = batch_size {
all_values = build_batched_kv_items(size as u32, all_items.clone());
}
let mut results = vec![];
for identifier in cluster_identifiers {
let rt = Runtime::new().unwrap();
let (active_cluster, client, cid) = match get_active_cluster_client_cid(
&rt,
identifier.clone(),
&guard,
bucket_flag.clone(),
scope_flag.clone(),
collection_flag.clone(),
ctrl_c.clone(),
span,
) {
Ok(c) => c,
Err(e) => {
if halt_on_error {
return Err(e);
}
let mut failures = HashSet::new();
failures.insert(e.to_string());
let collected = MutationResult::new(identifier.clone())
.fail_reasons(failures)
.into_value(call.head);
results.push(collected);
continue;
}
};
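// If a cluster cannot be contacted and --halt-on-error is not set, the failure is recorded
// as a MutationResult for that identifier and the loop moves on to the next cluster.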
if all_values.is_empty() {
all_values = build_batched_kv_items(active_cluster.kv_batch_size(), all_items.clone());
}
let mut workers = FuturesUnordered::new();
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
for items in all_values.clone() {
for item in items.clone() {
let deadline = Instant::now().add(active_cluster.timeouts().data_timeout());
let ctrl_c = ctrl_c.clone();
let client = client.clone();
workers.push(async move {
client
.request(
req_builder(item.0, item.1, expiry as u32),
cid,
deadline,
ctrl_c,
)
.await
});
}
// process_kv_workers will handle creating an error for us if halt_on_error is set so we
// can just bubble it.
let worked = process_kv_workers(workers, &rt, halt_on_error, span)?;
success += worked.success;
failed += worked.failed;
fail_reasons.extend(worked.fail_reasons);
workers = FuturesUnordered::new()
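// Resetting `workers` after each batch means at most one batch of requests is in flight
// at a time, bounding memory use and concurrency per cluster.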
}
let collected = MutationResult::new(identifier.clone())
.success(success)
.failed(failed)
.fail_reasons(fail_reasons);
results.push(collected.into_value(span));
}
Ok(results)
}
pub(crate) struct WorkerResponse {
pub(crate) success: i32,
pub(crate) failed: i32,
pub(crate) fail_reasons: HashSet<String>,
}
pub(crate) fn process_kv_workers(
mut workers: FuturesUnordered<impl Future<Output = Result<KvResponse, ClientError>>>,
rt: &Runtime,
halt_on_error: bool,
span: Span,
) -> Result<WorkerResponse, ShellError> {
let (success, failed, fail_reasons) = rt.block_on(async {
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
while let Some(result) = workers.next().await {
match result {
Ok(_) => success += 1,
Err(e) => {
if halt_on_error {
return Err(client_error_to_shell_error(e, span));
}
failed += 1;
fail_reasons.insert(e.to_string());
}
}
}
Ok((success, failed, fail_reasons))
})?;
Ok(WorkerResponse {
success,
failed,
fail_reasons,
})
}
pub(crate | random_line_split |
|
doc_upsert.rs | self) -> Signature {
Signature::build("doc upsert")
.optional("id", SyntaxShape::String, "the document id")
.optional("content", SyntaxShape::Any, "the document content")
.named(
"id-column",
SyntaxShape::String,
"the name of the id column if used with an input stream",
None,
)
.named(
"bucket",
SyntaxShape::String,
"the name of the bucket",
None,
)
.named(
"content-column",
SyntaxShape::String,
"the name of the content column if used with an input stream",
None,
)
.named(
"expiry",
SyntaxShape::Number,
"the expiry for the documents in seconds, or absolute",
None,
)
.named("scope", SyntaxShape::String, "the name of the scope", None)
.named(
"collection",
SyntaxShape::String,
"the name of the collection",
None,
)
.named(
"databases",
SyntaxShape::String,
"the databases which should be contacted",
None,
)
.named(
"batch-size",
SyntaxShape::Number,
"the maximum number of items to batch send at a time",
None,
)
.switch("halt-on-error", "halt on any errors", Some('e'))
.category(Category::Custom("couchbase".to_string()))
}
fn usage(&self) -> &str {
"Upsert (insert or override) a document through the data service"
}
fn run(
&self,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
run_upsert(self.state.clone(), engine_state, stack, call, input)
}
}
fn build_req(key: String, value: Vec<u8>, expiry: u32) -> KeyValueRequest {
KeyValueRequest::Set { key, value, expiry }
}
fn | (
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
let results = run_kv_store_ops(state, engine_state, stack, call, input, build_req)?;
Ok(Value::List {
vals: results,
span: call.head,
}
.into_pipeline_data())
}
pub(crate) fn run_kv_store_ops(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let span = call.head;
let id_column = call
.get_flag(engine_state, stack, "id-column")?
.unwrap_or_else(|| String::from("id"));
let content_column = call
.get_flag(engine_state, stack, "content-column")?
.unwrap_or_else(|| String::from("content"));
let input_args = if let Some(id) = call.opt::<String>(engine_state, stack, 0)? {
if let Some(v) = call.opt::<Value>(engine_state, stack, 1)? {
let content = convert_nu_value_to_json_value(&v, span)?;
vec![(id, content)]
} else {
vec![]
}
} else {
vec![]
};
let filtered = input.into_iter().filter_map(move |i| {
let id_column = id_column.clone();
let content_column = content_column.clone();
if let Value::Record { cols, vals, .. } = i {
let mut id = None;
let mut content = None;
for (k, v) in cols.iter().zip(vals) {
if k.clone() == id_column {
id = v.as_string().ok();
}
if k.clone() == content_column {
content = convert_nu_value_to_json_value(&v, span).ok();
}
}
if let Some(i) = id {
if let Some(c) = content {
return Some((i, c));
}
}
}
None
});
let mut all_items = vec![];
for item in filtered.chain(input_args) {
let value =
serde_json::to_vec(&item.1).map_err(|e| serialize_error(e.to_string(), span))?;
all_items.push((item.0, value));
}
run_kv_mutations(
state,
engine_state,
stack,
call,
span,
all_items,
req_builder,
)
}
pub fn run_kv_mutations(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
span: Span,
all_items: Vec<(String, Vec<u8>)>,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let ctrl_c = engine_state.ctrlc.as_ref().unwrap().clone();
let expiry: i64 = call.get_flag(engine_state, stack, "expiry")?.unwrap_or(0);
let batch_size: Option<i64> = call.get_flag(engine_state, stack, "batch-size")?;
let bucket_flag = call.get_flag(engine_state, stack, "bucket")?;
let scope_flag = call.get_flag(engine_state, stack, "scope")?;
let collection_flag = call.get_flag(engine_state, stack, "collection")?;
let halt_on_error = call.has_flag("halt-on-error");
let cluster_identifiers = cluster_identifiers_from(engine_state, stack, &state, call, true)?;
let guard = state.lock().unwrap();
let mut all_values = vec![];
if let Some(size) = batch_size {
all_values = build_batched_kv_items(size as u32, all_items.clone());
}
let mut results = vec![];
for identifier in cluster_identifiers {
let rt = Runtime::new().unwrap();
let (active_cluster, client, cid) = match get_active_cluster_client_cid(
&rt,
identifier.clone(),
&guard,
bucket_flag.clone(),
scope_flag.clone(),
collection_flag.clone(),
ctrl_c.clone(),
span,
) {
Ok(c) => c,
Err(e) => {
if halt_on_error {
return Err(e);
}
let mut failures = HashSet::new();
failures.insert(e.to_string());
let collected = MutationResult::new(identifier.clone())
.fail_reasons(failures)
.into_value(call.head);
results.push(collected);
continue;
}
};
if all_values.is_empty() {
all_values = build_batched_kv_items(active_cluster.kv_batch_size(), all_items.clone());
}
let mut workers = FuturesUnordered::new();
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
for items in all_values.clone() {
for item in items.clone() {
let deadline = Instant::now().add(active_cluster.timeouts().data_timeout());
let ctrl_c = ctrl_c.clone();
let client = client.clone();
workers.push(async move {
client
.request(
req_builder(item.0, item.1, expiry as u32),
cid,
deadline,
ctrl_c,
)
.await
});
}
// process_kv_workers will handle creating an error for us if halt_on_error is set so we
// can just bubble it.
let worked = process_kv_workers(workers, &rt, halt_on_error, span)?;
success += worked.success;
failed += worked.failed;
fail_reasons.extend(worked.fail_reasons);
workers = FuturesUnordered::new()
}
let collected = MutationResult::new(identifier.clone())
.success(success)
.failed(failed)
.fail_reasons(fail_reasons);
results.push(collected.into_value(span));
}
Ok(results)
}
pub(crate) struct WorkerResponse {
pub(crate) success: i32,
pub(crate) failed: i32,
pub(crate) fail_reasons: HashSet<String>,
}
pub(crate) fn process_kv_workers(
mut workers: FuturesUnordered<impl Future<Output = Result<KvResponse, ClientError>>>,
rt: &Runtime,
halt_on_error: bool,
span: Span,
) -> Result<WorkerResponse, ShellError> {
let (success, failed, fail_reasons) = rt.block_on(async {
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
while let Some(result) = workers.next().await {
match result {
Ok(_) => success += 1,
Err(e) => {
if halt_on_error {
return Err(client_error_to_shell_error(e, span));
}
failed += 1;
fail_reasons.insert(e.to_string());
}
}
}
Ok((success, failed, fail_reasons))
})?;
Ok(WorkerResponse {
success,
failed,
fail_reasons,
})
}
pub(crate | run_upsert | identifier_name |
doc_upsert.rs | self) -> Signature {
Signature::build("doc upsert")
.optional("id", SyntaxShape::String, "the document id")
.optional("content", SyntaxShape::Any, "the document content")
.named(
"id-column",
SyntaxShape::String,
"the name of the id column if used with an input stream",
None,
)
.named(
"bucket",
SyntaxShape::String,
"the name of the bucket",
None,
)
.named(
"content-column",
SyntaxShape::String,
"the name of the content column if used with an input stream",
None,
)
.named(
"expiry",
SyntaxShape::Number,
"the expiry for the documents in seconds, or absolute",
None,
)
.named("scope", SyntaxShape::String, "the name of the scope", None)
.named(
"collection",
SyntaxShape::String,
"the name of the collection",
None,
)
.named(
"databases",
SyntaxShape::String,
"the databases which should be contacted",
None,
)
.named(
"batch-size",
SyntaxShape::Number,
"the maximum number of items to batch send at a time",
None,
)
.switch("halt-on-error", "halt on any errors", Some('e'))
.category(Category::Custom("couchbase".to_string()))
}
fn usage(&self) -> &str {
"Upsert (insert or override) a document through the data service"
}
fn run(
&self,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
run_upsert(self.state.clone(), engine_state, stack, call, input)
}
}
fn build_req(key: String, value: Vec<u8>, expiry: u32) -> KeyValueRequest |
fn run_upsert(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
let results = run_kv_store_ops(state, engine_state, stack, call, input, build_req)?;
Ok(Value::List {
vals: results,
span: call.head,
}
.into_pipeline_data())
}
pub(crate) fn run_kv_store_ops(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let span = call.head;
let id_column = call
.get_flag(engine_state, stack, "id-column")?
.unwrap_or_else(|| String::from("id"));
let content_column = call
.get_flag(engine_state, stack, "content-column")?
.unwrap_or_else(|| String::from("content"));
let input_args = if let Some(id) = call.opt::<String>(engine_state, stack, 0)? {
if let Some(v) = call.opt::<Value>(engine_state, stack, 1)? {
let content = convert_nu_value_to_json_value(&v, span)?;
vec![(id, content)]
} else {
vec![]
}
} else {
vec![]
};
let filtered = input.into_iter().filter_map(move |i| {
let id_column = id_column.clone();
let content_column = content_column.clone();
if let Value::Record { cols, vals, .. } = i {
let mut id = None;
let mut content = None;
for (k, v) in cols.iter().zip(vals) {
if k.clone() == id_column {
id = v.as_string().ok();
}
if k.clone() == content_column {
content = convert_nu_value_to_json_value(&v, span).ok();
}
}
if let Some(i) = id {
if let Some(c) = content {
return Some((i, c));
}
}
}
None
});
let mut all_items = vec![];
for item in filtered.chain(input_args) {
let value =
serde_json::to_vec(&item.1).map_err(|e| serialize_error(e.to_string(), span))?;
all_items.push((item.0, value));
}
run_kv_mutations(
state,
engine_state,
stack,
call,
span,
all_items,
req_builder,
)
}
pub fn run_kv_mutations(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
span: Span,
all_items: Vec<(String, Vec<u8>)>,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let ctrl_c = engine_state.ctrlc.as_ref().unwrap().clone();
let expiry: i64 = call.get_flag(engine_state, stack, "expiry")?.unwrap_or(0);
let batch_size: Option<i64> = call.get_flag(engine_state, stack, "batch-size")?;
let bucket_flag = call.get_flag(engine_state, stack, "bucket")?;
let scope_flag = call.get_flag(engine_state, stack, "scope")?;
let collection_flag = call.get_flag(engine_state, stack, "collection")?;
let halt_on_error = call.has_flag("halt-on-error");
let cluster_identifiers = cluster_identifiers_from(engine_state, stack, &state, call, true)?;
let guard = state.lock().unwrap();
let mut all_values = vec![];
if let Some(size) = batch_size {
all_values = build_batched_kv_items(size as u32, all_items.clone());
}
let mut results = vec![];
for identifier in cluster_identifiers {
let rt = Runtime::new().unwrap();
let (active_cluster, client, cid) = match get_active_cluster_client_cid(
&rt,
identifier.clone(),
&guard,
bucket_flag.clone(),
scope_flag.clone(),
collection_flag.clone(),
ctrl_c.clone(),
span,
) {
Ok(c) => c,
Err(e) => {
if halt_on_error {
return Err(e);
}
let mut failures = HashSet::new();
failures.insert(e.to_string());
let collected = MutationResult::new(identifier.clone())
.fail_reasons(failures)
.into_value(call.head);
results.push(collected);
continue;
}
};
if all_values.is_empty() {
all_values = build_batched_kv_items(active_cluster.kv_batch_size(), all_items.clone());
}
let mut workers = FuturesUnordered::new();
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
for items in all_values.clone() {
for item in items.clone() {
let deadline = Instant::now().add(active_cluster.timeouts().data_timeout());
let ctrl_c = ctrl_c.clone();
let client = client.clone();
workers.push(async move {
client
.request(
req_builder(item.0, item.1, expiry as u32),
cid,
deadline,
ctrl_c,
)
.await
});
}
// process_kv_workers will handle creating an error for us if halt_on_error is set so we
// can just bubble it.
let worked = process_kv_workers(workers, &rt, halt_on_error, span)?;
success += worked.success;
failed += worked.failed;
fail_reasons.extend(worked.fail_reasons);
workers = FuturesUnordered::new()
}
let collected = MutationResult::new(identifier.clone())
.success(success)
.failed(failed)
.fail_reasons(fail_reasons);
results.push(collected.into_value(span));
}
Ok(results)
}
pub(crate) struct WorkerResponse {
pub(crate) success: i32,
pub(crate) failed: i32,
pub(crate) fail_reasons: HashSet<String>,
}
pub(crate) fn process_kv_workers(
mut workers: FuturesUnordered<impl Future<Output = Result<KvResponse, ClientError>>>,
rt: &Runtime,
halt_on_error: bool,
span: Span,
) -> Result<WorkerResponse, ShellError> {
let (success, failed, fail_reasons) = rt.block_on(async {
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
while let Some(result) = workers.next().await {
match result {
Ok(_) => success += 1,
Err(e) => {
if halt_on_error {
return Err(client_error_to_shell_error(e, span));
}
failed += 1;
fail_reasons.insert(e.to_string());
}
}
}
Ok((success, failed, fail_reasons))
})?;
Ok(WorkerResponse {
success,
failed,
fail_reasons,
})
}
pub | {
KeyValueRequest::Set { key, value, expiry }
} | identifier_body |
doc_upsert.rs | self) -> Signature {
Signature::build("doc upsert")
.optional("id", SyntaxShape::String, "the document id")
.optional("content", SyntaxShape::Any, "the document content")
.named(
"id-column",
SyntaxShape::String,
"the name of the id column if used with an input stream",
None,
)
.named(
"bucket",
SyntaxShape::String,
"the name of the bucket",
None,
)
.named(
"content-column",
SyntaxShape::String,
"the name of the content column if used with an input stream",
None,
)
.named(
"expiry",
SyntaxShape::Number,
"the expiry for the documents in seconds, or absolute",
None,
)
.named("scope", SyntaxShape::String, "the name of the scope", None)
.named(
"collection",
SyntaxShape::String,
"the name of the collection",
None,
)
.named(
"databases",
SyntaxShape::String,
"the databases which should be contacted",
None,
)
.named(
"batch-size",
SyntaxShape::Number,
"the maximum number of items to batch send at a time",
None,
)
.switch("halt-on-error", "halt on any errors", Some('e'))
.category(Category::Custom("couchbase".to_string()))
}
fn usage(&self) -> &str {
"Upsert (insert or override) a document through the data service"
}
fn run(
&self,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
run_upsert(self.state.clone(), engine_state, stack, call, input)
}
}
fn build_req(key: String, value: Vec<u8>, expiry: u32) -> KeyValueRequest {
KeyValueRequest::Set { key, value, expiry }
}
fn run_upsert(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
let results = run_kv_store_ops(state, engine_state, stack, call, input, build_req)?;
Ok(Value::List {
vals: results,
span: call.head,
}
.into_pipeline_data())
}
pub(crate) fn run_kv_store_ops(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let span = call.head;
let id_column = call
.get_flag(engine_state, stack, "id-column")?
.unwrap_or_else(|| String::from("id"));
let content_column = call
.get_flag(engine_state, stack, "content-column")?
.unwrap_or_else(|| String::from("content"));
let input_args = if let Some(id) = call.opt::<String>(engine_state, stack, 0)? {
if let Some(v) = call.opt::<Value>(engine_state, stack, 1)? {
let content = convert_nu_value_to_json_value(&v, span)?;
vec![(id, content)]
} else {
vec![]
}
} else {
vec![]
};
let filtered = input.into_iter().filter_map(move |i| {
let id_column = id_column.clone();
let content_column = content_column.clone();
if let Value::Record { cols, vals, .. } = i {
let mut id = None;
let mut content = None;
for (k, v) in cols.iter().zip(vals) {
if k.clone() == id_column |
if k.clone() == content_column {
content = convert_nu_value_to_json_value(&v, span).ok();
}
}
if let Some(i) = id {
if let Some(c) = content {
return Some((i, c));
}
}
}
None
});
let mut all_items = vec![];
for item in filtered.chain(input_args) {
let value =
serde_json::to_vec(&item.1).map_err(|e| serialize_error(e.to_string(), span))?;
all_items.push((item.0, value));
}
run_kv_mutations(
state,
engine_state,
stack,
call,
span,
all_items,
req_builder,
)
}
pub fn run_kv_mutations(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
span: Span,
all_items: Vec<(String, Vec<u8>)>,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let ctrl_c = engine_state.ctrlc.as_ref().unwrap().clone();
let expiry: i64 = call.get_flag(engine_state, stack, "expiry")?.unwrap_or(0);
let batch_size: Option<i64> = call.get_flag(engine_state, stack, "batch-size")?;
let bucket_flag = call.get_flag(engine_state, stack, "bucket")?;
let scope_flag = call.get_flag(engine_state, stack, "scope")?;
let collection_flag = call.get_flag(engine_state, stack, "collection")?;
let halt_on_error = call.has_flag("halt-on-error");
let cluster_identifiers = cluster_identifiers_from(engine_state, stack, &state, call, true)?;
let guard = state.lock().unwrap();
let mut all_values = vec![];
if let Some(size) = batch_size {
all_values = build_batched_kv_items(size as u32, all_items.clone());
}
let mut results = vec![];
for identifier in cluster_identifiers {
let rt = Runtime::new().unwrap();
let (active_cluster, client, cid) = match get_active_cluster_client_cid(
&rt,
identifier.clone(),
&guard,
bucket_flag.clone(),
scope_flag.clone(),
collection_flag.clone(),
ctrl_c.clone(),
span,
) {
Ok(c) => c,
Err(e) => {
if halt_on_error {
return Err(e);
}
let mut failures = HashSet::new();
failures.insert(e.to_string());
let collected = MutationResult::new(identifier.clone())
.fail_reasons(failures)
.into_value(call.head);
results.push(collected);
continue;
}
};
if all_values.is_empty() {
all_values = build_batched_kv_items(active_cluster.kv_batch_size(), all_items.clone());
}
let mut workers = FuturesUnordered::new();
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
for items in all_values.clone() {
for item in items.clone() {
let deadline = Instant::now().add(active_cluster.timeouts().data_timeout());
let ctrl_c = ctrl_c.clone();
let client = client.clone();
workers.push(async move {
client
.request(
req_builder(item.0, item.1, expiry as u32),
cid,
deadline,
ctrl_c,
)
.await
});
}
// process_kv_workers will handle creating an error for us if halt_on_error is set so we
// can just bubble it.
let worked = process_kv_workers(workers, &rt, halt_on_error, span)?;
success += worked.success;
failed += worked.failed;
fail_reasons.extend(worked.fail_reasons);
workers = FuturesUnordered::new()
}
let collected = MutationResult::new(identifier.clone())
.success(success)
.failed(failed)
.fail_reasons(fail_reasons);
results.push(collected.into_value(span));
}
Ok(results)
}
pub(crate) struct WorkerResponse {
pub(crate) success: i32,
pub(crate) failed: i32,
pub(crate) fail_reasons: HashSet<String>,
}
pub(crate) fn process_kv_workers(
mut workers: FuturesUnordered<impl Future<Output = Result<KvResponse, ClientError>>>,
rt: &Runtime,
halt_on_error: bool,
span: Span,
) -> Result<WorkerResponse, ShellError> {
let (success, failed, fail_reasons) = rt.block_on(async {
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
while let Some(result) = workers.next().await {
match result {
Ok(_) => success += 1,
Err(e) => {
if halt_on_error {
return Err(client_error_to_shell_error(e, span));
}
failed += 1;
fail_reasons.insert(e.to_string());
}
}
}
Ok((success, failed, fail_reasons))
})?;
Ok(WorkerResponse {
success,
failed,
fail_reasons,
})
}
pub | {
id = v.as_string().ok();
} | conditional_block |
pipeline.fromIlastik.py | circularity=","meanIntensity=","totalIntensity=","pos="])
except getopt.GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
print(opts)
usecols = ()
for o, a in opts:
if o in ("-i", "--input"):
filename = a # morphology measurements file
elif o in ("-s", "--seed"):
rndsample = int(a) # Leiden seed
elif o in ("-n", "--nn"):
nn = int(a) # number of nearest neighbours used to build the topology graph
elif o in ("-a", "--area"):
if a == '1': usecols = usecols+(3,)
elif o in ("-p", "--perimeter"):
if a == '1': usecols = usecols+(4,)
elif o in ("-e", "--eccentricity"):
if a == '1': usecols = usecols+(5,)
elif o in ("-c", "--circularity"):
if a == '1': usecols = usecols+(6,)
elif o in ("-m", "--meanIntensity"):
if a == '1': usecols = usecols+(7,)
elif o in ("-t", "--totalIntensity"):
if a == '1': usecols = usecols+(8,)
elif o in ("--pos"):
if a == '1':
position = True
else:
position = False
else:
assert False, "unhandled option"
####################################################################################################
# Define basic filenames
# !!! have the base name dependent on the parameters !!!
####################################################################################################
basename_graph = os.path.splitext(os.path.basename(filename))[0]
if os.path.splitext(os.path.basename(filename))[1] == '.gz':
basename = os.path.splitext(os.path.splitext(os.path.basename(filename))[0])[0]+'.s'+str(rndsample)
dirname = os.path.dirname(filename)
####################################################################################################
# Construct the UMAP graph
# and save the adjacency matrix
# and the degree and clustering coefficient vectors
###################################################################################################
print('Prepare the topological graph ...')
path = os.path.join(dirname, basename_graph)+'.nn'+str(nn)+'.adj.npz'
if os.path.exists(path) and os.path.exists( os.path.join(dirname, basename_graph) + ".graph.pickle" ):
print('The graph exists already and I am now loading it...')
A = sparse.load_npz(path)
pos = np.loadtxt(filename, delimiter="\t",skiprows=True,usecols=(1,2)) # choose x and y and do not consider header
G = nx.read_gpickle(os.path.join(dirname, basename_graph) + ".graph.pickle")
d = getdegree(G)
cc = clusteringCoeff(A)
else:
print('The graph does not exists yet and I am going to create one...')
pos = np.loadtxt(filename, delimiter="\t",skiprows=True,usecols=(1,2))
A = space2graph(pos,nn) # create the topology graph
sparse.save_npz(path, A)
G = nx.from_scipy_sparse_matrix(A, edge_attribute='weight')
d = getdegree(G)
cc = clusteringCoeff(A)
outfile = os.path.join(dirname, basename_graph)+'.nn'+str(nn)+'.degree.gz'
np.savetxt(outfile, d)
outfile = os.path.join(dirname, basename_graph)+'.nn'+str(nn)+'.cc.gz'
np.savetxt(outfile, cc)
nx.write_gpickle(G, os.path.join(dirname, basename_graph) + ".graph.pickle")
pos2norm = np.linalg.norm(pos,axis=1).reshape((pos.shape[0],1)) # the length of the position vector |
print('Topological graph ready!')
print('...the graph has '+str(A.shape[0])+' nodes')
####################################################################################################
# Select the morphological features,
# and set the min number of nodes per subgraph
# Features list:
# fov_name x_centroid y_centroid area perimeter eccentricity circularity mean_intensity total_intensity
# !!!optimize the threshold!!!
###################################################################################################
print('Prepare the morphology array')
print('...the features tuple that we consider is: ',str(usecols))
morphology = np.loadtxt(filename, delimiter="\t", skiprows=True, usecols=usecols).reshape((A.shape[0],len(usecols)))
threshold = (morphology.shape[1]+4)*2 # set the min subgraph size based on the dim of the feature matrix
morphologies = morphology.shape[1] # number of morphological features
####################################################################################################
# Weight the graph taking into account topology and morphology
# the new weight is the ratio topological_similarity/(1+morpho_dissimilarity)
# !!!need to be optimized!!!
###################################################################################################
print('Rescale graph weights by local morphology')
morphology_normed = normalize(morphology, norm='l1', axis=0) # normalize features
GG = copy.deepcopy(G) # create a topology+morphology new graph
for ijw in G.edges(data='weight'): # loop over edges
feature = np.asarray([ abs(morphology_normed[ijw[0],f]-morphology_normed[ijw[1],f]) for f in range(morphologies) ])
GG[ijw[0]][ijw[1]]['weight'] = ijw[2]/(1.0+np.sum(feature))
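# In other words, for an edge (i, j) the combined weight is
#   w_ij = w_topo_ij / (1 + sum_f |m_if - m_jf|)
# so edges between morphologically similar nodes keep (almost) their topological weight,
# while edges between dissimilar nodes are down-weighted.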
####################################################################################################
# Community detection in the topology+morphology graph
# !!! find a way to avoid writing the edge list on disk !!!
###################################################################################################
print('Find the communities in GG')
from cdlib import algorithms
from cdlib import evaluation
from cdlib.utils import convert_graph_formats
import igraph
import leidenalg
from networkx.algorithms.community.quality import modularity
print('...generate connected components as subgraphs...')
graphs = list(nx.connected_component_subgraphs(GG)) # list the connected components
print('...convert networkx graph to igraph object...')
communities = []
for graph in graphs:
nx.write_weighted_edgelist(graph, basename+".edgelist.txt") # write the edge list on disk
g = igraph.Graph.Read_Ncol(basename+".edgelist.txt", names=True, weights="if_present", directed=False) # define the igraph obj
os.remove(basename+".edgelist.txt") # delete the edge list
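# Possible in-memory alternative to the edge-list round trip flagged in the header above
# (untested sketch, assumes python-igraph's Graph.TupleList and string node names):
# g = igraph.Graph.TupleList(((str(u), str(v), w) for u, v, w in graph.edges(data='weight')),
#                            weights=True, directed=False)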
part = leidenalg.find_partition(g,
leidenalg.ModularityVertexPartition,
initial_membership=None,
weights='weight',
seed=rndsample,
n_iterations=2) # find partitions
communities.extend([g.vs[x]['name'] for x in part]) # create a list of communities
bigcommunities = [g for g in communities if len(g) > threshold] # list of big enough communities
outfile = os.path.join(dirname, basename)+'.bigcommunities'
np.save(outfile, bigcommunities) # store the big communities
print('There are '+str(len(bigcommunities))+' big communities and '+str(len(communities))+' communities in total')
####################################################################################################
# Generate the covariance descriptors of the topology graph
# !!! insert a switch for the position !!!
###################################################################################################
print('Generate the covariance descriptor')
if position:
print('...the position information is included')
features = np.hstack((pos2norm,morphology)) # this is rotational invariant
else:
print('...the position information is not included')
features = morphology # this is without positions
outfile_covd = os.path.join(dirname, basename)+'.covd.npy' # name of the covd file
if os.path.exists(outfile_covd):
print('... loading the descriptors ...')
covdata = np.load(outfile_covd,allow_pickle=True) # load covd data
else:
print('... creating the descriptors ...')
covdata = community_covd(features,G,bigcommunities) # get list of cov matrices and a list of nodes per matrix
np.save(outfile_covd,covdata) # store covd data
print('There are '+str(len(covdata))+' covariance descriptors ')
####################################################################################################
# Cluster the covariance descriptors
###################################################################################################
print('Clustering the descriptors')
import umap
import hdbscan
import sklearn.cluster as cluster
from sklearn.cluster import OPTICS
from sklearn.metrics import adjusted_rand_score, adjusted_mutual_info_score
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import SpectralClustering
print('...prepare the data...')
outfile_logvec = os.path.join(dirname, basename)+'.logvec.npy'
if os.path.exists(outfile_logvec):
print('...load the logvec dataset...')
X = np.load(outfile_logvec,allow_pickle=True)
else:
print('...create the logvec dataset...')
logvec = [ linalg.logm(m).reshape((1,m.shape[0]*m.shape[1])) for m in covdata] #calculate the logm and vectorize
X = np.vstack(logvec) #create the array of vectorized covd data
np.save(outfile_logvec,X)
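# The matrix logarithm maps each (symmetric positive-definite) covariance descriptor into a
# flat Euclidean space (the log-Euclidean trick), so ordinary vector-based embedding and
# clustering can be applied to X afterwards.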
print('The vectorized covd array has shape '+str(X.shape))
outfile_clusterable_embedding = os.path.join(dirname, basename)+'.clusterable_embedding.npy'
if os.path.exists(outfile_clusterable_embedding):
print('...load the clusterable embedding...')
clusterable_embedding = np.load(outfile_clusterable_embedding,allow_pickle=True)
else:
print('...create the clusterable embedding...')
clusterable_embedding = umap.UMAP(min_dist=0.0,n_components= | random_line_split |
|
pipeline.fromIlastik.py | circularity=","meanIntensity=","totalIntensity=","pos="])
except getopt.GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
print(opts)
usecols = ()
for o, a in opts:
if o in ("-i", "--input"):
filename = a # morphology measurements file
elif o in ("-s", "--seed"):
rndsample = int(a) # Leiden seed
elif o in ("-n", "--nn"):
nn = int(a) # number of nearest neighbours used to build the topology graph
elif o in ("-a", "--area"):
if a == '1': usecols = usecols+(3,)
elif o in ("-p", "--perimeter"):
if a == '1': usecols = usecols+(4,)
elif o in ("-e", "--eccentricity"):
if a == '1': usecols = usecols+(5,)
elif o in ("-c", "--circularity"):
if a == '1': usecols = usecols+(6,)
elif o in ("-m", "--meanIntensity"):
if a == '1': usecols = usecols+(7,)
elif o in ("-t", "--totalIntensity"):
if a == '1': usecols = usecols+(8,)
elif o in ("--pos"):
if a == '1':
position = True
else:
position = False
else:
assert False, "unhandled option"
####################################################################################################
# Define basic filenames
# !!! have the base name dependent on the parameters !!!
####################################################################################################
basename_graph = os.path.splitext(os.path.basename(filename))[0]
if os.path.splitext(os.path.basename(filename))[1] == '.gz':
basename = os.path.splitext(os.path.splitext(os.path.basename(filename))[0])[0]+'.s'+str(rndsample)
dirname = os.path.dirname(filename)
####################################################################################################
# Construct the UMAP graph
# and save the adjacency matrix
# and the degree and clustering coefficient vectors
###################################################################################################
print('Prepare the topological graph ...')
path = os.path.join(dirname, basename_graph)+'.nn'+str(nn)+'.adj.npz'
if os.path.exists(path) and os.path.exists( os.path.join(dirname, basename_graph) + ".graph.pickle" ):
|
else:
print('The graph does not exists yet and I am going to create one...')
pos = np.loadtxt(filename, delimiter="\t",skiprows=True,usecols=(1,2))
A = space2graph(pos,nn) # create the topology graph
sparse.save_npz(path, A)
G = nx.from_scipy_sparse_matrix(A, edge_attribute='weight')
d = getdegree(G)
cc = clusteringCoeff(A)
outfile = os.path.join(dirname, basename_graph)+'.nn'+str(nn)+'.degree.gz'
np.savetxt(outfile, d)
outfile = os.path.join(dirname, basename_graph)+'.nn'+str(nn)+'.cc.gz'
np.savetxt(outfile, cc)
nx.write_gpickle(G, os.path.join(dirname, basename_graph) + ".graph.pickle")
pos2norm = np.linalg.norm(pos,axis=1).reshape((pos.shape[0],1)) # the length of the position vector
print('Topological graph ready!')
print('...the graph has '+str(A.shape[0])+' nodes')
####################################################################################################
# Select the morphological features,
# and set the min number of nodes per subgraph
# Features list:
# fov_name x_centroid y_centroid area perimeter eccentricity circularity mean_intensity total_intensity
# !!!optimize the threshold!!!
###################################################################################################
print('Prepare the morphology array')
print('...the features tuple that we consider is: ',str(usecols))
morphology = np.loadtxt(filename, delimiter="\t", skiprows=True, usecols=usecols).reshape((A.shape[0],len(usecols)))
threshold = (morphology.shape[1]+4)*2 # set the min subgraph size based on the dim of the feature matrix
morphologies = morphology.shape[1] # number of morphological features
####################################################################################################
# Weight the graph taking into account topology and morphology
# the new weight is the ratio topological_similarity/(1+morpho_dissimilarity)
# !!!need to be optimized!!!
###################################################################################################
print('Rescale graph weights by local morphology')
morphology_normed = normalize(morphology, norm='l1', axis=0) # normalize features
GG = copy.deepcopy(G) # create a topology+morphology new graph
for ijw in G.edges(data='weight'): # loop over edges
feature = np.asarray([ abs(morphology_normed[ijw[0],f]-morphology_normed[ijw[1],f]) for f in range(morphologies) ])
GG[ijw[0]][ijw[1]]['weight'] = ijw[2]/(1.0+np.sum(feature))
####################################################################################################
# Community detection in the topology+morphology graph
# !!! find a way to avoid writing the edge list on disk !!!
###################################################################################################
print('Find the communities in GG')
from cdlib import algorithms
from cdlib import evaluation
from cdlib.utils import convert_graph_formats
import igraph
import leidenalg
from networkx.algorithms.community.quality import modularity
print('...generate connected components as subgraphs...')
graphs = list(nx.connected_component_subgraphs(GG)) # list the connected components
print('...convert networkx graph to igraph object...')
communities = []
for graph in graphs:
nx.write_weighted_edgelist(graph, basename+".edgelist.txt") # write the edge list on disk
g = igraph.Graph.Read_Ncol(basename+".edgelist.txt", names=True, weights="if_present", directed=False) # define the igraph obj
os.remove(basename+".edgelist.txt") # delete the edge list
part = leidenalg.find_partition(g,
leidenalg.ModularityVertexPartition,
initial_membership=None,
weights='weight',
seed=rndsample,
n_iterations=2) # find partitions
communities.extend([g.vs[x]['name'] for x in part]) # create a list of communities
bigcommunities = [g for g in communities if len(g) > threshold] # list of big enough communities
outfile = os.path.join(dirname, basename)+'.bigcommunities'
np.save(outfile, bigcommunities) # store the big communities
print('There are '+str(len(bigcommunities))+' big communities and '+str(len(communities))+' communities in total')
####################################################################################################
# Generate the covariance descriptors of the topology graph
# !!! insert a switch for the position !!!
###################################################################################################
print('Generate the covariance descriptor')
if position:
print('...the position information is included')
features = np.hstack((pos2norm,morphology)) # this is rotational invariant
else:
print('...the position information is not included')
features = morphology # this is without positions
outfile_covd = os.path.join(dirname, basename)+'.covd.npy' # name of the covd file
if os.path.exists(outfile_covd):
print('... loading the descriptors ...')
covdata = np.load(outfile_covd,allow_pickle=True) # load covd data
else:
print('... creating the descriptors ...')
covdata = community_covd(features,G,bigcommunities) # get list of cov matrices and a list of nodes per matrix
np.save(outfile_covd,covdata) # store covd data
print('There are '+str(len(covdata))+' covariance descriptors ')
####################################################################################################
# Cluster the covariance descriptors
###################################################################################################
print('Clustering the descriptors')
import umap
import hdbscan
import sklearn.cluster as cluster
from sklearn.cluster import OPTICS
from sklearn.metrics import adjusted_rand_score, adjusted_mutual_info_score
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import SpectralClustering
print('...prepare the data...')
outfile_logvec = os.path.join(dirname, basename)+'.logvec.npy'
if os.path.exists(outfile_logvec):
print('...load the logvec dataset...')
X = np.load(outfile_logvec,allow_pickle=True)
else:
print('...create the logvec dataset...')
logvec = [ linalg.logm(m).reshape((1,m.shape[0]*m.shape[1])) for m in covdata] #calculate the logm and vectorize
X = np.vstack(logvec) #create the array of vectorized covd data
np.save(outfile_logvec,X)
print('The vectorized covd array has shape '+str(X.shape))
outfile_clusterable_embedding = os.path.join(dirname, basename)+'.clusterable_embedding.npy'
if os.path.exists(outfile_clusterable_embedding):
print('...load the clusterable embedding...')
clusterable_embedding = np.load(outfile_clusterable_embedding,allow_pickle=True)
else:
print('...create the clusterable embedding...')
clusterable_embedding = umap.UMAP(min_dist=0.0,n_components | print('The graph exists already and I am now loading it...')
A = sparse.load_npz(path)
pos = np.loadtxt(filename, delimiter="\t",skiprows=True,usecols=(1,2)) # choose x and y and do not consider header
G = nx.read_gpickle(os.path.join(dirname, basename_graph) + ".graph.pickle")
d = getdegree(G)
cc = clusteringCoeff(A) | conditional_block |
save_hist.py | ')
# Timing
mjd_day = t.root.I3EventHeader.col('time_start_mjd_day')
mjd_sec = t.root.I3EventHeader.col('time_start_mjd_sec')
mjd_ns = t.root.I3EventHeader.col('time_start_mjd_ns')
q['mjd'] = np.zeros(len(mjd_day), dtype=np.float64)
for i in range(len(mjd_day)):
day = int(mjd_day[i])
sec = int(mjd_sec[i])
ns = int(mjd_ns[i])
t1.SetTime(day, sec, ns)
q['mjd'][i] = t1.GetMJD()
# Event ID
run = t.root.I3EventHeader.col('Run')
event = t.root.I3EventHeader.col('Event')
subevent = t.root.I3EventHeader.col('SubEvent')
eventIDs = []
for i in range(len(run)):
eventIDs += ['%s_%s_%s' % (run[i], event[i], subevent[i])]
q['eventIDs'] = np.asarray(eventIDs)
# Condition and prescale passed (filter[condition, prescale])
# For notes on weights see bottom of file
filtermask = df.filter_mask(config)
filternames = df.filter_names(config)
f = {}
for fname in filternames:
f[fname] = t.getNode('/'+filtermask).col(fname)
f[fname] = f[fname][:,0].astype(float)
filterArray = np.array([f[fname] * df.it_weights(fname)
for fname in f.keys()])
filterArray[filterArray == 0] = 100.
q['weights'] = np.amin(filterArray, axis=0)
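# Filters that did not fire were set to 100 above, so the row-wise minimum picks, for each
# event, the smallest filter weight among the filters it actually passed (events passing no
# filter end up with weight 100).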
# Other reconstruction info
q['NStations'] = t.root.NStations.col('value')
t.close()
# Laputop values
#for key in ['x','y','zenith','azimuth','s125','e_proton','e_iron','beta']:
# arrays += ['lap_'+key]
# Get Laputop info
#for value in ['x', 'y', 'zenith', 'azimuth']:
# q['lap_'+value] = t.root.Laputop.col(value)
#for value in ['s125', 'e_proton', 'e_iron', 'beta']:
# q['lap_'+value] = t.root.LaputopParams.col(value)
# Get most likely composition
rList = [rDict[comp] for comp in compList]
full_llhs = np.array([q[r+'LLH'] for r in rList])
max_llh = np.amax(full_llhs, axis=0)
q['llh_comp'] = np.array(['' for i in range(len(q['pLLH']))])
for r in rList:
q['llh_comp'][q[r+'LLH'] == max_llh] = r
for key in ['x', 'y', 'energy']:
q['ML_'+key] = np.array([q[r+'ML_'+key][i]
for i, r in enumerate(q['llh_comp'])])
# Check for multiple most-likely compositions (mark as bad)
badVals = np.sum(full_llhs == max_llh, axis=0)
badVals = (badVals-1).astype('bool')
q['llh_comp'][badVals] = ''
for key in ['x','y','energy']:
q['ML_'+key][badVals] = np.nan
# Calculate sky positions
q['dec'], q['ra'] = getDecRA(q, verbose=False)
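# getDecRA is assumed to convert the reconstructed local direction to equatorial coordinates
# using the per-event MJD computed above; the resulting q['dec']/q['ra'] are what the
# histogram writers bin on.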
# Containment cut
it_geo = df.it_geo(config)
q['cuts'] = {}
q['cuts']['llh'] = inPoly(q['ML_x'], q['ML_y'], 0, config=it_geo)
return q
def histWriter(config, file, outfile):
# Bin values
eList = ['p','h','o','f']
decbins = ['0-12','12-24','24-40']
rabins = ['0-60','60-120','120-180','180-240','240-300','300-360']
# Build general list of key names to write
keyList = []
keyList += ['energy','energy_w','energy_z','energy_w_z']
keyList += ['zenith','zenith_w','core','core_w']
keyList += ['%s_err' % k for k in keyList]
# Split by composition
keyList = ['%s_%s' % (k, e) for k in keyList for e in eList]
# Split spatially into 3 dec bins (~12 degrees each) and 6 ra bins
keyList = ['%s_%s_%s' % (k, dec, ra) for k in keyList \
for dec in decbins for ra in rabins]
# Extract information from hdf5 file
q = hdf5extractor(config, file)
c0 = q['cuts']['llh']
r = np.log10(q['ML_energy'])[c0]
cosz = np.cos(q['zenith'])[c0]
dist = np.sqrt(q['ML_x']**2 + q['ML_y']**2)[c0]
fit = zfix(q['zenith'], bintype='logdist')[c0]
w = q['weights'][c0]
# Make cuts
degree = np.pi / 180.
for e in eList:
q[e] = (q['llh_comp'] == e)[c0]
for dec in decbins:
decmin = (180 - float(dec.split('-')[1])) * degree
decmax = (180 - float(dec.split('-')[0])) * degree
q[dec] = ((q['dec'] >= decmin) * (q['dec'] < decmax))[c0]
for ra in rabins:
ramin = float(ra.split('-')[0]) * degree
ramax = float(ra.split('-')[1]) * degree
q[ra] = ((q['ra'] >= ramin) * (q['ra'] < ramax))[c0]
# Method of intelligently producing histograms based on key names
def smartHist(key, x, bins):
tempx = x
wts = None
params = key.split('_')
e, dec, ra = params[-3:]
c1 = q[e] * q[dec] * q[ra]
if 'z' in params:
tempx = x - fit
if 'w' in params:
wts = w[c1]
if 'err' in params:
wts = (w[c1])**2
h0 = np.histogram(tempx[c1], bins=bins, weights=wts)[0]
return h0
# Energy distribution
h = {}
print 'Calculating energy distributions...'
bins = getEbins(reco=True)
energyKeys = [k for k in keyList if 'energy' in k]
for key in energyKeys:
h[key] = smartHist(key, r, bins)
# Zenith distribution
print 'Calculating zenith distributions...'
bins = np.linspace(0.8, 1, 81)
zenithKeys = [k for k in keyList if 'zenith' in k]
for key in zenithKeys:
h[key] = smartHist(key, cosz, bins)
# Core distribution
print 'Calculating core position distributions...'
bins = np.linspace(0, 700, 141)
coreKeys = [k for k in keyList if 'core' in k]
for key in coreKeys:
h[key] = smartHist(key, dist, bins)
print 'Saving...'
np.save(outfile, h)
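The histogram keys built above encode everything smartHist needs: weighting ('w'), zenith correction ('z'), error accumulation ('err'), the composition letter and the dec/ra bin, all joined by underscores. How one such key decomposes (the key string is an example in the same format, not taken from a run):

key = 'energy_w_z_p_0-12_0-60'
params = key.split('_')
e, dec, ra = params[-3:]        # 'p', '0-12', '0-60'
weighted = 'w' in params        # apply the per-event filter weights
zcorrected = 'z' in params      # subtract the zenith-dependent energy fix
is_error = 'err' in params      # square the weights to build a variance histogram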
def | (config, file, outfile):
nside = 64
npix = hp.nside2npix(nside)
# Binning for various parameters
sbins = np.arange(npix+1, dtype=int)
ebins = np.arange(5, 9.501, 0.05)
dbins = np.linspace(0, 700, 141)
lbins = np.linspace(-20, 20, 151)
# Get desired information from hdf5 file
d = hdf5extractor(config, file)
c0 = d['cuts']['llh']
r = np.log10(d['ML_energy'])[c0]
fit = zfix(d['zenith'], bintype='logdist')[c0]
w = d['weights'][c0]
xy = np.sqrt(d['ML_x']**2 + d['ML_y']**2)[c0]
dllh = (d['fLLH'] - d['pLLH'])[c0]
# Bin in sky
#zen = np.pi - d['zenith'][c0]
#azi = d | skyWriter | identifier_name |
save_hist.py | .getNode('/'+filtermask).col(fname)
f[fname] = f[fname][:,0].astype(float)
filterArray = np.array([f[fname] * df.it_weights(fname)
for fname in f.keys()])
filterArray[filterArray == 0] = 100.
q['weights'] = np.amin(filterArray, axis=0)
# Other reconstruction info
q['NStations'] = t.root.NStations.col('value')
t.close()
# Laputop values
#for key in ['x','y','zenith','azimuth','s125','e_proton','e_iron','beta']:
# arrays += ['lap_'+key]
# Get Laputop info
#for value in ['x', 'y', 'zenith', 'azimuth']:
# q['lap_'+value] = t.root.Laputop.col(value)
#for value in ['s125', 'e_proton', 'e_iron', 'beta']:
# q['lap_'+value] = t.root.LaputopParams.col(value)
# Get most likely composition
rList = [rDict[comp] for comp in compList]
full_llhs = np.array([q[r+'LLH'] for r in rList])
max_llh = np.amax(full_llhs, axis=0)
q['llh_comp'] = np.array(['' for i in range(len(q['pLLH']))])
for r in rList:
q['llh_comp'][q[r+'LLH'] == max_llh] = r
for key in ['x', 'y', 'energy']:
q['ML_'+key] = np.array([q[r+'ML_'+key][i]
for i, r in enumerate(q['llh_comp'])])
# Check for multiple most-likely compositions (mark as bad)
badVals = np.sum(full_llhs == max_llh, axis=0)
badVals = (badVals-1).astype('bool')
q['llh_comp'][badVals] = ''
for key in ['x','y','energy']:
q['ML_'+key][badVals] = np.nan
# Calculate sky positions
q['dec'], q['ra'] = getDecRA(q, verbose=False)
# Containment cut
it_geo = df.it_geo(config)
q['cuts'] = {}
q['cuts']['llh'] = inPoly(q['ML_x'], q['ML_y'], 0, config=it_geo)
return q
def histWriter(config, file, outfile):
# Bin values
eList = ['p','h','o','f']
decbins = ['0-12','12-24','24-40']
rabins = ['0-60','60-120','120-180','180-240','240-300','300-360']
# Build general list of key names to write
keyList = []
keyList += ['energy','energy_w','energy_z','energy_w_z']
keyList += ['zenith','zenith_w','core','core_w']
keyList += ['%s_err' % k for k in keyList]
# Split by composition
keyList = ['%s_%s' % (k, e) for k in keyList for e in eList]
# Split spatially into 3 dec bins (~12 degrees each) and 6 ra bins
keyList = ['%s_%s_%s' % (k, dec, ra) for k in keyList \
for dec in decbins for ra in rabins]
# Extract information from hdf5 file
q = hdf5extractor(config, file)
c0 = q['cuts']['llh']
r = np.log10(q['ML_energy'])[c0]
cosz = np.cos(q['zenith'])[c0]
dist = np.sqrt(q['ML_x']**2 + q['ML_y']**2)[c0]
fit = zfix(q['zenith'], bintype='logdist')[c0]
w = q['weights'][c0]
# Make cuts
degree = np.pi / 180.
for e in eList:
q[e] = (q['llh_comp'] == e)[c0]
for dec in decbins:
decmin = (180 - float(dec.split('-')[1])) * degree
decmax = (180 - float(dec.split('-')[0])) * degree
q[dec] = ((q['dec'] >= decmin) * (q['dec'] < decmax))[c0]
for ra in rabins:
ramin = float(ra.split('-')[0]) * degree
ramax = float(ra.split('-')[1]) * degree
q[ra] = ((q['ra'] >= ramin) * (q['ra'] < ramax))[c0]
# Method of intelligently producing histograms based on key names
def smartHist(key, x, bins):
tempx = x
wts = None
params = key.split('_')
e, dec, ra = params[-3:]
c1 = q[e] * q[dec] * q[ra]
if 'z' in params:
tempx = x - fit
if 'w' in params:
wts = w[c1]
if 'err' in params:
wts = (w[c1])**2
h0 = np.histogram(tempx[c1], bins=bins, weights=wts)[0]
return h0
# Energy distribution
h = {}
print 'Calculating energy distributions...'
bins = getEbins(reco=True)
energyKeys = [k for k in keyList if 'energy' in k]
for key in energyKeys:
h[key] = smartHist(key, r, bins)
# Zenith distribution
print 'Calculating zenith distributions...'
bins = np.linspace(0.8, 1, 81)
zenithKeys = [k for k in keyList if 'zenith' in k]
for key in zenithKeys:
h[key] = smartHist(key, cosz, bins)
# Core distribution
print 'Calculating core position distributions...'
bins = np.linspace(0, 700, 141)
coreKeys = [k for k in keyList if 'core' in k]
for key in coreKeys:
h[key] = smartHist(key, dist, bins)
print 'Saving...'
np.save(outfile, h)
def skyWriter(config, file, outfile):
nside = 64
npix = hp.nside2npix(nside)
# Binning for various parameters
sbins = np.arange(npix+1, dtype=int)
ebins = np.arange(5, 9.501, 0.05)
dbins = np.linspace(0, 700, 141)
lbins = np.linspace(-20, 20, 151)
# Get desired information from hdf5 file
d = hdf5extractor(config, file)
c0 = d['cuts']['llh']
r = np.log10(d['ML_energy'])[c0]
fit = zfix(d['zenith'], bintype='logdist')[c0]
w = d['weights'][c0]
xy = np.sqrt(d['ML_x']**2 + d['ML_y']**2)[c0]
dllh = (d['fLLH'] - d['pLLH'])[c0]
# Bin in sky
#zen = np.pi - d['zenith'][c0]
#azi = d['azimuth'][c0]
dec = d['dec'][c0]
ra = d['ra'][c0]
x = hp.ang2pix(nside, dec, ra)
# Energy cut
ecut = (r >= 6.2)
p = {'weights':w}
q = {}
q['energy_w'] = np.histogram2d(x, r-fit, bins=(sbins,ebins), **p)[0]
q['dist_w'] = np.histogram2d(x, xy, bins=(sbins,dbins), **p)[0]
q['llh_w'] = np.histogram2d(x, dllh, bins=(sbins,lbins), **p)[0]
p['weights'] = w**2
q['energy_err_w'] = np.histogram2d(x, r-fit, bins=(sbins,ebins), **p)[0]
q['dist_err_w'] = np.histogram2d(x, xy, bins=(sbins,dbins), **p)[0]
q['llh_err_w'] = np.histogram2d(x, dllh, bins=(sbins,lbins), **p)[0]
# Energy cut versions
q['llhcut_w'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins), | weights=w[ecut])[0] | random_line_split |
|
save_hist.py | = ''
for key in ['x','y','energy']:
q['ML_'+key][badVals] = np.nan
# Calculate sky positions
q['dec'], q['ra'] = getDecRA(q, verbose=False)
# Containment cut
it_geo = df.it_geo(config)
q['cuts'] = {}
q['cuts']['llh'] = inPoly(q['ML_x'], q['ML_y'], 0, config=it_geo)
return q
def histWriter(config, file, outfile):
# Bin values
eList = ['p','h','o','f']
decbins = ['0-12','12-24','24-40']
rabins = ['0-60','60-120','120-180','180-240','240-300','300-360']
# Build general list of key names to write
keyList = []
keyList += ['energy','energy_w','energy_z','energy_w_z']
keyList += ['zenith','zenith_w','core','core_w']
keyList += ['%s_err' % k for k in keyList]
# Split by composition
keyList = ['%s_%s' % (k, e) for k in keyList for e in eList]
# Split spatially into 3 dec bins (~12 degrees each) and 6 ra bins
keyList = ['%s_%s_%s' % (k, dec, ra) for k in keyList \
for dec in decbins for ra in rabins]
# Extract information from hdf5 file
q = hdf5extractor(config, file)
c0 = q['cuts']['llh']
r = np.log10(q['ML_energy'])[c0]
cosz = np.cos(q['zenith'])[c0]
dist = np.sqrt(q['ML_x']**2 + q['ML_y']**2)[c0]
fit = zfix(q['zenith'], bintype='logdist')[c0]
w = q['weights'][c0]
# Make cuts
degree = np.pi / 180.
for e in eList:
q[e] = (q['llh_comp'] == e)[c0]
for dec in decbins:
decmin = (180 - float(dec.split('-')[1])) * degree
decmax = (180 - float(dec.split('-')[0])) * degree
q[dec] = ((q['dec'] >= decmin) * (q['dec'] < decmax))[c0]
for ra in rabins:
ramin = float(ra.split('-')[0]) * degree
ramax = float(ra.split('-')[1]) * degree
q[ra] = ((q['ra'] >= ramin) * (q['ra'] < ramax))[c0]
# Method of intelligently producing histograms based on key names
def smartHist(key, x, bins):
tempx = x
wts = None
params = key.split('_')
e, dec, ra = params[-3:]
c1 = q[e] * q[dec] * q[ra]
if 'z' in params:
tempx = x - fit
if 'w' in params:
wts = w[c1]
if 'err' in params:
wts = (w[c1])**2
h0 = np.histogram(tempx[c1], bins=bins, weights=wts)[0]
return h0
# Energy distribution
h = {}
print 'Calculating energy distributions...'
bins = getEbins(reco=True)
energyKeys = [k for k in keyList if 'energy' in k]
for key in energyKeys:
h[key] = smartHist(key, r, bins)
# Zenith distribution
print 'Calculating zenith distributions...'
bins = np.linspace(0.8, 1, 81)
zenithKeys = [k for k in keyList if 'zenith' in k]
for key in zenithKeys:
h[key] = smartHist(key, cosz, bins)
# Core distribution
print 'Calculating core position distributions...'
bins = np.linspace(0, 700, 141)
coreKeys = [k for k in keyList if 'core' in k]
for key in coreKeys:
h[key] = smartHist(key, dist, bins)
print 'Saving...'
np.save(outfile, h)
def skyWriter(config, file, outfile):
nside = 64
npix = hp.nside2npix(nside)
# Binning for various parameters
sbins = np.arange(npix+1, dtype=int)
ebins = np.arange(5, 9.501, 0.05)
dbins = np.linspace(0, 700, 141)
lbins = np.linspace(-20, 20, 151)
# Get desired information from hdf5 file
d = hdf5extractor(config, file)
c0 = d['cuts']['llh']
r = np.log10(d['ML_energy'])[c0]
fit = zfix(d['zenith'], bintype='logdist')[c0]
w = d['weights'][c0]
xy = np.sqrt(d['ML_x']**2 + d['ML_y']**2)[c0]
dllh = (d['fLLH'] - d['pLLH'])[c0]
# Bin in sky
#zen = np.pi - d['zenith'][c0]
#azi = d['azimuth'][c0]
dec = d['dec'][c0]
ra = d['ra'][c0]
x = hp.ang2pix(nside, dec, ra)
# Energy cut
ecut = (r >= 6.2)
p = {'weights':w}
q = {}
q['energy_w'] = np.histogram2d(x, r-fit, bins=(sbins,ebins), **p)[0]
q['dist_w'] = np.histogram2d(x, xy, bins=(sbins,dbins), **p)[0]
q['llh_w'] = np.histogram2d(x, dllh, bins=(sbins,lbins), **p)[0]
p['weights'] = w**2
q['energy_err_w'] = np.histogram2d(x, r-fit, bins=(sbins,ebins), **p)[0]
q['dist_err_w'] = np.histogram2d(x, xy, bins=(sbins,dbins), **p)[0]
q['llh_err_w'] = np.histogram2d(x, dllh, bins=(sbins,lbins), **p)[0]
# Energy cut versions
q['llhcut_w'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins),
weights=w[ecut])[0]
q['llhcut_err_w'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins),
weights=(w[ecut])**2)[0]
q['energy'] = np.histogram2d(x, r-fit, bins=(sbins,ebins))[0]
q['dist'] = np.histogram2d(x, xy, bins=(sbins,dbins))[0]
q['llh'] = np.histogram2d(x, dllh, bins=(sbins,lbins))[0]
q['energy_err'] = np.histogram2d(x, r-fit, bins=(sbins,ebins))[0]
q['dist_err'] = np.histogram2d(x, xy, bins=(sbins,dbins))[0]
q['llh_err'] = np.histogram2d(x, dllh, bins=(sbins,lbins))[0]
# Energy cut versions
q['llhcut'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins))[0]
q['llhcut_err'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins))[0]
np.save(outfile, q)
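skyWriter reduces each event to a HEALPix pixel index and then histograms per-pixel quantities, so every 2-D histogram has one row per sky pixel. A compact sketch of that binning step; the event coordinates are random numbers purely for illustration, with theta/phi standing in for the dec/ra arrays above.

import numpy as np
import healpy as hp

nside = 64
npix = hp.nside2npix(nside)                   # 49152 pixels at nside=64
sbins = np.arange(npix + 1, dtype=int)
ebins = np.arange(5, 9.501, 0.05)
theta = np.random.uniform(0, np.pi, 1000)     # colatitude
phi = np.random.uniform(0, 2 * np.pi, 1000)   # longitude
pix = hp.ang2pix(nside, theta, phi)
energy = np.random.normal(6.5, 0.5, 1000)
sky_hist = np.histogram2d(pix, energy, bins=(sbins, ebins))[0]
print(sky_hist.shape)                         # (49152, 90)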
if __name__ == "__main__":
p = argparse.ArgumentParser(description='Converts hdf5 file to npy dict')
p.add_argument('-c', '--config', dest='config',
help='Detector configuration [IT73 --> IT81-IV]')
p.add_argument('-f', '--files', dest='files', nargs='*',
help='Input files')
p.add_argument('-o', '--outfiles', dest='outfiles', nargs='*',
help='Output files')
p.add_argument('--sky', dest='sky',
default=False, action='store_true',
help='Write the sky histograms')
args = p.parse_args()
for infile, outfile in zip(args.files, args.outfiles):
| if args.sky:
skyWriter(args.config, infile, outfile)
else:
histWriter(args.config, infile, outfile) | conditional_block |
|
save_hist.py | for comp in compList:
r = rDict[comp]
for value in ['x','y','energy']:
q[r+'ML_'+value] = t.getNode('/ShowerLLH_'+comp).col(value)
q[r+'LLH'] = t.getNode('/ShowerLLHParams_'+comp).col('maxLLH')
# Timing
mjd_day = t.root.I3EventHeader.col('time_start_mjd_day')
mjd_sec = t.root.I3EventHeader.col('time_start_mjd_sec')
mjd_ns = t.root.I3EventHeader.col('time_start_mjd_ns')
q['mjd'] = np.zeros(len(mjd_day), dtype=np.float64)
for i in range(len(mjd_day)):
day = int(mjd_day[i])
sec = int(mjd_sec[i])
ns = int(mjd_ns[i])
t1.SetTime(day, sec, ns)
q['mjd'][i] = t1.GetMJD()
# Event ID
run = t.root.I3EventHeader.col('Run')
event = t.root.I3EventHeader.col('Event')
subevent = t.root.I3EventHeader.col('SubEvent')
eventIDs = []
for i in range(len(run)):
eventIDs += ['%s_%s_%s' % (run[i], event[i], subevent[i])]
q['eventIDs'] = np.asarray(eventIDs)
# Condition and prescale passed (filter[condition, prescale])
# For notes on weights see bottom of file
filtermask = df.filter_mask(config)
filternames = df.filter_names(config)
f = {}
for fname in filternames:
f[fname] = t.getNode('/'+filtermask).col(fname)
f[fname] = f[fname][:,0].astype(float)
filterArray = np.array([f[fname] * df.it_weights(fname)
for fname in f.keys()])
filterArray[filterArray == 0] = 100.
q['weights'] = np.amin(filterArray, axis=0)
# Other reconstruction info
q['NStations'] = t.root.NStations.col('value')
t.close()
# Laputop values
#for key in ['x','y','zenith','azimuth','s125','e_proton','e_iron','beta']:
# arrays += ['lap_'+key]
# Get Laputop info
#for value in ['x', 'y', 'zenith', 'azimuth']:
# q['lap_'+value] = t.root.Laputop.col(value)
#for value in ['s125', 'e_proton', 'e_iron', 'beta']:
# q['lap_'+value] = t.root.LaputopParams.col(value)
# Get most likely composition
rList = [rDict[comp] for comp in compList]
full_llhs = np.array([q[r+'LLH'] for r in rList])
max_llh = np.amax(full_llhs, axis=0)
q['llh_comp'] = np.array(['' for i in range(len(q['pLLH']))])
for r in rList:
q['llh_comp'][q[r+'LLH'] == max_llh] = r
for key in ['x', 'y', 'energy']:
q['ML_'+key] = np.array([q[r+'ML_'+key][i]
for i, r in enumerate(q['llh_comp'])])
# Check for multiple most-likely compositions (mark as bad)
badVals = np.sum(full_llhs == max_llh, axis=0)
badVals = (badVals-1).astype('bool')
q['llh_comp'][badVals] = ''
for key in ['x','y','energy']:
q['ML_'+key][badVals] = np.nan
# Calculate sky positions
q['dec'], q['ra'] = getDecRA(q, verbose=False)
# Containment cut
it_geo = df.it_geo(config)
q['cuts'] = {}
q['cuts']['llh'] = inPoly(q['ML_x'], q['ML_y'], 0, config=it_geo)
return q
def histWriter(config, file, outfile):
# Bin values
eList = ['p','h','o','f']
decbins = ['0-12','12-24','24-40']
rabins = ['0-60','60-120','120-180','180-240','240-300','300-360']
# Build general list of key names to write
keyList = []
keyList += ['energy','energy_w','energy_z','energy_w_z']
keyList += ['zenith','zenith_w','core','core_w']
keyList += ['%s_err' % k for k in keyList]
# Split by composition
keyList = ['%s_%s' % (k, e) for k in keyList for e in eList]
# Split spatially into 3 dec bins (~12 degrees each) and 6 ra bins
keyList = ['%s_%s_%s' % (k, dec, ra) for k in keyList \
for dec in decbins for ra in rabins]
# Extract information from hdf5 file
q = hdf5extractor(config, file)
c0 = q['cuts']['llh']
r = np.log10(q['ML_energy'])[c0]
cosz = np.cos(q['zenith'])[c0]
dist = np.sqrt(q['ML_x']**2 + q['ML_y']**2)[c0]
fit = zfix(q['zenith'], bintype='logdist')[c0]
w = q['weights'][c0]
# Make cuts
degree = np.pi / 180.
for e in eList:
q[e] = (q['llh_comp'] == e)[c0]
for dec in decbins:
decmin = (180 - float(dec.split('-')[1])) * degree
decmax = (180 - float(dec.split('-')[0])) * degree
q[dec] = ((q['dec'] >= decmin) * (q['dec'] < decmax))[c0]
for ra in rabins:
ramin = float(ra.split('-')[0]) * degree
ramax = float(ra.split('-')[1]) * degree
q[ra] = ((q['ra'] >= ramin) * (q['ra'] < ramax))[c0]
# Method of intelligently producing histograms based on key names
def smartHist(key, x, bins):
tempx = x
wts = None
params = key.split('_')
e, dec, ra = params[-3:]
c1 = q[e] * q[dec] * q[ra]
if 'z' in params:
tempx = x - fit
if 'w' in params:
wts = w[c1]
if 'err' in params:
wts = (w[c1])**2
h0 = np.histogram(tempx[c1], bins=bins, weights=wts)[0]
return h0
# Energy distribution
h = {}
print 'Calculating energy distributions...'
bins = getEbins(reco=True)
energyKeys = [k for k in keyList if 'energy' in k]
for key in energyKeys:
h[key] = smartHist(key, r, bins)
# Zenith distribution
print 'Calculating zenith distributions...'
bins = np.linspace(0.8, 1, 81)
zenithKeys = [k for k in keyList if 'zenith' in k]
for key in zenithKeys:
h[key] = smartHist(key, cosz, bins)
# Core distribution
print 'Calculating core position distributions...'
bins = np.linspace(0, 700, 141)
coreKeys = [k for k in keyList if 'core' in k]
for key in coreKeys:
h[key] = smartHist(key, dist, bins)
print 'Saving...'
np.save(outfile, h)
| rDict = {'proton':'p','helium':'h','oxygen':'o','iron':'f'}
t1 = astro.Time()
print 'Building arrays from %s...' % file
t = tables.openFile(file)
q = {}
# Get reconstructed compositions from list of children in file
children = []
for node in t.walk_nodes('/'):
try: children += [node.name]
except tables.NoSuchNodeError:
continue
children = list(set(children))
compList = [n.split('_')[-1] for n in children if 'ShowerLLH_' in n]
# Get ShowerLLH cuts and info
rrc = t.root.ShowerLLH_proton.col('exists').astype('bool')
for value in ['zenith', 'azimuth']:
q[value] = t.root.ShowerLLH_proton.col(value) | identifier_body |
|
repocachemanager.go | bool
logger log.Logger
cacheClient Client
sync.Mutex
}
func newRepoCacheManager(now time.Time,
repoID image.Name, clientFactory registry.ClientFactory, creds registry.Credentials, repoClientTimeout time.Duration,
burst int, trace bool, logger log.Logger, cacheClient Client) (*repoCacheManager, error) {
client, err := clientFactory.ClientFor(repoID.CanonicalName(), creds)
if err != nil {
return nil, err
}
manager := &repoCacheManager{
now: now,
repoID: repoID,
client: client,
clientTimeout: repoClientTimeout,
burst: burst,
trace: trace,
logger: logger,
cacheClient: cacheClient,
}
return manager, nil
}
// fetchRepository fetches the repository from the cache
func (c *repoCacheManager) fetchRepository() (ImageRepository, error) {
var result ImageRepository
repoKey := NewRepositoryKey(c.repoID.CanonicalName())
bytes, _, err := c.cacheClient.GetKey(repoKey)
if err != nil {
return ImageRepository{}, err
}
if err = json.Unmarshal(bytes, &result); err != nil {
return ImageRepository{}, err
}
return result, nil
}
// getTags gets the tags from the repository
func (c *repoCacheManager) | (ctx context.Context) ([]string, error) {
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
tags, err := c.client.Tags(ctx)
if ctx.Err() == context.DeadlineExceeded {
return nil, c.clientTimeoutError()
}
return tags, err
}
// storeRepository stores the repository in the cache
func (c *repoCacheManager) storeRepository(repo ImageRepository) error {
repoKey := NewRepositoryKey(c.repoID.CanonicalName())
bytes, err := json.Marshal(repo)
if err != nil {
return err
}
return c.cacheClient.SetKey(repoKey, c.now.Add(repoRefresh), bytes)
}
// fetchImagesResult is the result of fetching images from the cache
// invariant: len(imagesToUpdate) == imagesToUpdateRefreshCount + imagesToUpdateMissingCount
type fetchImagesResult struct {
imagesFound map[string]image.Info // images found in the cache
imagesToUpdate []imageToUpdate // images which need to be updated
imagesToUpdateRefreshCount int // number of imagesToUpdate which need updating due to their cache entry expiring
imagesToUpdateMissingCount int // number of imagesToUpdate which need updating due to being missing
}
// fetchImages attempts to fetch the images with the provided tags from the cache.
// It returns the images found, those which require updating and details about
// why they need to be updated.
func (c *repoCacheManager) fetchImages(tags []string) (fetchImagesResult, error) {
images := map[string]image.Info{}
// Create a list of images that need updating
var toUpdate []imageToUpdate
// Counters for reporting what happened
var missing, refresh int
for _, tag := range tags {
if tag == "" {
return fetchImagesResult{}, fmt.Errorf("empty tag in fetched tags")
}
// See if we have the manifest already cached
newID := c.repoID.ToRef(tag)
key := NewManifestKey(newID.CanonicalRef())
bytes, deadline, err := c.cacheClient.GetKey(key)
// If err, then we don't have it yet. Update.
switch {
case err != nil: // by and large these are cache misses, but any error shall count as "not found"
if err != ErrNotCached {
c.logger.Log("warning", "error from cache", "err", err, "ref", newID)
}
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
case len(bytes) == 0:
c.logger.Log("warning", "empty result from cache", "ref", newID)
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
default:
var entry registry.ImageEntry
if err := json.Unmarshal(bytes, &entry); err == nil {
if c.trace {
c.logger.Log("trace", "found cached manifest", "ref", newID, "last_fetched", entry.LastFetched.Format(time.RFC3339), "deadline", deadline.Format(time.RFC3339))
}
if entry.ExcludedReason == "" {
images[tag] = entry.Info
if c.now.After(deadline) {
previousRefresh := minRefresh
lastFetched := entry.Info.LastFetched
if !lastFetched.IsZero() {
previousRefresh = deadline.Sub(lastFetched)
}
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: previousRefresh, previousDigest: entry.Info.Digest})
refresh++
}
} else {
if c.trace {
c.logger.Log("trace", "excluded in cache", "ref", newID, "reason", entry.ExcludedReason)
}
if c.now.After(deadline) {
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: excludedRefresh})
refresh++
}
}
}
}
}
result := fetchImagesResult{
imagesFound: images,
imagesToUpdate: toUpdate,
imagesToUpdateRefreshCount: refresh,
imagesToUpdateMissingCount: missing,
}
return result, nil
}
// updateImages refreshes the cache entries for the images passed. It may not succeed for all images.
// It returns the values stored in cache, the number of images it succeeded for and the number
// of images whose manifest wasn't found in the registry.
func (c *repoCacheManager) updateImages(ctx context.Context, images []imageToUpdate) (map[string]image.Info, int, int) {
// The upper bound for concurrent fetches against a single host is
// w.Burst, so limit the number of fetching goroutines to that.
fetchers := make(chan struct{}, c.burst)
awaitFetchers := &sync.WaitGroup{}
ctxc, cancel := context.WithCancel(ctx)
defer cancel()
var successCount int
var manifestUnknownCount int
var result = map[string]image.Info{}
var warnAboutRateLimit sync.Once
updates:
for _, up := range images {
// to avoid a race condition when accessing it in the goroutine
upCopy := up
select {
case <-ctxc.Done():
break updates
case fetchers <- struct{}{}:
}
awaitFetchers.Add(1)
go func() {
defer func() { awaitFetchers.Done(); <-fetchers }()
ctxcc, cancel := context.WithTimeout(ctxc, c.clientTimeout)
defer cancel()
entry, err := c.updateImage(ctxcc, upCopy)
if err != nil {
if err, ok := errors.Cause(err).(net.Error); (ok && err.Timeout()) || ctxcc.Err() == context.DeadlineExceeded {
// This was due to a context timeout, don't bother logging
return
}
switch {
case strings.Contains(err.Error(), "429"), strings.Contains(err.Error(), "toomanyrequests"):
// abort the image tags fetching if we've been rate limited
warnAboutRateLimit.Do(func() {
c.logger.Log("warn", "aborting image tag fetching due to rate limiting, will try again later")
cancel()
})
case strings.Contains(err.Error(), "manifest unknown"):
// Registry is corrupted, keep going, this manifest may not be relevant for automatic updates
c.Lock()
manifestUnknownCount++
c.Unlock()
c.logger.Log("warn", fmt.Sprintf("manifest for tag %s missing in repository %s", up.ref.Tag, up.ref.Name),
"impact", "flux will fail to auto-release workloads with matching images, ask the repository administrator to fix the inconsistency")
default:
c.logger.Log("err", err, "ref", up.ref)
}
return
}
c.Lock()
successCount++
if entry.ExcludedReason == "" {
result[upCopy.ref.Tag] = entry.Info
}
c.Unlock()
}()
}
awaitFetchers.Wait()
return result, successCount, manifestUnknownCount
}
func (c *repoCacheManager) updateImage(ctx context.Context, update imageToUpdate) (registry.ImageEntry, error) {
imageID := update.ref
if c.trace {
c.logger.Log("trace", "refreshing manifest", "ref", imageID, "previous_refresh", update.previousRefresh.String())
}
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
// Get the image from the remote
entry, err := c.client.Manifest(ctx, imageID.Tag)
if err != nil {
if ctx.Err() == context.DeadlineExceeded {
return registry.ImageEntry{}, c.clientTimeoutError()
}
if _, ok := err.(*image.LabelTimestampFormatError); !ok {
return registry.ImageEntry{}, err
}
c.logger.Log("err", err, "ref", imageID)
}
refresh := update.previousRefresh
reason := ""
switch {
case entry.ExcludedReason != "":
c.logger.Log("excluded | getTags | identifier_name |
repocachemanager.go | bool
logger log.Logger
cacheClient Client
sync.Mutex
}
func newRepoCacheManager(now time.Time,
repoID image.Name, clientFactory registry.ClientFactory, creds registry.Credentials, repoClientTimeout time.Duration,
burst int, trace bool, logger log.Logger, cacheClient Client) (*repoCacheManager, error) {
client, err := clientFactory.ClientFor(repoID.CanonicalName(), creds)
if err != nil {
return nil, err
}
manager := &repoCacheManager{
now: now,
repoID: repoID,
client: client,
clientTimeout: repoClientTimeout,
burst: burst,
trace: trace,
logger: logger,
cacheClient: cacheClient,
}
return manager, nil
}
// fetchRepository fetches the repository from the cache
func (c *repoCacheManager) fetchRepository() (ImageRepository, error) {
var result ImageRepository
repoKey := NewRepositoryKey(c.repoID.CanonicalName())
bytes, _, err := c.cacheClient.GetKey(repoKey)
if err != nil {
return ImageRepository{}, err
}
if err = json.Unmarshal(bytes, &result); err != nil {
return ImageRepository{}, err
}
return result, nil
}
// getTags gets the tags from the repository
func (c *repoCacheManager) getTags(ctx context.Context) ([]string, error) {
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
tags, err := c.client.Tags(ctx)
if ctx.Err() == context.DeadlineExceeded {
return nil, c.clientTimeoutError()
}
return tags, err
}
// storeRepository stores the repository in the cache
func (c *repoCacheManager) storeRepository(repo ImageRepository) error {
repoKey := NewRepositoryKey(c.repoID.CanonicalName())
bytes, err := json.Marshal(repo)
if err != nil {
return err
}
return c.cacheClient.SetKey(repoKey, c.now.Add(repoRefresh), bytes)
}
// fetchImagesResult is the result of fetching images from the cache
// invariant: len(imagesToUpdate) == imagesToUpdateRefreshCount + imagesToUpdateMissingCount
type fetchImagesResult struct {
imagesFound map[string]image.Info // images found in the cache
imagesToUpdate []imageToUpdate // images which need to be updated
imagesToUpdateRefreshCount int // number of imagesToUpdate which need updating due to their cache entry expiring
imagesToUpdateMissingCount int // number of imagesToUpdate which need updating due to being missing
}
// fetchImages attempts to fetch the images with the provided tags from the cache.
// It returns the images found, those which require updating and details about
// why they need to be updated.
func (c *repoCacheManager) fetchImages(tags []string) (fetchImagesResult, error) {
images := map[string]image.Info{}
// Create a list of images that need updating
var toUpdate []imageToUpdate
// Counters for reporting what happened
var missing, refresh int
for _, tag := range tags {
if tag == "" {
return fetchImagesResult{}, fmt.Errorf("empty tag in fetched tags")
}
// See if we have the manifest already cached
newID := c.repoID.ToRef(tag)
key := NewManifestKey(newID.CanonicalRef())
bytes, deadline, err := c.cacheClient.GetKey(key)
// If err, then we don't have it yet. Update.
switch {
case err != nil: // by and large these are cache misses, but any error shall count as "not found"
if err != ErrNotCached {
c.logger.Log("warning", "error from cache", "err", err, "ref", newID)
}
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
case len(bytes) == 0:
c.logger.Log("warning", "empty result from cache", "ref", newID)
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
default:
var entry registry.ImageEntry
if err := json.Unmarshal(bytes, &entry); err == nil {
if c.trace {
c.logger.Log("trace", "found cached manifest", "ref", newID, "last_fetched", entry.LastFetched.Format(time.RFC3339), "deadline", deadline.Format(time.RFC3339))
}
if entry.ExcludedReason == "" {
images[tag] = entry.Info
if c.now.After(deadline) {
previousRefresh := minRefresh
lastFetched := entry.Info.LastFetched
if !lastFetched.IsZero() {
previousRefresh = deadline.Sub(lastFetched)
}
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: previousRefresh, previousDigest: entry.Info.Digest})
refresh++
} | if c.now.After(deadline) {
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: excludedRefresh})
refresh++
}
}
}
}
}
result := fetchImagesResult{
imagesFound: images,
imagesToUpdate: toUpdate,
imagesToUpdateRefreshCount: refresh,
imagesToUpdateMissingCount: missing,
}
return result, nil
}
// updateImages refreshes the cache entries for the images passed. It may not succeed for all images.
// It returns the values stored in cache, the number of images it succeeded for and the number
// of images whose manifest wasn't found in the registry.
func (c *repoCacheManager) updateImages(ctx context.Context, images []imageToUpdate) (map[string]image.Info, int, int) {
// The upper bound for concurrent fetches against a single host is
// w.Burst, so limit the number of fetching goroutines to that.
fetchers := make(chan struct{}, c.burst)
awaitFetchers := &sync.WaitGroup{}
ctxc, cancel := context.WithCancel(ctx)
defer cancel()
var successCount int
var manifestUnknownCount int
var result = map[string]image.Info{}
var warnAboutRateLimit sync.Once
updates:
for _, up := range images {
// to avoid a race condition when accessing it in the goroutine
upCopy := up
select {
case <-ctxc.Done():
break updates
case fetchers <- struct{}{}:
}
awaitFetchers.Add(1)
go func() {
defer func() { awaitFetchers.Done(); <-fetchers }()
ctxcc, cancel := context.WithTimeout(ctxc, c.clientTimeout)
defer cancel()
entry, err := c.updateImage(ctxcc, upCopy)
if err != nil {
if err, ok := errors.Cause(err).(net.Error); (ok && err.Timeout()) || ctxcc.Err() == context.DeadlineExceeded {
// This was due to a context timeout, don't bother logging
return
}
switch {
case strings.Contains(err.Error(), "429"), strings.Contains(err.Error(), "toomanyrequests"):
// abort the image tags fetching if we've been rate limited
warnAboutRateLimit.Do(func() {
c.logger.Log("warn", "aborting image tag fetching due to rate limiting, will try again later")
cancel()
})
case strings.Contains(err.Error(), "manifest unknown"):
// Registry is corrupted, keep going, this manifest may not be relevant for automatic updates
c.Lock()
manifestUnknownCount++
c.Unlock()
c.logger.Log("warn", fmt.Sprintf("manifest for tag %s missing in repository %s", up.ref.Tag, up.ref.Name),
"impact", "flux will fail to auto-release workloads with matching images, ask the repository administrator to fix the inconsistency")
default:
c.logger.Log("err", err, "ref", up.ref)
}
return
}
c.Lock()
successCount++
if entry.ExcludedReason == "" {
result[upCopy.ref.Tag] = entry.Info
}
c.Unlock()
}()
}
awaitFetchers.Wait()
return result, successCount, manifestUnknownCount
}
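updateImages above caps per-host concurrency with a buffered channel used as a token bucket: a goroutine only starts a fetch after claiming a slot and releases it when done. The same idea sketched in Python with a semaphore; the burst value, worker count and fetch body are placeholders, not taken from the Go code.

import concurrent.futures
import threading

BURST = 5                             # counterpart of the registry burst setting
gate = threading.Semaphore(BURST)     # plays the role of the buffered 'fetchers' channel

def fetch_one(tag):
    with gate:                        # at most BURST fetches in flight per host
        return 'manifest-for-%s' % tag

with concurrent.futures.ThreadPoolExecutor(max_workers=32) as pool:
    results = list(pool.map(fetch_one, ['v1.0', 'v1.1', 'latest']))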
func (c *repoCacheManager) updateImage(ctx context.Context, update imageToUpdate) (registry.ImageEntry, error) {
imageID := update.ref
if c.trace {
c.logger.Log("trace", "refreshing manifest", "ref", imageID, "previous_refresh", update.previousRefresh.String())
}
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
// Get the image from the remote
entry, err := c.client.Manifest(ctx, imageID.Tag)
if err != nil {
if ctx.Err() == context.DeadlineExceeded {
return registry.ImageEntry{}, c.clientTimeoutError()
}
if _, ok := err.(*image.LabelTimestampFormatError); !ok {
return registry.ImageEntry{}, err
}
c.logger.Log("err", err, "ref", imageID)
}
refresh := update.previousRefresh
reason := ""
switch {
case entry.ExcludedReason != "":
c.logger.Log("excluded", | } else {
if c.trace {
c.logger.Log("trace", "excluded in cache", "ref", newID, "reason", entry.ExcludedReason)
} | random_line_split |
repocachemanager.go | tags, err := c.client.Tags(ctx)
if ctx.Err() == context.DeadlineExceeded {
return nil, c.clientTimeoutError()
}
return tags, err
}
// storeRepository stores the repository in the cache
func (c *repoCacheManager) storeRepository(repo ImageRepository) error {
repoKey := NewRepositoryKey(c.repoID.CanonicalName())
bytes, err := json.Marshal(repo)
if err != nil {
return err
}
return c.cacheClient.SetKey(repoKey, c.now.Add(repoRefresh), bytes)
}
// fetchImagesResult is the result of fetching images from the cache
// invariant: len(imagesToUpdate) == imagesToUpdateRefreshCount + imagesToUpdateMissingCount
type fetchImagesResult struct {
imagesFound map[string]image.Info // images found in the cache
imagesToUpdate []imageToUpdate // images which need to be updated
imagesToUpdateRefreshCount int // number of imagesToUpdate which need updating due to their cache entry expiring
imagesToUpdateMissingCount int // number of imagesToUpdate which need updating due to being missing
}
// fetchImages attempts to fetch the images with the provided tags from the cache.
// It returns the images found, those which require updating and details about
// why they need to be updated.
func (c *repoCacheManager) fetchImages(tags []string) (fetchImagesResult, error) {
images := map[string]image.Info{}
// Create a list of images that need updating
var toUpdate []imageToUpdate
// Counters for reporting what happened
var missing, refresh int
for _, tag := range tags {
if tag == "" {
return fetchImagesResult{}, fmt.Errorf("empty tag in fetched tags")
}
// See if we have the manifest already cached
newID := c.repoID.ToRef(tag)
key := NewManifestKey(newID.CanonicalRef())
bytes, deadline, err := c.cacheClient.GetKey(key)
// If err, then we don't have it yet. Update.
switch {
case err != nil: // by and large these are cache misses, but any error shall count as "not found"
if err != ErrNotCached {
c.logger.Log("warning", "error from cache", "err", err, "ref", newID)
}
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
case len(bytes) == 0:
c.logger.Log("warning", "empty result from cache", "ref", newID)
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
default:
var entry registry.ImageEntry
if err := json.Unmarshal(bytes, &entry); err == nil {
if c.trace {
c.logger.Log("trace", "found cached manifest", "ref", newID, "last_fetched", entry.LastFetched.Format(time.RFC3339), "deadline", deadline.Format(time.RFC3339))
}
if entry.ExcludedReason == "" {
images[tag] = entry.Info
if c.now.After(deadline) {
previousRefresh := minRefresh
lastFetched := entry.Info.LastFetched
if !lastFetched.IsZero() {
previousRefresh = deadline.Sub(lastFetched)
}
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: previousRefresh, previousDigest: entry.Info.Digest})
refresh++
}
} else {
if c.trace {
c.logger.Log("trace", "excluded in cache", "ref", newID, "reason", entry.ExcludedReason)
}
if c.now.After(deadline) {
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: excludedRefresh})
refresh++
}
}
}
}
}
result := fetchImagesResult{
imagesFound: images,
imagesToUpdate: toUpdate,
imagesToUpdateRefreshCount: refresh,
imagesToUpdateMissingCount: missing,
}
return result, nil
}
// updateImages refreshes the cache entries for the images passed. It may not succeed for all images.
// It returns the values stored in cache, the number of images it succeeded for and the number
// of images whose manifest wasn't found in the registry.
func (c *repoCacheManager) updateImages(ctx context.Context, images []imageToUpdate) (map[string]image.Info, int, int) {
// The upper bound for concurrent fetches against a single host is
// w.Burst, so limit the number of fetching goroutines to that.
fetchers := make(chan struct{}, c.burst)
awaitFetchers := &sync.WaitGroup{}
ctxc, cancel := context.WithCancel(ctx)
defer cancel()
var successCount int
var manifestUnknownCount int
var result = map[string]image.Info{}
var warnAboutRateLimit sync.Once
updates:
for _, up := range images {
// to avoid a race condition when accessing it in the goroutine
upCopy := up
select {
case <-ctxc.Done():
break updates
case fetchers <- struct{}{}:
}
awaitFetchers.Add(1)
go func() {
defer func() { awaitFetchers.Done(); <-fetchers }()
ctxcc, cancel := context.WithTimeout(ctxc, c.clientTimeout)
defer cancel()
entry, err := c.updateImage(ctxcc, upCopy)
if err != nil {
if err, ok := errors.Cause(err).(net.Error); (ok && err.Timeout()) || ctxcc.Err() == context.DeadlineExceeded {
// This was due to a context timeout, don't bother logging
return
}
switch {
case strings.Contains(err.Error(), "429"), strings.Contains(err.Error(), "toomanyrequests"):
// abort the image tags fetching if we've been rate limited
warnAboutRateLimit.Do(func() {
c.logger.Log("warn", "aborting image tag fetching due to rate limiting, will try again later")
cancel()
})
case strings.Contains(err.Error(), "manifest unknown"):
// Registry is corrupted, keep going, this manifest may not be relevant for automatic updates
c.Lock()
manifestUnknownCount++
c.Unlock()
c.logger.Log("warn", fmt.Sprintf("manifest for tag %s missing in repository %s", up.ref.Tag, up.ref.Name),
"impact", "flux will fail to auto-release workloads with matching images, ask the repository administrator to fix the inconsistency")
default:
c.logger.Log("err", err, "ref", up.ref)
}
return
}
c.Lock()
successCount++
if entry.ExcludedReason == "" {
result[upCopy.ref.Tag] = entry.Info
}
c.Unlock()
}()
}
awaitFetchers.Wait()
return result, successCount, manifestUnknownCount
}
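updateImage, defined just below, adapts how often a manifest is re-fetched: the refresh interval doubles when the digest is unchanged and halves when the tag has moved, clamped by clipRefresh. The clamp bounds are defined elsewhere in the Go file, so the Python model below uses assumed minute values purely to show the policy.

MIN_REFRESH = 5          # assumed lower bound, minutes
MAX_REFRESH = 8 * 60     # assumed upper bound, minutes

def clip_refresh(minutes):
    return max(MIN_REFRESH, min(MAX_REFRESH, minutes))

def next_refresh(previous, digest_unchanged):
    # back off while nothing changes, poll faster when the tag was re-pushed
    return clip_refresh(previous * 2 if digest_unchanged else previous / 2)

print(next_refresh(60, True), next_refresh(60, False))   # 120 30.0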
func (c *repoCacheManager) updateImage(ctx context.Context, update imageToUpdate) (registry.ImageEntry, error) {
imageID := update.ref
if c.trace {
c.logger.Log("trace", "refreshing manifest", "ref", imageID, "previous_refresh", update.previousRefresh.String())
}
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
// Get the image from the remote
entry, err := c.client.Manifest(ctx, imageID.Tag)
if err != nil {
if ctx.Err() == context.DeadlineExceeded {
return registry.ImageEntry{}, c.clientTimeoutError()
}
if _, ok := err.(*image.LabelTimestampFormatError); !ok {
return registry.ImageEntry{}, err
}
c.logger.Log("err", err, "ref", imageID)
}
refresh := update.previousRefresh
reason := ""
switch {
case entry.ExcludedReason != "":
c.logger.Log("excluded", entry.ExcludedReason, "ref", imageID)
refresh = excludedRefresh
reason = "image is excluded"
case update.previousDigest == "":
entry.Info.LastFetched = c.now
refresh = update.previousRefresh
reason = "no prior cache entry for image"
case entry.Info.Digest == update.previousDigest:
entry.Info.LastFetched = c.now
refresh = clipRefresh(refresh * 2)
reason = "image digest is same"
default: // i.e., not excluded, but the digests differ -> the tag was moved
entry.Info.LastFetched = c.now
refresh = clipRefresh(refresh / 2)
reason = "image digest is different"
}
if c.trace {
c.logger.Log("trace", "caching manifest", "ref", imageID, "last_fetched", c.now.Format(time.RFC3339), "refresh", refresh.String(), "reason", reason)
}
key := NewManifestKey(imageID.CanonicalRef())
// Write back to memcached
val, err := json.Marshal(entry)
if err != nil {
return registry.ImageEntry{}, err
}
err = c.cacheClient.SetKey(key, c.now.Add(refresh), val)
if err != nil {
return registry.ImageEntry{}, err
}
return entry, nil
}
func (r *repoCacheManager) clientTimeoutError() error | {
return fmt.Errorf("client timeout (%s) exceeded", r.clientTimeout)
} | identifier_body |
|
repocachemanager.go | func (c *repoCacheManager) getTags(ctx context.Context) ([]string, error) {
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
tags, err := c.client.Tags(ctx)
if ctx.Err() == context.DeadlineExceeded {
return nil, c.clientTimeoutError()
}
return tags, err
}
// storeRepository stores the repository in the cache
func (c *repoCacheManager) storeRepository(repo ImageRepository) error {
repoKey := NewRepositoryKey(c.repoID.CanonicalName())
bytes, err := json.Marshal(repo)
if err != nil {
return err
}
return c.cacheClient.SetKey(repoKey, c.now.Add(repoRefresh), bytes)
}
// fetchImagesResult is the result of fetching images from the cache
// invariant: len(imagesToUpdate) == imagesToUpdateRefreshCount + imagesToUpdateMissingCount
type fetchImagesResult struct {
imagesFound map[string]image.Info // images found in the cache
imagesToUpdate []imageToUpdate // images which need to be updated
imagesToUpdateRefreshCount int // number of imagesToUpdate which need updating due to their cache entry expiring
imagesToUpdateMissingCount int // number of imagesToUpdate which need updating due to being missing
}
// fetchImages attempts to fetch the images with the provided tags from the cache.
// It returns the images found, those which require updating and details about
// why they need to be updated.
func (c *repoCacheManager) fetchImages(tags []string) (fetchImagesResult, error) {
images := map[string]image.Info{}
// Create a list of images that need updating
var toUpdate []imageToUpdate
// Counters for reporting what happened
var missing, refresh int
for _, tag := range tags {
if tag == "" {
return fetchImagesResult{}, fmt.Errorf("empty tag in fetched tags")
}
// See if we have the manifest already cached
newID := c.repoID.ToRef(tag)
key := NewManifestKey(newID.CanonicalRef())
bytes, deadline, err := c.cacheClient.GetKey(key)
// If err, then we don't have it yet. Update.
switch {
case err != nil: // by and large these are cache misses, but any error shall count as "not found"
if err != ErrNotCached {
c.logger.Log("warning", "error from cache", "err", err, "ref", newID)
}
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
case len(bytes) == 0:
c.logger.Log("warning", "empty result from cache", "ref", newID)
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
default:
var entry registry.ImageEntry
if err := json.Unmarshal(bytes, &entry); err == nil {
if c.trace {
c.logger.Log("trace", "found cached manifest", "ref", newID, "last_fetched", entry.LastFetched.Format(time.RFC3339), "deadline", deadline.Format(time.RFC3339))
}
if entry.ExcludedReason == "" {
images[tag] = entry.Info
if c.now.After(deadline) {
previousRefresh := minRefresh
lastFetched := entry.Info.LastFetched
if !lastFetched.IsZero() {
previousRefresh = deadline.Sub(lastFetched)
}
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: previousRefresh, previousDigest: entry.Info.Digest})
refresh++
}
} else {
if c.trace {
c.logger.Log("trace", "excluded in cache", "ref", newID, "reason", entry.ExcludedReason)
}
if c.now.After(deadline) {
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: excludedRefresh})
refresh++
}
}
}
}
}
result := fetchImagesResult{
imagesFound: images,
imagesToUpdate: toUpdate,
imagesToUpdateRefreshCount: refresh,
imagesToUpdateMissingCount: missing,
}
return result, nil
}
// updateImages refreshes the cache entries for the images passed. It may not succeed for all images.
// It returns the values stored in cache, the number of images it succeeded for and the number
// of images whose manifest wasn't found in the registry.
func (c *repoCacheManager) updateImages(ctx context.Context, images []imageToUpdate) (map[string]image.Info, int, int) {
// The upper bound for concurrent fetches against a single host is
// w.Burst, so limit the number of fetching goroutines to that.
fetchers := make(chan struct{}, c.burst)
awaitFetchers := &sync.WaitGroup{}
ctxc, cancel := context.WithCancel(ctx)
defer cancel()
var successCount int
var manifestUnknownCount int
var result = map[string]image.Info{}
var warnAboutRateLimit sync.Once
updates:
for _, up := range images {
// to avoid a race condition when accessing it in the goroutine
upCopy := up
select {
case <-ctxc.Done():
break updates
case fetchers <- struct{}{}:
}
awaitFetchers.Add(1)
go func() {
defer func() { awaitFetchers.Done(); <-fetchers }()
ctxcc, cancel := context.WithTimeout(ctxc, c.clientTimeout)
defer cancel()
entry, err := c.updateImage(ctxcc, upCopy)
if err != nil {
if err, ok := errors.Cause(err).(net.Error); (ok && err.Timeout()) || ctxcc.Err() == context.DeadlineExceeded {
// This was due to a context timeout, don't bother logging
return
}
switch {
case strings.Contains(err.Error(), "429"), strings.Contains(err.Error(), "toomanyrequests"):
// abort the image tags fetching if we've been rate limited
warnAboutRateLimit.Do(func() {
c.logger.Log("warn", "aborting image tag fetching due to rate limiting, will try again later")
cancel()
})
case strings.Contains(err.Error(), "manifest unknown"):
// Registry is corrupted, keep going, this manifest may not be relevant for automatic updates
c.Lock()
manifestUnknownCount++
c.Unlock()
c.logger.Log("warn", fmt.Sprintf("manifest for tag %s missing in repository %s", up.ref.Tag, up.ref.Name),
"impact", "flux will fail to auto-release workloads with matching images, ask the repository administrator to fix the inconsistency")
default:
c.logger.Log("err", err, "ref", up.ref)
}
return
}
c.Lock()
successCount++
if entry.ExcludedReason == "" {
result[upCopy.ref.Tag] = entry.Info
}
c.Unlock()
}()
}
awaitFetchers.Wait()
return result, successCount, manifestUnknownCount
}
func (c *repoCacheManager) updateImage(ctx context.Context, update imageToUpdate) (registry.ImageEntry, error) {
imageID := update.ref
if c.trace {
c.logger.Log("trace", "refreshing manifest", "ref", imageID, "previous_refresh", update.previousRefresh.String())
}
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
// Get the image from the remote
entry, err := c.client.Manifest(ctx, imageID.Tag)
if err != nil {
if ctx.Err() == context.DeadlineExceeded {
return registry.ImageEntry{}, c.clientTimeoutError()
}
if _, ok := err.(*image.LabelTimestampFormatError); !ok {
return registry.ImageEntry{}, err
}
c.logger.Log("err", err, "ref", imageID)
}
refresh := update.previousRefresh
reason := ""
switch {
case entry.ExcludedReason != "":
c.logger.Log("excluded", entry.ExcludedReason, "ref", imageID)
refresh = excludedRefresh
reason = "image is excluded"
case update.previousDigest == "":
entry.Info.LastFetched = c.now
refresh = update.previousRefresh
reason = "no prior cache entry for image"
case entry.Info.Digest == update.previousDigest:
entry.Info.LastFetched = c.now
refresh = clipRefresh(refresh * 2)
reason = "image digest is same"
default: // i.e., not excluded, but the digests differ -> the tag was moved
entry.Info.LastFetched = c.now
refresh = clipRefresh(refresh / 2)
reason = "image digest is different"
}
if c.trace {
c.logger.Log("trace", "caching manifest", "ref", imageID, "last_fetched", c.now.Format(time.RFC3339), "refresh", refresh.String(), "reason", reason)
}
key := NewManifestKey(imageID.CanonicalRef())
// Write back to memcached
val, err := json.Marshal(entry)
if err != nil {
return registry.ImageEntry{}, err
}
err = c.cacheClient.SetKey(key, c.now.Add(refresh), val)
if err != nil | {
return registry.ImageEntry{}, err
} | conditional_block |
|
sdss_sqldata.py | 1], 'f_SDSS_z')
sql.rename_column(sqlc[11-1], 'e_SDSS_z')
sql['#id'] = sql['#id'].astype('str')
targets = sql['#id']
sqlc = sql.colnames
#%% FAST input
os.chdir(path_lib)
default = 'hdfn_fs99'
ftrinfo = open('FILTER.RES.latest.info', 'r').readlines() # https://github.com/gbrammer/eazy-photoz
translate = ascii.read('translate.cat') # write down manually
filters = [f for f in sqlc if f.startswith('f_')]
ftrtbl = Table()
for f in filters:
if f not in translate['filter']:
print("Warning: Filter name '{}' is not defined in your translate file.".format(f))
else:
linenum = int(translate[translate['filter']==f]['lines'][0][1:])
lambda_c = float(ftrinfo[linenum-1].split('lambda_c= ')[-1].split(' ')[0])
dummy = Table([[f], [lambda_c]], names=['filter', 'lambda_c'])
ftrtbl = vstack([ftrtbl, dummy])
# catalog file
os.chdir('{}/{}'.format(path_input, sdssdate))
sqlcat = copy.deepcopy(sql)
if mode == 'spec':
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j==1:
sqlcat[i][j] = sql[i][j]
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
else:
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
sqlcat.write('{}.cat'.format(sdssdate), format='ascii', delimiter='\t', overwrite=True)
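The loop above converts SDSS AB magnitudes and their errors into the flux units FAST expects via the atl.AB2Jy / atl.eAB2Jy helpers, mapping -9999 entries to the -99 sentinel. The conversion itself is the standard AB zero-point relation; the sketch below assumes fluxes in microjansky, which may differ from the unit convention of the helper module and the catalog's AB zero point.

import numpy as np

def ab_to_ujy(mag_ab):
    # f[uJy] = 10**((23.9 - m_AB) / 2.5)
    return 10 ** ((23.9 - mag_ab) / 2.5)

def ab_err_to_ujy(mag_ab, mag_err):
    # first-order error propagation from magnitude to flux
    return ab_to_ujy(mag_ab) * np.log(10) / 2.5 * mag_err

print(round(ab_to_ujy(20.0), 2))   # 36.31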
# parameter file
with open('{}/{}.param'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.param'.format(sdssdate), 'w') as p:
for line in contents:
if line.startswith('CATALOG'):
|
# elif line.startswith('N_SIM'):
# p.write(line.replace('0', '100'))
elif line.startswith('RESOLUTION'):
p.write(line.replace("= 'hr'", "= 'lr'"))
# elif line.startswith('NO_MAX_AGE'):
# p.write(line.replace('= 0', '= 1'))
else:
p.write(line)
f.close()
p.close()
# translate file
with open('{}/{}.translate'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.translate'.format(sdssdate), 'w') as t:
for line in contents:
t.write(line)
f.close()
t.close()
#%% FAST output
def absolmag(m, z):
dist = np.array(cosmo.luminosity_distance(z))
return m - 5*np.log10(dist) - 25
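absolmag is the usual distance modulus, M = m - 5*log10(d_L/Mpc) - 25, with the luminosity distance taken from the astropy cosmology object. A quick standalone check at the upper redshift of the spectroscopic volume-limited cut; the cosmological parameters here are assumed, since the script's cosmo object is defined elsewhere.

import numpy as np
from astropy.cosmology import FlatLambdaCDM

cosmo_demo = FlatLambdaCDM(H0=70, Om0=0.3)             # assumed parameters
d_L = cosmo_demo.luminosity_distance(0.05).value       # ~222 Mpc
M_r = 17.5 - 5 * np.log10(d_L) - 25                    # ~ -19.2 for an r=17.5 galaxy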
# input data = sql
os.chdir(path_output+sdssdate)
if mode == 'spec':
specin = copy.deepcopy(sql)
specfout = ascii.read('{}.fout'.format(sdssdate), header_start=16)
passive = specfout[10**specfout['lssfr']*1e9 < np.array(1/(3*cosmo.age(specfout['z'])))]
passive_r = []
passive_z = []
for i in range(len(passive)):
j = passive['id'][i]
passive_r.append(specin['f_SDSS_r'][j-1])
passive_z.append(specin['z_spec'][j-1])
passive_R = absolmag(passive_r, passive_z)
passive['rmag'] = passive_r
passive['z_spec'] = passive_z
passive['rmag(ab)'] = passive_R
else:
photin = copy.deepcopy(sql)
photzout = ascii.read('{}.zout'.format(sdssdate))
photfout = ascii.read('{}.fout'.format(sdssdate), header_start=17)
phot_R = absolmag(photin['f_SDSS_r'], photzout['z_m1'])
photfout['z_phot'] = photzout['z_m1']
# photfout['z_phot'] = photzout[:37578]['z_m1']
photfout['rmag(ab)']= phot_R#[:37578]
#%% volume limited sample
vlspec = passive[passive['z_spec']>0.03]; vlspec = vlspec[vlspec['z_spec']<0.05]; vlspec = vlspec[vlspec['rmag(ab)']<-19]
vlphot = photfout[photfout['z_phot']>0.06]; vlphot = vlphot[vlphot['z_phot']<0.12]; vlphot = vlphot[vlphot['rmag(ab)']<-17]
#%% priority
g1 = 0.701
g2 = 0.356
g3 = 0.411
g4 = -1.968 # Artale+19, BNS, z=0.1
vlspec['prior'] = g1*vlspec['lmass']+g2*vlspec['lsfr']+g3*vlspec['metal']+g4
vlphot['prior'] = g1*vlphot['lmass']+g2*vlphot['lsfr']+g3*vlphot['metal']+g4
vlphot.sort('prior')
vlspec.sort('prior')
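With the Artale+19 BNS coefficients above, the priority is a linear score in log stellar mass, log SFR and metallicity as read from the FAST .fout columns. A worked example with hypothetical galaxy values:

g1, g2, g3, g4 = 0.701, 0.356, 0.411, -1.968   # Artale+19, BNS, z=0.1
lmass, lsfr, metal = 10.8, -0.5, 0.02          # hypothetical galaxy
prior = g1 * lmass + g2 * lsfr + g3 * metal + g4
print(round(prior, 3))                         # 5.433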
#%%
def Mr2logM(Mr):
gr = 0.88
return -0.39*Mr+1.05*gr+1.60
# Define a closure function to register as a callback
def convert(ax_Mr):
"""
Update the second axis according to the first axis.
"""
y1, y2 = ax_Mr.get_ylim()
ax_lM.set_ylim(Mr2logM(y1), Mr2logM(y2))
ax_lM.figure.canvas.draw()
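The convert callback keeps the right-hand stellar-mass axis locked to the left-hand M_r axis: whenever the M_r limits change, the twin axis is rescaled through Mr2logM. The same matplotlib pattern in isolation, with a generic monotonic conversion standing in for Mr2logM:

import matplotlib.pyplot as plt

def to_other_units(v):
    return -0.39 * v + 2.52          # any monotonic mapping works here

fig, ax = plt.subplots()
ax2 = ax.twinx()

def sync_axes(ax_main):
    y1, y2 = ax_main.get_ylim()
    ax2.set_ylim(to_other_units(y1), to_other_units(y2))
    ax2.figure.canvas.draw()

ax.callbacks.connect("ylim_changed", sync_axes)
ax.set_ylim(-16, -23)                # ax2 follows automatically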
# plt.rcParams.update({'font.size': 14})
fig, axs = plt.subplots(2, 1, figsize=(9,12))
plt.rcParams.update({'font.size': 18})
ax_Mr = axs[1]
ax_lM = ax_Mr.twinx()
# ax_Mr.set_title('SDSS DR12 Galaxies Distribution')
# automatically update ylim of ax2 when ylim of ax1 changes.
ax_Mr.callbacks.connect("ylim_changed", convert)
ax_Mr.scatter(specin['z_spec'], absolmag(specin['f_SDSS_r'], specin['z_spec']), c='grey', alpha=0.2, label='Galaxies (Spectroscopy)')# (n={})'.format(len(specin)))
ax_Mr.scatter(passive_z, absolmag(passive_r, passive_z), c='crimson', alpha=0.2, label='Passive Galaxies')# (n={})'.format(len(passive)))
# ax_Mr.hlines(-21, 0.07, 0.13, linestyle='--')#, label='Volume Limited Samples')
# ax_Mr.vlines(0.07, -21, -50, linestyle='--')
# ax_Mr.vlines(0.13, -21, -50, linestyle='--')
ax_Mr.hlines(-19, 0.03, 0.05, linestyle='--', label='Volume Limited Samples')
ax_Mr.vlines(0.03, -19, -50, linestyle='--')
ax_Mr.vlines( | p.write(line.replace(default, sdssdate)) | conditional_block |
sdss_sqldata.py | 1], 'f_SDSS_z')
sql.rename_column(sqlc[11-1], 'e_SDSS_z')
sql['#id'] = sql['#id'].astype('str')
targets = sql['#id']
sqlc = sql.colnames
#%% FAST input
os.chdir(path_lib)
default = 'hdfn_fs99'
ftrinfo = open('FILTER.RES.latest.info', 'r').readlines() # https://github.com/gbrammer/eazy-photoz
translate = ascii.read('translate.cat') # write down manually
filters = [f for f in sqlc if f.startswith('f_')]
ftrtbl = Table()
for f in filters:
if f not in translate['filter']:
print("Warning: Filter name '{}' is not defined in your translate file.".format(f))
else:
linenum = int(translate[translate['filter']==f]['lines'][0][1:])
lambda_c = float(ftrinfo[linenum-1].split('lambda_c= ')[-1].split(' ')[0])
dummy = Table([[f], [lambda_c]], names=['filter', 'lambda_c'])
ftrtbl = vstack([ftrtbl, dummy])
# catalog file
os.chdir('{}/{}'.format(path_input, sdssdate))
sqlcat = copy.deepcopy(sql)
if mode == 'spec':
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j==1:
sqlcat[i][j] = sql[i][j]
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
else:
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
sqlcat.write('{}.cat'.format(sdssdate), format='ascii', delimiter='\t', overwrite=True)
# parameter file
with open('{}/{}.param'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.param'.format(sdssdate), 'w') as p:
for line in contents:
if line.startswith('CATALOG'):
p.write(line.replace(default, sdssdate))
# elif line.startswith('N_SIM'):
# p.write(line.replace('0', '100'))
elif line.startswith('RESOLUTION'):
p.write(line.replace("= 'hr'", "= 'lr'"))
# elif line.startswith('NO_MAX_AGE'):
# p.write(line.replace('= 0', '= 1'))
else:
p.write(line)
f.close()
p.close()
# translate file
with open('{}/{}.translate'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.translate'.format(sdssdate), 'w') as t:
for line in contents:
t.write(line)
f.close()
t.close()
#%% FAST output
def absolmag(m, z):
dist = np.array(cosmo.luminosity_distance(z))
return m - 5*np.log10(dist) - 25
# input data = sql
os.chdir(path_output+sdssdate)
if mode == 'spec':
specin = copy.deepcopy(sql)
specfout = ascii.read('{}.fout'.format(sdssdate), header_start=16)
passive = specfout[10**specfout['lssfr']*1e9 < np.array(1/(3*cosmo.age(specfout['z'])))]
passive_r = []
passive_z = []
for i in range(len(passive)):
j = passive['id'][i]
passive_r.append(specin['f_SDSS_r'][j-1])
passive_z.append(specin['z_spec'][j-1])
passive_R = absolmag(passive_r, passive_z)
passive['rmag'] = passive_r
passive['z_spec'] = passive_z
passive['rmag(ab)'] = passive_R
else:
photin = copy.deepcopy(sql)
photzout = ascii.read('{}.zout'.format(sdssdate))
photfout = ascii.read('{}.fout'.format(sdssdate), header_start=17)
phot_R = absolmag(photin['f_SDSS_r'], photzout['z_m1'])
photfout['z_phot'] = photzout['z_m1']
# photfout['z_phot'] = photzout[:37578]['z_m1']
photfout['rmag(ab)']= phot_R#[:37578]
#%% volume limited sample
vlspec = passive[passive['z_spec']>0.03]; vlspec = vlspec[vlspec['z_spec']<0.05]; vlspec = vlspec[vlspec['rmag(ab)']<-19]
vlphot = photfout[photfout['z_phot']>0.06]; vlphot = vlphot[vlphot['z_phot']<0.12]; vlphot = vlphot[vlphot['rmag(ab)']<-17]
#%% priority
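# Priority score: log-linear combination of stellar mass, SFR and metallicity using the Artale+19
# BNS-host coefficients (z=0.1); both volume-limited samples are sorted by this score below.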
g1 = 0.701
g2 = 0.356
g3 = 0.411
g4 = -1.968 # Artale+19, BNS, z=0.1
vlspec['prior'] = g1*vlspec['lmass']+g2*vlspec['lsfr']+g3*vlspec['metal']+g4
vlphot['prior'] = g1*vlphot['lmass']+g2*vlphot['lsfr']+g3*vlphot['metal']+g4
vlphot.sort('prior')
vlspec.sort('prior')
#%%
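# Rough log stellar mass from M_r, assuming a fixed g-r colour of 0.88 in a linear colour-based mass-to-light relation.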
def Mr2logM(Mr):
gr = 0.88
return -0.39*Mr+1.05*gr+1.60
# Define a closure function to register as a callback
def convert(ax_Mr):
|
# plt.rcParams.update({'font.size': 14})
fig, axs = plt.subplots(2, 1, figsize=(9,12))
plt.rcParams.update({'font.size': 18})
ax_Mr = axs[1]
ax_lM = ax_Mr.twinx()
# ax_Mr.set_title('SDSS DR12 Galaxies Distribution')
# automatically update ylim of ax2 when ylim of ax1 changes.
ax_Mr.callbacks.connect("ylim_changed", convert)
ax_Mr.scatter(specin['z_spec'], absolmag(specin['f_SDSS_r'], specin['z_spec']), c='grey', alpha=0.2, label='Galaxies (Spectroscopy)')# (n={})'.format(len(specin)))
ax_Mr.scatter(passive_z, absolmag(passive_r, passive_z), c='crimson', alpha=0.2, label='Passive Galaxies')# (n={})'.format(len(passive)))
# ax_Mr.hlines(-21, 0.07, 0.13, linestyle='--')#, label='Volume Limited Samples')
# ax_Mr.vlines(0.07, -21, -50, linestyle='--')
# ax_Mr.vlines(0.13, -21, -50, linestyle='--')
ax_Mr.hlines(-19, 0.03, 0.05, linestyle='--', label='Volume Limited Samples')
ax_Mr.vlines(0.03, -19, -50, linestyle='--')
ax_Mr.vlines( | """
Update second axis according to first axis.
"""
y1, y2 = ax_Mr.get_ylim()
ax_lM.set_ylim(Mr2logM(y1), Mr2logM(y2))
ax_lM.figure.canvas.draw() | identifier_body |
sdss_sqldata.py | = open('FILTER.RES.latest.info', 'r').readlines() # https://github.com/gbrammer/eazy-photoz
translate = ascii.read('translate.cat') # write down manually
filters = [f for f in sqlc if f.startswith('f_')]
ftrtbl = Table()
for f in filters:
if f not in translate['filter']:
print("Warning: Filter name '{}' is not defined in your translate file.".format(f))
else:
linenum = int(translate[translate['filter']==f]['lines'][0][1:])
lambda_c = float(ftrinfo[linenum-1].split('lambda_c= ')[-1].split(' ')[0])
dummy = Table([[f], [lambda_c]], names=['filter', 'lambda_c'])
ftrtbl = vstack([ftrtbl, dummy])
# catalog file
os.chdir('{}/{}'.format(path_input, sdssdate))
sqlcat = copy.deepcopy(sql)
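# Build the FAST catalog: IDs become zero-padded row numbers, AB magnitudes are converted to fluxes
# via atl.AB2Jy / atl.eAB2Jy, and -9999 sentinels are mapped to -99 (in 'spec' mode z_spec is copied through).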
if mode == 'spec':
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j==1:
sqlcat[i][j] = sql[i][j]
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
else:
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
sqlcat.write('{}.cat'.format(sdssdate), format='ascii', delimiter='\t', overwrite=True)
# parameter file
with open('{}/{}.param'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.param'.format(sdssdate), 'w') as p:
for line in contents:
if line.startswith('CATALOG'):
p.write(line.replace(default, sdssdate))
# elif line.startswith('N_SIM'):
# p.write(line.replace('0', '100'))
elif line.startswith('RESOLUTION'):
p.write(line.replace("= 'hr'", "= 'lr'"))
# elif line.startswith('NO_MAX_AGE'):
# p.write(line.replace('= 0', '= 1'))
else:
p.write(line)
f.close()
p.close()
# translate file
with open('{}/{}.translate'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.translate'.format(sdssdate), 'w') as t:
for line in contents:
t.write(line)
f.close()
t.close()
#%% FAST output
def absolmag(m, z):
dist = np.array(cosmo.luminosity_distance(z))
return m - 5*np.log10(dist) - 25
# input data = sql
os.chdir(path_output+sdssdate)
if mode == 'spec':
specin = copy.deepcopy(sql)
specfout = ascii.read('{}.fout'.format(sdssdate), header_start=16)
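# Select passive galaxies: sSFR (10**lssfr per yr, scaled to per Gyr) below 1/(3*t_age) at the galaxy's redshift.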
passive = specfout[10**specfout['lssfr']*1e9 < np.array(1/(3*cosmo.age(specfout['z'])))]
passive_r = []
passive_z = []
for i in range(len(passive)):
j = passive['id'][i]
passive_r.append(specin['f_SDSS_r'][j-1])
passive_z.append(specin['z_spec'][j-1])
passive_R = absolmag(passive_r, passive_z)
passive['rmag'] = passive_r
passive['z_spec'] = passive_z
passive['rmag(ab)'] = passive_R
else:
photin = copy.deepcopy(sql)
photzout = ascii.read('{}.zout'.format(sdssdate))
photfout = ascii.read('{}.fout'.format(sdssdate), header_start=17)
phot_R = absolmag(photin['f_SDSS_r'], photzout['z_m1'])
photfout['z_phot'] = photzout['z_m1']
# photfout['z_phot'] = photzout[:37578]['z_m1']
photfout['rmag(ab)']= phot_R#[:37578]
#%% volume limited sample
vlspec = passive[passive['z_spec']>0.03]; vlspec = vlspec[vlspec['z_spec']<0.05]; vlspec = vlspec[vlspec['rmag(ab)']<-19]
vlphot = photfout[photfout['z_phot']>0.06]; vlphot = vlphot[vlphot['z_phot']<0.12]; vlphot = vlphot[vlphot['rmag(ab)']<-17]
#%% priority
g1 = 0.701
g2 = 0.356
g3 = 0.411
g4 = -1.968 # Artale+19, BNS, z=0.1
vlspec['prior'] = g1*vlspec['lmass']+g2*vlspec['lsfr']+g3*vlspec['metal']+g4
vlphot['prior'] = g1*vlphot['lmass']+g2*vlphot['lsfr']+g3*vlphot['metal']+g4
vlphot.sort('prior')
vlspec.sort('prior')
#%%
def Mr2logM(Mr):
gr = 0.88
return -0.39*Mr+1.05*gr+1.60
# Define a closure function to register as a callback
def convert(ax_Mr):
"""
Update second axis according to first axis.
"""
y1, y2 = ax_Mr.get_ylim()
ax_lM.set_ylim(Mr2logM(y1), Mr2logM(y2))
ax_lM.figure.canvas.draw()
# plt.rcParams.update({'font.size': 14})
fig, axs = plt.subplots(2, 1, figsize=(9,12))
plt.rcParams.update({'font.size': 18})
ax_Mr = axs[1]
ax_lM = ax_Mr.twinx()
# ax_Mr.set_title('SDSS DR12 Galaxies Distribution')
# automatically update ylim of ax2 when ylim of ax1 changes.
ax_Mr.callbacks.connect("ylim_changed", convert)
ax_Mr.scatter(specin['z_spec'], absolmag(specin['f_SDSS_r'], specin['z_spec']), c='grey', alpha=0.2, label='Galaxies (Spectroscopy)')# (n={})'.format(len(specin)))
ax_Mr.scatter(passive_z, absolmag(passive_r, passive_z), c='crimson', alpha=0.2, label='Passive Galaxies')# (n={})'.format(len(passive)))
# ax_Mr.hlines(-21, 0.07, 0.13, linestyle='--')#, label='Volume Limited Samples')
# ax_Mr.vlines(0.07, -21, -50, linestyle='--')
# ax_Mr.vlines(0.13, -21, -50, linestyle='--')
ax_Mr.hlines(-19, 0.03, 0.05, linestyle='--', label='Volume Limited Samples')
ax_Mr.vlines(0.03, -19, -50, linestyle='--')
ax_Mr.vlines(0.05, -19, -50, linestyle='--') | ax_Mr.set_xlim(0.0075, 0.20)
ax_Mr.set_ylim(-15, -24)
ax_Mr.legend(loc='lower right')
# ax_Mr.set_title('Spectroscopically Confirmed')
ax_Mr.set_ylabel('$M_r$') | random_line_split |
|
sdss_sqldata.py | 1], 'f_SDSS_z')
sql.rename_column(sqlc[11-1], 'e_SDSS_z')
sql['#id'] = sql['#id'].astype('str')
targets = sql['#id']
sqlc = sql.colnames
#%% FAST input
os.chdir(path_lib)
default = 'hdfn_fs99'
ftrinfo = open('FILTER.RES.latest.info', 'r').readlines() # https://github.com/gbrammer/eazy-photoz
translate = ascii.read('translate.cat') # write down manually
filters = [f for f in sqlc if f.startswith('f_')]
ftrtbl = Table()
for f in filters:
if f not in translate['filter']:
print("Warning: Filter name '{}' is not defined in your translate file.".format(f))
else:
linenum = int(translate[translate['filter']==f]['lines'][0][1:])
lambda_c = float(ftrinfo[linenum-1].split('lambda_c= ')[-1].split(' ')[0])
dummy = Table([[f], [lambda_c]], names=['filter', 'lambda_c'])
ftrtbl = vstack([ftrtbl, dummy])
# catalog file
os.chdir('{}/{}'.format(path_input, sdssdate))
sqlcat = copy.deepcopy(sql)
if mode == 'spec':
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j==1:
sqlcat[i][j] = sql[i][j]
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
else:
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
sqlcat.write('{}.cat'.format(sdssdate), format='ascii', delimiter='\t', overwrite=True)
# parameter file
with open('{}/{}.param'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.param'.format(sdssdate), 'w') as p:
for line in contents:
if line.startswith('CATALOG'):
p.write(line.replace(default, sdssdate))
# elif line.startswith('N_SIM'):
# p.write(line.replace('0', '100'))
elif line.startswith('RESOLUTION'):
p.write(line.replace("= 'hr'", "= 'lr'"))
# elif line.startswith('NO_MAX_AGE'):
# p.write(line.replace('= 0', '= 1'))
else:
p.write(line)
f.close()
p.close()
# translate file
with open('{}/{}.translate'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.translate'.format(sdssdate), 'w') as t:
for line in contents:
t.write(line)
f.close()
t.close()
#%% FAST output
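# Absolute magnitude via the distance modulus: M = m - 5*log10(d_L/Mpc) - 25, with d_L from the assumed cosmology.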
def absolmag(m, z):
dist = np.array(cosmo.luminosity_distance(z))
return m - 5*np.log10(dist) - 25
# input data = sql
os.chdir(path_output+sdssdate)
if mode == 'spec':
specin = copy.deepcopy(sql)
specfout = ascii.read('{}.fout'.format(sdssdate), header_start=16)
passive = specfout[10**specfout['lssfr']*1e9 < np.array(1/(3*cosmo.age(specfout['z'])))]
passive_r = []
passive_z = []
for i in range(len(passive)):
j = passive['id'][i]
passive_r.append(specin['f_SDSS_r'][j-1])
passive_z.append(specin['z_spec'][j-1])
passive_R = absolmag(passive_r, passive_z)
passive['rmag'] = passive_r
passive['z_spec'] = passive_z
passive['rmag(ab)'] = passive_R
else:
photin = copy.deepcopy(sql)
photzout = ascii.read('{}.zout'.format(sdssdate))
photfout = ascii.read('{}.fout'.format(sdssdate), header_start=17)
phot_R = absolmag(photin['f_SDSS_r'], photzout['z_m1'])
photfout['z_phot'] = photzout['z_m1']
# photfout['z_phot'] = photzout[:37578]['z_m1']
photfout['rmag(ab)']= phot_R#[:37578]
#%% volume limited sample
vlspec = passive[passive['z_spec']>0.03]; vlspec = vlspec[vlspec['z_spec']<0.05]; vlspec = vlspec[vlspec['rmag(ab)']<-19]
vlphot = photfout[photfout['z_phot']>0.06]; vlphot = vlphot[vlphot['z_phot']<0.12]; vlphot = vlphot[vlphot['rmag(ab)']<-17]
#%% priority
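# Priority score: log-linear combination of stellar mass, SFR and metallicity using the Artale+19
# BNS-host coefficients (z=0.1); both volume-limited samples are sorted by this score below.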
g1 = 0.701
g2 = 0.356
g3 = 0.411
g4 = -1.968 # Artale+19, BNS, z=0.1
vlspec['prior'] = g1*vlspec['lmass']+g2*vlspec['lsfr']+g3*vlspec['metal']+g4
vlphot['prior'] = g1*vlphot['lmass']+g2*vlphot['lsfr']+g3*vlphot['metal']+g4
vlphot.sort('prior')
vlspec.sort('prior')
#%%
def Mr2logM(Mr):
gr = 0.88
return -0.39*Mr+1.05*gr+1.60
# Define a closure function to register as a callback
def | (ax_Mr):
"""
Update second axis according to first axis.
"""
y1, y2 = ax_Mr.get_ylim()
ax_lM.set_ylim(Mr2logM(y1), Mr2logM(y2))
ax_lM.figure.canvas.draw()
# plt.rcParams.update({'font.size': 14})
fig, axs = plt.subplots(2, 1, figsize=(9,12))
plt.rcParams.update({'font.size': 18})
ax_Mr = axs[1]
ax_lM = ax_Mr.twinx()
# ax_Mr.set_title('SDSS DR12 Galaxies Distribution')
# automatically update ylim of ax2 when ylim of ax1 changes.
ax_Mr.callbacks.connect("ylim_changed", convert)
ax_Mr.scatter(specin['z_spec'], absolmag(specin['f_SDSS_r'], specin['z_spec']), c='grey', alpha=0.2, label='Galaxies (Spectroscopy)')# (n={})'.format(len(specin)))
ax_Mr.scatter(passive_z, absolmag(passive_r, passive_z), c='crimson', alpha=0.2, label='Passive Galaxies')# (n={})'.format(len(passive)))
# ax_Mr.hlines(-21, 0.07, 0.13, linestyle='--')#, label='Volume Limited Samples')
# ax_Mr.vlines(0.07, -21, -50, linestyle='--')
# ax_Mr.vlines(0.13, -21, -50, linestyle='--')
ax_Mr.hlines(-19, 0.03, 0.05, linestyle='--', label='Volume Limited Samples')
ax_Mr.vlines(0.03, -19, -50, linestyle='--')
ax_Mr.vlines( | convert | identifier_name |
instance.go | FlavorRef string
SystemDiskSizeGB int `json:"vdisk"`
SystemDiskId string
ServerType string
ServerVmType string
EcStatus string
BootVolumeType string
Deleted int
Visible bool
Region string
PortDetail []SInstanceNic
}
func (i *SInstance) GetBillingType() string {
return billing_api.BILLING_TYPE_POSTPAID
}
func (i *SInstance) GetExpiredAt() time.Time {
return time.Time{}
}
func (i *SInstance) GetId() string {
return i.Id
}
func (i *SInstance) GetName() string {
return i.Name
}
func (i *SInstance) GetGlobalId() string {
return i.GetId()
}
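// GetStatus maps the native EcStatus values onto the generic api.VM_* states; anything unrecognized falls back to VM_UNKNOWN.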
func (i *SInstance) GetStatus() string {
switch i.EcStatus {
case "active":
return api.VM_RUNNING
case "suspended", "paused":
return api.VM_SUSPEND
case "build", "rebuild", "resize", "verify_resize", "revert_resize", "password":
return api.VM_STARTING
case "reboot", "hard_reboot":
return api.VM_STOPPING
case "stopped", "shutoff":
return api.VM_READY
case "migrating":
return api.VM_MIGRATING
case "backuping":
return api.VM_BACKUP_CREATING
default:
return api.VM_UNKNOWN
}
}
func (i *SInstance) Refresh() error {
// TODO
return nil
}
func (i *SInstance) IsEmulated() bool {
return false
}
func (self *SInstance) GetBootOrder() string {
return "dcn"
}
func (self *SInstance) GetVga() string {
return "std"
}
func (self *SInstance) GetVdi() string {
return "vnc"
}
func (i *SInstance) GetImage() (*SImage, error) {
if i.image != nil {
return i.image, nil
}
image, err := i.host.zone.region.GetImage(i.ImageRef)
if err != nil {
return nil, err
}
i.image = image
return i.image, nil
}
func (i *SInstance) GetOSType() string {
return i.ImageOsType
}
func (i *SInstance) GetOSName() string {
image, err := i.GetImage()
if err != nil {
return ""
}
return image.OsName
}
func (i *SInstance) GetBios() string {
return "BIOS"
}
func (i *SInstance) GetMachine() string {
return "pc"
}
func (i *SInstance) GetInstanceType() string {
return i.FlavorRef
}
func (self *SInstance) GetSysTags() map[string]string {
data := map[string]string{}
// TODO
lowerOs := self.GetOSType()
priceKey := fmt.Sprintf("%s::%s::%s", self.host.zone.region.GetId(), self.GetInstanceType(), lowerOs)
data["price_key"] = priceKey
data["zone_ext_id"] = self.host.zone.GetGlobalId()
image, _ := self.GetImage()
if image != nil {
meta := image.GetSysTags()
for k, v := range meta {
data[k] = v
}
}
return data
}
func (in *SInstance) GetProjectId() string {
return ""
}
func (in *SInstance) GetIHost() cloudprovider.ICloudHost {
return in.host
}
func (in *SInstance) GetIDisks() ([]cloudprovider.ICloudDisk, error) {
if in.sysDisk == nil {
in.fetchSysDisk()
}
if in.dataDisks == nil {
err := in.fetchDataDisks()
if err != nil {
return nil, err
}
}
return append([]cloudprovider.ICloudDisk{in.sysDisk}, in.dataDisks...), nil
}
func (in *SInstance) GetINics() ([]cloudprovider.ICloudNic, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
inics := make([]cloudprovider.ICloudNic, len(in.PortDetail))
for i := range in.PortDetail {
in.PortDetail[i].instance = in
inics[i] = &in.PortDetail[i]
}
return inics, nil
}
func (in *SInstance) GetIEIP() (cloudprovider.ICloudEIP, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
var eipId string
for i := range in.PortDetail {
if len(in.PortDetail[i].IpId) > 0 {
eipId = in.PortDetail[i].IpId
break
}
}
if len(eipId) == 0 {
return nil, nil
}
return in.host.zone.region.GetEipById(eipId)
}
func (in *SInstance) GetSecurityGroupIds() ([]string, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
ret := sets.NewString()
for i := range in.PortDetail {
for _, group := range in.PortDetail[i].SecurityGroups {
ret.Insert(group.Id)
}
}
return ret.UnsortedList(), nil
}
func (in *SInstance) GetVcpuCount() int {
return in.Vcpu
}
func (in *SInstance) GetVmemSizeMB() int {
return in.Vmemory
}
func (in *SInstance) AssignSecurityGroup(id string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) SetSecurityGroups(ids []string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) GetHypervisor() string {
return api.HYPERVISOR_ECLOUD
}
func (in *SInstance) StartVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) StopVM(ctx context.Context, opts *cloudprovider.ServerStopOptions) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeleteVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) UpdateVM(ctx context.Context, name string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) UpdateUserData(userData string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) RebuildRoot(ctx context.Context, config *cloudprovider.SManagedVMRebuildRootConfig) (string, error) {
return "", cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeployVM(ctx context.Context, name string, username string, password string, publicKey string, deleteKeypair bool, description string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) ChangeConfig(ctx context.Context, config *cloudprovider.SManagedVMChangeConfig) error {
return errors.ErrNotImplemented
}
func (in *SInstance) GetVNCInfo() (jsonutils.JSONObject, error) {
url, err := in.host.zone.region.GetInstanceVNCUrl(in.GetId())
if err != nil {
return nil, err
}
ret := jsonutils.NewDict()
ret.Add(jsonutils.NewString(url), "url")
ret.Add(jsonutils.NewString("ecloud"), "protocol")
ret.Add(jsonutils.NewString(in.GetId()), "instance_id")
return ret, nil
}
func (in *SInstance) AttachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) DetachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) CreateDisk(ctx context.Context, sizeMb int, uuid string, driver string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) Renew(bc billing.SBillingCycle) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) GetError() error |
func (in *SInstance) fetchSysDisk() {
storage, _ := in.host.zone.getStorageByType(api.STORAGE_ECLOUD_SYSTEM)
disk := SDisk{
storage: storage,
ManualAttr: SDiskManualAttr{
IsVirtual: true,
TempalteId: in.ImageRef,
ServerId: in.Id,
},
SCreateTime: in.SCreateTime,
SZoneRegionBase: in.SZoneRegionBase,
ServerId: []string{in.Id},
IsShare: false,
IsDelete: false,
SizeGB: in.SystemDiskSizeGB,
ID: in.SystemDiskId,
Name: fmt.Sprintf("%s-root", in.Name),
Status: "in-use",
Type: api.STORAGE_ECLOUD_SYSTEM,
}
in.sysDisk = &disk
return
}
func (in *SInstance) fetchDataDisks() error {
request | {
return nil
} | identifier_body |
instance.go | FlavorRef string
SystemDiskSizeGB int `json:"vdisk"`
SystemDiskId string
ServerType string
ServerVmType string
EcStatus string
BootVolumeType string
Deleted int
Visible bool
Region string
PortDetail []SInstanceNic
}
func (i *SInstance) GetBillingType() string {
return billing_api.BILLING_TYPE_POSTPAID
}
func (i *SInstance) GetExpiredAt() time.Time {
return time.Time{}
}
func (i *SInstance) GetId() string {
return i.Id
}
func (i *SInstance) GetName() string {
return i.Name
}
func (i *SInstance) GetGlobalId() string {
return i.GetId()
}
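// GetStatus maps the native EcStatus values onto the generic api.VM_* states; anything unrecognized falls back to VM_UNKNOWN.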
func (i *SInstance) GetStatus() string {
switch i.EcStatus {
case "active":
return api.VM_RUNNING
case "suspended", "paused":
return api.VM_SUSPEND
case "build", "rebuild", "resize", "verify_resize", "revert_resize", "password":
return api.VM_STARTING
case "reboot", "hard_reboot":
return api.VM_STOPPING
case "stopped", "shutoff":
return api.VM_READY
case "migrating":
return api.VM_MIGRATING
case "backuping":
return api.VM_BACKUP_CREATING
default:
return api.VM_UNKNOWN
}
}
func (i *SInstance) Refresh() error {
// TODO
return nil
}
func (i *SInstance) IsEmulated() bool {
return false
}
func (self *SInstance) GetBootOrder() string {
return "dcn"
}
func (self *SInstance) GetVga() string {
return "std"
}
func (self *SInstance) GetVdi() string {
return "vnc"
}
func (i *SInstance) GetImage() (*SImage, error) {
if i.image != nil {
return i.image, nil
}
image, err := i.host.zone.region.GetImage(i.ImageRef)
if err != nil {
return nil, err
}
i.image = image
return i.image, nil
}
func (i *SInstance) GetOSType() string {
return i.ImageOsType
}
func (i *SInstance) GetOSName() string {
image, err := i.GetImage()
if err != nil {
return ""
}
return image.OsName
}
func (i *SInstance) GetBios() string {
return "BIOS"
}
func (i *SInstance) GetMachine() string {
return "pc"
}
func (i *SInstance) GetInstanceType() string {
return i.FlavorRef
}
func (self *SInstance) GetSysTags() map[string]string {
data := map[string]string{}
// TODO
lowerOs := self.GetOSType()
priceKey := fmt.Sprintf("%s::%s::%s", self.host.zone.region.GetId(), self.GetInstanceType(), lowerOs)
data["price_key"] = priceKey
data["zone_ext_id"] = self.host.zone.GetGlobalId()
image, _ := self.GetImage()
if image != nil {
meta := image.GetSysTags()
for k, v := range meta {
data[k] = v
}
}
return data
}
func (in *SInstance) GetProjectId() string {
return ""
}
func (in *SInstance) GetIHost() cloudprovider.ICloudHost {
return in.host
}
func (in *SInstance) GetIDisks() ([]cloudprovider.ICloudDisk, error) {
if in.sysDisk == nil {
in.fetchSysDisk()
}
if in.dataDisks == nil {
err := in.fetchDataDisks()
if err != nil {
return nil, err
}
}
return append([]cloudprovider.ICloudDisk{in.sysDisk}, in.dataDisks...), nil
}
func (in *SInstance) GetINics() ([]cloudprovider.ICloudNic, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
inics := make([]cloudprovider.ICloudNic, len(in.PortDetail))
for i := range in.PortDetail {
in.PortDetail[i].instance = in
inics[i] = &in.PortDetail[i]
}
return inics, nil
}
func (in *SInstance) GetIEIP() (cloudprovider.ICloudEIP, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
var eipId string
for i := range in.PortDetail {
if len(in.PortDetail[i].IpId) > 0 {
eipId = in.PortDetail[i].IpId
break
}
}
if len(eipId) == 0 {
return nil, nil
}
return in.host.zone.region.GetEipById(eipId)
}
func (in *SInstance) GetSecurityGroupIds() ([]string, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
ret := sets.NewString()
for i := range in.PortDetail {
for _, group := range in.PortDetail[i].SecurityGroups {
ret.Insert(group.Id)
}
}
return ret.UnsortedList(), nil
}
func (in *SInstance) GetVcpuCount() int {
return in.Vcpu
}
func (in *SInstance) GetVmemSizeMB() int {
return in.Vmemory
}
func (in *SInstance) AssignSecurityGroup(id string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) SetSecurityGroups(ids []string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) GetHypervisor() string {
return api.HYPERVISOR_ECLOUD
}
func (in *SInstance) StartVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) StopVM(ctx context.Context, opts *cloudprovider.ServerStopOptions) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeleteVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) UpdateVM(ctx context.Context, name string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) UpdateUserData(userData string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) RebuildRoot(ctx context.Context, config *cloudprovider.SManagedVMRebuildRootConfig) (string, error) {
return "", cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeployVM(ctx context.Context, name string, username string, password string, publicKey string, deleteKeypair bool, description string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) ChangeConfig(ctx context.Context, config *cloudprovider.SManagedVMChangeConfig) error {
return errors.ErrNotImplemented
}
func (in *SInstance) GetVNCInfo() (jsonutils.JSONObject, error) {
url, err := in.host.zone.region.GetInstanceVNCUrl(in.GetId())
if err != nil {
return nil, err
}
ret := jsonutils.NewDict()
ret.Add(jsonutils.NewString(url), "url")
ret.Add(jsonutils.NewString("ecloud"), "protocol")
ret.Add(jsonutils.NewString(in.GetId()), "instance_id")
return ret, nil
}
func (in *SInstance) AttachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) DetachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) CreateDisk(ctx context.Context, sizeMb int, uuid string, driver string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) | (bc billing.SBillingCycle) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) GetError() error {
return nil
}
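// fetchSysDisk synthesizes the system-disk record from the instance's own fields (image, size, disk ID)
// and the zone's system storage, without a separate volume API call.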
func (in *SInstance) fetchSysDisk() {
storage, _ := in.host.zone.getStorageByType(api.STORAGE_ECLOUD_SYSTEM)
disk := SDisk{
storage: storage,
ManualAttr: SDiskManualAttr{
IsVirtual: true,
TempalteId: in.ImageRef,
ServerId: in.Id,
},
SCreateTime: in.SCreateTime,
SZoneRegionBase: in.SZoneRegionBase,
ServerId: []string{in.Id},
IsShare: false,
IsDelete: false,
SizeGB: in.SystemDiskSizeGB,
ID: in.SystemDiskId,
Name: fmt.Sprintf("%s-root", in.Name),
Status: "in-use",
Type: api.STORAGE_ECLOUD_SYSTEM,
}
in.sysDisk = &disk
return
}
func (in *SInstance) fetchDataDisks() error {
request | Renew | identifier_name |
instance.go |
FlavorRef string
SystemDiskSizeGB int `json:"vdisk"`
SystemDiskId string
ServerType string
ServerVmType string
EcStatus string
BootVolumeType string
Deleted int
Visible bool
Region string
PortDetail []SInstanceNic
}
func (i *SInstance) GetBillingType() string {
return billing_api.BILLING_TYPE_POSTPAID
}
func (i *SInstance) GetExpiredAt() time.Time {
return time.Time{}
}
func (i *SInstance) GetId() string {
return i.Id
}
func (i *SInstance) GetName() string {
return i.Name
}
func (i *SInstance) GetGlobalId() string {
return i.GetId()
}
func (i *SInstance) GetStatus() string {
switch i.EcStatus {
case "active":
return api.VM_RUNNING
case "suspended", "paused":
return api.VM_SUSPEND
case "build", "rebuild", "resize", "verify_resize", "revert_resize", "password":
return api.VM_STARTING
case "reboot", "hard_reboot":
return api.VM_STOPPING
case "stopped", "shutoff":
return api.VM_READY
case "migrating":
return api.VM_MIGRATING
case "backuping":
return api.VM_BACKUP_CREATING
default:
return api.VM_UNKNOWN
}
}
func (i *SInstance) Refresh() error {
// TODO
return nil
}
func (i *SInstance) IsEmulated() bool {
return false
}
func (self *SInstance) GetBootOrder() string {
return "dcn"
}
func (self *SInstance) GetVga() string {
return "std"
}
func (self *SInstance) GetVdi() string {
return "vnc"
}
func (i *SInstance) GetImage() (*SImage, error) {
if i.image != nil {
return i.image, nil
}
image, err := i.host.zone.region.GetImage(i.ImageRef)
if err != nil {
return nil, err
}
i.image = image
return i.image, nil
}
func (i *SInstance) GetOSType() string {
return i.ImageOsType
}
func (i *SInstance) GetOSName() string {
image, err := i.GetImage()
if err != nil {
return ""
}
return image.OsName
}
func (i *SInstance) GetBios() string {
return "BIOS"
}
func (i *SInstance) GetMachine() string {
return "pc"
}
func (i *SInstance) GetInstanceType() string {
return i.FlavorRef
}
func (self *SInstance) GetSysTags() map[string]string {
data := map[string]string{}
// TODO
lowerOs := self.GetOSType()
priceKey := fmt.Sprintf("%s::%s::%s", self.host.zone.region.GetId(), self.GetInstanceType(), lowerOs)
data["price_key"] = priceKey
data["zone_ext_id"] = self.host.zone.GetGlobalId()
image, _ := self.GetImage()
if image != nil {
meta := image.GetSysTags()
for k, v := range meta {
data[k] = v
}
}
return data
}
func (in *SInstance) GetProjectId() string {
return ""
}
func (in *SInstance) GetIHost() cloudprovider.ICloudHost {
return in.host
}
func (in *SInstance) GetIDisks() ([]cloudprovider.ICloudDisk, error) {
if in.sysDisk == nil {
in.fetchSysDisk()
}
if in.dataDisks == nil {
err := in.fetchDataDisks()
if err != nil {
return nil, err
}
}
return append([]cloudprovider.ICloudDisk{in.sysDisk}, in.dataDisks...), nil
}
func (in *SInstance) GetINics() ([]cloudprovider.ICloudNic, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
inics := make([]cloudprovider.ICloudNic, len(in.PortDetail))
for i := range in.PortDetail {
in.PortDetail[i].instance = in
inics[i] = &in.PortDetail[i]
}
return inics, nil
}
func (in *SInstance) GetIEIP() (cloudprovider.ICloudEIP, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
var eipId string
for i := range in.PortDetail {
if len(in.PortDetail[i].IpId) > 0 {
eipId = in.PortDetail[i].IpId
break
}
}
if len(eipId) == 0 {
return nil, nil
}
return in.host.zone.region.GetEipById(eipId)
}
func (in *SInstance) GetSecurityGroupIds() ([]string, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
ret := sets.NewString()
for i := range in.PortDetail {
for _, group := range in.PortDetail[i].SecurityGroups {
ret.Insert(group.Id)
}
}
return ret.UnsortedList(), nil
}
func (in *SInstance) GetVcpuCount() int {
return in.Vcpu
}
func (in *SInstance) GetVmemSizeMB() int {
return in.Vmemory
}
func (in *SInstance) AssignSecurityGroup(id string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) SetSecurityGroups(ids []string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) GetHypervisor() string {
return api.HYPERVISOR_ECLOUD
}
func (in *SInstance) StartVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) StopVM(ctx context.Context, opts *cloudprovider.ServerStopOptions) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeleteVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) UpdateVM(ctx context.Context, name string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) UpdateUserData(userData string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) RebuildRoot(ctx context.Context, config *cloudprovider.SManagedVMRebuildRootConfig) (string, error) {
return "", cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeployVM(ctx context.Context, name string, username string, password string, publicKey string, deleteKeypair bool, description string) error { | }
func (in *SInstance) ChangeConfig(ctx context.Context, config *cloudprovider.SManagedVMChangeConfig) error {
return errors.ErrNotImplemented
}
func (in *SInstance) GetVNCInfo() (jsonutils.JSONObject, error) {
url, err := in.host.zone.region.GetInstanceVNCUrl(in.GetId())
if err != nil {
return nil, err
}
ret := jsonutils.NewDict()
ret.Add(jsonutils.NewString(url), "url")
ret.Add(jsonutils.NewString("ecloud"), "protocol")
ret.Add(jsonutils.NewString(in.GetId()), "instance_id")
return ret, nil
}
func (in *SInstance) AttachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) DetachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) CreateDisk(ctx context.Context, sizeMb int, uuid string, driver string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) Renew(bc billing.SBillingCycle) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) GetError() error {
return nil
}
func (in *SInstance) fetchSysDisk() {
storage, _ := in.host.zone.getStorageByType(api.STORAGE_ECLOUD_SYSTEM)
disk := SDisk{
storage: storage,
ManualAttr: SDiskManualAttr{
IsVirtual: true,
TempalteId: in.ImageRef,
ServerId: in.Id,
},
SCreateTime: in.SCreateTime,
SZoneRegionBase: in.SZoneRegionBase,
ServerId: []string{in.Id},
IsShare: false,
IsDelete: false,
SizeGB: in.SystemDiskSizeGB,
ID: in.SystemDiskId,
Name: fmt.Sprintf("%s-root", in.Name),
Status: "in-use",
Type: api.STORAGE_ECLOUD_SYSTEM,
}
in.sysDisk = &disk
return
}
func (in *SInstance) fetchDataDisks() error {
request := New | return cloudprovider.ErrNotImplemented | random_line_split |
instance.go | ":
return api.VM_READY
case "migrating":
return api.VM_MIGRATING
case "backuping":
return api.VM_BACKUP_CREATING
default:
return api.VM_UNKNOWN
}
}
func (i *SInstance) Refresh() error {
// TODO
return nil
}
func (i *SInstance) IsEmulated() bool {
return false
}
func (self *SInstance) GetBootOrder() string {
return "dcn"
}
func (self *SInstance) GetVga() string {
return "std"
}
func (self *SInstance) GetVdi() string {
return "vnc"
}
func (i *SInstance) GetImage() (*SImage, error) {
if i.image != nil {
return i.image, nil
}
image, err := i.host.zone.region.GetImage(i.ImageRef)
if err != nil {
return nil, err
}
i.image = image
return i.image, nil
}
func (i *SInstance) GetOSType() string {
return i.ImageOsType
}
func (i *SInstance) GetOSName() string {
image, err := i.GetImage()
if err != nil {
return ""
}
return image.OsName
}
func (i *SInstance) GetBios() string {
return "BIOS"
}
func (i *SInstance) GetMachine() string {
return "pc"
}
func (i *SInstance) GetInstanceType() string {
return i.FlavorRef
}
func (self *SInstance) GetSysTags() map[string]string {
data := map[string]string{}
// TODO
lowerOs := self.GetOSType()
priceKey := fmt.Sprintf("%s::%s::%s", self.host.zone.region.GetId(), self.GetInstanceType(), lowerOs)
data["price_key"] = priceKey
data["zone_ext_id"] = self.host.zone.GetGlobalId()
image, _ := self.GetImage()
if image != nil {
meta := image.GetSysTags()
for k, v := range meta {
data[k] = v
}
}
return data
}
func (in *SInstance) GetProjectId() string {
return ""
}
func (in *SInstance) GetIHost() cloudprovider.ICloudHost {
return in.host
}
func (in *SInstance) GetIDisks() ([]cloudprovider.ICloudDisk, error) {
if in.sysDisk == nil {
in.fetchSysDisk()
}
if in.dataDisks == nil {
err := in.fetchDataDisks()
if err != nil {
return nil, err
}
}
return append([]cloudprovider.ICloudDisk{in.sysDisk}, in.dataDisks...), nil
}
func (in *SInstance) GetINics() ([]cloudprovider.ICloudNic, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
inics := make([]cloudprovider.ICloudNic, len(in.PortDetail))
for i := range in.PortDetail {
in.PortDetail[i].instance = in
inics[i] = &in.PortDetail[i]
}
return inics, nil
}
func (in *SInstance) GetIEIP() (cloudprovider.ICloudEIP, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
var eipId string
for i := range in.PortDetail {
if len(in.PortDetail[i].IpId) > 0 {
eipId = in.PortDetail[i].IpId
break
}
}
if len(eipId) == 0 {
return nil, nil
}
return in.host.zone.region.GetEipById(eipId)
}
func (in *SInstance) GetSecurityGroupIds() ([]string, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
ret := sets.NewString()
for i := range in.PortDetail {
for _, group := range in.PortDetail[i].SecurityGroups {
ret.Insert(group.Id)
}
}
return ret.UnsortedList(), nil
}
func (in *SInstance) GetVcpuCount() int {
return in.Vcpu
}
func (in *SInstance) GetVmemSizeMB() int {
return in.Vmemory
}
func (in *SInstance) AssignSecurityGroup(id string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) SetSecurityGroups(ids []string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) GetHypervisor() string {
return api.HYPERVISOR_ECLOUD
}
func (in *SInstance) StartVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) StopVM(ctx context.Context, opts *cloudprovider.ServerStopOptions) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeleteVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) UpdateVM(ctx context.Context, name string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) UpdateUserData(userData string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) RebuildRoot(ctx context.Context, config *cloudprovider.SManagedVMRebuildRootConfig) (string, error) {
return "", cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeployVM(ctx context.Context, name string, username string, password string, publicKey string, deleteKeypair bool, description string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) ChangeConfig(ctx context.Context, config *cloudprovider.SManagedVMChangeConfig) error {
return errors.ErrNotImplemented
}
func (in *SInstance) GetVNCInfo() (jsonutils.JSONObject, error) {
url, err := in.host.zone.region.GetInstanceVNCUrl(in.GetId())
if err != nil {
return nil, err
}
ret := jsonutils.NewDict()
ret.Add(jsonutils.NewString(url), "url")
ret.Add(jsonutils.NewString("ecloud"), "protocol")
ret.Add(jsonutils.NewString(in.GetId()), "instance_id")
return ret, nil
}
func (in *SInstance) AttachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) DetachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) CreateDisk(ctx context.Context, sizeMb int, uuid string, driver string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) Renew(bc billing.SBillingCycle) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) GetError() error {
return nil
}
func (in *SInstance) fetchSysDisk() {
storage, _ := in.host.zone.getStorageByType(api.STORAGE_ECLOUD_SYSTEM)
disk := SDisk{
storage: storage,
ManualAttr: SDiskManualAttr{
IsVirtual: true,
TempalteId: in.ImageRef,
ServerId: in.Id,
},
SCreateTime: in.SCreateTime,
SZoneRegionBase: in.SZoneRegionBase,
ServerId: []string{in.Id},
IsShare: false,
IsDelete: false,
SizeGB: in.SystemDiskSizeGB,
ID: in.SystemDiskId,
Name: fmt.Sprintf("%s-root", in.Name),
Status: "in-use",
Type: api.STORAGE_ECLOUD_SYSTEM,
}
in.sysDisk = &disk
return
}
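// fetchDataDisks lists the volumes mounted on this server and attaches the matching storage (by type) to each disk.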
func (in *SInstance) fetchDataDisks() error {
request := NewNovaRequest(NewApiRequest(in.host.zone.region.ID, "/api/v2/volume/volume/mount/list",
map[string]string{"serverId": in.Id}, nil))
disks := make([]SDisk, 0, 5)
err := in.host.zone.region.client.doList(context.Background(), request, &disks)
if err != nil {
return err
}
idisks := make([]cloudprovider.ICloudDisk, len(disks))
for i := range idisks {
storageType := disks[i].Type
storage, err := in.host.zone.getStorageByType(storageType)
if err != nil {
return errors.Wrapf(err, "unable to fetch storage with stoageType %s", storageType)
}
disks[i].storage = storage
idisks[i] = &disks[i]
}
in.dataDisks = idisks
return nil
}
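// makeNicComplete is run lazily (guarded by the nicComplete flag) to finish populating PortDetail
// before nics, EIPs or security groups are exposed.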
func (in *SInstance) makeNicComplete() error {
routerIds := sets.NewString()
nics := make(map[string]*SInstanceNic, len(in.PortDetail))
for i := range in.PortDetail | {
nic := &in.PortDetail[i]
routerIds.Insert(nic.RouterId)
nics[nic.PortId] = nic
} | conditional_block |
|
lifecycle.go | nil
}
// pumpMessages reads and decodes messages from a Journal & offset into the provided channel.
func pumpMessages(shard Shard, app Application, journal pb.Journal, offset int64, msgCh chan<- message.Envelope) error {
var spec, err = fetchJournalSpec(shard.Context(), journal, shard.JournalClient())
if err != nil {
return extendErr(err, "fetching JournalSpec")
}
framing, err := message.FramingByContentType(spec.LabelSet.ValueOf(labels.ContentType))
if err != nil {
return extendErr(err, "determining framing (%s)", journal)
}
var rr = client.NewRetryReader(shard.Context(), shard.JournalClient(), pb.ReadRequest{
Journal: journal,
Offset: offset,
Block: true,
DoNotProxy: !shard.JournalClient().IsNoopRouter(),
})
var br = bufio.NewReader(rr)
for next := offset; ; offset = next {
var frame []byte
var msg message.Message
if frame, err = framing.Unpack(br); err != nil {
// Swallow ErrNoProgress from our bufio.Reader. client.Reader returns
// an empty read to allow for inspection of the ReadResponse message,
// and client.RetryReader also surfaces these empty reads. A journal
// with no active appends can eventually cause our bufio.Reader to
// give up, though no error has occurred.
if errors.Cause(err) == io.ErrNoProgress {
continue
}
// ErrOffsetJump indicates the next byte of available content is at an
// offset larger than the one requested. This can happen if a range of
// content was deleted from the journal. Log a warning, but continue
// processing at the jumped-to offset.
if errors.Cause(err) == client.ErrOffsetJump {
log.WithFields(log.Fields{"journal": journal, "from": offset, "to": rr.Offset()}).
Warn("source journal offset jump")
next = rr.Offset()
continue
}
return extendErr(err, "unpacking frame (%s:%d)", spec.Name, offset)
}
next = rr.AdjustedOffset(br)
if msg, err = app.NewMessage(spec); err != nil {
return extendErr(err, "NewMessage (%s)", journal)
} else if err = framing.Unmarshal(frame, msg); err != nil {
log.WithFields(log.Fields{"journal": journal, "offset": offset, "err": err}).
Error("failed to unmarshal message")
continue
}
select {
case msgCh <- message.Envelope{
JournalSpec: spec,
Fragment: rr.Reader.Response.Fragment,
NextOffset: next,
Message: msg,
}: // Pass.
case <-shard.Context().Done():
return extendErr(shard.Context().Err(), "sending msg (%s:%d)", spec.Name, offset)
}
metrics.GazetteConsumerBytesConsumedTotal.Add(float64(next - offset))
}
}
// consumeMessages runs consumer transactions, consuming from the provided
// |msgCh| and, when notified by |hintsCh|, occasionally stores recorded FSMHints.
func consumeMessages(shard Shard, store Store, app Application, etcd *clientv3.Client,
msgCh <-chan message.Envelope, hintsCh <-chan time.Time) (err error) {
// Supply an idle timer for txnStep's use in timing transaction durations.
var realTimer = time.NewTimer(0)
if !realTimer.Stop() {
<-realTimer.C
}
var timer = txnTimer{
C: realTimer.C,
Reset: realTimer.Reset,
Stop: realTimer.Stop,
}
var txn, prior transaction
for {
select {
case <-hintsCh:
var hints recoverylog.FSMHints
if hints, err = store.Recorder().BuildHints(); err == nil {
err = storeRecordedHints(shard, hints, etcd)
}
if err != nil {
err = extendErr(err, "storeRecordedHints")
return
}
default:
// Pass.
}
var spec = shard.Spec()
txn.minDur, txn.maxDur = spec.MinTxnDuration, spec.MaxTxnDuration
txn.msgCh = msgCh
txn.offsets = make(map[pb.Journal]int64)
// Run the transaction until completion or error.
for done := false; !done && err == nil; done, err = txnStep(&txn, &prior, shard, store, app, timer) {
}
if err != nil {
err = extendErr(err, "txnStep")
}
if ba, ok := app.(BeginFinisher); ok && txn.msgCount != 0 {
if finishErr := ba.FinishTxn(shard, store, err); err == nil && finishErr != nil {
err = extendErr(finishErr, "FinishTxn")
}
}
if err != nil {
return
}
recordMetrics(&prior)
prior, txn = txn, transaction{doneCh: txn.barrier.Done()}
}
}
// fetchJournalSpec retrieves the current JournalSpec.
func fetchJournalSpec(ctx context.Context, name pb.Journal, journals pb.JournalClient) (spec *pb.JournalSpec, err error) {
var lr *pb.ListResponse
lr, err = client.ListAllJournals(ctx, journals, pb.ListRequest{
Selector: pb.LabelSelector{
Include: pb.LabelSet{Labels: []pb.Label{{Name: "name", Value: name.String()}}},
},
})
if err == nil && len(lr.Journals) == 0 {
err = errors.Errorf("named journal does not exist (%s)", name)
}
if err == nil {
spec = &lr.Journals[0].Spec
}
return
}
type fetchedHints struct {
spec *pc.ShardSpec
txnResp *clientv3.TxnResponse
hints []*recoverylog.FSMHints
}
| func pickFirstHints(f fetchedHints) recoverylog.FSMHints {
for _, currHints := range f.hints {
if currHints == nil {
continue
}
return *currHints
}
return recoverylog.FSMHints{Log: f.spec.RecoveryLog()}
}
// fetchHints retrieves and decodes all FSMHints for the ShardSpec.
// Nil values will be returned where hint values have not been written. It also
// returns a TxnResponse holding each of the hints values, which can be used for
// transactional updates of hints.
func fetchHints(ctx context.Context, spec *pc.ShardSpec, etcd *clientv3.Client) (out fetchedHints, err error) {
var ops = []clientv3.Op{clientv3.OpGet(spec.HintPrimaryKey())}
for _, hk := range spec.HintBackupKeys() {
ops = append(ops, clientv3.OpGet(hk))
}
out.spec = spec
if out.txnResp, err = etcd.Txn(ctx).If().Then(ops...).Commit(); err != nil {
err = extendErr(err, "fetching ShardSpec.HintKeys")
return
}
for i := range out.txnResp.Responses {
var currHints recoverylog.FSMHints
if kvs := out.txnResp.Responses[i].GetResponseRange().Kvs; len(kvs) == 0 {
out.hints = append(out.hints, nil)
continue
} else if err = json.Unmarshal(kvs[0].Value, &currHints); err != nil {
err = extendErr(err, "unmarshal FSMHints")
} else if _, err = recoverylog.NewFSM(currHints); err != nil { // Validate hints.
err = extendErr(err, "validating FSMHints")
} else if currHints.Log != spec.RecoveryLog() {
err = errors.Errorf("recovered hints recovery log doesn't match ShardSpec.RecoveryLog (%s vs %s)",
currHints.Log, spec.RecoveryLog())
}
if err != nil {
return
}
out.hints = append(out.hints, &currHints)
}
return
}
// storeRecordedHints writes FSMHints into the primary hint key of the spec.
func storeRecordedHints(shard Shard, hints recoverylog.FSMHints, etcd *clientv3.Client) error {
var val, err = json.Marshal(hints)
if err != nil {
return extendErr(err, "marshal FSMHints")
}
var asn = shard.Assignment()
_, err = etcd.Txn(shard.Context()).
// Verify our Assignment is still in effect (eg, we're still primary), then write |hints| to HintPrimaryKey.
// Compare CreateRevision to allow for a raced ReplicaState update.
If(clientv3.Compare(clientv3.CreateRevision(string(asn.Raw.Key)), "=", asn.Raw.CreateRevision)).
Then(clientv3.OpPut(shard.Spec().HintPrimaryKey(), string(val))).
Commit()
if etcdErr, ok | // pickFirstHints retrieves the first hints from |f|. If there are no primary
// hints available, the most recent backup hints will be returned. If there are
// no hints available an empty set of hints is returned. | random_line_split |
lifecycle.go | nil {
return extendErr(err, "storing recorded FSMHints")
}
return nil
}
// storeRecoveredHints writes the FSMHints into the first backup hint key of the spec,
// rotating hints previously stored under that key to the second backup hint key,
// and so on as a single transaction.
func storeRecoveredHints(shard Shard, hints recoverylog.FSMHints, etcd *clientv3.Client) (err error) {
var (
spec = shard.Spec()
asn = shard.Assignment()
backups = shard.Spec().HintBackupKeys()
h fetchedHints
)
if h, err = fetchHints(shard.Context(), spec, etcd); err != nil {
return
}
// |hints| is serialized and written to backups[0]. In the same txn,
// rotate the current value at backups[0] => backups[1], and so on.
var val []byte
if val, err = json.Marshal(hints); err != nil {
return
}
var cmp []clientv3.Cmp
var ops []clientv3.Op
// The txn responses returned from fetchHints are structured such that the first response will
// be the primary response and the subsequent responses are the backup responses; this slice
// represents just the backup responses.
var backupResponses = h.txnResp.Responses[1:]
// Verify our Assignment is still in effect (eg, we're still primary).
cmp = append(cmp, clientv3.Compare(clientv3.CreateRevision(string(asn.Raw.Key)),
"=", asn.Raw.CreateRevision))
for i := 0; i != len(backups) && val != nil; i++ {
ops = append(ops, clientv3.OpPut(backups[i], string(val)))
if kvs := backupResponses[i].GetResponseRange().Kvs; len(kvs) == 0 {
// Verify there is still no current key/value at this hints key slot.
cmp = append(cmp, clientv3.Compare(clientv3.ModRevision(backups[i]), "=", 0))
val = nil
} else {
// Verify the key/value at this hints key slot is unchanged.
// Retain its value to rotate into the next slot (if one exists).
cmp = append(cmp, clientv3.Compare(clientv3.ModRevision(backups[i]), "=", kvs[0].ModRevision))
val = kvs[0].Value
}
}
if _, err = etcd.Txn(shard.Context()).If(cmp...).Then(ops...).Commit(); err != nil {
err = extendErr(err, "storing recovered FSMHints")
}
return
}
// transaction models state and metrics used in the execution of a consumer transaction.
type transaction struct {
barrier *client.AsyncAppend // Write barrier of the txn at commit.
minDur, maxDur time.Duration // Minimum and maximum durations. Marked as -1 when elapsed.
msgCh <-chan message.Envelope // Message source. Nil'd upon reaching |maxDur|.
msgCount int // Number of messages batched into this transaction.
offsets map[pb.Journal]int64 // End (exclusive) journal offsets of the transaction.
doneCh <-chan struct{} // DoneCh of prior transaction barrier.
beganAt time.Time // Time at which transaction began.
stalledAt time.Time // Time at which processing stalled while waiting on IO.
flushedAt time.Time // Time at which flush began.
committedAt time.Time // Time at which commit began.
syncedAt time.Time // Time at which txn |barrier| resolved.
}
// txnTimer is a time.Timer which can be mocked within unit tests.
type txnTimer struct {
C <-chan time.Time
Reset func(time.Duration) bool
Stop func() bool
}
// txnStep progresses a consumer transaction by a single step. If the transaction
// is complete, it returns done=true. Otherwise, txnStep should be called again
// to continue making progress on the transaction.
func txnStep(txn, prior *transaction, shard Shard, store Store, app Application, timer txnTimer) (done bool, err error) {
// If the minimum batching duration hasn't elapsed *or* the prior transaction
// barrier hasn't completed, continue performing blocking reads of messages.
if txn.msgCount == 0 || txn.minDur != -1 || txn.doneCh != nil {
select {
case msg := <-txn.msgCh:
if txn.msgCount == 0 {
if ba, ok := app.(BeginFinisher); ok {
// BeginTxn may block arbitrarily.
if err = ba.BeginTxn(shard, store); err != nil {
err = extendErr(err, "app.BeginTxn")
return
}
}
txn.beganAt = timeNow()
timer.Reset(txn.minDur)
}
txn.msgCount++
txn.offsets[msg.JournalSpec.Name] = msg.NextOffset
if err = app.ConsumeMessage(shard, store, msg); err != nil {
err = extendErr(err, "app.ConsumeMessage")
}
return
case tick := <-timer.C:
if tick.Before(txn.beganAt.Add(txn.minDur)) {
panic("unexpected tick")
}
txn.minDur = -1 // Mark as completed.
if tick.Before(txn.beganAt.Add(txn.maxDur)) {
timer.Reset(txn.beganAt.Add(txn.maxDur).Sub(tick))
} else {
txn.maxDur = -1 // Mark as completed.
txn.msgCh = nil // Stop reading messages.
txn.stalledAt = timeNow() // We're stalled waiting for prior txn IO.
}
return
case _ = <-txn.doneCh:
prior.syncedAt = timeNow()
txn.doneCh = nil
return
case _ = <-shard.Context().Done():
err = shard.Context().Err()
return
}
panic("not reached")
}
// Continue reading messages so long as we do not block or reach |maxDur|.
select {
case msg := <-txn.msgCh:
txn.msgCount++
txn.offsets[msg.JournalSpec.Name] = msg.NextOffset
if err = app.ConsumeMessage(shard, store, msg); err != nil {
err = extendErr(err, "app.ConsumeMessage")
}
return
case tick := <-timer.C:
if tick.Before(txn.beganAt.Add(txn.maxDur)) {
panic("unexpected tick")
}
txn.maxDur = -1 // Mark as completed.
txn.msgCh = nil // Stop reading messages.
return
case _ = <-shard.Context().Done():
err = shard.Context().Err()
return
default:
// |msgCh| stalled. Fallthrough to complete the transaction.
}
if txn.flushedAt = timeNow(); txn.stalledAt.IsZero() {
txn.stalledAt = txn.flushedAt // We spent no time stalled.
}
if err = app.FinalizeTxn(shard, store); err != nil {
err = extendErr(err, "app.FinalizeTxn")
return
}
// Inject a strong write barrier which resolves only after pending writes
// to all journals have completed. We do this before store.Flush to ensure
// that writes driven by transaction messages have completed before we
// persist updated offsets which step past those messages.
store.Recorder().StrongBarrier()
if err = store.Flush(txn.offsets); err != nil {
err = extendErr(err, "store.Flush")
return
}
txn.barrier = store.Recorder().WeakBarrier()
txn.committedAt = timeNow()
// If the timer is still running, stop and drain it.
if txn.maxDur != -1 && !timer.Stop() {
<-timer.C
}
done = true
return
}
// recordMetrics of a fully completed transaction.
func recordMetrics(txn *transaction) {
metrics.GazetteConsumerTxCountTotal.Inc()
metrics.GazetteConsumerTxMessagesTotal.Add(float64(txn.msgCount))
metrics.GazetteConsumerTxSecondsTotal.Add(txn.syncedAt.Sub(txn.beganAt).Seconds())
metrics.GazetteConsumerTxConsumeSecondsTotal.Add(txn.stalledAt.Sub(txn.beganAt).Seconds())
metrics.GazetteConsumerTxStalledSecondsTotal.Add(txn.flushedAt.Sub(txn.stalledAt).Seconds())
metrics.GazetteConsumerTxFlushSecondsTotal.Add(txn.committedAt.Sub(txn.flushedAt).Seconds())
metrics.GazetteConsumerTxSyncSecondsTotal.Add(txn.syncedAt.Sub(txn.committedAt).Seconds())
}
func extendErr(err error, mFmt string, args ...interface{}) error | {
if err == nil {
panic("expected error")
} else if err == context.Canceled || err == context.DeadlineExceeded {
return err
} else if _, ok := err.(interface{ StackTrace() errors.StackTrace }); ok {
// Avoid attaching another errors.StackTrace if one is already present.
return errors.WithMessage(err, fmt.Sprintf(mFmt, args...))
} else {
// Use Wrapf to simultaneously attach |mFmt| and the current stack trace.
return errors.Wrapf(err, mFmt, args...)
}
} | identifier_body |
|
lifecycle.go |
}
// pumpMessages reads and decodes messages from a Journal & offset into the provided channel.
func pumpMessages(shard Shard, app Application, journal pb.Journal, offset int64, msgCh chan<- message.Envelope) error {
var spec, err = fetchJournalSpec(shard.Context(), journal, shard.JournalClient())
if err != nil {
return extendErr(err, "fetching JournalSpec")
}
framing, err := message.FramingByContentType(spec.LabelSet.ValueOf(labels.ContentType))
if err != nil {
return extendErr(err, "determining framing (%s)", journal)
}
var rr = client.NewRetryReader(shard.Context(), shard.JournalClient(), pb.ReadRequest{
Journal: journal,
Offset: offset,
Block: true,
DoNotProxy: !shard.JournalClient().IsNoopRouter(),
})
var br = bufio.NewReader(rr)
for next := offset; ; offset = next {
var frame []byte
var msg message.Message
if frame, err = framing.Unpack(br); err != nil {
// Swallow ErrNoProgress from our bufio.Reader. client.Reader returns
// an empty read to allow for inspection of the ReadResponse message,
// and client.RetryReader also surfaces these empty reads. A journal
// with no active appends can eventually cause our bufio.Reader to
// give up, though no error has occurred.
if errors.Cause(err) == io.ErrNoProgress {
continue
}
// ErrOffsetJump indicates the next byte of available content is at an
// offset larger than the one requested. This can happen if a range of
// content was deleted from the journal. Log a warning, but continue
// processing at the jumped-to offset.
if errors.Cause(err) == client.ErrOffsetJump {
log.WithFields(log.Fields{"journal": journal, "from": offset, "to": rr.Offset()}).
Warn("source journal offset jump")
next = rr.Offset()
continue
}
return extendErr(err, "unpacking frame (%s:%d)", spec.Name, offset)
}
next = rr.AdjustedOffset(br)
if msg, err = app.NewMessage(spec); err != nil {
return extendErr(err, "NewMessage (%s)", journal)
} else if err = framing.Unmarshal(frame, msg); err != nil {
log.WithFields(log.Fields{"journal": journal, "offset": offset, "err": err}).
Error("failed to unmarshal message")
continue
}
select {
case msgCh <- message.Envelope{
JournalSpec: spec,
Fragment: rr.Reader.Response.Fragment,
NextOffset: next,
Message: msg,
}: // Pass.
case <-shard.Context().Done():
return extendErr(shard.Context().Err(), "sending msg (%s:%d)", spec.Name, offset)
}
metrics.GazetteConsumerBytesConsumedTotal.Add(float64(next - offset))
}
}
// consumeMessages runs consumer transactions, consuming from the provided
// |msgCh| and, when notified by |hintsCh|, occasionally stores recorded FSMHints.
func consumeMessages(shard Shard, store Store, app Application, etcd *clientv3.Client,
msgCh <-chan message.Envelope, hintsCh <-chan time.Time) (err error) {
// Supply an idle timer for txnStep's use in timing transaction durations.
var realTimer = time.NewTimer(0)
if !realTimer.Stop() {
<-realTimer.C
}
var timer = txnTimer{
C: realTimer.C,
Reset: realTimer.Reset,
Stop: realTimer.Stop,
}
var txn, prior transaction
for {
select {
case <-hintsCh:
var hints recoverylog.FSMHints
if hints, err = store.Recorder().BuildHints(); err == nil {
err = storeRecordedHints(shard, hints, etcd)
}
if err != nil {
err = extendErr(err, "storeRecordedHints")
return
}
default:
// Pass.
}
var spec = shard.Spec()
txn.minDur, txn.maxDur = spec.MinTxnDuration, spec.MaxTxnDuration
txn.msgCh = msgCh
txn.offsets = make(map[pb.Journal]int64)
// Run the transaction until completion or error.
for done := false; !done && err == nil; done, err = txnStep(&txn, &prior, shard, store, app, timer) {
}
if err != nil {
err = extendErr(err, "txnStep")
}
if ba, ok := app.(BeginFinisher); ok && txn.msgCount != 0 {
if finishErr := ba.FinishTxn(shard, store, err); err == nil && finishErr != nil {
err = extendErr(finishErr, "FinishTxn")
}
}
if err != nil {
return
}
recordMetrics(&prior)
prior, txn = txn, transaction{doneCh: txn.barrier.Done()}
}
}
// fetchJournalSpec retrieves the current JournalSpec.
func fetchJournalSpec(ctx context.Context, name pb.Journal, journals pb.JournalClient) (spec *pb.JournalSpec, err error) {
var lr *pb.ListResponse
lr, err = client.ListAllJournals(ctx, journals, pb.ListRequest{
Selector: pb.LabelSelector{
Include: pb.LabelSet{Labels: []pb.Label{{Name: "name", Value: name.String()}}},
},
})
if err == nil && len(lr.Journals) == 0 {
err = errors.Errorf("named journal does not exist (%s)", name)
}
if err == nil {
spec = &lr.Journals[0].Spec
}
return
}
type fetchedHints struct {
spec *pc.ShardSpec
txnResp *clientv3.TxnResponse
hints []*recoverylog.FSMHints
}
// pickFirstHints retrieves the first hints from |f|. If no primary hints are
// available, the most recent backup hints are returned. If no hints are
// available at all, an empty set of hints is returned.
func pickFirstHints(f fetchedHints) recoverylog.FSMHints {
for _, currHints := range f.hints {
if currHints == nil {
continue
}
return *currHints
}
return recoverylog.FSMHints{Log: f.spec.RecoveryLog()}
}
// fetchHints retrieves and decodes all FSMHints for the ShardSpec.
// Nil values will be returned where hint values have not been written. It also
// returns a TxnResponse holding each of the hint values, which can be used for
// transactional updates of hints.
func fetchHints(ctx context.Context, spec *pc.ShardSpec, etcd *clientv3.Client) (out fetchedHints, err error) {
var ops = []clientv3.Op{clientv3.OpGet(spec.HintPrimaryKey())}
for _, hk := range spec.HintBackupKeys() {
ops = append(ops, clientv3.OpGet(hk))
}
out.spec = spec
if out.txnResp, err = etcd.Txn(ctx).If().Then(ops...).Commit(); err != nil |
for i := range out.txnResp.Responses {
var currHints recoverylog.FSMHints
if kvs := out.txnResp.Responses[i].GetResponseRange().Kvs; len(kvs) == 0 {
out.hints = append(out.hints, nil)
continue
} else if err = json.Unmarshal(kvs[0].Value, &currHints); err != nil {
err = extendErr(err, "unmarshal FSMHints")
} else if _, err = recoverylog.NewFSM(currHints); err != nil { // Validate hints.
err = extendErr(err, "validating FSMHints")
} else if currHints.Log != spec.RecoveryLog() {
err = errors.Errorf("recovered hints recovery log doesn't match ShardSpec.RecoveryLog (%s vs %s)",
currHints.Log, spec.RecoveryLog())
}
if err != nil {
return
}
out.hints = append(out.hints, &currHints)
}
return
}
// storeRecordedHints writes FSMHints into the primary hint key of the spec.
func storeRecordedHints(shard Shard, hints recoverylog.FSMHints, etcd *clientv3.Client) error {
var val, err = json.Marshal(hints)
if err != nil {
return extendErr(err, "marshal FSMHints")
}
var asn = shard.Assignment()
_, err = etcd.Txn(shard.Context()).
// Verify our Assignment is still in effect (eg, we're still primary), then write |hints| to HintPrimaryKey.
// Compare CreateRevision to allow for a raced ReplicaState update.
If(clientv3.Compare(clientv3.CreateRevision(string(asn.Raw.Key)), "=", asn.Raw.CreateRevision)).
Then(clientv3.OpPut(shard.Spec().HintPrimaryKey(), string(val))).
Commit()
if etcdErr, | {
err = extendErr(err, "fetching ShardSpec.HintKeys")
return
} | conditional_block |
lifecycle.go | := err.(rpctypes.EtcdError); ok && etcdErr.Code() == codes.Unavailable {
// Recorded hints are advisory and can generally tolerate omitted
// updates. It's also annoying for temporary Etcd partitions to abort
// an otherwise-fine shard primary. So, log but allow shard processing
// to continue; we'll retry on the next hints flush interval.
log.WithFields(log.Fields{
"key": shard.Spec().HintPrimaryKey(),
"err": err,
}).Warn("failed to store recorded FSMHints (will retry)")
} else if err != nil {
return extendErr(err, "storing recorded FSMHints")
}
return nil
}
// storeRecoveredHints writes the FSMHints into the first backup hint key of the spec,
// rotating hints previously stored under that key to the second backup hint key,
// and so on as a single transaction.
func storeRecoveredHints(shard Shard, hints recoverylog.FSMHints, etcd *clientv3.Client) (err error) {
var (
spec = shard.Spec()
asn = shard.Assignment()
backups = shard.Spec().HintBackupKeys()
h fetchedHints
)
if h, err = fetchHints(shard.Context(), spec, etcd); err != nil {
return
}
// |hints| is serialized and written to backups[0]. In the same txn,
// rotate the current value at backups[0] => backups[1], and so on.
var val []byte
if val, err = json.Marshal(hints); err != nil {
return
}
var cmp []clientv3.Cmp
var ops []clientv3.Op
// The txn responses returned from fetchHints are structured such that the first response is
// the primary response and the subsequent responses are the backup responses; this slice
// holds just the backup responses.
var backupResponses = h.txnResp.Responses[1:]
// Verify our Assignment is still in effect (eg, we're still primary).
cmp = append(cmp, clientv3.Compare(clientv3.CreateRevision(string(asn.Raw.Key)),
"=", asn.Raw.CreateRevision))
for i := 0; i != len(backups) && val != nil; i++ {
ops = append(ops, clientv3.OpPut(backups[i], string(val)))
if kvs := backupResponses[i].GetResponseRange().Kvs; len(kvs) == 0 {
// Verify there is still no current key/value at this hints key slot.
cmp = append(cmp, clientv3.Compare(clientv3.ModRevision(backups[i]), "=", 0))
val = nil
} else {
// Verify the key/value at this hints key slot is unchanged.
// Retain its value to rotate into the next slot (if one exists).
cmp = append(cmp, clientv3.Compare(clientv3.ModRevision(backups[i]), "=", kvs[0].ModRevision))
val = kvs[0].Value
}
}
if _, err = etcd.Txn(shard.Context()).If(cmp...).Then(ops...).Commit(); err != nil {
err = extendErr(err, "storing recovered FSMHints")
}
return
}
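// Illustrative sketch (not part of the original file): the rotation performed
// above, shown over a plain slice instead of Etcd keys. The newest hints land
// in the first backup slot, each prior value shifts one slot toward the end,
// and the value previously held in the last slot is dropped. The helper name
// is hypothetical and exists only to clarify the loop in storeRecoveredHints.
func rotateBackupHints(slots []string, newest string) []string {
	var out = make([]string, len(slots))
	var val = newest
	for i := 0; i != len(slots) && val != ""; i++ {
		// Write the carried value into slot |i|, then carry slot |i|'s prior value forward.
		out[i], val = val, slots[i]
	}
	return out
}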
// transaction models state and metrics used in the execution of a consumer transaction.
type transaction struct {
barrier *client.AsyncAppend // Write barrier of the txn at commit.
minDur, maxDur time.Duration // Minimum and maximum durations. Marked as -1 when elapsed.
msgCh <-chan message.Envelope // Message source. Nil'd upon reaching |maxDur|.
msgCount int // Number of messages batched into this transaction.
offsets map[pb.Journal]int64 // End (exclusive) journal offsets of the transaction.
doneCh <-chan struct{} // DoneCh of prior transaction barrier.
beganAt time.Time // Time at which transaction began.
stalledAt time.Time // Time at which processing stalled while waiting on IO.
flushedAt time.Time // Time at which flush began.
committedAt time.Time // Time at which commit began.
syncedAt time.Time // Time at which txn |barrier| resolved.
}
// txnTimer is a time.Timer which can be mocked within unit tests.
type txnTimer struct {
C <-chan time.Time
Reset func(time.Duration) bool
Stop func() bool
}
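// Illustrative sketch (not part of the original file): because txnTimer holds
// plain funcs, unit tests can substitute a fake timer and fire ticks on demand.
// newFakeTxnTimer is hypothetical test plumbing, not part of the project API.
func newFakeTxnTimer() (txnTimer, chan time.Time) {
	var c = make(chan time.Time, 1)
	return txnTimer{
		C:     c,
		Reset: func(time.Duration) bool { return true }, // Pretend the timer was armed.
		Stop:  func() bool { return true },               // Pretend the timer was stopped before firing.
	}, c
}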
// txnStep progresses a consumer transaction by a single step. If the transaction
// is complete, it returns done=true. Otherwise, txnStep should be called again
// to continue making progress on the transaction.
func txnStep(txn, prior *transaction, shard Shard, store Store, app Application, timer txnTimer) (done bool, err error) {
// If the minimum batching duration hasn't elapsed *or* the prior transaction
// barrier hasn't completed, continue performing blocking reads of messages.
if txn.msgCount == 0 || txn.minDur != -1 || txn.doneCh != nil {
select {
case msg := <-txn.msgCh:
if txn.msgCount == 0 {
if ba, ok := app.(BeginFinisher); ok {
// BeginTxn may block arbitrarily.
if err = ba.BeginTxn(shard, store); err != nil {
err = extendErr(err, "app.BeginTxn")
return
}
}
txn.beganAt = timeNow()
timer.Reset(txn.minDur)
}
txn.msgCount++
txn.offsets[msg.JournalSpec.Name] = msg.NextOffset
if err = app.ConsumeMessage(shard, store, msg); err != nil {
err = extendErr(err, "app.ConsumeMessage")
}
return
case tick := <-timer.C:
if tick.Before(txn.beganAt.Add(txn.minDur)) {
panic("unexpected tick")
}
txn.minDur = -1 // Mark as completed.
if tick.Before(txn.beganAt.Add(txn.maxDur)) {
timer.Reset(txn.beganAt.Add(txn.maxDur).Sub(tick))
} else {
txn.maxDur = -1 // Mark as completed.
txn.msgCh = nil // Stop reading messages.
txn.stalledAt = timeNow() // We're stalled waiting for prior txn IO.
}
return
case <-txn.doneCh:
prior.syncedAt = timeNow()
txn.doneCh = nil
return
case <-shard.Context().Done():
err = shard.Context().Err()
return
}
panic("not reached")
}
// Continue reading messages so long as we do not block or reach |maxDur|.
select {
case msg := <-txn.msgCh:
txn.msgCount++
txn.offsets[msg.JournalSpec.Name] = msg.NextOffset
if err = app.ConsumeMessage(shard, store, msg); err != nil {
err = extendErr(err, "app.ConsumeMessage")
}
return
case tick := <-timer.C:
if tick.Before(txn.beganAt.Add(txn.maxDur)) {
panic("unexpected tick")
}
txn.maxDur = -1 // Mark as completed.
txn.msgCh = nil // Stop reading messages.
return
case <-shard.Context().Done():
err = shard.Context().Err()
return
default:
// |msgCh| stalled. Fallthrough to complete the transaction.
}
if txn.flushedAt = timeNow(); txn.stalledAt.IsZero() {
txn.stalledAt = txn.flushedAt // We spent no time stalled.
}
if err = app.FinalizeTxn(shard, store); err != nil {
err = extendErr(err, "app.FinalizeTxn")
return
}
// Inject a strong write barrier which resolves only after pending writes
// to all journals have completed. We do this before store.Flush to ensure
// that writes driven by transaction messages have completed before we
// persist updated offsets which step past those messages.
store.Recorder().StrongBarrier()
if err = store.Flush(txn.offsets); err != nil {
err = extendErr(err, "store.Flush")
return
}
txn.barrier = store.Recorder().WeakBarrier()
txn.committedAt = timeNow()
// If the timer is still running, stop and drain it.
if txn.maxDur != -1 && !timer.Stop() {
<-timer.C
}
done = true
return
}
// recordMetrics of a fully completed transaction.
func recordMetrics(txn *transaction) {
metrics.GazetteConsumerTxCountTotal.Inc()
metrics.GazetteConsumerTxMessagesTotal.Add(float64(txn.msgCount))
metrics.GazetteConsumerTxSecondsTotal.Add(txn.syncedAt.Sub(txn.beganAt).Seconds())
metrics.GazetteConsumerTxConsumeSecondsTotal.Add(txn.stalledAt.Sub(txn.beganAt).Seconds())
metrics.GazetteConsumerTxStalledSecondsTotal.Add(txn.flushedAt.Sub(txn.stalledAt).Seconds())
metrics.GazetteConsumerTxFlushSecondsTotal.Add(txn.committedAt.Sub(txn.flushedAt).Seconds())
metrics.GazetteConsumerTxSyncSecondsTotal.Add(txn.syncedAt.Sub(txn.committedAt).Seconds())
}
func | extendErr | identifier_name |
|
incident-sk.ts | </pre>
*
* @evt assign Sent when the user wants to assign the incident to someone else.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
*/
import { define } from '../../../elements-sk/modules/define';
import '../../../elements-sk/modules/icons/alarm-off-icon-sk';
import '../../../elements-sk/modules/icons/delete-icon-sk';
import '../../../elements-sk/modules/icons/thumbs-up-down-icon-sk';
import '../../../infra-sk/modules/clipboard-sk';
import '../silence-sk';
import { $$ } from '../../../infra-sk/modules/dom';
import { diffDate, strDuration } from '../../../infra-sk/modules/human';
import { errorMessage } from '../../../elements-sk/modules/errorMessage';
import { html, render, TemplateResult } from 'lit-html';
import { until } from 'lit-html/directives/until';
import { jsonOrThrow } from '../../../infra-sk/modules/jsonOrThrow';
import { abbr, linkify, displayNotes } from '../am';
import * as paramset from '../paramset';
import {
Silence,
Incident,
Params,
RecentIncidentsResponse,
Note,
} from '../json';
const MAX_MATCHING_SILENCES_TO_DISPLAY = 50;
const PARAMS_TO_DISPLAY_COPY_ICON = ['abbr', 'alertname', 'app', 'bot'];
class State {
key: string = '';
id: string = '';
params: Params = {};
start: number = 0;
last_seen: number = 0;
active: boolean = false;
notes: Note[] = [];
}
export class IncidentSk extends HTMLElement {
private silences: Silence[] = [];
private displaySilencesWithComments: boolean = false;
private flaky: boolean = false;
private recently_expired_silence: boolean = false;
private state: State = {
key: '',
id: '',
params: {},
start: 0,
last_seen: 0,
active: false,
notes: [],
};
private static template = (ele: IncidentSk) => html`
<h2 class=${ele.classOfH2()}>
${ele.state.params.alertname} ${abbr(ele.state.params.abbr)}
${ele.displayRecentlyExpired(ele.recently_expired_silence)}
${ele.displayFlakiness(ele.flaky)}
</h2>
<section class="detail">
${ele.actionButtons()}
<table class="timing">
<tr>
<th>Started</th>
<td title=${new Date(ele.state.start * 1000).toLocaleString()}>
${diffDate(ele.state.start * 1000)}
</td>
</tr>
${ele.lastSeen()} ${ele.duration()}
</table>
<table class="params">
${ele.table()}
</table>
${displayNotes(ele.state.notes, ele.state.key, 'del-note')}
<section class="addNote">
<textarea rows="2" cols="80"></textarea>
<button @click=${ele.addNote}>Submit</button>
</section>
<section class="matchingSilences">
<span class="matchingSilencesHeaders">
<h3>Matching Silences</h3>
<checkbox-sk
?checked=${ele.displaySilencesWithComments}
@click=${ele.toggleSilencesWithComments}
label="Show only silences with comments">
</checkbox-sk>
</span>
${ele.matchingSilences()}
</section>
<section class="history">
<h3>History</h3>
${until(ele.history(), html`<div class="loading">Loading...</div>`)}
</section>
</section>
`;
/** @prop incident_state An Incident. */
get incident_state(): State {
return this.state;
}
set incident_state(val: State) {
this.state = val;
this._render();
}
/** @prop incident_silences The list of active silences. */
get incident_silences(): Silence[] {
return this.silences;
}
set incident_silences(val: Silence[]) {
this.silences = val;
this._render();
}
/** @prop recently_expired_silence Whether silence recently expired. */
get incident_has_recently_expired_silence(): boolean {
return this.recently_expired_silence;
}
set incident_has_recently_expired_silence(val: boolean) {
// No need to render again if value is same as old value.
if (val !== this.recently_expired_silence) {
this.recently_expired_silence = val;
this._render();
}
}
/** @prop flaky Whether this incident has been flaky. */
get incident_flaky(): boolean {
return this.flaky;
}
set incident_flaky(val: boolean) {
// No need to render again if value is same as old value.
if (val !== this.flaky) {
this.flaky = val;
this._render();
}
}
private classOfH2(): string {
if (!this.state.active) {
return 'inactive';
}
if (this.state.params.assigned_to) {
return 'assigned';
}
return '';
}
private table(): TemplateResult[] {
const params = this.state.params;
const keys = Object.keys(params);
keys.sort();
return keys
.filter((k) => !k.startsWith('__'))
.map(
(k) => html`
<tr>
<th>${k}</th>
<td>
<span class="respect-newlines">${linkify(params[k])}</span>
${this.maybeDisplayCopyIcon(k)}
</td>
</tr>
`
);
}
private maybeDisplayCopyIcon(k: string): TemplateResult {
if (PARAMS_TO_DISPLAY_COPY_ICON.includes(k)) {
return html`<clipboard-sk value=${this.state.params[k]}></clipboard-sk>`;
}
return html``;
}
private actionButtons(): TemplateResult {
if (this.state.active) {
let assignToOwnerButton = html``;
if (this.state.params.owner) {
assignToOwnerButton = html`<button @click=${this.assignToOwner}>
Assign to Owner
</button>`;
}
return html`<section class="assign">
<button @click=${this.take}>Take</button>
${assignToOwnerButton}
<button @click=${this.assign}>Assign</button>
</section>`;
}
return html``;
}
private matchingSilences(): TemplateResult[] {
if (this.hasAttribute('minimized')) {
return [];
}
// Filter out silences whose paramsets do not match and
// which have no notes if displaySilencesWithComments is true.
const filteredSilences = this.silences.filter(
(silence: Silence) =>
paramset.match(silence.param_set, this.state.params) &&
!(
this.displaySilencesWithComments &&
this.doesSilenceHaveNoNotes(silence)
)
);
const ret = filteredSilences
.slice(0, MAX_MATCHING_SILENCES_TO_DISPLAY)
.map(
(silence: Silence) =>
html`<silence-sk
.silence_state=${silence}
collapsable
collapsed></silence-sk>`
);
if (!ret.length) {
ret.push(html`<div class="nosilences">None</div>`);
}
return ret;
}
private doesSilenceHaveNoNotes(silence: Silence): boolean {
return (
!silence.notes ||
(silence.notes.length === 1 && silence.notes[0].text === '')
);
}
private lastSeen(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Last Seen</th>
<td title=${new Date(this.state.last_seen * 1000).toLocaleString()}>
${diffDate(this.state.last_seen * 1000)}
</td>
</tr>`;
}
private duration(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Duration</th>
<td>${strDuration(this.state.last_seen - this.state.start)}</td>
</tr>`;
}
private history(): Promise<any> {
if (
this.hasAttribute('minimized') ||
this.state.id === '' ||
this.state.key === ''
) {
return Promise.resolve();
}
return fetch(
`/_/recent_incidents?id=${this.state.id}&key=${this.state.key}`,
{
headers: {
'content-type': 'application/json',
},
credentials: 'include',
method: 'GET',
}
)
.then(jsonOrThrow)
.then((json: RecentIncidentsResponse) => {
const incidents = json.incidents || [];
this.incident_flaky = json.flaky;
this.incident_has_recently_expired_silence =
json.recently_expired_silence;
return incidents.map( | (i: Incident) =>
html`<incident-sk .incident_state=${i} minimized></incident-sk>`
);
})
.catch(errorMessage); | random_line_split |
|
incident-sk.ts | key: "12312123123",
* index: 0,
* }
* </pre>
*
* @evt take Sent when the user wants the incident assigned to themselves.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
* @evt assign Sent when the user wants to assign the incident to someone else.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
*/
import { define } from '../../../elements-sk/modules/define';
import '../../../elements-sk/modules/icons/alarm-off-icon-sk';
import '../../../elements-sk/modules/icons/delete-icon-sk';
import '../../../elements-sk/modules/icons/thumbs-up-down-icon-sk';
import '../../../infra-sk/modules/clipboard-sk';
import '../silence-sk';
import { $$ } from '../../../infra-sk/modules/dom';
import { diffDate, strDuration } from '../../../infra-sk/modules/human';
import { errorMessage } from '../../../elements-sk/modules/errorMessage';
import { html, render, TemplateResult } from 'lit-html';
import { until } from 'lit-html/directives/until';
import { jsonOrThrow } from '../../../infra-sk/modules/jsonOrThrow';
import { abbr, linkify, displayNotes } from '../am';
import * as paramset from '../paramset';
import {
Silence,
Incident,
Params,
RecentIncidentsResponse,
Note,
} from '../json';
const MAX_MATCHING_SILENCES_TO_DISPLAY = 50;
const PARAMS_TO_DISPLAY_COPY_ICON = ['abbr', 'alertname', 'app', 'bot'];
class State {
key: string = '';
id: string = '';
params: Params = {};
start: number = 0;
last_seen: number = 0;
active: boolean = false;
notes: Note[] = [];
}
export class IncidentSk extends HTMLElement {
private silences: Silence[] = [];
private displaySilencesWithComments: boolean = false;
private flaky: boolean = false;
private recently_expired_silence: boolean = false;
private state: State = {
key: '',
id: '',
params: {},
start: 0,
last_seen: 0,
active: false,
notes: [],
};
private static template = (ele: IncidentSk) => html`
<h2 class=${ele.classOfH2()}>
${ele.state.params.alertname} ${abbr(ele.state.params.abbr)}
${ele.displayRecentlyExpired(ele.recently_expired_silence)}
${ele.displayFlakiness(ele.flaky)}
</h2>
<section class="detail">
${ele.actionButtons()}
<table class="timing">
<tr>
<th>Started</th>
<td title=${new Date(ele.state.start * 1000).toLocaleString()}>
${diffDate(ele.state.start * 1000)}
</td>
</tr>
${ele.lastSeen()} ${ele.duration()}
</table>
<table class="params">
${ele.table()}
</table>
${displayNotes(ele.state.notes, ele.state.key, 'del-note')}
<section class="addNote">
<textarea rows="2" cols="80"></textarea>
<button @click=${ele.addNote}>Submit</button>
</section>
<section class="matchingSilences">
<span class="matchingSilencesHeaders">
<h3>Matching Silences</h3>
<checkbox-sk
?checked=${ele.displaySilencesWithComments}
@click=${ele.toggleSilencesWithComments}
label="Show only silences with comments">
</checkbox-sk>
</span>
${ele.matchingSilences()}
</section>
<section class="history">
<h3>History</h3>
${until(ele.history(), html`<div class="loading">Loading...</div>`)}
</section>
</section>
`;
/** @prop incident_state An Incident. */
get incident_state(): State {
return this.state;
}
set incident_state(val: State) {
this.state = val;
this._render();
}
/** @prop incident_silences The list of active silences. */
get incident_silences(): Silence[] {
return this.silences;
}
set | (val: Silence[]) {
this.silences = val;
this._render();
}
/** @prop recently_expired_silence Whether silence recently expired. */
get incident_has_recently_expired_silence(): boolean {
return this.recently_expired_silence;
}
set incident_has_recently_expired_silence(val: boolean) {
// No need to render again if value is same as old value.
if (val !== this.recently_expired_silence) {
this.recently_expired_silence = val;
this._render();
}
}
/** @prop flaky Whether this incident has been flaky. */
get incident_flaky(): boolean {
return this.flaky;
}
set incident_flaky(val: boolean) {
// No need to render again if value is same as old value.
if (val !== this.flaky) {
this.flaky = val;
this._render();
}
}
private classOfH2(): string {
if (!this.state.active) {
return 'inactive';
}
if (this.state.params.assigned_to) {
return 'assigned';
}
return '';
}
private table(): TemplateResult[] {
const params = this.state.params;
const keys = Object.keys(params);
keys.sort();
return keys
.filter((k) => !k.startsWith('__'))
.map(
(k) => html`
<tr>
<th>${k}</th>
<td>
<span class="respect-newlines">${linkify(params[k])}</span>
${this.maybeDisplayCopyIcon(k)}
</td>
</tr>
`
);
}
private maybeDisplayCopyIcon(k: string): TemplateResult {
if (PARAMS_TO_DISPLAY_COPY_ICON.includes(k)) {
return html`<clipboard-sk value=${this.state.params[k]}></clipboard-sk>`;
}
return html``;
}
private actionButtons(): TemplateResult {
if (this.state.active) {
let assignToOwnerButton = html``;
if (this.state.params.owner) {
assignToOwnerButton = html`<button @click=${this.assignToOwner}>
Assign to Owner
</button>`;
}
return html`<section class="assign">
<button @click=${this.take}>Take</button>
${assignToOwnerButton}
<button @click=${this.assign}>Assign</button>
</section>`;
}
return html``;
}
private matchingSilences(): TemplateResult[] {
if (this.hasAttribute('minimized')) {
return [];
}
// Filter out silences whose paramsets do not match and
// which have no notes if displaySilencesWithComments is true.
const filteredSilences = this.silences.filter(
(silence: Silence) =>
paramset.match(silence.param_set, this.state.params) &&
!(
this.displaySilencesWithComments &&
this.doesSilenceHaveNoNotes(silence)
)
);
const ret = filteredSilences
.slice(0, MAX_MATCHING_SILENCES_TO_DISPLAY)
.map(
(silence: Silence) =>
html`<silence-sk
.silence_state=${silence}
collapsable
collapsed></silence-sk>`
);
if (!ret.length) {
ret.push(html`<div class="nosilences">None</div>`);
}
return ret;
}
private doesSilenceHaveNoNotes(silence: Silence): boolean {
return (
!silence.notes ||
(silence.notes.length === 1 && silence.notes[0].text === '')
);
}
private lastSeen(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Last Seen</th>
<td title=${new Date(this.state.last_seen * 1000).toLocaleString()}>
${diffDate(this.state.last_seen * 1000)}
</td>
</tr>`;
}
private duration(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Duration</th>
<td>${strDuration(this.state.last_seen - this.state.start)}</td>
</tr>`;
}
private history(): Promise<any> {
if (
this.hasAttribute('minimized') ||
this.state.id === '' ||
this.state.key === ''
) {
return Promise.resolve();
}
return fetch(
`/_/recent_incidents?id=${this.state.id}&key=${this.state.key}`,
{
headers: {
'content-type': 'application/json',
},
credentials: 'include',
method: 'GET',
}
)
.then(jsonOrThrow)
. | incident_silences | identifier_name |
incident-sk.ts | key: "12312123123",
* index: 0,
* }
* </pre>
*
* @evt take Sent when the user wants the incident assigned to themselves.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
* @evt assign Sent when the user wants to assign the incident to someone else.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
*/
import { define } from '../../../elements-sk/modules/define';
import '../../../elements-sk/modules/icons/alarm-off-icon-sk';
import '../../../elements-sk/modules/icons/delete-icon-sk';
import '../../../elements-sk/modules/icons/thumbs-up-down-icon-sk';
import '../../../infra-sk/modules/clipboard-sk';
import '../silence-sk';
import { $$ } from '../../../infra-sk/modules/dom';
import { diffDate, strDuration } from '../../../infra-sk/modules/human';
import { errorMessage } from '../../../elements-sk/modules/errorMessage';
import { html, render, TemplateResult } from 'lit-html';
import { until } from 'lit-html/directives/until';
import { jsonOrThrow } from '../../../infra-sk/modules/jsonOrThrow';
import { abbr, linkify, displayNotes } from '../am';
import * as paramset from '../paramset';
import {
Silence,
Incident,
Params,
RecentIncidentsResponse,
Note,
} from '../json';
const MAX_MATCHING_SILENCES_TO_DISPLAY = 50;
const PARAMS_TO_DISPLAY_COPY_ICON = ['abbr', 'alertname', 'app', 'bot'];
class State {
key: string = '';
id: string = '';
params: Params = {};
start: number = 0;
last_seen: number = 0;
active: boolean = false;
notes: Note[] = [];
}
export class IncidentSk extends HTMLElement {
private silences: Silence[] = [];
private displaySilencesWithComments: boolean = false;
private flaky: boolean = false;
private recently_expired_silence: boolean = false;
private state: State = {
key: '',
id: '',
params: {},
start: 0,
last_seen: 0,
active: false,
notes: [],
};
private static template = (ele: IncidentSk) => html`
<h2 class=${ele.classOfH2()}>
${ele.state.params.alertname} ${abbr(ele.state.params.abbr)}
${ele.displayRecentlyExpired(ele.recently_expired_silence)}
${ele.displayFlakiness(ele.flaky)}
</h2>
<section class="detail">
${ele.actionButtons()}
<table class="timing">
<tr>
<th>Started</th>
<td title=${new Date(ele.state.start * 1000).toLocaleString()}>
${diffDate(ele.state.start * 1000)}
</td>
</tr>
${ele.lastSeen()} ${ele.duration()}
</table>
<table class="params">
${ele.table()}
</table>
${displayNotes(ele.state.notes, ele.state.key, 'del-note')}
<section class="addNote">
<textarea rows="2" cols="80"></textarea>
<button @click=${ele.addNote}>Submit</button>
</section>
<section class="matchingSilences">
<span class="matchingSilencesHeaders">
<h3>Matching Silences</h3>
<checkbox-sk
?checked=${ele.displaySilencesWithComments}
@click=${ele.toggleSilencesWithComments}
label="Show only silences with comments">
</checkbox-sk>
</span>
${ele.matchingSilences()}
</section>
<section class="history">
<h3>History</h3>
${until(ele.history(), html`<div class="loading">Loading...</div>`)}
</section>
</section>
`;
/** @prop incident_state An Incident. */
get incident_state(): State {
return this.state;
}
set incident_state(val: State) {
this.state = val;
this._render();
}
/** @prop incident_silences The list of active silences. */
get incident_silences(): Silence[] {
return this.silences;
}
set incident_silences(val: Silence[]) {
this.silences = val;
this._render();
}
/** @prop recently_expired_silence Whether silence recently expired. */
get incident_has_recently_expired_silence(): boolean {
return this.recently_expired_silence;
}
set incident_has_recently_expired_silence(val: boolean) {
// No need to render again if value is same as old value.
if (val !== this.recently_expired_silence) {
this.recently_expired_silence = val;
this._render();
}
}
/** @prop flaky Whether this incident has been flaky. */
get incident_flaky(): boolean {
return this.flaky;
}
set incident_flaky(val: boolean) |
private classOfH2(): string {
if (!this.state.active) {
return 'inactive';
}
if (this.state.params.assigned_to) {
return 'assigned';
}
return '';
}
private table(): TemplateResult[] {
const params = this.state.params;
const keys = Object.keys(params);
keys.sort();
return keys
.filter((k) => !k.startsWith('__'))
.map(
(k) => html`
<tr>
<th>${k}</th>
<td>
<span class="respect-newlines">${linkify(params[k])}</span>
${this.maybeDisplayCopyIcon(k)}
</td>
</tr>
`
);
}
private maybeDisplayCopyIcon(k: string): TemplateResult {
if (PARAMS_TO_DISPLAY_COPY_ICON.includes(k)) {
return html`<clipboard-sk value=${this.state.params[k]}></clipboard-sk>`;
}
return html``;
}
private actionButtons(): TemplateResult {
if (this.state.active) {
let assignToOwnerButton = html``;
if (this.state.params.owner) {
assignToOwnerButton = html`<button @click=${this.assignToOwner}>
Assign to Owner
</button>`;
}
return html`<section class="assign">
<button @click=${this.take}>Take</button>
${assignToOwnerButton}
<button @click=${this.assign}>Assign</button>
</section>`;
}
return html``;
}
private matchingSilences(): TemplateResult[] {
if (this.hasAttribute('minimized')) {
return [];
}
// Filter out silences whose paramsets do not match and
// which have no notes if displaySilencesWithComments is true.
const filteredSilences = this.silences.filter(
(silence: Silence) =>
paramset.match(silence.param_set, this.state.params) &&
!(
this.displaySilencesWithComments &&
this.doesSilenceHaveNoNotes(silence)
)
);
const ret = filteredSilences
.slice(0, MAX_MATCHING_SILENCES_TO_DISPLAY)
.map(
(silence: Silence) =>
html`<silence-sk
.silence_state=${silence}
collapsable
collapsed></silence-sk>`
);
if (!ret.length) {
ret.push(html`<div class="nosilences">None</div>`);
}
return ret;
}
private doesSilenceHaveNoNotes(silence: Silence): boolean {
return (
!silence.notes ||
(silence.notes.length === 1 && silence.notes[0].text === '')
);
}
private lastSeen(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Last Seen</th>
<td title=${new Date(this.state.last_seen * 1000).toLocaleString()}>
${diffDate(this.state.last_seen * 1000)}
</td>
</tr>`;
}
private duration(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Duration</th>
<td>${strDuration(this.state.last_seen - this.state.start)}</td>
</tr>`;
}
private history(): Promise<any> {
if (
this.hasAttribute('minimized') ||
this.state.id === '' ||
this.state.key === ''
) {
return Promise.resolve();
}
return fetch(
`/_/recent_incidents?id=${this.state.id}&key=${this.state.key}`,
{
headers: {
'content-type': 'application/json',
},
credentials: 'include',
method: 'GET',
}
)
.then(jsonOrThrow)
. | {
// No need to render again if value is same as old value.
if (val !== this.flaky) {
this.flaky = val;
this._render();
}
} | identifier_body |
incident-sk.ts | key: "12312123123",
* index: 0,
* }
* </pre>
*
* @evt take Sent when the user wants the incident assigned to themselves.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
* @evt assign Sent when the user wants to assign the incident to someone else.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
*/
import { define } from '../../../elements-sk/modules/define';
import '../../../elements-sk/modules/icons/alarm-off-icon-sk';
import '../../../elements-sk/modules/icons/delete-icon-sk';
import '../../../elements-sk/modules/icons/thumbs-up-down-icon-sk';
import '../../../infra-sk/modules/clipboard-sk';
import '../silence-sk';
import { $$ } from '../../../infra-sk/modules/dom';
import { diffDate, strDuration } from '../../../infra-sk/modules/human';
import { errorMessage } from '../../../elements-sk/modules/errorMessage';
import { html, render, TemplateResult } from 'lit-html';
import { until } from 'lit-html/directives/until';
import { jsonOrThrow } from '../../../infra-sk/modules/jsonOrThrow';
import { abbr, linkify, displayNotes } from '../am';
import * as paramset from '../paramset';
import {
Silence,
Incident,
Params,
RecentIncidentsResponse,
Note,
} from '../json';
const MAX_MATCHING_SILENCES_TO_DISPLAY = 50;
const PARAMS_TO_DISPLAY_COPY_ICON = ['abbr', 'alertname', 'app', 'bot'];
class State {
key: string = '';
id: string = '';
params: Params = {};
start: number = 0;
last_seen: number = 0;
active: boolean = false;
notes: Note[] = [];
}
export class IncidentSk extends HTMLElement {
private silences: Silence[] = [];
private displaySilencesWithComments: boolean = false;
private flaky: boolean = false;
private recently_expired_silence: boolean = false;
private state: State = {
key: '',
id: '',
params: {},
start: 0,
last_seen: 0,
active: false,
notes: [],
};
private static template = (ele: IncidentSk) => html`
<h2 class=${ele.classOfH2()}>
${ele.state.params.alertname} ${abbr(ele.state.params.abbr)}
${ele.displayRecentlyExpired(ele.recently_expired_silence)}
${ele.displayFlakiness(ele.flaky)}
</h2>
<section class="detail">
${ele.actionButtons()}
<table class="timing">
<tr>
<th>Started</th>
<td title=${new Date(ele.state.start * 1000).toLocaleString()}>
${diffDate(ele.state.start * 1000)}
</td>
</tr>
${ele.lastSeen()} ${ele.duration()}
</table>
<table class="params">
${ele.table()}
</table>
${displayNotes(ele.state.notes, ele.state.key, 'del-note')}
<section class="addNote">
<textarea rows="2" cols="80"></textarea>
<button @click=${ele.addNote}>Submit</button>
</section>
<section class="matchingSilences">
<span class="matchingSilencesHeaders">
<h3>Matching Silences</h3>
<checkbox-sk
?checked=${ele.displaySilencesWithComments}
@click=${ele.toggleSilencesWithComments}
label="Show only silences with comments">
</checkbox-sk>
</span>
${ele.matchingSilences()}
</section>
<section class="history">
<h3>History</h3>
${until(ele.history(), html`<div class="loading">Loading...</div>`)}
</section>
</section>
`;
/** @prop incident_state An Incident. */
get incident_state(): State {
return this.state;
}
set incident_state(val: State) {
this.state = val;
this._render();
}
/** @prop incident_silences The list of active silences. */
get incident_silences(): Silence[] {
return this.silences;
}
set incident_silences(val: Silence[]) {
this.silences = val;
this._render();
}
/** @prop recently_expired_silence Whether silence recently expired. */
get incident_has_recently_expired_silence(): boolean {
return this.recently_expired_silence;
}
set incident_has_recently_expired_silence(val: boolean) {
// No need to render again if value is same as old value.
if (val !== this.recently_expired_silence) |
}
/** @prop flaky Whether this incident has been flaky. */
get incident_flaky(): boolean {
return this.flaky;
}
set incident_flaky(val: boolean) {
// No need to render again if value is same as old value.
if (val !== this.flaky) {
this.flaky = val;
this._render();
}
}
private classOfH2(): string {
if (!this.state.active) {
return 'inactive';
}
if (this.state.params.assigned_to) {
return 'assigned';
}
return '';
}
private table(): TemplateResult[] {
const params = this.state.params;
const keys = Object.keys(params);
keys.sort();
return keys
.filter((k) => !k.startsWith('__'))
.map(
(k) => html`
<tr>
<th>${k}</th>
<td>
<span class="respect-newlines">${linkify(params[k])}</span>
${this.maybeDisplayCopyIcon(k)}
</td>
</tr>
`
);
}
private maybeDisplayCopyIcon(k: string): TemplateResult {
if (PARAMS_TO_DISPLAY_COPY_ICON.includes(k)) {
return html`<clipboard-sk value=${this.state.params[k]}></clipboard-sk>`;
}
return html``;
}
private actionButtons(): TemplateResult {
if (this.state.active) {
let assignToOwnerButton = html``;
if (this.state.params.owner) {
assignToOwnerButton = html`<button @click=${this.assignToOwner}>
Assign to Owner
</button>`;
}
return html`<section class="assign">
<button @click=${this.take}>Take</button>
${assignToOwnerButton}
<button @click=${this.assign}>Assign</button>
</section>`;
}
return html``;
}
private matchingSilences(): TemplateResult[] {
if (this.hasAttribute('minimized')) {
return [];
}
// Filter out silences whose paramsets do not match and
// which have no notes if displaySilencesWithComments is true.
const filteredSilences = this.silences.filter(
(silence: Silence) =>
paramset.match(silence.param_set, this.state.params) &&
!(
this.displaySilencesWithComments &&
this.doesSilenceHaveNoNotes(silence)
)
);
const ret = filteredSilences
.slice(0, MAX_MATCHING_SILENCES_TO_DISPLAY)
.map(
(silence: Silence) =>
html`<silence-sk
.silence_state=${silence}
collapsable
collapsed></silence-sk>`
);
if (!ret.length) {
ret.push(html`<div class="nosilences">None</div>`);
}
return ret;
}
private doesSilenceHaveNoNotes(silence: Silence): boolean {
return (
!silence.notes ||
(silence.notes.length === 1 && silence.notes[0].text === '')
);
}
private lastSeen(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Last Seen</th>
<td title=${new Date(this.state.last_seen * 1000).toLocaleString()}>
${diffDate(this.state.last_seen * 1000)}
</td>
</tr>`;
}
private duration(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Duration</th>
<td>${strDuration(this.state.last_seen - this.state.start)}</td>
</tr>`;
}
private history(): Promise<any> {
if (
this.hasAttribute('minimized') ||
this.state.id === '' ||
this.state.key === ''
) {
return Promise.resolve();
}
return fetch(
`/_/recent_incidents?id=${this.state.id}&key=${this.state.key}`,
{
headers: {
'content-type': 'application/json',
},
credentials: 'include',
method: 'GET',
}
)
.then(jsonOrThrow)
. | {
this.recently_expired_silence = val;
this._render();
} | conditional_block |
parse.go | ) {
p := newParser(input)
p.lex.seriesDesc = true
return p.parseSeriesDesc()
}
// parseSeriesDesc parses a description of a time series into its metric and value sequence.
func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err error) {
defer p.recover(&err)
m = p.metric()
const ctx = "series values"
for {
for p.peek().typ == itemSpace {
p.next()
}
if p.peek().typ == itemEOF {
break
}
// Extract blanks.
if p.peek().typ == itemBlank {
p.next()
times := uint64(1)
if p.peek().typ == itemTimes {
p.next()
times, err = strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
}
for i := uint64(0); i < times; i++ {
vals = append(vals, sequenceValue{omitted: true})
}
// This is to ensure that there is a space between this and the next number.
// This is especially required if the next number is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
continue
}
// Extract values.
sign := 1.0
if t := p.peek().typ; t == itemSUB || t == itemADD {
if p.next().typ == itemSUB {
sign = -1
}
}
var k float64
if t := p.peek().typ; t == itemNumber {
k = sign * p.number(p.expect(itemNumber, ctx).val)
} else if t == itemIdentifier && p.peek().val == "stale" {
p.next()
k = math.Float64frombits(value.StaleNaN)
} else {
p.errorf("expected number or 'stale' in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
vals = append(vals, sequenceValue{
value: k,
})
// If there are no offset repetitions specified, proceed with the next value.
if t := p.peek(); t.typ == itemSpace {
// This ensures there is a space between every value.
continue
} else if t.typ == itemEOF {
break
} else if t.typ != itemADD && t.typ != itemSUB {
p.errorf("expected next value or relative expansion in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
// Expand the repeated offsets into values.
sign = 1.0
if p.next().typ == itemSUB {
sign = -1.0
}
offset := sign * p.number(p.expect(itemNumber, ctx).val)
p.expect(itemTimes, ctx)
times, err := strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
for i := uint64(0); i < times; i++ {
k += offset
vals = append(vals, sequenceValue{
value: k,
})
}
// This is to ensure that there is a space between this expanding notation
// and the next number. This is especially required if the next number
// is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
}
return m, vals, nil
}
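// Illustrative sketch (not part of the original file): the value notation the
// loop above accepts. "_" marks an omitted sample and "3+1x4" appends four
// more values offset by +1, so the description below yields 1 2 _ 3 4 5 6 7.
// The function name is hypothetical; it simply exercises parseSeriesDesc.
func exampleSeriesDesc() {
	m, vals, err := parseSeriesDesc(`some_metric{job="a"} 1 2 _ 3+1x4`)
	if err != nil {
		panic(err)
	}
	fmt.Println(m, vals)
}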
// next returns the next token.
func (p *parser) next() item {
if p.peekCount > 0 {
p.peekCount--
} else {
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
}
if p.token[p.peekCount].typ == itemError {
p.errorf("%s", p.token[p.peekCount].val)
}
return p.token[p.peekCount]
}
// peek returns but does not consume the next token.
func (p *parser) peek() item {
if p.peekCount > 0 {
return p.token[p.peekCount-1]
}
p.peekCount = 1
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
return p.token[0]
}
// backup backs the input stream up one token.
func (p *parser) backup() {
p.peekCount++
}
// errorf formats the error and terminates processing.
func (p *parser) errorf(format string, args ...interface{}) {
p.error(fmt.Errorf(format, args...))
}
// error terminates processing.
func (p *parser) error(err error) {
perr := &ParseErr{
Line: p.lex.lineNumber(),
Pos: p.lex.linePosition(),
Err: err,
}
if strings.Count(strings.TrimSpace(p.lex.input), "\n") == 0 {
perr.Line = 0
}
panic(perr)
}
// expect consumes the next token and guarantees it has the required type.
func (p *parser) expect(exp ItemType, context string) item {
token := p.next()
if token.typ != exp {
p.errorf("unexpected %s in %s, expected %s", token.desc(), context, exp.desc())
}
return token
}
// expectOneOf consumes the next token and guarantees it has one of the required types.
func (p *parser) expectOneOf(exp1, exp2 ItemType, context string) item {
token := p.next()
if token.typ != exp1 && token.typ != exp2 {
p.errorf("unexpected %s in %s, expected %s or %s", token.desc(), context, exp1.desc(), exp2.desc())
}
return token
}
var errUnexpected = fmt.Errorf("unexpected error")
// recover is the handler that turns panics into returns from the top level of Parse.
func (p *parser) recover(errp *error) {
e := recover()
if e != nil {
if _, ok := e.(runtime.Error); ok {
// Print the stack trace but do not inhibit the running application.
buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)]
fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
*errp = errUnexpected
} else {
*errp = e.(error)
}
}
p.lex.close()
}
// number parses a number.
func (p *parser) number(val string) float64 {
n, err := strconv.ParseInt(val, 0, 64)
f := float64(n)
if err != nil {
f, err = strconv.ParseFloat(val, 64)
}
if err != nil {
p.errorf("error parsing number: %s", err)
}
return f
}
// labels parses a list of labelnames.
//
// '(' <label_name>, ... ')'
//
func (p *parser) labels() []string {
const ctx = "grouping opts"
p.expect(itemLeftParen, ctx)
labels := []string{}
if p.peek().typ != itemRightParen {
for {
id := p.next()
if !isLabel(id.val) {
p.errorf("unexpected %s in %s, expected label", id.desc(), ctx)
}
labels = append(labels, id.val)
if p.peek().typ != itemComma {
break
}
p.next()
}
}
p.expect(itemRightParen, ctx)
return labels
}
// labelSet parses a set of label matchers
//
// '{' [ <labelname> '=' <match_string>, ... ] '}'
//
func (p *parser) labelSet() labels.Labels {
set := []labels.Label{}
for _, lm := range p.labelMatchers(itemEQL) {
set = append(set, labels.Label{Name: lm.Name, Value: lm.Value})
}
return labels.New(set...)
}
// labelMatchers parses a set of label matchers.
//
// '{' [ <labelname> <match_op> <match_string>, ... ] '}'
//
// If no 'operators' are given, then all operator types are valid.
func (p *parser) labelMatchers(operators ...ItemType) []*labels.Matcher {
const ctx = "label matching"
matchers := []*labels.Matcher{}
p.expect(itemLeftBrace, ctx)
// Check if no matchers are provided.
if p.peek().typ == itemRightBrace {
p.next()
return matchers
}
for {
label := p.expect(itemIdentifier, ctx)
op := p.next().typ
if !op.isOperator() { | p.errorf("expected label matching operator but got %s", op)
}
var validOp = false
for _, allowedOp := range operators { | random_line_split |
|
parse.go | newParser(input)
defer p.recover(&err)
m = p.metric()
if p.peek().typ != itemEOF {
p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:])
}
return m, nil
}
// newParser returns a new parser.
func newParser(input string) *parser {
p := &parser{
lex: lex(input),
}
return p
}
// sequenceValue is an omittable value in a sequence of time series values.
type sequenceValue struct {
value float64
omitted bool
}
func (v sequenceValue) String() string {
if v.omitted {
return "_"
}
return fmt.Sprintf("%f", v.value)
}
// parseSeriesDesc parses the description of a time series.
func parseSeriesDesc(input string) (labels.Labels, []sequenceValue, error) {
p := newParser(input)
p.lex.seriesDesc = true
return p.parseSeriesDesc()
}
// parseSeriesDesc parses a description of a time series into its metric and value sequence.
func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err error) {
defer p.recover(&err)
m = p.metric()
const ctx = "series values"
for {
for p.peek().typ == itemSpace {
p.next()
}
if p.peek().typ == itemEOF {
break
}
// Extract blanks.
if p.peek().typ == itemBlank |
// Extract values.
sign := 1.0
if t := p.peek().typ; t == itemSUB || t == itemADD {
if p.next().typ == itemSUB {
sign = -1
}
}
var k float64
if t := p.peek().typ; t == itemNumber {
k = sign * p.number(p.expect(itemNumber, ctx).val)
} else if t == itemIdentifier && p.peek().val == "stale" {
p.next()
k = math.Float64frombits(value.StaleNaN)
} else {
p.errorf("expected number or 'stale' in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
vals = append(vals, sequenceValue{
value: k,
})
// If there are no offset repetitions specified, proceed with the next value.
if t := p.peek(); t.typ == itemSpace {
// This ensures there is a space between every value.
continue
} else if t.typ == itemEOF {
break
} else if t.typ != itemADD && t.typ != itemSUB {
p.errorf("expected next value or relative expansion in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
// Expand the repeated offsets into values.
sign = 1.0
if p.next().typ == itemSUB {
sign = -1.0
}
offset := sign * p.number(p.expect(itemNumber, ctx).val)
p.expect(itemTimes, ctx)
times, err := strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
for i := uint64(0); i < times; i++ {
k += offset
vals = append(vals, sequenceValue{
value: k,
})
}
// This is to ensure that there is a space between this expanding notation
// and the next number. This is especially required if the next number
// is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
}
return m, vals, nil
}
// next returns the next token.
func (p *parser) next() item {
if p.peekCount > 0 {
p.peekCount--
} else {
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
}
if p.token[p.peekCount].typ == itemError {
p.errorf("%s", p.token[p.peekCount].val)
}
return p.token[p.peekCount]
}
// peek returns but does not consume the next token.
func (p *parser) peek() item {
if p.peekCount > 0 {
return p.token[p.peekCount-1]
}
p.peekCount = 1
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
return p.token[0]
}
// backup backs the input stream up one token.
func (p *parser) backup() {
p.peekCount++
}
// errorf formats the error and terminates processing.
func (p *parser) errorf(format string, args ...interface{}) {
p.error(fmt.Errorf(format, args...))
}
// error terminates processing.
func (p *parser) error(err error) {
perr := &ParseErr{
Line: p.lex.lineNumber(),
Pos: p.lex.linePosition(),
Err: err,
}
if strings.Count(strings.TrimSpace(p.lex.input), "\n") == 0 {
perr.Line = 0
}
panic(perr)
}
// expect consumes the next token and guarantees it has the required type.
func (p *parser) expect(exp ItemType, context string) item {
token := p.next()
if token.typ != exp {
p.errorf("unexpected %s in %s, expected %s", token.desc(), context, exp.desc())
}
return token
}
// expectOneOf consumes the next token and guarantees it has one of the required types.
func (p *parser) expectOneOf(exp1, exp2 ItemType, context string) item {
token := p.next()
if token.typ != exp1 && token.typ != exp2 {
p.errorf("unexpected %s in %s, expected %s or %s", token.desc(), context, exp1.desc(), exp2.desc())
}
return token
}
var errUnexpected = fmt.Errorf("unexpected error")
// recover is the handler that turns panics into returns from the top level of Parse.
func (p *parser) recover(errp *error) {
e := recover()
if e != nil {
if _, ok := e.(runtime.Error); ok {
// Print the stack trace but do not inhibit the running application.
buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)]
fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
*errp = errUnexpected
} else {
*errp = e.(error)
}
}
p.lex.close()
}
// number parses a number.
func (p *parser) number(val string) float64 {
n, err := strconv.ParseInt(val, 0, 64)
f := float64(n)
if err != nil {
f, err = strconv.ParseFloat(val, 64)
}
if err != nil {
p.errorf("error parsing number: %s", err)
}
return f
}
// labels parses a list of labelnames.
//
// '(' <label_name>, ... ')'
//
func (p *parser) labels() []string {
const ctx = "grouping opts"
p.expect(itemLeftParen, ctx)
labels := []string{}
if p.peek().typ != itemRightParen {
for {
id := p.next()
if !isLabel(id.val) {
p.errorf("unexpected %s in %s, expected label", id.desc(), ctx)
}
labels = append(labels, id.val)
if p.peek().typ != itemComma {
break
}
p.next()
}
}
p.expect(itemRightParen, ctx)
return labels
}
// labelSet parses a set of label matchers
//
// '{' [ <labelname> '=' <match_string>, ... ] '}'
//
func (p *parser) labelSet() labels.Labels {
set := []labels.Label{}
for _, lm := range p.labelMatchers(itemEQL) {
set = append(set, labels.Label{Name: lm.Name, Value: lm.Value})
}
return labels.New(set...)
| {
p.next()
times := uint64(1)
if p.peek().typ == itemTimes {
p.next()
times, err = strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
}
for i := uint64(0); i < times; i++ {
vals = append(vals, sequenceValue{omitted: true})
}
// This is to ensure that there is a space between this and the next number.
// This is especially required if the next number is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
continue
} | conditional_block |
parse.go | newParser(input)
defer p.recover(&err)
m = p.metric()
if p.peek().typ != itemEOF {
p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:])
}
return m, nil
}
// newParser returns a new parser.
func newParser(input string) *parser {
p := &parser{
lex: lex(input),
}
return p
}
// sequenceValue is an omittable value in a sequence of time series values.
type sequenceValue struct {
value float64
omitted bool
}
func (v sequenceValue) String() string {
if v.omitted {
return "_"
}
return fmt.Sprintf("%f", v.value)
}
// parseSeriesDesc parses the description of a time series.
func parseSeriesDesc(input string) (labels.Labels, []sequenceValue, error) {
p := newParser(input)
p.lex.seriesDesc = true
return p.parseSeriesDesc()
}
// parseSeriesDesc parses a description of a time series into its metric and value sequence.
func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err error) {
defer p.recover(&err)
m = p.metric()
const ctx = "series values"
for {
for p.peek().typ == itemSpace {
p.next()
}
if p.peek().typ == itemEOF {
break
}
// Extract blanks.
if p.peek().typ == itemBlank {
p.next()
times := uint64(1)
if p.peek().typ == itemTimes {
p.next()
times, err = strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
}
for i := uint64(0); i < times; i++ {
vals = append(vals, sequenceValue{omitted: true})
}
// This is to ensure that there is a space between this and the next number.
// This is especially required if the next number is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
continue
}
// Extract values.
sign := 1.0
if t := p.peek().typ; t == itemSUB || t == itemADD {
if p.next().typ == itemSUB {
sign = -1
}
}
var k float64
if t := p.peek().typ; t == itemNumber {
k = sign * p.number(p.expect(itemNumber, ctx).val)
} else if t == itemIdentifier && p.peek().val == "stale" {
p.next()
k = math.Float64frombits(value.StaleNaN)
} else {
p.errorf("expected number or 'stale' in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
vals = append(vals, sequenceValue{
value: k,
})
// If there are no offset repetitions specified, proceed with the next value.
if t := p.peek(); t.typ == itemSpace {
// This ensures there is a space between every value.
continue
} else if t.typ == itemEOF {
break
} else if t.typ != itemADD && t.typ != itemSUB {
p.errorf("expected next value or relative expansion in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
// Expand the repeated offsets into values.
sign = 1.0
if p.next().typ == itemSUB {
sign = -1.0
}
offset := sign * p.number(p.expect(itemNumber, ctx).val)
p.expect(itemTimes, ctx)
times, err := strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
for i := uint64(0); i < times; i++ {
k += offset
vals = append(vals, sequenceValue{
value: k,
})
}
// This is to ensure that there is a space between this expanding notation
// and the next number. This is especially required if the next number
// is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
}
return m, vals, nil
}
// next returns the next token.
func (p *parser) next() item {
if p.peekCount > 0 {
p.peekCount--
} else {
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
}
if p.token[p.peekCount].typ == itemError {
p.errorf("%s", p.token[p.peekCount].val)
}
return p.token[p.peekCount]
}
// peek returns but does not consume the next token.
func (p *parser) peek() item {
if p.peekCount > 0 {
return p.token[p.peekCount-1]
}
p.peekCount = 1
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
return p.token[0]
}
// backup backs the input stream up one token.
func (p *parser) backup() {
p.peekCount++
}
// errorf formats the error and terminates processing.
func (p *parser) errorf(format string, args ...interface{}) {
p.error(fmt.Errorf(format, args...))
}
// error terminates processing.
func (p *parser) error(err error) {
perr := &ParseErr{
Line: p.lex.lineNumber(),
Pos: p.lex.linePosition(),
Err: err,
}
if strings.Count(strings.TrimSpace(p.lex.input), "\n") == 0 {
perr.Line = 0
}
panic(perr)
}
// expect consumes the next token and guarantees it has the required type.
func (p *parser) expect(exp ItemType, context string) item {
token := p.next()
if token.typ != exp {
p.errorf("unexpected %s in %s, expected %s", token.desc(), context, exp.desc())
}
return token
}
// expectOneOf consumes the next token and guarantees it has one of the required types.
func (p *parser) expectOneOf(exp1, exp2 ItemType, context string) item |
var errUnexpected = fmt.Errorf("unexpected error")
// recover is the handler that turns panics into returns from the top level of Parse.
func (p *parser) recover(errp *error) {
e := recover()
if e != nil {
if _, ok := e.(runtime.Error); ok {
// Print the stack trace but do not inhibit the running application.
buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)]
fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
*errp = errUnexpected
} else {
*errp = e.(error)
}
}
p.lex.close()
}
// number parses a number.
func (p *parser) number(val string) float64 {
n, err := strconv.ParseInt(val, 0, 64)
f := float64(n)
if err != nil {
f, err = strconv.ParseFloat(val, 64)
}
if err != nil {
p.errorf("error parsing number: %s", err)
}
return f
}
// labels parses a list of labelnames.
//
// '(' <label_name>, ... ')'
//
func (p *parser) labels() []string {
const ctx = "grouping opts"
p.expect(itemLeftParen, ctx)
labels := []string{}
if p.peek().typ != itemRightParen {
for {
id := p.next()
if !isLabel(id.val) {
p.errorf("unexpected %s in %s, expected label", id.desc(), ctx)
}
labels = append(labels, id.val)
if p.peek().typ != itemComma {
break
}
p.next()
}
}
p.expect(itemRightParen, ctx)
return labels
}
// labelSet parses a set of label matchers
//
// '{' [ <labelname> '=' <match_string>, ... ] '}'
//
func (p *parser) labelSet() labels.Labels {
set := []labels.Label{}
for _, lm := range p.labelMatchers(itemEQL) {
set = append(set, labels.Label{Name: lm.Name, Value: lm.Value})
}
return labels.New(set...)
| {
token := p.next()
if token.typ != exp1 && token.typ != exp2 {
p.errorf("unexpected %s in %s, expected %s or %s", token.desc(), context, exp1.desc(), exp2.desc())
}
return token
} | identifier_body |
parse.go | := newParser(input)
defer p.recover(&err)
m = p.metric()
if p.peek().typ != itemEOF {
p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:])
}
return m, nil
}
// newParser returns a new parser.
func newParser(input string) *parser {
p := &parser{
lex: lex(input),
}
return p
}
// sequenceValue is an omittable value in a sequence of time series values.
type sequenceValue struct {
value float64
omitted bool
}
func (v sequenceValue) String() string {
if v.omitted {
return "_"
}
return fmt.Sprintf("%f", v.value)
}
// parseSeriesDesc parses the description of a time series.
func parseSeriesDesc(input string) (labels.Labels, []sequenceValue, error) {
p := newParser(input)
p.lex.seriesDesc = true
return p.parseSeriesDesc()
}
// parseSeriesDesc parses a description of a time series into its metric and value sequence.
func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err error) {
defer p.recover(&err)
m = p.metric()
const ctx = "series values"
for {
for p.peek().typ == itemSpace {
p.next()
}
if p.peek().typ == itemEOF {
break
}
// Extract blanks.
if p.peek().typ == itemBlank {
p.next()
times := uint64(1)
if p.peek().typ == itemTimes {
p.next()
times, err = strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
}
for i := uint64(0); i < times; i++ {
vals = append(vals, sequenceValue{omitted: true})
}
// This is to ensure that there is a space between this and the next number.
// This is especially required if the next number is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
continue
}
// Extract values.
sign := 1.0
if t := p.peek().typ; t == itemSUB || t == itemADD {
if p.next().typ == itemSUB {
sign = -1
}
}
var k float64
if t := p.peek().typ; t == itemNumber {
k = sign * p.number(p.expect(itemNumber, ctx).val)
} else if t == itemIdentifier && p.peek().val == "stale" {
p.next()
k = math.Float64frombits(value.StaleNaN)
} else {
p.errorf("expected number or 'stale' in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
vals = append(vals, sequenceValue{
value: k,
})
// If there are no offset repetitions specified, proceed with the next value.
if t := p.peek(); t.typ == itemSpace {
// This ensures there is a space between every value.
continue
} else if t.typ == itemEOF {
break
} else if t.typ != itemADD && t.typ != itemSUB {
p.errorf("expected next value or relative expansion in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
// Expand the repeated offsets into values.
sign = 1.0
if p.next().typ == itemSUB {
sign = -1.0
}
offset := sign * p.number(p.expect(itemNumber, ctx).val)
p.expect(itemTimes, ctx)
times, err := strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
for i := uint64(0); i < times; i++ {
k += offset
vals = append(vals, sequenceValue{
value: k,
})
}
// This is to ensure that there is a space between this expanding notation
// and the next number. This is especially required if the next number
// is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
}
return m, vals, nil
}
// next returns the next token.
func (p *parser) next() item {
if p.peekCount > 0 {
p.peekCount--
} else {
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
}
if p.token[p.peekCount].typ == itemError {
p.errorf("%s", p.token[p.peekCount].val)
}
return p.token[p.peekCount]
}
// peek returns but does not consume the next token.
func (p *parser) peek() item {
if p.peekCount > 0 {
return p.token[p.peekCount-1]
}
p.peekCount = 1
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
return p.token[0]
}
// backup backs the input stream up one token.
func (p *parser) backup() {
p.peekCount++
}
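// (Editor's note: next, peek, and backup together implement a single-token
// lookahead: peek reads ahead into token[0] and sets peekCount to 1, next
// consumes the buffered token by decrementing peekCount, and backup re-arms
// it by incrementing peekCount again.)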
// errorf formats the error and terminates processing.
func (p *parser) errorf(format string, args ...interface{}) {
p.error(fmt.Errorf(format, args...))
}
// error terminates processing.
func (p *parser) error(err error) {
perr := &ParseErr{
Line: p.lex.lineNumber(),
Pos: p.lex.linePosition(),
Err: err,
}
if strings.Count(strings.TrimSpace(p.lex.input), "\n") == 0 {
perr.Line = 0
}
panic(perr)
}
// expect consumes the next token and guarantees it has the required type.
func (p *parser) expect(exp ItemType, context string) item {
token := p.next()
if token.typ != exp {
p.errorf("unexpected %s in %s, expected %s", token.desc(), context, exp.desc())
}
return token
}
// expectOneOf consumes the next token and guarantees it has one of the required types.
func (p *parser) expectOneOf(exp1, exp2 ItemType, context string) item {
token := p.next()
if token.typ != exp1 && token.typ != exp2 {
p.errorf("unexpected %s in %s, expected %s or %s", token.desc(), context, exp1.desc(), exp2.desc())
}
return token
}
var errUnexpected = fmt.Errorf("unexpected error")
// recover is the handler that turns panics into returns from the top level of Parse.
func (p *parser) | (errp *error) {
e := recover()
if e != nil {
if _, ok := e.(runtime.Error); ok {
// Print the stack trace but do not inhibit the running application.
buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)]
fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
*errp = errUnexpected
} else {
*errp = e.(error)
}
}
p.lex.close()
}
// number parses a number.
func (p *parser) number(val string) float64 {
n, err := strconv.ParseInt(val, 0, 64)
f := float64(n)
if err != nil {
f, err = strconv.ParseFloat(val, 64)
}
if err != nil {
p.errorf("error parsing number: %s", err)
}
return f
}
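// (Editor's note: strconv.ParseInt with base 0 covers decimal, hex ("0x1f"),
// and octal ("0755") literals; anything else, e.g. "2.5", "1e3", or "Inf",
// falls through to the strconv.ParseFloat call above.)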
// labels parses a list of labelnames.
//
// '(' <label_name>, ... ')'
//
func (p *parser) labels() []string {
const ctx = "grouping opts"
p.expect(itemLeftParen, ctx)
labels := []string{}
if p.peek().typ != itemRightParen {
for {
id := p.next()
if !isLabel(id.val) {
p.errorf("unexpected %s in %s, expected label", id.desc(), ctx)
}
labels = append(labels, id.val)
if p.peek().typ != itemComma {
break
}
p.next()
}
}
p.expect(itemRightParen, ctx)
return labels
}
// labelSet parses a set of label matchers
//
// '{' [ <labelname> '=' <match_string>, ... ] '}'
//
func (p *parser) labelSet() labels.Labels {
set := []labels.Label{}
for _, lm := range p.labelMatchers(itemEQL) {
set = append(set, labels.Label{Name: lm.Name, Value: lm.Value})
}
return labels.New(set...)
| recover | identifier_name |
mock_cr50_agent.rs | Error,
fidl_fuchsia_tpm_cr50::{
InsertLeafResponse, PinWeaverRequest, PinWeaverRequestStream, TryAuthFailed,
TryAuthRateLimited, TryAuthResponse, TryAuthSuccess,
},
fuchsia_async as fasync,
fuchsia_component::server as fserver,
fuchsia_component_test::LocalComponentHandles,
futures::stream::{StreamExt, TryStreamExt},
parking_lot::Mutex,
std::collections::VecDeque,
std::sync::Arc,
};
/// Struct that builds a set of mock responses for CR50 agent requests.
/// Mock responses should be created (via add_* functions) in FIFO order and
/// will panic at test time if the order of requests does not match the order
/// of responses added to the builder.
/// Successful TryAuth responses take neither a `he_secret` nor a
/// `reset_secret` as those are generated via cprng by password_authenticator.
/// Instead, a successful TryAuth response will always return the `he_secret`
/// provided to the most recent InsertLeaf call, and will always return an
/// empty `reset_secret`.
/// TODO(fxb/89060, arkay): This logic could be improved upon to match the
/// `he_secret` to the credential label if necessary.
pub(crate) struct MockCr50AgentBuilder {
responses: VecDeque<MockResponse>,
}
/// Defines the type of a Hash as CR50 expects it.
pub(crate) type Hash = [u8; 32];
/// Defines an enum of known MockResponse types.
#[derive(Clone, Debug)]
pub(crate) enum MockResponse {
GetVersion { version: u8 },
ResetTree { root_hash: Hash },
InsertLeaf { response: InsertLeafResponse },
RemoveLeaf { root_hash: Hash },
TryAuth { response: TryAuthResponse },
}
#[allow(dead_code)]
impl MockCr50AgentBuilder {
/// Initializes a new MockCr50AgentBuilder.
pub(crate) fn new() -> Self {
MockCr50AgentBuilder { responses: VecDeque::new() }
}
/// Adds a GetVersion response.
pub(crate) fn add_get_version_response(mut self, version: u8) -> Self {
self.responses.push_back(MockResponse::GetVersion { version });
self
}
/// Adds a ResetTree response.
pub(crate) fn add_reset_tree_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::ResetTree { root_hash });
self
}
/// Adds an InsertLeaf response.
/// This function does not take an he_secret or reset_secret, see
/// [`MockCr50AgentBuilder`] for more information.
pub(crate) fn add_insert_leaf_response(
mut self,
root_hash: Hash,
mac: Hash,
cred_metadata: Vec<u8>,
) -> Self {
let response = InsertLeafResponse {
root_hash: Some(root_hash),
mac: Some(mac),
cred_metadata: Some(cred_metadata),
..InsertLeafResponse::EMPTY
};
self.responses.push_back(MockResponse::InsertLeaf { response });
self
}
/// Adds a RemoveLeaf response.
pub(crate) fn add_remove_leaf_response(mut self, root_hash: Hash) -> Self |
/// Adds a successful TryAuth response.
pub(crate) fn add_try_auth_success_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let success = TryAuthSuccess {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthSuccess::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Success(success) });
self
}
/// Adds a failed TryAuth response.
pub(crate) fn add_try_auth_failed_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let failed = TryAuthFailed {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthFailed::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Failed(failed) });
self
}
/// Adds a rate limited TryAuth response.
pub(crate) fn add_try_auth_rate_limited_response(mut self, time_to_wait: i64) -> Self {
let ratelimited =
TryAuthRateLimited { time_to_wait: Some(time_to_wait), ..TryAuthRateLimited::EMPTY };
self.responses.push_back(MockResponse::TryAuth {
response: TryAuthResponse::RateLimited(ratelimited),
});
self
}
/// Consumes the builder and returns the VecDeque of responses for use with `mock()`.
pub(crate) fn build(self) -> VecDeque<MockResponse> {
self.responses
}
}
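// Editor's sketch (not part of the original fixture): a typical FIFO script
// for a "provision then authenticate" test could be assembled roughly as
// follows, with placeholder hashes and metadata:
//
//     let responses = MockCr50AgentBuilder::new()
//         .add_get_version_response(1)
//         .add_reset_tree_response([0u8; 32])
//         .add_insert_leaf_response([1u8; 32], [2u8; 32], vec![0u8; 8])
//         .add_try_auth_success_response([1u8; 32], vec![0u8; 8], [2u8; 32])
//         .build();
//
// The resulting queue is handed to `mock()` below, which answers exactly one
// PinWeaver request per queued response, in order.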
async fn handle_request(
request: PinWeaverRequest,
next_response: MockResponse,
he_secret: &Arc<Mutex<Vec<u8>>>,
) {
// Match the next response with the request, panicking if requests are out
// of the expected order.
match request {
PinWeaverRequest::GetVersion { responder: resp } => {
match next_response {
MockResponse::GetVersion { version } => {
resp.send(version).expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected GetVersion.",
next_response
),
};
}
PinWeaverRequest::ResetTree { bits_per_level: _, height: _, responder: resp } => {
match next_response {
MockResponse::ResetTree { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected ResetTree.",
next_response
),
};
}
PinWeaverRequest::InsertLeaf { params, responder: resp } => {
match next_response {
MockResponse::InsertLeaf { response } => {
// Store the he_secret received in the most recent
// InsertLeaf response to return in subsequent successful
// TryAuth responses.
let mut secret = he_secret.lock();
*secret = params.he_secret.expect("expected he_secret provided in params");
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected InsertLeaf.",
next_response
),
};
}
PinWeaverRequest::RemoveLeaf { params: _, responder: resp } => {
match next_response {
MockResponse::RemoveLeaf { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected RemoveLeaf.",
next_response
),
};
}
PinWeaverRequest::TryAuth { params: _, responder: resp } => {
match next_response {
MockResponse::TryAuth { response } => {
if let TryAuthResponse::Success(success) = response {
// If it's a success, grab the last he_secret provided via InsertLeaf.
let secret = he_secret.lock();
resp.send(&mut std::result::Result::Ok(TryAuthResponse::Success(
TryAuthSuccess { he_secret: Some((*secret).clone()), ..success },
)))
.expect("failed to send response");
} else {
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
}
_ => {
panic!("Next mock response type was {:?} but expected TryAuth.", next_response)
}
};
}
// GetLog and LogReplay are unimplemented as testing log replay is out
// of scope for pwauth-credmgr integration tests.
PinWeaverRequest::GetLog { root_hash: _, responder: _ } => {
unimplemented!();
}
PinWeaverRequest::LogReplay { params: _, responder: _ } => {
unimplemented!();
}
}
}
pub(crate) async fn mock(
mock_responses: VecDeque<MockResponse>,
handles: LocalComponentHandles,
) -> Result<(), Error> {
// Create a new ServiceFs to host FIDL protocols from
let mut fs = fserver::ServiceFs::new();
let mut tasks = vec![];
let last_he_secret: Arc<Mutex<Vec<u8>>> = Arc::new(Mutex::new(vec![0; 32]));
// Add the PinWeaver protocol to the ServiceFs
fs.dir("svc").add_fidl_service(move |mut stream: PinWeaverRequestStream| {
// Need to clone the mock responses again because this is a FnMut not a FnOnce
let mut task_mock_responses = mock_responses.clone();
let he_secret = Arc::clone(&last_he_secret);
tasks.push(fasync::Task::local(async move {
while let Some(request) =
stream.try_next().await.expect("failed to serve pinweaver service")
{
// Look | {
self.responses.push_back(MockResponse::RemoveLeaf { root_hash });
self
} | identifier_body |
mock_cr50_agent.rs | Error,
fidl_fuchsia_tpm_cr50::{
InsertLeafResponse, PinWeaverRequest, PinWeaverRequestStream, TryAuthFailed,
TryAuthRateLimited, TryAuthResponse, TryAuthSuccess,
},
fuchsia_async as fasync,
fuchsia_component::server as fserver,
fuchsia_component_test::LocalComponentHandles,
futures::stream::{StreamExt, TryStreamExt},
parking_lot::Mutex,
std::collections::VecDeque,
std::sync::Arc,
};
/// Struct that builds a set of mock responses for CR50 agent requests.
/// Mock responses should be created (via add_* functions) in FIFO order and
/// will panic at test time if the order of requests does not match the order
/// of responses added to the builder.
/// Successful TryAuth responses take neither a `he_secret` nor a
/// `reset_secret` as those are generated via cprng by password_authenticator.
/// Instead, a successful TryAuth response will always return the `he_secret`
/// provided to the most recent InsertLeaf call, and will always return an
/// empty `reset_secret`.
/// TODO(fxb/89060, arkay): This logic could be improved upon to match the
/// `he_secret` to the credential label if necessary.
pub(crate) struct MockCr50AgentBuilder {
responses: VecDeque<MockResponse>,
}
/// Defines the type of a Hash as CR50 expects it.
pub(crate) type Hash = [u8; 32];
/// Defines an enum of known MockResponse types.
#[derive(Clone, Debug)]
pub(crate) enum MockResponse {
GetVersion { version: u8 },
ResetTree { root_hash: Hash },
InsertLeaf { response: InsertLeafResponse },
RemoveLeaf { root_hash: Hash },
TryAuth { response: TryAuthResponse },
}
#[allow(dead_code)]
impl MockCr50AgentBuilder {
/// Initializes a new MockCr50AgentBuilder.
pub(crate) fn new() -> Self {
MockCr50AgentBuilder { responses: VecDeque::new() }
}
/// Adds a GetVersion response.
pub(crate) fn add_get_version_response(mut self, version: u8) -> Self {
self.responses.push_back(MockResponse::GetVersion { version });
self
}
/// Adds a ResetTree response.
pub(crate) fn add_reset_tree_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::ResetTree { root_hash });
self
}
/// Adds an InsertLeaf response.
/// This function does not take an he_secret or reset_secret, see
/// [`MockCr50AgentBuilder`] for more information.
pub(crate) fn add_insert_leaf_response(
mut self,
root_hash: Hash,
mac: Hash,
cred_metadata: Vec<u8>,
) -> Self {
let response = InsertLeafResponse {
root_hash: Some(root_hash),
mac: Some(mac),
cred_metadata: Some(cred_metadata),
..InsertLeafResponse::EMPTY
};
self.responses.push_back(MockResponse::InsertLeaf { response });
self
}
/// Adds a RemoveLeaf response.
pub(crate) fn add_remove_leaf_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::RemoveLeaf { root_hash });
self
}
/// Adds a successful TryAuth response.
pub(crate) fn add_try_auth_success_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let success = TryAuthSuccess {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthSuccess::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Success(success) });
self
}
/// Adds a failed TryAuth response.
pub(crate) fn add_try_auth_failed_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let failed = TryAuthFailed {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthFailed::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Failed(failed) });
self
}
/// Adds a rate limited TryAuth response.
pub(crate) fn add_try_auth_rate_limited_response(mut self, time_to_wait: i64) -> Self {
let ratelimited =
TryAuthRateLimited { time_to_wait: Some(time_to_wait), ..TryAuthRateLimited::EMPTY };
self.responses.push_back(MockResponse::TryAuth {
response: TryAuthResponse::RateLimited(ratelimited),
});
self
}
/// Consumes the builder and returns the VecDeque of responses for use with `mock()`.
pub(crate) fn build(self) -> VecDeque<MockResponse> {
self.responses
}
}
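// Editor's note on the helper below: handle_request services a single
// PinWeaver request against the next queued MockResponse and panics if the
// request kind does not match it. The shared he_secret mutex carries the
// secret supplied to the most recent InsertLeaf over to later successful
// TryAuth responses, as described on MockCr50AgentBuilder above.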
async fn handle_request(
request: PinWeaverRequest,
next_response: MockResponse,
he_secret: &Arc<Mutex<Vec<u8>>>,
) {
// Match the next response with the request, panicking if requests are out
// of the expected order.
match request {
PinWeaverRequest::GetVersion { responder: resp } => {
match next_response {
MockResponse::GetVersion { version } => {
resp.send(version).expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected GetVersion.",
next_response
),
};
}
PinWeaverRequest::ResetTree { bits_per_level: _, height: _, responder: resp } => {
match next_response {
MockResponse::ResetTree { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected ResetTree.",
next_response
),
};
}
PinWeaverRequest::InsertLeaf { params, responder: resp } => {
match next_response {
MockResponse::InsertLeaf { response } => {
// Store the he_secret received in the most recent
// InsertLeaf response to return in subsequent successful
// TryAuth responses.
let mut secret = he_secret.lock();
*secret = params.he_secret.expect("expected he_secret provided in params");
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected InsertLeaf.",
next_response
),
};
}
PinWeaverRequest::RemoveLeaf { params: _, responder: resp } => {
match next_response {
MockResponse::RemoveLeaf { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected RemoveLeaf.",
next_response
),
};
}
PinWeaverRequest::TryAuth { params: _, responder: resp } => {
match next_response {
MockResponse::TryAuth { response } => {
if let TryAuthResponse::Success(success) = response {
// If it's a success, grab the last he_secret provided via InsertLeaf.
let secret = he_secret.lock();
resp.send(&mut std::result::Result::Ok(TryAuthResponse::Success(
TryAuthSuccess { he_secret: Some((*secret).clone()), ..success },
)))
.expect("failed to send response");
} else |
}
_ => {
panic!("Next mock response type was {:?} but expected TryAuth.", next_response)
}
};
}
// GetLog and LogReplay are unimplemented as testing log replay is out
// of scope for pwauth-credmgr integration tests.
PinWeaverRequest::GetLog { root_hash: _, responder: _ } => {
unimplemented!();
}
PinWeaverRequest::LogReplay { params: _, responder: _ } => {
unimplemented!();
}
}
}
pub(crate) async fn mock(
mock_responses: VecDeque<MockResponse>,
handles: LocalComponentHandles,
) -> Result<(), Error> {
// Create a new ServiceFs to host FIDL protocols from
let mut fs = fserver::ServiceFs::new();
let mut tasks = vec![];
let last_he_secret: Arc<Mutex<Vec<u8>>> = Arc::new(Mutex::new(vec![0; 32]));
// Add the PinWeaver protocol to the ServiceFs
fs.dir("svc").add_fidl_service(move |mut stream: PinWeaverRequestStream| {
// Need to clone the mock responses again because this is a FnMut not a FnOnce
let mut task_mock_responses = mock_responses.clone();
let he_secret = Arc::clone(&last_he_secret);
tasks.push(fasync::Task::local(async move {
while let Some(request) =
stream.try_next().await.expect("failed to serve pinweaver service")
{
// Look | {
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
} | conditional_block |
mock_cr50_agent.rs | Error,
fidl_fuchsia_tpm_cr50::{
InsertLeafResponse, PinWeaverRequest, PinWeaverRequestStream, TryAuthFailed,
TryAuthRateLimited, TryAuthResponse, TryAuthSuccess,
},
fuchsia_async as fasync,
fuchsia_component::server as fserver,
fuchsia_component_test::LocalComponentHandles,
futures::stream::{StreamExt, TryStreamExt},
parking_lot::Mutex,
std::collections::VecDeque,
std::sync::Arc,
};
/// Struct that builds a set of mock responses for CR50 agent requests.
/// Mock responses should be created (via add_* functions) in FIFO order and
/// will panic at test time if the order of requests does not match the order
/// of responses added to the builder.
/// Successful TryAuth responses take neither a `he_secret` nor a
/// `reset_secret` as those are generated via cprng by password_authenticator.
/// Instead, a successful TryAuth response will always return the `he_secret`
/// provided to the most recent InsertLeaf call, and will always return an
/// empty `reset_secret`.
/// TODO(fxb/89060, arkay): This logic could be improved upon to match the
/// `he_secret` to the credential label if necessary.
pub(crate) struct MockCr50AgentBuilder {
responses: VecDeque<MockResponse>,
}
/// Defines the type of a Hash as CR50 expects it.
pub(crate) type Hash = [u8; 32];
/// Defines an enum of known MockResponse types.
#[derive(Clone, Debug)]
pub(crate) enum MockResponse {
GetVersion { version: u8 },
ResetTree { root_hash: Hash },
InsertLeaf { response: InsertLeafResponse },
RemoveLeaf { root_hash: Hash },
TryAuth { response: TryAuthResponse },
}
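// Editor's note: each MockResponse variant corresponds to one PinWeaver
// method answered by handle_request below; GetLog and LogReplay deliberately
// have no variant because the mock leaves them unimplemented.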
#[allow(dead_code)]
impl MockCr50AgentBuilder {
/// Initializes a new MockCr50AgentBuilder.
pub(crate) fn new() -> Self {
MockCr50AgentBuilder { responses: VecDeque::new() }
}
/// Adds a GetVersion response.
pub(crate) fn add_get_version_response(mut self, version: u8) -> Self {
self.responses.push_back(MockResponse::GetVersion { version });
self
}
/// Adds a ResetTree response.
pub(crate) fn add_reset_tree_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::ResetTree { root_hash });
self
}
/// Adds an InsertLeaf response.
/// This function does not take an he_secret or reset_secret, see
/// [`MockCr50AgentBuilder`] for more information.
pub(crate) fn add_insert_leaf_response(
mut self,
root_hash: Hash,
mac: Hash,
cred_metadata: Vec<u8>,
) -> Self {
let response = InsertLeafResponse {
root_hash: Some(root_hash),
mac: Some(mac),
cred_metadata: Some(cred_metadata),
..InsertLeafResponse::EMPTY
};
self.responses.push_back(MockResponse::InsertLeaf { response });
self
}
/// Adds a RemoveLeaf response.
pub(crate) fn add_remove_leaf_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::RemoveLeaf { root_hash });
self
}
/// Adds a successful TryAuth response.
pub(crate) fn add_try_auth_success_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let success = TryAuthSuccess {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthSuccess::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Success(success) });
self
}
/// Adds a failed TryAuth response.
pub(crate) fn add_try_auth_failed_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let failed = TryAuthFailed {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthFailed::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Failed(failed) });
self
}
/// Adds a rate limited TryAuth response.
pub(crate) fn add_try_auth_rate_limited_response(mut self, time_to_wait: i64) -> Self {
let ratelimited =
TryAuthRateLimited { time_to_wait: Some(time_to_wait), ..TryAuthRateLimited::EMPTY };
self.responses.push_back(MockResponse::TryAuth {
response: TryAuthResponse::RateLimited(ratelimited),
});
self
}
/// Consumes the builder and returns the VecDeque of responses for use with `mock()`.
pub(crate) fn build(self) -> VecDeque<MockResponse> {
self.responses
}
}
async fn | (
request: PinWeaverRequest,
next_response: MockResponse,
he_secret: &Arc<Mutex<Vec<u8>>>,
) {
// Match the next response with the request, panicking if requests are out
// of the expected order.
match request {
PinWeaverRequest::GetVersion { responder: resp } => {
match next_response {
MockResponse::GetVersion { version } => {
resp.send(version).expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected GetVersion.",
next_response
),
};
}
PinWeaverRequest::ResetTree { bits_per_level: _, height: _, responder: resp } => {
match next_response {
MockResponse::ResetTree { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected ResetTree.",
next_response
),
};
}
PinWeaverRequest::InsertLeaf { params, responder: resp } => {
match next_response {
MockResponse::InsertLeaf { response } => {
// Store the he_secret received in the most recent
// InsertLeaf response to return in subsequent successful
// TryAuth responses.
let mut secret = he_secret.lock();
*secret = params.he_secret.expect("expected he_secret provided in params");
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected InsertLeaf.",
next_response
),
};
}
PinWeaverRequest::RemoveLeaf { params: _, responder: resp } => {
match next_response {
MockResponse::RemoveLeaf { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected RemoveLeaf.",
next_response
),
};
}
PinWeaverRequest::TryAuth { params: _, responder: resp } => {
match next_response {
MockResponse::TryAuth { response } => {
if let TryAuthResponse::Success(success) = response {
// If it's a success, grab the last he_secret provided via InsertLeaf.
let secret = he_secret.lock();
resp.send(&mut std::result::Result::Ok(TryAuthResponse::Success(
TryAuthSuccess { he_secret: Some((*secret).clone()), ..success },
)))
.expect("failed to send response");
} else {
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
}
_ => {
panic!("Next mock response type was {:?} but expected TryAuth.", next_response)
}
};
}
// GetLog and LogReplay are unimplemented as testing log replay is out
// of scope for pwauth-credmgr integration tests.
PinWeaverRequest::GetLog { root_hash: _, responder: _ } => {
unimplemented!();
}
PinWeaverRequest::LogReplay { params: _, responder: _ } => {
unimplemented!();
}
}
}
pub(crate) async fn mock(
mock_responses: VecDeque<MockResponse>,
handles: LocalComponentHandles,
) -> Result<(), Error> {
// Create a new ServiceFs to host FIDL protocols from
let mut fs = fserver::ServiceFs::new();
let mut tasks = vec![];
let last_he_secret: Arc<Mutex<Vec<u8>>> = Arc::new(Mutex::new(vec![0; 32]));
// Add the PinWeaver protocol to the ServiceFs
fs.dir("svc").add_fidl_service(move |mut stream: PinWeaverRequestStream| {
// Need to clone the mock responses again because this is a FnMut not a FnOnce
let mut task_mock_responses = mock_responses.clone();
let he_secret = Arc::clone(&last_he_secret);
tasks.push(fasync::Task::local(async move {
while let Some(request) =
stream.try_next().await.expect("failed to serve pinweaver service")
{
// Look at | handle_request | identifier_name |
mock_cr50_agent.rs | ::Error,
fidl_fuchsia_tpm_cr50::{
InsertLeafResponse, PinWeaverRequest, PinWeaverRequestStream, TryAuthFailed,
TryAuthRateLimited, TryAuthResponse, TryAuthSuccess,
},
fuchsia_async as fasync,
fuchsia_component::server as fserver,
fuchsia_component_test::LocalComponentHandles,
futures::stream::{StreamExt, TryStreamExt},
parking_lot::Mutex,
std::collections::VecDeque,
std::sync::Arc,
};
/// Struct that builds a set of mock responses for CR50 agent requests.
/// Mock responses should be created (via add_* functions) in FIFO order and
/// will panic at test time if the order of requests does not match the order
/// of responses added to the builder.
/// Successful TryAuth responses take neither a `he_secret` nor a
/// `reset_secret` as those are generated via cprng by password_authenticator.
/// Instead, a successful TryAuth response will always return the `he_secret`
/// provided to the most recent InsertLeaf call, and will always return an
/// empty `reset_secret`.
/// TODO(fxb/89060, arkay): This logic could be improved upon to match the
/// `he_secret` to the credential label if necessary.
pub(crate) struct MockCr50AgentBuilder {
responses: VecDeque<MockResponse>,
}
/// Defines the type of a Hash as CR50 expects it.
pub(crate) type Hash = [u8; 32];
/// Defines an enum of known MockResponse types.
#[derive(Clone, Debug)]
pub(crate) enum MockResponse {
GetVersion { version: u8 },
ResetTree { root_hash: Hash },
InsertLeaf { response: InsertLeafResponse },
RemoveLeaf { root_hash: Hash },
TryAuth { response: TryAuthResponse },
}
#[allow(dead_code)]
impl MockCr50AgentBuilder {
/// Initializes a new MockCr50AgentBuilder.
pub(crate) fn new() -> Self {
MockCr50AgentBuilder { responses: VecDeque::new() }
}
/// Adds a GetVersion response.
pub(crate) fn add_get_version_response(mut self, version: u8) -> Self {
self.responses.push_back(MockResponse::GetVersion { version });
self
}
/// Adds a ResetTree response.
pub(crate) fn add_reset_tree_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::ResetTree { root_hash });
self
}
/// Adds an InsertLeaf response.
/// This function does not take an he_secret or reset_secret, see
/// [`MockCr50AgentBuilder`] for more information.
pub(crate) fn add_insert_leaf_response(
mut self,
root_hash: Hash,
mac: Hash,
cred_metadata: Vec<u8>,
) -> Self {
let response = InsertLeafResponse {
root_hash: Some(root_hash),
mac: Some(mac),
cred_metadata: Some(cred_metadata),
..InsertLeafResponse::EMPTY
};
self.responses.push_back(MockResponse::InsertLeaf { response });
self
}
/// Adds a RemoveLeaf response.
pub(crate) fn add_remove_leaf_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::RemoveLeaf { root_hash });
self
}
/// Adds a successful TryAuth response.
pub(crate) fn add_try_auth_success_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let success = TryAuthSuccess {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthSuccess::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Success(success) });
self
}
/// Adds a failed TryAuth response.
pub(crate) fn add_try_auth_failed_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let failed = TryAuthFailed {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthFailed::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Failed(failed) });
self
}
/// Adds a rate limited TryAuth response.
pub(crate) fn add_try_auth_rate_limited_response(mut self, time_to_wait: i64) -> Self {
let ratelimited =
TryAuthRateLimited { time_to_wait: Some(time_to_wait), ..TryAuthRateLimited::EMPTY };
self.responses.push_back(MockResponse::TryAuth {
response: TryAuthResponse::RateLimited(ratelimited),
});
self
}
/// Consumes the builder and returns the VecDeque of responses for use with `mock()`.
pub(crate) fn build(self) -> VecDeque<MockResponse> {
self.responses
}
}
async fn handle_request(
request: PinWeaverRequest,
next_response: MockResponse,
he_secret: &Arc<Mutex<Vec<u8>>>,
) {
// Match the next response with the request, panicking if requests are out
// of the expected order.
match request {
PinWeaverRequest::GetVersion { responder: resp } => {
match next_response {
MockResponse::GetVersion { version } => {
resp.send(version).expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected GetVersion.",
next_response
),
};
}
PinWeaverRequest::ResetTree { bits_per_level: _, height: _, responder: resp } => {
match next_response {
MockResponse::ResetTree { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected ResetTree.",
next_response
),
};
}
PinWeaverRequest::InsertLeaf { params, responder: resp } => {
match next_response {
MockResponse::InsertLeaf { response } => {
// Store the he_secret received in the most recent
// InsertLeaf response to return in subsequent successful
// TryAuth responses.
let mut secret = he_secret.lock();
*secret = params.he_secret.expect("expected he_secret provided in params");
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response"); | ),
};
}
PinWeaverRequest::RemoveLeaf { params: _, responder: resp } => {
match next_response {
MockResponse::RemoveLeaf { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected RemoveLeaf.",
next_response
),
};
}
PinWeaverRequest::TryAuth { params: _, responder: resp } => {
match next_response {
MockResponse::TryAuth { response } => {
if let TryAuthResponse::Success(success) = response {
// If it's a success, grab the last he_secret provided via InsertLeaf.
let secret = he_secret.lock();
resp.send(&mut std::result::Result::Ok(TryAuthResponse::Success(
TryAuthSuccess { he_secret: Some((*secret).clone()), ..success },
)))
.expect("failed to send response");
} else {
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
}
_ => {
panic!("Next mock response type was {:?} but expected TryAuth.", next_response)
}
};
}
// GetLog and LogReplay are unimplemented as testing log replay is out
// of scope for pwauth-credmgr integration tests.
PinWeaverRequest::GetLog { root_hash: _, responder: _ } => {
unimplemented!();
}
PinWeaverRequest::LogReplay { params: _, responder: _ } => {
unimplemented!();
}
}
}
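// Editor's note: mock() is the local-component entry point wired into the
// test realm. Every new PinWeaver connection clones the full response queue,
// so a client that reconnects replays the scripted sequence from the start.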
pub(crate) async fn mock(
mock_responses: VecDeque<MockResponse>,
handles: LocalComponentHandles,
) -> Result<(), Error> {
// Create a new ServiceFs to host FIDL protocols from
let mut fs = fserver::ServiceFs::new();
let mut tasks = vec![];
let last_he_secret: Arc<Mutex<Vec<u8>>> = Arc::new(Mutex::new(vec![0; 32]));
// Add the PinWeaver protocol to the ServiceFs
fs.dir("svc").add_fidl_service(move |mut stream: PinWeaverRequestStream| {
// Need to clone the mock responses again because this is a FnMut not a FnOnce
let mut task_mock_responses = mock_responses.clone();
let he_secret = Arc::clone(&last_he_secret);
tasks.push(fasync::Task::local(async move {
while let Some(request) =
stream.try_next().await.expect("failed to serve pinweaver service")
{
// Look at the | }
_ => panic!(
"Next mock response type was {:?} but expected InsertLeaf.",
next_response | random_line_split |
hash_aggregator.go | // aggregation group, so we perform aggregation on each of them in turn.
// After we do so, we will have three buckets and the hash table will contain
// three tuples (with buckets and tuples corresponding to each other):
// buckets = [<bucket for -3>, <bucket for -2>, <bucket for -1>]
// ht.Vals = [-3, -2, -1].
// We have fully processed the first batch.
//
// II. we get a batch [-4, -1, -1, -4].
// 1. a) compute hash buckets: ProbeScratch.next = [reserved, 0, 1, 1, 0]
// b) build 'next' chains between hash buckets:
// ProbeScratch.first = [1, 2]
// ProbeScratch.next = [reserved, 4, 3, 0, 0]
// c) find "equality" buckets:
// ProbeScratch.HeadID = [1, 2, 2, 1]
// 2. divide all tuples into the equality chains based on HeadID:
// eqChains[0] = [0, 3]
// eqChains[1] = [1, 2]
// The special "heads of equality chains" selection vector is [0, 1].
// 3. probe that special "heads" selection vector against the tuples already
// present in the hash table:
// ProbeScratch.HeadID = [0, 3]
// Value 0 indicates that the first equality chain doesn't have an
// existing bucket, but the second chain does and the ID of its bucket is
// HeadID-1 = 2. We aggregate the second equality chain into that bucket.
// 4. the first equality chain contains tuples from a new aggregation group,
// so we create a new bucket for it and perform the aggregation.
// After we do so, we will have four buckets and the hash table will contain
// four tuples:
// buckets = [<bucket for -3>, <bucket for -2>, <bucket for -1>, <bucket for -4>]
// ht.Vals = [-3, -2, -1, -4].
// We have fully processed the second batch.
//
// We have processed the input fully, so we're ready to emit the output.
//
// NOTE: b *must* be non-zero length batch.
func (op *hashAggregator) onlineAgg(ctx context.Context, b coldata.Batch) {
op.setupScratchSlices(b.Length())
inputVecs := b.ColVecs()
// Step 1: find "equality" buckets: we compute the hash buckets for all
// tuples, build 'next' chains between them, and then find equality buckets
// for the tuples.
op.ht.ComputeHashAndBuildChains(ctx, b)
op.ht.FindBuckets(
b, op.ht.Keys, op.ht.ProbeScratch.First, op.ht.ProbeScratch.Next, op.ht.CheckProbeForDistinct,
)
// Step 2: now that we have op.ht.ProbeScratch.HeadID populated we can
// populate the equality chains.
eqChainsCount, eqChainsHeadsSel := op.populateEqChains(b)
b.SetLength(eqChainsCount)
// Make a copy of the selection vector that contains heads of the
// corresponding equality chains because the underlying memory will be
// modified below.
eqChainsHeads := op.scratch.intSlice[:eqChainsCount]
copy(eqChainsHeads, eqChainsHeadsSel)
// Step 3: if we have any existing buckets, we need to probe the heads of
// the equality chains (which the selection vector on b currently contains)
// against the heads of the existing groups.
if len(op.buckets) > 0 {
op.ht.FindBuckets(
b, op.ht.Keys, op.ht.BuildScratch.First, op.ht.BuildScratch.Next, op.ht.CheckBuildForAggregation,
)
for eqChainsSlot, HeadID := range op.ht.ProbeScratch.HeadID[:eqChainsCount] {
if HeadID != 0 {
// Tuples in this equality chain belong to an already existing
// group.
eqChain := op.scratch.eqChains[eqChainsSlot]
bucket := op.buckets[HeadID-1]
op.aggHelper.performAggregation(
ctx, inputVecs, len(eqChain), eqChain, bucket, nil, /* groups */
)
// We have fully processed this equality chain, so we need to
// reset its length.
op.scratch.eqChains[eqChainsSlot] = op.scratch.eqChains[eqChainsSlot][:0]
}
}
}
// Step 4: now we go over all equality chains and check whether there are
// any that haven't been processed yet (they will be of non-zero length).
// If we find any, we'll create a new bucket for each.
newGroupsHeadsSel := op.scratch.anotherIntSlice[:0]
newGroupCount := 0
for eqChainSlot, eqChain := range op.scratch.eqChains[:eqChainsCount] {
if len(eqChain) > 0 {
// Tuples in this equality chain belong to a new aggregation group,
// so we'll use a new bucket and make sure that the head of this
// equality chain is appended to the hash table in the
// corresponding position.
var bucket *aggBucket
if nextBucketIdx := len(op.buckets); op.numPreviouslyCreatedBuckets > nextBucketIdx {
// We still have a bucket created on the previous run of the
// hash aggregator. Increase the length of op.buckets, using
// previously-allocated capacity, and then reset the bucket for
// reuse.
op.buckets = op.buckets[:nextBucketIdx+1]
bucket = op.buckets[nextBucketIdx]
bucket.reset()
} else {
// Need to allocate a new bucket.
bucket = op.hashAlloc.newAggBucket()
op.buckets = append(op.buckets, bucket)
// We know that all selected tuples belong to the same single
// group, so we can pass 'nil' for the 'groups' argument.
bucket.init(
op.aggFnsAlloc.MakeAggregateFuncs(), op.aggHelper.makeSeenMaps(), nil, /* groups */
)
}
op.aggHelper.performAggregation(
ctx, inputVecs, len(eqChain), eqChain, bucket, nil, /* groups */
)
newGroupsHeadsSel = append(newGroupsHeadsSel, eqChainsHeads[eqChainSlot])
// We need to compact the hash buffer according to the new groups
// head tuples selection vector we're building.
op.ht.ProbeScratch.HashBuffer[newGroupCount] = op.ht.ProbeScratch.HashBuffer[eqChainSlot]
newGroupCount++
op.scratch.eqChains[eqChainSlot] = op.scratch.eqChains[eqChainSlot][:0]
}
}
if newGroupCount > 0 {
// We have created new buckets, so we need to append the heads of those
// buckets to the hash table.
copy(b.Selection(), newGroupsHeadsSel)
b.SetLength(newGroupCount)
op.ht.AppendAllDistinct(ctx, b)
}
}
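// ExportBuffered is part of the colexecop.BufferingInMemoryOperator contract
// (editor's summary, based on the interface assertion elsewhere in this
// file): when the in-memory aggregation spills, the disk-backed fallback
// drains every input tuple recorded in inputTrackingState, terminated by a
// zero-length batch.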
func (op *hashAggregator) ExportBuffered(ctx context.Context, _ colexecop.Operator) coldata.Batch {
if !op.inputTrackingState.zeroBatchEnqueued {
// Per the contract of the spilling queue, we need to append a
// zero-length batch.
op.inputTrackingState.tuples.Enqueue(ctx, coldata.ZeroBatch)
op.inputTrackingState.zeroBatchEnqueued = true
}
batch, err := op.inputTrackingState.tuples.Dequeue(ctx)
if err != nil {
colexecerror.InternalError(err)
}
return batch
}
func (op *hashAggregator) Reset(ctx context.Context) {
if r, ok := op.Input.(colexecop.Resetter); ok {
r.Reset(ctx)
}
op.bufferingState.tuples.ResetInternalBatch()
op.bufferingState.pendingBatch = nil
op.bufferingState.unprocessedIdx = 0
if op.numPreviouslyCreatedBuckets < len(op.buckets) {
op.numPreviouslyCreatedBuckets = len(op.buckets)
}
// Set up buckets for reuse.
op.buckets = op.buckets[:0]
op.ht.Reset(ctx)
if op.inputTrackingState.tuples != nil {
if err := op.inputTrackingState.tuples.Close(ctx); err != nil {
colexecerror.InternalError(err)
}
op.inputTrackingState.zeroBatchEnqueued = false
}
op.curOutputBucketIdx = 0
op.state = hashAggregatorBuffering
}
func (op *hashAggregator) Close(ctx context.Context) error | {
var retErr error
if op.inputTrackingState.tuples != nil {
retErr = op.inputTrackingState.tuples.Close(ctx)
}
if err := op.toClose.Close(ctx); err != nil {
retErr = err
}
return retErr
} | identifier_body |
|
hash_aggregator.go | all tuples from the
// batch will be included in one of these chains). These chains must
// be set to zero length once the batch has been processed so that the
// memory can be reused.
eqChains [][]int
// intSlice and anotherIntSlice are simply scratch int slices that are
// reused for several purposes by the hashAggregator.
intSlice []int
anotherIntSlice []int
}
// inputTrackingState tracks all the input tuples, which is needed in order
// to fall back to the external hash aggregator.
inputTrackingState struct {
tuples *colexecutils.SpillingQueue
zeroBatchEnqueued bool
}
// curOutputBucketIdx tracks the index in buckets to be flushed next when
// populating the output.
curOutputBucketIdx int
output coldata.Batch
aggFnsAlloc *colexecagg.AggregateFuncsAlloc
hashAlloc aggBucketAlloc
datumAlloc rowenc.DatumAlloc
toClose colexecop.Closers
}
var _ colexecop.ResettableOperator = &hashAggregator{}
var _ colexecop.BufferingInMemoryOperator = &hashAggregator{}
var _ colexecop.ClosableOperator = &hashAggregator{}
// hashAggregatorAllocSize determines the allocation size used by the hash
// aggregator's allocators. This number was chosen after running benchmarks of
// 'sum' aggregation on ints and decimals with varying group sizes (powers of 2
// from 1 to 4096).
const hashAggregatorAllocSize = 128
// NewHashAggregator creates a hash aggregator on the given grouping columns.
// The input specifications to this function are the same as that of the
// NewOrderedAggregator function.
// newSpillingQueueArgs - when non-nil - specifies the arguments to
// instantiate a SpillingQueue with, which will be used to keep all of the
// input tuples in case the in-memory hash aggregator needs to fall back to
// the disk-backed operator. Pass in nil in order to not track all input
// tuples.
func NewHashAggregator(
args *colexecagg.NewAggregatorArgs, newSpillingQueueArgs *colexecutils.NewSpillingQueueArgs,
) (colexecop.ResettableOperator, error) {
aggFnsAlloc, inputArgsConverter, toClose, err := colexecagg.NewAggregateFuncsAlloc(
args, hashAggregatorAllocSize, true, /* isHashAgg */
)
// We want this number to be coldata.MaxBatchSize, but then we would lose
// some test coverage due to disabling the randomization of the batch
// size, so we instead use 4 x coldata.BatchSize() (which ends up being
// coldata.MaxBatchSize in a non-test environment).
maxBuffered := 4 * coldata.BatchSize()
if maxBuffered > coldata.MaxBatchSize {
// When randomizing coldata.BatchSize() in tests we might exceed
// coldata.MaxBatchSize, so we need to shrink it.
maxBuffered = coldata.MaxBatchSize
}
hashAgg := &hashAggregator{
OneInputNode: colexecop.NewOneInputNode(args.Input),
allocator: args.Allocator,
spec: args.Spec,
state: hashAggregatorBuffering,
inputTypes: args.InputTypes,
outputTypes: args.OutputTypes,
inputArgsConverter: inputArgsConverter,
maxBuffered: maxBuffered,
toClose: toClose,
aggFnsAlloc: aggFnsAlloc,
hashAlloc: aggBucketAlloc{allocator: args.Allocator},
}
hashAgg.bufferingState.tuples = colexecutils.NewAppendOnlyBufferedBatch(args.Allocator, args.InputTypes, nil /* colsToStore */)
hashAgg.datumAlloc.AllocSize = hashAggregatorAllocSize
hashAgg.aggHelper = newAggregatorHelper(args, &hashAgg.datumAlloc, true /* isHashAgg */, hashAgg.maxBuffered)
if newSpillingQueueArgs != nil {
hashAgg.inputTrackingState.tuples = colexecutils.NewSpillingQueue(newSpillingQueueArgs)
}
return hashAgg, err
}
func (op *hashAggregator) Init() {
op.Input.Init()
// These numbers were chosen after running the micro-benchmarks and relevant
// TPCH queries using tpchvec/bench.
const hashTableLoadFactor = 0.1
const hashTableNumBuckets = 256
op.ht = colexechash.NewHashTable(
op.allocator,
hashTableLoadFactor,
hashTableNumBuckets,
op.inputTypes,
op.spec.GroupCols,
true, /* allowNullEquality */
colexechash.HashTableDistinctBuildMode,
colexechash.HashTableDefaultProbeMode,
)
}
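// Editor's summary of the cases below: Next drives a small state machine in
// which hashAggregatorBuffering accumulates up to maxBuffered input tuples,
// hashAggregatorAggregating runs onlineAgg over what was buffered,
// hashAggregatorOutputting flushes one aggregation bucket per output row,
// and hashAggregatorDone (its case is not shown in this excerpt) ends the
// output stream.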
func (op *hashAggregator) Next(ctx context.Context) coldata.Batch {
for {
switch op.state {
case hashAggregatorBuffering:
if op.bufferingState.pendingBatch != nil && op.bufferingState.unprocessedIdx < op.bufferingState.pendingBatch.Length() {
op.allocator.PerformOperation(op.bufferingState.tuples.ColVecs(), func() {
op.bufferingState.tuples.AppendTuples(
op.bufferingState.pendingBatch, op.bufferingState.unprocessedIdx, op.bufferingState.pendingBatch.Length(),
)
})
}
op.bufferingState.pendingBatch, op.bufferingState.unprocessedIdx = op.Input.Next(ctx), 0
n := op.bufferingState.pendingBatch.Length()
if op.inputTrackingState.tuples != nil {
op.inputTrackingState.tuples.Enqueue(ctx, op.bufferingState.pendingBatch)
op.inputTrackingState.zeroBatchEnqueued = n == 0
}
if n == 0 {
// This is the last input batch.
if op.bufferingState.tuples.Length() == 0 {
// There are currently no buffered tuples to perform the
// aggregation on.
if len(op.buckets) == 0 {
// We don't have any buckets which means that there were
// no input tuples whatsoever, so we can transition to
// finished state right away.
op.state = hashAggregatorDone
} else {
// There are some buckets, so we proceed to the
// outputting state.
op.state = hashAggregatorOutputting
}
} else {
// There are some buffered tuples on which we need to run
// the aggregation.
op.state = hashAggregatorAggregating
}
continue
}
toBuffer := n
if op.bufferingState.tuples.Length()+toBuffer > op.maxBuffered {
toBuffer = op.maxBuffered - op.bufferingState.tuples.Length()
}
if toBuffer > 0 {
op.allocator.PerformOperation(op.bufferingState.tuples.ColVecs(), func() {
op.bufferingState.tuples.AppendTuples(op.bufferingState.pendingBatch, 0 /* startIdx */, toBuffer)
})
op.bufferingState.unprocessedIdx = toBuffer
}
if op.bufferingState.tuples.Length() == op.maxBuffered {
op.state = hashAggregatorAggregating
continue
}
case hashAggregatorAggregating:
op.inputArgsConverter.ConvertBatch(op.bufferingState.tuples)
op.onlineAgg(ctx, op.bufferingState.tuples)
if op.bufferingState.pendingBatch.Length() == 0 {
if len(op.buckets) == 0 {
op.state = hashAggregatorDone
} else {
op.state = hashAggregatorOutputting
}
continue
}
op.bufferingState.tuples.ResetInternalBatch()
op.state = hashAggregatorBuffering
case hashAggregatorOutputting:
// Note that ResetMaybeReallocate truncates the requested capacity
// at coldata.BatchSize(), so we can just try asking for
// len(op.buckets) capacity. Note that in hashAggregatorOutputting
// state we always have at least 1 bucket.
//
// For now, we don't enforce any footprint-based memory limit.
// TODO(yuzefovich): refactor this.
const maxBatchMemSize = math.MaxInt64
op.output, _ = op.allocator.ResetMaybeReallocate(
op.outputTypes, op.output, len(op.buckets), maxBatchMemSize,
)
curOutputIdx := 0
op.allocator.PerformOperation(op.output.ColVecs(), func() {
for curOutputIdx < op.output.Capacity() && op.curOutputBucketIdx < len(op.buckets) {
bucket := op.buckets[op.curOutputBucketIdx]
for fnIdx, fn := range bucket.fns {
fn.SetOutput(op.output.ColVec(fnIdx))
fn.Flush(curOutputIdx)
}
curOutputIdx++
op.curOutputBucketIdx++
}
})
if op.curOutputBucketIdx >= len(op.buckets) |
op.output.SetLength(curOutputIdx)
| {
op.state = hashAggregatorDone
} | conditional_block |
hash_aggregator.go | ([]int, numBuffered)
}
}
// onlineAgg groups all tuples in b into equality chains, then probes the
// heads of those chains against already existing groups, aggregates matched
// chains into the corresponding buckets and creates new buckets for new
// aggregation groups.
//
// Let's go through an example of how this function works: our input stream
// contains the following tuples:
// {-3}, {-3}, {-2}, {-1}, {-4}, {-1}, {-1}, {-4}.
// (Note that negative values are chosen in order to visually distinguish them
// from the IDs that we'll be working with below.)
// We will use coldata.BatchSize() == 4 and let's assume that we will use a
// simple hash function h(i) = i % 2 with two buckets in the hash table.
//
// I. we get a batch [-3, -3, -2, -1].
// 1. a) compute hash buckets: ProbeScratch.next = [reserved, 1, 1, 0, 1]
// b) build 'next' chains between hash buckets:
// ProbeScratch.first = [3, 1] (length of first == # of hash buckets)
// ProbeScratch.next = [reserved, 2, 4, 0, 0]
// (Note that we have a hash collision in the bucket with hash 1.)
// c) find "equality" buckets (populate HeadID):
// ProbeScratch.HeadID = [1, 1, 3, 4]
// (This means that tuples at positions 0 and 1 are the same, and the
// tuple at position HeadID-1 is the head of the equality chain.)
// 2. divide all tuples into the equality chains based on HeadID:
// eqChains[0] = [0, 1]
// eqChains[1] = [2]
// eqChains[2] = [3]
// The special "heads of equality chains" selection vector is [0, 2, 3].
// 3. we don't have any existing buckets yet, so this step is a noop.
// 4. each of the three equality chains contains tuples from a separate
// aggregation group, so we perform aggregation on each of them in turn.
// After we do so, we will have three buckets and the hash table will contain
// three tuples (with buckets and tuples corresponding to each other):
// buckets = [<bucket for -3>, <bucket for -2>, <bucket for -1>]
// ht.Vals = [-3, -2, -1].
// We have fully processed the first batch.
//
// II. we get a batch [-4, -1, -1, -4].
// 1. a) compute hash buckets: ProbeScratch.next = [reserved, 0, 1, 1, 0]
// b) build 'next' chains between hash buckets:
// ProbeScratch.first = [1, 2]
// ProbeScratch.next = [reserved, 4, 3, 0, 0]
// c) find "equality" buckets:
// ProbeScratch.HeadID = [1, 2, 2, 1]
// 2. divide all tuples into the equality chains based on HeadID:
// eqChains[0] = [0, 3]
// eqChains[1] = [1, 2]
// The special "heads of equality chains" selection vector is [0, 1].
// 3. probe that special "heads" selection vector against the tuples already
// present in the hash table:
// ProbeScratch.HeadID = [0, 3]
// Value 0 indicates that the first equality chain doesn't have an
// existing bucket, but the second chain does and the ID of its bucket is
// HeadID-1 = 2. We aggregate the second equality chain into that bucket.
// 4. the first equality chain contains tuples from a new aggregation group,
// so we create a new bucket for it and perform the aggregation.
// After we do so, we will have four buckets and the hash table will contain
// four tuples:
// buckets = [<bucket for -3>, <bucket for -2>, <bucket for -1>, <bucket for -4>]
// ht.Vals = [-3, -2, -1, -4].
// We have fully processed the second batch.
//
// We have processed the input fully, so we're ready to emit the output.
//
// NOTE: b *must* be non-zero length batch.
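// (Editor's addition: the caller upholds this: Next only enters the
// aggregating state with a non-empty buffered batch, and that buffered batch
// is what gets passed in here.)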
func (op *hashAggregator) onlineAgg(ctx context.Context, b coldata.Batch) {
op.setupScratchSlices(b.Length())
inputVecs := b.ColVecs()
// Step 1: find "equality" buckets: we compute the hash buckets for all
// tuples, build 'next' chains between them, and then find equality buckets
// for the tuples.
op.ht.ComputeHashAndBuildChains(ctx, b)
op.ht.FindBuckets(
b, op.ht.Keys, op.ht.ProbeScratch.First, op.ht.ProbeScratch.Next, op.ht.CheckProbeForDistinct,
)
// Step 2: now that we have op.ht.ProbeScratch.HeadID populated we can
// populate the equality chains.
eqChainsCount, eqChainsHeadsSel := op.populateEqChains(b)
b.SetLength(eqChainsCount)
// Make a copy of the selection vector that contains heads of the
// corresponding equality chains because the underlying memory will be
// modified below.
eqChainsHeads := op.scratch.intSlice[:eqChainsCount]
copy(eqChainsHeads, eqChainsHeadsSel)
// Step 3: if we have any existing buckets, we need to probe the heads of
// the equality chains (which the selection vector on b currently contains)
// against the heads of the existing groups.
if len(op.buckets) > 0 {
op.ht.FindBuckets(
b, op.ht.Keys, op.ht.BuildScratch.First, op.ht.BuildScratch.Next, op.ht.CheckBuildForAggregation,
)
for eqChainsSlot, HeadID := range op.ht.ProbeScratch.HeadID[:eqChainsCount] {
if HeadID != 0 {
// Tuples in this equality chain belong to an already existing
// group.
eqChain := op.scratch.eqChains[eqChainsSlot]
bucket := op.buckets[HeadID-1]
op.aggHelper.performAggregation(
ctx, inputVecs, len(eqChain), eqChain, bucket, nil, /* groups */
)
// We have fully processed this equality chain, so we need to
// reset its length.
op.scratch.eqChains[eqChainsSlot] = op.scratch.eqChains[eqChainsSlot][:0]
}
}
}
// Step 4: now we go over all equality chains and check whether there are
// any that haven't been processed yet (they will be of non-zero length).
// If we find any, we'll create a new bucket for each.
newGroupsHeadsSel := op.scratch.anotherIntSlice[:0]
newGroupCount := 0
for eqChainSlot, eqChain := range op.scratch.eqChains[:eqChainsCount] {
if len(eqChain) > 0 {
// Tuples in this equality chain belong to a new aggregation group,
// so we'll use a new bucket and make sure that the head of this
// equality chain is appended to the hash table in the
// corresponding position.
var bucket *aggBucket
if nextBucketIdx := len(op.buckets); op.numPreviouslyCreatedBuckets > nextBucketIdx {
// We still have a bucket created on the previous run of the
// hash aggregator. Increase the length of op.buckets, using
// previously-allocated capacity, and then reset the bucket for
// reuse.
op.buckets = op.buckets[:nextBucketIdx+1]
bucket = op.buckets[nextBucketIdx]
bucket.reset()
} else {
// Need to allocate a new bucket.
bucket = op.hashAlloc.newAggBucket()
op.buckets = append(op.buckets, bucket)
// We know that all selected tuples belong to the same single
// group, so we can pass 'nil' for the 'groups' argument.
bucket.init(
op.aggFnsAlloc.MakeAggregateFuncs(), op.aggHelper.makeSeenMaps(), nil, /* groups */
)
}
op.aggHelper.performAggregation(
ctx, inputVecs, len(eqChain), eqChain, bucket, nil, /* groups */
)
newGroupsHeadsSel = append(newGroupsHeadsSel, eqChainsHeads[eqChainSlot])
// We need to compact the hash buffer according to the new groups
// head tuples selection vector we're building. | op.ht.ProbeScratch.HashBuffer[newGroupCount] = op.ht.ProbeScratch.HashBuffer[eqChainSlot]
newGroupCount++ | random_line_split |
|
hash_aggregator.go | 3]
// The special "heads of equality chains" selection vector is [0, 2, 3].
// 3. we don't have any existing buckets yet, so this step is a noop.
// 4. each of the three equality chains contains tuples from a separate
// aggregation group, so we perform aggregation on each of them in turn.
// After we do so, we will have three buckets and the hash table will contain
// three tuples (with buckets and tuples corresponding to each other):
// buckets = [<bucket for -3>, <bucket for -2>, <bucket for -1>]
// ht.Vals = [-3, -2, -1].
// We have fully processed the first batch.
//
// II. we get a batch [-4, -1, -1, -4].
// 1. a) compute hash buckets: ProbeScratch.next = [reserved, 0, 1, 1, 0]
// b) build 'next' chains between hash buckets:
// ProbeScratch.first = [1, 2]
// ProbeScratch.next = [reserved, 4, 3, 0, 0]
// c) find "equality" buckets:
// ProbeScratch.HeadID = [1, 2, 2, 1]
// 2. divide all tuples into the equality chains based on HeadID:
// eqChains[0] = [0, 3]
// eqChains[1] = [1, 2]
// The special "heads of equality chains" selection vector is [0, 1].
// 3. probe that special "heads" selection vector against the tuples already
// present in the hash table:
// ProbeScratch.HeadID = [0, 3]
// Value 0 indicates that the first equality chain doesn't have an
// existing bucket, but the second chain does and the ID of its bucket is
// HeadID-1 = 2. We aggregate the second equality chain into that bucket.
// 4. the first equality chain contains tuples from a new aggregation group,
// so we create a new bucket for it and perform the aggregation.
// After we do so, we will have four buckets and the hash table will contain
// four tuples:
// buckets = [<bucket for -3>, <bucket for -2>, <bucket for -1>, <bucket for -4>]
// ht.Vals = [-3, -2, -1, -4].
// We have fully processed the second batch.
//
// We have processed the input fully, so we're ready to emit the output.
//
// NOTE: b *must* be a non-zero length batch.
func (op *hashAggregator) onlineAgg(ctx context.Context, b coldata.Batch) {
op.setupScratchSlices(b.Length())
inputVecs := b.ColVecs()
// Step 1: find "equality" buckets: we compute the hash buckets for all
// tuples, build 'next' chains between them, and then find equality buckets
// for the tuples.
op.ht.ComputeHashAndBuildChains(ctx, b)
op.ht.FindBuckets(
b, op.ht.Keys, op.ht.ProbeScratch.First, op.ht.ProbeScratch.Next, op.ht.CheckProbeForDistinct,
)
// Step 2: now that we have op.ht.ProbeScratch.HeadID populated we can
// populate the equality chains.
eqChainsCount, eqChainsHeadsSel := op.populateEqChains(b)
b.SetLength(eqChainsCount)
// Make a copy of the selection vector that contains heads of the
// corresponding equality chains because the underlying memory will be
// modified below.
eqChainsHeads := op.scratch.intSlice[:eqChainsCount]
copy(eqChainsHeads, eqChainsHeadsSel)
// Step 3: if we have any existing buckets, we need to probe the heads of
// the equality chains (which the selection vector on b currently contains)
// against the heads of the existing groups.
if len(op.buckets) > 0 {
op.ht.FindBuckets(
b, op.ht.Keys, op.ht.BuildScratch.First, op.ht.BuildScratch.Next, op.ht.CheckBuildForAggregation,
)
for eqChainsSlot, HeadID := range op.ht.ProbeScratch.HeadID[:eqChainsCount] {
if HeadID != 0 {
// Tuples in this equality chain belong to an already existing
// group.
eqChain := op.scratch.eqChains[eqChainsSlot]
bucket := op.buckets[HeadID-1]
op.aggHelper.performAggregation(
ctx, inputVecs, len(eqChain), eqChain, bucket, nil, /* groups */
)
// We have fully processed this equality chain, so we need to
// reset its length.
op.scratch.eqChains[eqChainsSlot] = op.scratch.eqChains[eqChainsSlot][:0]
}
}
}
// Step 4: now we go over all equality chains and check whether there are
// any that haven't been processed yet (they will be of non-zero length).
// If we find any, we'll create a new bucket for each.
newGroupsHeadsSel := op.scratch.anotherIntSlice[:0]
newGroupCount := 0
for eqChainSlot, eqChain := range op.scratch.eqChains[:eqChainsCount] {
if len(eqChain) > 0 {
// Tuples in this equality chain belong to a new aggregation group,
// so we'll use a new bucket and make sure that the head of this
// equality chain is appended to the hash table in the
// corresponding position.
var bucket *aggBucket
if nextBucketIdx := len(op.buckets); op.numPreviouslyCreatedBuckets > nextBucketIdx {
// We still have a bucket created on the previous run of the
// hash aggregator. Increase the length of op.buckets, using
// previously-allocated capacity, and then reset the bucket for
// reuse.
op.buckets = op.buckets[:nextBucketIdx+1]
bucket = op.buckets[nextBucketIdx]
bucket.reset()
} else {
// Need to allocate a new bucket.
bucket = op.hashAlloc.newAggBucket()
op.buckets = append(op.buckets, bucket)
// We know that all selected tuples belong to the same single
// group, so we can pass 'nil' for the 'groups' argument.
bucket.init(
op.aggFnsAlloc.MakeAggregateFuncs(), op.aggHelper.makeSeenMaps(), nil, /* groups */
)
}
op.aggHelper.performAggregation(
ctx, inputVecs, len(eqChain), eqChain, bucket, nil, /* groups */
)
newGroupsHeadsSel = append(newGroupsHeadsSel, eqChainsHeads[eqChainSlot])
// We need to compact the hash buffer according to the new groups
// head tuples selection vector we're building.
op.ht.ProbeScratch.HashBuffer[newGroupCount] = op.ht.ProbeScratch.HashBuffer[eqChainSlot]
newGroupCount++
op.scratch.eqChains[eqChainSlot] = op.scratch.eqChains[eqChainSlot][:0]
}
}
if newGroupCount > 0 {
// We have created new buckets, so we need to append the heads of those
// buckets to the hash table.
copy(b.Selection(), newGroupsHeadsSel)
b.SetLength(newGroupCount)
op.ht.AppendAllDistinct(ctx, b)
}
}
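// Illustrative sketch, not part of the original file: grouping tuples into
// equality chains from a populated HeadID slice, mirroring step 2 of the
// walkthrough above. HeadID values are 1-based and HeadID-1 is the index of
// the chain's head tuple; the real code stores chains in dense eqChains
// slots rather than keying them by head index.
func exampleEqChains(headIDs []int) map[int][]int {
	chains := make(map[int][]int) // head tuple index -> member tuple indices
	for i, h := range headIDs {
		chains[h-1] = append(chains[h-1], i)
	}
	return chains
}
// For the first batch above, exampleEqChains([]int{1, 1, 3, 4}) returns
// {0: [0 1], 2: [2], 3: [3]}, which matches the eqChains in the example.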
func (op *hashAggregator) ExportBuffered(ctx context.Context, _ colexecop.Operator) coldata.Batch {
if !op.inputTrackingState.zeroBatchEnqueued {
// Per the contract of the spilling queue, we need to append a
// zero-length batch.
op.inputTrackingState.tuples.Enqueue(ctx, coldata.ZeroBatch)
op.inputTrackingState.zeroBatchEnqueued = true
}
batch, err := op.inputTrackingState.tuples.Dequeue(ctx)
if err != nil {
colexecerror.InternalError(err)
}
return batch
}
func (op *hashAggregator) Reset(ctx context.Context) {
if r, ok := op.Input.(colexecop.Resetter); ok {
r.Reset(ctx)
}
op.bufferingState.tuples.ResetInternalBatch()
op.bufferingState.pendingBatch = nil
op.bufferingState.unprocessedIdx = 0
if op.numPreviouslyCreatedBuckets < len(op.buckets) {
op.numPreviouslyCreatedBuckets = len(op.buckets)
}
// Set up buckets for reuse.
op.buckets = op.buckets[:0]
op.ht.Reset(ctx)
if op.inputTrackingState.tuples != nil {
if err := op.inputTrackingState.tuples.Close(ctx); err != nil {
colexecerror.InternalError(err)
}
op.inputTrackingState.zeroBatchEnqueued = false
}
op.curOutputBucketIdx = 0
op.state = hashAggregatorBuffering
}
func (op *hashAggregator) | Close | identifier_name |
|
watcher.go | the
// specific language governing permissions and limitations
// under the License.
package docker
import (
"fmt"
"net/http"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/go-connections/tlsconfig"
"golang.org/x/net/context"
"github.com/elastic/beats/libbeat/common/bus"
"github.com/elastic/beats/libbeat/logp"
)
// Select Docker API version
const (
shortIDLen = 12
)
// Watcher reads docker events and keeps a list of known containers
type Watcher interface {
// Start watching docker API for new containers
Start() error
// Stop watching docker API for new containers
Stop()
// Container returns the running container with the given ID or nil if unknown
Container(ID string) *Container
// Containers returns the list of known containers
Containers() map[string]*Container
// ListenStart returns a bus listener to receive container started events, with a `container` key holding it
ListenStart() bus.Listener
// ListenStop returns a bus listener to receive container stopped events, with a `container` key holding it
ListenStop() bus.Listener
}
// TLSConfig for docker socket connection
type TLSConfig struct {
CA string `config:"certificate_authority"`
Certificate string `config:"certificate"`
Key string `config:"key"`
}
type watcher struct {
sync.RWMutex
client Client
ctx context.Context
stop context.CancelFunc
containers map[string]*Container
deleted map[string]time.Time // deleted containers key -> last access time
cleanupTimeout time.Duration
lastValidTimestamp int64
stopped sync.WaitGroup
bus bus.Bus
shortID bool // whether to store short ID in "containers" too
}
// Container info retrieved by the watcher
type Container struct {
ID string
Name string
Image string
Labels map[string]string
IPAddresses []string
Ports []types.Port
}
// Client for docker interface
type Client interface {
ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
}
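// Illustrative sketch, not part of the original file: a minimal in-memory
// stub that satisfies the Client interface above, e.g. for exercising the
// watcher without a real Docker daemon.
type stubClient struct {
	containers []types.Container
}
func (s *stubClient) ContainerList(_ context.Context, _ types.ContainerListOptions) ([]types.Container, error) {
	return s.containers, nil
}
func (s *stubClient) ContainerInspect(_ context.Context, _ string) (types.ContainerJSON, error) {
	return types.ContainerJSON{}, nil
}
func (s *stubClient) Events(_ context.Context, _ types.EventsOptions) (<-chan events.Message, <-chan error) {
	// No events: both channels simply stay open and empty.
	return make(chan events.Message), make(chan error)
}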
// WatcherConstructor represents a function that creates a new Watcher from the given parameters
type WatcherConstructor func(host string, tls *TLSConfig, storeShortID bool) (Watcher, error)
// NewWatcher returns a watcher running for the given settings
func NewWatcher(host string, tls *TLSConfig, storeShortID bool) (Watcher, error) {
var httpClient *http.Client
if tls != nil {
options := tlsconfig.Options{
CAFile: tls.CA,
CertFile: tls.Certificate,
KeyFile: tls.Key,
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return nil, err
}
httpClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsc,
},
}
}
client, err := NewClient(host, httpClient, nil)
if err != nil {
return nil, err
}
return NewWatcherWithClient(client, 60*time.Second, storeShortID)
}
// NewWatcherWithClient creates a new Watcher from a given Docker client
func NewWatcherWithClient(client Client, cleanupTimeout time.Duration, storeShortID bool) (Watcher, error) {
ctx, cancel := context.WithCancel(context.Background())
return &watcher{
client: client,
ctx: ctx,
stop: cancel,
containers: make(map[string]*Container),
deleted: make(map[string]time.Time),
cleanupTimeout: cleanupTimeout,
bus: bus.New("docker"),
shortID: storeShortID,
}, nil
}
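// Illustrative sketch, not part of the original file: wiring a watcher with a
// stub Client (such as the one sketched above) and reading back the initial
// container scan; the 60-second cleanup timeout mirrors NewWatcher's default.
func exampleWatcherUsage(c Client) (map[string]*Container, error) {
	w, err := NewWatcherWithClient(c, 60*time.Second, true)
	if err != nil {
		return nil, err
	}
	if err := w.Start(); err != nil {
		return nil, err
	}
	defer w.Stop()
	// Start has already run the initial ContainerList scan, so the known
	// containers can be read back immediately.
	return w.Containers(), nil
}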
// Container returns the running container with the given ID or nil if unknown
func (w *watcher) Container(ID string) *Container {
w.RLock()
container := w.containers[ID]
if container == nil {
w.RUnlock()
return nil
}
_, ok := w.deleted[container.ID]
w.RUnlock()
// Update last access time if it's deleted
if ok {
w.Lock()
w.deleted[container.ID] = time.Now()
w.Unlock()
}
return container
}
// Containers returns the list of known containers
func (w *watcher) Containers() map[string]*Container {
w.RLock()
defer w.RUnlock()
res := make(map[string]*Container)
for k, v := range w.containers {
if !w.shortID || len(k) != shortIDLen {
res[k] = v
}
}
return res
}
// Start watching docker API for new containers
func (w *watcher) Start() error {
// Do initial scan of existing containers
logp.Debug("docker", "Start docker containers scanner")
w.lastValidTimestamp = time.Now().Unix()
w.Lock()
defer w.Unlock()
containers, err := w.listContainers(types.ContainerListOptions{})
if err != nil {
return err
}
for _, c := range containers {
w.containers[c.ID] = c
if w.shortID {
w.containers[c.ID[:shortIDLen]] = c
}
}
// Emit all start events (avoid blocking if the bus gets blocked)
go func() {
for _, c := range containers {
w.bus.Publish(bus.Event{
"start": true,
"container": c,
})
}
}()
w.stopped.Add(2)
go w.watch()
go w.cleanupWorker()
return nil
}
func (w *watcher) Stop() {
w.stop()
}
func (w *watcher) watch() {
filter := filters.NewArgs()
filter.Add("type", "container")
options := types.EventsOptions{
Since: fmt.Sprintf("%d", w.lastValidTimestamp),
Filters: filter,
}
for {
events, errors := w.client.Events(w.ctx, options)
WATCH:
for {
select {
case event := <-events:
logp.Debug("docker", "Got a new docker event: %v", event)
w.lastValidTimestamp = event.Time
// Add / update
if event.Action == "start" || event.Action == "update" {
filter := filters.NewArgs()
filter.Add("id", event.Actor.ID)
containers, err := w.listContainers(types.ContainerListOptions{
Filters: filter,
})
if err != nil || len(containers) != 1 {
logp.Err("Error getting container info: %v", err)
continue
}
container := containers[0]
w.Lock()
w.containers[event.Actor.ID] = container
if w.shortID {
w.containers[event.Actor.ID[:shortIDLen]] = container
}
// un-delete if it's flagged (in case of update or recreation) | "container": container,
})
}
// Delete
if event.Action == "die" {
container := w.Container(event.Actor.ID)
if container != nil {
w.bus.Publish(bus.Event{
"stop": true,
"container": container,
})
}
w.Lock()
w.deleted[event.Actor.ID] = time.Now()
w.Unlock()
}
case err := <-errors:
// Restart watch call
logp.Err("Error watching for docker events: %v", err)
time.Sleep(1 * time.Second)
break WATCH
case <-w.ctx.Done():
logp.Debug("docker", "Watcher stopped")
w.stopped.Done()
return
}
}
}
}
func (w *watcher) listContainers(options types.ContainerListOptions) ([]*Container, error) {
containers, err := w.client.ContainerList(w.ctx, options)
if err != nil {
return nil, err
}
var result []*Container
for _, c := range containers {
var ipaddresses []string
for _, net := range c.NetworkSettings.Networks {
if net.IPAddress != "" {
ipaddresses = append(ipaddresses, net.IPAddress)
}
}
// If there are no network interfaces, assume that the container is on host network
// Inspect the container directly and use the hostname as the IP address in order to monitor it
if len(ipaddresses) == 0 {
info, err := w.client.ContainerInspect(w.ctx, c.ID)
if err == nil {
ipaddresses = append(ipaddresses, info.Config.Hostname)
} else {
logp.Warn("unable to inspect container %s due to error %v", c.ID, err)
}
}
result = append(result, &Container{
ID: c.ID,
Name: c.Names[0][1:], // Strip '/' from container names
Image: c.Image,
Labels: c.Labels,
Ports: c.Ports,
IPAddresses: ipaddresses,
})
}
return result, nil
}
// Clean | delete(w.deleted, event.Actor.ID)
w.Unlock()
w.bus.Publish(bus.Event{
"start": true, | random_line_split |
watcher.go | cleanupTimeout time.Duration
lastValidTimestamp int64
stopped sync.WaitGroup
bus bus.Bus
shortID bool // whether to store short ID in "containers" too
}
// Container info retrieved by the watcher
type Container struct {
ID string
Name string
Image string
Labels map[string]string
IPAddresses []string
Ports []types.Port
}
// Client for docker interface
type Client interface {
ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
}
// WatcherConstructor represents a function that creates a new Watcher from the given parameters
type WatcherConstructor func(host string, tls *TLSConfig, storeShortID bool) (Watcher, error)
// NewWatcher returns a watcher running for the given settings
func NewWatcher(host string, tls *TLSConfig, storeShortID bool) (Watcher, error) {
var httpClient *http.Client
if tls != nil {
options := tlsconfig.Options{
CAFile: tls.CA,
CertFile: tls.Certificate,
KeyFile: tls.Key,
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return nil, err
}
httpClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsc,
},
}
}
client, err := NewClient(host, httpClient, nil)
if err != nil {
return nil, err
}
return NewWatcherWithClient(client, 60*time.Second, storeShortID)
}
// NewWatcherWithClient creates a new Watcher from a given Docker client
func NewWatcherWithClient(client Client, cleanupTimeout time.Duration, storeShortID bool) (Watcher, error) {
ctx, cancel := context.WithCancel(context.Background())
return &watcher{
client: client,
ctx: ctx,
stop: cancel,
containers: make(map[string]*Container),
deleted: make(map[string]time.Time),
cleanupTimeout: cleanupTimeout,
bus: bus.New("docker"),
shortID: storeShortID,
}, nil
}
// Container returns the running container with the given ID or nil if unknown
func (w *watcher) Container(ID string) *Container {
w.RLock()
container := w.containers[ID]
if container == nil {
w.RUnlock()
return nil
}
_, ok := w.deleted[container.ID]
w.RUnlock()
// Update last access time if it's deleted
if ok {
w.Lock()
w.deleted[container.ID] = time.Now()
w.Unlock()
}
return container
}
// Containers returns the list of known containers
func (w *watcher) Containers() map[string]*Container {
w.RLock()
defer w.RUnlock()
res := make(map[string]*Container)
for k, v := range w.containers {
if !w.shortID || len(k) != shortIDLen {
res[k] = v
}
}
return res
}
// Start watching docker API for new containers
func (w *watcher) Start() error {
// Do initial scan of existing containers
logp.Debug("docker", "Start docker containers scanner")
w.lastValidTimestamp = time.Now().Unix()
w.Lock()
defer w.Unlock()
containers, err := w.listContainers(types.ContainerListOptions{})
if err != nil {
return err
}
for _, c := range containers {
w.containers[c.ID] = c
if w.shortID {
w.containers[c.ID[:shortIDLen]] = c
}
}
// Emit all start events (avoid blocking if the bus gets blocked)
go func() {
for _, c := range containers {
w.bus.Publish(bus.Event{
"start": true,
"container": c,
})
}
}()
w.stopped.Add(2)
go w.watch()
go w.cleanupWorker()
return nil
}
func (w *watcher) Stop() {
w.stop()
}
func (w *watcher) watch() {
filter := filters.NewArgs()
filter.Add("type", "container")
options := types.EventsOptions{
Since: fmt.Sprintf("%d", w.lastValidTimestamp),
Filters: filter,
}
for {
events, errors := w.client.Events(w.ctx, options)
WATCH:
for {
select {
case event := <-events:
logp.Debug("docker", "Got a new docker event: %v", event)
w.lastValidTimestamp = event.Time
// Add / update
if event.Action == "start" || event.Action == "update" {
filter := filters.NewArgs()
filter.Add("id", event.Actor.ID)
containers, err := w.listContainers(types.ContainerListOptions{
Filters: filter,
})
if err != nil || len(containers) != 1 {
logp.Err("Error getting container info: %v", err)
continue
}
container := containers[0]
w.Lock()
w.containers[event.Actor.ID] = container
if w.shortID {
w.containers[event.Actor.ID[:shortIDLen]] = container
}
// un-delete if it's flagged (in case of update or recreation)
delete(w.deleted, event.Actor.ID)
w.Unlock()
w.bus.Publish(bus.Event{
"start": true,
"container": container,
})
}
// Delete
if event.Action == "die" {
container := w.Container(event.Actor.ID)
if container != nil {
w.bus.Publish(bus.Event{
"stop": true,
"container": container,
})
}
w.Lock()
w.deleted[event.Actor.ID] = time.Now()
w.Unlock()
}
case err := <-errors:
// Restart watch call
logp.Err("Error watching for docker events: %v", err)
time.Sleep(1 * time.Second)
break WATCH
case <-w.ctx.Done():
logp.Debug("docker", "Watcher stopped")
w.stopped.Done()
return
}
}
}
}
func (w *watcher) listContainers(options types.ContainerListOptions) ([]*Container, error) {
containers, err := w.client.ContainerList(w.ctx, options)
if err != nil {
return nil, err
}
var result []*Container
for _, c := range containers {
var ipaddresses []string
for _, net := range c.NetworkSettings.Networks {
if net.IPAddress != "" {
ipaddresses = append(ipaddresses, net.IPAddress)
}
}
// If there are no network interfaces, assume that the container is on host network
// Inspect the container directly and use the hostname as the IP address in order to monitor it
if len(ipaddresses) == 0 {
info, err := w.client.ContainerInspect(w.ctx, c.ID)
if err == nil {
ipaddresses = append(ipaddresses, info.Config.Hostname)
} else {
logp.Warn("unable to inspect container %s due to error %v", c.ID, err)
}
}
result = append(result, &Container{
ID: c.ID,
Name: c.Names[0][1:], // Strip '/' from container names
Image: c.Image,
Labels: c.Labels,
Ports: c.Ports,
IPAddresses: ipaddresses,
})
}
return result, nil
}
// Clean up deleted containers after they are not used anymore
func (w *watcher) cleanupWorker() {
for {
// Wait a full period
time.Sleep(w.cleanupTimeout)
select {
case <-w.ctx.Done():
w.stopped.Done()
return
default:
// Check entries for timeout
var toDelete []string
timeout := time.Now().Add(-w.cleanupTimeout)
w.RLock()
for key, lastSeen := range w.deleted {
if lastSeen.Before(timeout) {
logp.Debug("docker", "Removing container %s after cool down timeout", key)
toDelete = append(toDelete, key)
}
}
w.RUnlock()
// Delete timed out entries:
for _, key := range toDelete {
container := w.Container(key)
if container != nil {
w.bus.Publish(bus.Event{
"delete": true,
"container": container,
})
}
}
w.Lock()
for _, key := range toDelete {
delete(w.deleted, key)
delete(w.containers, key)
if w.shortID {
delete(w.containers, key[:shortIDLen])
}
}
w.Unlock()
}
}
}
// ListenStart returns a bus listener to receive container started events, with a `container` key holding it
func (w *watcher) ListenStart() bus.Listener {
return w.bus.Subscribe("start")
}
// ListenStop returns a bus listener to receive container stopped events, with a `container` key holding it
func (w *watcher) ListenStop() bus.Listener | {
return w.bus.Subscribe("stop")
} | identifier_body |
|
watcher.go | the
// specific language governing permissions and limitations
// under the License.
package docker
import (
"fmt"
"net/http"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/go-connections/tlsconfig"
"golang.org/x/net/context"
"github.com/elastic/beats/libbeat/common/bus"
"github.com/elastic/beats/libbeat/logp"
)
// Select Docker API version
const (
shortIDLen = 12
)
// Watcher reads docker events and keeps a list of known containers
type Watcher interface {
// Start watching docker API for new containers
Start() error
// Stop watching docker API for new containers
Stop()
// Container returns the running container with the given ID or nil if unknown
Container(ID string) *Container
// Containers returns the list of known containers
Containers() map[string]*Container
// ListenStart returns a bus listener to receive container started events, with a `container` key holding it
ListenStart() bus.Listener
// ListenStop returns a bus listener to receive container stopped events, with a `container` key holding it
ListenStop() bus.Listener
}
// TLSConfig for docker socket connection
type TLSConfig struct {
CA string `config:"certificate_authority"`
Certificate string `config:"certificate"`
Key string `config:"key"`
}
type watcher struct {
sync.RWMutex
client Client
ctx context.Context
stop context.CancelFunc
containers map[string]*Container
deleted map[string]time.Time // deleted containers key -> last access time
cleanupTimeout time.Duration
lastValidTimestamp int64
stopped sync.WaitGroup
bus bus.Bus
shortID bool // whether to store short ID in "containers" too
}
// Container info retrieved by the watcher
type Container struct {
ID string
Name string
Image string
Labels map[string]string
IPAddresses []string
Ports []types.Port
}
// Client for docker interface
type Client interface {
ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
}
// WatcherConstructor represents a function that creates a new Watcher from the given parameters
type WatcherConstructor func(host string, tls *TLSConfig, storeShortID bool) (Watcher, error)
// NewWatcher returns a watcher running for the given settings
func NewWatcher(host string, tls *TLSConfig, storeShortID bool) (Watcher, error) {
var httpClient *http.Client
if tls != nil {
options := tlsconfig.Options{
CAFile: tls.CA,
CertFile: tls.Certificate,
KeyFile: tls.Key,
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return nil, err
}
httpClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsc,
},
}
}
client, err := NewClient(host, httpClient, nil)
if err != nil {
return nil, err
}
return NewWatcherWithClient(client, 60*time.Second, storeShortID)
}
// NewWatcherWithClient creates a new Watcher from a given Docker client
func NewWatcherWithClient(client Client, cleanupTimeout time.Duration, storeShortID bool) (Watcher, error) {
ctx, cancel := context.WithCancel(context.Background())
return &watcher{
client: client,
ctx: ctx,
stop: cancel,
containers: make(map[string]*Container),
deleted: make(map[string]time.Time),
cleanupTimeout: cleanupTimeout,
bus: bus.New("docker"),
shortID: storeShortID,
}, nil
}
// Container returns the running container with the given ID or nil if unknown
func (w *watcher) Container(ID string) *Container {
w.RLock()
container := w.containers[ID]
if container == nil {
w.RUnlock()
return nil
}
_, ok := w.deleted[container.ID]
w.RUnlock()
// Update last access time if it's deleted
if ok {
w.Lock()
w.deleted[container.ID] = time.Now()
w.Unlock()
}
return container
}
// Containers returns the list of known containers
func (w *watcher) Containers() map[string]*Container {
w.RLock()
defer w.RUnlock()
res := make(map[string]*Container)
for k, v := range w.containers |
return res
}
// Start watching docker API for new containers
func (w *watcher) Start() error {
// Do initial scan of existing containers
logp.Debug("docker", "Start docker containers scanner")
w.lastValidTimestamp = time.Now().Unix()
w.Lock()
defer w.Unlock()
containers, err := w.listContainers(types.ContainerListOptions{})
if err != nil {
return err
}
for _, c := range containers {
w.containers[c.ID] = c
if w.shortID {
w.containers[c.ID[:shortIDLen]] = c
}
}
// Emit all start events (avoid blocking if the bus gets blocked)
go func() {
for _, c := range containers {
w.bus.Publish(bus.Event{
"start": true,
"container": c,
})
}
}()
w.stopped.Add(2)
go w.watch()
go w.cleanupWorker()
return nil
}
func (w *watcher) Stop() {
w.stop()
}
func (w *watcher) watch() {
filter := filters.NewArgs()
filter.Add("type", "container")
options := types.EventsOptions{
Since: fmt.Sprintf("%d", w.lastValidTimestamp),
Filters: filter,
}
for {
events, errors := w.client.Events(w.ctx, options)
WATCH:
for {
select {
case event := <-events:
logp.Debug("docker", "Got a new docker event: %v", event)
w.lastValidTimestamp = event.Time
// Add / update
if event.Action == "start" || event.Action == "update" {
filter := filters.NewArgs()
filter.Add("id", event.Actor.ID)
containers, err := w.listContainers(types.ContainerListOptions{
Filters: filter,
})
if err != nil || len(containers) != 1 {
logp.Err("Error getting container info: %v", err)
continue
}
container := containers[0]
w.Lock()
w.containers[event.Actor.ID] = container
if w.shortID {
w.containers[event.Actor.ID[:shortIDLen]] = container
}
// un-delete if it's flagged (in case of update or recreation)
delete(w.deleted, event.Actor.ID)
w.Unlock()
w.bus.Publish(bus.Event{
"start": true,
"container": container,
})
}
// Delete
if event.Action == "die" {
container := w.Container(event.Actor.ID)
if container != nil {
w.bus.Publish(bus.Event{
"stop": true,
"container": container,
})
}
w.Lock()
w.deleted[event.Actor.ID] = time.Now()
w.Unlock()
}
case err := <-errors:
// Restart watch call
logp.Err("Error watching for docker events: %v", err)
time.Sleep(1 * time.Second)
break WATCH
case <-w.ctx.Done():
logp.Debug("docker", "Watcher stopped")
w.stopped.Done()
return
}
}
}
}
func (w *watcher) listContainers(options types.ContainerListOptions) ([]*Container, error) {
containers, err := w.client.ContainerList(w.ctx, options)
if err != nil {
return nil, err
}
var result []*Container
for _, c := range containers {
var ipaddresses []string
for _, net := range c.NetworkSettings.Networks {
if net.IPAddress != "" {
ipaddresses = append(ipaddresses, net.IPAddress)
}
}
// If there are no network interfaces, assume that the container is on host network
// Inspect the container directly and use the hostname as the IP address in order to monitor it
if len(ipaddresses) == 0 {
info, err := w.client.ContainerInspect(w.ctx, c.ID)
if err == nil {
ipaddresses = append(ipaddresses, info.Config.Hostname)
} else {
logp.Warn("unable to inspect container %s due to error %v", c.ID, err)
}
}
result = append(result, &Container{
ID: c.ID,
Name: c.Names[0][1:], // Strip '/' from container names
Image: c.Image,
Labels: c.Labels,
Ports: c.Ports,
IPAddresses: ipaddresses,
})
}
return result, nil
}
| {
if !w.shortID || len(k) != shortIDLen {
res[k] = v
}
} | conditional_block |
watcher.go | the
// specific language governing permissions and limitations
// under the License.
package docker
import (
"fmt"
"net/http"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/go-connections/tlsconfig"
"golang.org/x/net/context"
"github.com/elastic/beats/libbeat/common/bus"
"github.com/elastic/beats/libbeat/logp"
)
// Select Docker API version
const (
shortIDLen = 12
)
// Watcher reads docker events and keeps a list of known containers
type Watcher interface {
// Start watching docker API for new containers
Start() error
// Stop watching docker API for new containers
Stop()
// Container returns the running container with the given ID or nil if unknown
Container(ID string) *Container
// Containers returns the list of known containers
Containers() map[string]*Container
// ListenStart returns a bus listener to receive container started events, with a `container` key holding it
ListenStart() bus.Listener
// ListenStop returns a bus listener to receive container stopped events, with a `container` key holding it
ListenStop() bus.Listener
}
// TLSConfig for docker socket connection
type TLSConfig struct {
CA string `config:"certificate_authority"`
Certificate string `config:"certificate"`
Key string `config:"key"`
}
type watcher struct {
sync.RWMutex
client Client
ctx context.Context
stop context.CancelFunc
containers map[string]*Container
deleted map[string]time.Time // deleted containers key -> last access time
cleanupTimeout time.Duration
lastValidTimestamp int64
stopped sync.WaitGroup
bus bus.Bus
shortID bool // whether to store short ID in "containers" too
}
// Container info retrieved by the watcher
type Container struct {
ID string
Name string
Image string
Labels map[string]string
IPAddresses []string
Ports []types.Port
}
// Client for docker interface
type Client interface {
ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
}
// WatcherConstructor represents a function that creates a new Watcher from the given parameters
type WatcherConstructor func(host string, tls *TLSConfig, storeShortID bool) (Watcher, error)
// NewWatcher returns a watcher running for the given settings
func NewWatcher(host string, tls *TLSConfig, storeShortID bool) (Watcher, error) {
var httpClient *http.Client
if tls != nil {
options := tlsconfig.Options{
CAFile: tls.CA,
CertFile: tls.Certificate,
KeyFile: tls.Key,
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return nil, err
}
httpClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsc,
},
}
}
client, err := NewClient(host, httpClient, nil)
if err != nil {
return nil, err
}
return NewWatcherWithClient(client, 60*time.Second, storeShortID)
}
// NewWatcherWithClient creates a new Watcher from a given Docker client
func NewWatcherWithClient(client Client, cleanupTimeout time.Duration, storeShortID bool) (Watcher, error) {
ctx, cancel := context.WithCancel(context.Background())
return &watcher{
client: client,
ctx: ctx,
stop: cancel,
containers: make(map[string]*Container),
deleted: make(map[string]time.Time),
cleanupTimeout: cleanupTimeout,
bus: bus.New("docker"),
shortID: storeShortID,
}, nil
}
// Container returns the running container with the given ID or nil if unknown
func (w *watcher) Container(ID string) *Container {
w.RLock()
container := w.containers[ID]
if container == nil {
w.RUnlock()
return nil
}
_, ok := w.deleted[container.ID]
w.RUnlock()
// Update last access time if it's deleted
if ok {
w.Lock()
w.deleted[container.ID] = time.Now()
w.Unlock()
}
return container
}
// Containers returns the list of known containers
func (w *watcher) | () map[string]*Container {
w.RLock()
defer w.RUnlock()
res := make(map[string]*Container)
for k, v := range w.containers {
if !w.shortID || len(k) != shortIDLen {
res[k] = v
}
}
return res
}
// Start watching docker API for new containers
func (w *watcher) Start() error {
// Do initial scan of existing containers
logp.Debug("docker", "Start docker containers scanner")
w.lastValidTimestamp = time.Now().Unix()
w.Lock()
defer w.Unlock()
containers, err := w.listContainers(types.ContainerListOptions{})
if err != nil {
return err
}
for _, c := range containers {
w.containers[c.ID] = c
if w.shortID {
w.containers[c.ID[:shortIDLen]] = c
}
}
// Emit all start events (avoid blocking if the bus gets blocked)
go func() {
for _, c := range containers {
w.bus.Publish(bus.Event{
"start": true,
"container": c,
})
}
}()
w.stopped.Add(2)
go w.watch()
go w.cleanupWorker()
return nil
}
func (w *watcher) Stop() {
w.stop()
}
func (w *watcher) watch() {
filter := filters.NewArgs()
filter.Add("type", "container")
options := types.EventsOptions{
Since: fmt.Sprintf("%d", w.lastValidTimestamp),
Filters: filter,
}
for {
events, errors := w.client.Events(w.ctx, options)
WATCH:
for {
select {
case event := <-events:
logp.Debug("docker", "Got a new docker event: %v", event)
w.lastValidTimestamp = event.Time
// Add / update
if event.Action == "start" || event.Action == "update" {
filter := filters.NewArgs()
filter.Add("id", event.Actor.ID)
containers, err := w.listContainers(types.ContainerListOptions{
Filters: filter,
})
if err != nil || len(containers) != 1 {
logp.Err("Error getting container info: %v", err)
continue
}
container := containers[0]
w.Lock()
w.containers[event.Actor.ID] = container
if w.shortID {
w.containers[event.Actor.ID[:shortIDLen]] = container
}
// un-delete if it's flagged (in case of update or recreation)
delete(w.deleted, event.Actor.ID)
w.Unlock()
w.bus.Publish(bus.Event{
"start": true,
"container": container,
})
}
// Delete
if event.Action == "die" {
container := w.Container(event.Actor.ID)
if container != nil {
w.bus.Publish(bus.Event{
"stop": true,
"container": container,
})
}
w.Lock()
w.deleted[event.Actor.ID] = time.Now()
w.Unlock()
}
case err := <-errors:
// Restart watch call
logp.Err("Error watching for docker events: %v", err)
time.Sleep(1 * time.Second)
break WATCH
case <-w.ctx.Done():
logp.Debug("docker", "Watcher stopped")
w.stopped.Done()
return
}
}
}
}
func (w *watcher) listContainers(options types.ContainerListOptions) ([]*Container, error) {
containers, err := w.client.ContainerList(w.ctx, options)
if err != nil {
return nil, err
}
var result []*Container
for _, c := range containers {
var ipaddresses []string
for _, net := range c.NetworkSettings.Networks {
if net.IPAddress != "" {
ipaddresses = append(ipaddresses, net.IPAddress)
}
}
// If there are no network interfaces, assume that the container is on host network
// Inspect the container directly and use the hostname as the IP address in order to monitor it
if len(ipaddresses) == 0 {
info, err := w.client.ContainerInspect(w.ctx, c.ID)
if err == nil {
ipaddresses = append(ipaddresses, info.Config.Hostname)
} else {
logp.Warn("unable to inspect container %s due to error %v", c.ID, err)
}
}
result = append(result, &Container{
ID: c.ID,
Name: c.Names[0][1:], // Strip '/' from container names
Image: c.Image,
Labels: c.Labels,
Ports: c.Ports,
IPAddresses: ipaddresses,
})
}
return result, nil
}
// | Containers | identifier_name |
catsass.py | _ can do here is save the sys.ENVIRONMENT
by reducing print-ed waste. Mew.
returns: probably what you want
"""
return __cat_whisperer()[Cat.ASS]
# === comb() ===
def comb(cat, *brush):
"""
Filter the results of poking the cat. Takes variable
names as strings for the args.
cat:
the_cats_ass() or similar
brush:
the variables you wish to see (as strings)
returns: hairballs
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k in brush})
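# Illustrative sketch, not part of the original module: how the helpers above
# are typically combined. Printing relies on PrettyKitty.__repr__ to draw the
# cat with the selected locals.
def _example_usage():
    kitten, lion = "small", "LARGE"
    # Show only the two named locals of this frame, rendered as cat art.
    print(comb(the_cats_ass(), "kitten", "lion"))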
# === avoid() ===
def avoid(cat, *undesirables):
"""
Omit any undesirable variables from the result.
cat:
the_cats_ass() or similar
undesirables:
variables you wish to have omitted (as strings)
returns: from whence it came
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k not in undesirables})
# === poke_the_cat() ===
def poke_the_cat(where, catnip=False):
| """
if not catnip:
from random import randint
class BadCat(InterruptedError):
pass
if randint(1, 10) == 7:
mew = "You attempt to poke the cat but it attacks. " \
"Maybe if you gave it some catnip?"
raise BadCat(mew)
return __cat_whisperer()[where]
# === schrodingers_cat() ===
def schrodingers_cat(peek=False):
"""
Peek in the box for a 50/50 shot of retrieving your
desired output, while the other half of the time the
cat is dead and the function returns nothing at all.
If you decide not to peek, the cat -being neither
dead nor alive- responds with random nonsense.
peek:
whether to peek in the box
returns: depends
"""
from random import choice, randint
if peek:
if randint(1, 10) % 2 == 0:
# RIP
return "Nothing at all"
else:
return poke_the_cat(Cat.LEGS, catnip=True)
else:
garbled_cries = "mew meow wokka beocat ekkie".split()
return choice(garbled_cries)
def calico_kitty():
"""I can haz colorz?"""
return __cat_whisperer(colors=True, coat='calico_colorz', logo_colorz='logo_colorz')[Cat.ASS]
def tuxedo_cat():
"""Finer than a pheasant"""
return __cat_whisperer(colors=True, coat='tuxedo_colorz', logo_colorz='dark_logo_colorz')[Cat.ASS]
__LIBDIR__ = os.path.abspath(os.path.dirname(__file__))
# === PrettyKitty() ===
class PrettyKitty:
"""I can has repr?"""
def __init__(self, ctx, values, cat=None, logo=None,
marker='|/', logo_offset=6, template=None,
formatter=pformat, colors=False, coat=None,
logo_colorz=None, term_bg="dark", title=None,
title_start=(6, 45)):
# The callers name usually.
self.ctx = ctx
# The local variables data.
self.values = values
# The line where the talk bubble should
# start must end with the marker, or the
# data will be printed just below the logo,
# which may not be what you want.
self.marker = marker
# Other formatters can be swapped in, JSON
# -for example- should work, or yaml.
self.formatter = formatter
# Allows the logo to be offset to either side;
# positive numbers move it left and negative
# numbers move it right.
self.logo_offset = logo_offset
self.term_bg = term_bg
if title is None:
title = "Meowed with love by Duroktar, 2017"
self.title = title
self.title_location = title_start
# Colors ~*%~
self.colors = colors
self.coat = coat
self.logo_colorz = logo_colorz
# TODO Should be public
Template = namedtuple("Template", "view offset")
if template is None:
# Option 1
template = Template({
"Name": self.ctx,
"Vars": self.values}, 1)
# Option 2
template = Template({self.ctx: self.values}, 1)
self.template = template
if cat is None:
cat = open(os.path.join(__LIBDIR__, 'octocat'), 'r').readlines()
self.cat = cat
if logo is None:
logo = open(os.path.join(__LIBDIR__, 'logo'), 'r').readlines()
self.logo = logo
def __repr__(self):
return self.haz_format() + "\n\n"
def haz_format(self):
from shutil import get_terminal_size
def yield_pads(lines, l):
for line in lines:
line = line.rstrip("\n").rstrip()
length = len(line)
if length < l:
yield line + " " * (l - length - 1)
else:
yield line
def rfill_lines(filler, start=0, offset=0, column=None):
height = len(filler)
for i in range(height):
index = start + i
try:
line = cat[index]
except IndexError:
cat.append(f"{' ' * pivot}{filler[i]}")
else:
if column is not None:
new_l = f"{line[:-(term_width - column) - offset]}{filler[i]}"
else:
new_l = f"{line[:-(term_width - pivot) - offset]}{filler[i]}"
cat[index] = new_l
term_width, term_height = get_terminal_size((80, 30))
cat = list(yield_pads(self.cat, term_width))
pivot = max((len(l.encode('unicode-escape')) for l in self.cat))
logo_offset = self.logo_offset
logo_width = max((len(str(l)) for l in self.logo))
logo = list(yield_pads(self.logo, logo_width - 1))
logo_height = len(logo)
marker = self.marker
data_start_line = [i - 1 for i, v in enumerate(cat) if v.strip().endswith(marker)]
if not data_start_line:
data_start_line = [logo_height + 1]
data_start_line = data_start_line[0]
if logo_height > data_start_line:
data_start_line = logo_height + 1
title = [self.title]
title_start_line, title_start_column = self.title_location
data = self.formatter(self.template.view, width=(term_width - pivot))
if self.colors:
cat, logo, title, data = self.haz_colorz(cat, logo, title, data)
rfill_lines(logo, offset=logo_offset)
rfill_lines(title, start=title_start_line, column=title_start_column)
rfill_lines(data.splitlines(), start=data_start_line, offset=self.template.offset)
return "\n".join((l.rstrip() for l in cat))
def haz_colorz(self, cat, logo, title, data):
from catsass.colorz import kitty_colorz
color_stuffs = kitty_colorz()
if color_stuffs is None:
return cat, logo, title, data
def color_lines(lines, color_mapping, words=False):
if any([len(k) > 1 for k in color_mapping.keys()]):
words = True
search_lines = [groupby(line.split()) for line in lines]
else:
search_lines = [groupby(line) for line in lines]
rv = []
for groups in search_lines:
line = []
for item, group in groups:
color = color_mapping.get(item)
if color is None:
line.append("".join(group))
else:
line.append(color("".join(group)).color_str)
if words:
rv.append(" ".join(line))
else:
rv.append("".join(line))
return rv
highlight = color_stuffs.get('highlight')
# Customz
cat_colorz = color_stuffs.get(self.coat) or {}
logo_colorz = color_stuffs.get(self.logo_colorz) or {}
| """
You really shouldn't be poking cats. But if you insist,
it is recommended to bring catnip as it's not unusual for
cats to attack dicks who poke them.
where:
I leave this as an exercise for the reader. But
a word of wisdom from my 1st grade teacher: never do
anything that you wouldn't want to be caught dead doing.
Sadly he went to jail not long after whispering those
words in my ear.
catnip:
catnip can grow in the wild in many places around
the world. If no catnip can readily be found in yours
or any of your neighbors' yards then just pass True as the
argument.
returns: possibly what you want. | identifier_body |
catsass.py | you_ can do here is save the sys.ENVIRONMENT
by reducing print-ed waste. Mew.
returns: probably what you want
"""
return __cat_whisperer()[Cat.ASS]
# === comb() ===
def comb(cat, *brush):
"""
Filter the results of poking the cat. Takes variable
names as strings for the args.
cat:
the_cats_ass() or similar
brush:
the variables you wish to see (as strings)
returns: hairballs
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k in brush})
# === avoid() ===
def avoid(cat, *undesirables):
"""
Omit any undesirable variables from the result.
cat:
the_cats_ass() or similar
undesirables:
variables you wish to have omitted (as strings)
returns: from whence it came
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k not in undesirables})
# === poke_the_cat() ===
def poke_the_cat(where, catnip=False):
"""
You really shouldn't be poking cats. But if you insist,
it is recommended to bring catnip as it's not unusual for
cats to attack dicks who poke them.
where:
I leave this as an exercise for the reader. But
a word of wisdom from my 1st grade teacher: never do
anything that you wouldn't want to be caught dead doing.
Sadly he went to jail not long after whispering those
words in my ear.
catnip:
catnip can grow in the wild in many places around
the world. If no catnip can readily be found in yours
or any of your neighbors' yards then just pass True as the
argument.
returns: possibly what you want.
"""
if not catnip:
from random import randint
class BadCat(InterruptedError):
pass
if randint(1, 10) == 7:
mew = "You attempt to poke the cat but it attacks. " \
"Maybe if you gave it some catnip?"
raise BadCat(mew)
return __cat_whisperer()[where]
# === schrodingers_cat() ===
def schrodingers_cat(peek=False):
"""
Peek in the box for a 50/50 shot of retrieving your
desired output, while the other half of the time the
cat is dead and the function returns nothing at all.
If you decide not to peek, the cat -being neither
dead nor alive- responds with random nonsense.
peek:
whether to peek in the box
returns: depends
"""
from random import choice, randint
if peek:
if randint(1, 10) % 2 == 0:
# RIP
return "Nothing at all"
else:
return poke_the_cat(Cat.LEGS, catnip=True)
else:
garbled_cries = "mew meow wokka beocat ekkie".split()
return choice(garbled_cries)
def calico_kitty():
"""I can haz colorz?"""
return __cat_whisperer(colors=True, coat='calico_colorz', logo_colorz='logo_colorz')[Cat.ASS]
def tuxedo_cat():
"""Finer than a pheasant"""
return __cat_whisperer(colors=True, coat='tuxedo_colorz', logo_colorz='dark_logo_colorz')[Cat.ASS]
__LIBDIR__ = os.path.abspath(os.path.dirname(__file__))
# === PrettyKitty() ===
class PrettyKitty:
"""I can has repr?"""
def __init__(self, ctx, values, cat=None, logo=None,
marker='|/', logo_offset=6, template=None,
formatter=pformat, colors=False, coat=None,
logo_colorz=None, term_bg="dark", title=None,
title_start=(6, 45)):
# The callers name usually.
self.ctx = ctx
# The local variables data.
self.values = values
# The line where the talk bubble should
# start must end with the marker, or the
# data will be printed just below the logo,
# which may not be what you want.
self.marker = marker
# Other formatters can be swapped in, JSON
# -for example- should work, or yaml.
self.formatter = formatter
# Allows the logo to be offset to either side;
# positive numbers move it left and negative
# numbers move it right.
self.logo_offset = logo_offset
self.term_bg = term_bg
if title is None:
title = "Meowed with love by Duroktar, 2017"
self.title = title
self.title_location = title_start
# Colors ~*%~
self.colors = colors
self.coat = coat | if template is None:
# Option 1
template = Template({
"Name": self.ctx,
"Vars": self.values}, 1)
# Option 2
template = Template({self.ctx: self.values}, 1)
self.template = template
if cat is None:
cat = open(os.path.join(__LIBDIR__, 'octocat'), 'r').readlines()
self.cat = cat
if logo is None:
logo = open(os.path.join(__LIBDIR__, 'logo'), 'r').readlines()
self.logo = logo
def __repr__(self):
return self.haz_format() + "\n\n"
def haz_format(self):
from shutil import get_terminal_size
def yield_pads(lines, l):
for line in lines:
line = line.rstrip("\n").rstrip()
length = len(line)
if length < l:
yield line + " " * (l - length - 1)
else:
yield line
def rfill_lines(filler, start=0, offset=0, column=None):
height = len(filler)
for i in range(height):
index = start + i
try:
line = cat[index]
except IndexError:
cat.append(f"{' ' * pivot}{filler[i]}")
else:
if column is not None:
new_l = f"{line[:-(term_width - column) - offset]}{filler[i]}"
else:
new_l = f"{line[:-(term_width - pivot) - offset]}{filler[i]}"
cat[index] = new_l
term_width, term_height = get_terminal_size((80, 30))
cat = list(yield_pads(self.cat, term_width))
pivot = max((len(l.encode('unicode-escape')) for l in self.cat))
logo_offset = self.logo_offset
logo_width = max((len(str(l)) for l in self.logo))
logo = list(yield_pads(self.logo, logo_width - 1))
logo_height = len(logo)
marker = self.marker
data_start_line = [i - 1 for i, v in enumerate(cat) if v.strip().endswith(marker)]
if not data_start_line:
data_start_line = [logo_height + 1]
data_start_line = data_start_line[0]
if logo_height > data_start_line:
data_start_line = logo_height + 1
title = [self.title]
title_start_line, title_start_column = self.title_location
data = self.formatter(self.template.view, width=(term_width - pivot))
if self.colors:
cat, logo, title, data = self.haz_colorz(cat, logo, title, data)
rfill_lines(logo, offset=logo_offset)
rfill_lines(title, start=title_start_line, column=title_start_column)
rfill_lines(data.splitlines(), start=data_start_line, offset=self.template.offset)
return "\n".join((l.rstrip() for l in cat))
def haz_colorz(self, cat, logo, title, data):
from catsass.colorz import kitty_colorz
color_stuffs = kitty_colorz()
if color_stuffs is None:
return cat, logo, title, data
def color_lines(lines, color_mapping, words=False):
if any([len(k) > 1 for k in color_mapping.keys()]):
words = True
search_lines = [groupby(line.split()) for line in lines]
else:
search_lines = [groupby(line) for line in lines]
rv = []
for groups in search_lines:
line = []
for item, group in groups:
color = color_mapping.get(item)
if color is None:
line.append("".join(group))
else:
line.append(color("".join(group)).color_str)
if words:
rv.append(" ".join(line))
else:
rv.append("".join(line))
return rv
highlight = color_stuffs.get('highlight')
# Customz
cat_colorz = color_stuffs.get(self.coat) or {}
logo_colorz = color_stuffs.get(self.logo_colorz) or {}
| self.logo_colorz = logo_colorz
# TODO Should be public
Template = namedtuple("Template", "view offset") | random_line_split |
catsass.py | _ can do here is save the sys.ENVIRONMENT
by reducing print-ed waste. Mew.
returns: probably what you want
"""
return __cat_whisperer()[Cat.ASS]
# === comb() ===
def comb(cat, *brush):
"""
Filter the results of poking the cat. Takes variable
names as strings for the args.
cat:
the_cats_ass() or similar
brush:
the variables you wish to see (as strings)
returns: hairballs
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k in brush})
# === avoid() ===
def avoid(cat, *undesirables):
"""
Omit any undesirable variables from the result.
cat:
the_cats_ass() or similar
undesirables:
variables you wish to have omitted (as strings)
returns: from whence it came
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k not in undesirables})
# === poke_the_cat() ===
def poke_the_cat(where, catnip=False):
"""
You really shouldn't be poking cats. But if you insist,
it is recommended to bring catnip as it's not unusual for
cats to attack dicks who poke them.
where:
I leave this as an exercise for the reader. But
a word of wisdom from my 1st grade teacher: never do
anything that you wouldn't want to be caught dead doing.
Sadly he went to jail not long after whispering those
words in my ear.
catnip:
catnip can grow in the wild in many places around
the world. If no catnip can readily be found in yours
or any of your neighbors' yards then just pass True as the
argument.
returns: possibly what you want.
"""
if not catnip:
from random import randint
class | (InterruptedError):
pass
if randint(1, 10) == 7:
mew = "You attempt to poke the cat but it attacks. " \
"Maybe if you gave it some catnip?"
raise BadCat(mew)
return __cat_whisperer()[where]
# === schrodingers_cat() ===
def schrodingers_cat(peek=False):
"""
Peek in the box for a 50/50 shot of retrieving your
desired output, while the other half of the time the
cat is dead and the function returns nothing at all.
If you decide not to peek, the cat -being neither
dead nor alive- responds with random nonsense.
peek:
whether to peek in the box
returns: depends
"""
from random import choice, randint
if peek:
if randint(1, 10) % 2 == 0:
# RIP
return "Nothing at all"
else:
return poke_the_cat(Cat.LEGS, catnip=True)
else:
garbled_cries = "mew meow wokka beocat ekkie".split()
return choice(garbled_cries)
def calico_kitty():
"""I can haz colorz?"""
return __cat_whisperer(colors=True, coat='calico_colorz', logo_colorz='logo_colorz')[Cat.ASS]
def tuxedo_cat():
"""Finer than a pheasant"""
return __cat_whisperer(colors=True, coat='tuxedo_colorz', logo_colorz='dark_logo_colorz')[Cat.ASS]
__LIBDIR__ = os.path.abspath(os.path.dirname(__file__))
# === PrettyKitty() ===
class PrettyKitty:
"""I can has repr?"""
def __init__(self, ctx, values, cat=None, logo=None,
marker='|/', logo_offset=6, template=None,
formatter=pformat, colors=False, coat=None,
logo_colorz=None, term_bg="dark", title=None,
title_start=(6, 45)):
# The callers name usually.
self.ctx = ctx
# The local variables data.
self.values = values
# The line where the talk bubble should
# start must end with the marker, or the
# data will be printed just below the logo,
# which may not be what you want.
self.marker = marker
# Other formatters can be swapped in, JSON
# -for example- should work, or yaml.
self.formatter = formatter
# Allows the logo to be offset to either side;
# positive numbers move it left and negative
# numbers move it right.
self.logo_offset = logo_offset
self.term_bg = term_bg
if title is None:
title = "Meowed with love by Duroktar, 2017"
self.title = title
self.title_location = title_start
# Colors ~*%~
self.colors = colors
self.coat = coat
self.logo_colorz = logo_colorz
# TODO Should be public
Template = namedtuple("Template", "view offset")
if template is None:
# Option 1
template = Template({
"Name": self.ctx,
"Vars": self.values}, 1)
# Option 2
template = Template({self.ctx: self.values}, 1)
self.template = template
if cat is None:
cat = open(os.path.join(__LIBDIR__, 'octocat'), 'r').readlines()
self.cat = cat
if logo is None:
logo = open(os.path.join(__LIBDIR__, 'logo'), 'r').readlines()
self.logo = logo
def __repr__(self):
return self.haz_format() + "\n\n"
def haz_format(self):
from shutil import get_terminal_size
def yield_pads(lines, l):
for line in lines:
line = line.rstrip("\n").rstrip()
length = len(line)
if length < l:
yield line + " " * (l - length - 1)
else:
yield line
def rfill_lines(filler, start=0, offset=0, column=None):
height = len(filler)
for i in range(height):
index = start + i
try:
line = cat[index]
except IndexError:
cat.append(f"{' ' * pivot}{filler[i]}")
else:
if column is not None:
new_l = f"{line[:-(term_width - column) - offset]}{filler[i]}"
else:
new_l = f"{line[:-(term_width - pivot) - offset]}{filler[i]}"
cat[index] = new_l
term_width, term_height = get_terminal_size((80, 30))
cat = list(yield_pads(self.cat, term_width))
pivot = max((len(l.encode('unicode-escape')) for l in self.cat))
logo_offset = self.logo_offset
logo_width = max((len(str(l)) for l in self.logo))
logo = list(yield_pads(self.logo, logo_width - 1))
logo_height = len(logo)
marker = self.marker
data_start_line = [i - 1 for i, v in enumerate(cat) if v.strip().endswith(marker)]
if not data_start_line:
data_start_line = [logo_height + 1]
data_start_line = data_start_line[0]
if logo_height > data_start_line:
data_start_line = logo_height + 1
title = [self.title]
title_start_line, title_start_column = self.title_location
data = self.formatter(self.template.view, width=(term_width - pivot))
if self.colors:
cat, logo, title, data = self.haz_colorz(cat, logo, title, data)
rfill_lines(logo, offset=logo_offset)
rfill_lines(title, start=title_start_line, column=title_start_column)
rfill_lines(data.splitlines(), start=data_start_line, offset=self.template.offset)
return "\n".join((l.rstrip() for l in cat))
def haz_colorz(self, cat, logo, title, data):
from catsass.colorz import kitty_colorz
color_stuffs = kitty_colorz()
if color_stuffs is None:
return cat, logo, title, data
def color_lines(lines, color_mapping, words=False):
if any([len(k) > 1 for k in color_mapping.keys()]):
words = True
search_lines = [groupby(line.split()) for line in lines]
else:
search_lines = [groupby(line) for line in lines]
rv = []
for groups in search_lines:
line = []
for item, group in groups:
color = color_mapping.get(item)
if color is None:
line.append("".join(group))
else:
line.append(color("".join(group)).color_str)
if words:
rv.append(" ".join(line))
else:
rv.append("".join(line))
return rv
highlight = color_stuffs.get('highlight')
# Customz
cat_colorz = color_stuffs.get(self.coat) or {}
logo_colorz = color_stuffs.get(self.logo_colorz) or | BadCat | identifier_name |
catsass.py | _ can do here is save the sys.ENVIRONMENT
by reducing print-ed waste. Mew.
returns: probably what you want
"""
return __cat_whisperer()[Cat.ASS]
# === comb() ===
def comb(cat, *brush):
"""
Filter the results of poking the cat. Takes variable
names as strings for the args.
cat:
the_cats_ass() or similar
brush:
the variables you wish to see (as strings)
returns: hairballs
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k in brush})
# === avoid() ===
def avoid(cat, *undesirables):
"""
Omit any undesirable variables from the result.
cat:
the_cats_ass() or similar
undesirables:
variables you wish to have omitted (as strings)
returns: from whence it came
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k not in undesirables})
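# A small illustration of how the filters above are meant to be used (illustrative only;
# the local variable names are made up, and it assumes the usual entry point
# the_cats_ass() defined earlier in this module):
def _demo_grooming():
    user_id = 42
    password = "hunter2"
    cat = the_cats_ass()
    print(comb(cat, 'user_id'))    # show only user_id
    print(avoid(cat, 'password'))  # show everything except password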
# === poke_the_cat() ===
def poke_the_cat(where, catnip=False):
"""
You really shouldn't be poking cats. But if you insist,
it is recommended to bring catnip as it's not unusual for
cats to attack dicks who poke them.
where:
I leave this as an exercise for the reader. But
a word of wisdom from my 1st grade teacher: never do
anything that you wouldn't want to be caught dead doing.
Sadly he went to jail not long after whispering those
words in my ear.
catnip:
catnip can grow in the wild in many places around
the world. If no catnip can readily be found in yours
        or any of your neighbors' yards, then just pass True as the
argument.
returns: possibly what you want.
"""
if not catnip:
from random import randint
class BadCat(InterruptedError):
pass
if randint(1, 10) == 7:
mew = "You attempt to poke the cat but it attacks. " \
"Maybe if you gave it some catnip?"
raise BadCat(mew)
return __cat_whisperer()[where]
# === schrodingers_cat() ===
def schrodingers_cat(peek=False):
"""
Peek in the box for a 50/50 shot of retrieving your
desired output, while the other half of the time the
cat is dead and the function returns nothing at all.
If you decide not to peek, the cat -being neither
dead nor alive- responds with random nonsense.
peek:
whether to peek in the box
returns: depends
"""
from random import choice, randint
if peek:
if randint(1, 10) % 2 == 0:
# RIP
return "Nothing at all"
else:
return poke_the_cat(Cat.LEGS, catnip=True)
else:
garbled_cries = "mew meow wokka beocat ekkie".split()
return choice(garbled_cries)
def calico_kitty():
"""I can haz colorz?"""
return __cat_whisperer(colors=True, coat='calico_colorz', logo_colorz='logo_colorz')[Cat.ASS]
def tuxedo_cat():
"""Finer than a pheasant"""
return __cat_whisperer(colors=True, coat='tuxedo_colorz', logo_colorz='dark_logo_colorz')[Cat.ASS]
__LIBDIR__ = os.path.abspath(os.path.dirname(__file__))
# === PrettyKitty() ===
class PrettyKitty:
"""I can has repr?"""
def __init__(self, ctx, values, cat=None, logo=None,
marker='|/', logo_offset=6, template=None,
formatter=pformat, colors=False, coat=None,
logo_colorz=None, term_bg="dark", title=None,
title_start=(6, 45)):
        # The caller's name, usually.
self.ctx = ctx
# The local variables data.
self.values = values
# The line where the talk bubble should
# start must end with the marker, or the
# data will be printed just below the logo,
# which may not be what you want.
self.marker = marker
# Other formatters can be swapped in, JSON
# -for example- should work, or yaml.
self.formatter = formatter
# Allows the logo to be offset to either side;
# positive numbers move it left and negative
# numbers move it right.
self.logo_offset = logo_offset
self.term_bg = term_bg
if title is None:
title = "Meowed with love by Duroktar, 2017"
self.title = title
self.title_location = title_start
# Colors ~*%~
self.colors = colors
self.coat = coat
self.logo_colorz = logo_colorz
# TODO Should be public
Template = namedtuple("Template", "view offset")
if template is None:
# Option 1
template = Template({
"Name": self.ctx,
"Vars": self.values}, 1)
# Option 2
template = Template({self.ctx: self.values}, 1)
self.template = template
if cat is None:
cat = open(os.path.join(__LIBDIR__, 'octocat'), 'r').readlines()
self.cat = cat
if logo is None:
logo = open(os.path.join(__LIBDIR__, 'logo'), 'r').readlines()
self.logo = logo
def __repr__(self):
return self.haz_format() + "\n\n"
def haz_format(self):
from shutil import get_terminal_size
def yield_pads(lines, l):
for line in lines:
line = line.rstrip("\n").rstrip()
length = len(line)
if length < l:
yield line + " " * (l - length - 1)
else:
yield line
def rfill_lines(filler, start=0, offset=0, column=None):
height = len(filler)
for i in range(height):
index = start + i
try:
line = cat[index]
except IndexError:
cat.append(f"{' ' * pivot}{filler[i]}")
else:
if column is not None:
new_l = f"{line[:-(term_width - column) - offset]}{filler[i]}"
else:
new_l = f"{line[:-(term_width - pivot) - offset]}{filler[i]}"
cat[index] = new_l
term_width, term_height = get_terminal_size((80, 30))
cat = list(yield_pads(self.cat, term_width))
pivot = max((len(l.encode('unicode-escape')) for l in self.cat))
logo_offset = self.logo_offset
logo_width = max((len(str(l)) for l in self.logo))
logo = list(yield_pads(self.logo, logo_width - 1))
logo_height = len(logo)
marker = self.marker
data_start_line = [i - 1 for i, v in enumerate(cat) if v.strip().endswith(marker)]
if not data_start_line:
data_start_line = [logo_height + 1]
data_start_line = data_start_line[0]
if logo_height > data_start_line:
data_start_line = logo_height + 1
title = [self.title]
title_start_line, title_start_column = self.title_location
data = self.formatter(self.template.view, width=(term_width - pivot))
if self.colors:
cat, logo, title, data = self.haz_colorz(cat, logo, title, data)
rfill_lines(logo, offset=logo_offset)
rfill_lines(title, start=title_start_line, column=title_start_column)
rfill_lines(data.splitlines(), start=data_start_line, offset=self.template.offset)
return "\n".join((l.rstrip() for l in cat))
def haz_colorz(self, cat, logo, title, data):
from catsass.colorz import kitty_colorz
color_stuffs = kitty_colorz()
if color_stuffs is None:
return cat, logo, title, data
def color_lines(lines, color_mapping, words=False):
if any([len(k) > 1 for k in color_mapping.keys()]):
words = True
search_lines = [groupby(line.split()) for line in lines]
else:
search_lines = [groupby(line) for line in lines]
rv = []
for groups in search_lines:
line = []
for item, group in groups:
color = color_mapping.get(item)
if color is None:
line.append("".join(group))
else:
line.append(color("".join(group)).color_str)
if words:
rv.append(" ".join(line))
else:
|
return rv
highlight = color_stuffs.get('highlight')
# Customz
cat_colorz = color_stuffs.get(self.coat) or {}
logo_colorz = color_stuffs.get(self.logo_colorz) or | rv.append("".join(line)) | conditional_block |
Modules.py |
def updateScript(dbconnection):
"""
    scans all RSS feeds for new episodes and inserts any that are not already in the database
"""
cursor = dbconnection.cursor()
cursor.execute("select rss, name, source from podcasts;")
rssArray = cursor.fetchall()
for rss in rssArray:
print("chekcing name " + str(rss[1]))
url = str(rss[0])
name = str(rss[1])
source = str(rss[2])
rssArray = DatabaseInteract.rssCheck(name, source, url)
for item in rssArray:
if(DatabaseInteract.checkIfExists(dbconnection, item[0]) == False):
DatabaseInteract.insertClip(dbconnection, item[2], name, item[3], item[1], item[0])
def resetScript(dbConnection, maxConcurrent):
"""
Waits for the running transcription processes to end (2 min intervals). \n
Then deletes everything in the 'podcasts' folder, parses all transcripts, and updates the
databases
"""
while (Tools.numRunningProcesses() != 0): # wait for the transcriptions to end. Pings every 2 mins
time.sleep(120)
emptyPodcastFolder = Tools.cleanupFolder("podcasts")
DatabaseInteract.refreshDatabase(dbConnection)
def parseUpload(dbconnection, fileName):
"""
Requires dbconnection and the filename (location) of the file being parsed
"""
nhContent = ParseText.nohupTranscriptionContent(fileName)
count = 0
while count < len(nhContent[0]):
try:
rtf = nhContent[0][count]
transcription = nhContent[1][count].replace("'", "''").replace("_", "")
dbID = nhContent[2][count].replace(".", "")
duration = nhContent[3][count]
DatabaseInteract.insertTranscription(dbconnection, rtf, transcription, duration, dbID)
count += 1
except:
print("couldnt upload one at index " + str(count))
count += 1
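# A minimal usage sketch (illustrative, not part of the original module): it assumes a
# PostgreSQL database reachable through psycopg2 and a nohup.out file produced by the
# transcription jobs; the connection parameters below are placeholders.
def exampleMaintenanceCycle():
    import psycopg2  # local import so the sketch stays self-contained
    dbconnection = psycopg2.connect(host="localhost", dbname="podcasts",
                                    user="podcast_user", password="secret")
    try:
        updateScript(dbconnection)               # pull any new episodes from the RSS feeds
        parseUpload(dbconnection, "nohup.out")   # upload finished transcriptions
    finally:
        dbconnection.close()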
class ParseText:
"""
This class handles parsing of two entities:
\n\tText files containing one instance of a transcribed podcast or...
\n\tnohup files containing multiple instances of a transcribed podcast
"""
def nohupTranscriptionContent(filePath):
"""
This parses the content of nohup. The size of nohup is basically unlimited but
each line has to be under 300000 characters(?). This then returns the following...\n\n
        index 0 -- a list of all the occurrences of realTimeFactor\n
        index 1 -- a list of all the occurrences of transcriptions\n
        index 2 -- a list of all the occurrences of the transcription ID\n
        index 3 -- a list of all the occurrences of the total transcription time.\n\n
\n\n
\-\---Example usage----\n
parsedContent = nohupTranscriptionContent("ok.txt")
for i in range(len(parsedContent[0])):
print(parsedContent[0][i]) # realtimefactor
print(parsedContent[1][i]) # transcription
print(parsedContent[2][i]) # transcription ID
print(parsedContent[3][i]) # transcription time
"""
try:
continu = True
fileContent = ""
f = open(filePath, 'r')
while (continu):
temp = f.readline(900000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id(.*?) (.*?)\n', fileContent)
transcriptionList = []
transcriptionIDList = []
for item in transcription:
if(len(item[1]) > 1000):
transcriptionIDList.append(item[0])
transcriptionList.append(item[1])
results.append(transcriptionList)
results.append(transcriptionIDList)
transcriptionTime = re.findall(r'seconds / (.*?) seconds\.', fileContent)
results.append(transcriptionTime)
return results
except Exception as e:
Tools.writeException("nohupTranscriptionContent", e)
return False
def fileTranscriptionContent(filePath):
"""
This parses the content of the transcription file. The size of the file can basically be unlimited
but each line has to be under 300000 characters(?). This then returns the following...\n\n
index 0 -- url\n
index 1 -- realTimeFactor\n
index 2 -- transcription\n
"""
try:
continu = True
f = open(filePath, 'r')
fileContent = ""
while (continu):
temp = f.readline(300000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
f.close()
url = re.findall(r'URL:(.*?)\n', fileContent)
results.append(url)
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id1 (.*?)\n', fileContent)
for item in transcription:
if(len(item) > 500):
results.append(item.replace("'", "''"))
if((len(results[0]) > 0) and (len(results[1]) > 0) and (len(results[2]) > 0)):
return results
else:
Tools.writeException("fileTranscriptionContent", "ERROR attempted to parse " + filePath + " but got " + str(results))
return False
except Exception as e:
Tools.writeException("fileTranscriptionContent", e)
class Tools:
"""
Random functions
"""
def cleanupFolder(folderName):
"""
deletes all contents of the specified folder (but not the folder itself).\n
        returns True if successful, False if an error was thrown or the number of running
        processes is not 0
"""
try:
if(Tools.numRunningProcesses() == 0):
process = subprocess.call('rm -r ./' + folderName + '/*', shell=True)
return True
else:
return False
except Exception as e:
Tools.writeException("cleanupFolder", e)
return False
def numRunningProcesses():
"""
        gets the number of running transcription processes
"""
try:
proc = subprocess.run("ps -Af|grep -i \"online2-wav-nnet3-latgen-faster\"", stdout=subprocess.PIPE, shell=True)
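            # The split/-3 arithmetic below is a heuristic: it discounts the shell and grep
            # lines that also match the pattern plus the trailing empty entry, rather than
            # doing exact process accounting.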
np = (len(str(proc.stdout).split("\\n")) - 3)
if(np == None):
np = 0
return np
except Exception as e:
Tools.writeException("numRunningProcesses", e)
return -1
def writeException(className, exceptionString):
"""
Writes Exception given the string format of the class name and the 'e' in any
Exception as e premise
"""
errorFile = open("error.log", 'a')
errorFile.write("ERROR occured in " + className + " at " + str(datetime.now()) + " with the following message\n" + str(exceptionString) + "\n\n")
errorFile.close()
def getFirstFile(folderName):
"""
Returns with the filename of the first file in the given directory. Just provide the directory's name
with no leading './'
"""
listFiles = subprocess.run("ls ./" + folderName, shell=True, stdout=subprocess.PIPE)
fileName = re.search(r"b'(.*?)\\n", str(listFiles.stdout))[1]
if(len(fileName) > 0):
return fileName
else:
return False
def transcribeAll(service, url, fileName):
"""
Does everything you need to transcribe a podcast given the filename\n
Download | def runAutoCheck(dbConnection, maxConcurrent):
"""
runs an automatic check to see if any transcriptions need to be started or are already finished
    and need to be re-uploaded\n\n
    Needs dbConnection & an integer representing the max concurrent transcriptions that can be run at a time\n\n
    This is not a function you want to use for parsing and uploading files from the 'transcripts' folder,
    because you really don't know which files are still in progress. I'll fix this later.
"""
# checks if any shows are pending.
fileContent = DatabaseInteract.checkPre(dbConnection)
if(len(fileContent) > 0 and Tools.numRunningProcesses() < maxConcurrent):
cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET pending = TRUE WHERE id = '" + str(fileContent[1]) + "';")
dbConnection.commit()
cursor.close()
url = fileContent[0]
indexID = str(fileContent[1]) # get the ID instead of the filename
service = str(fileContent[3])
# podcastName = fileContent[2]
Tools.transcribeAll(service, url, indexID) # download the mp3 will print when done | identifier_body |
|
Modules.py | , dbID)
count += 1
except:
print("couldnt upload one at index " + str(count))
count += 1
class ParseText:
"""
This class handles parsing of two entities:
\n\tText files containing one instance of a transcribed podcast or...
\n\tnohup files containing multiple instances of a transcribed podcast
"""
def nohupTranscriptionContent(filePath):
"""
This parses the content of nohup. The size of nohup is basically unlimited but
each line has to be under 300000 characters(?). This then returns the following...\n\n
        index 0 -- a list of all the occurrences of realTimeFactor\n
        index 1 -- a list of all the occurrences of transcriptions\n
        index 2 -- a list of all the occurrences of the transcription ID\n
        index 3 -- a list of all the occurrences of the total transcription time.\n\n
\n\n
\-\---Example usage----\n
parsedContent = nohupTranscriptionContent("ok.txt")
for i in range(len(parsedContent[0])):
print(parsedContent[0][i]) # realtimefactor
print(parsedContent[1][i]) # transcription
print(parsedContent[2][i]) # transcription ID
print(parsedContent[3][i]) # transcription time
"""
try:
continu = True
fileContent = ""
f = open(filePath, 'r')
while (continu):
temp = f.readline(900000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id(.*?) (.*?)\n', fileContent)
transcriptionList = []
transcriptionIDList = []
for item in transcription:
if(len(item[1]) > 1000):
transcriptionIDList.append(item[0])
transcriptionList.append(item[1])
results.append(transcriptionList)
results.append(transcriptionIDList)
transcriptionTime = re.findall(r'seconds / (.*?) seconds\.', fileContent)
results.append(transcriptionTime)
return results
except Exception as e:
Tools.writeException("nohupTranscriptionContent", e)
return False
def fileTranscriptionContent(filePath):
"""
This parses the content of the transcription file. The size of the file can basically be unlimited
but each line has to be under 300000 characters(?). This then returns the following...\n\n
index 0 -- url\n
index 1 -- realTimeFactor\n
index 2 -- transcription\n
"""
try:
continu = True
f = open(filePath, 'r')
fileContent = ""
while (continu):
temp = f.readline(300000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
f.close()
url = re.findall(r'URL:(.*?)\n', fileContent)
results.append(url)
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id1 (.*?)\n', fileContent)
for item in transcription:
if(len(item) > 500):
results.append(item.replace("'", "''"))
if((len(results[0]) > 0) and (len(results[1]) > 0) and (len(results[2]) > 0)):
return results
else:
Tools.writeException("fileTranscriptionContent", "ERROR attempted to parse " + filePath + " but got " + str(results))
return False
except Exception as e:
Tools.writeException("fileTranscriptionContent", e)
class Tools:
"""
Random functions
"""
def cleanupFolder(folderName):
"""
deletes all contents of the specified folder (but not the folder itself).\n
        returns True if successful, False if an error was thrown or the number of running
        processes is not 0
"""
try:
if(Tools.numRunningProcesses() == 0):
process = subprocess.call('rm -r ./' + folderName + '/*', shell=True)
return True
else:
return False
except Exception as e:
Tools.writeException("cleanupFolder", e)
return False
def | ():
"""
        gets the number of running transcription processes
"""
try:
proc = subprocess.run("ps -Af|grep -i \"online2-wav-nnet3-latgen-faster\"", stdout=subprocess.PIPE, shell=True)
np = (len(str(proc.stdout).split("\\n")) - 3)
if(np == None):
np = 0
return np
except Exception as e:
Tools.writeException("numRunningProcesses", e)
return -1
def writeException(className, exceptionString):
"""
Writes Exception given the string format of the class name and the 'e' in any
Exception as e premise
"""
errorFile = open("error.log", 'a')
errorFile.write("ERROR occured in " + className + " at " + str(datetime.now()) + " with the following message\n" + str(exceptionString) + "\n\n")
errorFile.close()
def getFirstFile(folderName):
"""
Returns with the filename of the first file in the given directory. Just provide the directory's name
with no leading './'
"""
listFiles = subprocess.run("ls ./" + folderName, shell=True, stdout=subprocess.PIPE)
fileName = re.search(r"b'(.*?)\\n", str(listFiles.stdout))[1]
if(len(fileName) > 0):
return fileName
else:
return False
def transcribeAll(service, url, fileName):
"""
Does everything you need to transcribe a podcast given the filename\n
Download podcast, wait 40 seconds, change podcast to .wav, wait 10 seconds,
remove the .mp3 file, run the transcription
"""
if(service == "omny.fm"):
url = url.replace(".mp3","") + ".mp3"
subprocess.Popen("wget -c -O ./podcasts/" + fileName + ".mp3 " + url + " && sleep 40 && ffmpeg -i ./podcasts/"
+ fileName + ".mp3 -acodec pcm_s16le -ac 1 -ar 8000 ./podcasts/" + fileName + ".wav && sleep 10 && rm ./podcasts/"
+ fileName + ".mp3 && nohup ./online2-wav-nnet3-latgen-faster --online=false --do-endpointing=false "
+ "--frame-subsampling-factor=3 --config=online.conf --max-mem=2000000000 --max-active=7000 --beam=15.0 --lattice-beam=6.0 "
+ "--acoustic-scale=1.0 --word-symbol-table=words.txt final.mdl HCLG.fst 'ark:echo utterance-id" + fileName
+ " utterance-id" + fileName + "|' 'scp:echo utterance-id" + fileName + " ./podcasts/" + fileName + ".wav|' 'ark:/dev/null' &", shell=True)
class DatabaseInteract:
"""
This is where the database is updated. Refer to the example clips/header for format information.\n\n
Seeding the database would include the usage of 'insertHeader' then 'insertClips'. Pretty much every
function in here will require a dbConnection argument
"""
def uploadPodcast(dbConnection, homepage, name, description, category, source, imageurl, web, twitter, facebook, rss):
"""
HomePage --> the homepage of the podcast (NOT NULL)\n
Name --> The name of the podcast (NOT NULL)\n
Description --> a short description of the podcast\n
Category --> The category of the podcast\n
        Source --> The service through which the podcast is being accessed\n
ImageURI --> Podcast cover art\n
Web --> The website of the podcaster\n
Twitter --> The twitter account of the podcaster\n
Facebook --> the facebook account of the podcaster\n
LastUpdated --> the date that this was last updated.\n
RSS --> The URL of the podcasts RSS feed\n
        If you don't have a value for a certain field, just pass it in as an empty string
"""
try:
cursor = dbConnection.cursor()
name = name.replace("'", "''")
description = description.replace("'", "''")
cursor.execute("""INSERT INTO podcasts(homepage, name, description, category, source, imageuri, web, twitter, Facebook, rss) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);""", (homepage, name, description, category, source, imageurl, web, twitter, | numRunningProcesses | identifier_name |
Modules.py |
def updateScript(dbconnection):
"""
    scans all RSS feeds for new episodes and inserts any that are not already in the database
"""
cursor = dbconnection.cursor()
cursor.execute("select rss, name, source from podcasts;")
rssArray = cursor.fetchall()
for rss in rssArray:
print("chekcing name " + str(rss[1]))
url = str(rss[0])
name = str(rss[1])
source = str(rss[2])
rssArray = DatabaseInteract.rssCheck(name, source, url)
for item in rssArray:
if(DatabaseInteract.checkIfExists(dbconnection, item[0]) == False):
DatabaseInteract.insertClip(dbconnection, item[2], name, item[3], item[1], item[0])
def resetScript(dbConnection, maxConcurrent):
"""
Waits for the running transcription processes to end (2 min intervals). \n
Then deletes everything in the 'podcasts' folder, parses all transcripts, and updates the
databases
"""
while (Tools.numRunningProcesses() != 0): # wait for the transcriptions to end. Pings every 2 mins
time.sleep(120)
emptyPodcastFolder = Tools.cleanupFolder("podcasts")
DatabaseInteract.refreshDatabase(dbConnection)
def parseUpload(dbconnection, fileName):
"""
Requires dbconnection and the filename (location) of the file being parsed
"""
nhContent = ParseText.nohupTranscriptionContent(fileName)
count = 0
while count < len(nhContent[0]):
try:
rtf = nhContent[0][count]
transcription = nhContent[1][count].replace("'", "''").replace("_", "")
dbID = nhContent[2][count].replace(".", "")
duration = nhContent[3][count]
DatabaseInteract.insertTranscription(dbconnection, rtf, transcription, duration, dbID)
count += 1
except:
print("couldnt upload one at index " + str(count))
count += 1
class ParseText:
"""
This class handles parsing of two entities:
\n\tText files containing one instance of a transcribed podcast or...
\n\tnohup files containing multiple instances of a transcribed podcast
"""
def nohupTranscriptionContent(filePath):
"""
This parses the content of nohup. The size of nohup is basically unlimited but
each line has to be under 300000 characters(?). This then returns the following...\n\n
        index 0 -- a list of all the occurrences of realTimeFactor\n
        index 1 -- a list of all the occurrences of transcriptions\n
        index 2 -- a list of all the occurrences of the transcription ID\n
        index 3 -- a list of all the occurrences of the total transcription time.\n\n
\n\n
\-\---Example usage----\n
parsedContent = nohupTranscriptionContent("ok.txt")
for i in range(len(parsedContent[0])):
print(parsedContent[0][i]) # realtimefactor
print(parsedContent[1][i]) # transcription
print(parsedContent[2][i]) # transcription ID
print(parsedContent[3][i]) # transcription time
"""
try:
continu = True
fileContent = ""
f = open(filePath, 'r')
while (continu):
temp = f.readline(900000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id(.*?) (.*?)\n', fileContent)
transcriptionList = []
transcriptionIDList = []
for item in transcription:
if(len(item[1]) > 1000):
transcriptionIDList.append(item[0])
transcriptionList.append(item[1])
results.append(transcriptionList)
results.append(transcriptionIDList)
transcriptionTime = re.findall(r'seconds / (.*?) seconds\.', fileContent)
results.append(transcriptionTime)
return results
except Exception as e:
Tools.writeException("nohupTranscriptionContent", e)
return False
def fileTranscriptionContent(filePath):
"""
This parses the content of the transcription file. The size of the file can basically be unlimited
but each line has to be under 300000 characters(?). This then returns the following...\n\n
index 0 -- url\n
index 1 -- realTimeFactor\n
index 2 -- transcription\n
"""
try:
continu = True
f = open(filePath, 'r')
fileContent = ""
while (continu):
temp = f.readline(300000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
f.close()
url = re.findall(r'URL:(.*?)\n', fileContent)
results.append(url)
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id1 (.*?)\n', fileContent)
for item in transcription:
if(len(item) > 500):
results.append(item.replace("'", "''"))
if((len(results[0]) > 0) and (len(results[1]) > 0) and (len(results[2]) > 0)):
return results
else:
Tools.writeException("fileTranscriptionContent", "ERROR attempted to parse " + filePath + " but got " + str(results))
return False
except Exception as e:
Tools.writeException("fileTranscriptionContent", e)
class Tools:
"""
Random functions
"""
def cleanupFolder(folderName):
"""
deletes all contents of the specified folder (but not the folder itself).\n
        returns True if successful, False if an error was thrown or the number of running
        processes is not 0
"""
try:
if(Tools.numRunningProcesses() == 0):
process = subprocess.call('rm -r ./' + folderName + '/*', shell=True)
return True
else:
return False
except Exception as e:
Tools.writeException("cleanupFolder", e)
return False
def numRunningProcesses():
"""
        gets the number of running transcription processes
"""
try:
proc = subprocess.run("ps -Af|grep -i \"online2-wav-nnet3-latgen-faster\"", stdout=subprocess.PIPE, shell=True)
np = (len(str(proc.stdout).split("\\n")) - 3)
if(np == None):
np = 0
return np
except Exception as e:
Tools.writeException("numRunningProcesses", e)
return -1
def writeException(className, exceptionString):
"""
Writes Exception given the string format of the class name and the 'e' in any
Exception as e premise
"""
errorFile = open("error.log", 'a')
errorFile.write("ERROR occured in " + className + " at " + str(datetime.now()) + " with the following message\n" + str(exceptionString) + "\n\n")
errorFile.close()
def getFirstFile(folderName):
"""
Returns with the filename of the first file in the given directory. Just provide the directory's name
with no leading './'
"""
listFiles = subprocess.run("ls ./" + folderName, shell=True, stdout=subprocess.PIPE)
fileName = re.search(r"b'(.*?)\\n", str(listFiles.stdout))[1]
if(len(fileName) > 0):
return fileName
else:
return False
def transcribeAll(service, url, fileName):
"""
Does everything you need to transcribe a podcast given the filename\n
Download podcast, wait 40 seconds, change podcast to .wav, wait 10 seconds,
remove the .mp3 file, run the transcription
"""
if(service == "omny.fm"):
url = url.replace(".mp3","") + ".mp3"
subprocess.Popen("wget -c -O ./podcasts/" + fileName + ".mp3 " + url + " && sleep 40 && ffmpeg -i ./podcasts/"
+ fileName + ".mp3 -acodec pcm_s16le -ac 1 -ar 8000 ./podcasts/" + fileName + ".wav && sleep 10 && rm ./podcasts/"
+ fileName + ".mp3 && noh | cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET pending = TRUE WHERE id = '" + str(fileContent[1]) + "';")
dbConnection.commit()
cursor.close()
url = fileContent[0]
indexID = str(fileContent[1]) # get the ID instead of the filename
service = str(fileContent[3])
# podcastName = fileContent[2]
Tools.transcribeAll(service, url, indexID) # download the mp3 will print when done | conditional_block |
|
Modules.py | duration, dbID)
count += 1
except:
print("couldnt upload one at index " + str(count))
count += 1
class ParseText:
"""
This class handles parsing of two entities:
\n\tText files containing one instance of a transcribed podcast or...
\n\tnohup files containing multiple instances of a transcribed podcast
"""
def nohupTranscriptionContent(filePath):
"""
This parses the content of nohup. The size of nohup is basically unlimited but
each line has to be under 300000 characters(?). This then returns the following...\n\n
        index 0 -- a list of all the occurrences of realTimeFactor\n
        index 1 -- a list of all the occurrences of transcriptions\n
        index 2 -- a list of all the occurrences of the transcription ID\n
        index 3 -- a list of all the occurrences of the total transcription time.\n\n
\n\n
\-\---Example usage----\n
parsedContent = nohupTranscriptionContent("ok.txt")
for i in range(len(parsedContent[0])):
print(parsedContent[0][i]) # realtimefactor
print(parsedContent[1][i]) # transcription
print(parsedContent[2][i]) # transcription ID
print(parsedContent[3][i]) # transcription time
"""
try:
continu = True
fileContent = ""
f = open(filePath, 'r')
while (continu):
temp = f.readline(900000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp | results = []
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id(.*?) (.*?)\n', fileContent)
transcriptionList = []
transcriptionIDList = []
for item in transcription:
if(len(item[1]) > 1000):
transcriptionIDList.append(item[0])
transcriptionList.append(item[1])
results.append(transcriptionList)
results.append(transcriptionIDList)
transcriptionTime = re.findall(r'seconds / (.*?) seconds\.', fileContent)
results.append(transcriptionTime)
return results
except Exception as e:
Tools.writeException("nohupTranscriptionContent", e)
return False
def fileTranscriptionContent(filePath):
"""
This parses the content of the transcription file. The size of the file can basically be unlimited
but each line has to be under 300000 characters(?). This then returns the following...\n\n
index 0 -- url\n
index 1 -- realTimeFactor\n
index 2 -- transcription\n
"""
try:
continu = True
f = open(filePath, 'r')
fileContent = ""
while (continu):
temp = f.readline(300000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
f.close()
url = re.findall(r'URL:(.*?)\n', fileContent)
results.append(url)
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id1 (.*?)\n', fileContent)
for item in transcription:
if(len(item) > 500):
results.append(item.replace("'", "''"))
if((len(results[0]) > 0) and (len(results[1]) > 0) and (len(results[2]) > 0)):
return results
else:
Tools.writeException("fileTranscriptionContent", "ERROR attempted to parse " + filePath + " but got " + str(results))
return False
except Exception as e:
Tools.writeException("fileTranscriptionContent", e)
class Tools:
"""
Random functions
"""
def cleanupFolder(folderName):
"""
deletes all contents of the specified folder (but not the folder itself).\n
        returns True if successful, False if an error was thrown or the number of running
        processes is not 0
"""
try:
if(Tools.numRunningProcesses() == 0):
process = subprocess.call('rm -r ./' + folderName + '/*', shell=True)
return True
else:
return False
except Exception as e:
Tools.writeException("cleanupFolder", e)
return False
def numRunningProcesses():
"""
        gets the number of running transcription processes
"""
try:
proc = subprocess.run("ps -Af|grep -i \"online2-wav-nnet3-latgen-faster\"", stdout=subprocess.PIPE, shell=True)
np = (len(str(proc.stdout).split("\\n")) - 3)
if(np == None):
np = 0
return np
except Exception as e:
Tools.writeException("numRunningProcesses", e)
return -1
def writeException(className, exceptionString):
"""
Writes Exception given the string format of the class name and the 'e' in any
Exception as e premise
"""
errorFile = open("error.log", 'a')
errorFile.write("ERROR occured in " + className + " at " + str(datetime.now()) + " with the following message\n" + str(exceptionString) + "\n\n")
errorFile.close()
def getFirstFile(folderName):
"""
Returns with the filename of the first file in the given directory. Just provide the directory's name
with no leading './'
"""
listFiles = subprocess.run("ls ./" + folderName, shell=True, stdout=subprocess.PIPE)
fileName = re.search(r"b'(.*?)\\n", str(listFiles.stdout))[1]
if(len(fileName) > 0):
return fileName
else:
return False
def transcribeAll(service, url, fileName):
"""
Does everything you need to transcribe a podcast given the filename\n
Download podcast, wait 40 seconds, change podcast to .wav, wait 10 seconds,
remove the .mp3 file, run the transcription
"""
if(service == "omny.fm"):
url = url.replace(".mp3","") + ".mp3"
subprocess.Popen("wget -c -O ./podcasts/" + fileName + ".mp3 " + url + " && sleep 40 && ffmpeg -i ./podcasts/"
+ fileName + ".mp3 -acodec pcm_s16le -ac 1 -ar 8000 ./podcasts/" + fileName + ".wav && sleep 10 && rm ./podcasts/"
+ fileName + ".mp3 && nohup ./online2-wav-nnet3-latgen-faster --online=false --do-endpointing=false "
+ "--frame-subsampling-factor=3 --config=online.conf --max-mem=2000000000 --max-active=7000 --beam=15.0 --lattice-beam=6.0 "
+ "--acoustic-scale=1.0 --word-symbol-table=words.txt final.mdl HCLG.fst 'ark:echo utterance-id" + fileName
+ " utterance-id" + fileName + "|' 'scp:echo utterance-id" + fileName + " ./podcasts/" + fileName + ".wav|' 'ark:/dev/null' &", shell=True)
class DatabaseInteract:
"""
This is where the database is updated. Refer to the example clips/header for format information.\n\n
Seeding the database would include the usage of 'insertHeader' then 'insertClips'. Pretty much every
function in here will require a dbConnection argument
"""
def uploadPodcast(dbConnection, homepage, name, description, category, source, imageurl, web, twitter, facebook, rss):
"""
HomePage --> the homepage of the podcast (NOT NULL)\n
Name --> The name of the podcast (NOT NULL)\n
Description --> a short description of the podcast\n
Category --> The category of the podcast\n
        Source --> The service through which the podcast is being accessed\n
ImageURI --> Podcast cover art\n
Web --> The website of the podcaster\n
Twitter --> The twitter account of the podcaster\n
Facebook --> the facebook account of the podcaster\n
LastUpdated --> the date that this was last updated.\n
RSS --> The URL of the podcasts RSS feed\n
        If you don't have a value for a certain field, just pass it in as an empty string
"""
try:
cursor = dbConnection.cursor()
name = name.replace("'", "''")
description = description.replace("'", "''")
cursor.execute("""INSERT INTO podcasts(homepage, name, description, category, source, imageuri, web, twitter, Facebook, rss) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);""", (homepage, name, description, category, source, imageurl, web, twitter, facebook | random_line_split |
|
preprocess.py | ",
"i'll": "i will",
"i'll've": "i will have",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it had",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so as ",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there would",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"}
words = text.split()
final_string = ""
try:
for word in words:
word = word.lower()
if hasNumbers(word) == False:
if word in contractions:
# print('Word: ' + word)
# print('Replacement: ' + contractions[word])
final_string += contractions[word]
final_string += ' '
flag = True
else:
final_string += word
final_string += ' '
flag = False
if(flag):
final_string = final_string[:-1]
except Exception as e:
print("type error: " + str(e))
exit()
return final_string
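# A tiny demonstration of the expansion above (illustrative only). Words are lower-cased,
# words containing digits are passed through untouched, and punctuation stays attached
# to its word, so the output is rough rather than fully normalized.
def demo_contractions():
    expanded = contractions("You'll've seen it, won't you?")
    # Expected to contain "you will have" and "will not"; a trailing space can survive
    # when the final word was not itself a contraction (a quirk of the flag logic above).
    print(expanded)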
def removePunctuationFromList(all_words):
all_words = [''.join(c for c in s if c not in string.punctuation)
for s in all_words]
# Remove the empty strings:
all_words = [s for s in all_words if s]
return all_words
def cleanText(text):
"""Clean up the text."""
try:
text = str(text)
        # remove contractions and stop words
text = contractions(text)
# remove html entities
cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
new_text = cleanr.sub('', text.strip())
return re.sub(r'\s+', ' ', re.sub(r'\W+', " ", new_text))
# TAG_RE = re.compile(r'<[^>]+>')
except:
print("An exception occurred with: " + text)
return str(text)
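# Illustrative use of cleanText (sketch only): HTML tags and entities are stripped and
# runs of non-word characters collapse to single spaces.
def demo_clean_text():
    raw = "<p>We won't need the &nbsp; markup, <b>right</b>?</p>"
    print(cleanText(raw))  # prints roughly: we will not need the markup right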
def getAllWords(lines, stop_words):
all_words = {}
try:
for line in lines:
words = line.split()
for word in words:
if word not in stop_words:
all_words[word] = True
temp = all_words.keys()
# removePunctuationFromList(temp)
top_words = FreqDist(temp)
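        # Note: temp holds unique dictionary keys, so FreqDist sees every word exactly once;
        # the [:20000] slice below therefore caps the vocabulary size rather than picking
        # the genuinely most frequent words.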
print("All Words list length : ", len(top_words))
# print(str(list(all_words1.keys())[:100]))
# use top 20000 words
return list(top_words.keys())[:20000]
# word_features = list(all_words.keys())[:6000]
# featuresets = [(find_features(rev, word_features), category)
# for (rev, category) in documents]
# print("Feature sets list length : ", len(featuresets))
except Exception as e:
print("type error: " + str(e))
exit()
def removeWordsNotIn(text, stop_words):
words = text.split()
final_string = ""
flag = False
try:
for word in words:
word = word.lower()
if word not in stop_words:
final_string += word
final_string += ' '
flag = True
else:
flag = False
if(flag): | return final_string
def shortenText(text, all_words):
# print('shortenText')
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if len(word) > 7:
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
# if we hit max number of token, stop parsing string
return final_string[:-1]
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
def addWordsIn(text, all_words):
""" Also does truncation """
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
return shortenText(text, all_words)
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
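# How these helpers are typically chained (illustrative sketch; the texts, stop-word list
# and the set() wrapper are assumptions, not taken from the original pipeline):
def demo_vocab_pipeline(texts, stop_words):
    cleaned = [removeWordsNotIn(cleanText(t), stop_words) for t in texts]
    vocab = set(getAllWords(cleaned, stop_words))    # capped vocabulary from getAllWords
    return [addWordsIn(t, vocab) for t in cleaned]   # keep in-vocab words, truncate at MAX_WORDS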
def read_data(filepath):
"""Read the CSV from disk."""
df = pd.read_csv(filepath, delimiter=',')
stop_words = ["will", "done", "goes","let", "know", "just", "put" "also",
"got", "can", "get" "said", "mr", "mrs", "one", "two", "three",
"four", "five", "i", "me", "my", "myself", "we", "our",
"ours","ourselves","you","youre","your","yours","yourself","yourselves","he","him","his","himself","she","her","hers","herself","it","its","itself","they","them","their","theirs","themselves","what","which","who","whom","this","that","these | final_string = final_string[:-1]
except Exception as e:
# print("type error: " + str(e))
print("type error")
exit() | random_line_split |
preprocess.py | ",
"i'll": "i will",
"i'll've": "i will have",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it had",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so as ",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there would",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"}
words = text.split()
final_string = ""
try:
for word in words:
word = word.lower()
if hasNumbers(word) == False:
if word in contractions:
# print('Word: ' + word)
# print('Replacement: ' + contractions[word])
final_string += contractions[word]
final_string += ' '
flag = True
else:
final_string += word
final_string += ' '
flag = False
if(flag):
final_string = final_string[:-1]
except Exception as e:
print("type error: " + str(e))
exit()
return final_string
def removePunctuationFromList(all_words):
all_words = [''.join(c for c in s if c not in string.punctuation)
for s in all_words]
# Remove the empty strings:
all_words = [s for s in all_words if s]
return all_words
def cleanText(text):
"""Clean up the text."""
try:
text = str(text)
        # remove contractions and stop words
text = contractions(text)
# remove html entities
cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
new_text = cleanr.sub('', text.strip())
return re.sub(r'\s+', ' ', re.sub(r'\W+', " ", new_text))
# TAG_RE = re.compile(r'<[^>]+>')
except:
print("An exception occurred with: " + text)
return str(text)
def getAllWords(lines, stop_words):
all_words = {}
try:
for line in lines:
words = line.split()
for word in words:
if word not in stop_words:
all_words[word] = True
temp = all_words.keys()
# removePunctuationFromList(temp)
top_words = FreqDist(temp)
print("All Words list length : ", len(top_words))
# print(str(list(all_words1.keys())[:100]))
# use top 20000 words
return list(top_words.keys())[:20000]
# word_features = list(all_words.keys())[:6000]
# featuresets = [(find_features(rev, word_features), category)
# for (rev, category) in documents]
# print("Feature sets list length : ", len(featuresets))
except Exception as e:
print("type error: " + str(e))
exit()
def removeWordsNotIn(text, stop_words):
words = text.split()
final_string = ""
flag = False
try:
for word in words:
word = word.lower()
if word not in stop_words:
final_string += word
final_string += ' '
flag = True
else:
flag = False
if(flag):
final_string = final_string[:-1]
except Exception as e:
# print("type error: " + str(e))
print("type error")
exit()
return final_string
def shortenText(text, all_words):
# print('shortenText')
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if len(word) > 7:
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
# if we hit max number of token, stop parsing string
return final_string[:-1]
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
def addWordsIn(text, all_words):
""" Also does truncation """
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
return shortenText(text, all_words)
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
def | (filepath):
"""Read the CSV from disk."""
df = pd.read_csv(filepath, delimiter=',')
stop_words = ["will", "done", "goes","let", "know", "just", "put" "also",
"got", "can", "get" "said", "mr", "mrs", "one", "two", "three",
"four", "five", "i", "me", "my", "myself", "we", "our",
"ours","ourselves","you","youre","your","yours","yourself","yourselves","he","him","his","himself","she","her","hers","herself","it","its","itself","they","them","their","theirs","themselves","what","which","who","whom","this","that"," | read_data | identifier_name |
preprocess.py | ",
"i'll": "i will",
"i'll've": "i will have",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it had",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so as ",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there would",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"}
words = text.split()
final_string = ""
try:
for word in words:
word = word.lower()
if hasNumbers(word) == False:
if word in contractions:
# print('Word: ' + word)
# print('Replacement: ' + contractions[word])
final_string += contractions[word]
final_string += ' '
flag = True
else:
|
if(flag):
final_string = final_string[:-1]
except Exception as e:
print("type error: " + str(e))
exit()
return final_string
def removePunctuationFromList(all_words):
all_words = [''.join(c for c in s if c not in string.punctuation)
for s in all_words]
# Remove the empty strings:
all_words = [s for s in all_words if s]
return all_words
def cleanText(text):
"""Clean up the text."""
try:
text = str(text)
        # remove contractions and stop words
text = contractions(text)
# remove html entities
cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
new_text = cleanr.sub('', text.strip())
return re.sub(r'\s+', ' ', re.sub(r'\W+', " ", new_text))
# TAG_RE = re.compile(r'<[^>]+>')
except:
print("An exception occurred with: " + text)
return str(text)
def getAllWords(lines, stop_words):
all_words = {}
try:
for line in lines:
words = line.split()
for word in words:
if word not in stop_words:
all_words[word] = True
temp = all_words.keys()
# removePunctuationFromList(temp)
top_words = FreqDist(temp)
print("All Words list length : ", len(top_words))
# print(str(list(all_words1.keys())[:100]))
# use top 20000 words
return list(top_words.keys())[:20000]
# word_features = list(all_words.keys())[:6000]
# featuresets = [(find_features(rev, word_features), category)
# for (rev, category) in documents]
# print("Feature sets list length : ", len(featuresets))
except Exception as e:
print("type error: " + str(e))
exit()
def removeWordsNotIn(text, stop_words):
words = text.split()
final_string = ""
flag = False
try:
for word in words:
word = word.lower()
if word not in stop_words:
final_string += word
final_string += ' '
flag = True
else:
flag = False
if(flag):
final_string = final_string[:-1]
except Exception as e:
# print("type error: " + str(e))
print("type error")
exit()
return final_string
def shortenText(text, all_words):
# print('shortenText')
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if len(word) > 7:
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
# if we hit max number of token, stop parsing string
return final_string[:-1]
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
def addWordsIn(text, all_words):
""" Also does truncation """
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
return shortenText(text, all_words)
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
def read_data(filepath):
"""Read the CSV from disk."""
df = pd.read_csv(filepath, delimiter=',')
stop_words = ["will", "done", "goes","let", "know", "just", "put" "also",
"got", "can", "get" "said", "mr", "mrs", "one", "two", "three",
"four", "five", "i", "me", "my", "myself", "we", "our",
"ours","ourselves","you","youre","your","yours","yourself","yourselves","he","him","his","himself","she","her","hers","herself","it","its","itself","they","them","their","theirs","themselves","what","which","who","whom","this","that","these | final_string += word
final_string += ' '
flag = False | conditional_block |
preprocess.py | ",
"i'll": "i will",
"i'll've": "i will have",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it had",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so as ",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there would",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"}
words = text.split()
final_string = ""
try:
for word in words:
word = word.lower()
if hasNumbers(word) == False:
if word in contractions:
# print('Word: ' + word)
# print('Replacement: ' + contractions[word])
final_string += contractions[word]
final_string += ' '
flag = True
else:
final_string += word
final_string += ' '
flag = False
if(flag):
final_string = final_string[:-1]
except Exception as e:
print("type error: " + str(e))
exit()
return final_string
def removePunctuationFromList(all_words):
all_words = [''.join(c for c in s if c not in string.punctuation)
for s in all_words]
# Remove the empty strings:
all_words = [s for s in all_words if s]
return all_words
def cleanText(text):
"""Clean up the text."""
try:
text = str(text)
# expand contractions
text = contractions(text)
# remove html entities
cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
new_text = cleanr.sub('', text.strip())
return re.sub(r'\s+', ' ', re.sub(r'\W+', " ", new_text))
# TAG_RE = re.compile(r'<[^>]+>')
except:
print("An exception occurred with: " + text)
return str(text)
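# --- Illustrative sketch (not part of the original script) ---
# cleanText above first expands contractions, then strips HTML tags and entities
# with the `cleanr` regex, replaces remaining non-word characters with spaces,
# and collapses runs of whitespace. A rough before/after with a hypothetical
# input string; the exact output depends on the contraction map defined earlier.
def _clean_text_demo():
    raw = "<p>It's a great movie and I can't complain</p>"
    # Expected shape of the result: markup removed, contractions expanded,
    # lower-cased tokens separated by single spaces.
    return cleanText(raw)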
def getAllWords(lines, stop_words):
all_words = {}
try:
for line in lines:
words = line.split()
for word in words:
if word not in stop_words:
all_words[word] = True
temp = all_words.keys()
# removePunctuationFromList(temp)
top_words = FreqDist(temp)
print("All Words list length : ", len(top_words))
# print(str(list(all_words1.keys())[:100]))
# use top 20000 words
return list(top_words.keys())[:20000]
# word_features = list(all_words.keys())[:6000]
# featuresets = [(find_features(rev, word_features), category)
# for (rev, category) in documents]
# print("Feature sets list length : ", len(featuresets))
except Exception as e:
print("type error: " + str(e))
exit()
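# --- Illustrative note (not part of the original script) ---
# getAllWords builds the vocabulary: every non-stop word seen in the input lines
# is recorded once, the unique keys are wrapped in an NLTK FreqDist, and the
# first 20000 keys are kept. Because the keys are already unique, every count in
# that FreqDist is 1, so "top 20000" is effectively the first 20000 distinct words.
def _vocabulary_demo(lines, stop_words):
    vocab_words = getAllWords(lines, stop_words)
    # Membership dict in the same shape addWordsIn expects.
    return {w: True for w in vocab_words}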
def removeWordsNotIn(text, stop_words):
|
def shortenText(text, all_words):
# print('shortenText')
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if len(word) > 7:
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
# if we hit the max number of tokens, stop parsing the string
return final_string[:-1]
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
def addWordsIn(text, all_words):
""" Also does truncation """
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
return shortenText(text, all_words)
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
def read_data(filepath):
"""Read the CSV from disk."""
df = pd.read_csv(filepath, delimiter=',')
stop_words = ["will", "done", "goes","let", "know", "just", "put" "also",
"got", "can", "get" "said", "mr", "mrs", "one", "two", "three",
"four", "five", "i", "me", "my", "myself", "we", "our",
"ours","ourselves","you","youre","your","yours","yourself","yourselves","he","him","his","himself","she","her","hers","herself","it","its","itself","they","them","their","theirs","themselves","what","which","who","whom","this","that","these | words = text.split()
final_string = ""
flag = False
try:
for word in words:
word = word.lower()
if word not in stop_words:
final_string += word
final_string += ' '
flag = True
else:
flag = False
if(flag):
final_string = final_string[:-1]
except Exception as e:
# print("type error: " + str(e))
print("type error")
exit()
return final_string | identifier_body |
lib.rs | Option<TableId>,
// A mapping of string names to the function index, filled with all exported
// functions.
name_map: HashMap<String, FunctionId>,
// The current stack pointer (global 0) and wasm memory (the stack). Only
// used in a limited capacity.
sp: i32,
mem: Vec<i32>,
scratch: Vec<i32>,
// The descriptor which we're assembling, a list of `u32` entries. This is
// very specific to wasm-bindgen and is the purpose for the existence of
// this module.
descriptor: Vec<u32>,
// When invoking the `__wbindgen_describe_closure` imported function, this
// stores the last table index argument, used for finding a different
// descriptor.
descriptor_table_idx: Option<u32>,
}
impl Interpreter {
/// Creates a new interpreter from a provided `Module`, precomputing all
/// information necessary to interpret further.
///
/// Note that the `module` passed in to this function must be the same as
/// the `module` passed to `interpret` below.
pub fn new(module: &Module) -> Result<Interpreter, anyhow::Error> {
let mut ret = Interpreter::default();
// The descriptor functions shouldn't really use all that much memory
// (the LLVM call stack, now the wasm stack). To handle that let's give
// our selves a little bit of memory and set the stack pointer (global
// 0) to the top.
ret.mem = vec![0; 0x400];
ret.sp = ret.mem.len() as i32;
// Figure out where the `__wbindgen_describe` imported function is, if
// it exists. We'll special case calls to this function as our
// interpretation should only invoke this function as an imported
// function.
for import in module.imports.iter() {
let id = match import.kind {
walrus::ImportKind::Function(id) => id,
_ => continue,
};
if import.module != "__wbindgen_placeholder__" {
continue;
}
if import.name == "__wbindgen_describe" {
ret.describe_id = Some(id);
} else if import.name == "__wbindgen_describe_closure" {
ret.describe_closure_id = Some(id);
}
}
// Build up the mapping of exported functions to function ids.
for export in module.exports.iter() {
let id = match export.item {
walrus::ExportItem::Function(id) => id,
_ => continue,
};
ret.name_map.insert(export.name.to_string(), id);
}
ret.functions = module.tables.main_function_table()?;
Ok(ret)
}
/// Interprets the execution of the descriptor function `func`.
///
/// This function will execute `func` in the `module` provided. Note that
/// the `module` provided here must be the same as the one passed to `new`
/// when this `Interpreter` was constructed.
///
/// The `func` must be a wasm-bindgen descriptor function meaning that it
/// doesn't do anything like use floats or i64. Instead all it should do is
/// call other functions, sometimes some stack pointer manipulation, and
/// then call the one imported `__wbindgen_describe` function. Anything else
/// will cause this interpreter to panic.
///
/// When the descriptor has finished running the assembled descriptor list
/// is returned. The descriptor returned can then be re-parsed into an
/// actual `Descriptor` in the cli-support crate.
///
/// # Return value
///
/// Returns `Some` if `func` was found in the `module` and `None` if it was
/// not found in the `module`.
pub fn | (&mut self, id: FunctionId, module: &Module) -> Option<&[u32]> {
self.descriptor.truncate(0);
// We should have a blank wasm and LLVM stack at both the start and end
// of the call.
assert_eq!(self.sp, self.mem.len() as i32);
self.call(id, module, &[]);
assert_eq!(self.sp, self.mem.len() as i32);
Some(&self.descriptor)
}
/// Interprets a "closure descriptor", figuring out the signature of the
/// closure that was intended.
///
/// This function will take an `id` which is known to internally
/// execute `__wbindgen_describe_closure` and interpret it. The
/// `wasm-bindgen` crate controls all callers of this internal import. It
/// will then take the index passed to `__wbindgen_describe_closure` and
/// interpret it as a function pointer. This means it'll look up within the
/// element section (function table) which index it points to. Upon finding
/// the relevant entry it'll assume that function is a descriptor function,
/// and then it will execute the descriptor function.
///
/// The returned value is the return value of the descriptor function found.
/// The `entry_removal_list` list is also then populated with an index of
/// the entry in the elements section (and then the index within that
/// section) of the function that needs to be snip'd out.
pub fn interpret_closure_descriptor(
&mut self,
id: FunctionId,
module: &Module,
entry_removal_list: &mut HashSet<(ElementId, usize)>,
) -> Option<&[u32]> {
// Call the `id` function. This is an internal `#[inline(never)]`
// whose code is completely controlled by the `wasm-bindgen` crate, so
// it should take some arguments (the number of arguments depends on the
// optimization level) and return one (all of which we don't care about
// here). What we're interested in is that while executing this function
// it'll call `__wbindgen_describe_closure` with an argument that we
// look for.
assert!(self.descriptor_table_idx.is_none());
let func = module.funcs.get(id);
let params = module.types.get(func.ty()).params();
assert!(
params.iter().all(|p| *p == walrus::ValType::I32),
"closure descriptors should only have i32 params"
);
let num_params = params.len();
assert!(
num_params <= 2,
"closure descriptors have 2 parameters, but might lose some parameters due to LTO"
);
let args = vec![0; num_params];
self.call(id, module, &args);
let descriptor_table_idx = self
.descriptor_table_idx
.take()
.expect("descriptor function should return index");
// After we've got the table index of the descriptor function we're
// interested in, we take a look in the function table to find what the
// actual index of the function is.
let entry =
wasm_bindgen_wasm_conventions::get_function_table_entry(module, descriptor_table_idx)
.expect("failed to find entry in function table");
let descriptor_id = entry.func.expect("element segment slot wasn't set");
entry_removal_list.insert((entry.element, entry.idx));
// And now execute the descriptor!
self.interpret_descriptor(descriptor_id, module)
}
/// Returns the function id of the `__wbindgen_describe_closure`
/// imported function.
pub fn describe_closure_id(&self) -> Option<FunctionId> {
self.describe_closure_id
}
/// Returns the detected id of the function table.
pub fn function_table_id(&self) -> Option<TableId> {
self.functions
}
fn call(&mut self, id: FunctionId, module: &Module, args: &[i32]) -> Option<i32> {
let func = module.funcs.get(id);
log::debug!("starting a call of {:?} {:?}", id, func.name);
log::debug!("arguments {:?}", args);
let local = match &func.kind {
walrus::FunctionKind::Local(l) => l,
_ => panic!("can only call locally defined functions"),
};
let entry = local.entry_block();
let block = local.block(entry);
let mut frame = Frame {
module,
interp: self,
locals: BTreeMap::new(),
done: false,
};
assert_eq!(local.args.len(), args.len());
for (arg, val) in local.args.iter().zip(args) {
frame.locals.insert(*arg, *val);
}
for (instr, _) in block.instrs.iter() {
frame.eval(instr);
if frame.done {
break;
}
}
self.scratch.last().cloned()
}
}
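// --- Hedged usage sketch (added for illustration; not part of the original crate) ---
// Shows how this interpreter is typically driven from the outside: build it once
// from a walrus `Module`, then interpret each exported descriptor function by id.
// The export-name prefix checked below ("__wbindgen_describe_") is an assumption
// made only for this sketch; the real caller lives in the cli-support crate.
#[allow(dead_code)]
fn interpret_all_descriptors(module: &walrus::Module) -> anyhow::Result<Vec<Vec<u32>>> {
    let mut interp = Interpreter::new(module)?;
    let mut out = Vec::new();
    for export in module.exports.iter() {
        if let walrus::ExportItem::Function(id) = export.item {
            if export.name.starts_with("__wbindgen_describe_") {
                if let Some(descriptor) = interp.interpret_descriptor(id, module) {
                    out.push(descriptor.to_vec());
                }
            }
        }
    }
    Ok(out)
}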
struct Frame<'a> {
module: &'a Module,
interp: &'a mut Interpreter,
locals: BTreeMap<LocalId, i32>,
done: bool,
}
impl Frame<'_> {
fn eval(&mut self, instr: &Instr) {
use walrus::ir::*;
let stack = &mut self.interp.scratch;
match instr {
Instr::Const(c) => match c.value {
Value::I3 | interpret_descriptor | identifier_name |
lib.rs | Option<TableId>,
// A mapping of string names to the function index, filled with all exported
// functions.
name_map: HashMap<String, FunctionId>,
// The current stack pointer (global 0) and wasm memory (the stack). Only
// used in a limited capacity.
sp: i32,
mem: Vec<i32>,
scratch: Vec<i32>,
// The descriptor which we're assembling, a list of `u32` entries. This is
// very specific to wasm-bindgen and is the purpose for the existence of
// this module.
descriptor: Vec<u32>,
// When invoking the `__wbindgen_describe_closure` imported function, this
// stores the last table index argument, used for finding a different
// descriptor.
descriptor_table_idx: Option<u32>,
}
impl Interpreter {
/// Creates a new interpreter from a provided `Module`, precomputing all
/// information necessary to interpret further.
///
/// Note that the `module` passed in to this function must be the same as
/// the `module` passed to `interpret` below.
pub fn new(module: &Module) -> Result<Interpreter, anyhow::Error> {
let mut ret = Interpreter::default();
// The descriptor functions shouldn't really use all that much memory
// (the LLVM call stack, now the wasm stack). To handle that let's give
// our selves a little bit of memory and set the stack pointer (global
// 0) to the top.
ret.mem = vec![0; 0x400];
ret.sp = ret.mem.len() as i32;
// Figure out where the `__wbindgen_describe` imported function is, if
// it exists. We'll special case calls to this function as our
// interpretation should only invoke this function as an imported
// function.
for import in module.imports.iter() {
let id = match import.kind {
walrus::ImportKind::Function(id) => id,
_ => continue,
};
if import.module != "__wbindgen_placeholder__" {
continue;
}
if import.name == "__wbindgen_describe" {
ret.describe_id = Some(id);
} else if import.name == "__wbindgen_describe_closure" {
ret.describe_closure_id = Some(id);
}
}
// Build up the mapping of exported functions to function ids.
for export in module.exports.iter() {
let id = match export.item {
walrus::ExportItem::Function(id) => id,
_ => continue,
};
ret.name_map.insert(export.name.to_string(), id);
}
ret.functions = module.tables.main_function_table()?;
Ok(ret)
}
/// Interprets the execution of the descriptor function `func`.
///
/// This function will execute `func` in the `module` provided. Note that
/// the `module` provided here must be the same as the one passed to `new`
/// when this `Interpreter` was constructed.
///
/// The `func` must be a wasm-bindgen descriptor function meaning that it
/// doesn't do anything like use floats or i64. Instead all it should do is
/// call other functions, sometimes some stack pointer manipulation, and
/// then call the one imported `__wbindgen_describe` function. Anything else
/// will cause this interpreter to panic.
///
/// When the descriptor has finished running the assembled descriptor list
/// is returned. The descriptor returned can then be re-parsed into an
/// actual `Descriptor` in the cli-support crate.
///
/// # Return value
///
/// Returns `Some` if `func` was found in the `module` and `None` if it was
/// not found in the `module`.
pub fn interpret_descriptor(&mut self, id: FunctionId, module: &Module) -> Option<&[u32]> {
self.descriptor.truncate(0);
// We should have a blank wasm and LLVM stack at both the start and end
// of the call.
assert_eq!(self.sp, self.mem.len() as i32);
self.call(id, module, &[]);
assert_eq!(self.sp, self.mem.len() as i32);
Some(&self.descriptor)
}
/// Interprets a "closure descriptor", figuring out the signature of the
/// closure that was intended.
///
/// This function will take an `id` which is known to internally
/// execute `__wbindgen_describe_closure` and interpret it. The
/// `wasm-bindgen` crate controls all callers of this internal import. It
/// will then take the index passed to `__wbindgen_describe_closure` and
/// interpret it as a function pointer. This means it'll look up within the
/// element section (function table) which index it points to. Upon finding
/// the relevant entry it'll assume that function is a descriptor function,
/// and then it will execute the descriptor function.
///
/// The returned value is the return value of the descriptor function found.
/// The `entry_removal_list` list is also then populated with an index of
/// the entry in the elements section (and then the index within that
/// section) of the function that needs to be snip'd out.
pub fn interpret_closure_descriptor(
&mut self,
id: FunctionId,
module: &Module,
entry_removal_list: &mut HashSet<(ElementId, usize)>,
) -> Option<&[u32]> {
// Call the `id` function. This is an internal `#[inline(never)]`
// whose code is completely controlled by the `wasm-bindgen` crate, so
// it should take some arguments (the number of arguments depends on the
// optimization level) and return one (all of which we don't care about
// here). What we're interested in is that while executing this function
// it'll call `__wbindgen_describe_closure` with an argument that we
// look for.
assert!(self.descriptor_table_idx.is_none());
let func = module.funcs.get(id);
let params = module.types.get(func.ty()).params();
assert!(
params.iter().all(|p| *p == walrus::ValType::I32),
"closure descriptors should only have i32 params"
);
let num_params = params.len();
assert!(
num_params <= 2,
"closure descriptors have 2 parameters, but might lose some parameters due to LTO"
);
let args = vec![0; num_params];
self.call(id, module, &args);
let descriptor_table_idx = self
.descriptor_table_idx
.take()
.expect("descriptor function should return index");
// After we've got the table index of the descriptor function we're
// interested in, we take a look in the function table to find what the
// actual index of the function is.
let entry =
wasm_bindgen_wasm_conventions::get_function_table_entry(module, descriptor_table_idx)
.expect("failed to find entry in function table");
let descriptor_id = entry.func.expect("element segment slot wasn't set");
entry_removal_list.insert((entry.element, entry.idx));
// And now execute the descriptor!
self.interpret_descriptor(descriptor_id, module)
}
/// Returns the function id of the `__wbindgen_describe_closure`
/// imported function.
pub fn describe_closure_id(&self) -> Option<FunctionId> {
self.describe_closure_id
}
/// Returns the detected id of the function table.
pub fn function_table_id(&self) -> Option<TableId> {
self.functions
}
fn call(&mut self, id: FunctionId, module: &Module, args: &[i32]) -> Option<i32> | for (arg, val) in local.args.iter().zip(args) {
frame.locals.insert(*arg, *val);
}
for (instr, _) in block.instrs.iter() {
frame.eval(instr);
if frame.done {
break;
}
}
self.scratch.last().cloned()
}
}
struct Frame<'a> {
module: &'a Module,
interp: &'a mut Interpreter,
locals: BTreeMap<LocalId, i32>,
done: bool,
}
impl Frame<'_> {
fn eval(&mut self, instr: &Instr) {
use walrus::ir::*;
let stack = &mut self.interp.scratch;
match instr {
Instr::Const(c) => match c.value {
Value::I3 | {
let func = module.funcs.get(id);
log::debug!("starting a call of {:?} {:?}", id, func.name);
log::debug!("arguments {:?}", args);
let local = match &func.kind {
walrus::FunctionKind::Local(l) => l,
_ => panic!("can only call locally defined functions"),
};
let entry = local.entry_block();
let block = local.block(entry);
let mut frame = Frame {
module,
interp: self,
locals: BTreeMap::new(),
done: false,
};
assert_eq!(local.args.len(), args.len()); | identifier_body |
lib.rs | Option<TableId>,
// A mapping of string names to the function index, filled with all exported
// functions.
name_map: HashMap<String, FunctionId>,
// The current stack pointer (global 0) and wasm memory (the stack). Only
// used in a limited capacity.
sp: i32,
mem: Vec<i32>,
scratch: Vec<i32>,
// The descriptor which we're assembling, a list of `u32` entries. This is
// very specific to wasm-bindgen and is the purpose for the existence of
// this module.
descriptor: Vec<u32>,
// When invoking the `__wbindgen_describe_closure` imported function, this
// stores the last table index argument, used for finding a different
// descriptor.
descriptor_table_idx: Option<u32>, | impl Interpreter {
/// Creates a new interpreter from a provided `Module`, precomputing all
/// information necessary to interpret further.
///
/// Note that the `module` passed in to this function must be the same as
/// the `module` passed to `interpret` below.
pub fn new(module: &Module) -> Result<Interpreter, anyhow::Error> {
let mut ret = Interpreter::default();
// The descriptor functions shouldn't really use all that much memory
// (the LLVM call stack, now the wasm stack). To handle that let's give
// ourselves a little bit of memory and set the stack pointer (global
// 0) to the top.
ret.mem = vec![0; 0x400];
ret.sp = ret.mem.len() as i32;
// Figure out where the `__wbindgen_describe` imported function is, if
// it exists. We'll special case calls to this function as our
// interpretation should only invoke this function as an imported
// function.
for import in module.imports.iter() {
let id = match import.kind {
walrus::ImportKind::Function(id) => id,
_ => continue,
};
if import.module != "__wbindgen_placeholder__" {
continue;
}
if import.name == "__wbindgen_describe" {
ret.describe_id = Some(id);
} else if import.name == "__wbindgen_describe_closure" {
ret.describe_closure_id = Some(id);
}
}
// Build up the mapping of exported functions to function ids.
for export in module.exports.iter() {
let id = match export.item {
walrus::ExportItem::Function(id) => id,
_ => continue,
};
ret.name_map.insert(export.name.to_string(), id);
}
ret.functions = module.tables.main_function_table()?;
Ok(ret)
}
/// Interprets the execution of the descriptor function `func`.
///
/// This function will execute `func` in the `module` provided. Note that
/// the `module` provided here must be the same as the one passed to `new`
/// when this `Interpreter` was constructed.
///
/// The `func` must be a wasm-bindgen descriptor function meaning that it
/// doesn't do anything like use floats or i64. Instead all it should do is
/// call other functions, sometimes some stack pointer manipulation, and
/// then call the one imported `__wbindgen_describe` function. Anything else
/// will cause this interpreter to panic.
///
/// When the descriptor has finished running the assembled descriptor list
/// is returned. The descriptor returned can then be re-parsed into an
/// actual `Descriptor` in the cli-support crate.
///
/// # Return value
///
/// Returns `Some` if `func` was found in the `module` and `None` if it was
/// not found in the `module`.
pub fn interpret_descriptor(&mut self, id: FunctionId, module: &Module) -> Option<&[u32]> {
self.descriptor.truncate(0);
// We should have a blank wasm and LLVM stack at both the start and end
// of the call.
assert_eq!(self.sp, self.mem.len() as i32);
self.call(id, module, &[]);
assert_eq!(self.sp, self.mem.len() as i32);
Some(&self.descriptor)
}
/// Interprets a "closure descriptor", figuring out the signature of the
/// closure that was intended.
///
/// This function will take an `id` which is known to internally
/// execute `__wbindgen_describe_closure` and interpret it. The
/// `wasm-bindgen` crate controls all callers of this internal import. It
/// will then take the index passed to `__wbindgen_describe_closure` and
/// interpret it as a function pointer. This means it'll look up within the
/// element section (function table) which index it points to. Upon finding
/// the relevant entry it'll assume that function is a descriptor function,
/// and then it will execute the descriptor function.
///
/// The returned value is the return value of the descriptor function found.
/// The `entry_removal_list` list is also then populated with an index of
/// the entry in the elements section (and then the index within that
/// section) of the function that needs to be snip'd out.
pub fn interpret_closure_descriptor(
&mut self,
id: FunctionId,
module: &Module,
entry_removal_list: &mut HashSet<(ElementId, usize)>,
) -> Option<&[u32]> {
// Call the `id` function. This is an internal `#[inline(never)]`
// whose code is completely controlled by the `wasm-bindgen` crate, so
// it should take some arguments (the number of arguments depends on the
// optimization level) and return one (all of which we don't care about
// here). What we're interested in is that while executing this function
// it'll call `__wbindgen_describe_closure` with an argument that we
// look for.
assert!(self.descriptor_table_idx.is_none());
let func = module.funcs.get(id);
let params = module.types.get(func.ty()).params();
assert!(
params.iter().all(|p| *p == walrus::ValType::I32),
"closure descriptors should only have i32 params"
);
let num_params = params.len();
assert!(
num_params <= 2,
"closure descriptors have 2 parameters, but might lose some parameters due to LTO"
);
let args = vec![0; num_params];
self.call(id, module, &args);
let descriptor_table_idx = self
.descriptor_table_idx
.take()
.expect("descriptor function should return index");
// After we've got the table index of the descriptor function we're
// interested go take a look in the function table to find what the
// actual index of the function is.
let entry =
wasm_bindgen_wasm_conventions::get_function_table_entry(module, descriptor_table_idx)
.expect("failed to find entry in function table");
let descriptor_id = entry.func.expect("element segment slot wasn't set");
entry_removal_list.insert((entry.element, entry.idx));
// And now execute the descriptor!
self.interpret_descriptor(descriptor_id, module)
}
/// Returns the function id of the `__wbindgen_describe_closure`
/// imported function.
pub fn describe_closure_id(&self) -> Option<FunctionId> {
self.describe_closure_id
}
/// Returns the detected id of the function table.
pub fn function_table_id(&self) -> Option<TableId> {
self.functions
}
fn call(&mut self, id: FunctionId, module: &Module, args: &[i32]) -> Option<i32> {
let func = module.funcs.get(id);
log::debug!("starting a call of {:?} {:?}", id, func.name);
log::debug!("arguments {:?}", args);
let local = match &func.kind {
walrus::FunctionKind::Local(l) => l,
_ => panic!("can only call locally defined functions"),
};
let entry = local.entry_block();
let block = local.block(entry);
let mut frame = Frame {
module,
interp: self,
locals: BTreeMap::new(),
done: false,
};
assert_eq!(local.args.len(), args.len());
for (arg, val) in local.args.iter().zip(args) {
frame.locals.insert(*arg, *val);
}
for (instr, _) in block.instrs.iter() {
frame.eval(instr);
if frame.done {
break;
}
}
self.scratch.last().cloned()
}
}
struct Frame<'a> {
module: &'a Module,
interp: &'a mut Interpreter,
locals: BTreeMap<LocalId, i32>,
done: bool,
}
impl Frame<'_> {
fn eval(&mut self, instr: &Instr) {
use walrus::ir::*;
let stack = &mut self.interp.scratch;
match instr {
Instr::Const(c) => match c.value {
Value::I32 | }
| random_line_split |
lib.rs | ` was found in the `module` and `None` if it was
/// not found in the `module`.
pub fn interpret_descriptor(&mut self, id: FunctionId, module: &Module) -> Option<&[u32]> {
self.descriptor.truncate(0);
// We should have a blank wasm and LLVM stack at both the start and end
// of the call.
assert_eq!(self.sp, self.mem.len() as i32);
self.call(id, module, &[]);
assert_eq!(self.sp, self.mem.len() as i32);
Some(&self.descriptor)
}
/// Interprets a "closure descriptor", figuring out the signature of the
/// closure that was intended.
///
/// This function will take an `id` which is known to internally
/// execute `__wbindgen_describe_closure` and interpret it. The
/// `wasm-bindgen` crate controls all callers of this internal import. It
/// will then take the index passed to `__wbindgen_describe_closure` and
/// interpret it as a function pointer. This means it'll look up within the
/// element section (function table) which index it points to. Upon finding
/// the relevant entry it'll assume that function is a descriptor function,
/// and then it will execute the descriptor function.
///
/// The returned value is the return value of the descriptor function found.
/// The `entry_removal_list` list is also then populated with an index of
/// the entry in the elements section (and then the index within that
/// section) of the function that needs to be snip'd out.
pub fn interpret_closure_descriptor(
&mut self,
id: FunctionId,
module: &Module,
entry_removal_list: &mut HashSet<(ElementId, usize)>,
) -> Option<&[u32]> {
// Call the `id` function. This is an internal `#[inline(never)]`
// whose code is completely controlled by the `wasm-bindgen` crate, so
// it should take some arguments (the number of arguments depends on the
// optimization level) and return one (all of which we don't care about
// here). What we're interested in is that while executing this function
// it'll call `__wbindgen_describe_closure` with an argument that we
// look for.
assert!(self.descriptor_table_idx.is_none());
let func = module.funcs.get(id);
let params = module.types.get(func.ty()).params();
assert!(
params.iter().all(|p| *p == walrus::ValType::I32),
"closure descriptors should only have i32 params"
);
let num_params = params.len();
assert!(
num_params <= 2,
"closure descriptors have 2 parameters, but might lose some parameters due to LTO"
);
let args = vec![0; num_params];
self.call(id, module, &args);
let descriptor_table_idx = self
.descriptor_table_idx
.take()
.expect("descriptor function should return index");
// After we've got the table index of the descriptor function we're
// interested in, we take a look in the function table to find what the
// actual index of the function is.
let entry =
wasm_bindgen_wasm_conventions::get_function_table_entry(module, descriptor_table_idx)
.expect("failed to find entry in function table");
let descriptor_id = entry.func.expect("element segment slot wasn't set");
entry_removal_list.insert((entry.element, entry.idx));
// And now execute the descriptor!
self.interpret_descriptor(descriptor_id, module)
}
/// Returns the function id of the `__wbindgen_describe_closure`
/// imported function.
pub fn describe_closure_id(&self) -> Option<FunctionId> {
self.describe_closure_id
}
/// Returns the detected id of the function table.
pub fn function_table_id(&self) -> Option<TableId> {
self.functions
}
fn call(&mut self, id: FunctionId, module: &Module, args: &[i32]) -> Option<i32> {
let func = module.funcs.get(id);
log::debug!("starting a call of {:?} {:?}", id, func.name);
log::debug!("arguments {:?}", args);
let local = match &func.kind {
walrus::FunctionKind::Local(l) => l,
_ => panic!("can only call locally defined functions"),
};
let entry = local.entry_block();
let block = local.block(entry);
let mut frame = Frame {
module,
interp: self,
locals: BTreeMap::new(),
done: false,
};
assert_eq!(local.args.len(), args.len());
for (arg, val) in local.args.iter().zip(args) {
frame.locals.insert(*arg, *val);
}
for (instr, _) in block.instrs.iter() {
frame.eval(instr);
if frame.done {
break;
}
}
self.scratch.last().cloned()
}
}
struct Frame<'a> {
module: &'a Module,
interp: &'a mut Interpreter,
locals: BTreeMap<LocalId, i32>,
done: bool,
}
impl Frame<'_> {
fn eval(&mut self, instr: &Instr) {
use walrus::ir::*;
let stack = &mut self.interp.scratch;
match instr {
Instr::Const(c) => match c.value {
Value::I32(n) => stack.push(n),
_ => panic!("non-i32 constant"),
},
Instr::LocalGet(e) => stack.push(self.locals.get(&e.local).cloned().unwrap_or(0)),
Instr::LocalSet(e) => {
let val = stack.pop().unwrap();
self.locals.insert(e.local, val);
}
Instr::LocalTee(e) => {
let val = *stack.last().unwrap();
self.locals.insert(e.local, val);
}
// Blindly assume all globals are the stack pointer
Instr::GlobalGet(_) => stack.push(self.interp.sp),
Instr::GlobalSet(_) => {
let val = stack.pop().unwrap();
self.interp.sp = val;
}
// Support simple arithmetic, mainly for the stack pointer
// manipulation
Instr::Binop(e) => {
let rhs = stack.pop().unwrap();
let lhs = stack.pop().unwrap();
stack.push(match e.op {
BinaryOp::I32Sub => lhs - rhs,
BinaryOp::I32Add => lhs + rhs,
op => panic!("invalid binary op {:?}", op),
});
}
// Support small loads/stores to the stack. These show up in debug
// mode where there's some traffic on the linear stack even when in
// theory there doesn't need to be.
Instr::Load(e) => {
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
stack.push(self.interp.mem[address as usize / 4])
}
Instr::Store(e) => {
let value = stack.pop().unwrap();
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
self.interp.mem[address as usize / 4] = value;
}
Instr::Return(_) => {
log::debug!("return");
self.done = true;
}
Instr::Drop(_) => {
log::debug!("drop");
stack.pop().unwrap();
}
Instr::Call(e) => {
// If this function is calling the `__wbindgen_describe`
// function, which we've precomputed the id for, then
// it's telling us about the next `u32` element in the
// descriptor to return. We "call" the imported function
// here by directly inlining it.
if Some(e.func) == self.interp.describe_id {
let val = stack.pop().unwrap();
log::debug!("__wbindgen_describe({})", val);
self.interp.descriptor.push(val as u32);
// If this function is calling the `__wbindgen_describe_closure`
// function then it's similar to the above, except there's a
// slightly different signature. Note that we don't eval the
// previous arguments because they shouldn't have any side
// effects we're interested in.
} else if Some(e.func) == self.interp.describe_closure_id {
let val = stack.pop().unwrap();
stack.pop();
stack.pop();
log::debug!("__wbindgen_describe_closure({})", val);
self.interp.descriptor_table_idx = Some(val as u32);
stack.push(0)
// ... otherwise this is a normal call so we recurse.
} else | {
let ty = self.module.types.get(self.module.funcs.get(e.func).ty());
let args = (0..ty.params().len())
.map(|_| stack.pop().unwrap())
.collect::<Vec<_>>();
self.interp.call(e.func, self.module, &args);
} | conditional_block |
|
maze.rs | "FPGA" being there.
//!
//! The other approach is trying to have just a nice solution for normal processors - implement a
//! properly aligned A*, which is a pretty easy and common solution for pathfinding. Nothing special there,
//! but on a SISD arch it should behave pretty nicely (it could probably be improved by using a
//! more sophisticated algorithm like double-ended A*, but I am lazy - too much work for too little
//! payoff; if I really found more time I would rather try to do something more interesting -
//! visualization, or some kind of optimization - but I don't believe I would find the motivation for that).
//!
//! I figured out an additional "approach" (besides taking a completely different search algorithm). The maze
//! could easily be preprocessed into a directed graph, where each cell (so actually each non-wall maze field)
//! has a connection to the closest path crossing, and then any pathfinding algorithm can be run on that.
//! The benefit is that pathfinding itself is performed on a strongly reduced graph; the downside is
//! obviously the need for preprocessing (not that much - it can be done in O(x * y), but every
//! field has to be visited, while most reasonable search algorithms avoid visiting every
//! field). The problem that, if the exit is not on a crossing, there is no incoming path to it
//! is actually not difficult to solve - a simple raycast from the exit can be done to find all fields
//! "connected" to the exit (O(x + y)).
//!
//! In terms of visualization (even printing to text) - I don't even try to be efficient.
use std::cmp::Ordering;
use std::io::BufRead;
mod flood;
pub use flood::flood;
mod astar;
pub use astar::astar;
/// Direction from which the field needs to be approached to reach it at the given cost. As it is
/// possible to have the same distance from multiple directions, it is a simple bitset. This is needed
/// because in our problem the cost of the next step depends on whether there is a turn on this step.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Dir(u8);
impl Dir {
pub const NONE: Dir = Dir(0);
pub const LEFT: Dir = Dir(1);
pub const UP: Dir = Dir(2);
pub const RIGHT: Dir = Dir(4);
pub const DOWN: Dir = Dir(8);
pub const ANY: Dir = Dir(1 | 2 | 4 | 8);
pub fn has_all(&self, Dir(other): Dir) -> bool {
self.0 & other == other
}
/// Returns directions in which at least one step is needed
pub fn vec((from_x, from_y): (usize, usize), (to_x, to_y): (usize, usize)) -> Self {
let h = match from_x.cmp(&to_x) {
Ordering::Less => Self::LEFT,
Ordering::Greater => Self::RIGHT,
Ordering::Equal => Self::NONE,
};
let v = match from_y.cmp(&to_y) {
Ordering::Less => Self::UP,
Ordering::Greater => Self::DOWN,
Ordering::Equal => Self::NONE,
};
h | v
}
/// Rotates left
pub fn left(mut self) -> Self {
let down = (self.0 & 1) << 3;
self.0 >>= 1;
self.0 |= down;
self
}
/// Rotates right
pub fn right(mut self) -> Self {
let left = (self.0 & 8) >> 3;
self.0 <<= 1;
self.0 |= left;
self.0 &= 0xf;
self
}
/// Returns minimal number of rotations so at least one encoded direction would match every
/// given direction at least once | // I have feeling it is strongly suboptimal; Actually as both directions are encoded as 4
// bits, just precalculated table would be best solution
let mut min = 4;
for dir in [Self::LEFT, Self::RIGHT, Self::UP, Self::DOWN].iter() {
let mut d = *dir;
if !self.has_all(d) {
continue;
}
let mut o = other.0 & !dir.0;
let mut cnt = 0;
while o != 0 {
cnt += 1;
d = d.left();
o &= !d.0;
}
min = std::cmp::min(min, cnt);
d = *dir;
o = other.0 & !dir.0;
cnt = 0;
while o != 0 {
cnt += 1;
d = d.right();
o &= !d.0;
}
min = std::cmp::min(min, cnt);
}
min
}
}
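// --- Hedged sketch (added for illustration; not part of the original module) ---
// Small demonstration of how the Dir bitset is meant to be used by the search:
// `vec` gives the direction(s) of a candidate step and `min_rotation` counts how
// many turns that step needs relative to the direction(s) the field was reached
// from. Treating each required rotation as one extra unit of cost is an
// assumption of this sketch, not a statement about the submodules' exact cost model.
#[allow(dead_code)]
fn turn_cost(reached_from: Dir, from: (usize, usize), to: (usize, usize)) -> usize {
    let step_dir = Dir::vec(from, to);
    reached_from.min_rotation(step_dir)
}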
impl std::ops::BitOr for Dir {
type Output = Self;
fn bitor(self, rhs: Self) -> Self {
Self(self.0 | rhs.0)
}
}
/// Single field in maze
#[derive(Clone, Copy, Debug)]
enum Field {
Empty,
Wall,
/// Empty field with known distance from the start of the maze
/// It doesn't need to be the closes path - it is distance calulated using some path
Calculated(Dir, usize),
}
/// Whole maze representation
pub struct Maze {
/// All fields flattened
maze: Box<[Field]>,
/// Width of the maze, as it is needed for proper addressing (including the external wall)
w: usize,
}
impl Maze {
/// Maps coord to field index
fn idx(&self, x: usize, y: usize) -> usize {
// On overflow just give invalid (too big) index - anything from here would be wall by
// default which is simplification on purpose
y.saturating_mul(self.w).saturating_add(x)
}
/// Maps field index to coordinates
fn coords(&self, idx: usize) -> (usize, usize) {
(idx % self.w, idx / self.w)
}
/// Returns index of field in given direction (defined to be wrapping)
fn in_dir_idx(&self, idx: usize, dir: Dir) -> usize {
let (x, y) = self.coords(idx);
// Doing wrapping sub basically because maze size is way smaller than my indexing type size
// (considering >= 16bit machine), so after wrapping I would have invalid field, so Wall by
// default
let (x, y) = match dir {
Dir::UP => (x, y.wrapping_sub(1)),
Dir::DOWN => (x, y + 1),
Dir::LEFT => (x.wrapping_sub(1), y),
Dir::RIGHT => (x + 1, y),
_ => (x, y),
};
self.idx(x, y)
}
/// Returns field in given direction from given one (Wall if no such field)
/// If Dir has more than one direction encoded, field with same idx is returned
fn in_dir(&self, idx: usize, dir: Dir) -> Field {
self.maze
.get(self.in_dir_idx(idx, dir))
.copied()
.unwrap_or(Field::Wall)
}
/// Gives field from given coord (Wall if no such field)
fn field(&self, x: usize, y: usize) -> Field {
self.maze
.get(self.idx(x, y))
.copied()
.unwrap_or(Field::Wall)
}
/// Gives mutable field from given coord
fn field_mut(&mut self, x: usize, y: usize) -> Option<&mut Field> {
self.maze.get_mut(self.idx(x, y))
}
/// Creates a valid maze from input containing the maze description and its x/y dimensions
pub fn from_input(x: usize, y: usize, input: impl BufRead) -> Self {
// Iterating over bytes is a bad idea, but the only interesting characters are 0 and 1, which
// happen to be ASCII bytes. I am aware it won't work with any non-ASCII UTF representation
// of 0 and 1 and "I don't care, what they're going to say..."
let maze = input
.lines()
.take(y)
.flat_map(|line| line.unwrap().into_bytes())
.map(|field| match field {
b'0' => Field::Wall,
b'1' => Field::Empty,
_ => panic!("Invalid input"),
})
.collect();
Maze { maze, w: x }
}
}
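// --- Hedged usage sketch (illustration only; not part of the original module) ---
// Typical way the public pieces fit together: parse the maze from a buffered
// reader, then hand it to one of the search entry points re-exported near the
// top of this file. The exact `astar`/`flood` signatures are not shown here;
// they live in the respective submodules, so this sketch only constructs the maze.
#[allow(dead_code)]
fn solve_from_stdin_sketch(w: usize, h: usize) {
    let stdin = std::io::stdin();
    let maze = Maze::from_input(w, h, stdin.lock());
    // e.g. astar(...) or flood(...) would be called on `maze` here - see submodules.
    let _ = maze;
}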
#[cfg(feature = "text_visualize")]
impl std::fmt::Display for Maze {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
// While printing the maze, the external wall is not printed
for line in self.maze.chunks(self.w) {
let line: String = line
.iter()
.map(|field| match field {
Field::Empty => ' ',
Field::Wall => '#',
Field::Calculated(_, distance) => {
(distance % 10).to_string().chars().last().unwrap()
}
})
.chain(std::iter::once('\n'))
| pub fn min_rotation(self, other: Self) -> usize { | random_line_split |
maze.rs | FPGA" being there.
//!
//! The other approach is trying to have just a nice solution for normal processors - implement a
//! properly aligned A*, which is a pretty easy and common solution for pathfinding. Nothing special there,
//! but on a SISD arch it should behave pretty nicely (it could probably be improved by using a
//! more sophisticated algorithm like double-ended A*, but I am lazy - too much work for too little
//! payoff; if I really found more time I would rather try to do something more interesting -
//! visualization, or some kind of optimization - but I don't believe I would find the motivation for that).
//!
//! I figured out an additional "approach" (besides taking a completely different search algorithm). The maze
//! could easily be preprocessed into a directed graph, where each cell (so actually each non-wall maze field)
//! has a connection to the closest path crossing, and then any pathfinding algorithm can be run on that.
//! The benefit is that pathfinding itself is performed on a strongly reduced graph; the downside is
//! obviously the need for preprocessing (not that much - it can be done in O(x * y), but every
//! field has to be visited, while most reasonable search algorithms avoid visiting every
//! field). The problem that, if the exit is not on a crossing, there is no incoming path to it
//! is actually not difficult to solve - a simple raycast from the exit can be done to find all fields
//! "connected" to the exit (O(x + y)).
//!
//! In terms of visualization (even printing to text) - I don't even try to be efficient.
use std::cmp::Ordering;
use std::io::BufRead;
mod flood;
pub use flood::flood;
mod astar;
pub use astar::astar;
/// Direction from which the field needs to be approached to reach it at the given cost. As it is
/// possible to have the same distance from multiple directions, it is a simple bitset. This is needed
/// because in our problem the cost of the next step depends on whether there is a turn on this step.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Dir(u8);
impl Dir {
pub const NONE: Dir = Dir(0);
pub const LEFT: Dir = Dir(1);
pub const UP: Dir = Dir(2);
pub const RIGHT: Dir = Dir(4);
pub const DOWN: Dir = Dir(8);
pub const ANY: Dir = Dir(1 | 2 | 4 | 8);
pub fn has_all(&self, Dir(other): Dir) -> bool {
self.0 & other == other
}
/// Returns directions in which at least one step is needed
pub fn vec((from_x, from_y): (usize, usize), (to_x, to_y): (usize, usize)) -> Self {
let h = match from_x.cmp(&to_x) {
Ordering::Less => Self::LEFT,
Ordering::Greater => Self::RIGHT,
Ordering::Equal => Self::NONE,
};
let v = match from_y.cmp(&to_y) {
Ordering::Less => Self::UP,
Ordering::Greater => Self::DOWN,
Ordering::Equal => Self::NONE,
};
h | v
}
/// Rotates left
pub fn left(mut self) -> Self {
let down = (self.0 & 1) << 3;
self.0 >>= 1;
self.0 |= down;
self
}
/// Rotates right
pub fn right(mut self) -> Self {
let left = (self.0 & 8) >> 3;
self.0 <<= 1;
self.0 |= left;
self.0 &= 0xf;
self
}
/// Returns minimal number of rotations so at least one encoded direction would match every
/// given direction at least once
pub fn min_rotation(self, other: Self) -> usize {
// I have a feeling this is strongly suboptimal; actually, as both directions are encoded as 4
// bits, a precalculated lookup table would be the best solution
let mut min = 4;
for dir in [Self::LEFT, Self::RIGHT, Self::UP, Self::DOWN].iter() {
let mut d = *dir;
if !self.has_all(d) {
continue;
}
let mut o = other.0 & !dir.0;
let mut cnt = 0;
while o != 0 {
cnt += 1;
d = d.left();
o &= !d.0;
}
min = std::cmp::min(min, cnt);
d = *dir;
o = other.0 & !dir.0;
cnt = 0;
while o != 0 {
cnt += 1;
d = d.right();
o &= !d.0;
}
min = std::cmp::min(min, cnt);
}
min
}
}
impl std::ops::BitOr for Dir {
type Output = Self;
fn bitor(self, rhs: Self) -> Self {
Self(self.0 | rhs.0)
}
}
/// Single field in maze
#[derive(Clone, Copy, Debug)]
enum | {
Empty,
Wall,
/// Empty field with known distance from the start of the maze
/// It doesn't need to be the closest path - it is the distance calculated along some path
Calculated(Dir, usize),
}
/// Whole maze representation
pub struct Maze {
/// All fields flattened
maze: Box<[Field]>,
/// Width of the maze, as it is needed for proper addressing (including the external wall)
w: usize,
}
impl Maze {
/// Maps coord to field index
fn idx(&self, x: usize, y: usize) -> usize {
// On overflow just give invalid (too big) index - anything from here would be wall by
// default which is simplification on purpose
y.saturating_mul(self.w).saturating_add(x)
}
/// Maps field index to coordinates
fn coords(&self, idx: usize) -> (usize, usize) {
(idx % self.w, idx / self.w)
}
/// Returns index of field in given direction (defined to be wrapping)
fn in_dir_idx(&self, idx: usize, dir: Dir) -> usize {
let (x, y) = self.coords(idx);
// Doing wrapping sub basically because maze size is way smaller than my indexing type size
// (considering >= 16bit machine), so after wrapping I would have invalid field, so Wall by
// default
let (x, y) = match dir {
Dir::UP => (x, y.wrapping_sub(1)),
Dir::DOWN => (x, y + 1),
Dir::LEFT => (x.wrapping_sub(1), y),
Dir::RIGHT => (x + 1, y),
_ => (x, y),
};
self.idx(x, y)
}
/// Returns field in given direction from given one (Wall if no such field)
/// If Dir has more than one direction encoded, field with same idx is returned
fn in_dir(&self, idx: usize, dir: Dir) -> Field {
self.maze
.get(self.in_dir_idx(idx, dir))
.copied()
.unwrap_or(Field::Wall)
}
/// Gives field from given coord (Wall if no such field)
fn field(&self, x: usize, y: usize) -> Field {
self.maze
.get(self.idx(x, y))
.copied()
.unwrap_or(Field::Wall)
}
/// Gives mutable field from given coord
fn field_mut(&mut self, x: usize, y: usize) -> Option<&mut Field> {
self.maze.get_mut(self.idx(x, y))
}
/// Creates a valid maze from input containing the maze description and its x/y dimensions
pub fn from_input(x: usize, y: usize, input: impl BufRead) -> Self {
// Iterating over bytes is a bad idea, but the only interesting characters are 0 and 1, which
// happen to be ASCII bytes. I am aware it won't work with any non-ASCII UTF representation
// of 0 and 1 and "I don't care, what they're going to say..."
let maze = input
.lines()
.take(y)
.flat_map(|line| line.unwrap().into_bytes())
.map(|field| match field {
b'0' => Field::Wall,
b'1' => Field::Empty,
_ => panic!("Invalid input"),
})
.collect();
Maze { maze, w: x }
}
}
#[cfg(feature = "text_visualize")]
impl std::fmt::Display for Maze {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
// While printing the maze, the external wall is not printed
for line in self.maze.chunks(self.w) {
let line: String = line
.iter()
.map(|field| match field {
Field::Empty => ' ',
Field::Wall => '#',
Field::Calculated(_, distance) => {
(distance % 10).to_string().chars().last().unwrap()
}
})
.chain(std::iter::once('\n'))
| Field | identifier_name |
maze.rs | FPGA" being there.
//!
//! The other approach is trying to have just a nice solution for normal processors - implement a
//! properly aligned A*, which is a pretty easy and common solution for pathfinding. Nothing special there,
//! but on a SISD arch it should behave pretty nicely (it could probably be improved by using a
//! more sophisticated algorithm like double-ended A*, but I am lazy - too much work for too little
//! payoff; if I really found more time I would rather try to do something more interesting -
//! visualization, or some kind of optimization - but I don't believe I would find the motivation for that).
//!
//! I figured out an additional "approach" (besides taking a completely different search algorithm). The maze
//! could easily be preprocessed into a directed graph, where each cell (so actually each non-wall maze field)
//! has a connection to the closest path crossing, and then any pathfinding algorithm can be run on that.
//! The benefit is that pathfinding itself is performed on a strongly reduced graph; the downside is
//! obviously the need for preprocessing (not that much - it can be done in O(x * y), but every
//! field has to be visited, while most reasonable search algorithms avoid visiting every
//! field). The problem that, if the exit is not on a crossing, there is no incoming path to it
//! is actually not difficult to solve - a simple raycast from the exit can be done to find all fields
//! "connected" to the exit (O(x + y)).
//!
//! In terms of visualization (even printing to text) - I don't even try to be efficient.
use std::cmp::Ordering;
use std::io::BufRead;
mod flood;
pub use flood::flood;
mod astar;
pub use astar::astar;
/// Direction from which the field needs to be approached to reach it at the given cost. As it is
/// possible to have the same distance from multiple directions, it is a simple bitset. This is needed
/// because in our problem the cost of the next step depends on whether there is a turn on this step.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Dir(u8);
impl Dir {
pub const NONE: Dir = Dir(0);
pub const LEFT: Dir = Dir(1);
pub const UP: Dir = Dir(2);
pub const RIGHT: Dir = Dir(4);
pub const DOWN: Dir = Dir(8);
pub const ANY: Dir = Dir(1 | 2 | 4 | 8);
pub fn has_all(&self, Dir(other): Dir) -> bool {
self.0 & other == other
}
/// Returns directions in which at least one step is needed
pub fn vec((from_x, from_y): (usize, usize), (to_x, to_y): (usize, usize)) -> Self |
/// Rotates left
pub fn left(mut self) -> Self {
let down = (self.0 & 1) << 3;
self.0 >>= 1;
self.0 |= down;
self
}
/// Rotates right
pub fn right(mut self) -> Self {
let left = (self.0 & 8) >> 3;
self.0 <<= 1;
self.0 |= left;
self.0 &= 0xf;
self
}
/// Returns minimal number of rotations so at least one encoded direction would match every
/// given direction at least once
pub fn min_rotation(self, other: Self) -> usize {
// I have a feeling this is strongly suboptimal; actually, as both directions are encoded as 4
// bits, a precalculated lookup table would be the best solution
let mut min = 4;
for dir in [Self::LEFT, Self::RIGHT, Self::UP, Self::DOWN].iter() {
let mut d = *dir;
if !self.has_all(d) {
continue;
}
let mut o = other.0 & !dir.0;
let mut cnt = 0;
while o != 0 {
cnt += 1;
d = d.left();
o &= !d.0;
}
min = std::cmp::min(min, cnt);
d = *dir;
o = other.0 & !dir.0;
cnt = 0;
while o != 0 {
cnt += 1;
d = d.right();
o &= !d.0;
}
min = std::cmp::min(min, cnt);
}
min
}
}
impl std::ops::BitOr for Dir {
type Output = Self;
fn bitor(self, rhs: Self) -> Self {
Self(self.0 | rhs.0)
}
}
/// Single field in maze
#[derive(Clone, Copy, Debug)]
enum Field {
Empty,
Wall,
/// Empty field with known distance from the start of the maze
/// It doesn't need to be the closest path - it is the distance calculated along some path
Calculated(Dir, usize),
}
/// Whole maze representation
pub struct Maze {
/// All fields flattened
maze: Box<[Field]>,
/// Width of the maze, as it is needed for proper addressing (including the external wall)
w: usize,
}
impl Maze {
/// Maps coord to field index
fn idx(&self, x: usize, y: usize) -> usize {
// On overflow just give invalid (too big) index - anything from here would be wall by
// default which is simplification on purpose
y.saturating_mul(self.w).saturating_add(x)
}
/// Maps field index to coordinates
fn coords(&self, idx: usize) -> (usize, usize) {
(idx % self.w, idx / self.w)
}
/// Returns index of field in given direction (defined to be wrapping)
fn in_dir_idx(&self, idx: usize, dir: Dir) -> usize {
let (x, y) = self.coords(idx);
// Doing wrapping sub basically because maze size is way smaller than my indexing type size
// (considering >= 16bit machine), so after wrapping I would have invalid field, so Wall by
// default
let (x, y) = match dir {
Dir::UP => (x, y.wrapping_sub(1)),
Dir::DOWN => (x, y + 1),
Dir::LEFT => (x.wrapping_sub(1), y),
Dir::RIGHT => (x + 1, y),
_ => (x, y),
};
self.idx(x, y)
}
/// Returns field in given direction from given one (Wall if no such field)
/// If Dir has more than one direction encoded, field with same idx is returned
fn in_dir(&self, idx: usize, dir: Dir) -> Field {
self.maze
.get(self.in_dir_idx(idx, dir))
.copied()
.unwrap_or(Field::Wall)
}
/// Gives field from given coord (Wall if no such field)
fn field(&self, x: usize, y: usize) -> Field {
self.maze
.get(self.idx(x, y))
.copied()
.unwrap_or(Field::Wall)
}
/// Gives mutable field from given coord
fn field_mut(&mut self, x: usize, y: usize) -> Option<&mut Field> {
self.maze.get_mut(self.idx(x, y))
}
/// Creates a valid maze from input containing the maze description and its x/y dimensions
pub fn from_input(x: usize, y: usize, input: impl BufRead) -> Self {
// Iterating over bytes is a bad idea, but the only interesting characters are 0 and 1, which
// happen to be ASCII bytes. I am aware it won't work with any non-ASCII UTF representation
// of 0 and 1 and "I don't care, what they're going to say..."
let maze = input
.lines()
.take(y)
.flat_map(|line| line.unwrap().into_bytes())
.map(|field| match field {
b'0' => Field::Wall,
b'1' => Field::Empty,
_ => panic!("Invalid input"),
})
.collect();
Maze { maze, w: x }
}
}
#[cfg(feature = "text_visualize")]
impl std::fmt::Display for Maze {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
// While printing the maze, the external wall is not printed
for line in self.maze.chunks(self.w) {
let line: String = line
.iter()
.map(|field| match field {
Field::Empty => ' ',
Field::Wall => '#',
Field::Calculated(_, distance) => {
(distance % 10).to_string().chars().last().unwrap()
}
})
.chain(std::iter::once('\n | {
let h = match from_x.cmp(&to_x) {
Ordering::Less => Self::LEFT,
Ordering::Greater => Self::RIGHT,
Ordering::Equal => Self::NONE,
};
let v = match from_y.cmp(&to_y) {
Ordering::Less => Self::UP,
Ordering::Greater => Self::DOWN,
Ordering::Equal => Self::NONE,
};
h | v
} | identifier_body |
darts.js |
/*
* initialize game variables
*/
DG.prototype.init = function(){
DG.buttonEnable = true;
DG.numberPlay = 3;
DG.isEndGame = false;
DG.player1 = true;
DG.multi = 1
DG.lastScore = 0;
DG.keyPressed = 0;
DG.currentPlayer = 'player1'
DG.currentPlayerName = $(".player1Name")[0].innerText;
DG.prototype.initColors()
DG.prototype.initWinnerButtons()
$('.p1Result').val(DG.game);
$('.p2Result').val(DG.game);
$('.playerWinBackground').hide();
$('.playerWin').hide();
}
/*
* initialize game Sounds
*/
DG.prototype.initSounds = function(){
DG.sample = new RapidSoundsSample('medias/dart2.mp3', 0.2);
DG.sampleError = new RapidSoundsSample('medias/error.wav', 0.2);
DG.sampleChangePlayer = new RapidSoundsSample('medias/changePlayer.mp3', 0.5);
DG.sampleplayer1 = new RapidSoundsSample('medias/testoo.mp3', 1);
DG.sampleplayer2 = new RapidSoundsSample('medias/rouge.low.mp3', 1);
}
/*
* button factory
*/
DG.prototype.buttonFactory = function(){
for (i=0; i<=20 ;i++){
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
}
i=25;
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
i=50;
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
for (i=1; i<=7 ;i++){
DG.prototype.createButtonEmpty(i, 'empty');
}
DG.prototype.createButton("x2", 'keyMulti', 2);
DG.prototype.createButton("x3", 'keyMulti', 3);
$( ".keyPlayer" ).on( "touchstart click", DG.prototype.minusResult );
$( ".keyMulti" ).on( "touchstart click", DG.prototype.multiScore );
$( ".startButton" ).on( "touchstart click", DG.prototype.savePlayers );
}
/*
* create button
*/
DG.prototype.createButton = function (i, buttonName = '', buttonId = ''){
newButtonWrapper = $("<span>").attr('class','button');
newButtonWrapper.attr('id','ok'+i);
newButtonWrapper.appendTo(".numbers");
newButton = $("<button>").attr('class',buttonName);
newButton.attr('id', buttonId);
newButton.appendTo("#ok"+i).html( i ).trigger('create');
$( "#"+buttonId ).on( "touchstart click", DG.prototype.PlaySound );
}
/*
* create empty button
*/
DG.prototype.createButtonEmpty = function (i,buttonName){
newButtonWrapper = $("<span>").attr('class','button ');
newButtonWrapper.attr('id',buttonName+i).appendTo(".numbers");
newButton = $("<button>").attr('disabled','true');
newButton.appendTo("#"+buttonName+i).html( " " );
}
/*
* initialize colors
*/
DG.prototype.initColors = function(){
DG.p1Color = '#333333'
DG.p2Color = '#cc0099'
DG.playerColor = DG.p1Color;
$('body').css({'background-color' : '#262626'})
$(".numberPlayLeftP1").css({'color' : 'white'});
$(".numberPlayLeftP2").css({'color' : 'white'});
$("button").css({'background-color' : DG.p1Color});
$(".p1Result").css({'background-color' : 'white'});
$(".p2Result").css({'background-color' : DG.p1Color});
DG.prototype.initDarts(true);
}
/*
 * build the three-dart indicator string (three dots)
*/
DG.prototype.addFullDarts = function(){
return DG.prototype.strRepeat('.', 3);
}
/*
* init darts
*/
DG.prototype.initDarts = function(bBegin = false){
if(bBegin == true){
$(".numberPlayLeftP1")[0].innerText = DG.prototype.addFullDarts();
}else{
$(".numberPlayLeftP1")[0].innerText = "";
}
$(".numberPlayLeftP2")[0].innerText = "";
}
/*
* initialize winner buttons
* - click on yes button on win screen
* - click on no button on win screen
* - click on play again button on win screen
*/
DG.prototype.initWinnerButtons = function(){
$('.yes').on( "click", function() {
DG.prototype.stopSoundWin(DG.currentPlayer);
DG.prototype.init();
});
$('.no').on( "click", function() {
window.location.replace('./')
});
$('.playAgain').on( "click touchstart", function() {
window.location.replace('./')
});
}
/*
* manage the button multiply
*/
DG.prototype.multiManage = function(){
$(".keyMulti").css({'background-color' : DG.p1Color});
}
/*
* manage click on x2 or x3 button
*/
DG.prototype.multiScore = function(evt){
evt.preventDefault();
$(".keyMulti").css({'background-color' : DG.p1Color});
$(".keyMulti").css({'background-color' : DG.p1Color});
$(this).css({'background-color' : 'white'});
DG.multi = parseInt($(this)[0].id);
}
/*
* minus the score
*/
DG.prototype.minusResult = function(ev){
ev.preventDefault();
DG.lastMulti = 1
if ( DG.buttonEnable == true){
$(this).css({'background-color' : 'white'});
}else{
$(this).css({'background-color' : '#333333'});
return true;
}
if(true == DG.isEndGame) return false;
DG.prototype.multiManage();
// get the current pressed button score
DG.keyMark = $(this)[0].innerText;
if ('player1' == DG.currentPlayer){
DG.playerResult = ".p1Result";
}else{
DG.playerResult = ".p2Result";
}
if (DG.numberPlay == 3){
DG.lastScore = $(DG.playerResult).val() ;
}
DG.lastMulti = DG.multi
DG.result = $(DG.playerResult).val();
DG.result -= (DG.multi * DG.keyMark);
DG.prototype.saveData();
// initialize multi
DG.multi = 1
if (DG.result==1){
DG.prototype.endError()
return false;
}
if (DG.result == 0){
if(DG.lastMulti == 2){
DG.prototype.endGame();
}else{
DG.prototype.endError()
}
return true;
}else if (DG.result < 0 ){
DG.prototype.endError()
}else{
DG.numberPlay--;
if (DG.result > 0){
DG.prototype.initDarts();
if (DG.numberPlay == 0){
DG.prototype.remainingDarts();
DG.numberPlay=3;
DG.buttonEnable = false;
}else {
DG.prototype.remainingDarts();
}
// remaining darts
if ( DG.buttonEnable == false){
setTimeout(DG.prototype.changePlayer, 1000);
}else {
$(this).css({'background-color' : 'white'});
}
}
}
$(DG.playerResult).val(DG.result);
}
/*
* ucfirst the name
*/
DG.prototype.ucfirst = function(str) {
var f = str.charAt(0).toUpperCase();
return f + str.substr(1).toLowerCase();
}
/*
* hide the splash screen
*/
DG.prototype.hideSplash = function(str) {
$('.playerStart').hide();
$('.start').hide();
}
/*
* save Player name
* hide splash
*/
DG.prototype.savePlayers = function(ev){
if($('.p1ResultBlock').val() =='') DG.p1Name = "Player 1"
else DG.p1Name = DG.prototype.ucfirst($('.p1ResultBlock').val());
if($('.p2ResultBlock').val() =='') DG.p2Name = "Player 2"
else DG.p2Name = DG.prototype.ucfirst($('.p2ResultBlock').val());
$(".player1Name")[0].innerText = DG.p1Name;
DG.currentPlayerName = DG.p1Name;
$(".player2Name")[0].innerText = DG.p2Name;
setTimeout(DG.prototype.hideSplash, 500);
}
/*
* change player when a bad score was done
* score = 1
* score < 0
* last score was not multiplied by 2
*/
DG.prototype.endError = function(){
DG.sampleError.shootRound();
DG.result=DG.lastScore;
DG.numberPlay = 3;
DG.prototype.initDarts();
DG.buttonEnable = false;
$(DG.playerResult).val(DG.result);
setTimeout(DG.prototype.changePlayer, 500);
}
/*
* avoid double tap zoom on ipad and iphone
*/
$(this).bind('touchstart', function preventZoom(e) {
var t2 = e.timeStamp
, t1 | {
DG.scoreP1 = 0;
DG.scoreP2 = 0;
DG.game = 301
} | identifier_body |
|
darts.js | (){
DG.scoreP1 = 0;
DG.scoreP2 = 0;
DG.game = 301
}
/*
* initialize game variables
*/
DG.prototype.init = function(){
DG.buttonEnable = true;
DG.numberPlay = 3;
DG.isEndGame = false;
DG.player1 = true;
DG.multi = 1
DG.lastScore = 0;
DG.keyPressed = 0;
DG.currentPlayer = 'player1'
DG.currentPlayerName = $(".player1Name")[0].innerText;
DG.prototype.initColors()
DG.prototype.initWinnerButtons()
$('.p1Result').val(DG.game);
$('.p2Result').val(DG.game);
$('.playerWinBackground').hide();
$('.playerWin').hide();
}
/*
* initialize game Sounds
*/
DG.prototype.initSounds = function(){
DG.sample = new RapidSoundsSample('medias/dart2.mp3', 0.2);
DG.sampleError = new RapidSoundsSample('medias/error.wav', 0.2);
DG.sampleChangePlayer = new RapidSoundsSample('medias/changePlayer.mp3', 0.5);
DG.sampleplayer1 = new RapidSoundsSample('medias/testoo.mp3', 1);
DG.sampleplayer2 = new RapidSoundsSample('medias/rouge.low.mp3', 1);
}
/*
* button factory
*/
DG.prototype.buttonFactory = function(){
for (i=0; i<=20 ;i++){
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
}
i=25;
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
i=50;
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
for (i=1; i<=7 ;i++){
DG.prototype.createButtonEmpty(i, 'empty');
}
DG.prototype.createButton("x2", 'keyMulti', 2);
DG.prototype.createButton("x3", 'keyMulti', 3);
$( ".keyPlayer" ).on( "touchstart click", DG.prototype.minusResult );
$( ".keyMulti" ).on( "touchstart click", DG.prototype.multiScore );
$( ".startButton" ).on( "touchstart click", DG.prototype.savePlayers );
}
/*
* create button
*/
DG.prototype.createButton = function (i, buttonName = '', buttonId = ''){
newButtonWrapper = $("<span>").attr('class','button');
newButtonWrapper.attr('id','ok'+i);
newButtonWrapper.appendTo(".numbers");
newButton = $("<button>").attr('class',buttonName);
newButton.attr('id', buttonId);
newButton.appendTo("#ok"+i).html( i ).trigger('create');
$( "#"+buttonId ).on( "touchstart click", DG.prototype.PlaySound );
}
/*
* create empty button
*/
DG.prototype.createButtonEmpty = function (i,buttonName){
newButtonWrapper = $("<span>").attr('class','button ');
newButtonWrapper.attr('id',buttonName+i).appendTo(".numbers");
newButton = $("<button>").attr('disabled','true');
newButton.appendTo("#"+buttonName+i).html( " " );
}
/*
* initialize colors
*/
DG.prototype.initColors = function(){
DG.p1Color = '#333333'
DG.p2Color = '#cc0099'
DG.playerColor = DG.p1Color;
$('body').css({'background-color' : '#262626'})
$(".numberPlayLeftP1").css({'color' : 'white'});
$(".numberPlayLeftP2").css({'color' : 'white'});
$("button").css({'background-color' : DG.p1Color});
$(".p1Result").css({'background-color' : 'white'});
$(".p2Result").css({'background-color' : DG.p1Color});
DG.prototype.initDarts(true);
}
/*
* build the "full darts" display string (three dots)
*/
DG.prototype.addFullDarts = function(){
return DG.prototype.strRepeat('.', 3);
}
/*
* init darts
*/
DG.prototype.initDarts = function(bBegin = false){
if(bBegin == true){
$(".numberPlayLeftP1")[0].innerText = DG.prototype.addFullDarts();
}else{
$(".numberPlayLeftP1")[0].innerText = "";
}
$(".numberPlayLeftP2")[0].innerText = "";
}
/*
* initialize winner buttons
* - click on yes button on win screen
* - click on no button on win screen
* - click on play again button on win screen
*/
DG.prototype.initWinnerButtons = function(){
$('.yes').on( "click", function() {
DG.prototype.stopSoundWin(DG.currentPlayer);
DG.prototype.init();
});
$('.no').on( "click", function() {
window.location.replace('./')
});
$('.playAgain').on( "click touchstart", function() {
window.location.replace('./')
});
}
/*
* manage the button multiply
*/
DG.prototype.multiManage = function(){
$(".keyMulti").css({'background-color' : DG.p1Color});
}
/*
* manage click on x2 or x3 button
*/
DG.prototype.multiScore = function(evt){
evt.preventDefault();
$(".keyMulti").css({'background-color' : DG.p1Color});
$(".keyMulti").css({'background-color' : DG.p1Color});
$(this).css({'background-color' : 'white'});
DG.multi = parseInt($(this)[0].id);
}
/*
* minus the score
*/
DG.prototype.minusResult = function(ev){
ev.preventDefault();
DG.lastMulti = 1
if ( DG.buttonEnable == true){
$(this).css({'background-color' : 'white'});
}else{
$(this).css({'background-color' : '#333333'});
return true;
}
if(true == DG.isEndGame) return false;
DG.prototype.multiManage();
// get the current pressed button score
DG.keyMark = $(this)[0].innerText;
if ('player1' == DG.currentPlayer){
DG.playerResult = ".p1Result";
}else{
DG.playerResult = ".p2Result";
}
if (DG.numberPlay == 3){
DG.lastScore = $(DG.playerResult).val() ;
}
DG.lastMulti = DG.multi
DG.result = $(DG.playerResult).val();
DG.result -= (DG.multi * DG.keyMark);
DG.prototype.saveData();
// initialize multi
DG.multi = 1
if (DG.result==1){
DG.prototype.endError()
return false;
}
if (DG.result == 0){
if(DG.lastMulti == 2){
DG.prototype.endGame();
}else{
DG.prototype.endError()
}
return true;
}else if (DG.result < 0 ){
DG.prototype.endError()
}else{
DG.numberPlay--;
if (DG.result > 0){
DG.prototype.initDarts();
if (DG.numberPlay == 0){
DG.prototype.remainingDarts();
DG.numberPlay=3;
DG.buttonEnable = false;
}else {
DG.prototype.remainingDarts();
}
// remaining darts
if ( DG.buttonEnable == false){
setTimeout(DG.prototype.changePlayer, 1000);
}else {
$(this).css({'background-color' : 'white'});
}
}
}
$(DG.playerResult).val(DG.result);
}
/*
* ucfirst the name
*/
DG.prototype.ucfirst = function(str) {
var f = str.charAt(0).toUpperCase();
return f + str.substr(1).toLowerCase();
}
/*
* hide the splash screen
*/
DG.prototype.hideSplash = function(str) {
$('.playerStart').hide();
$('.start').hide();
}
/*
* save Player name
* hide splash
*/
DG.prototype.savePlayers = function(ev){
if($('.p1ResultBlock').val() =='') DG.p1Name = "Player 1"
else DG.p1Name = DG.prototype.ucfirst($('.p1ResultBlock').val());
if($('.p2ResultBlock').val() =='') DG.p2Name = "Player 2"
else DG.p2Name = DG.prototype.ucfirst($('.p2ResultBlock').val());
$(".player1Name")[0].innerText = DG.p1Name;
DG.currentPlayerName = DG.p1Name;
$(".player2Name")[0].innerText = DG.p2Name;
setTimeout(DG.prototype.hideSplash, 500);
}
/*
* change player when a bad score was done
* score = 1
* score < 0
* last score was not multiplied by 2
*/
DG.prototype.endError = function(){
DG.sampleError.shootRound();
DG.result=DG.lastScore;
DG.numberPlay = 3;
DG.prototype.initDarts();
DG.buttonEnable = false;
$(DG.playerResult).val(DG.result);
setTimeout(DG.prototype.changePlayer, 500);
}
/*
* avoid double tap zoom on ipad and iphone
*/
$(this).bind('touchstart', function preventZoom(e) {
var t2 = e.timeStamp
, t1 | DG | identifier_name |
|
darts.js | .prototype.initDarts = function(bBegin = false){
if(bBegin == true){
$(".numberPlayLeftP1")[0].innerText = DG.prototype.addFullDarts();
}else{
$(".numberPlayLeftP1")[0].innerText = "";
}
$(".numberPlayLeftP2")[0].innerText = "";
}
/*
* initialize winner buttons
* - click on yes button on win screen
* - click on no button on win screen
* - click on play again button on win screen
*/
DG.prototype.initWinnerButtons = function(){
$('.yes').on( "click", function() {
DG.prototype.stopSoundWin(DG.currentPlayer);
DG.prototype.init();
});
$('.no').on( "click", function() {
window.location.replace('./')
});
$('.playAgain').on( "click touchstart", function() {
window.location.replace('./')
});
}
/*
* manage the button multiply
*/
DG.prototype.multiManage = function(){
$(".keyMulti").css({'background-color' : DG.p1Color});
}
/*
* manage click on x2 or x3 button
*/
DG.prototype.multiScore = function(evt){
evt.preventDefault();
$(".keyMulti").css({'background-color' : DG.p1Color});
$(".keyMulti").css({'background-color' : DG.p1Color});
$(this).css({'background-color' : 'white'});
DG.multi = parseInt($(this)[0].id);
}
/*
* minus the score
*/
DG.prototype.minusResult = function(ev){
ev.preventDefault();
DG.lastMulti = 1
if ( DG.buttonEnable == true){
$(this).css({'background-color' : 'white'});
}else{
$(this).css({'background-color' : '#333333'});
return true;
}
if(true == DG.isEndGame) return false;
DG.prototype.multiManage();
// get the current pressed button score
DG.keyMark = $(this)[0].innerText;
if ('player1' == DG.currentPlayer){
DG.playerResult = ".p1Result";
}else{
DG.playerResult = ".p2Result";
}
if (DG.numberPlay == 3){
DG.lastScore = $(DG.playerResult).val() ;
}
DG.lastMulti = DG.multi
DG.result = $(DG.playerResult).val();
DG.result -= (DG.multi * DG.keyMark);
DG.prototype.saveData();
// initialize multi
DG.multi = 1
if (DG.result==1){
DG.prototype.endError()
return false;
}
if (DG.result == 0){
if(DG.lastMulti == 2){
DG.prototype.endGame();
}else{
DG.prototype.endError()
}
return true;
}else if (DG.result < 0 ){
DG.prototype.endError()
}else{
DG.numberPlay--;
if (DG.result > 0){
DG.prototype.initDarts();
if (DG.numberPlay == 0){
DG.prototype.remainingDarts();
DG.numberPlay=3;
DG.buttonEnable = false;
}else {
DG.prototype.remainingDarts();
}
// remaining darts
if ( DG.buttonEnable == false){
setTimeout(DG.prototype.changePlayer, 1000);
}else {
$(this).css({'background-color' : 'white'});
}
}
}
$(DG.playerResult).val(DG.result);
}
/*
* ucfirst the name
*/
DG.prototype.ucfirst = function(str) {
var f = str.charAt(0).toUpperCase();
return f + str.substr(1).toLowerCase();
}
/*
* hide the splash screen
*/
DG.prototype.hideSplash = function(str) {
$('.playerStart').hide();
$('.start').hide();
}
/*
* save Player name
* hide splash
*/
DG.prototype.savePlayers = function(ev){
if($('.p1ResultBlock').val() =='') DG.p1Name = "Player 1"
else DG.p1Name = DG.prototype.ucfirst($('.p1ResultBlock').val());
if($('.p2ResultBlock').val() =='') DG.p2Name = "Player 2"
else DG.p2Name = DG.prototype.ucfirst($('.p2ResultBlock').val());
$(".player1Name")[0].innerText = DG.p1Name;
DG.currentPlayerName = DG.p1Name;
$(".player2Name")[0].innerText = DG.p2Name;
setTimeout(DG.prototype.hideSplash, 500);
}
/*
* change player when a bad score was done
* score = 1
* score < 0
* last score was not multiplied by 2
*/
DG.prototype.endError = function(){
DG.sampleError.shootRound();
DG.result=DG.lastScore;
DG.numberPlay = 3;
DG.prototype.initDarts();
DG.buttonEnable = false;
$(DG.playerResult).val(DG.result);
setTimeout(DG.prototype.changePlayer, 500);
}
/*
* avoid double tap zoom on ipad and iphone
*/
$(this).bind('touchstart', function preventZoom(e) {
var t2 = e.timeStamp
, t1 = $(this).data('lastTouch') || t2
, dt = t2 - t1
, fingers = e.originalEvent.touches.length;
$(this).data('lastTouch', t2);
if (!dt || dt > 200 || fingers > 1) return; // not double-tap
e.preventDefault(); // double tap - prevent the zoom
// also synthesize click events we just swallowed up
$(this).trigger('click').trigger('click');
});
/*
* play sound calling lib audio.js
*/
DG.prototype.PlaySound = function() {
if ( DG.buttonEnable == true){
DG.sample.shootRound();
}
}
/*
* play sound calling lib audio.js
*/
DG.prototype.PlaySoundChangePlayer = function() {
DG.sampleChangePlayer.shootRound();
}
/*
* play sound when won
*/
DG.prototype.playSoundWin = function(currentPlayer) {
DG.playerSample = eval('DG.sample' + currentPlayer)
DG.playerSample.shootRound()
}
/*
* stop sound of winner
*/
DG.prototype.stopSoundWin = function(currentPlayer) {
DG.playerSample = eval('DG.sample' + currentPlayer)
DG.playerSample.stop()
}
/*
* display congratulations
*/
DG.prototype.endGame = function(){
$('.playerWinBackground').show();
DG.prototype.playSoundWin(DG.currentPlayer)
$('.playerWin').show();
//~ $(DG.playerResult).val('win');
DG.prototype.saveData();
DG.isEndGame = true;
if ('player1' == DG.currentPlayer){
DG.scoreP1 ++;
}else {
DG.scoreP2 ++;
}
$('.scorePlayer1')[0].innerText = DG.scoreP1;
$('.scorePlayer2')[0].innerText = DG.scoreP2;
if((DG.scoreP2 > 1)||(DG.scoreP1 > 1)){
$('.yes').hide();
$('.no').hide();
$('.gameStyle').hide();
$('.slash').hide();
$('.playAgain').show();
}
if((DG.scoreP2 + DG.scoreP1 == 2)){
// decider?
$('.gameStyle')[0].innerText = 'Decider ?';
}
if((DG.scoreP2 + DG.scoreP1 == 1)){
// revenge?
$('.gameStyle')[0].innerText = 'Revenge ?';
}
//~ DG.prototype.init();
}
/*
* save data with ajax
*/
DG.prototype.saveData = function(){
url="darts_save.php";
data = 'gameNumber=' + $(".gameNumber")[0].innerText;
data += '&' + DG.currentPlayer + '=' + DG.currentPlayerName;
data += '&keyPressed='+ (DG.multi * DG.keyMark);
data += '&scoreP1=' + $(".p1Result").val();
data += '&scoreP2=' + $(".p2Result").val();
dataType = 'json';
$.ajax({
type: "POST",
url: url,
data: data,
success: function (json) {
}
,
dataType: dataType
});
}
/*
* change the player
*/
DG.prototype.changePlayer = function(){
DG.buttonEnable =true;
DG.prototype.PlaySoundChangePlayer();
if ('player1' == DG.currentPlayer){
DG.currentPlayer = "player2";
DG.currentPlayerName = $(".player2Name")[0].innerText;
$(".p1Result").css({'background-color' : '#333333'});
$(".p2Result").css({'background-color' : 'white'});
$("button").css({'background-color' : DG.p1Color});
}else {
DG.currentPlayer = "player1";
DG.currentPlayerName = $(".player1Name")[0].innerText;
$(".p1Result").css({'background-color' : 'white'});
$(".p2Result").css({'background-color' : '#333333'});
$("button").css({'background-color' : DG.p1Color});
}
DG.prototype.remainingDarts();
}
/*
* repeat a string
*/
DG.prototype.strRepeat = function(str, count){
strOut = ''
for (i=1; i<=count; i++){
strOut = strOut + str;
} | random_line_split |
||
darts.js | 3', 0.2);
DG.sampleError = new RapidSoundsSample('medias/error.wav', 0.2);
DG.sampleChangePlayer = new RapidSoundsSample('medias/changePlayer.mp3', 0.5);
DG.sampleplayer1 = new RapidSoundsSample('medias/testoo.mp3', 1);
DG.sampleplayer2 = new RapidSoundsSample('medias/rouge.low.mp3', 1);
}
/*
* button factory
*/
DG.prototype.buttonFactory = function(){
for (i=0; i<=20 ;i++){
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
}
i=25;
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
i=50;
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
for (i=1; i<=7 ;i++){
DG.prototype.createButtonEmpty(i, 'empty');
}
DG.prototype.createButton("x2", 'keyMulti', 2);
DG.prototype.createButton("x3", 'keyMulti', 3);
$( ".keyPlayer" ).on( "touchstart click", DG.prototype.minusResult );
$( ".keyMulti" ).on( "touchstart click", DG.prototype.multiScore );
$( ".startButton" ).on( "touchstart click", DG.prototype.savePlayers );
}
/*
* create button
*/
DG.prototype.createButton = function (i, buttonName = '', buttonId = ''){
newButtonWrapper = $("<span>").attr('class','button');
newButtonWrapper.attr('id','ok'+i);
newButtonWrapper.appendTo(".numbers");
newButton = $("<button>").attr('class',buttonName);
newButton.attr('id', buttonId);
newButton.appendTo("#ok"+i).html( i ).trigger('create');
$( "#"+buttonId ).on( "touchstart click", DG.prototype.PlaySound );
}
/*
* create empty button
*/
DG.prototype.createButtonEmpty = function (i,buttonName){
newButtonWrapper = $("<span>").attr('class','button ');
newButtonWrapper.attr('id',buttonName+i).appendTo(".numbers");
newButton = $("<button>").attr('disabled','true');
newButton.appendTo("#"+buttonName+i).html( " " );
}
/*
* initialize colors
*/
DG.prototype.initColors = function(){
DG.p1Color = '#333333'
DG.p2Color = '#cc0099'
DG.playerColor = DG.p1Color;
$('body').css({'background-color' : '#262626'})
$(".numberPlayLeftP1").css({'color' : 'white'});
$(".numberPlayLeftP2").css({'color' : 'white'});
$("button").css({'background-color' : DG.p1Color});
$(".p1Result").css({'background-color' : 'white'});
$(".p2Result").css({'background-color' : DG.p1Color});
DG.prototype.initDarts(true);
}
/*
* build the "full darts" display string (three dots)
*/
DG.prototype.addFullDarts = function(){
return DG.prototype.strRepeat('.', 3);
}
/*
* init darts
*/
DG.prototype.initDarts = function(bBegin = false){
if(bBegin == true){
$(".numberPlayLeftP1")[0].innerText = DG.prototype.addFullDarts();
}else{
$(".numberPlayLeftP1")[0].innerText = "";
}
$(".numberPlayLeftP2")[0].innerText = "";
}
/*
* initialize winner buttons
* - click on yes button on win screen
* - click on no button on win screen
* - click on play again button on win screen
*/
DG.prototype.initWinnerButtons = function(){
$('.yes').on( "click", function() {
DG.prototype.stopSoundWin(DG.currentPlayer);
DG.prototype.init();
});
$('.no').on( "click", function() {
window.location.replace('./')
});
$('.playAgain').on( "click touchstart", function() {
window.location.replace('./')
});
}
/*
* manage the button multiply
*/
DG.prototype.multiManage = function(){
$(".keyMulti").css({'background-color' : DG.p1Color});
}
/*
* manage click on x2 or x3 button
*/
DG.prototype.multiScore = function(evt){
evt.preventDefault();
$(".keyMulti").css({'background-color' : DG.p1Color});
$(".keyMulti").css({'background-color' : DG.p1Color});
$(this).css({'background-color' : 'white'});
DG.multi = parseInt($(this)[0].id);
}
/*
* minus the score
*/
DG.prototype.minusResult = function(ev){
ev.preventDefault();
DG.lastMulti = 1
if ( DG.buttonEnable == true){
$(this).css({'background-color' : 'white'});
}else{
$(this).css({'background-color' : '#333333'});
return true;
}
if(true == DG.isEndGame) return false;
DG.prototype.multiManage();
// get the current pressed button score
DG.keyMark = $(this)[0].innerText;
if ('player1' == DG.currentPlayer){
DG.playerResult = ".p1Result";
}else{
DG.playerResult = ".p2Result";
}
if (DG.numberPlay == 3) |
DG.lastMulti = DG.multi
DG.result = $(DG.playerResult).val();
DG.result -= (DG.multi * DG.keyMark);
DG.prototype.saveData();
// initialize multi
DG.multi = 1
if (DG.result==1){
DG.prototype.endError()
return false;
}
if (DG.result == 0){
if(DG.lastMulti == 2){
DG.prototype.endGame();
}else{
DG.prototype.endError()
}
return true;
}else if (DG.result < 0 ){
DG.prototype.endError()
}else{
DG.numberPlay--;
if (DG.result > 0){
DG.prototype.initDarts();
if (DG.numberPlay == 0){
DG.prototype.remainingDarts();
DG.numberPlay=3;
DG.buttonEnable = false;
}else {
DG.prototype.remainingDarts();
}
// remaining darts
if ( DG.buttonEnable == false){
setTimeout(DG.prototype.changePlayer, 1000);
}else {
$(this).css({'background-color' : 'white'});
}
}
}
$(DG.playerResult).val(DG.result);
}
/*
* ucfirst the name
*/
DG.prototype.ucfirst = function(str) {
var f = str.charAt(0).toUpperCase();
return f + str.substr(1).toLowerCase();
}
/*
* hide the splash screen
*/
DG.prototype.hideSplash = function(str) {
$('.playerStart').hide();
$('.start').hide();
}
/*
* save Player name
* hide splash
*/
DG.prototype.savePlayers = function(ev){
if($('.p1ResultBlock').val() =='') DG.p1Name = "Player 1"
else DG.p1Name = DG.prototype.ucfirst($('.p1ResultBlock').val());
if($('.p2ResultBlock').val() =='') DG.p2Name = "Player 2"
else DG.p2Name = DG.prototype.ucfirst($('.p2ResultBlock').val());
$(".player1Name")[0].innerText = DG.p1Name;
DG.currentPlayerName = DG.p1Name;
$(".player2Name")[0].innerText = DG.p2Name;
setTimeout(DG.prototype.hideSplash, 500);
}
/*
* change player when a bad score was done
* score = 1
* score < 0
* last score was not multiplied by 2
*/
DG.prototype.endError = function(){
DG.sampleError.shootRound();
DG.result=DG.lastScore;
DG.numberPlay = 3;
DG.prototype.initDarts();
DG.buttonEnable = false;
$(DG.playerResult).val(DG.result);
setTimeout(DG.prototype.changePlayer, 500);
}
/*
* avoid double tap zoom on ipad and iphone
*/
$(this).bind('touchstart', function preventZoom(e) {
var t2 = e.timeStamp
, t1 = $(this).data('lastTouch') || t2
, dt = t2 - t1
, fingers = e.originalEvent.touches.length;
$(this).data('lastTouch', t2);
if (!dt || dt > 200 || fingers > 1) return; // not double-tap
e.preventDefault(); // double tap - prevent the zoom
// also synthesize click events we just swallowed up
$(this).trigger('click').trigger('click');
});
/*
* play sound calling lib audio.js
*/
DG.prototype.PlaySound = function() {
if ( DG.buttonEnable == true){
DG.sample.shootRound();
}
}
/*
* play sound calling lib audio.js
*/
DG.prototype.PlaySoundChangePlayer = function() {
DG.sampleChangePlayer.shootRound();
}
/*
* play sound when won
*/
DG.prototype.playSoundWin = function(currentPlayer) {
DG.playerSample = eval('DG.sample' + currentPlayer)
DG.playerSample.sh | {
DG.lastScore = $(DG.playerResult).val() ;
} | conditional_block |
table.ts | i-core/utils/request';
interface FieldTypes {
metric: 'metric';
threshold: 'metric';
dimension: 'dimension';
dateTime: 'dateTime';
}
type LegacyType = keyof FieldTypes;
type LegacyFieldType = FieldTypes[LegacyType];
interface LegacyColumn<K extends LegacyType> {
type: K;
field?: string | ({ [T in FieldTypes[K]]: string } & { parameters?: Parameters });
displayName?: string;
format?: string;
hasCustomDisplayName?: boolean;
sortDirection?: string;
attributes?: {
canAggregateSubtotal?: boolean;
field?: string;
format?: string;
name?: string;
parameters?: Parameters;
};
}
type LegacyMetadataColumn = LegacyColumn<'metric'> | LegacyColumn<'dateTime'> | LegacyColumn<'dimension'>;
export type TableVisMetadataPayloadV1 = {
type: 'table';
version: 1;
metadata: {
columns: (LegacyMetadataColumn | LegacyColumn<'threshold'>)[];
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
export interface TableColumnAttributes {
canAggregateSubtotal?: boolean;
format?: string;
}
export type TableVisualizationMetadata = {
type: 'table';
version: 2;
metadata: {
columnAttributes: {
[cid: string]: TableColumnAttributes | undefined;
};
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
type Field = { field: string; parameters: Parameters };
/**
* Extracts the field name and parameters from a legacy column definition.
* @param column the legacy column to read the field and parameters from
*/
function getColumnField(column: LegacyColumn<LegacyFieldType>): Field {
if (typeof column.field === 'string') {
let { metric, parameters } = parseMetricName(column.field);
return {
field: metric,
parameters: parameters || {},
};
} else if (typeof column.field === 'object') {
return {
field: column.field[column.type],
parameters: column.field.parameters || {},
};
} else {
return {
field: column.attributes?.name as string,
parameters: column.attributes?.parameters || {},
};
}
}
type ColumnInfo =
| { requestIndex: number; requestColumn: Column; tableIndex: number; tableColumn: LegacyMetadataColumn }
| {
// These could be undefined if the table was not updated properly and tried to display a column that no longer existed
requestIndex: undefined;
requestColumn: undefined;
tableIndex: number;
tableColumn: LegacyMetadataColumn;
}
| {
requestIndex: number;
requestColumn: Column;
// These can be undefined since converting an all grain request will add a request column with no corresponding table column
tableIndex: undefined;
tableColumn: undefined;
};
/**
* Builds a map of column canonical name to both request and visualization data
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
function buildColumnInfo(
request: RequestV2,
visualization: TableVisMetadataPayloadV1,
naviMetadata: NaviMetadataService
): Record<string, ColumnInfo> {
const columnData: Record<string, ColumnInfo> = {};
request.columns.forEach((column, index) => {
const { field, parameters } = column;
const canonicalName = canonicalizeMetric({ metric: field, parameters });
const data = columnData[canonicalName] || {};
data.requestIndex = index;
data.requestColumn = column;
columnData[canonicalName] = data;
});
visualization?.metadata?.columns.forEach((column, index) => {
column.type = column.type === 'threshold' ? 'metric' : column.type;
const newCol = column as LegacyColumn<LegacyFieldType>;
const { field, parameters } = getColumnField(newCol);
let canonicalName = canonicalizeMetric({ metric: field, parameters });
if (newCol.type === 'dateTime') {
const { table } = request;
const grain = request.columns.find((c) => c.field === `${table}.dateTime`)?.parameters.grain;
canonicalName = `${table}.${canonicalName}(grain=${grain})`;
} else if (newCol.type === 'dimension') {
const type = getRealDimensionType(field, request.dataSource, naviMetadata);
const requestColumn = request.columns.find((c) => c.type === type && c.field === field);
const fieldParam = column.attributes?.field ?? requestColumn?.parameters.field;
assert(`field param must be found for dimension ${canonicalName}`, fieldParam);
const newParams = {
...requestColumn?.parameters,
field: fieldParam,
};
canonicalName = canonicalizeMetric({ metric: field, parameters: newParams });
}
const data = columnData[canonicalName] || {};
data.tableIndex = index;
data.tableColumn = newCol;
columnData[canonicalName] = data;
});
return columnData;
}
/**
* The legacy table visualization would display by checking the following rules
* - 1) If the dimension had `show` fields -> make them individual columns (e.g. [Dim (key), Dim (desc)])
* - 2) If the desc field was available -> show it (with id only on hover)
* - 3) If the id field was available -> show it
* @param request - the requested data for this table
* @param metadata - the metadata service with the datasource already loaded
*/
function injectDimensionFields(request: RequestV2, naviMetadata: NaviMetadataService) {
const newColumns: RequestV2['columns'] = [];
request.columns.forEach((col) => {
const { type, field } = col;
if (type === 'dimension') {
const dimMeta = naviMetadata.getById(type, field, request.dataSource);
// get all show fields for dimension
let showFields = dimMeta?.getFieldsForTag('show').map((f) => f.name) ?? [];
if (showFields.length === 0) {
const allFields = dimMeta?.fields?.map((f) => f.name);
let bestField: string;
if (allFields) {
// Use desc or id if available. If neither match grab the first field
bestField = ['desc', 'id'].find((idOrDesc) => allFields.includes(idOrDesc)) ?? allFields[0];
} else {
bestField = 'desc'; // default to desc
}
showFields = [bestField];
}
showFields.forEach((field) => {
newColumns.push({
...col,
parameters: {
...col.parameters,
field,
},
});
});
} else {
newColumns.push(col);
}
});
request.columns = newColumns;
}
/**
* Normalizes a table visualization by
* - applying table order to request columns
* - moving custom display names to aliases
* - migrates existing attributes to a map based on column id
* - moves subtotal to use column id
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
export function normalizeTableV2(
request: RequestV2,
visualization: TableVisMetadataPayloadV1 | TableVisualizationMetadata,
naviMetadata: NaviMetadataService
): TableVisualizationMetadata | columns[tableIndex] = requestColumn;
} else if (requestColumn !== undefined && tableColumn === undefined) {
// this column only exists in the request
missedRequestColumns.push(requestColumn);
}
return columns;
}, [])
.filter((c) => c); // remove skipped columns
request.columns = [...reorderedColumns, ...missedRequestColumns];
// extract column attributes
const columnAttributes = Object.values(columnData).reduce((columns, columnInfo) => {
const { tableColumn, requestColumn } = columnInfo;
if (tableColumn === undefined || requestColumn === undefined) {
// this column does not exist in the table
return columns;
}
const { attributes } = tableColumn;
assert(
`The request column ${requestColumn.field} should have a present 'cid' field`,
requestColumn.cid !== undefined
);
const canAggregateSubtotal = tableColumn.type === 'metric' ? attributes?.canAggregateSubtotal : undefined;
const format = tableColumn.format !== undefined ? tableColumn.format : attributes?.format;
columns[requestColumn.cid] = {
...(canAggregateSubtotal !== undefined ? { canAggregateSubtotal } : {}),
...(!isEmpty(format) ? { format } : {}),
};
return columns;
}, {} as Record<string, TableColumnAttributes>);
// update subtotal to use column index
const { showTotals | {
if (visualization.version === 2) {
return visualization;
}
injectDimensionFields(request, naviMetadata);
const columnData: Record<string, ColumnInfo> = buildColumnInfo(request, visualization, naviMetadata);
// Rearranges request columns to match table order
const missedRequestColumns: Column[] = [];
const reorderedColumns = Object.values(columnData)
.reduce((columns: Column[], columnInfo) => {
const { tableColumn, tableIndex, requestColumn } = columnInfo;
if (requestColumn && tableColumn) {
// this column exists in request and table
assert('Table index must exist if table column exists', tableIndex !== undefined);
if (tableColumn.hasCustomDisplayName) {
// If display name is custom move over to request
requestColumn.alias = tableColumn.displayName;
} | identifier_body |
table.ts | -core/utils/request';
interface FieldTypes {
metric: 'metric';
threshold: 'metric';
dimension: 'dimension';
dateTime: 'dateTime';
}
type LegacyType = keyof FieldTypes;
type LegacyFieldType = FieldTypes[LegacyType];
interface LegacyColumn<K extends LegacyType> {
type: K;
field?: string | ({ [T in FieldTypes[K]]: string } & { parameters?: Parameters });
displayName?: string;
format?: string;
hasCustomDisplayName?: boolean;
sortDirection?: string;
attributes?: {
canAggregateSubtotal?: boolean;
field?: string;
format?: string;
name?: string;
parameters?: Parameters;
};
}
type LegacyMetadataColumn = LegacyColumn<'metric'> | LegacyColumn<'dateTime'> | LegacyColumn<'dimension'>;
export type TableVisMetadataPayloadV1 = {
type: 'table';
version: 1;
metadata: {
columns: (LegacyMetadataColumn | LegacyColumn<'threshold'>)[];
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
export interface TableColumnAttributes {
canAggregateSubtotal?: boolean;
format?: string;
}
export type TableVisualizationMetadata = {
type: 'table';
version: 2;
metadata: {
columnAttributes: {
[cid: string]: TableColumnAttributes | undefined;
};
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
type Field = { field: string; parameters: Parameters };
/**
* Extracts the field name and parameters from a legacy column definition.
* @param column the legacy column to read the field and parameters from
*/
function getColumnField(column: LegacyColumn<LegacyFieldType>): Field {
if (typeof column.field === 'string') {
let { metric, parameters } = parseMetricName(column.field);
return {
field: metric,
parameters: parameters || {},
};
} else if (typeof column.field === 'object') {
return {
field: column.field[column.type],
parameters: column.field.parameters || {},
};
} else {
return {
field: column.attributes?.name as string,
parameters: column.attributes?.parameters || {},
};
}
}
type ColumnInfo =
| { requestIndex: number; requestColumn: Column; tableIndex: number; tableColumn: LegacyMetadataColumn }
| {
// These could be undefined if the table was not updated properly and tried to display a column that no longer existed
requestIndex: undefined;
requestColumn: undefined;
tableIndex: number;
tableColumn: LegacyMetadataColumn;
}
| {
requestIndex: number;
requestColumn: Column;
// These can be undefined since converting an all grain request will add a request column with no corresponding table column
tableIndex: undefined;
tableColumn: undefined;
};
/**
* Builds a map of column canonical name to both request and visualization data
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
function buildColumnInfo(
request: RequestV2,
visualization: TableVisMetadataPayloadV1,
naviMetadata: NaviMetadataService
): Record<string, ColumnInfo> {
const columnData: Record<string, ColumnInfo> = {};
request.columns.forEach((column, index) => {
const { field, parameters } = column;
const canonicalName = canonicalizeMetric({ metric: field, parameters });
const data = columnData[canonicalName] || {};
data.requestIndex = index;
data.requestColumn = column;
columnData[canonicalName] = data;
});
visualization?.metadata?.columns.forEach((column, index) => {
column.type = column.type === 'threshold' ? 'metric' : column.type;
const newCol = column as LegacyColumn<LegacyFieldType>;
const { field, parameters } = getColumnField(newCol);
let canonicalName = canonicalizeMetric({ metric: field, parameters });
if (newCol.type === 'dateTime') {
const { table } = request;
const grain = request.columns.find((c) => c.field === `${table}.dateTime`)?.parameters.grain;
canonicalName = `${table}.${canonicalName}(grain=${grain})`;
} else if (newCol.type === 'dimension') {
const type = getRealDimensionType(field, request.dataSource, naviMetadata);
const requestColumn = request.columns.find((c) => c.type === type && c.field === field);
const fieldParam = column.attributes?.field ?? requestColumn?.parameters.field;
assert(`field param must be found for dimension ${canonicalName}`, fieldParam);
const newParams = {
...requestColumn?.parameters,
field: fieldParam,
};
canonicalName = canonicalizeMetric({ metric: field, parameters: newParams });
}
const data = columnData[canonicalName] || {};
data.tableIndex = index;
data.tableColumn = newCol;
columnData[canonicalName] = data;
});
return columnData;
}
/**
* The legacy table visualization would display by checking the following rules
* - 1) If the dimension had `show` fields -> make them individual columns (e.g. [Dim (key), Dim (desc)])
* - 2) If the desc field was available -> show it (with id only on hover)
* - 3) If the id field was available -> show it
* @param request - the requested data for this table
* @param metadata - the metadata service with the datasource already loaded
*/
function injectDimensionFields(request: RequestV2, naviMetadata: NaviMetadataService) {
const newColumns: RequestV2['columns'] = [];
request.columns.forEach((col) => {
const { type, field } = col;
if (type === 'dimension') {
const dimMeta = naviMetadata.getById(type, field, request.dataSource);
// get all show fields for dimension
let showFields = dimMeta?.getFieldsForTag('show').map((f) => f.name) ?? [];
if (showFields.length === 0) {
const allFields = dimMeta?.fields?.map((f) => f.name);
let bestField: string;
if (allFields) {
// Use desc or id if available. If neither match grab the first field
bestField = ['desc', 'id'].find((idOrDesc) => allFields.includes(idOrDesc)) ?? allFields[0];
} else {
bestField = 'desc'; // default to desc
}
showFields = [bestField];
}
showFields.forEach((field) => {
newColumns.push({
...col,
parameters: {
...col.parameters,
field,
},
});
});
} else {
newColumns.push(col);
}
});
request.columns = newColumns;
}
/**
* Normalizes a table visualization by
* - applying table order to request columns
* - moving custom display names to aliases
* - migrates existing attributes to a map based on column id
* - moves subtotal to use column id
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
export function normalizeTableV2(
request: RequestV2,
visualization: TableVisMetadataPayloadV1 | TableVisualizationMetadata,
naviMetadata: NaviMetadataService
): TableVisualizationMetadata {
if (visualization.version === 2) {
return visualization;
}
injectDimensionFields(request, naviMetadata);
const columnData: Record<string, ColumnInfo> = buildColumnInfo(request, visualization, naviMetadata);
// Rearranges request columns to match table order
const missedRequestColumns: Column[] = [];
const reorderedColumns = Object.values(columnData)
.reduce((columns: Column[], columnInfo) => {
const { tableColumn, tableIndex, requestColumn } = columnInfo;
if (requestColumn && tableColumn) {
// this column exists in request and table
assert('Table index must exist if table column exists', tableIndex !== undefined);
if (tableColumn.hasCustomDisplayName) |
columns[tableIndex] = requestColumn;
} else if (requestColumn !== undefined && tableColumn === undefined) {
// this column only exists in the request
missedRequestColumns.push(requestColumn);
}
return columns;
}, [])
.filter((c) => c); // remove skipped columns
request.columns = [...reorderedColumns, ...missedRequestColumns];
// extract column attributes
const columnAttributes = Object.values(columnData).reduce((columns, columnInfo) => {
const { tableColumn, requestColumn } = columnInfo;
if (tableColumn === undefined || requestColumn === undefined) {
// this column does not exist in the table
return columns;
}
const { attributes } = tableColumn;
assert(
`The request column ${requestColumn.field} should have a present 'cid' field`,
requestColumn.cid !== undefined
);
const canAggregateSubtotal = tableColumn.type === 'metric' ? attributes?.canAggregateSubtotal : undefined;
const format = tableColumn.format !== undefined ? tableColumn.format : attributes?.format;
columns[requestColumn.cid] = {
...(canAggregateSubtotal !== undefined ? { canAggregateSubtotal } : {}),
...(!isEmpty(format) ? { format } : {}),
};
return columns;
}, {} as Record<string, TableColumnAttributes>);
// update subtotal to use column index
const { showTotals | {
// If display name is custom move over to request
requestColumn.alias = tableColumn.displayName;
} | conditional_block |
table.ts | i-core/utils/request';
interface FieldTypes {
metric: 'metric';
threshold: 'metric';
dimension: 'dimension';
dateTime: 'dateTime';
}
type LegacyType = keyof FieldTypes;
type LegacyFieldType = FieldTypes[LegacyType];
interface LegacyColumn<K extends LegacyType> {
type: K;
field?: string | ({ [T in FieldTypes[K]]: string } & { parameters?: Parameters });
displayName?: string;
format?: string;
hasCustomDisplayName?: boolean;
sortDirection?: string;
attributes?: {
canAggregateSubtotal?: boolean;
field?: string;
format?: string;
name?: string;
parameters?: Parameters;
};
}
type LegacyMetadataColumn = LegacyColumn<'metric'> | LegacyColumn<'dateTime'> | LegacyColumn<'dimension'>;
export type TableVisMetadataPayloadV1 = {
type: 'table';
version: 1;
metadata: {
columns: (LegacyMetadataColumn | LegacyColumn<'threshold'>)[];
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
export interface TableColumnAttributes {
canAggregateSubtotal?: boolean;
format?: string;
}
export type TableVisualizationMetadata = {
type: 'table';
version: 2;
metadata: {
columnAttributes: {
[cid: string]: TableColumnAttributes | undefined;
};
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
type Field = { field: string; parameters: Parameters };
/**
* Extracts the field name and parameters from a legacy column definition.
* @param column the legacy column to read the field and parameters from
*/
function getColumnField(column: LegacyColumn<LegacyFieldType>): Field {
if (typeof column.field === 'string') {
let { metric, parameters } = parseMetricName(column.field);
return {
field: metric,
parameters: parameters || {},
};
} else if (typeof column.field === 'object') {
return {
field: column.field[column.type],
parameters: column.field.parameters || {},
};
} else {
return {
field: column.attributes?.name as string,
parameters: column.attributes?.parameters || {},
};
}
}
type ColumnInfo =
| { requestIndex: number; requestColumn: Column; tableIndex: number; tableColumn: LegacyMetadataColumn }
| {
// These could be undefined if the table was not updated properly and tried to display a column that no longer existed
requestIndex: undefined;
requestColumn: undefined;
tableIndex: number;
tableColumn: LegacyMetadataColumn;
}
| {
requestIndex: number;
requestColumn: Column;
// These can be undefined since converting an all grain request will add a request column with no corresponding table column
tableIndex: undefined;
tableColumn: undefined;
};
/**
* Builds a map of column canonical name to both request and visualization data
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
function buildColumnInfo(
request: RequestV2,
visualization: TableVisMetadataPayloadV1,
naviMetadata: NaviMetadataService
): Record<string, ColumnInfo> {
const columnData: Record<string, ColumnInfo> = {};
request.columns.forEach((column, index) => {
const { field, parameters } = column;
const canonicalName = canonicalizeMetric({ metric: field, parameters });
const data = columnData[canonicalName] || {};
data.requestIndex = index;
data.requestColumn = column;
columnData[canonicalName] = data;
});
visualization?.metadata?.columns.forEach((column, index) => {
column.type = column.type === 'threshold' ? 'metric' : column.type;
const newCol = column as LegacyColumn<LegacyFieldType>;
const { field, parameters } = getColumnField(newCol);
let canonicalName = canonicalizeMetric({ metric: field, parameters });
if (newCol.type === 'dateTime') {
const { table } = request;
const grain = request.columns.find((c) => c.field === `${table}.dateTime`)?.parameters.grain;
canonicalName = `${table}.${canonicalName}(grain=${grain})`;
} else if (newCol.type === 'dimension') {
const type = getRealDimensionType(field, request.dataSource, naviMetadata);
const requestColumn = request.columns.find((c) => c.type === type && c.field === field);
const fieldParam = column.attributes?.field ?? requestColumn?.parameters.field;
assert(`field param must be found for dimension ${canonicalName}`, fieldParam);
const newParams = {
...requestColumn?.parameters,
field: fieldParam,
};
canonicalName = canonicalizeMetric({ metric: field, parameters: newParams });
}
const data = columnData[canonicalName] || {};
data.tableIndex = index;
data.tableColumn = newCol;
columnData[canonicalName] = data;
});
return columnData;
}
/**
* The legacy table visualization would display by checking the following rules
* - 1) If the dimension had `show` fields -> make them individual columns (e.g. [Dim (key), Dim (desc)])
* - 2) If the desc field was available -> show it (with id only on hover)
* - 3) If the id field was available -> show it
* @param request - the requested data for this table
* @param metadata - the metadata service with the datasource already loaded
*/
function | (request: RequestV2, naviMetadata: NaviMetadataService) {
const newColumns: RequestV2['columns'] = [];
request.columns.forEach((col) => {
const { type, field } = col;
if (type === 'dimension') {
const dimMeta = naviMetadata.getById(type, field, request.dataSource);
// get all show fields for dimension
let showFields = dimMeta?.getFieldsForTag('show').map((f) => f.name) ?? [];
if (showFields.length === 0) {
const allFields = dimMeta?.fields?.map((f) => f.name);
let bestField: string;
if (allFields) {
// Use desc or id if available. If neither match grab the first field
bestField = ['desc', 'id'].find((idOrDesc) => allFields.includes(idOrDesc)) ?? allFields[0];
} else {
bestField = 'desc'; // default to desc
}
showFields = [bestField];
}
showFields.forEach((field) => {
newColumns.push({
...col,
parameters: {
...col.parameters,
field,
},
});
});
} else {
newColumns.push(col);
}
});
request.columns = newColumns;
}
/**
* Normalizes a table visualization by
* - applying table order to request columns
* - moving custom display names to aliases
* - migrates existing attributes to a map based on column id
* - moves subtotal to use column id
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
export function normalizeTableV2(
request: RequestV2,
visualization: TableVisMetadataPayloadV1 | TableVisualizationMetadata,
naviMetadata: NaviMetadataService
): TableVisualizationMetadata {
if (visualization.version === 2) {
return visualization;
}
injectDimensionFields(request, naviMetadata);
const columnData: Record<string, ColumnInfo> = buildColumnInfo(request, visualization, naviMetadata);
// Rearranges request columns to match table order
const missedRequestColumns: Column[] = [];
const reorderedColumns = Object.values(columnData)
.reduce((columns: Column[], columnInfo) => {
const { tableColumn, tableIndex, requestColumn } = columnInfo;
if (requestColumn && tableColumn) {
// this column exists in request and table
assert('Table index must exist if table column exists', tableIndex !== undefined);
if (tableColumn.hasCustomDisplayName) {
// If display name is custom move over to request
requestColumn.alias = tableColumn.displayName;
}
columns[tableIndex] = requestColumn;
} else if (requestColumn !== undefined && tableColumn === undefined) {
// this column only exists in the request
missedRequestColumns.push(requestColumn);
}
return columns;
}, [])
.filter((c) => c); // remove skipped columns
request.columns = [...reorderedColumns, ...missedRequestColumns];
// extract column attributes
const columnAttributes = Object.values(columnData).reduce((columns, columnInfo) => {
const { tableColumn, requestColumn } = columnInfo;
if (tableColumn === undefined || requestColumn === undefined) {
// this column does not exist in the table
return columns;
}
const { attributes } = tableColumn;
assert(
`The request column ${requestColumn.field} should have a present 'cid' field`,
requestColumn.cid !== undefined
);
const canAggregateSubtotal = tableColumn.type === 'metric' ? attributes?.canAggregateSubtotal : undefined;
const format = tableColumn.format !== undefined ? tableColumn.format : attributes?.format;
columns[requestColumn.cid] = {
...(canAggregateSubtotal !== undefined ? { canAggregateSubtotal } : {}),
...(!isEmpty(format) ? { format } : {}),
};
return columns;
}, {} as Record<string, TableColumnAttributes>);
// update subtotal to use column index
const { showTotals | injectDimensionFields | identifier_name |
table.ts | i-core/utils/request';
interface FieldTypes {
metric: 'metric';
threshold: 'metric';
dimension: 'dimension';
dateTime: 'dateTime';
}
type LegacyType = keyof FieldTypes;
type LegacyFieldType = FieldTypes[LegacyType];
interface LegacyColumn<K extends LegacyType> {
type: K;
field?: string | ({ [T in FieldTypes[K]]: string } & { parameters?: Parameters });
displayName?: string;
format?: string;
hasCustomDisplayName?: boolean;
sortDirection?: string;
attributes?: {
canAggregateSubtotal?: boolean;
field?: string;
format?: string;
name?: string;
parameters?: Parameters;
};
}
type LegacyMetadataColumn = LegacyColumn<'metric'> | LegacyColumn<'dateTime'> | LegacyColumn<'dimension'>;
export type TableVisMetadataPayloadV1 = {
type: 'table';
version: 1;
metadata: {
columns: (LegacyMetadataColumn | LegacyColumn<'threshold'>)[];
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
export interface TableColumnAttributes {
canAggregateSubtotal?: boolean;
format?: string;
}
export type TableVisualizationMetadata = {
type: 'table';
version: 2;
metadata: {
columnAttributes: {
[cid: string]: TableColumnAttributes | undefined;
};
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
type Field = { field: string; parameters: Parameters };
/**
* Extracts the field name and parameters from a legacy column definition.
* @param column the legacy column to read the field and parameters from
*/
function getColumnField(column: LegacyColumn<LegacyFieldType>): Field {
if (typeof column.field === 'string') {
let { metric, parameters } = parseMetricName(column.field);
return {
field: metric,
parameters: parameters || {},
};
} else if (typeof column.field === 'object') {
return {
field: column.field[column.type],
parameters: column.field.parameters || {},
};
} else {
return {
field: column.attributes?.name as string,
parameters: column.attributes?.parameters || {},
};
}
}
type ColumnInfo =
| { requestIndex: number; requestColumn: Column; tableIndex: number; tableColumn: LegacyMetadataColumn }
| {
// These could be undefined if the table was not updated properly and tried to display a column that no longer existed
requestIndex: undefined;
requestColumn: undefined;
tableIndex: number;
tableColumn: LegacyMetadataColumn;
}
| {
requestIndex: number;
requestColumn: Column;
// These can be undefined since converting an all grain request will add a request column with no corresponding table column
tableIndex: undefined;
tableColumn: undefined;
};
/**
* Builds a map of column canonical name to both request and visualization data
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
function buildColumnInfo(
request: RequestV2,
visualization: TableVisMetadataPayloadV1,
naviMetadata: NaviMetadataService
): Record<string, ColumnInfo> {
const columnData: Record<string, ColumnInfo> = {};
request.columns.forEach((column, index) => {
const { field, parameters } = column;
const canonicalName = canonicalizeMetric({ metric: field, parameters });
const data = columnData[canonicalName] || {};
data.requestIndex = index;
data.requestColumn = column;
columnData[canonicalName] = data;
});
visualization?.metadata?.columns.forEach((column, index) => {
column.type = column.type === 'threshold' ? 'metric' : column.type;
const newCol = column as LegacyColumn<LegacyFieldType>;
const { field, parameters } = getColumnField(newCol);
let canonicalName = canonicalizeMetric({ metric: field, parameters });
if (newCol.type === 'dateTime') {
const { table } = request;
const grain = request.columns.find((c) => c.field === `${table}.dateTime`)?.parameters.grain;
canonicalName = `${table}.${canonicalName}(grain=${grain})`;
} else if (newCol.type === 'dimension') {
const type = getRealDimensionType(field, request.dataSource, naviMetadata);
const requestColumn = request.columns.find((c) => c.type === type && c.field === field);
const fieldParam = column.attributes?.field ?? requestColumn?.parameters.field;
assert(`field param must be found for dimension ${canonicalName}`, fieldParam);
const newParams = {
...requestColumn?.parameters,
field: fieldParam,
};
canonicalName = canonicalizeMetric({ metric: field, parameters: newParams });
}
const data = columnData[canonicalName] || {};
data.tableIndex = index;
data.tableColumn = newCol;
columnData[canonicalName] = data;
});
return columnData;
}
/**
* The legacy table visualization would display by checking the following rules
* - 1) If the dimension had `show` fields -> make them individual columns (e.g. [Dim (key), Dim (desc)])
* - 2) If the desc field was available -> show it (with id only on hover)
* - 3) If the id field was available -> show it
* @param request - the requested data for this table
* @param metadata - the metadata service with the datasource already loaded
*/
function injectDimensionFields(request: RequestV2, naviMetadata: NaviMetadataService) {
const newColumns: RequestV2['columns'] = [];
request.columns.forEach((col) => {
const { type, field } = col;
if (type === 'dimension') {
const dimMeta = naviMetadata.getById(type, field, request.dataSource);
// get all show fields for dimension
let showFields = dimMeta?.getFieldsForTag('show').map((f) => f.name) ?? [];
if (showFields.length === 0) {
const allFields = dimMeta?.fields?.map((f) => f.name);
let bestField: string;
if (allFields) {
// Use desc or id if available. If neither match grab the first field
bestField = ['desc', 'id'].find((idOrDesc) => allFields.includes(idOrDesc)) ?? allFields[0];
} else {
bestField = 'desc'; // default to desc
}
showFields = [bestField];
}
showFields.forEach((field) => {
newColumns.push({
...col,
parameters: {
...col.parameters,
field,
},
});
});
} else {
newColumns.push(col);
}
});
request.columns = newColumns;
}
/**
* Normalizes a table visualization by
* - applying table order to request columns
* - moving custom display names to aliases
* - migrates existing attributes to a map based on column id
* - moves subtotal to use column id
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
export function normalizeTableV2(
request: RequestV2,
visualization: TableVisMetadataPayloadV1 | TableVisualizationMetadata,
naviMetadata: NaviMetadataService
): TableVisualizationMetadata {
if (visualization.version === 2) {
return visualization;
}
injectDimensionFields(request, naviMetadata);
const columnData: Record<string, ColumnInfo> = buildColumnInfo(request, visualization, naviMetadata);
// Rearranges request columns to match table order
const missedRequestColumns: Column[] = [];
const reorderedColumns = Object.values(columnData)
.reduce((columns: Column[], columnInfo) => {
const { tableColumn, tableIndex, requestColumn } = columnInfo;
if (requestColumn && tableColumn) {
// this column exists in request and table
assert('Table index must exist if table column exists', tableIndex !== undefined);
if (tableColumn.hasCustomDisplayName) {
// If display name is custom move over to request
requestColumn.alias = tableColumn.displayName;
}
columns[tableIndex] = requestColumn;
} else if (requestColumn !== undefined && tableColumn === undefined) {
// this column only exists in the request
missedRequestColumns.push(requestColumn);
}
return columns;
}, [])
.filter((c) => c); // remove skipped columns
request.columns = [...reorderedColumns, ...missedRequestColumns];
| if (tableColumn === undefined || requestColumn === undefined) {
// this column does not exist in the table
return columns;
}
const { attributes } = tableColumn;
assert(
`The request column ${requestColumn.field} should have a present 'cid' field`,
requestColumn.cid !== undefined
);
const canAggregateSubtotal = tableColumn.type === 'metric' ? attributes?.canAggregateSubtotal : undefined;
const format = tableColumn.format !== undefined ? tableColumn.format : attributes?.format;
columns[requestColumn.cid] = {
...(canAggregateSubtotal !== undefined ? { canAggregateSubtotal } : {}),
...(!isEmpty(format) ? { format } : {}),
};
return columns;
}, {} as Record<string, TableColumnAttributes>);
// update subtotal to use column index
const { showTotals } | // extract column attributes
const columnAttributes = Object.values(columnData).reduce((columns, columnInfo) => {
const { tableColumn, requestColumn } = columnInfo; | random_line_split |
lib.rs | : 1,
},
mip_level_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
});
Self {
texture,
sample_count,
}
}
pub fn create_view(&self) -> wgpu::TextureView {
self.texture.create_view(&Default::default())
}
}
pub struct SurfaceHandler {
surface: wgpu::Surface,
surface_configuration: wgpu::SurfaceConfiguration,
frame_buffer: Option<FrameBuffer>,
}
pub struct SurfaceHandlerConfiguration {
pub width: u32,
pub height: u32,
pub sample_count: SampleCount,
}
impl SurfaceHandler {
pub fn multisample_state(&self) -> wgpu::MultisampleState {
wgpu::MultisampleState {
count: self.sample_count() as u32,
..Default::default()
}
}
pub fn sample_count(&self) -> SampleCount {
if let Some(FrameBuffer { sample_count, .. }) = self.frame_buffer {
sample_count.into()
} else |
}
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
self.configure(
&device,
&SurfaceHandlerConfiguration {
width,
height,
sample_count: self.sample_count(),
},
);
}
pub fn configure(&mut self, device: &wgpu::Device, config: &SurfaceHandlerConfiguration) {
self.surface_configuration = wgpu::SurfaceConfiguration {
width: config.width,
height: config.height,
..self.surface_configuration
};
self.surface.configure(&device, &self.surface_configuration);
match config.sample_count {
SampleCount::Single => {
self.frame_buffer = None;
}
SampleCount::Msaa4x => self.frame_buffer = Some(FrameBuffer::new(&device, &config)),
}
}
pub fn create_view_and_resolve_target(
&self,
surface_texture: &wgpu::SurfaceTexture,
) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
let surface_texture_view = surface_texture.texture.create_view(&Default::default());
if let Some(ref frame_buffer) = self.frame_buffer {
(frame_buffer.create_view(), Some(surface_texture_view))
} else {
(surface_texture_view, None)
}
}
}
pub struct Gpu {
device: wgpu::Device,
queue: wgpu::Queue,
surface_handler: SurfaceHandler,
}
impl Gpu {
pub fn new(window: &Window) -> Self {
pollster::block_on(Self::new_async(window))
}
pub async fn new_async(window: &Window) -> Self {
let instance = wgpu::Instance::new(wgpu::Backends::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
compatible_surface: Some(&surface),
power_preference: wgpu::PowerPreference::HighPerformance,
})
.await
.expect("request adapter error");
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor::default(), None)
.await
.expect("request device error");
let preferred_texture_format = surface
.get_preferred_format(&adapter)
.expect("get preferred format error");
let window_size = window.inner_size();
let surface_configuration = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: preferred_texture_format,
width: window_size.width,
height: window_size.height,
present_mode: wgpu::PresentMode::Fifo,
};
surface.configure(&device, &surface_configuration);
let frame_buffer = FrameBuffer::new(
&device,
&SurfaceHandlerConfiguration {
width: window_size.width,
height: window_size.height,
sample_count: SampleCount::Msaa4x,
},
);
let surface_handler = SurfaceHandler {
surface,
surface_configuration,
frame_buffer: Some(frame_buffer),
};
Self {
device,
queue,
surface_handler,
}
}
pub fn resize_surface(&mut self, width: u32, height: u32) {
self.surface_handler.resize(&self.device, width, height);
}
pub fn device(&self) -> &wgpu::Device {
&self.device
}
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
}
fn as_buffer_contents<T>(slice: &[T]) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
slice.as_ptr() as *const u8,
std::mem::size_of::<T>() * slice.len(),
)
}
}
fn create_buffer<T>(
&self,
label: &str,
contents: &[T],
usage: wgpu::BufferUsages,
) -> wgpu::Buffer {
self.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(label),
contents: Self::as_buffer_contents(contents),
usage,
})
}
pub fn create_index_buffer(&self, contents: &[Index]) -> wgpu::Buffer {
self.create_buffer(
"Index Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::INDEX,
)
}
pub fn create_vertex_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Vertex Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::VERTEX,
)
}
pub fn create_uniform_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Uniform Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::UNIFORM,
)
}
pub fn create_texture_from_image(&self, image: DynamicImage) -> wgpu::Texture {
use image::GenericImageView;
let image_buffer = image.as_rgba8().expect("image format error");
let dimensions = image.dimensions();
let texture_extent_3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = self.device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: texture_extent_3d,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
self.queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
image_buffer,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(dimensions.0 << 2),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_extent_3d,
);
texture
}
pub fn preferred_texture_format(&self) -> wgpu::TextureFormat {
self.surface_handler.surface_configuration.format
}
pub fn multisample_state(&self) -> wgpu::MultisampleState {
self.surface_handler.multisample_state()
}
pub fn create_render_pass_resources(&self) -> Result<RenderPassResources, wgpu::SurfaceError> {
Ok(RenderPassResources {
command_encoder: self.device.create_command_encoder(&Default::default()),
surface_texture: self.surface_handler.surface.get_current_frame()?.output,
gpu: &self,
})
}
}
pub struct RenderPassResources<'a> {
pub command_encoder: wgpu::CommandEncoder,
surface_texture: wgpu::SurfaceTexture,
gpu: &'a Gpu,
}
impl RenderPassResources<'_> {
pub fn create_view_and_resolve_target(&self) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
self.gpu
.surface_handler
.create_view_and_resolve_target(&self.surface_texture)
}
}
pub struct MainLoop {
event_loop: EventLoop<()>,
window: Window,
}
impl MainLoop {
pub fn new(title: &str) -> MainLoop {
let event_loop = EventLoop::new();
let mut window_builder = winit::window::WindowBuilder::new();
window_builder.window = WindowAttributes {
title: title.to_owned(),
min_inner_size: Some(Size::Physical(PhysicalSize {
width: 16,
height: 16,
})),
inner_size: Some(Size::Physical(PhysicalSize {
width: 16 * 2u32.pow(6),
height: 9 * 2u32.pow(6),
})),
..Default::default()
};
let window = window_builder.build(&event_loop).unwrap();
Self { event_loop, window }
}
pub fn window | {
SampleCount::Single
} | conditional_block |
lib.rs | : 1,
},
mip_level_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
});
Self {
texture,
sample_count,
}
}
pub fn create_view(&self) -> wgpu::TextureView {
self.texture.create_view(&Default::default())
}
}
pub struct SurfaceHandler {
surface: wgpu::Surface,
surface_configuration: wgpu::SurfaceConfiguration,
frame_buffer: Option<FrameBuffer>,
}
pub struct SurfaceHandlerConfiguration {
pub width: u32,
pub height: u32,
pub sample_count: SampleCount,
}
impl SurfaceHandler {
pub fn multisample_state(&self) -> wgpu::MultisampleState {
wgpu::MultisampleState {
count: self.sample_count() as u32,
..Default::default()
}
}
pub fn sample_count(&self) -> SampleCount {
if let Some(FrameBuffer { sample_count, .. }) = self.frame_buffer {
sample_count.into()
} else {
SampleCount::Single
}
}
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
self.configure(
&device,
&SurfaceHandlerConfiguration {
width,
height,
sample_count: self.sample_count(),
},
);
}
pub fn configure(&mut self, device: &wgpu::Device, config: &SurfaceHandlerConfiguration) {
self.surface_configuration = wgpu::SurfaceConfiguration {
width: config.width,
height: config.height,
..self.surface_configuration
};
self.surface.configure(&device, &self.surface_configuration);
match config.sample_count {
SampleCount::Single => {
self.frame_buffer = None;
}
SampleCount::Msaa4x => self.frame_buffer = Some(FrameBuffer::new(&device, &config)),
}
}
pub fn create_view_and_resolve_target(
&self,
surface_texture: &wgpu::SurfaceTexture,
) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
let surface_texture_view = surface_texture.texture.create_view(&Default::default());
if let Some(ref frame_buffer) = self.frame_buffer {
(frame_buffer.create_view(), Some(surface_texture_view))
} else {
(surface_texture_view, None)
}
}
}
pub struct Gpu {
device: wgpu::Device,
queue: wgpu::Queue,
surface_handler: SurfaceHandler,
}
impl Gpu {
pub fn | (window: &Window) -> Self {
pollster::block_on(Self::new_async(window))
}
pub async fn new_async(window: &Window) -> Self {
let instance = wgpu::Instance::new(wgpu::Backends::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
compatible_surface: Some(&surface),
power_preference: wgpu::PowerPreference::HighPerformance,
})
.await
.expect("request adapter error");
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor::default(), None)
.await
.expect("request device error");
let preferred_texture_format = surface
.get_preferred_format(&adapter)
.expect("get preferred format error");
let window_size = window.inner_size();
let surface_configuration = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: preferred_texture_format,
width: window_size.width,
height: window_size.height,
present_mode: wgpu::PresentMode::Fifo,
};
surface.configure(&device, &surface_configuration);
let frame_buffer = FrameBuffer::new(
&device,
&SurfaceHandlerConfiguration {
width: window_size.width,
height: window_size.height,
sample_count: SampleCount::Msaa4x,
},
);
let surface_handler = SurfaceHandler {
surface,
surface_configuration,
frame_buffer: Some(frame_buffer),
};
Self {
device,
queue,
surface_handler,
}
}
pub fn resize_surface(&mut self, width: u32, height: u32) {
self.surface_handler.resize(&self.device, width, height);
}
pub fn device(&self) -> &wgpu::Device {
&self.device
}
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
}
fn as_buffer_contents<T>(slice: &[T]) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
slice.as_ptr() as *const u8,
std::mem::size_of::<T>() * slice.len(),
)
}
}
fn create_buffer<T>(
&self,
label: &str,
contents: &[T],
usage: wgpu::BufferUsages,
) -> wgpu::Buffer {
self.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(label),
contents: Self::as_buffer_contents(contents),
usage,
})
}
pub fn create_index_buffer(&self, contents: &[Index]) -> wgpu::Buffer {
self.create_buffer(
"Index Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::INDEX,
)
}
pub fn create_vertex_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Vertex Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::VERTEX,
)
}
pub fn create_uniform_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Uniform Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::UNIFORM,
)
}
pub fn create_texture_from_image(&self, image: DynamicImage) -> wgpu::Texture {
use image::GenericImageView;
let image_buffer = image.as_rgba8().expect("image format error");
let dimensions = image.dimensions();
let texture_extent_3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = self.device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: texture_extent_3d,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
self.queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
image_buffer,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(dimensions.0 << 2),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_extent_3d,
);
texture
}
pub fn preferred_texture_format(&self) -> wgpu::TextureFormat {
self.surface_handler.surface_configuration.format
}
pub fn multisample_state(&self) -> wgpu::MultisampleState {
self.surface_handler.multisample_state()
}
pub fn create_render_pass_resources(&self) -> Result<RenderPassResources, wgpu::SurfaceError> {
Ok(RenderPassResources {
command_encoder: self.device.create_command_encoder(&Default::default()),
surface_texture: self.surface_handler.surface.get_current_frame()?.output,
gpu: &self,
})
}
}
pub struct RenderPassResources<'a> {
pub command_encoder: wgpu::CommandEncoder,
surface_texture: wgpu::SurfaceTexture,
gpu: &'a Gpu,
}
impl RenderPassResources<'_> {
pub fn create_view_and_resolve_target(&self) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
self.gpu
.surface_handler
.create_view_and_resolve_target(&self.surface_texture)
}
}
pub struct MainLoop {
event_loop: EventLoop<()>,
window: Window,
}
impl MainLoop {
pub fn new(title: &str) -> MainLoop {
let event_loop = EventLoop::new();
let mut window_builder = winit::window::WindowBuilder::new();
window_builder.window = WindowAttributes {
title: title.to_owned(),
min_inner_size: Some(Size::Physical(PhysicalSize {
width: 16,
height: 16,
})),
inner_size: Some(Size::Physical(PhysicalSize {
width: 16 * 2u32.pow(6),
height: 9 * 2u32.pow(6),
})),
..Default::default()
};
let window = window_builder.build(&event_loop).unwrap();
Self { event_loop, window }
}
pub fn window(& | new | identifier_name |
lib.rs | : 1,
},
mip_level_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
});
Self {
texture,
sample_count,
}
}
pub fn create_view(&self) -> wgpu::TextureView {
self.texture.create_view(&Default::default())
}
}
pub struct SurfaceHandler {
surface: wgpu::Surface,
surface_configuration: wgpu::SurfaceConfiguration,
frame_buffer: Option<FrameBuffer>,
}
pub struct SurfaceHandlerConfiguration {
pub width: u32,
pub height: u32,
pub sample_count: SampleCount,
}
impl SurfaceHandler {
pub fn multisample_state(&self) -> wgpu::MultisampleState {
wgpu::MultisampleState {
count: self.sample_count() as u32,
..Default::default()
}
}
pub fn sample_count(&self) -> SampleCount {
if let Some(FrameBuffer { sample_count, .. }) = self.frame_buffer {
sample_count.into()
} else {
SampleCount::Single
}
}
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
self.configure(
&device,
&SurfaceHandlerConfiguration {
width,
height,
sample_count: self.sample_count(),
},
);
}
pub fn configure(&mut self, device: &wgpu::Device, config: &SurfaceHandlerConfiguration) {
self.surface_configuration = wgpu::SurfaceConfiguration {
width: config.width,
height: config.height,
..self.surface_configuration
};
self.surface.configure(&device, &self.surface_configuration);
match config.sample_count {
SampleCount::Single => {
self.frame_buffer = None;
}
SampleCount::Msaa4x => self.frame_buffer = Some(FrameBuffer::new(&device, &config)),
}
}
pub fn create_view_and_resolve_target(
&self,
surface_texture: &wgpu::SurfaceTexture,
) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
let surface_texture_view = surface_texture.texture.create_view(&Default::default());
if let Some(ref frame_buffer) = self.frame_buffer {
(frame_buffer.create_view(), Some(surface_texture_view))
} else {
(surface_texture_view, None)
}
}
}
pub struct Gpu {
device: wgpu::Device,
queue: wgpu::Queue,
surface_handler: SurfaceHandler,
}
impl Gpu {
pub fn new(window: &Window) -> Self {
pollster::block_on(Self::new_async(window))
}
pub async fn new_async(window: &Window) -> Self {
let instance = wgpu::Instance::new(wgpu::Backends::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
compatible_surface: Some(&surface),
power_preference: wgpu::PowerPreference::HighPerformance,
})
.await
.expect("request adapter error");
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor::default(), None)
.await
.expect("request device error");
let preferred_texture_format = surface
.get_preferred_format(&adapter)
.expect("get preferred format error");
let window_size = window.inner_size();
let surface_configuration = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: preferred_texture_format,
width: window_size.width,
height: window_size.height,
present_mode: wgpu::PresentMode::Fifo,
};
surface.configure(&device, &surface_configuration);
let frame_buffer = FrameBuffer::new(
&device,
&SurfaceHandlerConfiguration {
width: window_size.width,
height: window_size.height,
sample_count: SampleCount::Msaa4x,
},
);
let surface_handler = SurfaceHandler {
surface,
surface_configuration,
frame_buffer: Some(frame_buffer),
};
Self {
device,
queue,
surface_handler,
}
}
pub fn resize_surface(&mut self, width: u32, height: u32) {
self.surface_handler.resize(&self.device, width, height);
}
pub fn device(&self) -> &wgpu::Device {
&self.device
}
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
}
fn as_buffer_contents<T>(slice: &[T]) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
slice.as_ptr() as *const u8,
std::mem::size_of::<T>() * slice.len(),
)
}
}
fn create_buffer<T>(
&self,
label: &str,
contents: &[T],
usage: wgpu::BufferUsages,
) -> wgpu::Buffer {
self.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(label),
contents: Self::as_buffer_contents(contents),
usage,
})
}
pub fn create_index_buffer(&self, contents: &[Index]) -> wgpu::Buffer {
self.create_buffer(
"Index Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::INDEX,
)
}
pub fn create_vertex_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Vertex Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::VERTEX,
)
}
pub fn create_uniform_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Uniform Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::UNIFORM,
)
}
pub fn create_texture_from_image(&self, image: DynamicImage) -> wgpu::Texture {
use image::GenericImageView;
let image_buffer = image.as_rgba8().expect("image format error");
let dimensions = image.dimensions();
let texture_extent_3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = self.device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: texture_extent_3d,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
self.queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
image_buffer,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(dimensions.0 << 2),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_extent_3d,
);
texture
}
pub fn preferred_texture_format(&self) -> wgpu::TextureFormat {
self.surface_handler.surface_configuration.format
}
pub fn multisample_state(&self) -> wgpu::MultisampleState |
pub fn create_render_pass_resources(&self) -> Result<RenderPassResources, wgpu::SurfaceError> {
Ok(RenderPassResources {
command_encoder: self.device.create_command_encoder(&Default::default()),
surface_texture: self.surface_handler.surface.get_current_frame()?.output,
gpu: &self,
})
}
}
pub struct RenderPassResources<'a> {
pub command_encoder: wgpu::CommandEncoder,
surface_texture: wgpu::SurfaceTexture,
gpu: &'a Gpu,
}
impl RenderPassResources<'_> {
pub fn create_view_and_resolve_target(&self) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
self.gpu
.surface_handler
.create_view_and_resolve_target(&self.surface_texture)
}
}
pub struct MainLoop {
event_loop: EventLoop<()>,
window: Window,
}
impl MainLoop {
pub fn new(title: &str) -> MainLoop {
let event_loop = EventLoop::new();
let mut window_builder = winit::window::WindowBuilder::new();
window_builder.window = WindowAttributes {
title: title.to_owned(),
min_inner_size: Some(Size::Physical(PhysicalSize {
width: 16,
height: 16,
})),
inner_size: Some(Size::Physical(PhysicalSize {
width: 16 * 2u32.pow(6),
height: 9 * 2u32.pow(6),
})),
..Default::default()
};
let window = window_builder.build(&event_loop).unwrap();
Self { event_loop, window }
}
pub fn window | {
self.surface_handler.multisample_state()
} | identifier_body |
lib.rs | : 1,
},
mip_level_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
});
Self {
texture,
sample_count,
}
}
pub fn create_view(&self) -> wgpu::TextureView {
self.texture.create_view(&Default::default())
}
}
pub struct SurfaceHandler {
surface: wgpu::Surface,
surface_configuration: wgpu::SurfaceConfiguration,
frame_buffer: Option<FrameBuffer>,
}
pub struct SurfaceHandlerConfiguration {
pub width: u32,
pub height: u32,
pub sample_count: SampleCount,
}
impl SurfaceHandler {
pub fn multisample_state(&self) -> wgpu::MultisampleState {
wgpu::MultisampleState {
count: self.sample_count() as u32,
..Default::default()
}
}
pub fn sample_count(&self) -> SampleCount {
if let Some(FrameBuffer { sample_count, .. }) = self.frame_buffer {
sample_count.into()
} else {
SampleCount::Single
}
}
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
self.configure(
&device,
&SurfaceHandlerConfiguration {
width,
height,
sample_count: self.sample_count(),
},
);
}
pub fn configure(&mut self, device: &wgpu::Device, config: &SurfaceHandlerConfiguration) {
self.surface_configuration = wgpu::SurfaceConfiguration {
width: config.width,
height: config.height,
..self.surface_configuration
};
self.surface.configure(&device, &self.surface_configuration);
match config.sample_count {
SampleCount::Single => {
self.frame_buffer = None;
}
SampleCount::Msaa4x => self.frame_buffer = Some(FrameBuffer::new(&device, &config)),
}
}
pub fn create_view_and_resolve_target(
&self,
surface_texture: &wgpu::SurfaceTexture,
) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
let surface_texture_view = surface_texture.texture.create_view(&Default::default());
if let Some(ref frame_buffer) = self.frame_buffer {
(frame_buffer.create_view(), Some(surface_texture_view))
} else {
(surface_texture_view, None)
}
}
}
pub struct Gpu {
device: wgpu::Device,
queue: wgpu::Queue,
surface_handler: SurfaceHandler,
}
impl Gpu {
pub fn new(window: &Window) -> Self {
pollster::block_on(Self::new_async(window))
}
pub async fn new_async(window: &Window) -> Self {
let instance = wgpu::Instance::new(wgpu::Backends::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
compatible_surface: Some(&surface),
power_preference: wgpu::PowerPreference::HighPerformance,
})
.await
.expect("request adapter error");
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor::default(), None)
.await
.expect("request device error");
let preferred_texture_format = surface
.get_preferred_format(&adapter)
.expect("get preferred format error");
let window_size = window.inner_size();
let surface_configuration = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: preferred_texture_format,
width: window_size.width,
height: window_size.height,
present_mode: wgpu::PresentMode::Fifo,
};
surface.configure(&device, &surface_configuration);
let frame_buffer = FrameBuffer::new(
&device,
&SurfaceHandlerConfiguration {
width: window_size.width,
height: window_size.height,
sample_count: SampleCount::Msaa4x,
},
);
let surface_handler = SurfaceHandler {
surface,
surface_configuration,
frame_buffer: Some(frame_buffer),
};
Self {
device,
queue,
surface_handler,
}
}
pub fn resize_surface(&mut self, width: u32, height: u32) {
self.surface_handler.resize(&self.device, width, height);
}
pub fn device(&self) -> &wgpu::Device {
&self.device
}
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
}
fn as_buffer_contents<T>(slice: &[T]) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
slice.as_ptr() as *const u8,
std::mem::size_of::<T>() * slice.len(),
)
}
}
| usage: wgpu::BufferUsages,
) -> wgpu::Buffer {
self.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(label),
contents: Self::as_buffer_contents(contents),
usage,
})
}
pub fn create_index_buffer(&self, contents: &[Index]) -> wgpu::Buffer {
self.create_buffer(
"Index Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::INDEX,
)
}
pub fn create_vertex_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Vertex Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::VERTEX,
)
}
pub fn create_uniform_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Uniform Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::UNIFORM,
)
}
pub fn create_texture_from_image(&self, image: DynamicImage) -> wgpu::Texture {
use image::GenericImageView;
let image_buffer = image.as_rgba8().expect("image format error");
let dimensions = image.dimensions();
let texture_extent_3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = self.device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: texture_extent_3d,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
self.queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
image_buffer,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(dimensions.0 << 2),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_extent_3d,
);
texture
}
pub fn preferred_texture_format(&self) -> wgpu::TextureFormat {
self.surface_handler.surface_configuration.format
}
pub fn multisample_state(&self) -> wgpu::MultisampleState {
self.surface_handler.multisample_state()
}
pub fn create_render_pass_resources(&self) -> Result<RenderPassResources, wgpu::SurfaceError> {
Ok(RenderPassResources {
command_encoder: self.device.create_command_encoder(&Default::default()),
surface_texture: self.surface_handler.surface.get_current_frame()?.output,
gpu: &self,
})
}
}
pub struct RenderPassResources<'a> {
pub command_encoder: wgpu::CommandEncoder,
surface_texture: wgpu::SurfaceTexture,
gpu: &'a Gpu,
}
impl RenderPassResources<'_> {
pub fn create_view_and_resolve_target(&self) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
self.gpu
.surface_handler
.create_view_and_resolve_target(&self.surface_texture)
}
}
pub struct MainLoop {
event_loop: EventLoop<()>,
window: Window,
}
impl MainLoop {
pub fn new(title: &str) -> MainLoop {
let event_loop = EventLoop::new();
let mut window_builder = winit::window::WindowBuilder::new();
window_builder.window = WindowAttributes {
title: title.to_owned(),
min_inner_size: Some(Size::Physical(PhysicalSize {
width: 16,
height: 16,
})),
inner_size: Some(Size::Physical(PhysicalSize {
width: 16 * 2u32.pow(6),
height: 9 * 2u32.pow(6),
})),
..Default::default()
};
let window = window_builder.build(&event_loop).unwrap();
Self { event_loop, window }
}
pub fn window(&self | fn create_buffer<T>(
&self,
label: &str,
contents: &[T], | random_line_split |
cloudLibUtils.js | ;
const BUILD_TYPES = require('../constants').BUILD_TYPES;
const SOURCE_PATH = '/var/config/rest/iapps/f5-appsvcs/packages';
const IAPP_DIR = '/var/config/rest/iapps/f5-appsvcs';
const RETRY_OPTIONS = {
retries: 5,
delay: 1000
};
const readFile = function (path) {
return new Promise((resolve, reject) => {
fs.readFile(path, (error, data) => {
if (error) reject(error);
else resolve(data);
});
});
};
const getIControlPromise = (context, iControlOptions, failureMessage, options) => {
const promiseOptions = Object.assign({}, { checkStatus: true }, options);
return util.iControlRequest(context, iControlOptions)
.then((response) => {
if (promiseOptions.checkStatus && response.statusCode !== 200 && response.statusCode !== 202) {
throw new Error(`${failureMessage}: ${response.statusCode}`);
}
return response;
});
};
const install = function (context) {
log.info('Installing service discovery worker');
return Promise.resolve()
.then(() => getDiscoveryRpm(context, 'packageName'))
.then((discoveryRpmName) => uninstallDiscoveryRpm(context, discoveryRpmName))
.then(() => installDiscoveryRpm(context))
.then(() => waitForDiscoveryInit(context));
};
function getDiscoveryRpm(context, property) {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Get query',
crude: true,
send: JSON.stringify({ operation: 'QUERY' })
};
const args = [context, options, 'Failed to get discovery RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${JSON.parse(response.body).id}`,
method: 'GET',
ctype: 'application/json',
why: 'Get from response id',
crude: true
};
return getRPMInfo(context, opts, 1)
.then((info) => (property ? (info || {})[property] : info));
});
}
function getRPMInfo(context, options, attempts) {
if (attempts >= 5) {
log.debug('cloudLibUtils.getRPMName: Aborting after max retry attempts');
return undefined;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => getRPMInfo(context, options, attempts + 1));
}
let discoveryRpm;
body.queryResponse.forEach((pack) => {
if (pack.name === 'f5-service-discovery') {
discoveryRpm = pack;
}
});
return discoveryRpm;
})
.catch(() => getRPMInfo(context, options, attempts + 1));
}
function checkUninstallTask(context, options, attempts) {
if (attempts >= 5) {
log.debug('cloudLibUtils.checkUninstallTask: Aborting after max retry attempts');
return false;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => checkUninstallTask(context, options, attempts + 1));
}
return true;
})
.catch(() => checkUninstallTask(context, options, attempts + 1));
}
function | (context, discoveryRpm) {
// not installed
if (typeof discoveryRpm === 'undefined') {
return Promise.resolve(true);
}
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Uninstall discovery worker',
send: JSON.stringify({ operation: 'UNINSTALL', packageName: discoveryRpm }),
crude: true
};
log.debug('Uninstalling service discovery worker');
const args = [context, options, 'Failed to uninstall RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const uninstallTaskId = JSON.parse(response.body).id;
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${uninstallTaskId}`,
method: 'GET',
ctype: 'application/json',
why: 'Get status of uninstall',
crude: true
};
return checkUninstallTask(context, opts, 1);
})
.then((uninstalled) => {
if (!uninstalled) {
log.debug('Warning: Uninstall may not have completely finished.');
}
return undefined;
})
.catch((e) => {
log.debug(`Error during discoveryWorker uninstall: ${e.message} at ${e.stackTrace}`);
return undefined;
});
}
function copyDiscoveryRpm(context) {
const fileName = fs.readdirSync(SOURCE_PATH).find((name) => name.indexOf('f5-service-discovery') >= 0);
return new Promise((resolve, reject) => {
iappUtil.copyToHost(
context,
`${SOURCE_PATH}/${fileName}`,
(error) => {
if (error) reject(error);
else resolve(fileName);
}
);
});
}
function installDiscoveryRpm(context) {
// TODO add version checking
return promiseUtil.retryPromise(copyDiscoveryRpm, RETRY_OPTIONS, [context])
.then((fileName) => {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Install discovery worker',
crude: true,
send: JSON.stringify({
operation: 'INSTALL',
packageFilePath: `/var/config/rest/downloads/${fileName}`
})
};
log.debug('Installing discovery worker');
// There is no status code returned for this request
const args = [context, options, 'Failed to install discovery RPM', { checkStatus: false }];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args);
});
}
function waitForDiscoveryInit(context) {
const options = {
path: '/mgmt/shared/service-discovery/info',
method: 'GET',
why: 'Get discovery worker info',
crude: true
};
const args = [context, options, 'Failed waiting for discovery to start'];
return promiseUtil.retryPromise(getIControlPromise, { retries: 60, delay: 1000 }, args);
}
function checkVersions(desiredVersions, foundVersions) {
if (desiredVersions.length !== foundVersions.length) {
let message = `Length of desired versions (${desiredVersions.length}) `;
message += `does not equal length of found versions (${foundVersions.length})`;
throw new Error(message);
}
return desiredVersions.every((desired, i) => semver.eq(desired, foundVersions[i]));
}
function getDesiredVersions() {
return readFile(`${IAPP_DIR}/lib/versions.json`)
.then((data) => JSON.parse(data));
}
function findCloudLibVersions(context) {
const versions = {};
const requests = [];
requests.push(
getDiscoveryRpm(context)
.then((rpmInfo) => {
versions.discoveryWorker = rpmInfo ? `${rpmInfo.version}-${rpmInfo.release}` : '0.0.0';
})
);
return Promise.all(requests).then(() => versions);
}
function getFoundVersions(context) {
return Promise.resolve({})
.then((versions) => findCloudLibVersions(context).then((results) => Object.assign(versions, results)));
}
function needCloudLibsInstall(context, fromStartup) {
if (context.host.deviceType === DEVICE_TYPES.BIG_IQ) {
return false;
}
// We can't install SD when running in a container on startup (no target),
// But we still need to when it's during a request
if (fromStartup && context.host.deviceType === DEVICE_TYPES.CONTAINER) {
return false;
}
return true;
}
function getIsInstalled(context) {
if (!needCloudLibsInstall(context)) {
return Promise.resolve(true);
}
function toArray(versions) {
return [
versions.discoveryWorker
];
}
let desiredVersions = [];
let foundVersions = [];
log.debug('Checking cloud-libs versions');
return getDesiredVersions()
.then((o) => {
log.debug(`Desired versions: ${JSON.stringify(o)}`);
desiredVersions = toArray(o);
})
.then(() => getFoundVersions(context))
.then((o) => {
log.debug(`Discovered versions: ${JSON.stringify(o)}`);
foundVersions = toArray(o);
})
.then(() => checkVersions(desiredVersions, foundVersions))
.then((result) => {
log.debug | uninstallDiscoveryRpm | identifier_name |
cloudLibUtils.js | const RETRY_OPTIONS = {
retries: 5,
delay: 1000
};
const readFile = function (path) {
return new Promise((resolve, reject) => {
fs.readFile(path, (error, data) => {
if (error) reject(error);
else resolve(data);
});
});
};
const getIControlPromise = (context, iControlOptions, failureMessage, options) => {
const promiseOptions = Object.assign({}, { checkStatus: true }, options);
return util.iControlRequest(context, iControlOptions)
.then((response) => {
if (promiseOptions.checkStatus && response.statusCode !== 200 && response.statusCode !== 202) {
throw new Error(`${failureMessage}: ${response.statusCode}`);
}
return response;
});
};
const install = function (context) {
log.info('Installing service discovery worker');
return Promise.resolve()
.then(() => getDiscoveryRpm(context, 'packageName'))
.then((discoveryRpmName) => uninstallDiscoveryRpm(context, discoveryRpmName))
.then(() => installDiscoveryRpm(context))
.then(() => waitForDiscoveryInit(context));
};
function getDiscoveryRpm(context, property) {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Get query',
crude: true,
send: JSON.stringify({ operation: 'QUERY' })
};
const args = [context, options, 'Failed to get discovery RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${JSON.parse(response.body).id}`,
method: 'GET',
ctype: 'application/json',
why: 'Get from response id',
crude: true
};
return getRPMInfo(context, opts, 1)
.then((info) => (property ? (info || {})[property] : info));
});
}
function getRPMInfo(context, options, attempts) {
if (attempts >= 5) {
log.debug('cloudLibUtils.getRPMName: Aborting after max retry attempts');
return undefined;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => getRPMInfo(context, options, attempts + 1));
}
let discoveryRpm;
body.queryResponse.forEach((pack) => {
if (pack.name === 'f5-service-discovery') {
discoveryRpm = pack;
}
});
return discoveryRpm;
})
.catch(() => getRPMInfo(context, options, attempts + 1));
}
function checkUninstallTask(context, options, attempts) {
if (attempts >= 5) {
log.debug('cloudLibUtils.checkUninstallTask: Aborting after max retry attempts');
return false;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => checkUninstallTask(context, options, attempts + 1));
}
return true;
})
.catch(() => checkUninstallTask(context, options, attempts + 1));
}
function uninstallDiscoveryRpm(context, discoveryRpm) {
// not installed
if (typeof discoveryRpm === 'undefined') {
return Promise.resolve(true);
}
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Uninstall discovery worker',
send: JSON.stringify({ operation: 'UNINSTALL', packageName: discoveryRpm }),
crude: true
};
log.debug('Uninstalling service discovery worker');
const args = [context, options, 'Failed to uninstall RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const uninstallTaskId = JSON.parse(response.body).id;
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${uninstallTaskId}`,
method: 'GET',
ctype: 'application/json',
why: 'Get status of uninstall',
crude: true
};
return checkUninstallTask(context, opts, 1);
})
.then((uninstalled) => {
if (!uninstalled) {
log.debug('Warning: Uninstall may not have completely finished.');
}
return undefined;
})
.catch((e) => {
log.debug(`Error during discoveryWorker uninstall: ${e.message} at ${e.stackTrace}`);
return undefined;
});
}
function copyDiscoveryRpm(context) {
const fileName = fs.readdirSync(SOURCE_PATH).find((name) => name.indexOf('f5-service-discovery') >= 0);
return new Promise((resolve, reject) => {
iappUtil.copyToHost(
context,
`${SOURCE_PATH}/${fileName}`,
(error) => {
if (error) reject(error);
else resolve(fileName);
}
);
});
}
function installDiscoveryRpm(context) {
// TODO add version checking
return promiseUtil.retryPromise(copyDiscoveryRpm, RETRY_OPTIONS, [context])
.then((fileName) => {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Install discovery worker',
crude: true,
send: JSON.stringify({
operation: 'INSTALL',
packageFilePath: `/var/config/rest/downloads/${fileName}`
})
};
log.debug('Installing discovery worker');
// There is no status code returned for this request
const args = [context, options, 'Failed to install discovery RPM', { checkStatus: false }];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args);
});
}
function waitForDiscoveryInit(context) {
const options = {
path: '/mgmt/shared/service-discovery/info',
method: 'GET',
why: 'Get discovery worker info',
crude: true
};
const args = [context, options, 'Failed waiting for discovery to start'];
return promiseUtil.retryPromise(getIControlPromise, { retries: 60, delay: 1000 }, args);
}
function checkVersions(desiredVersions, foundVersions) {
if (desiredVersions.length !== foundVersions.length) {
let message = `Length of desired versions (${desiredVersions.length}) `;
message += `does not equal length of found versions (${foundVersions.length})`;
throw new Error(message);
}
return desiredVersions.every((desired, i) => semver.eq(desired, foundVersions[i]));
}
function getDesiredVersions() {
return readFile(`${IAPP_DIR}/lib/versions.json`)
.then((data) => JSON.parse(data));
}
function findCloudLibVersions(context) {
const versions = {};
const requests = [];
requests.push(
getDiscoveryRpm(context)
.then((rpmInfo) => {
versions.discoveryWorker = rpmInfo ? `${rpmInfo.version}-${rpmInfo.release}` : '0.0.0';
})
);
return Promise.all(requests).then(() => versions);
}
function getFoundVersions(context) {
return Promise.resolve({})
.then((versions) => findCloudLibVersions(context).then((results) => Object.assign(versions, results)));
}
function needCloudLibsInstall(context, fromStartup) {
if (context.host.deviceType === DEVICE_TYPES.BIG_IQ) {
return false;
}
// We can't install SD when running in a container on startup (no target),
// But we still need to when it's during a request
if (fromStartup && context.host.deviceType === DEVICE_TYPES.CONTAINER) {
return false;
}
return true;
}
function getIsInstalled(context) {
if (!needCloudLibsInstall(context)) {
return Promise.resolve(true);
}
function toArray(versions) {
return [
versions.discoveryWorker
];
}
let desiredVersions = [];
let foundVersions = [];
log.debug('Checking cloud-libs versions');
return getDesiredVersions()
.then((o) => {
log.debug(`Desired versions: ${JSON.stringify(o)}`);
desiredVersions = toArray(o);
})
.then(() => getFoundVersions(context))
.then((o) => {
log.debug(`Discovered versions: ${JSON.stringify(o)}`);
foundVersions = toArray(o);
})
.then(() => checkVersions(desiredVersions, foundVersions))
.then((result) => {
log.debug(`Versions match: ${result}`);
return result;
});
}
function ensureInstall(context) | {
return getIsInstalled(context)
.then((isInstalled) => (isInstalled ? Promise.resolve() : install(context)));
} | identifier_body |
|
cloudLibUtils.js | const log = require('../log');
const util = require('./util');
const iappUtil = require('./iappUtil');
const constants = require('../constants');
const DEVICE_TYPES = require('../constants').DEVICE_TYPES;
const BUILD_TYPES = require('../constants').BUILD_TYPES;
const SOURCE_PATH = '/var/config/rest/iapps/f5-appsvcs/packages';
const IAPP_DIR = '/var/config/rest/iapps/f5-appsvcs';
const RETRY_OPTIONS = {
retries: 5,
delay: 1000
};
const readFile = function (path) {
return new Promise((resolve, reject) => {
fs.readFile(path, (error, data) => {
if (error) reject(error);
else resolve(data);
});
});
};
const getIControlPromise = (context, iControlOptions, failureMessage, options) => {
const promiseOptions = Object.assign({}, { checkStatus: true }, options);
return util.iControlRequest(context, iControlOptions)
.then((response) => {
if (promiseOptions.checkStatus && response.statusCode !== 200 && response.statusCode !== 202) {
throw new Error(`${failureMessage}: ${response.statusCode}`);
}
return response;
});
};
const install = function (context) {
log.info('Installing service discovery worker');
return Promise.resolve()
.then(() => getDiscoveryRpm(context, 'packageName'))
.then((discoveryRpmName) => uninstallDiscoveryRpm(context, discoveryRpmName))
.then(() => installDiscoveryRpm(context))
.then(() => waitForDiscoveryInit(context));
};
function getDiscoveryRpm(context, property) {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Get query',
crude: true,
send: JSON.stringify({ operation: 'QUERY' })
};
const args = [context, options, 'Failed to get discovery RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${JSON.parse(response.body).id}`,
method: 'GET',
ctype: 'application/json',
why: 'Get from response id',
crude: true
};
return getRPMInfo(context, opts, 1)
.then((info) => (property ? (info || {})[property] : info));
});
}
function getRPMInfo(context, options, attempts) {
if (attempts >= 5) {
log.debug('cloudLibUtils.getRPMName: Aborting after max retry attempts');
return undefined;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => getRPMInfo(context, options, attempts + 1));
}
let discoveryRpm;
body.queryResponse.forEach((pack) => {
if (pack.name === 'f5-service-discovery') {
discoveryRpm = pack;
}
});
return discoveryRpm;
})
.catch(() => getRPMInfo(context, options, attempts + 1));
}
function checkUninstallTask(context, options, attempts) {
if (attempts >= 5) {
log.debug('cloudLibUtils.checkUninstallTask: Aborting after max retry attempts');
return false;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => checkUninstallTask(context, options, attempts + 1));
}
return true;
})
.catch(() => checkUninstallTask(context, options, attempts + 1));
}
function uninstallDiscoveryRpm(context, discoveryRpm) {
// not installed
if (typeof discoveryRpm === 'undefined') {
return Promise.resolve(true);
}
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Uninstall discovery worker',
send: JSON.stringify({ operation: 'UNINSTALL', packageName: discoveryRpm }),
crude: true
};
log.debug('Uninstalling service discovery worker');
const args = [context, options, 'Failed to uninstall RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const uninstallTaskId = JSON.parse(response.body).id;
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${uninstallTaskId}`,
method: 'GET',
ctype: 'application/json',
why: 'Get status of uninstall',
crude: true
};
return checkUninstallTask(context, opts, 1);
})
.then((uninstalled) => {
if (!uninstalled) {
log.debug('Warning: Uninstall may not have completely finished.');
}
return undefined;
})
.catch((e) => {
log.debug(`Error during discoveryWorker uninstall: ${e.message} at ${e.stackTrace}`);
return undefined;
});
}
function copyDiscoveryRpm(context) {
const fileName = fs.readdirSync(SOURCE_PATH).find((name) => name.indexOf('f5-service-discovery') >= 0);
return new Promise((resolve, reject) => {
iappUtil.copyToHost(
context,
`${SOURCE_PATH}/${fileName}`,
(error) => {
if (error) reject(error);
else resolve(fileName);
}
);
});
}
function installDiscoveryRpm(context) {
// TODO add version checking
return promiseUtil.retryPromise(copyDiscoveryRpm, RETRY_OPTIONS, [context])
.then((fileName) => {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Install discovery worker',
crude: true,
send: JSON.stringify({
operation: 'INSTALL',
packageFilePath: `/var/config/rest/downloads/${fileName}`
})
};
log.debug('Installing discovery worker');
// There is no status code returned for this request
const args = [context, options, 'Failed to install discovery RPM', { checkStatus: false }];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args);
});
}
function waitForDiscoveryInit(context) {
const options = {
path: '/mgmt/shared/service-discovery/info',
method: 'GET',
why: 'Get discovery worker info',
crude: true
};
const args = [context, options, 'Failed waiting for discovery to start'];
return promiseUtil.retryPromise(getIControlPromise, { retries: 60, delay: 1000 }, args);
}
function checkVersions(desiredVersions, foundVersions) {
if (desiredVersions.length !== foundVersions.length) {
let message = `Length of desired versions (${desiredVersions.length}) `;
message += `does not equal length of found versions (${foundVersions.length})`;
throw new Error(message);
}
return desiredVersions.every((desired, i) => semver.eq(desired, foundVersions[i]));
}
function getDesiredVersions() {
return readFile(`${IAPP_DIR}/lib/versions.json`)
.then((data) => JSON.parse(data));
}
function findCloudLibVersions(context) {
const versions = {};
const requests = [];
requests.push(
getDiscoveryRpm(context)
.then((rpmInfo) => {
versions.discoveryWorker = rpmInfo ? `${rpmInfo.version}-${rpmInfo.release}` : '0.0.0';
})
);
return Promise.all(requests).then(() => versions);
}
function getFoundVersions(context) {
return Promise.resolve({})
.then((versions) => findCloudLibVersions(context).then((results) => Object.assign(versions, results)));
}
function needCloudLibsInstall(context, fromStartup) {
if (context.host.deviceType === DEVICE_TYPES.BIG_IQ) {
return false;
}
// We can't install SD when running in a container on startup (no target),
// But we still need to when it's during a request
if (fromStartup && context.host.deviceType === DEVICE_TYPES.CONTAINER) {
return false;
}
return true;
}
function getIsInstalled(context) {
if (!needCloudLibsInstall(context)) {
return Promise.resolve(true);
}
function toArray(versions) {
return [
versions.discoveryWorker
];
}
let desiredVersions = [];
let foundVersions = [];
log.debug('Checking cloud-libs versions');
return getDesiredVersions()
.then((o) => {
log.debug(`Desired versions: ${JSON.stringify(o)}`);
desiredVersions = toArray(o);
| const semver = require('semver');
const promiseUtil = require('@f5devcentral/atg-shared-utilities').promiseUtils; | random_line_split |
|
cloudLibUtils.js | ;
const BUILD_TYPES = require('../constants').BUILD_TYPES;
const SOURCE_PATH = '/var/config/rest/iapps/f5-appsvcs/packages';
const IAPP_DIR = '/var/config/rest/iapps/f5-appsvcs';
const RETRY_OPTIONS = {
retries: 5,
delay: 1000
};
const readFile = function (path) {
return new Promise((resolve, reject) => {
fs.readFile(path, (error, data) => {
if (error) reject(error);
else resolve(data);
});
});
};
const getIControlPromise = (context, iControlOptions, failureMessage, options) => {
const promiseOptions = Object.assign({}, { checkStatus: true }, options);
return util.iControlRequest(context, iControlOptions)
.then((response) => {
if (promiseOptions.checkStatus && response.statusCode !== 200 && response.statusCode !== 202) |
return response;
});
};
const install = function (context) {
log.info('Installing service discovery worker');
return Promise.resolve()
.then(() => getDiscoveryRpm(context, 'packageName'))
.then((discoveryRpmName) => uninstallDiscoveryRpm(context, discoveryRpmName))
.then(() => installDiscoveryRpm(context))
.then(() => waitForDiscoveryInit(context));
};
function getDiscoveryRpm(context, property) {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Get query',
crude: true,
send: JSON.stringify({ operation: 'QUERY' })
};
const args = [context, options, 'Failed to get discovery RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${JSON.parse(response.body).id}`,
method: 'GET',
ctype: 'application/json',
why: 'Get from response id',
crude: true
};
return getRPMInfo(context, opts, 1)
.then((info) => (property ? (info || {})[property] : info));
});
}
function getRPMInfo(context, options, attempts) {
if (attempts >= 5) {
log.debug('cloudLibUtils.getRPMName: Aborting after max retry attempts');
return undefined;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => getRPMInfo(context, options, attempts + 1));
}
let discoveryRpm;
body.queryResponse.forEach((pack) => {
if (pack.name === 'f5-service-discovery') {
discoveryRpm = pack;
}
});
return discoveryRpm;
})
.catch(() => getRPMInfo(context, options, attempts + 1));
}
function checkUninstallTask(context, options, attempts) {
if (attempts >= 5) {
log.debug('cloudLibUtils.checkUninstallTask: Aborting after max retry attempts');
return false;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => checkUninstallTask(context, options, attempts + 1));
}
return true;
})
.catch(() => checkUninstallTask(context, options, attempts + 1));
}
function uninstallDiscoveryRpm(context, discoveryRpm) {
// not installed
if (typeof discoveryRpm === 'undefined') {
return Promise.resolve(true);
}
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Uninstall discovery worker',
send: JSON.stringify({ operation: 'UNINSTALL', packageName: discoveryRpm }),
crude: true
};
log.debug('Uninstalling service discovery worker');
const args = [context, options, 'Failed to uninstall RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const uninstallTaskId = JSON.parse(response.body).id;
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${uninstallTaskId}`,
method: 'GET',
ctype: 'application/json',
why: 'Get status of uninstall',
crude: true
};
return checkUninstallTask(context, opts, 1);
})
.then((uninstalled) => {
if (!uninstalled) {
log.debug('Warning: Uninstall may not have completely finished.');
}
return undefined;
})
.catch((e) => {
log.debug(`Error during discoveryWorker uninstall: ${e.message} at ${e.stackTrace}`);
return undefined;
});
}
function copyDiscoveryRpm(context) {
const fileName = fs.readdirSync(SOURCE_PATH).find((name) => name.indexOf('f5-service-discovery') >= 0);
return new Promise((resolve, reject) => {
iappUtil.copyToHost(
context,
`${SOURCE_PATH}/${fileName}`,
(error) => {
if (error) reject(error);
else resolve(fileName);
}
);
});
}
function installDiscoveryRpm(context) {
// TODO add version checking
return promiseUtil.retryPromise(copyDiscoveryRpm, RETRY_OPTIONS, [context])
.then((fileName) => {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Install discovery worker',
crude: true,
send: JSON.stringify({
operation: 'INSTALL',
packageFilePath: `/var/config/rest/downloads/${fileName}`
})
};
log.debug('Installing discovery worker');
// There is no status code returned for this request
const args = [context, options, 'Failed to install discovery RPM', { checkStatus: false }];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args);
});
}
function waitForDiscoveryInit(context) {
const options = {
path: '/mgmt/shared/service-discovery/info',
method: 'GET',
why: 'Get discovery worker info',
crude: true
};
const args = [context, options, 'Failed waiting for discovery to start'];
return promiseUtil.retryPromise(getIControlPromise, { retries: 60, delay: 1000 }, args);
}
function checkVersions(desiredVersions, foundVersions) {
if (desiredVersions.length !== foundVersions.length) {
let message = `Length of desired versions (${desiredVersions.length}) `;
message += `does not equal length of found versions (${foundVersions.length})`;
throw new Error(message);
}
return desiredVersions.every((desired, i) => semver.eq(desired, foundVersions[i]));
}
function getDesiredVersions() {
return readFile(`${IAPP_DIR}/lib/versions.json`)
.then((data) => JSON.parse(data));
}
function findCloudLibVersions(context) {
const versions = {};
const requests = [];
requests.push(
getDiscoveryRpm(context)
.then((rpmInfo) => {
versions.discoveryWorker = rpmInfo ? `${rpmInfo.version}-${rpmInfo.release}` : '0.0.0';
})
);
return Promise.all(requests).then(() => versions);
}
function getFoundVersions(context) {
return Promise.resolve({})
.then((versions) => findCloudLibVersions(context).then((results) => Object.assign(versions, results)));
}
function needCloudLibsInstall(context, fromStartup) {
if (context.host.deviceType === DEVICE_TYPES.BIG_IQ) {
return false;
}
// We can't install SD when running in a container on startup (no target),
// but we still need to install it when the check happens during a request
if (fromStartup && context.host.deviceType === DEVICE_TYPES.CONTAINER) {
return false;
}
return true;
}
function getIsInstalled(context) {
if (!needCloudLibsInstall(context)) {
return Promise.resolve(true);
}
function toArray(versions) {
return [
versions.discoveryWorker
];
}
let desiredVersions = [];
let foundVersions = [];
log.debug('Checking cloud-libs versions');
return getDesiredVersions()
.then((o) => {
log.debug(`Desired versions: ${JSON.stringify(o)}`);
desiredVersions = toArray(o);
})
.then(() => getFoundVersions(context))
.then((o) => {
log.debug(`Discovered versions: ${JSON.stringify(o)}`);
foundVersions = toArray(o);
})
.then(() => checkVersions(desiredVersions, foundVersions))
.then((result) => {
log.debug | {
throw new Error(`${failureMessage}: ${response.statusCode}`);
} | conditional_block |
ad_grabber_util.py | _to_visit = training_sites[train_category]
with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:
fwtr.write('session_str : %s\n' % session_date)
fwtr.write('machine_info : %s\n' % machineid)
fwtr.write('vmid : %s\n' % vmid)
fwtr.write('profile : %s\n' % profile)
fwtr.write('train_mode : %s\n' % train_mode)
fwtr.write('num_of_refresh : %d\n' % num_of_refresh)
fwtr.write('training_topic : %s\n' % train_category)
fwtr.write('training_sites : ')
for site in train_sites_to_visit:
fwtr.write('%s, ' % site)
fwtr.write('\nnum_of_train_sites : %d\n' % len(train_sites_to_visit))
fwtr.write('test_sites : ')
for site in test_sites:
fwtr.write('%s, ' % site[1])
fwtr.write('\nnum_of_test_sites : %d\n' % len(test_sites))
def generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time):
"""
Generates stats on
- uniq ads seen on the test sites
- total number of ads seen on the test sites
- total number of ads seen on all test sites
- total number of uniq ads seen on all test sites
"""
try:
os.makedirs(export_folder)
except OSError:
pass
# to be read and inserted into db
totalads = 0 # total number of ads seen during this session
totaluniqads = len(ads) # does not support multicategories at this point
# for each category, for each test site, count total number of ads seen
totalad_category = {}
# for each category, for each test site, count total number of uniq ads seen
uniqad_category = {}
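# Shape sketch of the two aggregation dicts built below (hypothetical values, not from a real run):
#   totalad_category = {'sports': {'example.com': 12}}
#   uniqad_category  = {'sports': {'example.com': 7}}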
with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr:
bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic,\
Site-Context, BugCount, BugSrc\n')
for train_category, cat_dict in results.items():
totalad_category[train_category] = {}
uniqad_category[train_category] = {}
for test_site, bug_dict_list in cat_dict.items():
total_ads = 0 # for each site
uniq_ads = [] # for each site
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if bug.is_ad():
uuid = bug.get_uuid()
bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(uuid, test_site,
refresh_num, train_category, 'N/A', bugcount, bug.get_src()))
total_ads += bugcount
if bug not in uniq_ads:
uniq_ads.append(bug)
totalad_category[train_category][test_site] = total_ads
uniqad_category[train_category][test_site] = len(uniq_ads)
totalads += total_ads # global count for total ads
with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr:
# write some metadata information about this session
ses_wtr.write('#VMID: %s\n' % vmid)
ses_wtr.write('#Session-Date: %s\n' % session_date)
ses_wtr.write('#Time to complete: %s\n' % process_ex_time)
ses_wtr.write('#Training Categories: %s\n' % str(results.keys()))
ses_wtr.write('#Total Number of ads: %d\n' % totalads)
ses_wtr.write('#Total Uniq ads: %d\n\n' % totaluniqads)
ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\n')
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
num_of_visit = len(bug_dict_list)
ses_wtr.write('{0}, {1}, {2}, {3}, {4}\n'.format(train_category,
test_site, num_of_visit, totalad_category[train_category][test_site],
uniqad_category[train_category][test_site]))
def export_ads(results,out_folder):
"""
This function creates a csv file which contains all the unique ads seen in
each test site (including all the refreshes)
TODO update the doc
results is a dictionary of the following
results = { Category : Value, ... }
value = { test_site_url : [ result1, result2, ... resultN], ... }
resultN : { WebBug : count, ... }
"""
try:
os.makedirs(out_folder)
except OSError:
LOG.debug('Creating output file folder ...')
export_ad_counter = 1 # assign unique number to ads for export to mturk
#short_listed_companies = ['google adsense', 'doubleclick']
with open(os.path.join(out_folder,'ad_labelling.csv'), 'w') as fwtr:
# write the titles
fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\n'.format(\
'Ad#', 'Company', 'FileType', 'Ad-Category', 'Website-URL',\
'Refresh-Num','Training-Topic', 'Context-of-site', 'Total', 'Ad-src'))
# make sure we only add one ad
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if not bug.is_ad():
#TODO check bug_type in ffext
continue
if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:
file_name = '%d.%s' % (export_ad_counter, bug.get_filetype())
new_location = os.path.join(out_folder, file_name)
copy2(bug.get_filepath(), new_location)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9},\
\n'.format(file_name, bug.get_name(), bug.get_filetype(),
'' ,test_site, refresh_num, train_category, 'N/A', bugcount,
bug.get_src()))
export_ad_counter += 1
def get_bug_type(file_type):
is_ad = False
bug_type = 'text'
if file_type.startswith('HTML') or \
file_type.startswith('ASCII') or \
file_type.startswith('UTF-8 Unicode English') or \
file_type.startswith('very short') :
bug_type = 'text'
elif (file_type.endswith('1 x 1') and file_type.startswith('GIF')):
bug_type = 'gif'
elif file_type.startswith('PNG'):
bug_type = 'png'
is_ad = True
elif file_type.startswith('GIF'):
bug_type = 'gif'
is_ad = True
elif file_type.startswith('Macromedia Flash'):
bug_type = 'swf'
is_ad = True
elif file_type.startswith('JPEG'):
bug_type = 'jpg'
is_ad = True
return bug_type, is_ad
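# Examples of the mapping above (the strings come from `file -b` and may vary by platform):
#   get_bug_type('GIF image data, version 89a, 1 x 1')  -> ('gif', False)  # 1x1 tracking pixel
#   get_bug_type('Macromedia Flash data, version 10')   -> ('swf', True)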
def parse_buginfo(entry):
"""
Takes the json decoded bug information and inserts it into a WebBug instance
"""
bugname = entry['bug']['name'].replace(' ','').replace('/','_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try :
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ""
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
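# Expected `entry` layout, inferred from the lookups above (field values are hypothetical):
#   {'bug': {'name': 'DoubleClick', 'pattern': '...', 'type': 'ad', 'affiliation': 'Google'},
#    'ent': {'policyContentLocation': 'http://...', 'pathname': '/ads/...'}}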
def curl_worker_legacy(args):
| output_dir = args[0]
saved_file_name = args[1]
path = args[2]
bug = args[3]
curl_result_queue = args[4]
# subprocess.call(['curl', '-o', path , bug.get_src() ])
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])
# Use the unix tool 'file' to check filetype
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\
filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype) | identifier_body |
|
ad_grabber_util.py |
try:
bug.set_dimension(height, width)
dimension = '%s-%s' % (height, width)
# check all the images in the bin with the dimensions
m_list = target_bin[dimension]
dup = None
for m in m_list:
if check_duplicate(bug_filepath, m.get_filepath()):
dup = m
break
if dup:
# check if the duplicate ad came from a different test site
if test_site in ads[dup]:
ads[dup][test_site] += bug_count
else :
ads[dup] = {test_site : bug_count}
# delete old bug reference, add new one and point to duplicated
# bug
del bug_dict[bug]
bug_dict[dup] = bug_count
else:
target_bin[dimension].append(bug)
ads[bug] = {test_site : bug_count}
# tally up the results
except KeyError: # The bin hasn't been created
target_bin[dimension] = [bug]
ads[bug] = {test_site : bug_count}
# else:
# notads
return ads,error_bugs
def export_uniq_ads(ads, out_folder, rel_folder):
"""
Takes all the uniq ads seen in this session and writes its metadata
information to a csv file
"""
try :
os.makedirs(out_folder)
os.makedirs(os.path.join(out_folder, rel_folder))
except OSError:
LOG.debug('Creating output folder')
fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')
# Relative location = Location of the ad within this current session
# Global location, added when an ad is matched with existing ads in DB
fwtr.write('#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\n')
for bug in ads.keys():
height, width = bug.get_dimension()
filepath = bug.get_filepath()
name = bug.get_name()
src = bug.get_src()
filetype = bug.get_filetype()
new_uuidname = '%s.%s' % (uuid1(), filetype)
bug.set_uuid(new_uuidname)
new_filepath = os.path.join(out_folder, new_uuidname)
rel_filepath = os.path.join(rel_folder, new_uuidname)
copy2(filepath, new_filepath)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(new_uuidname,
name, filetype, height, width, rel_filepath, src))
fwtr.close()
return ads
def write_run_info(RUNINFO_DIR, session_date):
# write to a file in runinfo_dir to tell automation script this run is done
fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)
with open(fp, 'w') as fwtr:
fwtr.write('OK')
def write_session_info(vmid, machineid, profile, session_date, train_mode, training_sites,
test_sites, num_of_refresh, export_folder):
train_category = training_sites.keys()[0]
train_sites_to_visit = training_sites[train_category]
with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:
fwtr.write('session_str : %s\n' % session_date)
fwtr.write('machine_info : %s\n' % machineid)
fwtr.write('vmid : %s\n' % vmid)
fwtr.write('profile : %s\n' % profile)
fwtr.write('train_mode : %s\n' % train_mode)
fwtr.write('num_of_refresh : %d\n' % num_of_refresh)
fwtr.write('training_topic : %s\n' % train_category)
fwtr.write('training_sites : ')
for site in train_sites_to_visit:
fwtr.write('%s, ' % site)
fwtr.write('\nnum_of_train_sites : %d\n' % len(train_sites_to_visit))
fwtr.write('test_sites : ')
for site in test_sites:
fwtr.write('%s, ' % site[1])
fwtr.write('\nnum_of_test_sites : %d\n' % len(test_sites))
def generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time):
"""
Generates stats on
- uniq ads seen on the test sites
- total number of ads seen on the test sites
- total number of ads seen on all test sites
- total number of uniq ads seen on all test sites
"""
try:
os.makedirs(export_folder)
except OSError:
pass
# to be read and inserted into db
totalads = 0 # total number of ads seen during this session
totaluniqads = len(ads) # does not support multicategories at this point
# for each category, for each test site, count total number of ads seen
totalad_category = {}
# for each category, for each test site, count total number of uniq ads seen
uniqad_category = {}
with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr:
bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic,\
Site-Context, BugCount, BugSrc\n')
for train_category, cat_dict in results.items():
totalad_category[train_category] = {}
uniqad_category[train_category] = {}
for test_site, bug_dict_list in cat_dict.items():
total_ads = 0 # for each site
uniq_ads = [] # for each site
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if bug.is_ad():
uuid = bug.get_uuid()
bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(uuid, test_site,
refresh_num, train_category, 'N/A', bugcount, bug.get_src()))
total_ads += bugcount
if bug not in uniq_ads:
uniq_ads.append(bug)
totalad_category[train_category][test_site] = total_ads
uniqad_category[train_category][test_site] = len(uniq_ads)
totalads += total_ads # global count for total ads
with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr:
# write some metadata information about this session
ses_wtr.write('#VMID: %s\n' % vmid)
ses_wtr.write('#Session-Date: %s\n' % session_date)
ses_wtr.write('#Time to complete: %s\n' % process_ex_time)
ses_wtr.write('#Training Categories: %s\n' % str(results.keys()))
ses_wtr.write('#Total Number of ads: %d\n' % totalads)
ses_wtr.write('#Total Uniq ads: %d\n\n' % totaluniqads)
ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\n')
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
num_of_visit = len(bug_dict_list)
ses_wtr.write('{0}, {1}, {2}, {3}, {4}\n'.format(train_category,
test_site, num_of_visit, totalad_category[train_category][test_site],
uniqad_category[train_category][test_site]))
def export_ads(results,out_folder):
"""
This function creates a csv file which contains all the unique ads seen in
each test site (including all the refreshes)
TODO update the doc
results is a dictionary of the following
results = { Category : Value, ... }
value = { test_site_url : [ result1, result2, ... resultN], ... }
resultN : { WebBug : count, ... }
"""
try:
os.makedirs(out_folder)
except OSError:
LOG.debug('Creating output file folder ...')
export_ad_counter = 1 # assign unique number to ads for export to mturk
#short_listed_companies = ['google adsense', 'doubleclick']
with open(os.path.join(out_folder,'ad_labelling.csv'), 'w') as fwtr:
# write the titles
fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\n'.format(\
'Ad#', 'Company', 'FileType', 'Ad-Category', 'Website-URL',\
'Refresh-Num','Training-Topic', 'Context-of-site', 'Total', 'Ad-src'))
# make sure we only add one ad
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict | target_bin = img_bin
LOG.debug(bug_filepath)
try:
height = subprocess.check_output(['identify', '-format', '"%h"',\
bug_filepath]).strip()
width = subprocess.check_output(['identify', '-format','"%w"',\
bug_filepath]).strip()
except subprocess.CalledProcessError:
LOG.exception("identify error on file %s" % bug_filepath) | conditional_block |
|
ad_grabber_util.py | (session_results):
"""
i) Identify duplicate ads
ii) bin the ads by their dimensions
iii) Keep track of the test sites and how many times they have displayed this
ad
"""
# bin by dimensions
ads = {}
notads = {}
swf_bin = {}
img_bin = {}
error_bugs = []
for train_category, cat_dict in session_results.items():
for test_site, bug_dict_list in cat_dict.items():
for index_count in range(len(bug_dict_list)):
bug_dict = bug_dict_list[index_count]
for bug, bug_count in bug_dict.items():
bug_filetype = bug.get_filetype()
bug_filepath = bug.get_filepath()
if bug_filepath == '':
#LOG.debug('did not manage to curl the scripts for bug:%s' % bug)
error_bugs.append(bug)
continue
if bug.is_ad(): # ignore non-ads
height = '999'
width = '999'
if bug_filetype == 'swf':
# choose from the swf media bin
target_bin = swf_bin
try:
width = subprocess.check_output(['swfdump', '-X',
bug_filepath]).split(' ')[-1].strip()
height = subprocess.check_output(['swfdump', '-Y',
bug_filepath]).split(' ')[-1].strip()
except subprocess.CalledProcessError :
LOG.exception("swfdump error on file %s" % bug_filepath)
else:
# choose from the img media bin
target_bin = img_bin
LOG.debug(bug_filepath)
try:
height = subprocess.check_output(['identify', '-format', '"%h"',\
bug_filepath]).strip()
width = subprocess.check_output(['identify', '-format','"%w"',\
bug_filepath]).strip()
except subprocess.CalledProcessError:
LOG.exception("identify error on file %s" % bug_filepath)
try:
bug.set_dimension(height, width)
dimension = '%s-%s' % (height, width)
# check all the images in the bin with the dimensions
m_list = target_bin[dimension]
dup = None
for m in m_list:
if check_duplicate(bug_filepath, m.get_filepath()):
dup = m
break
if dup:
# check if the duplicate ad came from a different test site
if test_site in ads[dup]:
ads[dup][test_site] += bug_count
else :
ads[dup] = {test_site : bug_count}
# delete old bug reference, add new one and point to duplicated
# bug
del bug_dict[bug]
bug_dict[dup] = bug_count
else:
target_bin[dimension].append(bug)
ads[bug] = {test_site : bug_count}
# tally up the results
except KeyError: # The bin hasn't been created
target_bin[dimension] = [bug]
ads[bug] = {test_site : bug_count}
# else:
# notads
return ads,error_bugs
def export_uniq_ads(ads, out_folder, rel_folder):
"""
Takes all the uniq ads seen in this session and writes its metadata
information to a csv file
"""
try :
os.makedirs(out_folder)
os.makedirs(os.path.join(out_folder, rel_folder))
except OSError:
LOG.debug('Creating output folder')
fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')
# Relative location = Location of the ad within this current session
# Global location, added when an ad is matched with existing ads in DB
fwtr.write('#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\n')
for bug in ads.keys():
height, width = bug.get_dimension()
filepath = bug.get_filepath()
name = bug.get_name()
src = bug.get_src()
filetype = bug.get_filetype()
new_uuidname = '%s.%s' % (uuid1(), filetype)
bug.set_uuid(new_uuidname)
new_filepath = os.path.join(out_folder, new_uuidname)
rel_filepath = os.path.join(rel_folder, new_uuidname)
copy2(filepath, new_filepath)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(new_uuidname,
name, filetype, height, width, rel_filepath, src))
fwtr.close()
return ads
def write_run_info(RUNINFO_DIR, session_date):
# write to a file in runinfo_dir to tell automation script this run is done
fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)
with open(fp, 'w') as fwtr:
fwtr.write('OK')
def write_session_info(vmid, machineid, profile, session_date, train_mode, training_sites,
test_sites, num_of_refresh, export_folder):
train_category = training_sites.keys()[0]
train_sites_to_visit = training_sites[train_category]
with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:
fwtr.write('session_str : %s\n' % session_date)
fwtr.write('machine_info : %s\n' % machineid)
fwtr.write('vmid : %s\n' % vmid)
fwtr.write('profile : %s\n' % profile)
fwtr.write('train_mode : %s\n' % train_mode)
fwtr.write('num_of_refresh : %d\n' % num_of_refresh)
fwtr.write('training_topic : %s\n' % train_category)
fwtr.write('training_sites : ')
for site in train_sites_to_visit:
fwtr.write('%s, ' % site)
fwtr.write('\nnum_of_train_sites : %d\n' % len(train_sites_to_visit))
fwtr.write('test_sites : ')
for site in test_sites:
fwtr.write('%s, ' % site[1])
fwtr.write('\nnum_of_test_sites : %d\n' % len(test_sites))
def generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time):
"""
Generates stats on
- uniq ads seen on the test sites
- total number of ads seen on the test sites
- total number of ads seen on all test sites
- total number of uniq ads seen on all test sites
"""
try:
os.makedirs(export_folder)
except OSError:
pass
# to be read and inserted into db
totalads = 0 # total number of ads seen during this session
totaluniqads = len(ads) # does not support multicategories at this point
# for each category, for each test site, count total number of ads seen
totalad_category = {}
# for each category, for each test site, count total number of uniq ads seen
uniqad_category = {}
with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr:
bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic,\
Site-Context, BugCount, BugSrc\n')
for train_category, cat_dict in results.items():
totalad_category[train_category] = {}
uniqad_category[train_category] = {}
for test_site, bug_dict_list in cat_dict.items():
total_ads = 0 # for each site
uniq_ads = [] # for each site
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if bug.is_ad():
uuid = bug.get_uuid()
bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(uuid, test_site,
refresh_num, train_category, 'N/A', bugcount, bug.get_src()))
total_ads += bugcount
if bug not in uniq_ads:
uniq_ads.append(bug)
totalad_category[train_category][test_site] = total_ads
uniqad_category[train_category][test_site] = len(uniq_ads)
totalads += total_ads # global count for total ads
with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr:
# write some metadata information about this session
ses_wtr.write('#VMID: %s\n' % vmid)
ses_wtr.write('#Session-Date: %s\n' % session_date)
ses_wtr.write('#Time to complete: %s\n' % process_ex_time)
ses_wtr.write('#Training Categories: %s\n' % str(results.keys()))
ses_wtr.write('#Total Number of ads: %d\n' % totalads)
ses_wtr.write('#Total Uniq ads: %d\n\n' % totaluniqads)
ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\n')
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
num_of_visit = len(bug_dict_list)
ses_wtr.write('{0}, | identify_uniq_ads | identifier_name |
|
ad_grabber_util.py | , if
# exists
try:
# separate the non-ads from the ads for ease of handchecking
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
pass
# uses a pool of 'curl' workers
curl_worker_pool = Pool(processes=num_of_workers)
manager = Manager()
curl_result_queue = manager.Queue()
dl_counter = 0 # keep track of how many bugs downloaded
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('Timing out on get from queue...')
break
for entry in found_bugs:
bugname = entry['bug']['name'].replace(' ','').replace('/','_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try :
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ""
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
bug = WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
try:
# matched an entry in the bugdict, incr count and continue
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
saved_location ='Visit%d_%s%d' % (refresh_count, bugname,\
dl_counter)
dl_counter += 1
save_to_path = os.path.join( output_dir, '%s' % saved_location)
obj = curl_worker_pool.apply_async(curl_worker_legacy, \
((output_dir, saved_location, save_to_path, bug, curl_result_queue),))
try:
sleep(0.5)
# close the pool before joining; join() requires the pool to be closed first
curl_worker_pool.close()
curl_worker_pool.join()
curl_worker_pool.terminate()
except Exception:
LOG.debug('Closing pool')
while not curl_result_queue.empty():
cbug = curl_result_queue.get()
# ugly code here
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
def curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\
ack_queue):
while True:
try:
task = input_queue.get()
if len(task) == 1 and task[0] == "STOP":
LOG.debug('curl_worker %d received stop' % worker_id)
break
except Exception:
LOG.error('Error:')
#LOG.debug(task)
saved_file_name = task[0]
path = task[1]
bug = task[2]
try:
# subprocess.call(['curl', '-o', path , bug.get_src()])
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
except Exception as e:
LOG.debug('Exception captured %s\n\n' % e)
subpr_out = '' # fall back so get_bug_type() below treats the failed download as plain text
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\
filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
worker_output_queue.put(bug)
ack_queue.put(worker_id)
return
def build_nodes(jsonData):
"""
This function takes a JSON encoded output of the firefox addon and builds a
call graph for the javascript/HTML redirections
@rtype nodes: dict
@return: A graph of redirection chains
"""
nodes = {}
def _process_cookiestr(cookieStr):
"""
parses a dictionary of req/resp calls to extract the cookie information
returns a list of cookies set on this domain
"""
cookie_list = []
# parses cookie str if a cookie has been set
for cookie in cookieStr.split('\n'):
c = {}
for cook in cookie.split(';'):
token = cook.split('=', 1)
if len(token) < 2:
# usually this is just a flag e.g HTTPOnly, HTTPSOnly
continue
c[token[0]] = token[1]
cookie_list.append(c)
return cookie_list
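# Rough example of the parsing above (hypothetical cookie string):
#   'id=abc; Path=/; HttpOnly' -> [{'id': 'abc', ' Path': '/'}]   # flag-only tokens like HttpOnly are skipped
# Note the naive split keeps any leading space in the key.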
def _check_node(d):
try:
domain_node = nodes[d]
except KeyError:
# use the parameter 'd' rather than the enclosing loop variable 'domain',
# otherwise referrer nodes get labelled with the wrong domain
isBug, bug_name, bug_type = ADREGEX.search(d)
domain_node = WebNode(d, isBug, bug_name, bug_type)
nodes[d] = domain_node
return domain_node
#jsonData contains all the domains and all the req/resp pairs made to them
#iterating over the domains first
for domain, dval in jsonData.items():
# but first check if a node for this domain has been created or not
domain_node = _check_node(domain)
cookie_list = []
# iterating thru all the req/resp pairs on a domain
for info in dval:
domainPath = info['domainPath']
referrerPath = info['referrerPath']
referrer = info['referrer']
cookieBool = info['cookie']
parsed_cookie = None
if cookieBool:
cookieStr = info['cookiestr']
parsed_cookie = _process_cookiestr(cookieStr)
cookie_list.append(parsed_cookie)
domain_node.add_reqresp({'domainPath' : domainPath,
'referrer' : referrer,
'referrerPath' : referrerPath,
'cookieList' : parsed_cookie
})
# making sure that we also create the node for the referrer
referrer_node = _check_node(referrer)
referrer_node.add_child(domain_node)
domain_node.add_parent(referrer_node)
domain_node.set_cookies(cookie_list)
return nodes
def filter_results(extQueue, timeout_value, url):
"""
This function takes the JSON output of the firefox addon, and matches the
request URL against a list of known tracker/ads regexes.
Returns data structure containing request/resp info
Returns None if did not receive results from FF addon
"""
from Queue import Empty
try:
LOG.debug('Timeout value in filter_result :%d' % timeout_value)
nodes = extQueue.get(True, timeout=timeout_value)
except Empty as e:
LOG.info('Did not receive any results from FF plugin for %s' % url)
nodes = None
finally:
while not extQueue.empty():
extQueue.get()
return nodes
def process_results(refresh_count, output_dir, ext_queue, result_queue,
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
aggregates each bug's occurrence in a given page. The aggregation is necessary
for duplicate ads on the same page
"""
workers_dict = {} # keep track of worker processes
input_queue = Queue() # asynchronously feed workers task to do
worker_output_queue = Queue() # output queue from workers
ack_queue = Queue()
bug_dict = {} # dict to keep track of how many duplicates of each bug, if
# exists
try:
# separate the non-ads from the ads for ease of handchecking
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
# Directory is created, Okay to pass
pass
for i in range(num_of_workers):
p = Process(target=curl_worker, args=(output_dir, input_queue,\
worker_output_queue, i, ack_queue))
p.start()
workers_dict[i] = p
# uses a pool of 'curl' workers
# curl_worker_pool = Pool(processes=8)
# manager = Manager()
# curl_result_queue = manager.Queue()
dl_counter = 0 # keep track of how many bugs downloaded
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('No more bugs found, break out of queue')
break
for entry in found_bugs:
bug = parse_buginfo(entry)
try:
# matched an entry in the bugdict, incr count and continue
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
try:
saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)
dl_counter += 1
save_to_path = os.path.join( output_dir, '%s' % saved_location)
input_queue.put((saved_location, save_to_path, bug))
except Exception as e:
LOG.exception('%s' % e)
for i in range(num_of_workers): | # send stop signal
input_queue.put(("STOP",)) | random_line_split |
|
model.go | as assigned by the scheduler
// internally.
CronID int64 `gorm:"column:cron_id"`
// CronExpression specifies the scheduling of the job.
CronExpression string `gorm:"column:cron_expression"`
// Paused specifies whether this rawJob has been paused.
Paused bool `gorm:"column:paused"`
// CreatedAt specifies when this rawJob has been created.
CreatedAt time.Time `gorm:"column:created_at"`
// UpdatedAt specifies the last time this object has been updated,
// i.e., paused/resumed/schedule updated.
UpdatedAt time.Time `gorm:"column:updated_at"`
// SerializedJob is the base64(gob-encoded byte array)
// of the interface executing this rawJob
SerializedJob string `gorm:"column:serialized_job"`
// SerializedJobInput is the base64(gob-encoded byte array)
// of the map containing the argument for the job.
SerializedJobInput string `gorm:"column:serialized_job_input"`
}
func (j *RawJob) TableName() string {
return "jobs"
}
// JobWithSchedule is a RawJob object
// with a cron.Schedule object in it.
// The schedule can be accessed by using the Schedule() method.
// This object should be created only by calling the method
// toJobWithSchedule().
type JobWithSchedule struct {
rawJob RawJob
schedule cron.Schedule
run CronJob
runInput CronJobInput
}
// decodeSerializedFields decode j.serializedJob and j.SerializedJobInput.
func (j *RawJob) decodeSerializedFields() (CronJob, CronJobInput, error) {
var decoder *gob.Decoder
var err error
// decode from base64 the serialized job
decodedJob, err := base64.StdEncoding.DecodeString(j.SerializedJob)
if err != nil {
return nil, CronJobInput{}, err
}
// decode the interface executing the rawJob
decoder = gob.NewDecoder(bytes.NewBuffer(decodedJob))
var runJob CronJob
if err = decoder.Decode(&runJob); err != nil {
return nil, CronJobInput{}, err
}
// decode the input from json
var jobInputMap map[string]interface{}
if err := json.Unmarshal([]byte(j.SerializedJobInput), &jobInputMap); err != nil {
return nil, CronJobInput{}, err
}
// and build the overall object containing all the
// inputs will be passed to the Job
runJobInput := CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronExpression: j.CronExpression,
OtherInputs: jobInputMap,
}
return runJob, runJobInput, nil
}
// toJob converts j to a Job instance.
func (j *RawJob) toJob() (Job, error) {
job, jobInput, err := j.decodeSerializedFields()
if err != nil {
return Job{}, err
}
result := Job{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
cronID: j.CronID,
CronExpression: j.CronExpression,
paused: j.Paused,
createdAt: j.CreatedAt,
updatedAt: j.UpdatedAt,
Job: job,
JobInput: jobInput.OtherInputs,
}
return result, nil
}
// ToJobWithSchedule returns a JobWithSchedule object from the current RawJob,
// by copy. It returns errors in case the given schedule is not valid,
// or in case the conversion of the rawJob interface/input fails.
// It does NOT copy the byte arrays from j.
func (j *RawJob) ToJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
runJob, runJobInput, err := j.decodeSerializedFields()
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: j.CronID,
CronExpression: j.CronExpression,
Paused: j.Paused,
CreatedAt: j.CreatedAt,
UpdatedAt: j.UpdatedAt,
},
schedule: schedule,
run: runJob,
runInput: runJobInput,
}
return result, nil
}
// encodeJob encodes `job`. A separate function is needed because we need to pass
// a POINTER to interface.
func encodeJob(encoder *gob.Encoder, job CronJob) error {
return encoder.Encode(&job)
}
// BuildJob builds the raw version of the inner job, by encoding
// it. In particular, the encoding is done as follows:
// - for the serialized job, it is encoded in Gob and then in base64
// - for the job input, it is encoded in json.
//This is needed since, when converting from a `RawJob` to a `JobWithSchedule`,
// the binary serialization of the Job is not kept in memory.
func (j *JobWithSchedule) BuildJob() (RawJob, error) {
var bufferJob bytes.Buffer
encoderJob := gob.NewEncoder(&bufferJob)
// encode the CronJob interface keeping the unit of work
// to execute. We need to use the encodeJob method
// due to how gob interface encoding works.
if err := encodeJob(encoderJob, j.run); err != nil {
return RawJob{}, err
}
// finally, encode the bytes to base64
j.rawJob.SerializedJob = base64.StdEncoding.EncodeToString(bufferJob.Bytes())
// now, encode the job input
if err := j.encodeJobInput(); err != nil {
return RawJob{}, err
}
return j.rawJob, nil
}
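// Illustrative round trip (not in the original file): BuildJob serializes the run/runInput
// pair, and RawJob.ToJobWithSchedule on the stored row reverses it:
//   raw, _ := jws.BuildJob()           // gob+base64 job, JSON-encoded input
//   jws2, _ := raw.ToJobWithSchedule() // decodes schedule, job and input again
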
// encodeJobInput encodes j.rawJob.SerializedJobInput.
func (j *JobWithSchedule) encodeJobInput() error {
encodedInput, err := json.Marshal(j.runInput.OtherInputs)
if err != nil {
return err
}
j.rawJob.SerializedJobInput = string(encodedInput)
return nil
}
// getIdsFromJobRawList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobRawList(jobs []RawJob) []int64 {
ids := make([]int64, len(jobs))
for i, test := range jobs {
ids[i] = test.ID
}
return ids
}
// getIdsFromJobsWithScheduleList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobsWithScheduleList(jobs []JobWithSchedule) []int64 {
ids := make([]int64, len(jobs))
for i, job := range jobs {
ids[i] = job.rawJob.ID
}
return ids
}
// getIdsFromJobList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobList(jobs []Job) []int64 {
ids := make([]int64, len(jobs))
for i, job := range jobs {
ids[i] = job.ID
}
return ids
}
// UpdateOption is the struct used to update
// a Job.
//
// An update on a Job consists in changing
// the schedule, by using the field `CronExpression`,
// or the Job input, by using the field `JobOtherInputs`.
//
// If none of those fields are specified, i.e., they are
// both nil, the struct is considered invalid.
type UpdateOption struct {
// JobID is the ID of the Job to update.
JobID int64
// CronExpression is the new schedule of the Job.
// If nil, it is ignored, i.e., the schedule
// is not changed.
CronExpression *string
// JobOtherInputs is the new OtherInputs of the Job.
// If nil, it is ignored, i.e.,
// the Job input is not changed.
JobOtherInputs *map[string]interface{}
}
func (u *UpdateOption) schedule() (cron.Schedule, error) {
return cron.ParseStandard(*u.CronExpression)
}
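// Example (illustrative only): updating just the schedule of an existing job.
//   expr := "*/5 * * * *"
//   opt := UpdateOption{JobID: 42, CronExpression: &expr}
//   // opt.Valid() == nil, since the expression parses and at least one field is set
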
var (
// ErrUpdateOptionInvalid is returned when the fields
// of UpdateOption are invalid.
// This error is returned when the combination
// of the fields is not valid (i.e., both nil).
// For errors in the CronExpression field,
// the specific error set by the library is returned.
ErrUpdateOptionInvalid = errors.New("invalid option")
)
// Valid returns whether the fields in this struct
// are valid. If the struct is valid, no errors
// are returned.
//
// UpdateOption is considered valid if
// at least one field between CronExpression and JobOtherInputs
// are not nil, and the cron string can be parsed.
func (u *UpdateOption) Valid() error {
if u.CronExpression == nil && u.JobOtherInputs == nil {
return ErrUpdateOptionInvalid
}
if u.CronExpression != nil {
if _, err := u.schedule(); err != nil | {
return err
} | conditional_block |
|
model.go | added to the scheduler.
func (j *Job) CreatedAt() time.Time {
return j.createdAt
}
// UpdatedAt returns the last time this Job has been updated, i.e.,
// paused, resumed, schedule changed.
func (j *Job) UpdatedAt() time.Time {
return j.updatedAt
}
// Paused returns whether this Job is currently paused
// or not.
func (j *Job) Paused() bool {
return j.paused
}
// toJobWithSchedule converts Job to a JobWithSchedule object.
// It returns an error in case the parsing of the cron expression fails.
func (j *Job) toJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: 0,
CronExpression: j.CronExpression,
Paused: false,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
schedule: schedule,
run: j.Job,
runInput: CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
OtherInputs: j.JobInput,
CronExpression: j.CronExpression,
},
}
return result, nil
}
// RawJob models a raw rawJob coming from the database.
type RawJob struct {
// ID is a unique ID identifying the rawJob object.
// It is chosen by the user.
ID int64 `gorm:"primaryKey,column:id"`
// GroupID is the ID of the group this rawJob is inserted in.
GroupID int64 `gorm:"column:group_id"`
// SuperGroupID specifies the ID of the super group
// where this group is contained in.
SuperGroupID int64 `gorm:"column:super_group_id"`
// CronID is the ID of the cron rawJob as assigned by the scheduler
// internally.
CronID int64 `gorm:"column:cron_id"`
// CronExpression specifies the scheduling of the job.
CronExpression string `gorm:"column:cron_expression"`
// Paused specifies whether this rawJob has been paused.
Paused bool `gorm:"column:paused"`
// CreatedAt specifies when this rawJob has been created.
CreatedAt time.Time `gorm:"column:created_at"`
// UpdatedAt specifies the last time this object has been updated,
// i.e., paused/resumed/schedule updated.
UpdatedAt time.Time `gorm:"column:updated_at"`
// SerializedJob is the base64(gob-encoded byte array)
// of the interface executing this rawJob
SerializedJob string `gorm:"column:serialized_job"`
// SerializedJobInput is the base64(gob-encoded byte array)
// of the map containing the argument for the job.
SerializedJobInput string `gorm:"column:serialized_job_input"`
}
func (j *RawJob) TableName() string {
return "jobs"
}
// JobWithSchedule is a RawJob object
// with a cron.Schedule object in it.
// The schedule can be accessed by using the Schedule() method.
// This object should be created only by calling the method
// toJobWithSchedule().
type JobWithSchedule struct {
rawJob RawJob
schedule cron.Schedule
run CronJob
runInput CronJobInput
}
// decodeSerializedFields decode j.serializedJob and j.SerializedJobInput.
func (j *RawJob) decodeSerializedFields() (CronJob, CronJobInput, error) {
var decoder *gob.Decoder
var err error
// decode from base64 the serialized job
decodedJob, err := base64.StdEncoding.DecodeString(j.SerializedJob)
if err != nil {
return nil, CronJobInput{}, err
}
// decode the interface executing the rawJob
decoder = gob.NewDecoder(bytes.NewBuffer(decodedJob))
var runJob CronJob
if err = decoder.Decode(&runJob); err != nil {
return nil, CronJobInput{}, err
}
// decode the input from json
var jobInputMap map[string]interface{}
if err := json.Unmarshal([]byte(j.SerializedJobInput), &jobInputMap); err != nil {
return nil, CronJobInput{}, err
}
// and build the overall object containing all the
// inputs will be passed to the Job
runJobInput := CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronExpression: j.CronExpression,
OtherInputs: jobInputMap,
}
return runJob, runJobInput, nil
}
// toJob converts j to a Job instance.
func (j *RawJob) toJob() (Job, error) {
job, jobInput, err := j.decodeSerializedFields()
if err != nil {
return Job{}, err
}
result := Job{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
cronID: j.CronID,
CronExpression: j.CronExpression,
paused: j.Paused,
createdAt: j.CreatedAt,
updatedAt: j.UpdatedAt,
Job: job,
JobInput: jobInput.OtherInputs,
}
return result, nil
}
// ToJobWithSchedule returns a JobWithSchedule object from the current RawJob,
// by copy. It returns errors in case the given schedule is not valid,
// or in case the conversion of the rawJob interface/input fails.
// It does NOT copy the byte arrays from j.
func (j *RawJob) ToJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
runJob, runJobInput, err := j.decodeSerializedFields()
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: j.CronID,
CronExpression: j.CronExpression,
Paused: j.Paused,
CreatedAt: j.CreatedAt,
UpdatedAt: j.UpdatedAt,
},
schedule: schedule,
run: runJob,
runInput: runJobInput,
}
return result, nil
}
// encodeJob encodes `job`. A separate function is needed because we need to pass
// a POINTER to interface.
func encodeJob(encoder *gob.Encoder, job CronJob) error {
return encoder.Encode(&job)
}
// BuildJob builds the raw version of the inner job, by encoding
// it. In particular, the encoding is done as follows:
// - for the serialized job, it is encoded in Gob and then in base64
// - for the job input, it is encoded in json.
//This is needed since, when converting from a `RawJob` to a `JobWithSchedule`,
// the binary serialization of the Job is not kept in memory.
func (j *JobWithSchedule) BuildJob() (RawJob, error) {
var bufferJob bytes.Buffer
encoderJob := gob.NewEncoder(&bufferJob)
// encode the CronJob interface keeping the unit of work
// to execute. We need to use the encodeJob method
// due to how gob interface encoding works.
if err := encodeJob(encoderJob, j.run); err != nil {
return RawJob{}, err
}
// finally, encode the bytes to base64
j.rawJob.SerializedJob = base64.StdEncoding.EncodeToString(bufferJob.Bytes())
// now, encode the job input
if err := j.encodeJobInput(); err != nil {
return RawJob{}, err
}
return j.rawJob, nil
}
// encodeJobInput encodes j.rawJob.SerializedJobInput.
func (j *JobWithSchedule) encodeJobInput() error {
encodedInput, err := json.Marshal(j.runInput.OtherInputs)
if err != nil {
return err
}
j.rawJob.SerializedJobInput = string(encodedInput)
return nil
}
// getIdsFromJobRawList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobRawList(jobs []RawJob) []int64 {
ids := make([]int64, len(jobs))
for i, test := range jobs {
ids[i] = test.ID
}
return ids
}
// getIdsFromJobsWithScheduleList basically does jobs.map(rawJob -> rawJob.id)
func | (jobs []JobWithSchedule) []int64 {
ids := make([]int64, len(jobs))
for i, job := range jobs {
ids[i] = job.rawJob.ID
}
return ids
}
// getIdsFrom | getIdsFromJobsWithScheduleList | identifier_name |
model.go | added to the scheduler.
func (j *Job) CreatedAt() time.Time {
return j.createdAt
}
// UpdatedAt returns the last time this Job has been updated, i.e.,
// paused, resumed, schedule changed.
func (j *Job) UpdatedAt() time.Time {
return j.updatedAt
}
// Paused returns whether this Job is currently paused
// or not.
func (j *Job) Paused() bool {
return j.paused
}
// toJobWithSchedule converts Job to a JobWithSchedule object.
// It returns an error in case the parsing of the cron expression fails.
func (j *Job) toJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: 0,
CronExpression: j.CronExpression,
Paused: false,
CreatedAt: time.Now(),
UpdatedAt: time.Now(), | run: j.Job,
runInput: CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
OtherInputs: j.JobInput,
CronExpression: j.CronExpression,
},
}
return result, nil
}
// RawJob models a raw rawJob coming from the database.
type RawJob struct {
// ID is a unique ID identifying the rawJob object.
// It is chosen by the user.
ID int64 `gorm:"primaryKey,column:id"`
// GroupID is the ID of the group this rawJob is inserted in.
GroupID int64 `gorm:"column:group_id"`
// SuperGroupID specifies the ID of the super group
// where this group is contained in.
SuperGroupID int64 `gorm:"column:super_group_id"`
// CronID is the ID of the cron rawJob as assigned by the scheduler
// internally.
CronID int64 `gorm:"column:cron_id"`
// CronExpression specifies the scheduling of the job.
CronExpression string `gorm:"column:cron_expression"`
// Paused specifies whether this rawJob has been paused.
Paused bool `gorm:"column:paused"`
// CreatedAt specifies when this rawJob has been created.
CreatedAt time.Time `gorm:"column:created_at"`
// UpdatedAt specifies the last time this object has been updated,
// i.e., paused/resumed/schedule updated.
UpdatedAt time.Time `gorm:"column:updated_at"`
// SerializedJob is the base64(gob-encoded byte array)
// of the interface executing this rawJob
SerializedJob string `gorm:"column:serialized_job"`
// SerializedJobInput is the base64(gob-encoded byte array)
// of the map containing the argument for the job.
SerializedJobInput string `gorm:"column:serialized_job_input"`
}
func (j *RawJob) TableName() string {
return "jobs"
}
// JobWithSchedule is a RawJob object
// with a cron.Schedule object in it.
// The schedule can be accessed by using the Schedule() method.
// This object should be created only by calling the method
// toJobWithSchedule().
type JobWithSchedule struct {
rawJob RawJob
schedule cron.Schedule
run CronJob
runInput CronJobInput
}
// decodeSerializedFields decode j.serializedJob and j.SerializedJobInput.
func (j *RawJob) decodeSerializedFields() (CronJob, CronJobInput, error) {
var decoder *gob.Decoder
var err error
// decode from base64 the serialized job
decodedJob, err := base64.StdEncoding.DecodeString(j.SerializedJob)
if err != nil {
return nil, CronJobInput{}, err
}
// decode the interface executing the rawJob
decoder = gob.NewDecoder(bytes.NewBuffer(decodedJob))
var runJob CronJob
if err = decoder.Decode(&runJob); err != nil {
return nil, CronJobInput{}, err
}
// decode the input from json
var jobInputMap map[string]interface{}
if err := json.Unmarshal([]byte(j.SerializedJobInput), &jobInputMap); err != nil {
return nil, CronJobInput{}, err
}
// and build the overall object containing all the
// inputs will be passed to the Job
runJobInput := CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronExpression: j.CronExpression,
OtherInputs: jobInputMap,
}
return runJob, runJobInput, nil
}
// toJob converts j to a Job instance.
func (j *RawJob) toJob() (Job, error) {
job, jobInput, err := j.decodeSerializedFields()
if err != nil {
return Job{}, err
}
result := Job{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
cronID: j.CronID,
CronExpression: j.CronExpression,
paused: j.Paused,
createdAt: j.CreatedAt,
updatedAt: j.UpdatedAt,
Job: job,
JobInput: jobInput.OtherInputs,
}
return result, nil
}
// ToJobWithSchedule returns a JobWithSchedule object from the current RawJob,
// by copy. It returns errors in case the given schedule is not valid,
// or in case the conversion of the rawJob interface/input fails.
// It does NOT copy the byte arrays from j.
func (j *RawJob) ToJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
runJob, runJobInput, err := j.decodeSerializedFields()
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: j.CronID,
CronExpression: j.CronExpression,
Paused: j.Paused,
CreatedAt: j.CreatedAt,
UpdatedAt: j.UpdatedAt,
},
schedule: schedule,
run: runJob,
runInput: runJobInput,
}
return result, nil
}
// encodeJob encodes `job`. A separate function is needed because we need to pass
// a POINTER to interface.
func encodeJob(encoder *gob.Encoder, job CronJob) error {
return encoder.Encode(&job)
}
// BuildJob builds the raw version of the inner job, by encoding
// it. In particular, the encoding is done as follows:
// - for the serialized job, it is encoded in Gob and then in base64
// - for the job input, it is encoded in json.
//This is needed since, when converting from a `RawJob` to a `JobWithSchedule`,
// the binary serialization of the Job is not kept in memory.
func (j *JobWithSchedule) BuildJob() (RawJob, error) {
var bufferJob bytes.Buffer
encoderJob := gob.NewEncoder(&bufferJob)
// encode the CronJob interface keeping the unit of work
// to execute. We need to use the encodeJob method
// due to how gob interface encoding works.
if err := encodeJob(encoderJob, j.run); err != nil {
return RawJob{}, err
}
// finally, encode the bytes to base64
j.rawJob.SerializedJob = base64.StdEncoding.EncodeToString(bufferJob.Bytes())
// now, encode the job input
if err := j.encodeJobInput(); err != nil {
return RawJob{}, err
}
return j.rawJob, nil
}
// encodeJobInput encodes j.rawJob.SerializedJobInput.
func (j *JobWithSchedule) encodeJobInput() error {
encodedInput, err := json.Marshal(j.runInput.OtherInputs)
if err != nil {
return err
}
j.rawJob.SerializedJobInput = string(encodedInput)
return nil
}
// getIdsFromJobRawList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobRawList(jobs []RawJob) []int64 {
ids := make([]int64, len(jobs))
for i, test := range jobs {
ids[i] = test.ID
}
return ids
}
// getIdsFromJobsWithScheduleList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobsWithScheduleList(jobs []JobWithSchedule) []int64 {
ids := make([]int64, len(jobs))
for i, job := range jobs {
ids[i] = job.rawJob.ID
}
return ids
}
// getIdsFromJob | },
schedule: schedule, | random_line_split |
model.go | to the scheduler.
func (j *Job) CreatedAt() time.Time {
return j.createdAt
}
// UpdatedAt returns the last time this Job has been updated, i.e.,
// paused, resumed, schedule changed.
func (j *Job) UpdatedAt() time.Time {
return j.updatedAt
}
// Paused returns whether this Job is currently paused
// or not.
func (j *Job) Paused() bool {
return j.paused
}
// toJobWithSchedule converts Job to a JobWithSchedule object.
// It returns an error in case the parsing of the cron expression fails.
func (j *Job) toJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: 0,
CronExpression: j.CronExpression,
Paused: false,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
schedule: schedule,
run: j.Job,
runInput: CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
OtherInputs: j.JobInput,
CronExpression: j.CronExpression,
},
}
return result, nil
}
// RawJob models a raw rawJob coming from the database.
type RawJob struct {
// ID is a unique ID identifying the rawJob object.
// It is chosen by the user.
ID int64 `gorm:"primaryKey,column:id"`
// GroupID is the ID of the group this rawJob is inserted in.
GroupID int64 `gorm:"column:group_id"`
// SuperGroupID specifies the ID of the super group
// where this group is contained in.
SuperGroupID int64 `gorm:"column:super_group_id"`
// CronID is the ID of the cron rawJob as assigned by the scheduler
// internally.
CronID int64 `gorm:"column:cron_id"`
// CronExpression specifies the scheduling of the job.
CronExpression string `gorm:"column:cron_expression"`
// Paused specifies whether this rawJob has been paused.
Paused bool `gorm:"column:paused"`
// CreatedAt specifies when this rawJob has been created.
CreatedAt time.Time `gorm:"column:created_at"`
// UpdatedAt specifies the last time this object has been updated,
// i.e., paused/resumed/schedule updated.
UpdatedAt time.Time `gorm:"column:updated_at"`
// SerializedJob is the base64(gob-encoded byte array)
// of the interface executing this rawJob
SerializedJob string `gorm:"column:serialized_job"`
// SerializedJobInput is the base64(gob-encoded byte array)
// of the map containing the argument for the job.
SerializedJobInput string `gorm:"column:serialized_job_input"`
}
func (j *RawJob) TableName() string {
return "jobs"
}
// JobWithSchedule is a RawJob object
// with a cron.Schedule object in it.
// The schedule can be accessed by using the Schedule() method.
// This object should be created only by calling the method
// toJobWithSchedule().
type JobWithSchedule struct {
rawJob RawJob
schedule cron.Schedule
run CronJob
runInput CronJobInput
}
// decodeSerializedFields decode j.serializedJob and j.SerializedJobInput.
func (j *RawJob) decodeSerializedFields() (CronJob, CronJobInput, error) {
var decoder *gob.Decoder
var err error
// decode from base64 the serialized job
decodedJob, err := base64.StdEncoding.DecodeString(j.SerializedJob)
if err != nil {
return nil, CronJobInput{}, err
}
// decode the interface executing the rawJob
decoder = gob.NewDecoder(bytes.NewBuffer(decodedJob))
var runJob CronJob
if err = decoder.Decode(&runJob); err != nil {
return nil, CronJobInput{}, err
}
// decode the input from json
var jobInputMap map[string]interface{}
if err := json.Unmarshal([]byte(j.SerializedJobInput), &jobInputMap); err != nil {
return nil, CronJobInput{}, err
}
// and build the overall object containing all the
// inputs will be passed to the Job
runJobInput := CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronExpression: j.CronExpression,
OtherInputs: jobInputMap,
}
return runJob, runJobInput, nil
}
// toJob converts j to a Job instance.
func (j *RawJob) toJob() (Job, error) |
// ToJobWithSchedule returns a JobWithSchedule object from the current RawJob,
// by copy. It returns errors in case the given schedule is not valid,
// or in case the conversion of the rawJob interface/input fails.
// It does NOT copy the byte arrays from j.
func (j *RawJob) ToJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
runJob, runJobInput, err := j.decodeSerializedFields()
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: j.CronID,
CronExpression: j.CronExpression,
Paused: j.Paused,
CreatedAt: j.CreatedAt,
UpdatedAt: j.UpdatedAt,
},
schedule: schedule,
run: runJob,
runInput: runJobInput,
}
return result, nil
}
// encodeJob encodes `job`. A separate function is needed because we need to pass
// a POINTER to interface.
func encodeJob(encoder *gob.Encoder, job CronJob) error {
return encoder.Encode(&job)
}
// BuildJob builds the raw version of the inner job, by encoding
// it. In particular, the encoding is done as follows:
// - for the serialized job, it is encoded in Gob and then in base64
// - for the job input, it is encoded in json.
//This is needed since, when converting from a `RawJob` to a `JobWithSchedule`,
// the binary serialization of the Job is not kept in memory.
func (j *JobWithSchedule) BuildJob() (RawJob, error) {
var bufferJob bytes.Buffer
encoderJob := gob.NewEncoder(&bufferJob)
// encode the CronJob interface keeping the unit of work
// to execute. We need to use the encodeJob method
// due to how gob interface encoding works.
if err := encodeJob(encoderJob, j.run); err != nil {
return RawJob{}, err
}
// finally, encode the bytes to base64
j.rawJob.SerializedJob = base64.StdEncoding.EncodeToString(bufferJob.Bytes())
// now, encode the job input
if err := j.encodeJobInput(); err != nil {
return RawJob{}, err
}
return j.rawJob, nil
}
// encodeJobInput encodes j.rawJob.SerializedJobInput.
func (j *JobWithSchedule) encodeJobInput() error {
encodedInput, err := json.Marshal(j.runInput.OtherInputs)
if err != nil {
return err
}
j.rawJob.SerializedJobInput = string(encodedInput)
return nil
}
// getIdsFromJobRawList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobRawList(jobs []RawJob) []int64 {
ids := make([]int64, len(jobs))
for i, test := range jobs {
ids[i] = test.ID
}
return ids
}
// getIdsFromJobsWithScheduleList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobsWithScheduleList(jobs []JobWithSchedule) []int64 {
ids := make([]int64, len(jobs))
for i, job := range jobs {
ids[i] = job.rawJob.ID
}
return ids
}
// getIdsFrom | {
job, jobInput, err := j.decodeSerializedFields()
if err != nil {
return Job{}, err
}
result := Job{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
cronID: j.CronID,
CronExpression: j.CronExpression,
paused: j.Paused,
createdAt: j.CreatedAt,
updatedAt: j.UpdatedAt,
Job: job,
JobInput: jobInput.OtherInputs,
}
return result, nil
} | identifier_body |