Dataset Viewer

file_name (large_string, length 4–140) | prefix (large_string, length 0–12.1k) | suffix (large_string, length 0–12k) | middle (large_string, length 0–7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
main.rs | c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) & ...` will be very complex.
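/// As a concrete illustration (the two-factor case matches the tests below):
/// `(a | b) & (c | d)` already expands to `a&c | a&d | b&c | b&d`, so the number of
/// conjunctions can double with every additional `&`-factor.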
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = None;
for a in aa.iter() {
if a.is_subset(&b) {
// a is larger than b. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// a is smaller than b, E.g. x&y | x
// remove a, keep b
to_remove = Some(a.clone());
}
}
if let Some(r) = to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {"a", "b"},
btreeset! {"a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![1,3]);
let expr = l("c") & l("d");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_matching_2() {
let index = Index::from_elements(&vec![
btreeset! {"a", "b"},
btreeset! {"b", "c"},
btreeset! {"c", "a"},
btreeset! {"a", "b"},
]);
let expr = l("a") | l("b") | l("c");
assert_eq!(index.matching(expr.dnf()), vec![0,1,2,3]);
let expr = l("a") & l("b");
assert_eq!(index.matching(expr.dnf()), vec![0,3]);
let expr = l("a") & l("b") & l("c");
assert!(index.matching(expr.dnf()).is_empty());
}
#[test]
fn test_deser_error() {
// negative index - serde should catch this
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,-1]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
// index too large - we must catch this in order to uphold the invariants of the index
let e1 = r#"[["a","b"],[[0],[0,1],[0],[0,2]]]"#;
let x: std::result::Result<Index,_> = serde_json::from_str(e1);
assert!(x.is_err());
}
const STRINGS: &'static [&'static str] = &["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"];
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
struct IndexString(&'static str);
impl Arbitrary for IndexString {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
IndexString(STRINGS.choose(g).unwrap())
}
}
impl Arbitrary for Index {
fn arbitrary<G: Gen>(g: &mut G) -> Self {
let xs: Vec<BTreeSet<IndexString>> = Arbitrary::arbitrary(g);
let xs: Vec<BTreeSet<&str>> = xs.iter().map(|e| e.iter().map(|x| x.0).collect()).collect();
Index::from_elements(&xs)
}
}
quickcheck! {
fn serde_json_roundtrip(index: Index) -> bool {
let json = serde_json::to_string(&index).unwrap();
let index2: Index = serde_json::from_str(&json).unwrap();
index == index2
}
}
}
fn compresss_zstd_cbor<T: Serialize>(value: &T) -> std::result::Result<Vec<u8>, Box<dyn std::error::Error>> {
let cbor = serde_cbor::to_vec(&value)?;
let mut compressed: Vec<u8> = Vec::new();
zstd::stream::copy_encode(std::io::Cursor::new(cbor), &mut compressed, 10)?;
Ok(compressed)
}
fn decompress_zstd_cbor<T: DeserializeOwned>(compressed: &[u8]) -> std::result::Result<T, Box<dyn std::error::Error>> {
let mut decompressed: Vec<u8> = Vec::new();
zstd::stream::copy_decode(compressed, &mut decompressed)?; | Ok(serde_cbor::from_slice(&decompressed)?)
}
fn borrow_inner(elements: &[BTreeSet<String>]) -> Vec<BTreeSet<&str>> {
elements.iter().map(|x| x.iter().map(|e| e.as_ref()).collect()).collect() | random_line_split |
|
main.rs | <_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn or(e: Vec<Expression>) -> Self |
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) & ...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = None;
for a in aa.iter() {
if a.is_subset(&b) {
// a is larger than b. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// a is smaller than b, E.g. x&y | x
// remove a, keep b
to_remove = Some(a.clone());
}
}
if let Some(r) = to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! | {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
} | identifier_body |
main.rs | <_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn | (e: Vec<Expression>) -> Self {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
}
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) & ...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = None;
for a in aa.iter() {
if a.is_subset(&b) {
// a is larger than b. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// a is smaller than b, E.g. x&y | x
// remove a, keep b
to_remove = Some(a.clone());
}
}
if let Some(r) = to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|a&d|b&c|b&d");
}
#[test]
fn test_dnf_simplify_1() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) & a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_2() {
let a = l("a");
let b = l("b");
let expr = (a.clone() & b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a");
}
#[test]
fn test_dnf_simplify_3() {
let a = l("a");
let b = l("b");
let expr = (a.clone() | b) | a;
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a|b");
}
#[test]
fn test_matching_1() {
let index = Index::from_elements(&vec![
btreeset! {"a"},
btreeset! {" | or | identifier_name |
main.rs |
}
}
Ok(Index {
strings: strings.into_iter().collect(),
elements,
})
}
}
impl Index {
/// given a query expression in Dnf form, returns all matching indices
pub fn matching(&self, query: Dnf) -> Vec<usize> {
// lookup all strings and translate them into indices.
// if a single index does not match, the query can not match at all.
fn lookup(s: &BTreeSet<String>, t: &BTreeMap<&str, u32>) -> Option<BTreeSet<u32>> {
s.iter()
.map(|x| t.get(&x.as_ref()).cloned())
.collect::<Option<_>>()
}
// mapping from strings to indices
let strings = self
.strings
.iter()
.enumerate()
.map(|(i, s)| (s.as_ref(), i as u32))
.collect::<BTreeMap<&str, u32>>();
// translate the query from strings to indices
let query = query
.0
.iter()
.filter_map(|s| lookup(s, &strings))
.collect::<Vec<_>>();
// not a single query can possibly match, no need to iterate.
if query.is_empty() {
return Vec::new();
}
// check the remaining queries
self.elements
.iter()
.enumerate()
.filter_map(|(i, e)| {
if query.iter().any(|x| x.is_subset(e)) {
Some(i)
} else {
None
}
})
.collect()
}
pub fn as_elements<'a>(&'a self) -> Vec<BTreeSet<&'a str>> {
let strings = self.strings.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
self
.elements
.iter()
.map(|is| {
is.iter()
.map(|i| strings[*i as usize])
.collect::<BTreeSet<_>>()
})
.collect()
}
pub fn from_elements(e: &[BTreeSet<&str>]) -> Index {
let mut strings = BTreeSet::new();
for a in e.iter() {
strings.extend(a.iter().cloned());
}
let indices = strings
.iter()
.cloned()
.enumerate()
.map(|(i, e)| (e, i as u32))
.collect::<BTreeMap<_, _>>();
let elements = e
.iter()
.map(|a| a.iter().map(|e| indices[e]).collect::<BTreeSet<u32>>())
.collect::<Vec<_>>();
let strings = strings.into_iter().map(|x| x.to_owned()).collect();
Index { strings, elements }
}
}
/// a boolean expression, consisting of literals, union and intersection.
///
/// no attempt at simplification is made, except for flattening identical operators.
///
/// `And([And([a,b]),c])` will be flattened to `And([a,b,c])`.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum Expression {
Literal(String),
And(Vec<Expression>),
Or(Vec<Expression>),
}
/// prints the expression with a minimum of brackets
impl std::fmt::Display for Expression {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn child_to_string(x: &Expression) -> String {
if let Expression::Or(_) = x {
format!("({})", x)
} else {
x.to_string()
}
}
write!(
f,
"{}",
match self {
Expression::Literal(text) => text.clone(),
Expression::And(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("&"),
Expression::Or(es) => es.iter().map(child_to_string).collect::<Vec<_>>().join("|"),
}
)
}
}
/// Disjunctive normal form of a boolean query expression
///
/// https://en.wikipedia.org/wiki/Disjunctive_normal_form
///
/// This is a unique representation of a query using literals, union and intersection.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct Dnf(BTreeSet<BTreeSet<String>>);
impl Dnf {
fn literal(text: String) -> Self {
Self(btreeset![btreeset![text]])
}
/// converts the disjunctive normal form back to an expression
pub fn expression(self) -> Expression {
self.0
.into_iter()
.map(Dnf::and_expr)
.reduce(Expression::bitor)
.unwrap()
}
fn and_expr(v: BTreeSet<String>) -> Expression {
v.into_iter()
.map(Expression::literal)
.reduce(Expression::bitand)
.unwrap()
}
}
impl Expression {
pub fn literal(text: String) -> Self {
Self::Literal(text)
}
fn or(e: Vec<Expression>) -> Self {
Self::Or(
e.into_iter()
.flat_map(|c| match c {
Self::Or(es) => es,
x => vec![x],
})
.collect(),
)
}
fn and(e: Vec<Expression>) -> Self {
Self::And(
e.into_iter()
.flat_map(|c| match c {
Self::And(es) => es,
x => vec![x],
})
.collect(),
)
}
/// convert the expression into disjunctive normal form
///
/// careful, for some expressions this can have exponential runtime. E.g. the disjunctive normal form
/// of `(a | b) & (c | d) & (e | f) & ...` will be very complex.
pub fn dnf(self) -> Dnf {
match self {
Expression::Literal(x) => Dnf::literal(x),
Expression::Or(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitor).unwrap(),
Expression::And(es) => es.into_iter().map(|x| x.dnf()).reduce(Dnf::bitand).unwrap(),
}
}
}
impl BitOr for Expression {
type Output = Expression;
fn bitor(self, that: Self) -> Self {
Expression::or(vec![self, that])
}
}
impl BitAnd for Expression {
type Output = Expression;
fn bitand(self, that: Self) -> Self {
Expression::and(vec![self, that])
}
}
fn insert_unless_redundant(aa: &mut BTreeSet<BTreeSet<String>>, b: BTreeSet<String>) {
let mut to_remove = None;
for a in aa.iter() {
if a.is_subset(&b) {
// a is larger than b. E.g. x | x&y
// keep a, b is redundant
return;
} else if a.is_superset(&b) {
// a is smaller than b, E.g. x&y | x
// remove a, keep b
to_remove = Some(a.clone());
}
}
if let Some(r) = to_remove {
aa.remove(&r);
}
aa.insert(b);
}
impl From<Expression> for Dnf {
fn from(value: Expression) -> Self {
value.dnf()
}
}
impl From<Dnf> for Expression {
fn from(value: Dnf) -> Self {
value.expression()
}
}
impl BitAnd for Dnf {
type Output = Dnf;
fn bitand(self, that: Self) -> Self {
let mut rs = BTreeSet::new();
for a in self.0.iter() {
for b in that.0.iter() {
let mut r = BTreeSet::new();
r.extend(a.iter().cloned());
r.extend(b.iter().cloned());
insert_unless_redundant(&mut rs, r);
}
}
Dnf(rs)
}
}
impl BitOr for Dnf {
type Output = Dnf;
fn bitor(self, that: Self) -> Self {
let mut rs = self.0;
for b in that.0 {
insert_unless_redundant(&mut rs, b);
}
Dnf(rs)
}
}
fn l(x: &str) -> Expression {
Expression::literal(x.into())
}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck::{quickcheck, Arbitrary, Gen};
use rand::seq::SliceRandom;
#[test]
fn test_dnf_intersection_1() {
let a = l("a");
let b = l("b");
let c = l("c");
let expr = c & (a | b);
let c = expr.dnf().expression().to_string();
assert_eq!(c, "a&c|b&c");
}
#[test]
fn test_dnf_intersection_2() {
let a = l("a");
let b = l("b");
let c = l("c");
let d = l("d");
let expr = (d | c) & (b | a);
let c = expr.dnf().expression | {
return Err(serde::de::Error::custom("invalid string index"));
} | conditional_block |
|
xterm.rs | fn from(color: XtermColors) -> Self {
match color {
$(
XtermColors::$name => $xterm_num,
)*
}
}
}
}
$(
#[allow(missing_docs)]
pub struct $name;
impl crate::Color for $name {
const ANSI_FG: &'static str = concat!("\x1b[38;5;", stringify!($xterm_num), "m");
const ANSI_BG: &'static str = concat!("\x1b[48;5;", stringify!($xterm_num), "m");
const RAW_ANSI_BG: &'static str = concat!("48;5;", stringify!($xterm_num));
const RAW_ANSI_FG: &'static str = concat!("38;5;", stringify!($xterm_num));
#[doc(hidden)]
type DynEquivelant = dynamic::XtermColors;
#[doc(hidden)]
const DYN_EQUIVELANT: Self::DynEquivelant = dynamic::XtermColors::$name;
#[doc(hidden)]
fn into_dyncolors() -> crate::DynColors {
crate::DynColors::Xterm(dynamic::XtermColors::$name)
}
}
)*
};
}
xterm_colors! {
0 UserBlack (0,0,0)
1 UserRed (128,0,0)
2 UserGreen (0,128,0)
3 UserYellow (128,128,0)
4 UserBlue (0,0,128)
5 UserMagenta (128,0,128)
6 UserCyan (0,128,128)
7 UserWhite (192,192,192)
8 UserBrightBlack (128,128,128)
9 UserBrightRed (255,0,0)
10 UserBrightGreen (0,255,0)
11 UserBrightYellow (255,255,0)
12 UserBrightBlue (0,0,255)
13 UserBrightMagenta (255,0,255)
14 UserBrightCyan (0,255,255)
15 UserBrightWhite (255,255,255)
16 Black (0,0,0)
17 StratosBlue (0,0,95)
18 NavyBlue (0,0,135)
19 MidnightBlue (0,0,175)
20 DarkBlue (0,0,215)
21 Blue (0,0,255)
22 CamaroneGreen (0,95,0)
23 BlueStone (0,95,95)
24 OrientBlue (0,95,135)
25 EndeavourBlue (0,95,175)
26 ScienceBlue (0,95,215)
27 BlueRibbon (0,95,255)
28 JapaneseLaurel (0,135,0)
29 DeepSeaGreen (0,135,95)
30 Teal (0,135,135)
31 DeepCerulean (0,135,175)
32 LochmaraBlue (0,135,215)
33 AzureRadiance (0,135,255)
34 LightJapaneseLaurel (0,175,0)
35 Jade (0,175,95)
36 PersianGreen (0,175,135)
37 BondiBlue (0,175,175)
38 Cerulean (0,175,215)
39 LightAzureRadiance (0,175,255)
40 DarkGreen (0,215,0)
41 Malachite (0,215,95)
42 CaribbeanGreen (0,215,135)
43 LightCaribbeanGreen (0,215,175)
44 RobinEggBlue (0,215,215)
45 Aqua (0,215,255)
46 Green (0,255,0)
47 DarkSpringGreen (0,255,95)
48 SpringGreen (0,255,135)
49 LightSpringGreen (0,255,175)
50 BrightTurquoise (0,255,215)
51 Cyan (0,255,255)
52 Rosewood (95,0,0)
53 PompadourMagenta (95,0,95)
54 PigmentIndigo (95,0,135)
55 DarkPurple (95,0,175)
56 ElectricIndigo (95,0,215)
57 ElectricPurple (95,0,255)
58 VerdunGreen (95,95,0)
59 ScorpionOlive (95,95,95)
60 Lilac (95,95,135)
61 ScampiIndigo (95,95,175)
62 Indigo (95,95,215)
63 DarkCornflowerBlue (95,95,255)
64 DarkLimeade (95,135,0)
65 GladeGreen (95,135,95)
66 JuniperGreen (95,135,135)
67 HippieBlue (95,135,175)
68 HavelockBlue (95,135,215)
69 CornflowerBlue (95,135,255)
70 Limeade (95,175,0)
71 FernGreen (95,175,95)
72 SilverTree (95,175,135)
73 Tradewind (95,175,175)
74 ShakespeareBlue (95,175,215)
75 DarkMalibuBlue (95,175,255)
76 DarkBrightGreen (95,215,0)
77 DarkPastelGreen (95,215,95)
78 PastelGreen (95,215,135)
79 DownyTeal (95,215,175)
80 Viking (95,215,215)
81 MalibuBlue (95,215,255)
82 BrightGreen (95,255,0)
83 DarkScreaminGreen (95,255,95)
84 ScreaminGreen (95,255,135)
85 DarkAquamarine (95,255,175)
86 Aquamarine (95,255,215)
87 LightAquamarine (95,255,255)
88 Maroon (135,0,0)
89 DarkFreshEggplant (135,0,95)
90 LightFreshEggplant (135,0,135)
91 Purple (135,0,175)
92 ElectricViolet (135,0,215)
93 LightElectricViolet (135,0,255)
94 Brown (135,95,0)
95 CopperRose (135,95,95)
96 Strikemaster | impl From<XtermColors> for u8 { | random_line_split |
|
gtmaps.py | _z = min(reach_z)
nav_grid = np.zeros((c_x,c_z))
for i in range(nav_grid.shape[0]):
for j in range(nav_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
nav_grid[i,j] = 1
else:
nav_grid[i,j] = 0
#print("nav_grid after disabling every object ")
#print(nav_grid)
#sys.exit(0)
#print("Got nav_grid on empty room ",nav_grid)
obj_grids = {}
obj_grids['fixed_obstructions'] = nav_grid
#flr_grid = np.zeros_like(nav_grid)
for n in range(len(names)):
obj_grid = copy.copy(nav_grid)
#now enable just the object you want to map
print("Now enabling ",names[n], " back ")
event = env.step(dict({"action":"EnableObject", "objectId": names[n]}))
#getting reachable positions again
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
obj_center = [centers[n]['x'], centers[n]['z'] ]
for i in range(obj_grid.shape[0]):
for j in range(obj_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords and obj_grid[i,j] == 1:
obj_grid[i,j] = 0
'''
if int(m_x + i*0.25) == int(obj_center[0]) and int(m_z + j*0.25) == int(obj_center[1]):
print("object center matched for object ",names[n])
obj_grid[i,j] == 1
'''
obj_grids[names[n]] = obj_grid
#flr_grid = flr_grid + obj_grid
print("Disabling the object")
event = env.step(dict({"action":"DisableObject", "objectId": names[n]}))
for n in names:
print("Now enabling ",n, " back ")
event = env.step(dict({"action":"EnableObject", "objectId": n}))
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
flr_grid = np.zeros((c_x,c_z))
for i in range(flr_grid.shape[0]):
for j in range(flr_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
flr_grid[i,j] = 1
obj_grids['nav_space'] = flr_grid
#x = event.metadata['agent']['position']['x']
#y = event.metadata['agent']['position']['y']
#z = event.metadata['agent']['position']['z']
#obj_grids['agent_pos'] = {'x':x,'y':y,'z':z}
obj_grids['min_pos'] = {'mx':m_x,'mz':m_z}
return obj_grids
def prettyprint(mat,argmax = False, locator = [-1,-1,-1]):
| d = '>' #"\u2192" #right arrow
if locator[2]==270:
d = '^' #"\u2191" #up arrow
if locator[2]==90:
d = 'v' #"\u2193" #down arrow
if locator[2]==180:
d = '<' #"\u2190" #left arrow
print(d,end = '')
print(" ",end = '')
print(" --",repr(i))
#print(" ")
def surrounding_patch(agentloc, labeled_grid, R = 16, unreach_value = -1): #returns a visibility patch centered around the agent with radius R
#unreach_value = -1
mat = labeled_grid
position = agentloc
r=copy.copy(R)
init_shape = copy.copy(mat.shape)
p = copy.copy(position)
while position[0]-r<0: #append blank columns to the left of agent position
#print("Increasing columns to left ")
mat = np.insert(mat,0, unreach_value,axis=1)
r-=1
p[0]+=1
r=copy.copy(R)
while position[0]+r>init_shape[1]-1: #append blank columns to the right of the agent position
#print("Increasing columns to right")
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1)
r-=1
r=copy.copy(R)
while position[1]-r<0:
#print("Increasing rows above")
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
r-=1
p[1]+=1
r=copy.copy(R)
while position[1]+r>init_shape[0]-1:
#print("Increasing rows below")
mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank columns to the bottom of the agent position
r-=1
#print("mat shape ",mat.shape) #outputs (33x33)
return mat[p[1]-R:p[1]+R+1, p[0]-R:p[0]+R+1]
def target_navigation_map(o_grids, obj, agentloc, grid_size = 32, unk_id = 0,flr_id = 1, tar_id = 2, obs_id = 3, verbose = False):
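#labeling convention (inferred from the code below): the returned patch is a (grid_size x grid_size)
#map centered on the agent, with cells labeled unk_id (outside the known map), flr_id (navigable floor),
#tar_id (cells occupied by the target object) or obs_id (any other obstacle)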
m = o_grids['nav_space']
m = np.where(m==0,m,flr_id) #just to reinforce that the navigable spaces have the specified flr_id
#==========================
#if only asking about navigable space and not interested to navigate to a specific target object
if obj=="nav_space":
#print("Got nav_space in gtmaps line 200")
'''
for n in o_grids.keys():
if n!="nav_space":
m = np.where(o_grids[n]==0,m,obs_id)
'''
m = np.where(m!=0,m,obs_id)
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
#two different modes of searching (if exact id is passed it is sometimes helpful if multiple objects of same type- ex- multiple chairs)
if '|' not in obj:
searchkey = obj+'|'
else:
searchkey = obj
#==========================
#if only asking about navigating to a specific target object
for n in o_grids.keys():
if searchkey in n:
if verbose:
print("Got exact objectid ",n)
t = tar_id*o_grids[n]
m = np.where(t==0,m,tar_id)
'''
else:
o = obs_id*o_grids[n]
m = np.where(o==0,m,obs_id)
'''
#identify obstacle locations
m = np.where(m!=0,m,obs_id)
#center the map according to agent location - agentloc
#3d position supplied by simulator need to be swapped in grid order - z gets first position and x gets 2nd position
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
| for j in range(mat.shape[1]):
d = repr(j)
if j<10:
d = '0'+d
print(d,end = '')
print(" ",end = '')
print(" ")
print(" ")
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
d = 0
if argmax:
d = np.argmax(mat[i,j,:])
#d = np.max(mat[i,j,:])
else:
d = repr(int(mat[i,j]))
if locator[0]==i and locator[1]==j:
if locator[2]==0:
| identifier_body |
gtmaps.py | m_z = min(reach_z)
nav_grid = np.zeros((c_x,c_z))
for i in range(nav_grid.shape[0]):
for j in range(nav_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
nav_grid[i,j] = 1
else:
nav_grid[i,j] = 0
#print("nav_grid after disabling every object ")
#print(nav_grid)
#sys.exit(0)
#print("Got nav_grid on empty room ",nav_grid)
obj_grids = {}
obj_grids['fixed_obstructions'] = nav_grid
#flr_grid = np.zeros_like(nav_grid)
for n in range(len(names)):
obj_grid = copy.copy(nav_grid)
#now enable just the object you want to map
print("Now enabling ",names[n], " back ")
event = env.step(dict({"action":"EnableObject", "objectId": names[n]}))
#getting reachable positions again
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
obj_center = [centers[n]['x'], centers[n]['z'] ]
for i in range(obj_grid.shape[0]):
for j in range(obj_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords and obj_grid[i,j] == 1:
obj_grid[i,j] = 0
'''
if int(m_x + i*0.25) == int(obj_center[0]) and int(m_z + j*0.25) == int(obj_center[1]):
print("object center matched for object ",names[n])
obj_grid[i,j] == 1
'''
obj_grids[names[n]] = obj_grid
#flr_grid = flr_grid + obj_grid
print("Disabling the object")
event = env.step(dict({"action":"DisableObject", "objectId": names[n]}))
for n in names:
print("Now enabling ",n, " back ")
event = env.step(dict({"action":"EnableObject", "objectId": n}))
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
flr_grid = np.zeros((c_x,c_z))
for i in range(flr_grid.shape[0]):
for j in range(flr_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
flr_grid[i,j] = 1
obj_grids['nav_space'] = flr_grid
#x = event.metadata['agent']['position']['x']
#y = event.metadata['agent']['position']['y']
#z = event.metadata['agent']['position']['z']
#obj_grids['agent_pos'] = {'x':x,'y':y,'z':z}
obj_grids['min_pos'] = {'mx':m_x,'mz':m_z}
return obj_grids
def prettyprint(mat,argmax = False, locator = [-1,-1,-1]):
for j in range(mat.shape[1]):
d = repr(j)
if j<10:
d = '0'+d
print(d,end = '')
print(" ",end = '')
print(" ")
print(" ")
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
d = 0
if argmax:
d = np.argmax(mat[i,j,:])
#d = np.max(mat[i,j,:])
else:
d = repr(int(mat[i,j]))
if locator[0]==i and locator[1]==j:
if locator[2]==0:
d = '>' #"\u2192" #right arrow
if locator[2]==270:
d = '^' #"\u2191" #up arrow
if locator[2]==90:
d = 'v' #"\u2193" #down arrow
| print(" ",end = '')
print(" --",repr(i))
#print(" ")
def surrounding_patch(agentloc, labeled_grid, R = 16, unreach_value = -1): #returns a visibility patch centered around the agent with radius R
#unreach_value = -1
mat = labeled_grid
position = agentloc
r=copy.copy(R)
init_shape = copy.copy(mat.shape)
p = copy.copy(position)
while position[0]-r<0: #append blank columns to the left of agent position
#print("Increasing columns to left ")
mat = np.insert(mat,0, unreach_value,axis=1)
r-=1
p[0]+=1
r=copy.copy(R)
while position[0]+r>init_shape[1]-1: #append blank columns to the right of the agent position
#print("Increasing columns to right")
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1)
r-=1
r=copy.copy(R)
while position[1]-r<0:
#print("Increasing rows above")
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
r-=1
p[1]+=1
r=copy.copy(R)
while position[1]+r>init_shape[0]-1:
#print("Increasing rows below")
mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank columns to the bottom of the agent position
r-=1
#print("mat shape ",mat.shape) #outputs (33x33)
return mat[p[1]-R:p[1]+R+1, p[0]-R:p[0]+R+1]
def target_navigation_map(o_grids, obj, agentloc, grid_size = 32, unk_id = 0,flr_id = 1, tar_id = 2, obs_id = 3, verbose = False):
m = o_grids['nav_space']
m = np.where(m==0,m,flr_id) #just to reinforce that the navigable spaces have the specified flr_id
#==========================
#if only asking about navigable space and not interested to navigate to a specific target object
if obj=="nav_space":
#print("Got nav_space in gtmaps line 200")
'''
for n in o_grids.keys():
if n!="nav_space":
m = np.where(o_grids[n]==0,m,obs_id)
'''
m = np.where(m!=0,m,obs_id)
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
#two different modes of searching (if exact id is passed it is sometimes helpful if multiple objects of same type- ex- multiple chairs)
if '|' not in obj:
searchkey = obj+'|'
else:
searchkey = obj
#==========================
#if only asking about navigating to a specific target object
for n in o_grids.keys():
if searchkey in n:
if verbose:
print("Got exact objectid ",n)
t = tar_id*o_grids[n]
m = np.where(t==0,m,tar_id)
'''
else:
o = obs_id*o_grids[n]
m = np.where(o==0,m,obs_id)
'''
#identify obstacle locations
m = np.where(m!=0,m,obs_id)
#center the map according to agent location - agentloc
#3d position supplied by simulator need to be swapped in grid order - z gets first position and x gets 2nd position
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
| if locator[2]==180:
d = '<' #"\u2190" #left arrow
print(d,end = '')
| random_line_split |
gtmaps.py | append black columns to the left of agent position
#print("Increasing columns to left ")
mat = np.insert(mat,0, unreach_value,axis=1)
r-=1
p[0]+=1
r=copy.copy(R)
while position[0]+r>init_shape[1]-1: #append blank columns to the right of the agent position
#print("Increasing columns to right")
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1)
r-=1
r=copy.copy(R)
while position[1]-r<0:
#print("Increasing rows above")
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
r-=1
p[1]+=1
r=copy.copy(R)
while position[1]+r>init_shape[0]-1:
#print("Increasing rows below")
mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank columns to the bottom of the agent position
r-=1
#print("mat shape ",mat.shape) #outputs (33x33)
return mat[p[1]-R:p[1]+R+1, p[0]-R:p[0]+R+1]
def target_navigation_map(o_grids, obj, agentloc, grid_size = 32, unk_id = 0,flr_id = 1, tar_id = 2, obs_id = 3, verbose = False):
m = o_grids['nav_space']
m = np.where(m==0,m,flr_id) #just to reinforce that the navigable spaces have the specified flr_id
#==========================
#if only asking about navigable space and not interested to navigate to a specific target object
if obj=="nav_space":
#print("Got nav_space in gtmaps line 200")
'''
for n in o_grids.keys():
if n!="nav_space":
m = np.where(o_grids[n]==0,m,obs_id)
'''
m = np.where(m!=0,m,obs_id)
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
#two different modes of searching (if exact id is passed it is sometimes helpful if multiple objects of same type- ex- multiple chairs)
if '|' not in obj:
searchkey = obj+'|'
else:
searchkey = obj
#==========================
#if only asking about navigating to a specific target object
for n in o_grids.keys():
if searchkey in n:
if verbose:
print("Got exact objectid ",n)
t = tar_id*o_grids[n]
m = np.where(t==0,m,tar_id)
'''
else:
o = obs_id*o_grids[n]
m = np.where(o==0,m,obs_id)
'''
#identify obstacle locations
m = np.where(m!=0,m,obs_id)
#center the map according to agent location - agentloc
#3d position supplied by simulator need to be swapped in grid order - z gets first position and x gets 2nd position
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids['min_pos']['mx'])/0.25)]
if verbose:
print("Got grid agent location from agentloc ",agentloc)
m = surrounding_patch(agentloc, m, R=int(grid_size/2), unreach_value = unk_id)
return m
def manual_label(room): #function for manually correcting wrong maps (computed automatically)
#fname = '/home/hom/Desktop/ai2thor/mapping/gcdata/'+repr(room)+'.npy'
fname = '/ai2thor/mapper/data/targets/'+repr(room)+'.npy'
o_grids = np.load(fname,allow_pickle = 'TRUE').item()
print("The fixed obstructions map")
prettyprint(o_grids['fixed_obstructions']) #grid with 0s and 1s showing navigable spaces with all objects in the room removed
def exists(o_grids,obj):
for n in o_grids.keys():
if obj+'|' in n:
return True
return False
obj = ""
while True:
obj = input("Enter the name of the object you want to insert ")
if obj=='space':
p = input("Space on top(t),bottom(b),left(l) or right (r) ?")
num = input("Number of tiles (eg-1,2,3) ? ")
unreach_value = 0
m_x,m_z = o_grids['min_pos']['mx'], o_grids['min_pos']['mz']
for n in o_grids.keys():
mat = o_grids[n]
try:
isarray = mat.shape
except:
#the final element in the dictionary is not a numpy array its stores the min and max grid position in the map
#so skip this
continue
for _ in range(int(num)):
if p=='t':
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
if p=='b':
mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank columns to the bottom of the agent position
if p=='l':
mat = np.insert(mat,0, unreach_value,axis=1) #append blank columns to left of agent position
if p=='r':
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1) #append blank columns to the right of the agent position
o_grids[n] = mat
if p=='t':
o_grids['min_pos'] = {'mx':m_x-int(num)*0.25,'mz':m_z}
if p=='l':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z-int(num)*0.25}
if p=='b':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
if p=='r':
o_grids['min_pos'] = {'mx':m_x,'mz':m_z}
save = input("Save data ? (y/n)")
if save=='y':
np.save(fname,o_grids) #overwrites the existing one
continue
if obj=='bye':
break
if obj!='space' or obj!='bye':
if exists(o_grids,obj):
overwrite = input("This name is already taken want to overwrite ? (y/n) ")
mat = np.zeros_like(o_grids['fixed_obstructions'])
for n in o_grids.keys():
if obj+'|' in n:
print("Found ",n)
mat+=o_grids[n]
prettyprint(mat)
if overwrite=='n':
continue
if overwrite=='y':
obj = input("In that case enter the exact objectid by searching from above ")
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
print("You can enter the corners like this ...")
print("<top left corner column number, top left corner row number _ bottom right corner column number, bottom right corner row number>")
corners = input("Enter the corners (eg- 0,0_7,8) ")
c1,c2 = corners.split('_')
[c1x,c1y], [c2x,c2y] = c1.split(','), c2.split(',')
print("Got coordinates ",c1x,c1y,c2x,c2y)
try:
if '|' in obj:
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
except:
print("Error occured with accessing key")
if '|' in obj:
o_grids[obj] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
else:
o_grids[obj+'|'] = np.zeros_like(o_grids['fixed_obstructions'])
o_grids[obj+'|'][int(c1y):int(c2y)+1,int(c1x):int(c2x)+1] = 1.0
print("Modified ",obj)
if '|' in obj:
| prettyprint(o_grids[obj]) | conditional_block |
|
gtmaps.py | (env,event):
#sometimes in a room there are fixed objects which cannot be removed from scene using disable command
#so need to go near them to check distance and then map them
return
def gtmap(env,event):
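#summary (inferred from the steps below): builds ground-truth occupancy grids by first disabling
#every object to obtain the free-space grid, then re-enabling one object at a time and recording
#which previously reachable cells it blocks as that object's footprint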
objs = event.metadata['objects']
print("There are a total of ",len(objs)," objects in the scene")
names = [o['objectId'] for o in objs]
centers = [o['position'] for o in objs]
print("Now disabling every object in the scene ")
for n in names:
event = env.step(dict({"action":"DisableObject", "objectId": n}))
#getting reachable positions for the empty room
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
#print("got reachable positions ",reach_pos)
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
#getting navigable spaces in the empty room (only walls should be blocking now)
c_x = int(math.fabs((max(reach_x)-min(reach_x))/0.25))+1 #0.25 is the grid movement size
c_z = int(math.fabs((max(reach_z)-min(reach_z))/0.25))+1
print("c_x ",c_x," c_z ",c_z)
m_x = min(reach_x)
m_z = min(reach_z)
nav_grid = np.zeros((c_x,c_z))
for i in range(nav_grid.shape[0]):
for j in range(nav_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
nav_grid[i,j] = 1
else:
nav_grid[i,j] = 0
#print("nav_grid after disabling every object ")
#print(nav_grid)
#sys.exit(0)
#print("Got nav_grid on empty room ",nav_grid)
obj_grids = {}
obj_grids['fixed_obstructions'] = nav_grid
#flr_grid = np.zeros_like(nav_grid)
for n in range(len(names)):
obj_grid = copy.copy(nav_grid)
#now enable just the object you want to map
print("Now enabling ",names[n], " back ")
event = env.step(dict({"action":"EnableObject", "objectId": names[n]}))
#getting reachable positions again
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
obj_center = [centers[n]['x'], centers[n]['z'] ]
for i in range(obj_grid.shape[0]):
for j in range(obj_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords and obj_grid[i,j] == 1:
obj_grid[i,j] = 0
'''
if int(m_x + i*0.25) == int(obj_center[0]) and int(m_z + j*0.25) == int(obj_center[1]):
print("object center matched for object ",names[n])
obj_grid[i,j] == 1
'''
obj_grids[names[n]] = obj_grid
#flr_grid = flr_grid + obj_grid
print("Disabling the object")
event = env.step(dict({"action":"DisableObject", "objectId": names[n]}))
for n in names:
print("Now enabling ",n, " back ")
event = env.step(dict({"action":"EnableObject", "objectId": n}))
event = env.step(dict(action = 'GetReachablePositions'))
reach_pos = event.metadata['actionReturn'] #stores all reachable positions for the current scene
reach_x = [i['x'] for i in reach_pos]
reach_z = [i['z'] for i in reach_pos]
coords = [[i['x'],i['z']] for i in reach_pos]
flr_grid = np.zeros((c_x,c_z))
for i in range(flr_grid.shape[0]):
for j in range(flr_grid.shape[1]):
if [m_x + i*0.25, m_z + j*0.25] in coords:
flr_grid[i,j] = 1
obj_grids['nav_space'] = flr_grid
#x = event.metadata['agent']['position']['x']
#y = event.metadata['agent']['position']['y']
#z = event.metadata['agent']['position']['z']
#obj_grids['agent_pos'] = {'x':x,'y':y,'z':z}
obj_grids['min_pos'] = {'mx':m_x,'mz':m_z}
return obj_grids
def prettyprint(mat,argmax = False, locator = [-1,-1,-1]):
for j in range(mat.shape[1]):
d = repr(j)
if j<10:
d = '0'+d
print(d,end = '')
print(" ",end = '')
print(" ")
print(" ")
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
d = 0
if argmax:
d = np.argmax(mat[i,j,:])
#d = np.max(mat[i,j,:])
else:
d = repr(int(mat[i,j]))
if locator[0]==i and locator[1]==j:
if locator[2]==0:
d = '>' #"\u2192" #right arrow
if locator[2]==270:
d = '^' #"\u2191" #up arrow
if locator[2]==90:
d = 'v' #"\u2193" #down arrow
if locator[2]==180:
d = '<' #"\u2190" #left arrow
print(d,end = '')
print(" ",end = '')
print(" --",repr(i))
#print(" ")
def surrounding_patch(agentloc, labeled_grid, R = 16, unreach_value = -1): #returns a visibility patch centered around the agent with radius R
#unreach_value = -1
mat = labeled_grid
position = agentloc
r=copy.copy(R)
init_shape = copy.copy(mat.shape)
p = copy.copy(position)
while position[0]-r<0: #append blank columns to the left of agent position
#print("Increasing columns to left ")
mat = np.insert(mat,0, unreach_value,axis=1)
r-=1
p[0]+=1
r=copy.copy(R)
while position[0]+r>init_shape[1]-1: #append blank columns to the right of the agent position
#print("Increasing columns to right")
mat = np.insert(mat,mat.shape[1], unreach_value,axis=1)
r-=1
r=copy.copy(R)
while position[1]-r<0:
#print("Increasing rows above")
mat = np.insert(mat,0, unreach_value,axis=0) #append blank rows to the top of the agent position
r-=1
p[1]+=1
r=copy.copy(R)
while position[1]+r>init_shape[0]-1:
#print("Increasing rows below")
mat = np.insert(mat,mat.shape[0], unreach_value,axis=0) #append blank columns to the bottom of the agent position
r-=1
#print("mat shape ",mat.shape) #outputs (33x33)
return mat[p[1]-R:p[1]+R+1, p[0]-R:p[0]+R+1]
def target_navigation_map(o_grids, obj, agentloc, grid_size = 32, unk_id = 0,flr_id = 1, tar_id = 2, obs_id = 3, verbose = False):
m = o_grids['nav_space']
m = np.where(m==0,m,flr_id) #just to reinforce that the navigable spaces have the specified flr_id
#==========================
#if only asking about navigable space and not interested to navigate to a specific target object
if obj=="nav_space":
#print("Got nav_space in gtmaps line 200")
'''
for n in o_grids.keys():
if n!="nav_space":
m = np.where(o_grids[n]==0,m,obs_id)
'''
m = np.where(m!=0,m,obs_id)
agentloc = [int((agentloc['z']-o_grids['min_pos']['mz'])/0.25), int((agentloc['x']-o_grids | touchmap | identifier_name |
|
corpus_wikipedia.py | ', #'/home/arne/devel/ML/data/corpora/WIKIPEDIA/wikipedia-23886057.csv',#'/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', # '/home/arne/devel/ML/data/corpora/SICK/sick_train/SICK_train.txt',
'The path to the SICK train data file.')
#tf.flags.DEFINE_string(
# 'corpus_data_input_test', '/home/arne/devel/ML/data/corpora/SICK/sick_test_annotated/SICK_test_annotated.txt',
# 'The path to the SICK test data file.')
tf.flags.DEFINE_string(
'corpus_data_output_dir', '/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia',#'data/corpora/wikipedia',
'The path to the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'corpus_data_output_fn', 'WIKIPEDIA',
'Base filename of the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'init_dict_filename', None, #'/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia/process_sentence7/WIKIPEDIA_articles1000_maxdepth10',#None, #'data/nlp/spacy/dict',
'The path to embedding and mapping files (without extension) to reuse them for the new corpus.')
tf.flags.DEFINE_integer(
'max_articles', 10000,
'How many articles to read.')
tf.flags.DEFINE_integer(
'article_batch_size', 250,
'How many articles to process in one batch.')
tf.flags.DEFINE_integer(
'max_depth', 10,
'The maximal depth of the sequence trees.')
tf.flags.DEFINE_integer(
'count_threshold', 2,
'Change data types which occur less then count_threshold times to UNKNOWN')
#tf.flags.DEFINE_integer(
# 'sample_count', 14,
# 'Amount of samples per tree. This excludes the correct tree.')
tf.flags.DEFINE_string(
'sentence_processor', 'process_sentence7', #'process_sentence8',#'process_sentence3',
'Defines which NLP features are taken into the embedding trees.')
tf.flags.DEFINE_string(
'tree_mode',
None,
#'aggregate',
#'sequence',
'How to structure the tree. '
+ '"sequence" -> parents point to next token, '
+ '"aggregate" -> parents point to an added, artificial token (TERMINATOR) in the end of the token sequence,'
+ 'None -> use parsed dependency tree')
FLAGS = tf.flags.FLAGS
def articles_from_csv_reader(filename, max_articles=100, skip=0):
csv.field_size_limit(maxsize)
print('parse', max_articles, 'articles...')
with open(filename, 'rb') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=['article-id', 'content'])
i = 0
for row in reader:
if skip > 0:
skip -= 1
continue
if i >= max_articles:
break
if (i * 10) % max_articles == 0:
# sys.stdout.write("progress: %d%% \r" % (i * 100 / max_rows))
# sys.stdout.flush()
print('read article:', row['article-id'], '... ', i * 100 / max_articles, '%')
i += 1
content = row['content'].decode('utf-8')
# cut the title (is separated by two spaces from main content)
yield content.split(' ', 1)[1]
@tools.fn_timer
def convert_wikipedia(in_filename, out_filename, init_dict_filename, sentence_processor, parser, #mapping, vecs,
max_articles=10000, max_depth=10, batch_size=100, tree_mode=None):
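    # overview (inferred from the function body): parses the wikipedia csv in batches into sequence
    # trees, reuses an existing embedding/mapping dict when init_dict_filename is given, dumps
    # data/parent/depth arrays per batch and finally merges the per-depth children index files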
parent_dir = os.path.abspath(os.path.join(out_filename, os.pardir))
out_base_name = ntpath.basename(out_filename)
if not os.path.isfile(out_filename+'.data') \
or not os.path.isfile(out_filename + '.parent')\
or not os.path.isfile(out_filename + '.mapping')\
or not os.path.isfile(out_filename + '.vecs') \
or not os.path.isfile(out_filename + '.depth') \
or not os.path.isfile(out_filename + '.count'):
if parser is None:
print('load spacy ...')
parser = spacy.load('en')
parser.pipeline = [parser.tagger, parser.entity, parser.parser]
if init_dict_filename is not None:
print('initialize vecs and mapping from files ...')
vecs, mapping = corpus.create_or_read_dict(init_dict_filename, parser.vocab)
print('dump embeddings to: ' + out_filename + '.vecs ...')
vecs.dump(out_filename + '.vecs')
else:
vecs, mapping = corpus.create_or_read_dict(out_filename, parser.vocab)
# parse
seq_data, seq_parents, seq_depths, mapping = parse_articles(out_filename, parent_dir, in_filename, parser,
mapping, sentence_processor, max_depth,
max_articles, batch_size, tree_mode)
# sort and filter vecs/mappings by counts
seq_data, mapping, vecs, counts = preprocessing.sort_embeddings(seq_data, mapping, vecs,
count_threshold=FLAGS.count_threshold)
# write out vecs, mapping and tsv containing strings
corpus.write_dict(out_path, mapping, vecs, parser.vocab, constants.vocab_manual)
print('dump data to: ' + out_path + '.data ...')
seq_data.dump(out_path + '.data')
print('dump counts to: ' + out_path + '.count ...')
counts.dump(out_path + '.count')
else:
print('load depths from file: ' + out_filename + '.depth ...')
seq_depths = np.load(out_filename+'.depth')
preprocessing.calc_depths_collected(out_filename, parent_dir, max_depth, seq_depths)
preprocessing.rearrange_children_indices(out_filename, parent_dir, max_depth, max_articles, batch_size)
#preprocessing.concat_children_indices(out_filename, parent_dir, max_depth)
print('load and concatenate child indices batches ...')
for current_depth in range(1, max_depth + 1):
if not os.path.isfile(out_filename + '.children.depth' + str(current_depth)):
preprocessing.merge_numpy_batch_files(out_base_name + '.children.depth' + str(current_depth), parent_dir)
return parser
def parse_articles(out_path, parent_dir, in_filename, parser, mapping, sentence_processor, max_depth, max_articles, batch_size, tree_mode):
| tree_mode=tree_mode,
child_idx_offset=child_idx_offset)
print('dump data, parents, depths and child indices for offset=' + str(offset) + ' ...')
current_seq_data.dump(out_path + '.data.batch' + str(offset))
current_seq_parents.dump(out_path + '.parent.batch' + str(offset))
current_seq_depths.dump(out_path + '.depth.batch' + str(offset))
current_idx_tuples.dump(out_path + '.children.batch' + str(offset))
child_idx_offset += len(current_seq_data)
#if careful:
# print('dump mappings to: ' + out_path + '.mapping ...')
# with open(out_path + '.mapping', "wb") as f:
# pickle.dump(mapping, f)
#else:
# current_seq_data = np.load(out_path + '.data.batch' + str(offset))
# child_idx_offset += len(current_seq_data)
seq_data = preprocessing.merge_numpy_batch_files(out_fn+'.data', parent_dir)
seq_parents = preprocessing.merge_numpy_batch_files(out_fn + '.parent', parent_dir)
seq_depths = preprocessing.merge_numpy_batch_files(out_fn + '.depth', parent_dir)
print('parsed data size: '+str(len(seq_data)))
return seq_data, seq_parents, seq_depths, mapping
if __name__ == '__main__':
sentence_processor = getattr(preprocessing, FLAGS.sentence_processor)
out_dir = os.path.abspath(os.path.join(FLAGS.corpus_data_output_dir, sentence_processor.func_name))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
out_path = os.path.join(out_dir, FLAGS.corpus_data_output_fn)
if FLAGS.tree_mode is not None:
out_path =
corpus_wikipedia.py | ', #'/home/arne/devel/ML/data/corpora/WIKIPEDIA/wikipedia-23886057.csv',#'/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', # '/home/arne/devel/ML/data/corpora/SICK/sick_train/SICK_train.txt',
'The path to the SICK train data file.')
#tf.flags.DEFINE_string(
# 'corpus_data_input_test', '/home/arne/devel/ML/data/corpora/SICK/sick_test_annotated/SICK_test_annotated.txt',
# 'The path to the SICK test data file.')
tf.flags.DEFINE_string(
'corpus_data_output_dir', '/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia',#'data/corpora/wikipedia',
'The path to the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'corpus_data_output_fn', 'WIKIPEDIA',
'Base filename of the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'init_dict_filename', None, #'/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia/process_sentence7/WIKIPEDIA_articles1000_maxdepth10',#None, #'data/nlp/spacy/dict',
'The path to embedding and mapping files (without extension) to reuse them for the new corpus.')
tf.flags.DEFINE_integer(
'max_articles', 10000,
'How many articles to read.')
tf.flags.DEFINE_integer(
'article_batch_size', 250,
'How many articles to process in one batch.')
tf.flags.DEFINE_integer(
'max_depth', 10,
'The maximal depth of the sequence trees.')
tf.flags.DEFINE_integer(
'count_threshold', 2,
'Change data types which occur less than count_threshold times to UNKNOWN')
#tf.flags.DEFINE_integer(
# 'sample_count', 14,
# 'Amount of samples per tree. This excludes the correct tree.')
tf.flags.DEFINE_string(
'sentence_processor', 'process_sentence7', #'process_sentence8',#'process_sentence3',
'Defines which NLP features are taken into the embedding trees.')
tf.flags.DEFINE_string(
'tree_mode',
None,
#'aggregate',
#'sequence',
'How to structure the tree. '
+ '"sequence" -> parents point to next token, '
+ '"aggregate" -> parents point to an added, artificial token (TERMINATOR) in the end of the token sequence,'
+ 'None -> use parsed dependency tree')
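# Illustrative sketch (not part of the original corpus code): how the three
# tree_mode settings described above could translate a token sequence into
# parent pointers. The helper names below are hypothetical and only clarify
# the flag's help text; the real tree construction happens in the preprocessing module.
def _sequence_parents(num_tokens):
    # 'sequence': every token points to the next one; the last token is the root (-1).
    return [i + 1 if i + 1 < num_tokens else -1 for i in range(num_tokens)]
def _aggregate_parents(num_tokens):
    # 'aggregate': every token points to an artificial TERMINATOR token appended
    # at the end of the sequence; the TERMINATOR itself is the root (-1).
    return [num_tokens] * num_tokens + [-1]
# tree_mode=None keeps the parent pointers from the spaCy dependency parse instead.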
FLAGS = tf.flags.FLAGS
def articles_from_csv_reader(filename, max_articles=100, skip=0):
csv.field_size_limit(maxsize)
print('parse', max_articles, 'articles...')
with open(filename, 'rb') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=['article-id', 'content'])
i = 0
for row in reader:
if skip > 0:
skip -= 1
continue
if i >= max_articles:
break
if (i * 10) % max_articles == 0:
# sys.stdout.write("progress: %d%% \r" % (i * 100 / max_rows))
# sys.stdout.flush()
print('read article:', row['article-id'], '... ', i * 100 / max_articles, '%')
i += 1
content = row['content'].decode('utf-8')
# cut off the title (it is separated from the main content by two spaces)
yield content.split(' ', 1)[1]
@tools.fn_timer
def convert_wikipedia(in_filename, out_filename, init_dict_filename, sentence_processor, parser, #mapping, vecs,
max_articles=10000, max_depth=10, batch_size=100, tree_mode=None):
parent_dir = os.path.abspath(os.path.join(out_filename, os.pardir))
out_base_name = ntpath.basename(out_filename)
if not os.path.isfile(out_filename+'.data') \
or not os.path.isfile(out_filename + '.parent')\
or not os.path.isfile(out_filename + '.mapping')\
or not os.path.isfile(out_filename + '.vecs') \
or not os.path.isfile(out_filename + '.depth') \
or not os.path.isfile(out_filename + '.count'):
if parser is None:
print('load spacy ...')
parser = spacy.load('en')
parser.pipeline = [parser.tagger, parser.entity, parser.parser]
if init_dict_filename is not None:
print('initialize vecs and mapping from files ...')
vecs, mapping = corpus.create_or_read_dict(init_dict_filename, parser.vocab)
print('dump embeddings to: ' + out_filename + '.vecs ...')
vecs.dump(out_filename + '.vecs')
else:
vecs, mapping = corpus.create_or_read_dict(out_filename, parser.vocab)
# parse
seq_data, seq_parents, seq_depths, mapping = parse_articles(out_filename, parent_dir, in_filename, parser,
mapping, sentence_processor, max_depth,
max_articles, batch_size, tree_mode)
# sort and filter vecs/mappings by counts
seq_data, mapping, vecs, counts = preprocessing.sort_embeddings(seq_data, mapping, vecs,
count_threshold=FLAGS.count_threshold)
# write out vecs, mapping and tsv containing strings
corpus.write_dict(out_path, mapping, vecs, parser.vocab, constants.vocab_manual)
print('dump data to: ' + out_path + '.data ...')
seq_data.dump(out_path + '.data')
print('dump counts to: ' + out_path + '.count ...')
counts.dump(out_path + '.count')
else:
print('load depths from file: ' + out_filename + '.depth ...')
seq_depths = np.load(out_filename+'.depth')
preprocessing.calc_depths_collected(out_filename, parent_dir, max_depth, seq_depths)
preprocessing.rearrange_children_indices(out_filename, parent_dir, max_depth, max_articles, batch_size)
#preprocessing.concat_children_indices(out_filename, parent_dir, max_depth)
print('load and concatenate child indices batches ...')
for current_depth in range(1, max_depth + 1):
if not os.path.isfile(out_filename + '.children.depth' + str(current_depth)):
preprocessing.merge_numpy_batch_files(out_base_name + '.children.depth' + str(current_depth), parent_dir)
return parser
def parse_articles(out_path, parent_dir, in_filename, parser, mapping, sentence_processor, max_depth, max_articles, batch_size, tree_mode):
out_fn = ntpath.basename(out_path)
print('parse articles ...')
child_idx_offset = 0
for offset in range(0, max_articles, batch_size):
# all or none: otherwise the mapping lacks entries!
#if not careful or not os.path.isfile(out_path + '.data.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.parent.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.depth.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.children.batch' + str(offset)):
current_seq_data, current_seq_parents, current_idx_tuples, current_seq_depths = preprocessing.read_data_2(
articles_from_csv_reader,
sentence_processor, parser, mapping,
args={
'filename': in_filename,
'max_articles': min(batch_size, max_articles),
'skip': offset
},
max_depth=max_depth,
batch_size=batch_size,
tree_mode=tree_mode,
child_idx_offset=child_idx_offset)
print('dump data, parents, depths and child indices for offset=' + str(offset) + ' ...')
current_seq_data.dump(out_path + '.data.batch' + str(offset))
current_seq_parents.dump(out_path + '.parent.batch' + str(offset))
current_seq_depths.dump(out_path + '.depth.batch' + str(offset))
current_idx_tuples.dump(out_path + '.children.batch' + str(offset))
child_idx_offset += len(current_seq_data)
#if careful:
# print('dump mappings to: ' + out_path + '.mapping ...')
# with open(out_path + '.mapping', "wb") as f:
# pickle.dump(mapping, f)
#else:
# current_seq_data = np.load(out_path + '.data.batch' + str(offset))
# child_idx_offset += len(current_seq_data)
seq_data = preprocessing.merge_numpy_batch_files(out_fn+'.data', parent_dir)
seq_parents = preprocessing.merge_numpy_batch_files(out_fn + '.parent', parent_dir)
seq_depths = preprocessing.merge_numpy_batch_files(out_fn + '.depth', parent_dir)
print('parsed data size: '+str(len(seq_data)))
return seq_data, seq_parents, seq_depths, mapping
if __name__ == '__main__':
sentence_processor = getattr(preprocessing, FLAGS.sentence_processor)
out_dir = os.path.abspath(os.path.join(FLAGS.corpus_data_output_dir, sentence_processor.func_name))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
out_path = os.path.join(out_dir, FLAGS.corpus_data_output_fn)
if FLAGS.tree_mode is not None:
out_path
corpus_wikipedia.py | ', #'/home/arne/devel/ML/data/corpora/WIKIPEDIA/wikipedia-23886057.csv',#'/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', # '/home/arne/devel/ML/data/corpora/SICK/sick_train/SICK_train.txt',
'The path to the SICK train data file.')
#tf.flags.DEFINE_string(
# 'corpus_data_input_test', '/home/arne/devel/ML/data/corpora/SICK/sick_test_annotated/SICK_test_annotated.txt',
# 'The path to the SICK test data file.')
tf.flags.DEFINE_string(
'corpus_data_output_dir', '/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia',#'data/corpora/wikipedia',
'The path to the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'corpus_data_output_fn', 'WIKIPEDIA',
'Base filename of the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'init_dict_filename', None, #'/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia/process_sentence7/WIKIPEDIA_articles1000_maxdepth10',#None, #'data/nlp/spacy/dict',
'The path to embedding and mapping files (without extension) to reuse them for the new corpus.')
tf.flags.DEFINE_integer(
'max_articles', 10000,
'How many articles to read.')
tf.flags.DEFINE_integer(
'article_batch_size', 250,
'How many articles to process in one batch.')
tf.flags.DEFINE_integer(
'max_depth', 10,
'The maximal depth of the sequence trees.')
tf.flags.DEFINE_integer(
'count_threshold', 2,
'Change data types which occur less than count_threshold times to UNKNOWN')
#tf.flags.DEFINE_integer(
# 'sample_count', 14,
# 'Amount of samples per tree. This excludes the correct tree.')
tf.flags.DEFINE_string(
'sentence_processor', 'process_sentence7', #'process_sentence8',#'process_sentence3',
'Defines which NLP features are taken into the embedding trees.')
tf.flags.DEFINE_string(
'tree_mode',
None,
#'aggregate',
#'sequence',
'How to structure the tree. '
+ '"sequence" -> parents point to next token, '
+ '"aggregate" -> parents point to an added, artificial token (TERMINATOR) in the end of the token sequence,'
+ 'None -> use parsed dependency tree')
FLAGS = tf.flags.FLAGS
def articles_from_csv_reader(filename, max_articles=100, skip=0):
csv.field_size_limit(maxsize)
print('parse', max_articles, 'articles...')
with open(filename, 'rb') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=['article-id', 'content'])
i = 0
for row in reader:
if skip > 0:
skip -= 1
continue
if i >= max_articles:
break
if (i * 10) % max_articles == 0:
# sys.stdout.write("progress: %d%% \r" % (i * 100 / max_rows))
# sys.stdout.flush()
print('read article:', row['article-id'], '... ', i * 100 / max_articles, '%')
i += 1
content = row['content'].decode('utf-8')
# cut off the title (it is separated from the main content by two spaces)
yield content.split(' ', 1)[1]
@tools.fn_timer
def convert_wikipedia(in_filename, out_filename, init_dict_filename, sentence_processor, parser, #mapping, vecs,
max_articles=10000, max_depth=10, batch_size=100, tree_mode=None):
parent_dir = os.path.abspath(os.path.join(out_filename, os.pardir))
out_base_name = ntpath.basename(out_filename)
if not os.path.isfile(out_filename+'.data') \
or not os.path.isfile(out_filename + '.parent')\
or not os.path.isfile(out_filename + '.mapping')\
or not os.path.isfile(out_filename + '.vecs') \
or not os.path.isfile(out_filename + '.depth') \
or not os.path.isfile(out_filename + '.count'):
if parser is None:
print('load spacy ...')
parser = spacy.load('en')
parser.pipeline = [parser.tagger, parser.entity, parser.parser]
if init_dict_filename is not None:
print('initialize vecs and mapping from files ...')
vecs, mapping = corpus.create_or_read_dict(init_dict_filename, parser.vocab)
print('dump embeddings to: ' + out_filename + '.vecs ...')
vecs.dump(out_filename + '.vecs')
else:
vecs, mapping = corpus.create_or_read_dict(out_filename, parser.vocab)
# parse
seq_data, seq_parents, seq_depths, mapping = parse_articles(out_filename, parent_dir, in_filename, parser,
mapping, sentence_processor, max_depth,
max_articles, batch_size, tree_mode)
# sort and filter vecs/mappings by counts
seq_data, mapping, vecs, counts = preprocessing.sort_embeddings(seq_data, mapping, vecs,
count_threshold=FLAGS.count_threshold)
# write out vecs, mapping and tsv containing strings
corpus.write_dict(out_path, mapping, vecs, parser.vocab, constants.vocab_manual)
print('dump data to: ' + out_path + '.data ...')
seq_data.dump(out_path + '.data')
print('dump counts to: ' + out_path + '.count ...')
counts.dump(out_path + '.count')
else:
print('load depths from file: ' + out_filename + '.depth ...')
seq_depths = np.load(out_filename+'.depth')
preprocessing.calc_depths_collected(out_filename, parent_dir, max_depth, seq_depths)
preprocessing.rearrange_children_indices(out_filename, parent_dir, max_depth, max_articles, batch_size)
#preprocessing.concat_children_indices(out_filename, parent_dir, max_depth)
print('load and concatenate child indices batches ...')
for current_depth in range(1, max_depth + 1):
if not os.path.isfile(out_filename + '.children.depth' + str(current_depth)):
preprocessing.merge_numpy_batch_files(out_base_name + '.children.depth' + str(current_depth), parent_dir)
return parser
def parse_articles(out_path, parent_dir, in_filename, parser, mapping, sentence_processor, max_depth, max_articles, batch_size, tree_mode):
out_fn = ntpath.basename(out_path)
print('parse articles ...')
child_idx_offset = 0
for offset in range(0, max_articles, batch_size):
# all or none: otherwise the mapping lacks entries!
#if not careful or not os.path.isfile(out_path + '.data.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.parent.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.depth.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.children.batch' + str(offset)):
current_seq_data, current_seq_parents, current_idx_tuples, current_seq_depths = preprocessing.read_data_2(
articles_from_csv_reader,
sentence_processor, parser, mapping,
args={
'filename': in_filename,
'max_articles': min(batch_size, max_articles),
'skip': offset
},
max_depth=max_depth,
batch_size=batch_size,
tree_mode=tree_mode,
child_idx_offset=child_idx_offset)
print('dump data, parents, depths and child indices for offset=' + str(offset) + ' ...')
current_seq_data.dump(out_path + '.data.batch' + str(offset))
current_seq_parents.dump(out_path + '.parent.batch' + str(offset))
current_seq_depths.dump(out_path + '.depth.batch' + str(offset))
current_idx_tuples.dump(out_path + '.children.batch' + str(offset))
child_idx_offset += len(current_seq_data)
#if careful:
# print('dump mappings to: ' + out_path + '.mapping ...')
# with open(out_path + '.mapping', "wb") as f:
# pickle.dump(mapping, f)
#else:
# current_seq_data = np.load(out_path + '.data.batch' + str(offset))
# child_idx_offset += len(current_seq_data)
seq_data = preprocessing.merge_numpy_batch_files(out_fn+'.data', parent_dir)
seq_parents = preprocessing.merge_numpy_batch_files(out_fn + '.parent', parent_dir)
seq_depths = preprocessing.merge_numpy_batch_files(out_fn + '.depth', parent_dir)
print('parsed data size: '+str(len(seq_data)))
return seq_data, seq_parents, seq_depths, mapping
if __name__ == '__main__':
sentence_processor = getattr(preprocessing, FLAGS.sentence_processor)
out_dir = os.path.abspath(os.path.join(FLAGS.corpus_data_output_dir, sentence_processor.func_name))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
out_path = os.path.join(out_dir, FLAGS.corpus_data_output_fn)
if FLAGS.tree_mode is not None:
out_path =
corpus_wikipedia.py | ', #'/home/arne/devel/ML/data/corpora/WIKIPEDIA/wikipedia-23886057.csv',#'/home/arne/devel/ML/data/corpora/WIKIPEDIA/documents_utf8_filtered_20pageviews.csv', # '/home/arne/devel/ML/data/corpora/SICK/sick_train/SICK_train.txt',
'The path to the SICK train data file.')
#tf.flags.DEFINE_string(
# 'corpus_data_input_test', '/home/arne/devel/ML/data/corpora/SICK/sick_test_annotated/SICK_test_annotated.txt',
# 'The path to the SICK test data file.')
tf.flags.DEFINE_string(
'corpus_data_output_dir', '/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia',#'data/corpora/wikipedia',
'The path to the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'corpus_data_output_fn', 'WIKIPEDIA',
'Base filename of the output data files (samples, embedding vectors, mappings).')
tf.flags.DEFINE_string(
'init_dict_filename', None, #'/media/arne/WIN/Users/Arne/ML/data/corpora/wikipedia/process_sentence7/WIKIPEDIA_articles1000_maxdepth10',#None, #'data/nlp/spacy/dict',
'The path to embedding and mapping files (without extension) to reuse them for the new corpus.')
tf.flags.DEFINE_integer(
'max_articles', 10000,
'How many articles to read.')
tf.flags.DEFINE_integer(
'article_batch_size', 250,
'How many articles to process in one batch.')
tf.flags.DEFINE_integer(
'max_depth', 10,
'The maximal depth of the sequence trees.')
tf.flags.DEFINE_integer(
'count_threshold', 2,
'Change data types which occur less than count_threshold times to UNKNOWN')
#tf.flags.DEFINE_integer(
# 'sample_count', 14,
# 'Amount of samples per tree. This excludes the correct tree.')
tf.flags.DEFINE_string(
'sentence_processor', 'process_sentence7', #'process_sentence8',#'process_sentence3',
'Defines which NLP features are taken into the embedding trees.')
tf.flags.DEFINE_string(
'tree_mode',
None,
#'aggregate',
#'sequence',
'How to structure the tree. '
+ '"sequence" -> parents point to next token, '
+ '"aggregate" -> parents point to an added, artificial token (TERMINATOR) in the end of the token sequence,'
+ 'None -> use parsed dependency tree')
FLAGS = tf.flags.FLAGS
def articles_from_csv_reader(filename, max_articles=100, skip=0):
csv.field_size_limit(maxsize)
print('parse', max_articles, 'articles...')
with open(filename, 'rb') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=['article-id', 'content'])
i = 0
for row in reader:
if skip > 0:
skip -= 1
continue
if i >= max_articles:
break
if (i * 10) % max_articles == 0:
# sys.stdout.write("progress: %d%% \r" % (i * 100 / max_rows))
# sys.stdout.flush()
print('read article:', row['article-id'], '... ', i * 100 / max_articles, '%')
i += 1
content = row['content'].decode('utf-8')
# cut off the title (it is separated from the main content by two spaces)
yield content.split(' ', 1)[1]
@tools.fn_timer
def convert_wikipedia(in_filename, out_filename, init_dict_filename, sentence_processor, parser, #mapping, vecs,
max_articles=10000, max_depth=10, batch_size=100, tree_mode=None):
parent_dir = os.path.abspath(os.path.join(out_filename, os.pardir))
out_base_name = ntpath.basename(out_filename)
if not os.path.isfile(out_filename+'.data') \
or not os.path.isfile(out_filename + '.parent')\
or not os.path.isfile(out_filename + '.mapping')\
or not os.path.isfile(out_filename + '.vecs') \
or not os.path.isfile(out_filename + '.depth') \
or not os.path.isfile(out_filename + '.count'):
if parser is None:
print('load spacy ...')
parser = spacy.load('en')
parser.pipeline = [parser.tagger, parser.entity, parser.parser]
if init_dict_filename is not None:
print('initialize vecs and mapping from files ...')
vecs, mapping = corpus.create_or_read_dict(init_dict_filename, parser.vocab)
print('dump embeddings to: ' + out_filename + '.vecs ...')
vecs.dump(out_filename + '.vecs')
else:
vecs, mapping = corpus.create_or_read_dict(out_filename, parser.vocab)
# parse
seq_data, seq_parents, seq_depths, mapping = parse_articles(out_filename, parent_dir, in_filename, parser,
mapping, sentence_processor, max_depth,
max_articles, batch_size, tree_mode)
# sort and filter vecs/mappings by counts
seq_data, mapping, vecs, counts = preprocessing.sort_embeddings(seq_data, mapping, vecs,
count_threshold=FLAGS.count_threshold)
# write out vecs, mapping and tsv containing strings
corpus.write_dict(out_path, mapping, vecs, parser.vocab, constants.vocab_manual)
print('dump data to: ' + out_path + '.data ...')
seq_data.dump(out_path + '.data')
print('dump counts to: ' + out_path + '.count ...')
counts.dump(out_path + '.count')
else:
print('load depths from file: ' + out_filename + '.depth ...')
seq_depths = np.load(out_filename+'.depth')
preprocessing.calc_depths_collected(out_filename, parent_dir, max_depth, seq_depths)
preprocessing.rearrange_children_indices(out_filename, parent_dir, max_depth, max_articles, batch_size)
#preprocessing.concat_children_indices(out_filename, parent_dir, max_depth)
print('load and concatenate child indices batches ...')
for current_depth in range(1, max_depth + 1):
if not os.path.isfile(out_filename + '.children.depth' + str(current_depth)):
preprocessing.merge_numpy_batch_files(out_base_name + '.children.depth' + str(current_depth), parent_dir)
return parser
def parse_articles(out_path, parent_dir, in_filename, parser, mapping, sentence_processor, max_depth, max_articles, batch_size, tree_mode):
out_fn = ntpath.basename(out_path)
print('parse articles ...')
child_idx_offset = 0
for offset in range(0, max_articles, batch_size):
# all or none: otherwise the mapping lacks entries!
#if not careful or not os.path.isfile(out_path + '.data.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.parent.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.depth.batch' + str(offset)) \
# or not os.path.isfile(out_path + '.children.batch' + str(offset)):
current_seq_data, current_seq_parents, current_idx_tuples, current_seq_depths = preprocessing.read_data_2(
articles_from_csv_reader,
sentence_processor, parser, mapping,
args={
'filename': in_filename,
'max_articles': min(batch_size, max_articles),
'skip': offset
},
max_depth=max_depth,
batch_size=batch_size,
tree_mode=tree_mode,
child_idx_offset=child_idx_offset)
print('dump data, parents, depths and child indices for offset=' + str(offset) + ' ...')
current_seq_data.dump(out_path + '.data.batch' + str(offset))
current_seq_parents.dump(out_path + '.parent.batch' + str(offset))
current_seq_depths.dump(out_path + '.depth.batch' + str(offset))
current_idx_tuples.dump(out_path + '.children.batch' + str(offset))
child_idx_offset += len(current_seq_data)
#if careful:
# print('dump mappings to: ' + out_path + '.mapping ...')
# with open(out_path + '.mapping', "wb") as f:
# pickle.dump(mapping, f)
#else:
# current_seq_data = np.load(out_path + '.data.batch' + str(offset))
# child_idx_offset += len(current_seq_data)
seq_data = preprocessing.merge_numpy_batch_files(out_fn+'.data', parent_dir)
seq_parents = preprocessing.merge_numpy_batch_files(out_fn + '.parent', parent_dir)
seq_depths = preprocessing.merge_numpy_batch_files(out_fn + '.depth', parent_dir)
print('parsed data size: '+str(len(seq_data)))
return seq_data, seq_parents, seq_depths, mapping
if __name__ == '__main__':
sentence_processor = getattr(preprocessing, FLAGS.sentence_processor)
out_dir = os.path.abspath(os.path.join(FLAGS.corpus_data_output_dir, sentence_processor.func_name))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
out_path = os.path.join(out_dir, FLAGS.corpus_data_output_fn)
if FLAGS.tree_mode is not None:
out_path
sync.go | () error {
if w.holeSize > 0 {
err := w.writer.PunchHole(w.offset, w.holeSize)
if err == nil {
w.offset += w.holeSize
w.holeSize = 0
}
return err
}
if len(w.buf) == 0 {
return nil
}
n, err := w.writer.WriteAt(w.buf, w.offset)
if err != nil {
return err
}
w.buf = w.buf[:0]
w.offset += int64(n)
return nil
}
func (w *batchingWriter) prepareWrite() error {
if w.holeSize > 0 {
err := w.Flush()
if err != nil {
return err
}
}
if cap(w.buf) < w.maxSize {
buf := make([]byte, w.maxSize)
copy(buf, w.buf)
w.buf = buf[:len(w.buf)]
}
return nil
}
func (w *batchingWriter) Write(p []byte) (int, error) {
if err := w.prepareWrite(); err != nil {
return 0, err
}
written := 0
for len(p) > 0 {
if len(p) >= w.maxSize && len(w.buf) == 0 {
residue := len(p) % w.maxSize
n, err := w.writer.WriteAt(p[:len(p)-residue], w.offset)
written += n
w.offset += int64(n)
if err != nil {
return written, err
}
p = p[n:]
} else {
n := copy(w.buf[len(w.buf):w.maxSize], p)
w.buf = w.buf[:len(w.buf)+n]
if len(w.buf) == w.maxSize {
n1, err := w.writer.WriteAt(w.buf, w.offset)
w.offset += int64(n1)
n2 := n1 - (len(w.buf) - n)
w.buf = w.buf[:0]
if n2 < 0 {
n2 = 0
}
written += n2
if err != nil {
return written, err
}
} else {
written += n
}
p = p[n:]
}
}
return written, nil
}
func (w *batchingWriter) ReadFrom(src io.Reader) (int64, error) {
if err := w.prepareWrite(); err != nil {
return 0, err
}
var read int64
for {
n, err := src.Read(w.buf[len(w.buf):w.maxSize])
read += int64(n)
w.buf = w.buf[:len(w.buf)+n]
if err == io.EOF {
return read, nil
}
if err != nil {
return read, err
}
if len(w.buf) == w.maxSize {
err = w.Flush()
if err != nil {
return read, err
}
}
}
}
func (w *batchingWriter) WriteHole(size int64) error {
if w.holeSize == 0 {
err := w.Flush()
if err != nil {
return err
}
}
w.holeSize += size
return nil
}
func (w *batchingWriter) Seek(offset int64, whence int) (int64, error) {
var o int64
if w.holeSize > 0 {
o = w.offset + w.holeSize
} else {
o = w.offset + int64(len(w.buf))
}
switch whence {
case io.SeekStart:
// no-op
case io.SeekCurrent:
offset = o + offset
case io.SeekEnd:
var err error
offset, err = w.writer.Seek(offset, whence)
if err != nil {
return offset, err
}
}
if offset != o {
err := w.Flush()
w.offset = offset
if err != nil {
return offset, err
}
}
return offset, nil
}
type counting struct {
count int64
}
type CountingReader struct {
io.Reader
counting
}
type CountingWriteCloser struct {
io.WriteCloser
counting
}
func (p *hashPool) get() (h hash.Hash) {
l := len(*p)
if l > 0 {
l--
h = (*p)[l]
(*p)[l] = nil
*p = (*p)[:l]
h.Reset()
} else {
h, _ = blake2b.New512(nil)
}
return
}
func (p *hashPool) put(h hash.Hash) {
*p = append(*p, h)
}
func (c *counting) Count() int64 {
return c.count
}
func (r *CountingReader) Read(buf []byte) (n int, err error) {
n, err = r.Reader.Read(buf)
r.count += int64(n)
return
}
func (r *CountingWriteCloser) Write(buf []byte) (n int, err error) {
n, err = r.WriteCloser.Write(buf)
r.count += int64(n)
return
}
func (n *node) next() *node {
if n.parent != nil {
if n.idx < len(n.parent.children)-1 {
return n.parent.children[n.idx+1]
}
nn := n.parent.next()
if nn != nil {
return nn.children[0]
}
}
return nil
}
func (n *node) childReady(child *node, pool *hashPool, h hash.Hash) {
if n.hash == nil {
if h != nil {
h.Reset()
n.hash = h
} else {
n.hash = pool.get()
}
} else {
if h != nil {
pool.put(h)
}
}
n.hash.Write(child.sum)
if child.idx == len(n.children)-1 {
n.sum = n.hash.Sum(n.buf[:0])
if n.parent != nil {
n.parent.childReady(n, pool, n.hash)
}
n.hash = nil
}
}
func (b *base) buffer(size int64) []byte {
if int64(cap(b.buf)) < size {
b.buf = make([]byte, size+1)
}
return b.buf[:size]
}
func (t *tree) build(offset, length int64, order, level int) *node {
n := &node{}
level--
if level > 0 {
n.children = make([]*node, order)
b := offset
for i := 0; i < order; i++ {
l := offset + (length * int64(i+1) / int64(order)) - b
child := t.build(b, l, order, level)
child.parent = n
child.idx = i
n.children[i] = child
b += l
}
} else {
n.size = int(length)
}
return n
}
func (t *tree) first(n *node) *node {
if len(n.children) > 0 {
return t.first(n.children[0])
}
return n
}
func (t *tree) calc(verbose bool, progressListener ProgressListener) error {
var targetBlockSize int64 = DefTargetBlockSize
for t.size/targetBlockSize > 1048576 {
targetBlockSize <<= 1
}
blocks := t.size / targetBlockSize
levels := 8
order := 1
if blocks > 0 {
var d int64 = -1
for {
b := int64(math.Pow(float64(order+1), 7))
bs := t.size / b
if bs < targetBlockSize/2 {
break
}
nd := targetBlockSize - bs
if nd < 0 {
nd = -nd
}
// log.Printf("b: %d, d: %d\n", b, nd)
if d != -1 && nd > d {
break
}
d = nd
order++
}
if order < 2 {
order = 2
levels = int(math.Log2(float64(blocks))) + 1
}
} else {
levels = 1
order = 1
}
bs := int(float64(t.size) / math.Pow(float64(order), float64(levels-1)))
if verbose {
log.Printf("Levels: %d, order: %d, target block size: %d, block size: %d\n", levels, order, targetBlockSize, bs)
}
t.root = t.build(0, t.size, order, levels)
rr := int64(0)
var reader io.Reader
if t.useBuffer {
var bufSize int
for bufSize = DefTargetBlockSize; bufSize < bs; bufSize <<= 1 {
}
reader = bufio.NewReaderSize(t.reader, bufSize)
} else {
reader = t.reader
}
var pool hashPool = make([]hash.Hash, 0, levels)
workItems := make([]*workCtx, 2)
for i := range workItems {
workItems[i | Flush | identifier_name |
|
sync.go | }
written += n2
if err != nil {
return written, err
}
} else {
written += n
}
p = p[n:]
}
}
return written, nil
}
func (w *batchingWriter) ReadFrom(src io.Reader) (int64, error) {
if err := w.prepareWrite(); err != nil {
return 0, err
}
var read int64
for {
n, err := src.Read(w.buf[len(w.buf):w.maxSize])
read += int64(n)
w.buf = w.buf[:len(w.buf)+n]
if err == io.EOF {
return read, nil
}
if err != nil {
return read, err
}
if len(w.buf) == w.maxSize {
err = w.Flush()
if err != nil {
return read, err
}
}
}
}
func (w *batchingWriter) WriteHole(size int64) error {
if w.holeSize == 0 {
err := w.Flush()
if err != nil {
return err
}
}
w.holeSize += size
return nil
}
func (w *batchingWriter) Seek(offset int64, whence int) (int64, error) {
var o int64
if w.holeSize > 0 {
o = w.offset + w.holeSize
} else {
o = w.offset + int64(len(w.buf))
}
switch whence {
case io.SeekStart:
// no-op
case io.SeekCurrent:
offset = o + offset
case io.SeekEnd:
var err error
offset, err = w.writer.Seek(offset, whence)
if err != nil {
return offset, err
}
}
if offset != o {
err := w.Flush()
w.offset = offset
if err != nil {
return offset, err
}
}
return offset, nil
}
type counting struct {
count int64
}
type CountingReader struct {
io.Reader
counting
}
type CountingWriteCloser struct {
io.WriteCloser
counting
}
func (p *hashPool) get() (h hash.Hash) {
l := len(*p)
if l > 0 {
l--
h = (*p)[l]
(*p)[l] = nil
*p = (*p)[:l]
h.Reset()
} else {
h, _ = blake2b.New512(nil)
}
return
}
func (p *hashPool) put(h hash.Hash) {
*p = append(*p, h)
}
func (c *counting) Count() int64 {
return c.count
}
func (r *CountingReader) Read(buf []byte) (n int, err error) {
n, err = r.Reader.Read(buf)
r.count += int64(n)
return
}
func (r *CountingWriteCloser) Write(buf []byte) (n int, err error) {
n, err = r.WriteCloser.Write(buf)
r.count += int64(n)
return
}
func (n *node) next() *node {
if n.parent != nil {
if n.idx < len(n.parent.children)-1 {
return n.parent.children[n.idx+1]
}
nn := n.parent.next()
if nn != nil {
return nn.children[0]
}
}
return nil
}
func (n *node) childReady(child *node, pool *hashPool, h hash.Hash) {
if n.hash == nil {
if h != nil {
h.Reset()
n.hash = h
} else {
n.hash = pool.get()
}
} else {
if h != nil {
pool.put(h)
}
}
n.hash.Write(child.sum)
if child.idx == len(n.children)-1 {
n.sum = n.hash.Sum(n.buf[:0])
if n.parent != nil {
n.parent.childReady(n, pool, n.hash)
}
n.hash = nil
}
}
func (b *base) buffer(size int64) []byte {
if int64(cap(b.buf)) < size {
b.buf = make([]byte, size+1)
}
return b.buf[:size]
}
func (t *tree) build(offset, length int64, order, level int) *node {
n := &node{}
level--
if level > 0 {
n.children = make([]*node, order)
b := offset
for i := 0; i < order; i++ {
l := offset + (length * int64(i+1) / int64(order)) - b
child := t.build(b, l, order, level)
child.parent = n
child.idx = i
n.children[i] = child
b += l
}
} else {
n.size = int(length)
}
return n
}
func (t *tree) first(n *node) *node {
if len(n.children) > 0 {
return t.first(n.children[0])
}
return n
}
func (t *tree) calc(verbose bool, progressListener ProgressListener) error {
var targetBlockSize int64 = DefTargetBlockSize
for t.size/targetBlockSize > 1048576 {
targetBlockSize <<= 1
}
blocks := t.size / targetBlockSize
levels := 8
order := 1
if blocks > 0 {
var d int64 = -1
for {
b := int64(math.Pow(float64(order+1), 7))
bs := t.size / b
if bs < targetBlockSize/2 {
break
}
nd := targetBlockSize - bs
if nd < 0 {
nd = -nd
}
// log.Printf("b: %d, d: %d\n", b, nd)
if d != -1 && nd > d {
break
}
d = nd
order++
}
if order < 2 {
order = 2
levels = int(math.Log2(float64(blocks))) + 1
}
} else {
levels = 1
order = 1
}
bs := int(float64(t.size) / math.Pow(float64(order), float64(levels-1)))
if verbose {
log.Printf("Levels: %d, order: %d, target block size: %d, block size: %d\n", levels, order, targetBlockSize, bs)
}
t.root = t.build(0, t.size, order, levels)
rr := int64(0)
var reader io.Reader
if t.useBuffer {
var bufSize int
for bufSize = DefTargetBlockSize; bufSize < bs; bufSize <<= 1 {
}
reader = bufio.NewReaderSize(t.reader, bufSize)
} else {
reader = t.reader
}
var pool hashPool = make([]hash.Hash, 0, levels)
workItems := make([]*workCtx, 2)
for i := range workItems {
workItems[i] = &workCtx{
buf: make([]byte, bs+1),
avail: make(chan struct{}, 1),
hashReady: make(chan struct{}, 1),
}
workItems[i].hash, _ = blake2b.New512(nil)
workItems[i].avail <- struct{}{}
}
go func() {
idx := 0
for {
wi := workItems[idx]
<-wi.hashReady
if wi.n == nil {
break
}
if wi.n.parent != nil {
wi.n.parent.childReady(wi.n, &pool, nil)
}
wi.avail <- struct{}{}
idx++
if idx >= len(workItems) {
idx = 0
}
}
}()
workIdx := 0
if progressListener != nil {
progressListener.Start(t.size)
}
for n := t.first(t.root); n != nil; n = n.next() {
if n.size == 0 {
panic("Leaf node size is zero")
}
wi := workItems[workIdx]
<-wi.avail
b := wi.buf[:n.size]
r, err := io.ReadFull(reader, b)
if err != nil {
return fmt.Errorf("in calc at %d (expected %d, read %d): %w", rr, len(b), r, err)
}
rr += int64(r)
if progressListener != nil {
progressListener.Update(rr)
}
wi.n = n
go func() {
wi.hash.Write(b)
wi.n.sum = wi.hash.Sum(wi.n.buf[:0])
wi.hash.Reset()
wi.hashReady <- struct{}{}
}()
workIdx++
if workIdx >= len(workItems) {
workIdx = 0
}
}
// wait until fully processed
for i := range workItems {
<-workItems[i].avail
}
// finish the goroutine
workItems | n2 = 0 | random_line_split |
|
sync.go | , error) | err := w.Flush()
w.offset = offset
if err != nil {
return offset, err
}
}
return offset, nil
}
type counting struct {
count int64
}
type CountingReader struct {
io.Reader
counting
}
type CountingWriteCloser struct {
io.WriteCloser
counting
}
func (p *hashPool) get() (h hash.Hash) {
l := len(*p)
if l > 0 {
l--
h = (*p)[l]
(*p)[l] = nil
*p = (*p)[:l]
h.Reset()
} else {
h, _ = blake2b.New512(nil)
}
return
}
func (p *hashPool) put(h hash.Hash) {
*p = append(*p, h)
}
func (c *counting) Count() int64 {
return c.count
}
func (r *CountingReader) Read(buf []byte) (n int, err error) {
n, err = r.Reader.Read(buf)
r.count += int64(n)
return
}
func (r *CountingWriteCloser) Write(buf []byte) (n int, err error) {
n, err = r.WriteCloser.Write(buf)
r.count += int64(n)
return
}
func (n *node) next() *node {
if n.parent != nil {
if n.idx < len(n.parent.children)-1 {
return n.parent.children[n.idx+1]
}
nn := n.parent.next()
if nn != nil {
return nn.children[0]
}
}
return nil
}
func (n *node) childReady(child *node, pool *hashPool, h hash.Hash) {
if n.hash == nil {
if h != nil {
h.Reset()
n.hash = h
} else {
n.hash = pool.get()
}
} else {
if h != nil {
pool.put(h)
}
}
n.hash.Write(child.sum)
if child.idx == len(n.children)-1 {
n.sum = n.hash.Sum(n.buf[:0])
if n.parent != nil {
n.parent.childReady(n, pool, n.hash)
}
n.hash = nil
}
}
func (b *base) buffer(size int64) []byte {
if int64(cap(b.buf)) < size {
b.buf = make([]byte, size+1)
}
return b.buf[:size]
}
func (t *tree) build(offset, length int64, order, level int) *node {
n := &node{}
level--
if level > 0 {
n.children = make([]*node, order)
b := offset
for i := 0; i < order; i++ {
l := offset + (length * int64(i+1) / int64(order)) - b
child := t.build(b, l, order, level)
child.parent = n
child.idx = i
n.children[i] = child
b += l
}
} else {
n.size = int(length)
}
return n
}
func (t *tree) first(n *node) *node {
if len(n.children) > 0 {
return t.first(n.children[0])
}
return n
}
func (t *tree) calc(verbose bool, progressListener ProgressListener) error {
var targetBlockSize int64 = DefTargetBlockSize
for t.size/targetBlockSize > 1048576 {
targetBlockSize <<= 1
}
blocks := t.size / targetBlockSize
levels := 8
order := 1
if blocks > 0 {
var d int64 = -1
for {
b := int64(math.Pow(float64(order+1), 7))
bs := t.size / b
if bs < targetBlockSize/2 {
break
}
nd := targetBlockSize - bs
if nd < 0 {
nd = -nd
}
// log.Printf("b: %d, d: %d\n", b, nd)
if d != -1 && nd > d {
break
}
d = nd
order++
}
if order < 2 {
order = 2
levels = int(math.Log2(float64(blocks))) + 1
}
} else {
levels = 1
order = 1
}
bs := int(float64(t.size) / math.Pow(float64(order), float64(levels-1)))
if verbose {
log.Printf("Levels: %d, order: %d, target block size: %d, block size: %d\n", levels, order, targetBlockSize, bs)
}
t.root = t.build(0, t.size, order, levels)
rr := int64(0)
var reader io.Reader
if t.useBuffer {
var bufSize int
for bufSize = DefTargetBlockSize; bufSize < bs; bufSize <<= 1 {
}
reader = bufio.NewReaderSize(t.reader, bufSize)
} else {
reader = t.reader
}
var pool hashPool = make([]hash.Hash, 0, levels)
workItems := make([]*workCtx, 2)
for i := range workItems {
workItems[i] = &workCtx{
buf: make([]byte, bs+1),
avail: make(chan struct{}, 1),
hashReady: make(chan struct{}, 1),
}
workItems[i].hash, _ = blake2b.New512(nil)
workItems[i].avail <- struct{}{}
}
go func() {
idx := 0
for {
wi := workItems[idx]
<-wi.hashReady
if wi.n == nil {
break
}
if wi.n.parent != nil {
wi.n.parent.childReady(wi.n, &pool, nil)
}
wi.avail <- struct{}{}
idx++
if idx >= len(workItems) {
idx = 0
}
}
}()
workIdx := 0
if progressListener != nil {
progressListener.Start(t.size)
}
for n := t.first(t.root); n != nil; n = n.next() {
if n.size == 0 {
panic("Leaf node size is zero")
}
wi := workItems[workIdx]
<-wi.avail
b := wi.buf[:n.size]
r, err := io.ReadFull(reader, b)
if err != nil {
return fmt.Errorf("in calc at %d (expected %d, read %d): %w", rr, len(b), r, err)
}
rr += int64(r)
if progressListener != nil {
progressListener.Update(rr)
}
wi.n = n
go func() {
wi.hash.Write(b)
wi.n.sum = wi.hash.Sum(wi.n.buf[:0])
wi.hash.Reset()
wi.hashReady <- struct{}{}
}()
workIdx++
if workIdx >= len(workItems) {
workIdx = 0
}
}
// wait until fully processed
for i := range workItems {
<-workItems[i].avail
}
// finish the goroutine
workItems[workIdx].n = nil
workItems[workIdx].hashReady <- struct{}{}
if rr < t.size {
return fmt.Errorf("read less data (%d) than expected (%d)", rr, t.size)
}
return nil
}
func readHeader(reader io.Reader) (size int64, err error) {
buf := make([]byte, len(hdrMagic)+8)
_, err = io.ReadFull(reader, buf)
if err != nil {
return
}
if string(buf[:len(hdrMagic)]) != hdrMagic {
err = ErrInvalidFormat
return
}
size = int64(binary.LittleEndian.Uint64(buf[len(hdrMagic):]))
return
}
func writeHeader(writer io.Writer, size int64) (err error) {
buf := make([]byte, len(hdrMagic)+8)
copy(buf, hdrMagic)
binary.LittleEndian.PutUint64(buf[len(hdrMagic):], uint64(size))
_, err = writer.Write(buf)
return
}
func Source(reader io.ReadSeeker, size int64, cmdReader io.Reader, cmdWriter io.Writer, useBuffer bool, verbose bool, calcPl, syncPl ProgressListener) (err error) {
err = writeHeader(cmdWriter, size)
if err != nil {
return
}
var remoteSize int64
remoteSize, err = readHeader(cmdReader)
if err != nil | {
var o int64
if w.holeSize > 0 {
o = w.offset + w.holeSize
} else {
o = w.offset + int64(len(w.buf))
}
switch whence {
case io.SeekStart:
// no-op
case io.SeekCurrent:
offset = o + offset
case io.SeekEnd:
var err error
offset, err = w.writer.Seek(offset, whence)
if err != nil {
return offset, err
}
}
if offset != o { | identifier_body |
sync.go |
}
}
}
func (w *batchingWriter) WriteHole(size int64) error {
if w.holeSize == 0 {
err := w.Flush()
if err != nil {
return err
}
}
w.holeSize += size
return nil
}
func (w *batchingWriter) Seek(offset int64, whence int) (int64, error) {
var o int64
if w.holeSize > 0 {
o = w.offset + w.holeSize
} else {
o = w.offset + int64(len(w.buf))
}
switch whence {
case io.SeekStart:
// no-op
case io.SeekCurrent:
offset = o + offset
case io.SeekEnd:
var err error
offset, err = w.writer.Seek(offset, whence)
if err != nil {
return offset, err
}
}
if offset != o {
err := w.Flush()
w.offset = offset
if err != nil {
return offset, err
}
}
return offset, nil
}
type counting struct {
count int64
}
type CountingReader struct {
io.Reader
counting
}
type CountingWriteCloser struct {
io.WriteCloser
counting
}
func (p *hashPool) get() (h hash.Hash) {
l := len(*p)
if l > 0 {
l--
h = (*p)[l]
(*p)[l] = nil
*p = (*p)[:l]
h.Reset()
} else {
h, _ = blake2b.New512(nil)
}
return
}
func (p *hashPool) put(h hash.Hash) {
*p = append(*p, h)
}
func (c *counting) Count() int64 {
return c.count
}
func (r *CountingReader) Read(buf []byte) (n int, err error) {
n, err = r.Reader.Read(buf)
r.count += int64(n)
return
}
func (r *CountingWriteCloser) Write(buf []byte) (n int, err error) {
n, err = r.WriteCloser.Write(buf)
r.count += int64(n)
return
}
func (n *node) next() *node {
if n.parent != nil {
if n.idx < len(n.parent.children)-1 {
return n.parent.children[n.idx+1]
}
nn := n.parent.next()
if nn != nil {
return nn.children[0]
}
}
return nil
}
func (n *node) childReady(child *node, pool *hashPool, h hash.Hash) {
if n.hash == nil {
if h != nil {
h.Reset()
n.hash = h
} else {
n.hash = pool.get()
}
} else {
if h != nil {
pool.put(h)
}
}
n.hash.Write(child.sum)
if child.idx == len(n.children)-1 {
n.sum = n.hash.Sum(n.buf[:0])
if n.parent != nil {
n.parent.childReady(n, pool, n.hash)
}
n.hash = nil
}
}
func (b *base) buffer(size int64) []byte {
if int64(cap(b.buf)) < size {
b.buf = make([]byte, size+1)
}
return b.buf[:size]
}
func (t *tree) build(offset, length int64, order, level int) *node {
n := &node{}
level--
if level > 0 {
n.children = make([]*node, order)
b := offset
for i := 0; i < order; i++ {
l := offset + (length * int64(i+1) / int64(order)) - b
child := t.build(b, l, order, level)
child.parent = n
child.idx = i
n.children[i] = child
b += l
}
} else {
n.size = int(length)
}
return n
}
func (t *tree) first(n *node) *node {
if len(n.children) > 0 {
return t.first(n.children[0])
}
return n
}
func (t *tree) calc(verbose bool, progressListener ProgressListener) error {
var targetBlockSize int64 = DefTargetBlockSize
for t.size/targetBlockSize > 1048576 {
targetBlockSize <<= 1
}
blocks := t.size / targetBlockSize
levels := 8
order := 1
if blocks > 0 {
var d int64 = -1
for {
b := int64(math.Pow(float64(order+1), 7))
bs := t.size / b
if bs < targetBlockSize/2 {
break
}
nd := targetBlockSize - bs
if nd < 0 {
nd = -nd
}
// log.Printf("b: %d, d: %d\n", b, nd)
if d != -1 && nd > d {
break
}
d = nd
order++
}
if order < 2 {
order = 2
levels = int(math.Log2(float64(blocks))) + 1
}
} else {
levels = 1
order = 1
}
bs := int(float64(t.size) / math.Pow(float64(order), float64(levels-1)))
if verbose {
log.Printf("Levels: %d, order: %d, target block size: %d, block size: %d\n", levels, order, targetBlockSize, bs)
}
t.root = t.build(0, t.size, order, levels)
rr := int64(0)
var reader io.Reader
if t.useBuffer {
var bufSize int
for bufSize = DefTargetBlockSize; bufSize < bs; bufSize <<= 1 {
}
reader = bufio.NewReaderSize(t.reader, bufSize)
} else {
reader = t.reader
}
var pool hashPool = make([]hash.Hash, 0, levels)
workItems := make([]*workCtx, 2)
for i := range workItems {
workItems[i] = &workCtx{
buf: make([]byte, bs+1),
avail: make(chan struct{}, 1),
hashReady: make(chan struct{}, 1),
}
workItems[i].hash, _ = blake2b.New512(nil)
workItems[i].avail <- struct{}{}
}
go func() {
idx := 0
for {
wi := workItems[idx]
<-wi.hashReady
if wi.n == nil {
break
}
if wi.n.parent != nil {
wi.n.parent.childReady(wi.n, &pool, nil)
}
wi.avail <- struct{}{}
idx++
if idx >= len(workItems) {
idx = 0
}
}
}()
workIdx := 0
if progressListener != nil {
progressListener.Start(t.size)
}
for n := t.first(t.root); n != nil; n = n.next() {
if n.size == 0 {
panic("Leaf node size is zero")
}
wi := workItems[workIdx]
<-wi.avail
b := wi.buf[:n.size]
r, err := io.ReadFull(reader, b)
if err != nil {
return fmt.Errorf("in calc at %d (expected %d, read %d): %w", rr, len(b), r, err)
}
rr += int64(r)
if progressListener != nil {
progressListener.Update(rr)
}
wi.n = n
go func() {
wi.hash.Write(b)
wi.n.sum = wi.hash.Sum(wi.n.buf[:0])
wi.hash.Reset()
wi.hashReady <- struct{}{}
}()
workIdx++
if workIdx >= len(workItems) {
workIdx = 0
}
}
// wait until fully processed
for i := range workItems {
<-workItems[i].avail
}
// finish the goroutine
workItems[workIdx].n = nil
workItems[workIdx].hashReady <- struct{}{}
if rr < t.size {
return fmt.Errorf("read less data (%d) than expected (%d)", rr, t.size)
}
return nil
}
func readHeader(reader io.Reader) (size int64, err error) {
buf := make([]byte, len(hdrMagic)+8)
_, err = io.ReadFull(reader, buf)
if err != nil {
return
}
if string(buf[:len(hdrMagic)]) != hdrMagic {
err = ErrInvalidFormat
return
}
size = int64(binary.LittleEndian.Uint64(buf[len(hdrMagic):]))
return
}
func writeHeader(writer io.Writer, size int64) (err error) {
buf := make([]byte, len(hdrMagic)+8)
copy(buf, hdrMagic)
binary.LittleEndian.PutUint64(buf[len(hdrMagic):], uint64(size | {
return read, err
} | conditional_block |
|
event.py | _* methods depend on this attribute.
Can be: 'QUIT', 'KEYDOWN', 'KEYUP', 'MOUSEDOWN', 'MOUSEUP', or 'MOUSEMOTION.'
"""
def __repr__(self):
"""List an events public attributes when printed.
"""
attrdict = {}
for varname in dir(self):
if '_' == varname[0]:
continue
attrdict[varname] = self.__getattribute__(varname)
return '%s Event %s' % (self.__class__.__name__, repr(attrdict))
class Quit(Event):
"""Fired when the window is closed by the user.
"""
__slots__ = ()
type = 'QUIT'
class KeyEvent(Event):
def __init__(self, key='', char='', text='', shift=False,
left_alt=False, right_alt=False,
left_control=False, right_control=False,
left_meta=False, right_meta=False):
# Convert keycodes into string, but use string if passed
self.key = key if isinstance(key, str) else _keyNames[key]
"""Human readable names of the key pressed.
Non special characters will show up as 'CHAR'.
Can be one of
'NONE', 'ESCAPE', 'BACKSPACE', 'TAB', 'ENTER', 'SHIFT', 'CONTROL',
'ALT', 'PAUSE', 'CAPSLOCK', 'PAGEUP', 'PAGEDOWN', 'END', 'HOME', 'UP',
'LEFT', 'RIGHT', 'DOWN', 'PRINTSCREEN', 'INSERT', 'DELETE', 'LWIN',
'RWIN', 'APPS', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'KP0', 'KP1', 'KP2', 'KP3', 'KP4', 'KP5', 'KP6', 'KP7', 'KP8', 'KP9',
'KPADD', 'KPSUB', 'KPDIV', 'KPMUL', 'KPDEC', 'KPENTER', 'F1', 'F2',
'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
'NUMLOCK', 'SCROLLLOCK', 'SPACE', 'CHAR'
For the actual character instead of 'CHAR' use L{keychar}.
@type: string"""
self.char = char.replace('\x00', '') # change null to empty string
"""A single character string of the letter or symbol pressed.
Special characters like delete and return are not cross-platform.
L{key} or L{keychar} should be used instead for special keys.
Characters are also case sensitive.
@type: string"""
# get the best out of self.key and self.char
self.keychar = self.char if self.key == 'CHAR' else self.key
"""Similar to L{key} but returns a case sensitive letter or symbol
instead of 'CHAR'.
This variable makes available the widest variety of symbols and should
be used for key-mappings or anywhere where a narrower sample of keys
isn't needed.
"""
self.text = text
self.left_alt = self.leftAlt = bool(left_alt)
"""@type: boolean"""
self.right_alt = self.rightAlt = bool(right_alt)
"""@type: boolean"""
self.left_control = self.leftCtrl = bool(left_control)
"""@type: boolean"""
self.right_control = self.rightCtrl = bool(right_control)
"""@type: boolean"""
self.shift = bool(shift)
"""True if shift was held down during this event.
@type: boolean"""
self.alt = self.left_alt or self.right_alt
"""True if alt was held down during this event.
@type: boolean"""
self.control = self.left_control or self.right_control
"""True if control was held down during this event.
@type: boolean"""
self.left_meta = bool(left_meta)
self.right_meta = bool(right_meta)
self.meta = self.left_meta or self.right_meta
def __repr__(self):
parameters = []
for attr in ('key', 'char', 'text', 'shift',
'left_alt', 'right_alt',
'left_control', 'right_control',
'left_meta', 'right_meta'):
value = getattr(self, attr)
if value:
|
return '%s(%s)' % (self.__class__.__name__, ', '.join(parameters))
class KeyDown(KeyEvent):
"""Fired when the user presses a key on the keyboard or a key repeats.
"""
type = 'KEYDOWN'
class KeyUp(KeyEvent):
"""Fired when the user releases a key on the keyboard.
"""
type = 'KEYUP'
_mouseNames = {1: 'LEFT', 2: 'MIDDLE', 3: 'RIGHT', 4: 'SCROLLUP', 5: 'SCROLLDOWN'}
class MouseButtonEvent(Event):
def __init__(self, button, pos, cell):
self.button = _mouseNames[button]
"""Can be one of
'LEFT', 'MIDDLE', 'RIGHT', 'SCROLLUP', 'SCROLLDOWN'
@type: string"""
self.pos = pos
"""(x, y) position of the mouse on the screen
@type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console
@type: (int, int)"""
class MouseDown(MouseButtonEvent):
"""Fired when a mouse button is pressed."""
__slots__ = ()
type = 'MOUSEDOWN'
class MouseUp(MouseButtonEvent):
"""Fired when a mouse button is released."""
__slots__ = ()
type = 'MOUSEUP'
class MouseMotion(Event):
"""Fired when the mouse is moved."""
type = 'MOUSEMOTION'
def __init__(self, pos, cell, motion, cellmotion):
self.pos = pos
"""(x, y) position of the mouse on the screen.
type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console.
type: (int, int)"""
self.motion = motion
"""(x, y) motion of the mouse on the screen.
type: (int, int)"""
self.cellmotion = cellmotion
"""(x, y) mostion of the mouse moving over cells on the root console.
type: (int, int)"""
class App(object):
"""
Application framework.
- ev_*: Events are passed to methods based on their L{Event.type} attribute.
If an event type is 'KEYDOWN' the ev_KEYDOWN method will be called
with the event instance as a parameter.
- key_*: When a key is pressed another method will be called based on the
L{KeyEvent.key} attribute. For example the 'ENTER' key will call key_ENTER
with the associated L{KeyDown} event as its parameter.
- L{update}: This method is called every loop. It is passed a single
parameter detailing the time in seconds since the last update
(often known as deltaTime.)
You may want to call drawing routines in this method followed by
L{tdl.flush}.
"""
__slots__ = ('__running', '__prevTime')
def ev_QUIT(self, event):
"""Unless overridden this method raises a SystemExit exception closing
the program."""
raise SystemExit()
def ev_KEYDOWN(self, event):
"""Override this method to handle a L{KeyDown} event."""
def ev_KEYUP(self, event):
"""Override this method to handle a L{KeyUp} event."""
def ev_MOUSEDOWN(self, event):
"""Override this method to handle a L{MouseDown} event."""
def ev_MOUSEUP(self, event):
"""Override this method to handle a L{MouseUp} event."""
def ev_MOUSEMOTION(self, event):
"""Override this method to handle a L{MouseMotion} event."""
def update(self, deltaTime):
"""Override this method to handle per frame logic and drawing.
@type deltaTime: float
@param deltaTime: This parameter tells the amount of time passed since
the last call measured in seconds as a floating point
number.
You can use this variable to make your program
frame rate independent.
Use this parameter to adjust the speed of motion,
timers, and other game logic.
"""
pass
def suspend(self):
"""When called the App will begin to return control to where
L{App.run} was called.
Some further events are processed and the L{App.update} method will be
called one last time before exiting
(unless suspended during a call to L{App.update}.)
"""
self.__running = False
def run(self):
"""Delegate control over to this App instance. This function will
process all events and send them to the special methods ev_* and key_*.
A call to L{App.suspend} will return the control flow back to where
this function is called. And then the App can be run again.
But | parameters.append('%s=%r' % (attr, value)) | conditional_block |
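# Minimal usage sketch of the App framework described in the docstrings above.
# It assumes the App class from this module and the tdl package; the class name
# MyGame, the speed value, and the drawing that is left out here are illustrative only.
class MyGame(App):
    def __init__(self):
        self.x = 0.0
        self.speed = 20.0  # cells per second
    def key_ESCAPE(self, event):
        self.suspend()  # hand control back to the caller of run()
    def ev_MOUSEDOWN(self, event):
        self.x = float(event.cell[0])  # jump to the clicked cell's column
    def update(self, deltaTime):
        # deltaTime is the elapsed time in seconds since the last frame, so
        # speed * deltaTime keeps the motion frame rate independent; drawing
        # calls followed by tdl.flush() would normally go here as well.
        self.x += self.speed * deltaTime
# MyGame().run() would hand the event loop over to this class.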
event.py |
'QUIT', 'KEYDOWN', 'KEYUP', 'MOUSEDOWN', 'MOUSEUP', or 'MOUSEMOTION.'
- L{MouseButtonEvent.button} (found in L{MouseDown} and L{MouseUp} events)
'LEFT', 'MIDDLE', 'RIGHT', 'SCROLLUP', 'SCROLLDOWN'
- L{KeyEvent.key} (found in L{KeyDown} and L{KeyUp} events)
'NONE', 'ESCAPE', 'BACKSPACE', 'TAB', 'ENTER', 'SHIFT', 'CONTROL',
'ALT', 'PAUSE', 'CAPSLOCK', 'PAGEUP', 'PAGEDOWN', 'END', 'HOME', 'UP',
'LEFT', 'RIGHT', 'DOWN', 'PRINTSCREEN', 'INSERT', 'DELETE', 'LWIN',
'RWIN', 'APPS', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'KP0', 'KP1', 'KP2', 'KP3', 'KP4', 'KP5', 'KP6', 'KP7', 'KP8', 'KP9',
'KPADD', 'KPSUB', 'KPDIV', 'KPMUL', 'KPDEC', 'KPENTER', 'F1', 'F2',
'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
'NUMLOCK', 'SCROLLLOCK', 'SPACE', 'CHAR'
"""
import time as _time
from tcod import ffi as _ffi
from tcod import lib as _lib
import tdl as _tdl
from . import style as _style
_eventQueue = []
_pushedEvents = []
_mousel = 0
_mousem = 0
_mouser = 0
# this interprets the constants from libtcod and makes a key -> keyname dictionary
def _parseKeyNames(lib):
"""
returns a dictionary mapping of human readable key names to their keycodes
this parses constants with the names of K_* and makes code=name pairs
this is for KeyEvent.key variable and that enables things like:
if (event.key == 'PAGEUP'):
"""
_keyNames = {}
for attr in dir(lib): # from the modules variables
if attr[:6] == 'TCODK_': # get the K_* constants
_keyNames[getattr(lib, attr)] = attr[6:] # and make CODE=NAME pairs
return _keyNames
_keyNames = _parseKeyNames(_lib)
class Event(object):
"""Base Event class.
You can easily subclass this to make your own events. Be sure to set
the class attribute L{Event.type} for it to be passed to a custom L{App}
ev_* method."""
type = None
"""String constant representing the type of event.
The L{App} ev_* methods depend on this attribute.
Can be: 'QUIT', 'KEYDOWN', 'KEYUP', 'MOUSEDOWN', 'MOUSEUP', or 'MOUSEMOTION.'
"""
def __repr__(self):
"""List an events public attributes when printed.
"""
attrdict = {}
for varname in dir(self):
if '_' == varname[0]:
continue
attrdict[varname] = self.__getattribute__(varname)
return '%s Event %s' % (self.__class__.__name__, repr(attrdict))
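# Illustrative sketch: a user defined event as described in the class docstring
# above. The event name SPAWN and the handler ev_SPAWN are hypothetical; setting
# the class attribute 'type' is what routes the event to an App's ev_* method.
class Spawn(Event):
    type = 'SPAWN'
    def __init__(self, pos):
        self.pos = pos
# An App subclass would then implement ev_SPAWN(self, event) to receive it; the
# push() helper in this module is the assumed way to queue such custom events.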
class Quit(Event):
"""Fired when the window is closed by the user.
"""
__slots__ = ()
type = 'QUIT'
class KeyEvent(Event):
def __init__(self, key='', char='', text='', shift=False,
left_alt=False, right_alt=False,
left_control=False, right_control=False,
left_meta=False, right_meta=False):
# Convert keycodes into string, but use string if passed
self.key = key if isinstance(key, str) else _keyNames[key]
"""Human readable names of the key pressed.
Non special characters will show up as 'CHAR'.
Can be one of
'NONE', 'ESCAPE', 'BACKSPACE', 'TAB', 'ENTER', 'SHIFT', 'CONTROL',
'ALT', 'PAUSE', 'CAPSLOCK', 'PAGEUP', 'PAGEDOWN', 'END', 'HOME', 'UP',
'LEFT', 'RIGHT', 'DOWN', 'PRINTSCREEN', 'INSERT', 'DELETE', 'LWIN',
'RWIN', 'APPS', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'KP0', 'KP1', 'KP2', 'KP3', 'KP4', 'KP5', 'KP6', 'KP7', 'KP8', 'KP9',
'KPADD', 'KPSUB', 'KPDIV', 'KPMUL', 'KPDEC', 'KPENTER', 'F1', 'F2',
'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
'NUMLOCK', 'SCROLLLOCK', 'SPACE', 'CHAR'
For the actual character instead of 'CHAR' use L{keychar}.
@type: string"""
self.char = char.replace('\x00', '') # change null to empty string
"""A single character string of the letter or symbol pressed.
Special characters like delete and return are not cross-platform.
L{key} or L{keychar} should be used instead for special keys.
Characters are also case sensitive.
@type: string"""
# get the best out of self.key and self.char
self.keychar = self.char if self.key == 'CHAR' else self.key
"""Similar to L{key} but returns a case sensitive letter or symbol
instead of 'CHAR'.
This variable makes available the widest variety of symbols and should
be used for key-mappings or anywhere where a narrower sample of keys
isn't needed.
"""
self.text = text
self.left_alt = self.leftAlt = bool(left_alt)
"""@type: boolean"""
self.right_alt = self.rightAlt = bool(right_alt)
"""@type: boolean"""
self.left_control = self.leftCtrl = bool(left_control)
"""@type: boolean"""
self.right_control = self.rightCtrl = bool(right_control)
"""@type: boolean"""
self.shift = bool(shift)
"""True if shift was held down during this event.
@type: boolean"""
self.alt = self.left_alt or self.right_alt
"""True if alt was held down during this event.
@type: boolean"""
self.control = self.left_control or self.right_control
"""True if control was held down during this event.
@type: boolean"""
self.left_meta = bool(left_meta)
self.right_meta = bool(right_meta)
self.meta = self.left_meta or self.right_meta
def __repr__(self):
parameters = []
for attr in ('key', 'char', 'text', 'shift',
'left_alt', 'right_alt',
'left_control', 'right_control',
'left_meta', 'right_meta'):
value = getattr(self, attr)
if value:
parameters.append('%s=%r' % (attr, value))
return '%s(%s)' % (self.__class__.__name__, ', '.join(parameters))
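# --- illustrative sketch (added for clarity, not part of the original module) ---
# Shows how key, char, and keychar relate on a KeyEvent; the literal values used
# here are hypothetical examples, not output captured from libtcod.
_example_char_key = KeyEvent(key='CHAR', char='a')
assert _example_char_key.key == 'CHAR'          # printable keys report the generic 'CHAR' name
assert _example_char_key.keychar == 'a'         # keychar falls back to the actual character
_example_special_key = KeyEvent(key='ENTER', char='\r')
assert _example_special_key.keychar == 'ENTER'  # special keys keep their readable name
# --- end sketch ---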
class KeyDown(KeyEvent):
"""Fired when the user presses a key on the keyboard or a key repeats.
"""
type = 'KEYDOWN'
class KeyUp(KeyEvent):
"""Fired when the user releases a key on the keyboard.
"""
type = 'KEYUP'
_mouseNames = {1: 'LEFT', 2: 'MIDDLE', 3: 'RIGHT', 4: 'SCROLLUP', 5: 'SCROLLDOWN'}
class MouseButtonEvent(Event):
def __init__(self, button, pos, cell):
self.button = _mouseNames[button]
"""Can be one of
'LEFT', 'MIDDLE', 'RIGHT', 'SCROLLUP', 'SCROLLDOWN'
@type: string"""
self.pos = pos
"""(x, y) position of the mouse on the screen
@type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console
@type: (int, int)"""
class MouseDown(MouseButtonEvent):
"""Fired when a mouse button is pressed."""
__slots__ = ()
type = 'MOUSEDOWN'
class MouseUp(MouseButtonEvent):
"""Fired when a mouse button is released."""
__slots__ = ()
type = 'MOUSEUP'
class MouseMotion(Event):
"""Fired when the mouse is moved."""
type = 'MOUSEMOTION'
def __init__(self, pos, cell, motion, cellmotion):
self.pos = pos
"""(x, y) position of the mouse on the screen.
type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console.
type: (int | random_line_split |
||
event.py | _* methods depend on this attribute.
Can be: 'QUIT', 'KEYDOWN', 'KEYUP', 'MOUSEDOWN', 'MOUSEUP', or 'MOUSEMOTION'.
"""
def __repr__(self):
"""List an events public attributes when printed.
"""
attrdict = {}
for varname in dir(self):
if '_' == varname[0]:
continue
attrdict[varname] = self.__getattribute__(varname)
return '%s Event %s' % (self.__class__.__name__, repr(attrdict))
class Quit(Event):
"""Fired when the window is closed by the user.
"""
__slots__ = ()
type = 'QUIT'
class KeyEvent(Event):
def __init__(self, key='', char='', text='', shift=False,
left_alt=False, right_alt=False,
left_control=False, right_control=False,
left_meta=False, right_meta=False):
# Convert keycodes into string, but use string if passed
self.key = key if isinstance(key, str) else _keyNames[key]
"""Human readable names of the key pressed.
Non special characters will show up as 'CHAR'.
Can be one of
'NONE', 'ESCAPE', 'BACKSPACE', 'TAB', 'ENTER', 'SHIFT', 'CONTROL',
'ALT', 'PAUSE', 'CAPSLOCK', 'PAGEUP', 'PAGEDOWN', 'END', 'HOME', 'UP',
'LEFT', 'RIGHT', 'DOWN', 'PRINTSCREEN', 'INSERT', 'DELETE', 'LWIN',
'RWIN', 'APPS', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'KP0', 'KP1', 'KP2', 'KP3', 'KP4', 'KP5', 'KP6', 'KP7', 'KP8', 'KP9',
'KPADD', 'KPSUB', 'KPDIV', 'KPMUL', 'KPDEC', 'KPENTER', 'F1', 'F2',
'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
'NUMLOCK', 'SCROLLLOCK', 'SPACE', 'CHAR'
For the actual character instead of 'CHAR' use L{keychar}.
@type: string"""
self.char = char.replace('\x00', '') # change null to empty string
"""A single character string of the letter or symbol pressed.
Special characters like delete and return are not cross-platform.
L{key} or L{keychar} should be used instead for special keys.
Characters are also case sensitive.
@type: string"""
# get the best out of self.key and self.char
self.keychar = self.char if self.key == 'CHAR' else self.key
"""Similar to L{key} but returns a case sensitive letter or symbol
instead of 'CHAR'.
This variable makes available the widest variety of symbols and should
be used for key-mappings or anywhere where a narrower sample of keys
isn't needed.
"""
self.text = text
self.left_alt = self.leftAlt = bool(left_alt)
"""@type: boolean"""
self.right_alt = self.rightAlt = bool(right_alt)
"""@type: boolean"""
self.left_control = self.leftCtrl = bool(left_control)
"""@type: boolean"""
self.right_control = self.rightCtrl = bool(right_control)
"""@type: boolean"""
self.shift = bool(shift)
"""True if shift was held down during this event.
@type: boolean"""
self.alt = self.left_alt or self.right_alt
"""True if alt was held down during this event.
@type: boolean"""
self.control = self.left_control or self.right_control
"""True if control was held down during this event.
@type: boolean"""
self.left_meta = bool(left_meta)
self.right_meta = bool(right_meta)
self.meta = self.left_meta or self.right_meta
def __repr__(self):
parameters = []
for attr in ('key', 'char', 'text', 'shift',
'left_alt', 'right_alt',
'left_control', 'right_control',
'left_meta', 'right_meta'):
value = getattr(self, attr)
if value:
parameters.append('%s=%r' % (attr, value))
return '%s(%s)' % (self.__class__.__name__, ', '.join(parameters))
class KeyDown(KeyEvent):
"""Fired when the user presses a key on the keyboard or a key repeats.
"""
type = 'KEYDOWN'
class KeyUp(KeyEvent):
"""Fired when the user releases a key on the keyboard.
"""
type = 'KEYUP'
_mouseNames = {1: 'LEFT', 2: 'MIDDLE', 3: 'RIGHT', 4: 'SCROLLUP', 5: 'SCROLLDOWN'}
class MouseButtonEvent(Event):
|
class MouseDown(MouseButtonEvent):
"""Fired when a mouse button is pressed."""
__slots__ = ()
type = 'MOUSEDOWN'
class MouseUp(MouseButtonEvent):
"""Fired when a mouse button is released."""
__slots__ = ()
type = 'MOUSEUP'
class MouseMotion(Event):
"""Fired when the mouse is moved."""
type = 'MOUSEMOTION'
def __init__(self, pos, cell, motion, cellmotion):
self.pos = pos
"""(x, y) position of the mouse on the screen.
type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console.
type: (int, int)"""
self.motion = motion
"""(x, y) motion of the mouse on the screen.
type: (int, int)"""
self.cellmotion = cellmotion
"""(x, y) mostion of the mouse moving over cells on the root console.
type: (int, int)"""
class App(object):
"""
Application framework.
- ev_*: Events are passed to methods based on their L{Event.type} attribute.
If an event type is 'KEYDOWN' the ev_KEYDOWN method will be called
with the event instance as a parameter.
- key_*: When a key is pressed another method will be called based on the
L{KeyEvent.key} attribute. For example the 'ENTER' key will call key_ENTER
with the associated L{KeyDown} event as its parameter.
- L{update}: This method is called every loop. It is passed a single
parameter detailing the time in seconds since the last update
(often known as deltaTime.)
You may want to call drawing routines in this method followed by
L{tdl.flush}.
"""
__slots__ = ('__running', '__prevTime')
def ev_QUIT(self, event):
"""Unless overridden this method raises a SystemExit exception closing
the program."""
raise SystemExit()
def ev_KEYDOWN(self, event):
"""Override this method to handle a L{KeyDown} event."""
def ev_KEYUP(self, event):
"""Override this method to handle a L{KeyUp} event."""
def ev_MOUSEDOWN(self, event):
"""Override this method to handle a L{MouseDown} event."""
def ev_MOUSEUP(self, event):
"""Override this method to handle a L{MouseUp} event."""
def ev_MOUSEMOTION(self, event):
"""Override this method to handle a L{MouseMotion} event."""
def update(self, deltaTime):
"""Override this method to handle per frame logic and drawing.
@type deltaTime: float
@param deltaTime: This parameter tells the amount of time passed since
the last call measured in seconds as a floating point
number.
You can use this variable to make your program
frame rate independent.
Use this parameter to adjust the speed of motion,
timers, and other game logic.
"""
pass
def suspend(self):
"""When called the App will begin to return control to where
L{App.run} was called.
Some further events are processed and the L{App.update} method will be
called one last time before exiting
(unless suspended during a call to L{App.update}.)
"""
self.__running = False
def run(self):
"""Delegate control over to this App instance. This function will
process all events and send them to the special methods ev_* and key_*.
A call to L{App.suspend} will return the control flow back to where
this function is called. And then the App can be run again.
But | def __init__(self, button, pos, cell):
self.button = _mouseNames[button]
"""Can be one of
'LEFT', 'MIDDLE', 'RIGHT', 'SCROLLUP', 'SCROLLDOWN'
@type: string"""
self.pos = pos
"""(x, y) position of the mouse on the screen
@type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console
@type: (int, int)""" | identifier_body |
event.py | on the screen
@type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console
@type: (int, int)"""
class MouseDown(MouseButtonEvent):
"""Fired when a mouse button is pressed."""
__slots__ = ()
type = 'MOUSEDOWN'
class MouseUp(MouseButtonEvent):
"""Fired when a mouse button is released."""
__slots__ = ()
type = 'MOUSEUP'
class MouseMotion(Event):
"""Fired when the mouse is moved."""
type = 'MOUSEMOTION'
def __init__(self, pos, cell, motion, cellmotion):
self.pos = pos
"""(x, y) position of the mouse on the screen.
type: (int, int)"""
self.cell = cell
"""(x, y) position of the mouse snapped to a cell on the root console.
type: (int, int)"""
self.motion = motion
"""(x, y) motion of the mouse on the screen.
type: (int, int)"""
self.cellmotion = cellmotion
"""(x, y) mostion of the mouse moving over cells on the root console.
type: (int, int)"""
class App(object):
"""
Application framework.
- ev_*: Events are passed to methods based on their L{Event.type} attribute.
If an event type is 'KEYDOWN' the ev_KEYDOWN method will be called
with the event instance as a parameter.
- key_*: When a key is pressed another method will be called based on the
L{KeyEvent.key} attribute. For example the 'ENTER' key will call key_ENTER
with the associated L{KeyDown} event as its parameter.
- L{update}: This method is called every loop. It is passed a single
parameter detailing the time in seconds since the last update
(often known as deltaTime.)
You may want to call drawing routines in this method followed by
L{tdl.flush}.
"""
__slots__ = ('__running', '__prevTime')
def ev_QUIT(self, event):
"""Unless overridden this method raises a SystemExit exception closing
the program."""
raise SystemExit()
def ev_KEYDOWN(self, event):
"""Override this method to handle a L{KeyDown} event."""
def ev_KEYUP(self, event):
"""Override this method to handle a L{KeyUp} event."""
def ev_MOUSEDOWN(self, event):
"""Override this method to handle a L{MouseDown} event."""
def ev_MOUSEUP(self, event):
"""Override this method to handle a L{MouseUp} event."""
def ev_MOUSEMOTION(self, event):
"""Override this method to handle a L{MouseMotion} event."""
def update(self, deltaTime):
"""Override this method to handle per frame logic and drawing.
@type deltaTime: float
@param deltaTime: This parameter tells the amount of time passed since
the last call measured in seconds as a floating point
number.
You can use this variable to make your program
frame rate independent.
Use this parameter to adjust the speed of motion,
timers, and other game logic.
"""
pass
def suspend(self):
"""When called the App will begin to return control to where
L{App.run} was called.
Some further events are processed and the L{App.update} method will be
called one last time before exiting
(unless suspended during a call to L{App.update}.)
"""
self.__running = False
def run(self):
"""Delegate control over to this App instance. This function will
process all events and send them to the special methods ev_* and key_*.
A call to L{App.suspend} will return the control flow back to where
this function is called. And then the App can be run again.
But a single App instance can not be run multiple times simultaneously.
"""
if getattr(self, '_App__running', False):
raise _tdl.TDLError('An App can not be run multiple times simultaneously')
self.__running = True
while self.__running:
self.runOnce()
def run_once(self):
"""Pump events to this App instance and then return.
This works in the way described in L{App.run} except it immediately
returns after the first L{update} call.
Having multiple L{App} instances and selectively calling runOnce on
them is a decent way to create a state machine.
"""
if not hasattr(self, '_App__prevTime'):
self.__prevTime = _time.clock() # initiate __prevTime
for event in get():
if event.type: # exclude custom events with a blank type variable
# call the ev_* methods
method = 'ev_%s' % event.type # ev_TYPE
getattr(self, method)(event)
if event.type == 'KEYDOWN':
# call the key_* methods
method = 'key_%s' % event.key # key_KEYNAME
if hasattr(self, method): # silently exclude undefined methods
getattr(self, method)(event)
newTime = _time.clock()
self.update(newTime - self.__prevTime)
self.__prevTime = newTime
#_tdl.flush()
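# --- illustrative sketch (added for clarity, not part of the original module) ---
# A minimal App subclass wired up as the class docstring above describes; the
# name HelloApp and its method bodies are hypothetical.
class HelloApp(App):
    def ev_KEYDOWN(self, event):
        print('pressed %s' % event.keychar)   # runs for every 'KEYDOWN' event
    def key_ESCAPE(self, event):
        self.suspend()                        # only runs when the ESCAPE key goes down
    def update(self, deltaTime):
        pass                                  # per-frame logic/drawing would go here
# HelloApp().run() would then pump events until key_ESCAPE calls suspend().
# --- end sketch ---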
def _processEvents():
"""Flushes the event queue from libtcod into the global list _eventQueue"""
global _mousel, _mousem, _mouser, _eventsflushed, _pushedEvents
_eventsflushed = True
events = _pushedEvents # get events from event.push
_pushedEvents = [] # then clear the pushed events queue
mouse = _ffi.new('TCOD_mouse_t *')
libkey = _ffi.new('TCOD_key_t *')
while 1:
libevent = _lib.TCOD_sys_check_for_event(_lib.TCOD_EVENT_ANY, libkey, mouse)
if not libevent: # no more events from libtcod
break
#if mouse.dx or mouse.dy:
if libevent & _lib.TCOD_EVENT_MOUSE_MOVE:
events.append(MouseMotion((mouse.x, mouse.y),
(mouse.cx, mouse.cy),
(mouse.dx, mouse.dy),
(mouse.dcx, mouse.dcy)))
mousepos = ((mouse.x, mouse.y), (mouse.cx, mouse.cy))
for oldstate, newstate, released, button in \
zip((_mousel, _mousem, _mouser),
(mouse.lbutton, mouse.mbutton, mouse.rbutton),
(mouse.lbutton_pressed, mouse.mbutton_pressed,
mouse.rbutton_pressed),
(1, 2, 3)):
if released:
if not oldstate:
events.append(MouseDown(button, *mousepos))
events.append(MouseUp(button, *mousepos))
if newstate:
events.append(MouseDown(button, *mousepos))
elif newstate and not oldstate:
events.append(MouseDown(button, *mousepos))
if mouse.wheel_up:
events.append(MouseDown(4, *mousepos))
if mouse.wheel_down:
events.append(MouseDown(5, *mousepos))
_mousel = mouse.lbutton
_mousem = mouse.mbutton
_mouser = mouse.rbutton
if libkey.vk == _lib.TCODK_NONE:
break
if libkey.pressed:
keyevent = KeyDown
else:
keyevent = KeyUp
if libkey.vk == _lib.TCODK_TEXT:
# Hack 2017-03-22 HexDecimal
# Fix undefined libtcod behaviour which breaks 32-bit builds.
libkey.c = b'\x00'
libkey.shift = False
libkey.lalt = libkey.ralt = False
libkey.lctrl = libkey.rctrl = False
libkey.lmeta = libkey.rmeta = False
events.append(
keyevent(
libkey.vk,
libkey.c.decode('ascii', errors='ignore'),
_ffi.string(libkey.text).decode('utf-8'),
libkey.shift,
libkey.lalt,
libkey.ralt,
libkey.lctrl,
libkey.rctrl,
libkey.lmeta,
libkey.rmeta,
)
)
if _lib.TCOD_console_is_window_closed():
events.append(Quit())
_eventQueue.extend(events)
def get():
"""Flushes the event queue and returns the list of events.
This function returns L{Event} objects that can be identified by their
type attribute or their class.
@rtype: iterator
@return: Returns an iterable of objects derived from L{Event} or anything
put in a L{push} call. If the iterator is deleted or otherwise
interrupted before finishing, the excess items are preserved for the
next call.
"""
_processEvents()
return _event_generator()
def _event_generator():
while _eventQueue:
# if there is an interruption the rest of the events stay untouched
# this means you can break out of a event.get loop without losing
# the leftover events
yield(_eventQueue.pop(0))
return  # PEP 479: raising StopIteration inside a generator becomes a RuntimeError on Python 3.7+
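# --- illustrative sketch (added for clarity, not part of the original module) ---
# The typical polling loop built on get(), as its docstring describes; _tdl.flush()
# is assumed here to be the screen-refresh call of the parent tdl package.
def _example_loop():
    while True:
        for event in get():
            if event.type == 'QUIT':
                return
            if event.type == 'KEYDOWN' and event.key == 'ESCAPE':
                return
        _tdl.flush()  # draw the frame once the event queue has been drained
# --- end sketch ---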
def | wait | identifier_name |
|
dealerDispatcherList.js | *)
pageList: [10, 25, 50, 100],
//this endpoint has to handle the fixed parameters passed by bootstrap table and return JSON in the expected format
url: "${ctx}/process/shopmsg/shopMsg/dataDispatcher",
//the default is 'limit'; the parameters sent to the server are: limit, offset, search, sort, order Else
//queryParamsType:'',
////query parameters; sent with every request, can be customized
queryParams : function(params) {
var searchParam = $("#searchForm").serializeJSON();
searchParam.pageNo = params.limit === undefined? "1" :params.offset/params.limit+1;
searchParam.pageSize = params.limit === undefined? -1 : params.limit;
searchParam.orderBy = params.sort === undefined? "" : params.sort+ " "+ params.order;
return searchParam;
},
//pagination mode: client for client-side paging, server for server-side paging (*)
sidePagination: "server",
contextMenuTrigger:"right",//pc端 按右键弹出菜单
contextMenuTriggerMobile:"press",//手机端 弹出菜单,click:单击, press:长按。
contextMenu: '#context-menu',
onContextMenuItem: function(row, $el){
if($el.data("item") == "edit"){
window.location = "${ctx}/shop/dealer/dealer/form?id=" + row.id;
} else if($el.data("item") == "delete"){
del(row.id);
}
},
onClickRow: function(row, $el){
},
columns: [{
checkbox: true
}
,{
field: 'companyCode',
title: '经销商编码',
sortable: true
}
,{
field: 'companyName',
title: '经销商名称',
sortable: true
}
,{
field: 'contacts',
title: '联系人',
sortable: true
}
,{
field: 'mobile',
title: '手机',
sortable: true
}
,{
field: 'undertakeArea',
title: '承接区域',
sortable: true
}
,{
field: 'underProduct',
title: '承接品类',
sortable: true
},{
field: 'gmName',
title: '工贸名称',
sortable: true
},
/*
,{
field: 'channelName',
title: '渠道名称',
sortable: true
}
,{
field: 'taxCode',
title: '税码',
sortable: true
}
,{
field: 'kjtAccount',
title: '快捷通账号',
sortable: true
}
,{
field: 'legalPersonName',
title: '法人姓名',
sortable: true
}
,{
field: 'legalPersonIdCard',
title: '法人身份号',
sortable: true
}
,{
field: 'companyTel',
title: '公司电话',
sortable: true
}
*/
/* ,{
field: 'undertakeArea',
title: '承接区域',
sortable: true
}*/
]
});
if(navigator.userAgent.match(/(iPhone|iPod|Android|ios)/i)){//if on a mobile device
$('#dealerTable').bootstrapTable("toggleView");
}
$('#dealerTable').on('check.bs.table uncheck.bs.table load-success.bs.table ' +
'check-all.bs.table uncheck-all.bs.table', function () {
var sels = $('#dealerTable').bootstrapTable('getSelections');
$('#remove').prop('disabled', ! sels.length);
$('#edit').prop('disabled', sels.length!=1);
if(sels.length == 1 && sels[0].auditState =='0'){
$('#audit').prop('disabled', false);
} else {
$('#audit').prop('disabled', true);
}
});
$("#btnImport").click(function(){
jh.open({
type: 1,
area: [500, 300],
title:"导入数据",
content:$("#importBox").html() ,
btn: ['下载模板','确定', '关闭'],
btn1: function(index, layero){
window.location='${ctx}/shop/dealer/dealer/import/template';
},
btn2: function(index, layero){
var inputForm =top.$("#importForm");
var top_iframe = top.getActiveTab().attr("name");//get the iframe of the currently active tab
inputForm.attr("target",top_iframe);//after the form is submitted, show the url returned by the server in the current tab
inputForm.onsubmit = function(){
jh.loading(' 正在导入,请稍等...');
}
inputForm.submit();
jh.close(index);
},
btn3: function(index){
jh.close(index);
}
});
});
$("#search").click("click", function() {// 绑定查询按扭
$('#dealerTable').bootstrapTable('refresh');
});
$("#reset").click("click", function() {// 绑定查询按扭
$("#searchForm input").val("");
$("#searchForm select").val("");
$("#searchForm .select-item").html("");
$('#dealerTable').bootstrapTable('refresh');
});
});
function getIdSelections() {
return $.map($("#dealerTable").bootstrapTable('getSelections'), function (row) {
return row.id
});
}
function getNameSelections() {
return $.map($("#dealerTable").bootstrapTable('getSelections'), function (row) {
return row.companyName
});
}
function del(id){
jh.confirm('确认要删除该经销商记录吗?', function(){
jh.loading();
jh.get("${ctx}/shop/dealer/dealer/delete?id="+id, function(data){
if(data.success){
$('#dealerTable').bootstrapTable('refresh');
jh.success(data.msg);
}else{
jh.error(data.msg);
}
})
});
}
function deleteAll(){
jh.confirm('确认要删除该经销商记录吗?', function(){
jh.loading();
jh.get("${ctx}/shop/dealer/dealer/deleteAll?ids=" + getIdSelections(), function(data){
if(data.success){
$('#dealerTable').bootstrapTable('refresh');
jh.success(data.msg);
}else{
jh.error(data.msg);
}
})
})
}
function edit(){
window.location = "${ctx}/shop/dealer/dealer/form?id=" + getIdSelections();
}
function audit(id){
if(id == undefined){
id = getIdSelections();
}
jh.open({
type: 1,
area: ['400px','200px'],
title:"审核",
content:$("#auditBox").html() ,
scrollbar: false,
btn: ['确定', '关闭'],
btn1: function(index, layero){
var inputForm = layero.find("#auditForm");
var sel = inputForm.find("input[name='auditState']:checked").val();
if(sel==undefined){
jh.alert('请选择是否同意');
return false;
}
if(sel=='2'){
var auditDesc = inputForm.find('#auditDesc');
if($.trim(auditDesc.val())==''){
jh.alert('请输入不同意原因');
return false;
}
}
jh.loading(' 正在审核,请稍等...');
jh.post("${ctx}/shop/dealer/dealer/audit",inputForm.serialize(),function(data){
if(data.success){
$('#dealerTable').bootstrapTable('refresh');
jh.success(data.msg);
}else{
jh.error(data.msg);
}
});
jh.close(index);
},
btn2: function(index){
jh.close(index);
},
success: function(layero, index){
//initialize after the dialog opens
var contElem = layero.find('.layui-layer-content');
var inputForm = contElem.find("#auditForm");
var idElem = inputForm.find('#auditId');
idElem.val(id);
var auditDescDiv = inputForm.find('#auditDescDiv');
var auditDesc = inputForm.find('#auditDesc');
var conHeight = contElem.height();
var layerHeight = layero.height();
inputForm.find("input[name='auditState']").change(function(){
var sel = $(this).val();
if(sel == "1"){
auditDescDiv.addClass('hide');
auditDesc.val('');
layero.height(layerHeight);
contElem.height(conHeight);
} else if(sel == "2"){
auditDescDiv.removeClass('hide');
layero.height(layerHeight+120);
contElem.height(conHeight+120);
auditDesc.focus();
}
})
}
});
}
var callbackdata = function () {
var arrIds = getIdSelections();
var arrNames = getNameSelections();
return {
arrIds:arrIds,
arrNames:arrNames
};
}
</script> | conditional_block |
||
dealerDispatcherList.js | true,
//show the columns dropdown
showColumns: true,
//show the export button
showExport: true,
//show the pagination toggle button
showPaginationSwitch: true,
//keep at least 2 columns visible
minimumCountColumns: 2,
//whether to stripe alternating rows
striped: true,
//whether to use the cache; defaults to true, so this usually needs to be set explicitly (*)
cache: false,
//whether to show pagination (*)
pagination: true,
//sort order
sortOrder: "asc",
//load the first page initially (the default)
pageNumber:1,
//number of rows per page (*)
pageSize: 10,
//selectable page sizes (*)
pageList: [10, 25, 50, 100],
//this endpoint has to handle the fixed parameters passed by bootstrap table and return JSON in the expected format
url: "${ctx}/process/shopmsg/shopMsg/dataDispatcher",
//the default is 'limit'; the parameters sent to the server are: limit, offset, search, sort, order Else
//queryParamsType:'',
////query parameters; sent with every request, can be customized
queryParams : function(params) {
var searchParam = $("#searchForm").serializeJSON();
searchParam.pageNo = params.limit === undefined? "1" :params.offset/params.limit+1;
searchParam.pageSize = params.limit === undefined? -1 : params.limit;
searchParam.orderBy = params.sort === undefined? "" : params.sort+ " "+ params.order;
return searchParam;
},
//pagination mode: client for client-side paging, server for server-side paging (*)
sidePagination: "server",
contextMenuTrigger:"right",//PC: right-click opens the context menu
contextMenuTriggerMobile:"press",//mobile: menu trigger, click: single tap, press: long press
contextMenu: '#context-menu',
onContextMenuItem: function(row, $el){
if($el.data("item") == "edit"){
window.location = "${ctx}/shop/dealer/dealer/form?id=" + row.id;
} else if($el.data("item") == "delete"){
del(row.id);
}
},
onClickRow: function(row, $el){
},
columns: [{
checkbox: true
}
,{
field: 'companyCode',
title: '经销商编码',
sortable: true
}
,{
field: 'companyName',
title: '经销商名称',
sortable: true
}
,{
field: 'contacts',
title: '联系人',
sortable: true
}
,{
field: 'mobile',
title: '手机',
sortable: true
}
,{
field: 'undertakeArea',
title: '承接区域',
sortable: true
}
,{
field: 'underProduct',
title: '承接品类',
sortable: true
},{
field: 'gmName',
title: '工贸名称',
sortable: true
},
/*
,{
field: 'channelName',
title: '渠道名称',
sortable: true
}
,{
field: 'taxCode',
title: '税码',
sortable: true
}
,{
field: 'kjtAccount',
title: '快捷通账号', |
}
,{
field: 'legalPersonName',
title: '法人姓名',
sortable: true
}
,{
field: 'legalPersonIdCard',
title: '法人身份号',
sortable: true
}
,{
field: 'companyTel',
title: '公司电话',
sortable: true
}
*/
/* ,{
field: 'undertakeArea',
title: '承接区域',
sortable: true
}*/
]
});
if(navigator.userAgent.match(/(iPhone|iPod|Android|ios)/i)){//if on a mobile device
$('#dealerTable').bootstrapTable("toggleView");
}
$('#dealerTable').on('check.bs.table uncheck.bs.table load-success.bs.table ' +
'check-all.bs.table uncheck-all.bs.table', function () {
var sels = $('#dealerTable').bootstrapTable('getSelections');
$('#remove').prop('disabled', ! sels.length);
$('#edit').prop('disabled', sels.length!=1);
if(sels.length == 1 && sels[0].auditState =='0'){
$('#audit').prop('disabled', false);
} else {
$('#audit').prop('disabled', true);
}
});
$("#btnImport").click(function(){
jh.open({
type: 1,
area: [500, 300],
title:"导入数据",
content:$("#importBox").html() ,
btn: ['下载模板','确定', '关闭'],
btn1: function(index, layero){
window.location='${ctx}/shop/dealer/dealer/import/template';
},
btn2: function(index, layero){
var inputForm =top.$("#importForm");
var top_iframe = top.getActiveTab().attr("name");//get the iframe of the currently active tab
inputForm.attr("target",top_iframe);//after the form is submitted, show the url returned by the server in the current tab
inputForm.onsubmit = function(){
jh.loading(' 正在导入,请稍等...');
}
inputForm.submit();
jh.close(index);
},
btn3: function(index){
jh.close(index);
}
});
});
$("#search").click("click", function() {// 绑定查询按扭
$('#dealerTable').bootstrapTable('refresh');
});
$("#reset").click("click", function() {// 绑定查询按扭
$("#searchForm input").val("");
$("#searchForm select").val("");
$("#searchForm .select-item").html("");
$('#dealerTable').bootstrapTable('refresh');
});
});
function getIdSelections() {
return $.map($("#dealerTable").bootstrapTable('getSelections'), function (row) {
return row.id
});
}
function getNameSelections() {
return $.map($("#dealerTable").bootstrapTable('getSelections'), function (row) {
return row.companyName
});
}
function del(id){
jh.confirm('确认要删除该经销商记录吗?', function(){
jh.loading();
jh.get("${ctx}/shop/dealer/dealer/delete?id="+id, function(data){
if(data.success){
$('#dealerTable').bootstrapTable('refresh');
jh.success(data.msg);
}else{
jh.error(data.msg);
}
})
});
}
function deleteAll(){
jh.confirm('确认要删除该经销商记录吗?', function(){
jh.loading();
jh.get("${ctx}/shop/dealer/dealer/deleteAll?ids=" + getIdSelections(), function(data){
if(data.success){
$('#dealerTable').bootstrapTable('refresh');
jh.success(data.msg);
}else{
jh.error(data.msg);
}
})
})
}
function edit(){
window.location = "${ctx}/shop/dealer/dealer/form?id=" + getIdSelections();
}
function audit(id){
if(id == undefined){
id = getIdSelections();
}
jh.open({
type: 1,
area: ['400px','200px'],
title:"审核",
content:$("#auditBox").html() ,
scrollbar: false,
btn: ['确定', '关闭'],
btn1: function(index, layero){
var inputForm = layero.find("#auditForm");
var sel = inputForm.find("input[name='auditState']:checked").val();
if(sel==undefined){
jh.alert('请选择是否同意');
return false;
}
if(sel=='2'){
var auditDesc = inputForm.find('#auditDesc');
if($.trim(auditDesc.val())==''){
jh.alert('请输入不同意原因');
return false;
}
}
jh.loading(' 正在审核,请稍等...');
jh.post("${ctx}/shop/dealer/dealer/audit",inputForm.serialize(),function(data){
if(data.success){
$('#dealerTable').bootstrapTable('refresh');
jh.success(data.msg);
}else{
jh.error(data.msg);
}
});
jh.close(index);
},
btn2: function(index){
jh.close(index);
},
success: function(layero, index){
//initialize after the dialog opens
var contElem = layero.find('.layui-layer-content');
var inputForm = contElem.find("#auditForm");
var idElem = inputForm.find('#auditId');
idElem.val(id);
var auditDescDiv = inputForm.find('#auditDescDiv');
var auditDesc = | sortable: true | random_line_split |