file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
regexp.go | number of unclosed lpars
pos int;
ch int;
}
const endOfFile = -1
func (p *parser) c() int { return p.ch }
func (p *parser) nextc() int {
if p.pos >= len(p.re.expr) {
p.ch = endOfFile
} else {
c, w := utf8.DecodeRuneInString(p.re.expr[p.pos:len(p.re.expr)]);
p.ch = c;
p.pos += w;
}
return p.ch;
}
func newParser(re *Regexp) *parser {
p := new(parser);
p.re = re;
p.nextc(); // load p.ch
return p;
}
func special(c int) bool {
s := `\.+*?()|[]^$`;
for i := 0; i < len(s); i++ {
if c == int(s[i]) {
return true
}
}
return false;
}
func specialcclass(c int) bool {
s := `\-[]`;
for i := 0; i < len(s); i++ {
if c == int(s[i]) {
return true
}
}
return false;
}
func (p *parser) charClass() instr {
cc := newCharClass();
if p.c() == '^' {
cc.negate = true;
p.nextc();
}
left := -1;
for {
switch c := p.c(); c {
case ']', endOfFile:
if left >= 0 {
p.error = ErrBadRange;
return nil;
}
// Is it [^\n]?
if cc.negate && len(cc.ranges) == 2 &&
cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
nl := new(_NotNl);
p.re.add(nl);
return nl;
}
p.re.add(cc);
return cc;
case '-': // do this before backslash processing
p.error = ErrBadRange;
return nil;
case '\\':
c = p.nextc();
switch {
case c == endOfFile:
p.error = ErrExtraneousBackslash;
return nil;
case c == 'n':
c = '\n'
case specialcclass(c):
// c is as delivered
default:
p.error = ErrBadBackslash;
return nil;
}
fallthrough;
default:
p.nextc();
switch {
case left < 0: // first of pair
if p.c() == '-' { // range
p.nextc();
left = c;
} else { // single char
cc.addRange(c, c)
}
case left <= c: // second of pair
cc.addRange(left, c);
left = -1;
default:
p.error = ErrBadRange;
return nil;
}
}
}
return nil;
}
func (p *parser) term() (start, end instr) {
// term() is the leaf of the recursion, so it's sufficient to pick off the
// error state here for early exit.
// The other functions (closure(), concatenation() etc.) assume
// it's safe to recur to here.
if p.error != "" {
return
}
switch c := p.c(); c {
case '|', endOfFile:
return nil, nil
case '*', '+':
p.error = ErrBareClosure;
return;
case ')':
if p.nlpar == 0 {
p.error = ErrUnmatchedRpar;
return;
}
return nil, nil;
case ']':
p.error = ErrUnmatchedRbkt;
return;
case '^':
p.nextc();
start = p.re.add(new(_Bot));
return start, start;
case '$':
p.nextc();
start = p.re.add(new(_Eot));
return start, start;
case '.':
p.nextc();
start = p.re.add(new(_Any));
return start, start;
case '[':
p.nextc();
start = p.charClass();
if p.error != "" {
return
}
if p.c() != ']' {
p.error = ErrUnmatchedLbkt;
return;
}
p.nextc();
return start, start;
case '(':
p.nextc();
p.nlpar++;
p.re.nbra++; // increment first so first subexpr is \1
nbra := p.re.nbra;
start, end = p.regexp();
if p.c() != ')' {
p.error = ErrUnmatchedLpar;
return;
}
p.nlpar--;
p.nextc();
bra := new(_Bra);
p.re.add(bra);
ebra := new(_Ebra);
p.re.add(ebra);
bra.n = nbra;
ebra.n = nbra;
if start == nil {
if end == nil {
p.error = ErrInternal;
return;
}
start = ebra;
} else {
end.setNext(ebra)
}
bra.setNext(start);
return bra, ebra;
case '\\':
c = p.nextc();
switch {
case c == endOfFile:
p.error = ErrExtraneousBackslash;
return;
case c == 'n':
c = '\n'
case special(c):
// c is as delivered
default:
p.error = ErrBadBackslash;
return;
}
fallthrough;
default:
p.nextc();
start = newChar(c);
p.re.add(start);
return start, start;
}
panic("unreachable");
}
func (p *parser) closure() (start, end instr) {
start, end = p.term();
if start == nil || p.error != "" {
return
}
switch p.c() {
case '*':
// (start,end)*:
alt := new(_Alt);
p.re.add(alt);
end.setNext(alt); // after end, do alt
alt.left = start; // alternate branch: return to start
start = alt; // alt becomes new (start, end)
end = alt;
case '+':
// (start,end)+:
alt := new(_Alt);
p.re.add(alt);
end.setNext(alt); // after end, do alt
alt.left = start; // alternate branch: return to start
end = alt; // start is unchanged; end is alt
case '?':
// (start,end)?:
alt := new(_Alt);
p.re.add(alt);
nop := new(_Nop);
p.re.add(nop);
alt.left = start; // alternate branch is start
alt.setNext(nop); // follow on to nop
end.setNext(nop); // after end, go to nop
start = alt; // start is now alt
end = nop; // end is nop pointed to by both branches
default:
return
}
switch p.nextc() {
case '*', '+', '?':
p.error = ErrBadClosure
}
return;
}
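// A concrete sketch of the wiring for "a?" (illustrative): alt's left
// branch enters the term, alt's next edge bypasses it, and both paths
// rejoin at the shared nop:
//
//        +--left--> start(a) ... end(a) --+
// alt ---+                                v
//        +--next------------------------> nop --> rest of pattern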
func (p *parser) concatenation() (start, end instr) {
for {
nstart, nend := p.closure();
if p.error != "" {
return
}
switch {
case nstart == nil: // end of this concatenation
if start == nil { // this is the empty string
nop := p.re.add(new(_Nop));
return nop, nop;
}
return;
case start == nil: // this is first element of concatenation
start, end = nstart, nend
default:
end.setNext(nstart);
end = nend;
}
}
panic("unreachable");
}
func (p *parser) regexp() (start, end instr) {
start, end = p.concatenation();
if p.error != "" {
return
}
for {
switch p.c() {
default:
return
case '|':
p.nextc();
nstart, nend := p.concatenation();
if p.error != "" {
return
}
alt := new(_Alt);
p.re.add(alt);
alt.left = start;
alt.setNext(nstart);
nop := new(_Nop);
p.re.add(nop);
end.setNext(nop);
nend.setNext(nop);
start, end = alt, nop;
}
}
panic("unreachable");
}
func unNop(i instr) instr {
for i.kind() == _NOP {
i = i.next()
}
return i;
}
func (re *Regexp) eliminateNops() | {
for i := 0; i < len(re.inst); i++ {
inst := re.inst[i];
if inst.kind() == _END {
continue
}
inst.setNext(unNop(inst.next()));
if inst.kind() == _ALT {
alt := inst.(*_Alt);
alt.left = unNop(alt.left);
}
}
} | identifier_body |
|
regexp.go | .ranges[n] = a;
n++;
cclass.ranges[n] = b;
n++;
}
func (cclass *_CharClass) | (c int) bool {
for i := 0; i < len(cclass.ranges); i = i + 2 {
min := cclass.ranges[i];
max := cclass.ranges[i+1];
if min <= c && c <= max {
return !cclass.negate
}
}
return cclass.negate;
}
func newCharClass() *_CharClass {
c := new(_CharClass);
c.ranges = make([]int, 0, 20);
return c;
}
// --- ANY any character
type _Any struct {
common;
}
func (any *_Any) kind() int { return _ANY }
func (any *_Any) print() { print("any") }
// --- NOTNL any character but newline
type _NotNl struct {
common;
}
func (notnl *_NotNl) kind() int { return _NOTNL }
func (notnl *_NotNl) print() { print("notnl") }
// --- BRA parenthesized expression
type _Bra struct {
common;
n int; // subexpression number
}
func (bra *_Bra) kind() int { return _BRA }
func (bra *_Bra) print() { print("bra", bra.n) }
// --- EBRA end of parenthesized expression
type _Ebra struct {
common;
n int; // subexpression number
}
func (ebra *_Ebra) kind() int { return _EBRA }
func (ebra *_Ebra) print() { print("ebra ", ebra.n) }
// --- ALT alternation
type _Alt struct {
common;
left instr; // other branch
}
func (alt *_Alt) kind() int { return _ALT }
func (alt *_Alt) print() { print("alt(", alt.left.index(), ")") }
// --- NOP no operation
type _Nop struct {
common;
}
func (nop *_Nop) kind() int { return _NOP }
func (nop *_Nop) print() { print("nop") }
func (re *Regexp) add(i instr) instr {
n := len(re.inst);
i.setIndex(len(re.inst));
if n >= cap(re.inst) {
ni := make([]instr, n, 2*n);
for i, j := range re.inst {
ni[i] = j
}
re.inst = ni;
}
re.inst = re.inst[0 : n+1];
re.inst[n] = i;
return i;
}
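// Note: the capacity check and copy loop above are written out by hand
// because this code appears to predate the built-in append; with append
// available, the body would reduce to roughly:
//
//   i.setIndex(len(re.inst));
//   re.inst = append(re.inst, i);
//   return i;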
type parser struct {
re *Regexp;
error string;
nlpar int; // number of unclosed lpars
pos int;
ch int;
}
const endOfFile = -1
func (p *parser) c() int { return p.ch }
func (p *parser) nextc() int {
if p.pos >= len(p.re.expr) {
p.ch = endOfFile
} else {
c, w := utf8.DecodeRuneInString(p.re.expr[p.pos:len(p.re.expr)]);
p.ch = c;
p.pos += w;
}
return p.ch;
}
func newParser(re *Regexp) *parser {
p := new(parser);
p.re = re;
p.nextc(); // load p.ch
return p;
}
func special(c int) bool {
s := `\.+*?()|[]^$`;
for i := 0; i < len(s); i++ {
if c == int(s[i]) {
return true
}
}
return false;
}
func specialcclass(c int) bool {
s := `\-[]`;
for i := 0; i < len(s); i++ {
if c == int(s[i]) {
return true
}
}
return false;
}
func (p *parser) charClass() instr {
cc := newCharClass();
if p.c() == '^' {
cc.negate = true;
p.nextc();
}
left := -1;
for {
switch c := p.c(); c {
case ']', endOfFile:
if left >= 0 {
p.error = ErrBadRange;
return nil;
}
// Is it [^\n]?
if cc.negate && len(cc.ranges) == 2 &&
cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
nl := new(_NotNl);
p.re.add(nl);
return nl;
}
p.re.add(cc);
return cc;
case '-': // do this before backslash processing
p.error = ErrBadRange;
return nil;
case '\\':
c = p.nextc();
switch {
case c == endOfFile:
p.error = ErrExtraneousBackslash;
return nil;
case c == 'n':
c = '\n'
case specialcclass(c):
// c is as delivered
default:
p.error = ErrBadBackslash;
return nil;
}
fallthrough;
default:
p.nextc();
switch {
case left < 0: // first of pair
if p.c() == '-' { // range
p.nextc();
left = c;
} else { // single char
cc.addRange(c, c)
}
case left <= c: // second of pair
cc.addRange(left, c);
left = -1;
default:
p.error = ErrBadRange;
return nil;
}
}
}
return nil;
}
func (p *parser) term() (start, end instr) {
// term() is the leaf of the recursion, so it's sufficient to pick off the
// error state here for early exit.
// The other functions (closure(), concatenation() etc.) assume
// it's safe to recur to here.
if p.error != "" {
return
}
switch c := p.c(); c {
case '|', endOfFile:
return nil, nil
case '*', '+':
p.error = ErrBareClosure;
return;
case ')':
if p.nlpar == 0 {
p.error = ErrUnmatchedRpar;
return;
}
return nil, nil;
case ']':
p.error = ErrUnmatchedRbkt;
return;
case '^':
p.nextc();
start = p.re.add(new(_Bot));
return start, start;
case '$':
p.nextc();
start = p.re.add(new(_Eot));
return start, start;
case '.':
p.nextc();
start = p.re.add(new(_Any));
return start, start;
case '[':
p.nextc();
start = p.charClass();
if p.error != "" {
return
}
if p.c() != ']' {
p.error = ErrUnmatchedLbkt;
return;
}
p.nextc();
return start, start;
case '(':
p.nextc();
p.nlpar++;
p.re.nbra++; // increment first so first subexpr is \1
nbra := p.re.nbra;
start, end = p.regexp();
if p.c() != ')' {
p.error = ErrUnmatchedLpar;
return;
}
p.nlpar--;
p.nextc();
bra := new(_Bra);
p.re.add(bra);
ebra := new(_Ebra);
p.re.add(ebra);
bra.n = nbra;
ebra.n = nbra;
if start == nil {
if end == nil {
p.error = ErrInternal;
return;
}
start = ebra;
} else {
end.setNext(ebra)
}
bra.setNext(start);
return bra, ebra;
case '\\':
c = p.nextc();
switch {
case c == endOfFile:
p.error = ErrExtraneousBackslash;
return;
case c == 'n':
c = '\n'
case special(c):
// c is as delivered
default:
p.error = ErrBadBackslash;
return;
}
fallthrough;
default:
p.nextc();
start = newChar(c);
p.re.add(start);
return start, start;
}
panic("unreachable");
}
func (p *parser) closure() (start, end instr) {
start, end = p.term();
if start == nil || p.error != "" {
return
}
switch p.c() {
case '*':
// (start,end)*:
alt := new(_Alt);
p.re.add(alt);
end.setNext(alt); // after end, do alt
alt.left = start; // alternate branch: return to start
start = alt; // alt becomes new (start, end)
end = alt;
case '+':
// (start,end)+:
alt := new(_Alt);
p.re.add(alt);
end.setNext(alt); // after end, do alt
alt.left = start; // alternate branch: return to start
end = alt; // start is unchanged | matches | identifier_name |
mod.rs | <usize>)> {
Ingredient::parent_columns(&**self, column)
}
/// Resolve where the given field originates from. If the view is materialized, or the value is
/// otherwise created by this view, None should be returned.
pub fn resolve(&self, i: usize) -> Option<Vec<(NodeIndex, usize)>> {
Ingredient::resolve(&**self, i)
}
/// Returns true if this operator requires a full materialization
pub fn requires_full_materialization(&self) -> bool {
Ingredient::requires_full_materialization(&**self)
}
pub fn can_query_through(&self) -> bool {
Ingredient::can_query_through(&**self)
}
pub fn is_join(&self) -> bool {
Ingredient::is_join(&**self)
}
pub fn ancestors(&self) -> Vec<NodeIndex> {
Ingredient::ancestors(&**self)
}
/// Produce a compact, human-readable description of this node for Graphviz.
///
/// If `detailed` is true, emit more info.
///
/// Symbol Description
/// --------|-------------
/// B | Base
/// || | Concat
/// ⧖ | Latest
/// γ | Group by
/// |*| | Count
/// 𝛴 | Sum
/// ⋈ | Join
/// ⋉ | Left join
/// ⋃ | Union
pub fn description(&self, detailed: bool) -> String {
Ingredient::description(&**self, detailed)
}
}
// publicly accessible attributes
impl Node {
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn sharded_by(&self) -> Sharding {
self.sharded_by
}
/// Set this node's sharding property.
pub fn shard_by(&mut self, s: Sharding) {
self.sharded_by = s;
}
}
// events
impl Node {
pub fn take(&mut self) -> DanglingDomainNode {
assert!(!self.taken);
assert!(
(!self.is_internal() && !self.is_base()) || self.domain.is_some(),
"tried to take unassigned node"
);
let inner = self.inner.take();
let mut n = self.mirror(inner);
n.index = self.index;
n.domain = self.domain;
n.purge = self.purge;
self.taken = true;
DanglingDomainNode(n)
}
pub fn remove(&mut self) {
self.inner = NodeType::Dropped;
}
}
// derefs
impl Node {
pub(crate) fn with_sharder_mut<F>(&mut self, f: F)
where
F: FnOnce(&mut special::Sharder),
{
match self.inner {
NodeType::Sharder(ref mut s) => f(s),
_ => unreachable!(),
}
}
pub fn with_sharder<'a, F, R>(&'a self, f: F) -> Option<R>
where
F: FnOnce(&'a special::Sharder) -> R,
R: 'a,
{
match self.inner {
NodeType::Sharder(ref s) => Some(f(s)),
_ => None,
}
}
pub(crate) fn with_egress_mut<F>(&mut self, f: F)
where
F: FnOnce(&mut special::Egress),
{
match self.inner {
NodeType::Egress(Some(ref mut e)) => f(e),
_ => unreachable!(),
}
}
pub fn with_reader_mut<'a, F, R>(&'a mut self, f: F) -> Result<R, ()>
where
F: FnOnce(&'a mut special::Reader) -> R,
R: 'a,
{
match self.inner {
NodeType::Reader(ref mut r) => Ok(f(r)),
_ => Err(()),
}
}
pub fn with_reader<'a, F, R>(&'a self, f: F) -> Result<R, ()>
where
F: FnOnce(&'a special::Reader) -> R,
R: 'a,
{
match self.inner {
NodeType::Reader(ref r) => Ok(f(r)),
_ => Err(()),
}
}
pub fn get_base(&self) -> Option<&special::Base> {
if let NodeType::Base(ref b) = self.inner {
Some(b)
} else {
None
}
}
pub fn suggest_indexes(&self, n: NodeIndex) -> HashMap<NodeIndex, Vec<usize>> {
match self.inner {
NodeType::Internal(ref i) => i.suggest_indexes(n),
NodeType::Base(ref b) => b.suggest_indexes(n),
_ => HashMap::new(),
}
}
}
impl Deref for Node {
type Target = ops::NodeOperator;
fn deref(&self) -> &Self::Target {
match self.inner {
NodeType::Internal(ref i) => i,
_ => unreachable!(),
}
}
}
impl DerefMut for Node {
fn deref_mut(&mut self) -> &mut Self::Target {
assert!(!self.taken);
match self.inner {
NodeType::Internal(ref mut i) => i,
_ => unreachable!(),
}
}
}
// neighbors
impl Node {
pub(crate) fn children(&self) -> &[LocalNodeIndex] {
&self.children
}
pub(crate) fn parents(&self) -> &[LocalNodeIndex] {
&self.parents
}
}
// attributes
impl Node {
pub(crate) fn beyond_mat_frontier(&self) -> bool {
self.purge
}
pub(crate) fn add_child(&mut self, child: LocalNodeIndex) {
self.children.push(child);
}
pub(crate) fn try_remove_child(&mut self, child: LocalNodeIndex) -> bool {
for i in 0..self.children.len() {
if self.children[i] == child {
self.children.swap_remove(i);
return true;
}
}
false
}
pub fn add_column(&mut self, field: &str) -> usize {
self.fields.push(field.to_string());
self.fields.len() - 1
}
pub fn has_domain(&self) -> bool {
self.domain.is_some()
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!(
"asked for unset domain for {:?} {}",
self,
self.global_addr().index()
);
}
}
}
pub fn local_addr(&self) -> LocalNodeIndex {
match self.index {
Some(idx) if idx.has_local() => *idx,
Some(_) | None => unreachable!("asked for unset addr for {:?}", self),
}
}
pub fn global_addr(&self) -> NodeIndex {
match self.index {
Some(ref index) => index.as_global(),
None => {
unreachable!("asked for unset index for {:?}", self);
}
}
}
pub fn get_base_mut(&mut self) -> Option<&mut special::Base> {
if let NodeType::Base(ref mut b) = self.inner {
Some(b)
} else {
None
}
}
pub fn add_to(&mut self, domain: domain::Index) {
assert_eq!(self.domain, None);
assert!(!self.is_dropped());
self.domain = Some(domain);
}
pub fn set_finalized_addr(&mut self, addr: IndexPair) {
self.index = Some(addr);
}
}
// is this or that?
impl Node {
pub fn is_dropped(&self) -> bool {
if let NodeType::Dropped = self.inner {
true
} else {
false
}
}
pub fn is_egress(&self) -> bool {
if let NodeType::Egress { .. } = self.inner {
true
} else {
false
}
}
pub fn is_reader(&self) -> bool {
if let NodeType::Reader { .. } = self.inner {
true
} else {
false
}
}
pub fn is_ingress(&self) -> bool {
if let NodeType::Ingress = self.inner {
true
} else {
false
}
}
pub fn is_sender(&self) -> bool {
match self.inner {
NodeType::Egress { .. } | NodeType::Sharder(..) => true,
_ => false,
}
}
pub fn is_internal(&self) -> bool {
if let NodeType::Internal(..) = self.inner {
true
} else {
false
}
}
pub fn is_source(&self) -> bool {
if let NodeType::Source { .. } = self.inner {
true
} else {
false
}
}
pub fn is_sharder(&self) -> bool {
if let NodeType::Sharder { .. } = self.inner {
true
} else {
false
}
}
pub fn is_base(&sel | f) -> b | identifier_name |
|
mod.rs | usize)>> {
Ingredient::resolve(&**self, i)
}
/// Returns true if this operator requires a full materialization
pub fn requires_full_materialization(&self) -> bool {
Ingredient::requires_full_materialization(&**self)
}
pub fn can_query_through(&self) -> bool {
Ingredient::can_query_through(&**self)
}
pub fn is_join(&self) -> bool {
Ingredient::is_join(&**self)
}
pub fn ancestors(&self) -> Vec<NodeIndex> {
Ingredient::ancestors(&**self)
}
/// Produce a compact, human-readable description of this node for Graphviz.
///
/// If `detailed` is true, emit more info.
///
/// Symbol Description
/// --------|-------------
/// B | Base
/// || | Concat
/// ⧖ | Latest
/// γ | Group by
/// |*| | Count
/// 𝛴 | Sum
/// ⋈ | Join
/// ⋉ | Left join
/// ⋃ | Union
pub fn description(&self, detailed: bool) -> String {
Ingredient::description(&**self, detailed)
}
}
// publicly accessible attributes
impl Node {
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn sharded_by(&self) -> Sharding {
self.sharded_by
}
/// Set this node's sharding property.
pub fn shard_by(&mut self, s: Sharding) {
self.sharded_by = s;
}
}
// events
impl Node {
pub fn take(&mut self) -> DanglingDomainNode {
assert!(!self.taken);
assert!(
(!self.is_internal() && !self.is_base()) || self.domain.is_some(),
"tried to take unassigned node"
);
let inner = self.inner.take();
let mut n = self.mirror(inner);
n.index = self.index;
n.domain = self.domain;
n.purge = self.purge;
self.taken = true;
DanglingDomainNode(n)
}
pub fn remove(&mut self) {
self.inner = NodeType::Dropped;
}
}
// derefs
impl Node {
pub(crate) fn with_sharder_mut<F>(&mut self, f: F)
where
F: FnOnce(&mut special::Sharder),
{
match self.inner {
NodeType::Sharder(ref mut s) => f(s),
_ => unreachable!(),
}
}
pub fn with_sharder<'a, F, R>(&'a self, f: F) -> Option<R>
where
F: FnOnce(&'a special::Sharder) -> R,
R: 'a,
{
match self.inner {
NodeType::Sharder(ref s) => Some(f(s)),
_ => None,
}
}
pub(crate) fn with_egress_mut<F>(&mut self, f: F)
where
F: FnOnce(&mut special::Egress),
{
match self.inner {
NodeType::Egress(Some(ref mut e)) => f(e),
_ => unreachable!(),
}
}
pub fn with_reader_mut<'a, F, R>(&'a mut self, f: F) -> Result<R, ()>
where
F: FnOnce(&'a mut special::Reader) -> R,
R: 'a,
{
match self.inner {
NodeType::Reader(ref mut r) => Ok(f(r)),
_ => Err(()),
}
}
pub fn with_reader<'a, F, R>(&'a self, f: F) -> Result<R, ()>
where
F: FnOnce(&'a special::Reader) -> R,
R: 'a,
{
match self.inner {
NodeType::Reader(ref r) => Ok(f(r)),
_ => Err(()),
}
}
pub fn get_base(&self) -> Option<&special::Base> {
if let NodeType::Base(ref b) = self.inner {
Some(b)
} else {
None
}
}
pub fn suggest_indexes(&self, n: NodeIndex) -> HashMap<NodeIndex, Vec<usize>> {
match self.inner {
NodeType::Internal(ref i) => i.suggest_indexes(n),
NodeType::Base(ref b) => b.suggest_indexes(n),
_ => HashMap::new(),
}
}
}
impl Deref for Node {
type Target = ops::NodeOperator;
fn deref(&self) -> &Self::Target {
match self.inner {
NodeType::Internal(ref i) => i,
_ => unreachable!(),
}
}
}
impl DerefMut for Node {
fn deref_mut(&mut self) -> &mut Self::Target {
assert!(!self.taken);
match self.inner {
NodeType::Internal(ref mut i) => i,
_ => unreachable!(),
}
}
}
// neighbors
impl Node {
pub(crate) fn children(&self) -> &[LocalNodeIndex] {
&self.children
}
pub(crate) fn parents(&self) -> &[LocalNodeIndex] {
&self.parents
}
}
// attributes
impl Node {
pub(crate) fn beyond_mat_frontier(&self) -> bool {
self.purge
}
pub(crate) fn add_child(&mut self, child: LocalNodeIndex) {
self.children.push(child);
}
pub(crate) fn try_remove_child(&mut self, child: LocalNodeIndex) -> bool {
for i in 0..self.children.len() {
if self.children[i] == child {
self.children.swap_remove(i);
return true;
}
}
false
}
pub fn add_column(&mut self, field: &str) -> usize {
self.fields.push(field.to_string());
self.fields.len() - 1
}
pub fn has_domain(&self) -> bool {
self.domain.is_some()
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!(
"asked for unset domain for {:?} {}",
self,
self.global_addr().index()
);
}
}
}
pub fn local_addr(&self) -> LocalNodeIndex {
match self.index {
Some(idx) if idx.has_local() => *idx,
Some(_) | None => unreachable!("asked for unset addr for {:?}", self),
}
}
pub fn global_addr(&self) -> NodeIndex {
match self.index {
Some(ref index) => index.as_global(),
None => {
unreachable!("asked for unset index for {:?}", self);
}
}
}
pub fn get_base_mut(&mut self) -> Option<&mut special::Base> {
if let NodeType::Base(ref mut b) = self.inner {
Some(b)
} else {
None
}
}
pub fn add_to(&mut self, domain: domain::Index) {
assert_eq!(self.domain, None);
assert!(!self.is_dropped());
self.domain = Some(domain);
}
pub fn set_finalized_addr(&mut self, addr: IndexPair) {
self.index = Some(addr);
}
}
// is this or that?
impl Node {
pub fn is_dropped(&self) -> bool {
if let NodeType::Dropped = self.inner {
true
} else {
false
}
}
pub fn is_egress(&self) -> bool {
if let NodeType::Egress { .. } = self.inner {
true
} else {
false
}
}
pub fn is_reader(&self) -> bool {
if let NodeType::Reader { .. } = self.inner {
true
} else {
false
}
}
pub fn is_ingress(&self) -> bool {
if let NodeType::Ingress = self.inner {
true
} else {
false
}
}
pub fn is_sender(&self) -> bool {
match self.inner {
NodeType::Egress { .. } | NodeType::Sharder(..) => true,
_ => false,
}
}
pub fn is_internal(&self) -> bool {
if let NodeType::Internal(..) = self.inner {
true
} else {
false
}
}
pub fn is_source(&self) -> bool {
if let NodeType::Source { .. } = self.inner {
true
} else {
false
}
}
pub fn is_sharder(&self) -> bool {
if let NodeType::Sharder { .. } = self.inner {
true
} else {
false
}
}
pub fn is_base(&self) -> bool {
if let NodeType::Base(..) = self.inner {
true
} else {
false
}
}
pub fn is_union(&self) -> bool {
if | let NodeType::Internal(NodeOperator::Union(_)) = self.inner {
true
} else {
false
}
}
pub fn | identifier_body |
|
mod.rs | children: Vec::new(),
inner: inner.into(),
taken: false,
purge: false,
sharded_by: Sharding::None,
}
}
pub fn mirror<NT: Into<NodeType>>(&self, n: NT) -> Node {
Self::new(&*self.name, &self.fields, n)
}
pub fn named_mirror<NT: Into<NodeType>>(&self, n: NT, name: String) -> Node {
Self::new(name, &self.fields, n)
}
}
#[must_use]
pub struct DanglingDomainNode(Node);
impl DanglingDomainNode {
pub fn finalize(self, graph: &Graph) -> Node {
let mut n = self.0;
let ni = n.global_addr();
let dm = n.domain();
n.children = graph
.neighbors_directed(ni, petgraph::EdgeDirection::Outgoing)
.filter(|&c| graph[c].domain() == dm)
.map(|ni| graph[ni].local_addr())
.collect();
n.parents = graph
.neighbors_directed(ni, petgraph::EdgeDirection::Incoming)
.filter(|&c| !graph[c].is_source() && graph[c].domain() == dm)
.map(|ni| graph[ni].local_addr())
.collect();
n
}
}
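// The take()/finalize() pair is a two-phase move of a node into its
// domain: take() detaches the inner operator (marking the original as
// taken), and finalize() rebuilds the local parent/child links once the
// node's graph neighbourhood is known. #[must_use] ensures a dangling
// node is never silently dropped before finalize() runs.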
// external parts of Ingredient
impl Node {
/// Called when a node is first connected to the graph.
///
/// All its ancestors are present, but this node and its children may not have been connected
/// yet.
pub fn on_connected(&mut self, graph: &Graph) {
Ingredient::on_connected(&mut **self, graph)
}
pub fn on_commit(&mut self, remap: &HashMap<NodeIndex, IndexPair>) {
// this is *only* overwritten for these asserts.
assert!(!self.taken);
if let NodeType::Internal(ref mut i) = self.inner {
i.on_commit(self.index.unwrap().as_global(), remap)
}
}
/// May return a set of nodes such that *one* of the given ancestors *must* be the one to be
/// replayed if this node's state is to be initialized.
pub fn must_replay_among(&self) -> Option<HashSet<NodeIndex>> {
Ingredient::must_replay_among(&**self)
}
/// Translate a column in this ingredient into the corresponding column(s) in
/// parent ingredients. None for the column means that the parent doesn't
/// have an associated column. Similar to resolve, but does not depend on
/// materialization, and returns results even for computed columns.
pub fn parent_columns(&self, column: usize) -> Vec<(NodeIndex, Option<usize>)> {
Ingredient::parent_columns(&**self, column)
}
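// Illustrative example (not tied to a specific operator): for a grouped
// count, the computed count column has no source column, so
// parent_columns might yield vec![(parent, None)], whereas a column
// passed through unchanged from position i yields vec![(parent, Some(i))].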
/// Resolve where the given field originates from. If the view is materialized, or the value is
/// otherwise created by this view, None should be returned.
pub fn resolve(&self, i: usize) -> Option<Vec<(NodeIndex, usize)>> {
Ingredient::resolve(&**self, i)
}
/// Returns true if this operator requires a full materialization
pub fn requires_full_materialization(&self) -> bool {
Ingredient::requires_full_materialization(&**self)
}
pub fn can_query_through(&self) -> bool {
Ingredient::can_query_through(&**self)
}
pub fn is_join(&self) -> bool {
Ingredient::is_join(&**self)
}
pub fn ancestors(&self) -> Vec<NodeIndex> {
Ingredient::ancestors(&**self)
}
/// Produce a compact, human-readable description of this node for Graphviz.
///
/// If `detailed` is true, emit more info.
///
/// Symbol Description
/// --------|-------------
/// B | Base
/// || | Concat
/// ⧖ | Latest
/// γ | Group by
/// |*| | Count
/// 𝛴 | Sum
/// ⋈ | Join
/// ⋉ | Left join
/// ⋃ | Union
pub fn description(&self, detailed: bool) -> String {
Ingredient::description(&**self, detailed)
}
}
// publicly accessible attributes
impl Node {
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn sharded_by(&self) -> Sharding {
self.sharded_by
}
/// Set this node's sharding property.
pub fn shard_by(&mut self, s: Sharding) {
self.sharded_by = s;
}
}
// events
impl Node {
pub fn take(&mut self) -> DanglingDomainNode {
assert!(!self.taken);
assert!(
(!self.is_internal() && !self.is_base()) || self.domain.is_some(),
"tried to take unassigned node"
);
let inner = self.inner.take();
let mut n = self.mirror(inner);
n.index = self.index;
n.domain = self.domain;
n.purge = self.purge;
self.taken = true;
DanglingDomainNode(n)
}
pub fn remove(&mut self) {
self.inner = NodeType::Dropped;
}
}
// derefs
impl Node {
pub(crate) fn with_sharder_mut<F>(&mut self, f: F)
where
F: FnOnce(&mut special::Sharder),
{
match self.inner {
NodeType::Sharder(ref mut s) => f(s),
_ => unreachable!(),
}
}
pub fn with_sharder<'a, F, R>(&'a self, f: F) -> Option<R>
where
F: FnOnce(&'a special::Sharder) -> R,
R: 'a,
{
match self.inner {
NodeType::Sharder(ref s) => Some(f(s)),
_ => None,
}
}
pub(crate) fn with_egress_mut<F>(&mut self, f: F)
where
F: FnOnce(&mut special::Egress),
{
match self.inner {
NodeType::Egress(Some(ref mut e)) => f(e),
_ => unreachable!(),
}
}
pub fn with_reader_mut<'a, F, R>(&'a mut self, f: F) -> Result<R, ()>
where
F: FnOnce(&'a mut special::Reader) -> R,
R: 'a,
{
match self.inner {
NodeType::Reader(ref mut r) => Ok(f(r)),
_ => Err(()),
}
}
pub fn with_reader<'a, F, R>(&'a self, f: F) -> Result<R, ()>
where
F: FnOnce(&'a special::Reader) -> R,
R: 'a,
{
match self.inner {
NodeType::Reader(ref r) => Ok(f(r)),
_ => Err(()),
}
}
pub fn get_base(&self) -> Option<&special::Base> {
if let NodeType::Base(ref b) = self.inner {
Some(b)
} else {
None
}
}
pub fn suggest_indexes(&self, n: NodeIndex) -> HashMap<NodeIndex, Vec<usize>> {
match self.inner {
NodeType::Internal(ref i) => i.suggest_indexes(n),
NodeType::Base(ref b) => b.suggest_indexes(n),
_ => HashMap::new(),
}
}
}
impl Deref for Node {
type Target = ops::NodeOperator;
fn deref(&self) -> &Self::Target {
match self.inner {
NodeType::Internal(ref i) => i,
_ => unreachable!(),
}
}
}
impl DerefMut for Node {
fn deref_mut(&mut self) -> &mut Self::Target {
assert!(!self.taken);
match self.inner {
NodeType::Internal(ref mut i) => i,
_ => unreachable!(),
}
}
}
// neighbors
impl Node {
pub(crate) fn children(&self) -> &[LocalNodeIndex] {
&self.children
}
pub(crate) fn parents(&self) -> &[LocalNodeIndex] {
&self.parents
}
}
// attributes
impl Node {
pub(crate) fn beyond_mat_frontier(&self) -> bool {
self.purge
}
pub(crate) fn add_child(&mut self, child: LocalNodeIndex) {
self.children.push(child);
}
pub(crate) fn try_remove_child(&mut self, child: LocalNodeIndex) -> bool {
for i in 0..self.children.len() {
if self.children[i] == child {
self.children.swap_remove(i);
return true;
}
}
false
}
pub fn add_column(&mut self, field: &str) -> usize {
self.fields.push(field.to_string());
self.fields.len() - 1
| index: None,
domain: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
parents: Vec::new(), | random_line_split |
|
gdb_stub.rs | attached to a process
self.send(b"1")
}
b"HostInfo" => {
const MACH_O_ARM: u32 = 12;
const MACH_O_ARM_V4T: u32 = 5;
self.send_fmt(format_args!("cputype:{};cpusubtype:{};ostype:none;vendor:none;endian:little;ptrsize:4;", MACH_O_ARM, MACH_O_ARM_V4T))
}
_ => {
if let Some(tail) = strip_prefix(msg, b"Supported:") {
self.process_qsupported_command(tail)
} else {
self.unrecognised_command()
}
}
}
}
fn process_qsupported_command(&mut self, msg: &[u8]) -> GResult {
let mut have_capabilities = Vec::new();
for requested_capability in msg.split(|&b| b == b';' || b == b',') {
match requested_capability {
b"swbreak+" | b"hwbreak+" => {
have_capabilities.push(requested_capability);
}
b"arm" => {
have_capabilities.push(b"arm+");
}
// TODO: Support "vContSupported+"?
_ => {}
}
}
let capability_string = have_capabilities.join(&b';');
self.send(&capability_string)
}
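// Example exchange: for "qSupported:swbreak+;hwbreak+;vContSupported+"
// this stub replies "swbreak+;hwbreak+"; only the capabilities it
// recognises are echoed back.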
fn process_qwrite_command(&mut self, msg: &[u8]) -> GResult {
match msg {
b"StartNoAckMode" => {
self.no_ack_mode = true;
self.send(b"OK")
}
_ => {
self.unrecognised_command()
}
}
}
fn read_gprs(&mut self) -> GResult {
let mut reg_string = Vec::with_capacity(16 * 8);
for reg in self.gba.arm.regs[..REG_PC].iter() {
reg_string.write(&int_to_hex_le(*reg))?;
}
reg_string.write(&int_to_hex_le(self.gba.arm.current_pc()))?;
self.send(®_string)
}
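// Register values are serialised as little-endian hex, which the GDB
// remote protocol expects for a little-endian ARM target. Assuming
// int_to_hex_le emits the bytes in memory order, a PC of 0x0800_0000
// becomes "00000008".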
fn write_gprs(&mut self, msg: &[u8]) -> GResult {
for (i, value) in msg.chunks_exact(8).map(hex_to_int_le).enumerate() {
self.gba.arm.set_reg(i, value?);
}
self.send(b"OK")
}
fn read_gpr(&mut self, msg: &[u8]) -> GResult {
let reg_index: usize = hex_to_int(msg)?;
let reg = if reg_index == 25 {
self.gba.arm.cpsr.into()
} else if reg_index == REG_PC {
self.gba.arm.current_pc()
} else if reg_index < 16 {
self.gba.arm.regs[reg_index]
} else {
return self.send(b"E00");
};
self.send(&int_to_hex_le(reg))
}
fn write_gpr(&mut self, msg: &[u8]) -> GResult {
let (reg_index_str, value_str) = split_at(msg, b'=')?;
let reg_index = hex_to_int(reg_index_str)?;
let value = hex_to_int_le(value_str)?;
self.gba.arm.set_reg(reg_index, value);
self.send(b"OK")
}
fn read_memory(&mut self, msg: &[u8]) -> GResult {
let (addr_str, len_str) = split_at(msg, b',')?;
let addr: u32 = hex_to_int(addr_str)?;
let len: u32 = hex_to_int(len_str)?;
let mut result = Vec::<u8>::with_capacity(2 * len as usize);
for i in addr..addr + len {
let (_, byte) = self.gba.debug_read8(i);
result.write_fmt(format_args!("{:02X}", byte))?;
}
self.send(&result)
}
fn write_memory(&mut self, msg: &[u8]) -> GResult {
let (addr_str, len_str) = split_at(msg, b',')?;
let (len_str, data_str) = split_at(len_str, b':')?;
let start_addr: u32 = hex_to_int(addr_str)?;
let len: u32 = hex_to_int(len_str)?;
let data = data_str
.chunks(2)
.map(hex_to_int)
.collect::<Result<Vec<u8>, failure::Error>>()?;
for (addr, byte) in (start_addr..start_addr+len).zip(data) {
self.gba.debug_write8(addr, byte);
}
self.send(b"OK")
}
fn process_z_command(&mut self, msg: &[u8], is_insert: bool) -> GResult {
let (type_str, addr_str) = split_at(msg, b',')?;
let (addr_str, kind_str) = split_at(addr_str, b',')?;
let kind: u32 = hex_to_int(kind_str)?;
let start_addr = hex_to_int(addr_str)?;
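// "kind" conveys the breakpoint width in bytes; for ARM targets GDB
// conventionally sends 2 for Thumb and 4 for ARM instructions, which is
// why only those two values are accepted for breakpoint types below.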
let addr_set: &mut OrderedSet<u32> = match type_str {
b"0" | b"1" if kind != 2 && kind != 4 => {
return self.unrecognised_command();
}
b"0" => { // software breakpoint
// TODO: Does it matter that I'm just implementing this like a hardware breakpoint?
&mut self.bus_snooper.breakpoints
}
b"1" => { // hardware breakpoint
&mut self.bus_snooper.breakpoints
}
b"2" => { // write watchpoint
&mut self.bus_snooper.write_watchpoints
}
b"3" => { // read watchpoint
&mut self.bus_snooper.read_watchpoints
}
b"4" => { // access watchpoint
&mut self.bus_snooper.access_watchpoints
}
_ => {
return self.unrecognised_command();
}
};
for addr in start_addr..start_addr+kind {
if is_insert {
addr_set.insert(addr);
} else {
addr_set.remove(addr);
}
}
self.send(b"OK")
}
fn do_continue(&mut self, msg: &[u8]) -> GResult {
if !msg.is_empty() {
let addr = hex_to_int_le(msg)?;
self.gba.arm.branch_to(addr);
}
self.run_state = RunState::Running;
Ok(())
}
fn do_step(&mut self, msg: &[u8]) -> GResult {
if !msg.is_empty() {
let addr = hex_to_int_le(msg)?;
self.gba.arm.branch_to(addr);
}
self.step_gba();
let stop_reason = self.bus_snooper.stop_reason.take()
.unwrap_or(StopReason::Step);
self.send(&stop_reason.to_command())
}
fn step_gba(&mut self) {
let pc = self.gba.arm.regs[REG_PC] - self.gba.arm.get_op_size();
if self.bus_snooper.breakpoints.contains(pc) {
self.bus_snooper.stop_reason = Some(StopReason::Breakpoint(pc));
} else {
self.gba.step(&mut self.framebuffer);
}
}
fn process_detach_command(&mut self) -> GResult {
self.send(b"OK")?;
// Just close the stream; we have no other bookkeeping to do for detaching.
self.stream = None;
Ok(())
}
fn send_fmt(&mut self, args: Arguments) -> GResult {
let mut bytes = Vec::<u8>::new();
bytes.write_fmt(args)?;
self.send(&bytes)
}
fn send(&mut self, message: &[u8]) -> GResult {
let mut response = Vec::new();
response.push(b'$');
response.extend_from_slice(message);
response.push(b'#');
let checksum = checksum(message);
write!(response, "{:02X}", checksum)?;
self.send_raw(&response)
}
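// Packets are framed as "$<payload>#<checksum>". Assuming checksum()
// computes the protocol's modulo-256 sum of the payload bytes, sending
// "OK" goes out as "$OK#9A" (0x4F + 0x4B = 0x9A).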
fn ack(&mut self) -> GResult {
if self.no_ack_mode {
return Ok(());
}
self.send_raw(b"+")
}
fn nak(&mut self) -> GResult {
if self.no_ack_mode {
return Ok(());
}
self.send_raw(b"-")
}
fn unrecognised_command(&mut self) -> GResult {
// https://www-zeuthen.desy.de/unix/unixguide/infohtml/gdb/Overview.html
// The empty response "$#00" indicates to the GDB client that the command is not supported
self.send(&[])
}
fn write_fmt(&mut self, args: Arguments) -> GResult {
use std::io::Write;
let mut v = Vec::new();
v.write_fmt(args)?;
Ok(())
}
fn send_raw(&mut self, bytes: &[u8]) -> GResult {
if let Some(stream) = self.stream.as_mut() {
let amount = stream.write(bytes);
trace!(GDB, "wrote {:?} bytes of {} ({:?})", amount, bytes.len(), bytes_as_ascii(bytes));
amount?;
} else {
trace!(GDB, "tried to send {} bytes but stream was None", bytes.len());
}
Ok(())
}
} |
#[derive(Debug)]
enum StopReason {
ReadWatchpoint(u32), | random_line_split |
|
gdb_stub.rs | psr.into()
} else if reg_index == REG_PC {
self.gba.arm.current_pc()
} else if reg_index < 16 {
self.gba.arm.regs[reg_index]
} else {
return self.send(b"E00");
};
self.send(&int_to_hex_le(reg))
}
fn write_gpr(&mut self, msg: &[u8]) -> GResult {
let (reg_index_str, value_str) = split_at(msg, b'=')?;
let reg_index = hex_to_int(reg_index_str)?;
let value = hex_to_int_le(value_str)?;
self.gba.arm.set_reg(reg_index, value);
self.send(b"OK")
}
fn read_memory(&mut self, msg: &[u8]) -> GResult {
let (addr_str, len_str) = split_at(msg, b',')?;
let addr: u32 = hex_to_int(addr_str)?;
let len: u32 = hex_to_int(len_str)?;
let mut result = Vec::<u8>::with_capacity(2 * len as usize);
for i in addr..addr + len {
let (_, byte) = self.gba.debug_read8(i);
result.write_fmt(format_args!("{:02X}", byte))?;
}
self.send(&result)
}
fn write_memory(&mut self, msg: &[u8]) -> GResult {
let (addr_str, len_str) = split_at(msg, b',')?;
let (len_str, data_str) = split_at(len_str, b':')?;
let start_addr: u32 = hex_to_int(addr_str)?;
let len: u32 = hex_to_int(len_str)?;
let data = data_str
.chunks(2)
.map(hex_to_int)
.collect::<Result<Vec<u8>, failure::Error>>()?;
for (addr, byte) in (start_addr..start_addr+len).zip(data) {
self.gba.debug_write8(addr, byte);
}
self.send(b"OK")
}
fn process_z_command(&mut self, msg: &[u8], is_insert: bool) -> GResult {
let (type_str, addr_str) = split_at(msg, b',')?;
let (addr_str, kind_str) = split_at(addr_str, b',')?;
let kind: u32 = hex_to_int(kind_str)?;
let start_addr = hex_to_int(addr_str)?;
let addr_set: &mut OrderedSet<u32> = match type_str {
b"0" | b"1" if kind != 2 && kind != 4 => {
return self.unrecognised_command();
}
b"0" => { // software breakpoint
// TODO: Does it matter that I'm just implementing this like a hardware breakpoint?
&mut self.bus_snooper.breakpoints
}
b"1" => { // hardware breakpoint
&mut self.bus_snooper.breakpoints
}
b"2" => { // write watchpoint
&mut self.bus_snooper.write_watchpoints
}
b"3" => { // read watchpoint
&mut self.bus_snooper.read_watchpoints
}
b"4" => { // access watchpoint
&mut self.bus_snooper.access_watchpoints
}
_ => {
return self.unrecognised_command();
}
};
for addr in start_addr..start_addr+kind {
if is_insert {
addr_set.insert(addr);
} else {
addr_set.remove(addr);
}
}
self.send(b"OK")
}
fn do_continue(&mut self, msg: &[u8]) -> GResult {
if !msg.is_empty() {
let addr = hex_to_int_le(msg)?;
self.gba.arm.branch_to(addr);
}
self.run_state = RunState::Running;
Ok(())
}
fn do_step(&mut self, msg: &[u8]) -> GResult {
if !msg.is_empty() {
let addr = hex_to_int_le(msg)?;
self.gba.arm.branch_to(addr);
}
self.step_gba();
let stop_reason = self.bus_snooper.stop_reason.take()
.unwrap_or(StopReason::Step);
self.send(&stop_reason.to_command())
}
fn step_gba(&mut self) {
let pc = self.gba.arm.regs[REG_PC] - self.gba.arm.get_op_size();
if self.bus_snooper.breakpoints.contains(pc) {
self.bus_snooper.stop_reason = Some(StopReason::Breakpoint(pc));
} else {
self.gba.step(&mut self.framebuffer);
}
}
fn process_detach_command(&mut self) -> GResult {
self.send(b"OK")?;
// Just close the stream; we have no other bookkeeping to do for detaching.
self.stream = None;
Ok(())
}
fn send_fmt(&mut self, args: Arguments) -> GResult {
let mut bytes = Vec::<u8>::new();
bytes.write_fmt(args)?;
self.send(&bytes)
}
fn send(&mut self, message: &[u8]) -> GResult {
let mut response = Vec::new();
response.push(b'$');
response.extend_from_slice(message);
response.push(b'#');
let checksum = checksum(message);
write!(response, "{:02X}", checksum)?;
self.send_raw(&response)
}
fn ack(&mut self) -> GResult {
if self.no_ack_mode {
return Ok(());
}
self.send_raw(b"+")
}
fn nak(&mut self) -> GResult {
if self.no_ack_mode {
return Ok(());
}
self.send_raw(b"-")
}
fn unrecognised_command(&mut self) -> GResult {
// https://www-zeuthen.desy.de/unix/unixguide/infohtml/gdb/Overview.html
// The empty response "$#00" indicates to the GDB client that the command is not supported
self.send(&[])
}
fn write_fmt(&mut self, args: Arguments) -> GResult {
use std::io::Write;
let mut v = Vec::new();
v.write_fmt(args)?;
Ok(())
}
fn send_raw(&mut self, bytes: &[u8]) -> GResult {
if let Some(stream) = self.stream.as_mut() {
let amount = stream.write(bytes);
trace!(GDB, "wrote {:?} bytes of {} ({:?})", amount, bytes.len(), bytes_as_ascii(bytes));
amount?;
} else {
trace!(GDB, "tried to send {} bytes but stream was None", bytes.len());
}
Ok(())
}
}
#[derive(Debug)]
enum StopReason {
ReadWatchpoint(u32),
WriteWatchpoint(u32),
AccessWatchpoint(u32),
Breakpoint(u32),
Step,
}
impl StopReason {
fn to_command(&self) -> Vec<u8> {
let mut result = Vec::new();
match self {
StopReason::ReadWatchpoint(addr) => write!(result, "T{:02}rwatch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::WriteWatchpoint(addr) => write!(result, "T{:02}watch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::AccessWatchpoint(addr) => write!(result, "T{:02}awatch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::Breakpoint(_) => write!(result, "T{:02}hwbreak:", SIGTRAP),
StopReason::Step => write!(result, "S{:02}", SIGTRAP),
}.unwrap();
result
}
}
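// Illustrative stop-reply, assuming SIGTRAP is 5 and addresses are
// little-endian hex: a write watchpoint hit at 0x0300_0000 serialises
// to "T05watch:00000003".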
pub struct BusDebugSnooper {
delegate: BusPtr,
breakpoints: OrderedSet<u32>,
read_watchpoints: OrderedSet<u32>,
write_watchpoints: OrderedSet<u32>,
access_watchpoints: OrderedSet<u32>,
stop_reason: Option<StopReason>,
}
impl BusDebugSnooper {
pub fn wrap(delegate: BusPtr) -> Box<BusDebugSnooper> {
Box::new(BusDebugSnooper {
delegate,
breakpoints: OrderedSet::new(),
read_watchpoints: OrderedSet::new(),
write_watchpoints: OrderedSet::new(),
access_watchpoints: OrderedSet::new(),
stop_reason: None,
})
}
fn check_read(&mut self, addr: u32) {
if self.read_watchpoints.contains(addr) {
self.stop_reason = Some(StopReason::ReadWatchpoint(addr));
} else if self.access_watchpoints.contains(addr) {
self.stop_reason = Some(StopReason::AccessWatchpoint(addr));
}
}
fn check_write(&mut self, addr: u32) {
if self.write_watchpoints.contains(addr) {
self.stop_reason = Some(StopReason::WriteWatchpoint(addr));
}
}
}
impl Bus for BusDebugSnooper {
fn read8(&mut self, addr: u32) -> u8 {
self.check_read(addr);
self.delegate.read8(addr)
}
fn | read16 | identifier_name |
|
gdb_stub.rs | 6 {
self.gba.arm.regs[reg_index]
} else {
return self.send(b"E00");
};
self.send(&int_to_hex_le(reg))
}
fn write_gpr(&mut self, msg: &[u8]) -> GResult {
let (reg_index_str, value_str) = split_at(msg, b'=')?;
let reg_index = hex_to_int(reg_index_str)?;
let value = hex_to_int_le(value_str)?;
self.gba.arm.set_reg(reg_index, value);
self.send(b"OK")
}
fn read_memory(&mut self, msg: &[u8]) -> GResult {
let (addr_str, len_str) = split_at(msg, b',')?;
let addr: u32 = hex_to_int(addr_str)?;
let len: u32 = hex_to_int(len_str)?;
let mut result = Vec::<u8>::with_capacity(2 * len as usize);
for i in addr..addr + len {
let (_, byte) = self.gba.debug_read8(i);
result.write_fmt(format_args!("{:02X}", byte))?;
}
self.send(&result)
}
fn write_memory(&mut self, msg: &[u8]) -> GResult {
let (addr_str, len_str) = split_at(msg, b',')?;
let (len_str, data_str) = split_at(len_str, b':')?;
let start_addr: u32 = hex_to_int(addr_str)?;
let len: u32 = hex_to_int(len_str)?;
let data = data_str
.chunks(2)
.map(hex_to_int)
.collect::<Result<Vec<u8>, failure::Error>>()?;
for (addr, byte) in (start_addr..start_addr+len).zip(data) {
self.gba.debug_write8(addr, byte);
}
self.send(b"OK")
}
fn process_z_command(&mut self, msg: &[u8], is_insert: bool) -> GResult {
let (type_str, addr_str) = split_at(msg, b',')?;
let (addr_str, kind_str) = split_at(addr_str, b',')?;
let kind: u32 = hex_to_int(kind_str)?;
let start_addr = hex_to_int(addr_str)?;
let addr_set: &mut OrderedSet<u32> = match type_str {
b"0" | b"1" if kind != 2 && kind != 4 => {
return self.unrecognised_command();
}
b"0" => { // software breakpoint
// TODO: Does it matter that I'm just implementing this like a hardware breakpoint?
&mut self.bus_snooper.breakpoints
}
b"1" => { // hardware breakpoint
&mut self.bus_snooper.breakpoints
}
b"2" => { // write watchpoint
&mut self.bus_snooper.write_watchpoints
}
b"3" => { // read watchpoint
&mut self.bus_snooper.read_watchpoints
}
b"4" => { // access watchpoint
&mut self.bus_snooper.access_watchpoints
}
_ => {
return self.unrecognised_command();
}
};
for addr in start_addr..start_addr+kind {
if is_insert {
addr_set.insert(addr);
} else {
addr_set.remove(addr);
}
}
self.send(b"OK")
}
fn do_continue(&mut self, msg: &[u8]) -> GResult {
if !msg.is_empty() {
let addr = hex_to_int_le(msg)?;
self.gba.arm.branch_to(addr);
}
self.run_state = RunState::Running;
Ok(())
}
fn do_step(&mut self, msg: &[u8]) -> GResult {
if !msg.is_empty() {
let addr = hex_to_int_le(msg)?;
self.gba.arm.branch_to(addr);
}
self.step_gba();
let stop_reason = self.bus_snooper.stop_reason.take()
.unwrap_or(StopReason::Step);
self.send(&stop_reason.to_command())
}
fn step_gba(&mut self) {
let pc = self.gba.arm.regs[REG_PC] - self.gba.arm.get_op_size();
if self.bus_snooper.breakpoints.contains(pc) {
self.bus_snooper.stop_reason = Some(StopReason::Breakpoint(pc));
} else {
self.gba.step(&mut self.framebuffer);
}
}
fn process_detach_command(&mut self) -> GResult {
self.send(b"OK")?;
// Just close the stream, we have no other bookkeeping to do for detaching.
self.stream = None;
Ok(())
}
fn send_fmt(&mut self, args: Arguments) -> GResult {
let mut bytes = Vec::<u8>::new();
bytes.write_fmt(args)?;
self.send(&bytes)
}
fn send(&mut self, message: &[u8]) -> GResult {
let mut response = Vec::new();
response.push(b'$');
response.extend_from_slice(message);
response.push(b'#');
let checksum = checksum(message);
write!(response, "{:02X}", checksum)?;
self.send_raw(&response)
}
fn ack(&mut self) -> GResult {
if self.no_ack_mode {
return Ok(());
}
self.send_raw(b"+")
}
fn nak(&mut self) -> GResult {
if self.no_ack_mode {
return Ok(());
}
self.send_raw(b"-")
}
fn unrecognised_command(&mut self) -> GResult {
// https://www-zeuthen.desy.de/unix/unixguide/infohtml/gdb/Overview.html
// The empty response "$#00" indicates to the GDB client that the command is not supported
self.send(&[])
}
fn write_fmt(&mut self, args: Arguments) -> GResult {
use std::io::Write;
let mut v = Vec::new();
v.write_fmt(args)?;
Ok(())
}
fn send_raw(&mut self, bytes: &[u8]) -> GResult {
if let Some(stream) = self.stream.as_mut() {
let amount = stream.write(bytes);
trace!(GDB, "wrote {:?} bytes of {} ({:?})", amount, bytes.len(), bytes_as_ascii(bytes));
amount?;
} else {
trace!(GDB, "tried to send {} bytes but stream was None", bytes.len());
}
Ok(())
}
}
#[derive(Debug)]
enum StopReason {
ReadWatchpoint(u32),
WriteWatchpoint(u32),
AccessWatchpoint(u32),
Breakpoint(u32),
Step,
}
impl StopReason {
fn to_command(&self) -> Vec<u8> {
let mut result = Vec::new();
match self {
StopReason::ReadWatchpoint(addr) => write!(result, "T{:02}rwatch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::WriteWatchpoint(addr) => write!(result, "T{:02}watch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::AccessWatchpoint(addr) => write!(result, "T{:02}awatch:{}", SIGTRAP, std::str::from_utf8(&int_to_hex_le(*addr)).unwrap()),
StopReason::Breakpoint(_) => write!(result, "T{:02}hwbreak:", SIGTRAP),
StopReason::Step => write!(result, "S{:02}", SIGTRAP),
}.unwrap();
result
}
}
pub struct BusDebugSnooper {
delegate: BusPtr,
breakpoints: OrderedSet<u32>,
read_watchpoints: OrderedSet<u32>,
write_watchpoints: OrderedSet<u32>,
access_watchpoints: OrderedSet<u32>,
stop_reason: Option<StopReason>,
}
impl BusDebugSnooper {
pub fn wrap(delegate: BusPtr) -> Box<BusDebugSnooper> {
Box::new(BusDebugSnooper {
delegate,
breakpoints: OrderedSet::new(),
read_watchpoints: OrderedSet::new(),
write_watchpoints: OrderedSet::new(),
access_watchpoints: OrderedSet::new(),
stop_reason: None,
})
}
fn check_read(&mut self, addr: u32) {
if self.read_watchpoints.contains(addr) {
self.stop_reason = Some(StopReason::ReadWatchpoint(addr));
} else if self.access_watchpoints.contains(addr) {
self.stop_reason = Some(StopReason::AccessWatchpoint(addr));
}
}
fn check_write(&mut self, addr: u32) {
if self.write_watchpoints.contains(addr) {
self.stop_reason = Some(StopReason::WriteWatchpoint(addr));
}
}
}
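// BusDebugSnooper is a decorator around the real bus: every CPU memory
// access is first checked against the watchpoint sets (recording a
// StopReason on a hit) and then forwarded unchanged to the wrapped
// delegate.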
impl Bus for BusDebugSnooper {
fn read8(&mut self, addr: u32) -> u8 {
self.check_read(addr);
self.delegate.read8(addr)
}
fn read16(&mut self, addr: u32) -> u16 | {
self.check_read(addr);
self.delegate.read16(addr)
} | identifier_body |
|
install.js | cp.execSync('npm install', {
cwd: __dirname,
});
console.log('[info] Please enter the command again. Sorry!');
process.exit(15);
}
const chalk = require('chalk');
const redis = require('ioredis');
const rl = require('prompt-sync')()
const log = console.log;
const info = (...args) => {
log(chalk.white('[info]'), ...args)
}
const warn = (...args) => {
log(chalk.yellow('[warning]'), ...args)
}
const err = (...args) => {
log(chalk.red('[err]'), ...args)
}
/*
if (process.platform !== 'linux') {
log(chalk.red('[ERROR]'), 'Your current platform', chalk.yellow(process.platform), 'is not supported by this install script. Sorry!');
// process.exit(1);
}
*/
// First, check if cookies file exists
if (!fs.existsSync(path.join(__dirname, '../cookies.txt'))) {
err('Please create a file called "cookies.txt" and paste in a list of .ROBLOSECURITY cookies, separated by new lines.');
process.exit(1);
}
let wwwDir = path.join(__dirname, '../www/');
let confDir = path.join(wwwDir, './config.json');
const setupConfig = () => {
info('Creating random keys...');
/**
* @type {*}
*/
let conf = {
cookiesKey: crypto.randomBytes(64).toString('base64').replace(/"/g, '\"'),
csrfKey: crypto.randomBytes(64).toString('base64').replace(/"/g, '\"'),
};
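// For reference, the finished config.json ends up shaped roughly like
// this (values are illustrative):
// {
//   "cookiesKey": "...", "csrfKey": "...",
//   "redis": { "host": "127.0.0.1", "port": 6379, "keyPrefix": "2k12Roblox1",
//              "enableOfflineQueue": true, "password": "" },
//   "avatarRender": { "enabled": true, "address": "http://127.0.0.1:8196",
//                     "authorization": "...", "rules": ["..."] }
// }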
// check for default conf
let add = '127.0.0.1';
let prefix = '2k12Roblox1';
let port = 6379;
let pass = '';
const testRedis = () => {
return new Promise((res, rej) => {
info('Trying to connect to redis...');
let attempt = new redis(port, add, {
keyPrefix: prefix,
password: pass,
enableOfflineQueue: false,
});
attempt.on('ready', e => {
attempt.setex('testing1234', 1, 'world', (rErr, ok) => {
if (rErr) {
// Ask for pass
err(rErr)
pass = rl('It looks like your redis server requires a password. Please enter it and press enter.\n');
if (!pass) {
err('Exiting due to no pass.');
process.exit(1);
}
attempt.disconnect();
testRedis().then(ok => {
res();
}).catch(e => {
err(e);
rej(e);
})
} else {
// Ok
conf.redis = {
host: add,
port,
keyPrefix: prefix,
enableOfflineQueue: true,
password: pass,
}
res()
}
});
});
attempt.on('error', (e) => {
// Install
try {
attempt.disconnect();
} catch (e) {
}
let isInstalled = rl('Do you have redis installed? [y/n]').toLowerCase();
if (isInstalled !== 'y') {
if (process.platform === 'win32') {
err('Please install redis. Although you can download it from various websites, it is much easier to use Windows Subsystem for Linux.\n\nMore info here: https://docs.microsoft.com/en-us/windows/wsl/install-win10\n')
} else {
err('Please install redis. If you use ubuntu, this is as simple as:\n\n' + chalk.bold('sudo apt-get update && sudo apt-get install redis-server\n'));
}
process.exit(1);
} else {
let newAdd = rl('Please specify the address/IP/Hostname (excluding the port). Currently: "' + add + '": ');
if (!newAdd) {
info('Address is not being updated.');
} else {
add = newAdd;
}
let newPort = parseInt(rl('Please specify the port of the redis server. Currently: "' + port + '": ').toLowerCase(), 10);
if (!Number.isInteger(newPort)) {
info('Port is not being updated.');
} else {
port = newPort;
}
let newPass = rl('If your redis server has a password/Auth, please enter it below. Currently: "' + pass + '": ');
if (!newPass && !pass) {
info('Password is not being updated.');
} else {
pass = newPass;
}
testRedis().then(() => {
res();
}).catch(err => {
rej(err);
})
}
})
})
}
testRedis().then(ok => {
log('Redis config OK. Continuing...');
let doRender = rl('Do you want to enable R6 avatar rendering? [y/n]').toLowerCase();
if (doRender === 'y') {
// Enable render setup
conf.avatarRender = {
enabled: true,
}
let r = conf.avatarRender;
let auth = crypto.randomBytes(32).toString('base64');
let port = 8196;
let add = 'http://127.0.0.1';
let customAdd = rl('Do you have a custom render server address? Currently: ' + add + ':' + port.toString() + ': ');
if (!customAdd) {
info('No custom server is being used - it will be self hosted.');
} else {
let newAdd = customAdd.slice(0, customAdd.lastIndexOf(':'));
if (newAdd) {
add = newAdd;
}
let newPort = parseInt(customAdd.slice(customAdd.lastIndexOf(':') + 1), 10);
if (Number.isInteger(newPort) && newPort <= 99999) {
port = newPort;
}
}
r.address = add + ':' + port;
r.authorization = auth;
let rulesToAdd = rl('Please type out any rules, separated by a comma.\nSee here for more info: https://github.com/Pokemonjpups/2012-roblox/tree/master/docs/avatar-render/rules.md').split(',');
r.rules = rulesToAdd;
info('Writing config to disk...')
fs.writeFileSync(confDir, JSON.stringify(conf));
info('Write OK. Writing avatar render config to disk...');
let renderPath = path.join(__dirname, '../avatar-render-node/config.json');
if (fs.existsSync(renderPath)) {
warn('config.json file for avatar service already exists.');
let isOk = rl('Can the config.json file in the avatar-render-node folder be deleted? [y/n]\n');
if (isOk.toLowerCase() === 'y') {
fs.unlinkSync(renderPath);
info('avatar-render-node/config.json file was deleted.');
fs.writeFileSync(renderPath, JSON.stringify({
port: port,
authorization: auth,
}));
info('Avatar render config file created.');
installModules();
} else {
err('Exiting due to config conflict with avatar-render-node.');
process.exit(1);
}
} else {
fs.writeFileSync(renderPath, JSON.stringify({
port: port,
authorization: auth,
}));
info('Avatar render config file created.');
installModules();
}
} else {
// Dont enable render setup
info('Not setting up 3d avatar render service.');
info('Writing config to disk...')
fs.writeFileSync(confDir, JSON.stringify(conf));
installModules();
}
});
const installModules = () => {
let renderPath = path.join(__dirname, '../avatar-render-node/');
let wwwPath = path.join(__dirname, '../www/');
let ss = 0;
const startBuild = () => {
info('Complete.\n\nTo run the program, enter this command:\n' + chalk.bold(`node ./utils/start.js`));
process.exit(0);
}
let join = '; ';
if (process.platform === 'win32') {
join = '&& ';
}
cp.exec(`npm i ${join} npm run build`, {
cwd: renderPath,
}, (e, out, stdErr) => {
if (e) {
err(e);
} else {
if (stdErr) {
err(stdErr);
} else {
info('Node modules for render service installed.');
ss++;
if (ss >= 2) {
startBuild();
}
}
}
});
cp.exec(`npm i ${join} npm run build`, {
cwd: wwwPath,
}, (e, out, stdErr) => {
if (e) {
err(e);
} else {
if (stdErr) {
err(stdErr);
} else {
info('Node modules for www service installed.');
ss++;
if (ss >= 2) {
startBuild();
}
}
}
});
}
// Quick check if exists
// Check if the redis port is OK. If it works and there's no auth, just go with the default (and maybe warn about the password). If we can't connect, ask for redis info, try to connect, then continue. We could also try installing it via a child process (sudo apt-get install redis).
}
if (fs.existsSync(confDir)) { | JSON.parse(file);
} catch (err) {
| // Confirm it's valid
let file = fs.readFileSync(confDir).toString();
try { | random_line_split |
install.js | cp.execSync('npm install', {
cwd: __dirname,
});
console.log('[info] Please enter the command again. Sorry!');
process.exit(15);
}
const chalk = require('chalk');
const redis = require('ioredis');
const rl = require('prompt-sync')()
const log = console.log;
const info = (...args) => {
log(chalk.white('[info]'), ...args)
}
const warn = (...args) => {
log(chalk.yellow('[warning]'), ...args)
}
const err = (...args) => {
log(chalk.red('[err]'), ...args)
}
/*
if (process.platform !== 'linux') {
log(chalk.red('[ERROR]'), 'Your current platform', chalk.yellow(process.platform), 'is not supported by this install script. Sorry!');
// process.exit(1);
}
*/
// First, check if cookies file exists
if (!fs.existsSync(path.join(__dirname, '../cookies.txt'))) {
err('Please create a file called "cookies.txt" and paste in a list of .ROBLOSECURITY cookies, separated by new lines.');
process.exit(1);
}
let wwwDir = path.join(__dirname, '../www/');
let confDir = path.join(wwwDir, './config.json');
const setupConfig = () => {
info('Creating random keys...');
/**
* @type {*}
*/
let conf = {
cookiesKey: crypto.randomBytes(64).toString('base64').replace(/"/g, '\"'),
csrfKey: crypto.randomBytes(64).toString('base64').replace(/"/g, '\"'),
};
// check for default conf
let add = '127.0.0.1';
let prefix = '2k12Roblox1';
let port = 6379;
let pass = '';
const testRedis = () => {
return new Promise((res, rej) => {
info('Trying to connect to redis...');
let attempt = new redis(port, add, {
keyPrefix: prefix,
password: pass,
enableOfflineQueue: false,
});
attempt.on('ready', e => {
attempt.setex('testing1234', 1, 'world', (rErr, ok) => {
if (rErr) | else {
// Ok
conf.redis = {
host: add,
port,
keyPrefix: prefix,
enableOfflineQueue: true,
password: pass,
}
res()
}
});
});
attempt.on('error', (e) => {
// Install
try {
attempt.disconnect();
} catch (e) {
}
let isInstalled = rl('Do you have redis installed? [y/n]').toLowerCase();
if (isInstalled !== 'y') {
if (process.platform === 'win32') {
err('Please install redis. Although you can download it from various websites, it is much easier to use Windows Subsystem for Linux.\n\nMore info here: https://docs.microsoft.com/en-us/windows/wsl/install-win10\n')
} else {
err('Please install redis. If you use ubuntu, this is as simple as:\n\n' + chalk.bold('sudo apt-get update && sudo apt-get install redis-server\n'));
}
process.exit(1);
} else {
let newAdd = rl('Please specify the address/IP/Hostname (excluding the port). Currently: "' + add + '": ');
if (!newAdd) {
info('Address is not being updated.');
} else {
add = newAdd;
}
let newPort = parseInt(rl('Please specify the port of the redis server. Currently: "' + port + '": ').toLowerCase(), 10);
if (!Number.isInteger(newPort)) {
info('Port is not being updated.');
} else {
port = newPort;
}
let newPass = rl('If your redis server has a password/Auth, please enter it below. Currently: "' + pass + '": ');
if (!newPass && !pass) {
info('Password is not being updated.');
} else {
pass = newPass;
}
testRedis().then(() => {
res();
}).catch(err => {
rej(err);
})
}
})
})
}
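// testRedis() above resolves once a SETEX round-trip succeeds, and on a
// connection error prompts for new settings and calls itself again. The
// same shape as a generic helper (hypothetical sketch, unused by this script):
// const retryUntilOk = async (attemptFn, reconfigure) => {
//   try { await attemptFn(); }
//   catch (e) { reconfigure(e); await retryUntilOk(attemptFn, reconfigure); }
// };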
testRedis().then(ok => {
log('Redis config OK. Continuing...');
let doRender = rl('Do you want to enable R6 avatar rendering? [y/n]').toLowerCase();
if (doRender === 'y') {
// Enable render setup
conf.avatarRender = {
enabled: true,
}
let r = conf.avatarRender;
let auth = crypto.randomBytes(32).toString('base64');
let port = 8196;
let add = 'http://127.0.0.1';
let customAdd = rl('Do you have a custom render server address? Currently: ' + add + ':' + port.toString() + ': ');
if (!customAdd) {
info('No custom server is being used - it will be self hosted.');
} else {
let newAdd = customAdd.slice(0, customAdd.lastIndexOf(':'));
if (newAdd) {
add = newAdd;
}
let newPort = parseInt(customAdd.slice(customAdd.lastIndexOf(':') + 1), 10);
if (Number.isInteger(newPort) && newPort <= 65535) {
port = newPort;
}
}
r.address = add + ':' + port;
r.authorization = auth;
let rulesToAdd = rl('Please type out any rules, separated by a comma.\nSee here for more info: https://github.com/Pokemonjpups/2012-roblox/tree/master/docs/avatar-render/rules.md').split(',');
r.rules = rulesToAdd;
info('Writing config to disk...');
fs.writeFileSync(confDir, JSON.stringify(conf));
info('Write OK. Writing avatar render config to disk...');
let renderPath = path.join(__dirname, '../avatar-render-node/config.json');
if (fs.existsSync(renderPath)) {
warn('config.json file for avatar service already exists.');
let isOk = rl('Can the config.json file in avatar-render-node folder be deleted [y/n]?\n');
if (isOk.toLowerCase() === 'y') {
fs.unlinkSync(renderPath);
info('avatar-render-node/config.json file was deleted.');
fs.writeFileSync(renderPath, JSON.stringify({
port: port,
authorization: auth,
}));
info('Avatar render config file created.');
installModules();
} else {
err('Exiting due to config conflict with avatar-render-node.');
process.exit(1);
}
} else {
fs.writeFileSync(renderPath, JSON.stringify({
port: port,
authorization: auth,
}));
info('Avatar render config file created.');
installModules();
}
} else {
// Don't enable render setup
info('Not setting up 3D avatar render service.');
info('Writing config to disk...');
fs.writeFileSync(confDir, JSON.stringify(conf));
installModules();
}
});
const installModules = () => {
let renderPath = path.join(__dirname, '../avatar-render-node/');
let wwwPath = path.join(__dirname, '../www/');
let ss = 0;
const startBuild = () => {
info('Complete.\n\nTo run the program, enter this command:\n' + chalk.bold(`node ./utils/start.js`));
process.exit(0);
}
let join = '; ';
if (process.platform === 'win32') {
join = '&& ';
}
cp.exec(`npm i ${join} npm run build`, {
cwd: renderPath,
}, (e, out, stdErr) => {
if (e) {
err(e);
} else {
if (stdErr) {
err(stdErr);
} else {
info('Node modules for render service installed.');
ss++;
if (ss >= 2) {
startBuild();
}
}
}
});
cp.exec(`npm i ${join} npm run build`, {
cwd: wwwPath,
}, (e, out, stdErr) => {
if (e) {
err(e);
} else {
if (stdErr) {
err(stdErr);
} else {
info('Node modules for www service installed.');
ss++;
if (ss >= 2) {
startBuild();
}
}
}
});
}
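// `ss` above is a completion counter for the two parallel `cp.exec` builds:
// each success increments it, and the second to finish (ss >= 2) fires
// startBuild(). Equivalent sketch with promises (hypothetical refactor,
// unused here):
// const run = (cmd, cwd) => new Promise((res, rej) =>
//   cp.exec(cmd, { cwd }, (e, out, stdErr) => (e || stdErr) ? rej(e || stdErr) : res(out)));
// Promise.all([run(cmd, renderPath), run(cmd, wwwPath)]).then(startBuild);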
// Quick check if exists
// Check if redis port is ok. If works, and no auth, then just go with default (and maybe give warning about pass). If can't connect, ask for redis info, try to connect, then continue. Also can try installing it with child process (sudo apt-get install redis)
}
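// installModules() joins "npm i" and "npm run build" with "&& " on Windows
// cmd (stop on failure) and "; " on other shells (run regardless). As a
// standalone helper that choice looks like the sketch below (hypothetical,
// unused by the script):
const joinCommands = (...cmds) =>
  cmds.join(process.platform === 'win32' ? ' && ' : '; ');
// e.g. joinCommands('npm i', 'npm run build') -> 'npm i; npm run build' on Linux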
if (fs.existsSync(confDir)) {
// Confirm it's valid
let file = fs.readFileSync(confDir).toString();
try {
JSON.parse(file);
} catch (err) {
| {
// Ask for pass
err(rErr);
pass = rl('It looks like your redis server requires a password. Please enter it and press enter.\n');
if (!pass) {
err('Exiting due to no pass.');
process.exit(1);
}
attempt.disconnect();
testRedis().then(ok => {
res();
}).catch(e => {
err(e);
rej(e);
})
} | conditional_block |
declare.rs | _decl_impl!();
method_decl_impl!(A);
method_decl_impl!(A, B);
method_decl_impl!(A, B, C);
method_decl_impl!(A, B, C, D);
method_decl_impl!(A, B, C, D, E);
method_decl_impl!(A, B, C, D, E, F);
method_decl_impl!(A, B, C, D, E, F, G);
method_decl_impl!(A, B, C, D, E, F, G, H);
method_decl_impl!(A, B, C, D, E, F, G, H, I);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J, K);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J, K, L);
fn count_args(sel: Sel) -> usize {
sel.name().chars().filter(|&c| c == ':').count()
}
fn method_type_encoding(ret: &Encoding, args: &[Encoding]) -> CString {
let mut types = ret.as_str().to_owned();
// First two arguments are always self and the selector
types.push_str(<*mut Object>::encode().as_str());
types.push_str(Sel::encode().as_str());
types.extend(args.iter().map(|e| e.as_str()));
CString::new(types).unwrap()
}
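// A small sanity check of the layout built above: return type, then "@"
// (self) and ":" (_cmd), then the arguments. Sketch assuming the crate's
// standard `Encode` impls (`()` -> "v", `i32` -> "i").
#[cfg(test)]
mod method_type_encoding_example {
    use super::*;
    #[test]
    fn encodes_ret_self_cmd_then_args() {
        let types = method_type_encoding(&<()>::encode(), &[i32::encode()]);
        assert_eq!(types.to_str().unwrap(), "v@:i");
    }
}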
fn log2_align_of<T>() -> u8 {
let align = mem::align_of::<T>();
// Alignments are required to be powers of 2
debug_assert!(align.count_ones() == 1);
// log2 of a power of 2 is the number of trailing zeros
align.trailing_zeros() as u8
}
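// Example: u64 is 8-byte aligned and 8 == 1 << 3, so
// log2_align_of::<u64>() returns 3.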
/// A type for declaring a new class and adding new methods and ivars to it
/// before registering it.
pub struct ClassDecl {
cls: *mut Class,
}
impl ClassDecl {
fn with_superclass(name: &str, superclass: Option<&Class>)
-> Option<ClassDecl> {
let name = CString::new(name).unwrap();
let super_ptr = superclass.map_or(ptr::null(), |c| c);
let cls = unsafe {
runtime::objc_allocateClassPair(super_ptr, name.as_ptr(), 0)
};
if cls.is_null() {
None
} else {
Some(ClassDecl { cls })
}
}
/// Constructs a `ClassDecl` with the given name and superclass.
/// Returns `None` if the class couldn't be allocated.
pub fn new(name: &str, superclass: &Class) -> Option<ClassDecl> {
ClassDecl::with_superclass(name, Some(superclass))
}
/**
Constructs a `ClassDecl` declaring a new root class with the given name.
Returns `None` if the class couldn't be allocated.
An implementation for `+initialize` must also be given; the runtime calls
this method for all classes, so it must be defined on root classes.
Note that implementing a root class is not a simple endeavor.
For example, your class probably cannot be passed to Cocoa code unless
the entire `NSObject` protocol is implemented.
Functionality it expects, like implementations of `-retain` and `-release`
used by ARC, will not be present otherwise.
*/
pub fn root(name: &str, intitialize_fn: extern fn(&Class, Sel))
-> Option<ClassDecl> {
let mut decl = ClassDecl::with_superclass(name, None);
if let Some(ref mut decl) = decl {
unsafe {
decl.add_class_method(sel!(initialize), intitialize_fn);
}
}
decl
}
/// Adds a method with the given name and implementation to self.
/// Panics if the method wasn't successfully added
/// or if the selector and function take different numbers of arguments.
/// Unsafe because the caller must ensure that the types match those that
/// are expected when the method is invoked from Objective-C.
pub unsafe fn add_method<F>(&mut self, sel: Sel, func: F)
where F: MethodImplementation<Callee=Object> {
let encs = F::Args::encodings();
let encs = encs.as_ref();
let sel_args = count_args(sel);
assert!(sel_args == encs.len(),
"Selector accepts {} arguments, but function accepts {}",
sel_args, encs.len(),
);
let types = method_type_encoding(&F::Ret::encode(), encs);
let success = runtime::class_addMethod(self.cls, sel, func.imp(),
types.as_ptr());
assert!(success != NO, "Failed to add method {:?}", sel);
}
/// Adds a class method with the given name and implementation to self.
/// Panics if the method wasn't successfully added
/// or if the selector and function take different numbers of arguments.
/// Unsafe because the caller must ensure that the types match those that
/// are expected when the method is invoked from Objective-C.
pub unsafe fn add_class_method<F>(&mut self, sel: Sel, func: F)
where F: MethodImplementation<Callee=Class> {
let encs = F::Args::encodings();
let encs = encs.as_ref();
let sel_args = count_args(sel);
assert!(sel_args == encs.len(),
"Selector accepts {} arguments, but function accepts {}",
sel_args, encs.len(),
);
let types = method_type_encoding(&F::Ret::encode(), encs);
let metaclass = (*self.cls).metaclass() as *const _ as *mut _;
let success = runtime::class_addMethod(metaclass, sel, func.imp(),
types.as_ptr());
assert!(success != NO, "Failed to add class method {:?}", sel);
}
/// Adds an ivar with type `T` and the provided name to self.
/// Panics if the ivar wasn't successfully added.
pub fn add_ivar<T>(&mut self, name: &str) where T: Encode {
let c_name = CString::new(name).unwrap();
let encoding = CString::new(T::encode().as_str()).unwrap();
let size = mem::size_of::<T>();
let align = log2_align_of::<T>();
let success = unsafe {
runtime::class_addIvar(self.cls, c_name.as_ptr(), size, align,
encoding.as_ptr())
};
assert!(success != NO, "Failed to add ivar {}", name);
}
/// Adds a protocol to self. Panics if the protocol wasn't successfully
/// added
pub fn add_protocol(&mut self, proto: &Protocol) {
let success = unsafe { runtime::class_addProtocol(self.cls, proto) };
assert!(success != NO, "Failed to add protocol {:?}", proto);
}
/// Registers self, consuming it and returning a reference to the
/// newly registered `Class`.
pub fn register(self) -> &'static Class {
unsafe {
let cls = self.cls;
runtime::objc_registerClassPair(cls);
// Forget self otherwise the class will be disposed in drop
mem::forget(self);
&*cls
}
}
}
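// Typical use of the API above, as a comment sketch (mirrors the crate's
// documented pattern; assumes the `class!`/`sel!` macros and
// `Object::get_ivar` are available):
//
// let superclass = class!(NSObject);
// let mut decl = ClassDecl::new("MyNumber", superclass).unwrap();
// decl.add_ivar::<u32>("_number");
// extern fn my_number_get(this: &Object, _cmd: Sel) -> u32 {
//     unsafe { *this.get_ivar("_number") }
// }
// unsafe {
//     decl.add_method(sel!(number),
//                     my_number_get as extern fn(&Object, Sel) -> u32);
// }
// let cls = decl.register();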
impl Drop for ClassDecl {
fn drop(&mut self) {
unsafe {
runtime::objc_disposeClassPair(self.cls);
}
}
}
/// A type for declaring a new protocol and adding new methods to it
/// before registering it.
pub struct ProtocolDecl {
proto: *mut Protocol
}
impl ProtocolDecl {
/// Constructs a `ProtocolDecl` with the given name. Returns `None` if the
/// protocol couldn't be allocated.
pub fn new(name: &str) -> Option<ProtocolDecl> {
let c_name = CString::new(name).unwrap();
let proto = unsafe {
runtime::objc_allocateProtocol(c_name.as_ptr())
};
if proto.is_null() {
None
} else {
Some(ProtocolDecl { proto })
}
}
fn add_method_description_common<Args, Ret>(&mut self, sel: Sel, is_required: bool,
is_instance_method: bool)
where Args: EncodeArguments,
Ret: Encode {
let encs = Args::encodings();
let encs = encs.as_ref();
let sel_args = count_args(sel);
assert!(sel_args == encs.len(),
"Selector accepts {} arguments, but function accepts {}",
sel_args, encs.len(),
);
let types = method_type_encoding(&Ret::encode(), encs);
unsafe {
runtime::protocol_addMethodDescription(
self.proto, sel, types.as_ptr(), is_required as BOOL, is_instance_method as BOOL);
}
}
/// Adds an instance method declaration with a given description to self.
pub fn add_method_description<Args, Ret>(&mut self, sel: Sel, is_required: bool)
where Args: EncodeArguments,
Ret: Encode {
self.add_method_description_common::<Args, Ret>(sel, is_required, true)
}
/// Adds a class method declaration with a given description to self.
pub fn add_class_method_description<Args, Ret>(&mut self, sel: Sel, is_required: bool)
where Args: EncodeArguments,
Ret: Encode | {
self.add_method_description_common::<Args, Ret>(sel, is_required, false)
} | identifier_body |
|
declare.rs | }
```
*/
use std::ffi::CString;
use std::mem;
use std::ptr;
use runtime::{BOOL, Class, Imp, NO, Object, Protocol, Sel, self};
use {Encode, EncodeArguments, Encoding, Message};
/// Types that can be used as the implementation of an Objective-C method.
pub trait MethodImplementation {
/// The callee type of the method.
type Callee: Message;
/// The return type of the method.
type Ret: Encode;
/// The argument types of the method.
type Args: EncodeArguments;
/// Returns self as an `Imp` of a method.
fn imp(self) -> Imp;
}
macro_rules! method_decl_impl {
(-$s:ident, $r:ident, $f:ty, $($t:ident),*) => (
impl<$s, $r $(, $t)*> MethodImplementation for $f
where $s: Message, $r: Encode $(, $t: Encode)* {
type Callee = $s;
type Ret = $r;
type Args = ($($t,)*);
fn imp(self) -> Imp {
unsafe { mem::transmute(self) }
}
}
);
($($t:ident),*) => (
method_decl_impl!(-T, R, extern fn(&T, Sel $(, $t)*) -> R, $($t),*);
method_decl_impl!(-T, R, extern fn(&mut T, Sel $(, $t)*) -> R, $($t),*);
);
}
method_decl_impl!();
method_decl_impl!(A);
method_decl_impl!(A, B);
method_decl_impl!(A, B, C);
method_decl_impl!(A, B, C, D);
method_decl_impl!(A, B, C, D, E);
method_decl_impl!(A, B, C, D, E, F);
method_decl_impl!(A, B, C, D, E, F, G);
method_decl_impl!(A, B, C, D, E, F, G, H);
method_decl_impl!(A, B, C, D, E, F, G, H, I);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J, K);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J, K, L);
fn count_args(sel: Sel) -> usize {
sel.name().chars().filter(|&c| c == ':').count()
}
fn method_type_encoding(ret: &Encoding, args: &[Encoding]) -> CString {
let mut types = ret.as_str().to_owned();
// First two arguments are always self and the selector
types.push_str(<*mut Object>::encode().as_str());
types.push_str(Sel::encode().as_str());
types.extend(args.iter().map(|e| e.as_str()));
CString::new(types).unwrap()
}
fn log2_align_of<T>() -> u8 {
let align = mem::align_of::<T>();
// Alignments are required to be powers of 2
debug_assert!(align.count_ones() == 1);
// log2 of a power of 2 is the number of trailing zeros
align.trailing_zeros() as u8
}
/// A type for declaring a new class and adding new methods and ivars to it
/// before registering it.
pub struct ClassDecl {
cls: *mut Class,
}
impl ClassDecl {
fn with_superclass(name: &str, superclass: Option<&Class>)
-> Option<ClassDecl> {
let name = CString::new(name).unwrap();
let super_ptr = superclass.map_or(ptr::null(), |c| c);
let cls = unsafe {
runtime::objc_allocateClassPair(super_ptr, name.as_ptr(), 0)
};
if cls.is_null() | else {
Some(ClassDecl { cls })
}
}
/// Constructs a `ClassDecl` with the given name and superclass.
/// Returns `None` if the class couldn't be allocated.
pub fn new(name: &str, superclass: &Class) -> Option<ClassDecl> {
ClassDecl::with_superclass(name, Some(superclass))
}
/**
Constructs a `ClassDecl` declaring a new root class with the given name.
Returns `None` if the class couldn't be allocated.
An implementation for `+initialize` must also be given; the runtime calls
this method for all classes, so it must be defined on root classes.
Note that implementing a root class is not a simple endeavor.
For example, your class probably cannot be passed to Cocoa code unless
the entire `NSObject` protocol is implemented.
Functionality it expects, like implementations of `-retain` and `-release`
used by ARC, will not be present otherwise.
*/
pub fn root(name: &str, intitialize_fn: extern fn(&Class, Sel))
-> Option<ClassDecl> {
let mut decl = ClassDecl::with_superclass(name, None);
if let Some(ref mut decl) = decl {
unsafe {
decl.add_class_method(sel!(initialize), intitialize_fn);
}
}
decl
}
/// Adds a method with the given name and implementation to self.
/// Panics if the method wasn't successfully added
/// or if the selector and function take different numbers of arguments.
/// Unsafe because the caller must ensure that the types match those that
/// are expected when the method is invoked from Objective-C.
pub unsafe fn add_method<F>(&mut self, sel: Sel, func: F)
where F: MethodImplementation<Callee=Object> {
let encs = F::Args::encodings();
let encs = encs.as_ref();
let sel_args = count_args(sel);
assert!(sel_args == encs.len(),
"Selector accepts {} arguments, but function accepts {}",
sel_args, encs.len(),
);
let types = method_type_encoding(&F::Ret::encode(), encs);
let success = runtime::class_addMethod(self.cls, sel, func.imp(),
types.as_ptr());
assert!(success != NO, "Failed to add method {:?}", sel);
}
/// Adds a class method with the given name and implementation to self.
/// Panics if the method wasn't successfully added
/// or if the selector and function take different numbers of arguments.
/// Unsafe because the caller must ensure that the types match those that
/// are expected when the method is invoked from Objective-C.
pub unsafe fn add_class_method<F>(&mut self, sel: Sel, func: F)
where F: MethodImplementation<Callee=Class> {
let encs = F::Args::encodings();
let encs = encs.as_ref();
let sel_args = count_args(sel);
assert!(sel_args == encs.len(),
"Selector accepts {} arguments, but function accepts {}",
sel_args, encs.len(),
);
let types = method_type_encoding(&F::Ret::encode(), encs);
let metaclass = (*self.cls).metaclass() as *const _ as *mut _;
let success = runtime::class_addMethod(metaclass, sel, func.imp(),
types.as_ptr());
assert!(success != NO, "Failed to add class method {:?}", sel);
}
/// Adds an ivar with type `T` and the provided name to self.
/// Panics if the ivar wasn't successfully added.
pub fn add_ivar<T>(&mut self, name: &str) where T: Encode {
let c_name = CString::new(name).unwrap();
let encoding = CString::new(T::encode().as_str()).unwrap();
let size = mem::size_of::<T>();
let align = log2_align_of::<T>();
let success = unsafe {
runtime::class_addIvar(self.cls, c_name.as_ptr(), size, align,
encoding.as_ptr())
};
assert!(success != NO, "Failed to add ivar {}", name);
}
/// Adds a protocol to self. Panics if the protocol wasn't successfully
/// added
pub fn add_protocol(&mut self, proto: &Protocol) {
let success = unsafe { runtime::class_addProtocol(self.cls, proto) };
assert!(success != NO, "Failed to add protocol {:?}", proto);
}
/// Registers self, consuming it and returning a reference to the
/// newly registered `Class`.
pub fn register(self) -> &'static Class {
unsafe {
let cls = self.cls;
runtime::objc_registerClassPair(cls);
// Forget self otherwise the class will be disposed in drop
mem::forget(self);
&*cls
}
}
}
impl Drop for ClassDecl {
fn drop(&mut self) {
unsafe {
runtime::objc_disposeClassPair(self.cls);
}
}
}
/// A type for declaring a new protocol and adding new methods to it
/// before registering it.
pub struct ProtocolDecl {
proto: *mut Protocol
}
impl ProtocolDecl {
/// Constructs a `ProtocolDecl` with the given name. Returns `None` if the
/// protocol couldn't be allocated.
pub fn new(name: &str) -> Option<ProtocolDecl> {
let c_name = CString::new(name).unwrap();
let proto = unsafe {
| {
None
} | conditional_block |
declare.rs | }
```
*/
use std::ffi::CString;
use std::mem;
use std::ptr;
use runtime::{BOOL, Class, Imp, NO, Object, Protocol, Sel, self};
use {Encode, EncodeArguments, Encoding, Message};
/// Types that can be used as the implementation of an Objective-C method.
pub trait MethodImplementation {
/// The callee type of the method.
type Callee: Message;
/// The return type of the method.
type Ret: Encode;
/// The argument types of the method.
type Args: EncodeArguments;
/// Returns self as an `Imp` of a method.
fn imp(self) -> Imp;
}
macro_rules! method_decl_impl {
(-$s:ident, $r:ident, $f:ty, $($t:ident),*) => (
impl<$s, $r $(, $t)*> MethodImplementation for $f
where $s: Message, $r: Encode $(, $t: Encode)* {
type Callee = $s;
type Ret = $r;
type Args = ($($t,)*);
fn imp(self) -> Imp {
unsafe { mem::transmute(self) }
}
}
);
($($t:ident),*) => (
method_decl_impl!(-T, R, extern fn(&T, Sel $(, $t)*) -> R, $($t),*);
method_decl_impl!(-T, R, extern fn(&mut T, Sel $(, $t)*) -> R, $($t),*);
);
}
method_decl_impl!();
method_decl_impl!(A);
method_decl_impl!(A, B);
method_decl_impl!(A, B, C);
method_decl_impl!(A, B, C, D);
method_decl_impl!(A, B, C, D, E);
method_decl_impl!(A, B, C, D, E, F);
method_decl_impl!(A, B, C, D, E, F, G);
method_decl_impl!(A, B, C, D, E, F, G, H);
method_decl_impl!(A, B, C, D, E, F, G, H, I);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J, K);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J, K, L);
fn count_args(sel: Sel) -> usize {
sel.name().chars().filter(|&c| c == ':').count()
}
fn method_type_encoding(ret: &Encoding, args: &[Encoding]) -> CString {
let mut types = ret.as_str().to_owned();
// First two arguments are always self and the selector
types.push_str(<*mut Object>::encode().as_str());
types.push_str(Sel::encode().as_str());
types.extend(args.iter().map(|e| e.as_str()));
CString::new(types).unwrap()
}
fn log2_align_of<T>() -> u8 {
let align = mem::align_of::<T>();
// Alignments are required to be powers of 2
debug_assert!(align.count_ones() == 1);
// log2 of a power of 2 is the number of trailing zeros
align.trailing_zeros() as u8
}
/// A type for declaring a new class and adding new methods and ivars to it
/// before registering it.
pub struct ClassDecl {
cls: *mut Class,
}
impl ClassDecl {
fn with_superclass(name: &str, superclass: Option<&Class>)
-> Option<ClassDecl> {
let name = CString::new(name).unwrap();
let super_ptr = superclass.map_or(ptr::null(), |c| c);
let cls = unsafe {
runtime::objc_allocateClassPair(super_ptr, name.as_ptr(), 0)
};
if cls.is_null() {
None
} else {
Some(ClassDecl { cls })
}
}
/// Constructs a `ClassDecl` with the given name and superclass.
/// Returns `None` if the class couldn't be allocated.
pub fn new(name: &str, superclass: &Class) -> Option<ClassDecl> {
ClassDecl::with_superclass(name, Some(superclass))
}
/**
Constructs a `ClassDecl` declaring a new root class with the given name.
Returns `None` if the class couldn't be allocated.
An implementation for `+initialize` must also be given; the runtime calls
this method for all classes, so it must be defined on root classes.
Note that implementing a root class is not a simple endeavor.
For example, your class probably cannot be passed to Cocoa code unless
the entire `NSObject` protocol is implemented.
Functionality it expects, like implementations of `-retain` and `-release`
used by ARC, will not be present otherwise.
*/
pub fn root(name: &str, intitialize_fn: extern fn(&Class, Sel))
-> Option<ClassDecl> {
let mut decl = ClassDecl::with_superclass(name, None);
if let Some(ref mut decl) = decl {
unsafe {
decl.add_class_method(sel!(initialize), intitialize_fn);
}
}
decl
}
/// Adds a method with the given name and implementation to self.
/// Panics if the method wasn't successfully added
/// or if the selector and function take different numbers of arguments.
/// Unsafe because the caller must ensure that the types match those that
/// are expected when the method is invoked from Objective-C.
pub unsafe fn add_method<F>(&mut self, sel: Sel, func: F)
where F: MethodImplementation<Callee=Object> {
let encs = F::Args::encodings();
let encs = encs.as_ref();
let sel_args = count_args(sel);
assert!(sel_args == encs.len(),
"Selector accepts {} arguments, but function accepts {}",
sel_args, encs.len(),
);
let types = method_type_encoding(&F::Ret::encode(), encs);
let success = runtime::class_addMethod(self.cls, sel, func.imp(),
types.as_ptr());
assert!(success != NO, "Failed to add method {:?}", sel);
}
/// Adds a class method with the given name and implementation to self.
/// Panics if the method wasn't successfully added
/// or if the selector and function take different numbers of arguments.
/// Unsafe because the caller must ensure that the types match those that
/// are expected when the method is invoked from Objective-C.
pub unsafe fn add_class_method<F>(&mut self, sel: Sel, func: F)
where F: MethodImplementation<Callee=Class> {
let encs = F::Args::encodings();
let encs = encs.as_ref();
let sel_args = count_args(sel);
assert!(sel_args == encs.len(),
"Selector accepts {} arguments, but function accepts {}",
sel_args, encs.len(),
);
let types = method_type_encoding(&F::Ret::encode(), encs);
let metaclass = (*self.cls).metaclass() as *const _ as *mut _;
let success = runtime::class_addMethod(metaclass, sel, func.imp(),
types.as_ptr());
assert!(success != NO, "Failed to add class method {:?}", sel);
}
/// Adds an ivar with type `T` and the provided name to self.
/// Panics if the ivar wasn't successfully added.
pub fn add_ivar<T>(&mut self, name: &str) where T: Encode {
let c_name = CString::new(name).unwrap(); | let size = mem::size_of::<T>();
let align = log2_align_of::<T>();
let success = unsafe {
runtime::class_addIvar(self.cls, c_name.as_ptr(), size, align,
encoding.as_ptr())
};
assert!(success != NO, "Failed to add ivar {}", name);
}
/// Adds a protocol to self. Panics if the protocol wasn't successfully
/// added
pub fn add_protocol(&mut self, proto: &Protocol) {
let success = unsafe { runtime::class_addProtocol(self.cls, proto) };
assert!(success != NO, "Failed to add protocol {:?}", proto);
}
/// Registers self, consuming it and returning a reference to the
/// newly registered `Class`.
pub fn register(self) -> &'static Class {
unsafe {
let cls = self.cls;
runtime::objc_registerClassPair(cls);
// Forget self otherwise the class will be disposed in drop
mem::forget(self);
&*cls
}
}
}
impl Drop for ClassDecl {
fn drop(&mut self) {
unsafe {
runtime::objc_disposeClassPair(self.cls);
}
}
}
/// A type for declaring a new protocol and adding new methods to it
/// before registering it.
pub struct ProtocolDecl {
proto: *mut Protocol
}
impl ProtocolDecl {
/// Constructs a `ProtocolDecl` with the given name. Returns `None` if the
/// protocol couldn't be allocated.
pub fn new(name: &str) -> Option<ProtocolDecl> {
let c_name = CString::new(name).unwrap();
let proto = unsafe {
| let encoding = CString::new(T::encode().as_str()).unwrap(); | random_line_split |
declare.rs | }
```
*/
use std::ffi::CString;
use std::mem;
use std::ptr;
use runtime::{BOOL, Class, Imp, NO, Object, Protocol, Sel, self};
use {Encode, EncodeArguments, Encoding, Message};
/// Types that can be used as the implementation of an Objective-C method.
pub trait MethodImplementation {
/// The callee type of the method.
type Callee: Message;
/// The return type of the method.
type Ret: Encode;
/// The argument types of the method.
type Args: EncodeArguments;
/// Returns self as an `Imp` of a method.
fn imp(self) -> Imp;
}
macro_rules! method_decl_impl {
(-$s:ident, $r:ident, $f:ty, $($t:ident),*) => (
impl<$s, $r $(, $t)*> MethodImplementation for $f
where $s: Message, $r: Encode $(, $t: Encode)* {
type Callee = $s;
type Ret = $r;
type Args = ($($t,)*);
fn imp(self) -> Imp {
unsafe { mem::transmute(self) }
}
}
);
($($t:ident),*) => (
method_decl_impl!(-T, R, extern fn(&T, Sel $(, $t)*) -> R, $($t),*);
method_decl_impl!(-T, R, extern fn(&mut T, Sel $(, $t)*) -> R, $($t),*);
);
}
method_decl_impl!();
method_decl_impl!(A);
method_decl_impl!(A, B);
method_decl_impl!(A, B, C);
method_decl_impl!(A, B, C, D);
method_decl_impl!(A, B, C, D, E);
method_decl_impl!(A, B, C, D, E, F);
method_decl_impl!(A, B, C, D, E, F, G);
method_decl_impl!(A, B, C, D, E, F, G, H);
method_decl_impl!(A, B, C, D, E, F, G, H, I);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J, K);
method_decl_impl!(A, B, C, D, E, F, G, H, I, J, K, L);
fn count_args(sel: Sel) -> usize {
sel.name().chars().filter(|&c| c == ':').count()
}
fn method_type_encoding(ret: &Encoding, args: &[Encoding]) -> CString {
let mut types = ret.as_str().to_owned();
// First two arguments are always self and the selector
types.push_str(<*mut Object>::encode().as_str());
types.push_str(Sel::encode().as_str());
types.extend(args.iter().map(|e| e.as_str()));
CString::new(types).unwrap()
}
fn log2_align_of<T>() -> u8 {
let align = mem::align_of::<T>();
// Alignments are required to be powers of 2
debug_assert!(align.count_ones() == 1);
// log2 of a power of 2 is the number of trailing zeros
align.trailing_zeros() as u8
}
/// A type for declaring a new class and adding new methods and ivars to it
/// before registering it.
pub struct ClassDecl {
cls: *mut Class,
}
impl ClassDecl {
fn with_superclass(name: &str, superclass: Option<&Class>)
-> Option<ClassDecl> {
let name = CString::new(name).unwrap();
let super_ptr = superclass.map_or(ptr::null(), |c| c);
let cls = unsafe {
runtime::objc_allocateClassPair(super_ptr, name.as_ptr(), 0)
};
if cls.is_null() {
None
} else {
Some(ClassDecl { cls })
}
}
/// Constructs a `ClassDecl` with the given name and superclass.
/// Returns `None` if the class couldn't be allocated.
pub fn new(name: &str, superclass: &Class) -> Option<ClassDecl> {
ClassDecl::with_superclass(name, Some(superclass))
}
/**
Constructs a `ClassDecl` declaring a new root class with the given name.
Returns `None` if the class couldn't be allocated.
An implementation for `+initialize` must also be given; the runtime calls
this method for all classes, so it must be defined on root classes.
Note that implementing a root class is not a simple endeavor.
For example, your class probably cannot be passed to Cocoa code unless
the entire `NSObject` protocol is implemented.
Functionality it expects, like implementations of `-retain` and `-release`
used by ARC, will not be present otherwise.
*/
pub fn root(name: &str, intitialize_fn: extern fn(&Class, Sel))
-> Option<ClassDecl> {
let mut decl = ClassDecl::with_superclass(name, None);
if let Some(ref mut decl) = decl {
unsafe {
decl.add_class_method(sel!(initialize), intitialize_fn);
}
}
decl
}
/// Adds a method with the given name and implementation to self.
/// Panics if the method wasn't successfully added
/// or if the selector and function take different numbers of arguments.
/// Unsafe because the caller must ensure that the types match those that
/// are expected when the method is invoked from Objective-C.
pub unsafe fn | <F>(&mut self, sel: Sel, func: F)
where F: MethodImplementation<Callee=Object> {
let encs = F::Args::encodings();
let encs = encs.as_ref();
let sel_args = count_args(sel);
assert!(sel_args == encs.len(),
"Selector accepts {} arguments, but function accepts {}",
sel_args, encs.len(),
);
let types = method_type_encoding(&F::Ret::encode(), encs);
let success = runtime::class_addMethod(self.cls, sel, func.imp(),
types.as_ptr());
assert!(success != NO, "Failed to add method {:?}", sel);
}
/// Adds a class method with the given name and implementation to self.
/// Panics if the method wasn't successfully added
/// or if the selector and function take different numbers of arguments.
/// Unsafe because the caller must ensure that the types match those that
/// are expected when the method is invoked from Objective-C.
pub unsafe fn add_class_method<F>(&mut self, sel: Sel, func: F)
where F: MethodImplementation<Callee=Class> {
let encs = F::Args::encodings();
let encs = encs.as_ref();
let sel_args = count_args(sel);
assert!(sel_args == encs.len(),
"Selector accepts {} arguments, but function accepts {}",
sel_args, encs.len(),
);
let types = method_type_encoding(&F::Ret::encode(), encs);
let metaclass = (*self.cls).metaclass() as *const _ as *mut _;
let success = runtime::class_addMethod(metaclass, sel, func.imp(),
types.as_ptr());
assert!(success != NO, "Failed to add class method {:?}", sel);
}
/// Adds an ivar with type `T` and the provided name to self.
/// Panics if the ivar wasn't successfully added.
pub fn add_ivar<T>(&mut self, name: &str) where T: Encode {
let c_name = CString::new(name).unwrap();
let encoding = CString::new(T::encode().as_str()).unwrap();
let size = mem::size_of::<T>();
let align = log2_align_of::<T>();
let success = unsafe {
runtime::class_addIvar(self.cls, c_name.as_ptr(), size, align,
encoding.as_ptr())
};
assert!(success != NO, "Failed to add ivar {}", name);
}
/// Adds a protocol to self. Panics if the protocol wasn't successfully
/// added
pub fn add_protocol(&mut self, proto: &Protocol) {
let success = unsafe { runtime::class_addProtocol(self.cls, proto) };
assert!(success != NO, "Failed to add protocol {:?}", proto);
}
/// Registers self, consuming it and returning a reference to the
/// newly registered `Class`.
pub fn register(self) -> &'static Class {
unsafe {
let cls = self.cls;
runtime::objc_registerClassPair(cls);
// Forget self otherwise the class will be disposed in drop
mem::forget(self);
&*cls
}
}
}
impl Drop for ClassDecl {
fn drop(&mut self) {
unsafe {
runtime::objc_disposeClassPair(self.cls);
}
}
}
/// A type for declaring a new protocol and adding new methods to it
/// before registering it.
pub struct ProtocolDecl {
proto: *mut Protocol
}
impl ProtocolDecl {
/// Constructs a `ProtocolDecl` with the given name. Returns `None` if the
/// protocol couldn't be allocated.
pub fn new(name: &str) -> Option<ProtocolDecl> {
let c_name = CString::new(name).unwrap();
let proto = unsafe {
| add_method | identifier_name |
annealing3.rs | , 0xe4, 0xaa, 0x35, 0x07, 0x99, 0xe3, 0x2b, 0x9d, 0xc6,
];
fn tscore(solution: &Vec<Point>, input: &Input) -> (f64, f64) |
(
dislike / (input.hole.exterior().coords_count() as f64),
-(vx + vy),
)
}
fn ascore(value: (f64, f64), progress: f64) -> f64 {
value.0 * progress + (1.0 - progress) * value.1
}
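// Example: for value = (normalized dislike, negative spread) = (10.0, -50.0),
// progress 0.2 gives 10.0 * 0.2 + 0.8 * (-50.0) = -38.0, so the spread term
// dominates early on, while dislike takes over as progress approaches 1.0.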
pub fn solve(
input: &Input,
mut solution: Vec<Point>,
time_limit: Duration,
fix_seed: bool,
initial_temperature: f64,
) -> (Vec<Point>, f64) {
let n = solution.len();
let mut rng = if fix_seed {
SmallRng::from_seed(SEED)
} else {
SmallRng::from_entropy()
};
let mut current_score = tscore(&solution, &input);
let out_edges = make_out_edges(&input.figure.edges, n);
let original_vertices = &input.figure.vertices;
let mut orders = vec![vec![]; n];
for i in 0..n {
orders[i] = make_determined_order(&out_edges, Some(i));
}
let start_at = Instant::now();
let mut best_solution = solution.clone();
let mut best_score = current_score;
let mut progress = 0.0;
let mut temperature = initial_temperature;
eprintln!("initial_temperature = {}", initial_temperature);
let distance_sums = calc_distance_sums(&out_edges, original_vertices.len());
let distance_total: usize = distance_sums.iter().sum();
// eprintln!("{} {:?}", distance_total, distance_sums);
let mut iter = 0;
let mut move_count = 0;
loop {
// check time limit
iter += 1;
if iter % 100 == 0 {
let elapsed = Instant::now() - start_at;
if best_score.0 == 0.0 || elapsed >= time_limit {
eprintln!("iter = {}, move_count = {}", iter, move_count);
let dislike = calculate_dislike(&best_solution, &input.hole);
return (best_solution, dislike);
}
// tweak temperature
progress = elapsed.as_secs_f64() / time_limit.as_secs_f64();
temperature = initial_temperature * (1.0 - progress) * (-progress).exp2();
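// e.g. at progress 0.5 the factor is (1 - 0.5) * 2^(-0.5) ≈ 0.354, so the
// temperature has already decayed to roughly 35% of initial_temperature.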
}
// move to neighbor
let r = rng.gen::<f64>();
if r > progress {
let mut i = 0;
{
let r = rng.gen::<usize>() % distance_total;
let mut sum = 0;
for index in 0..n {
sum += distance_sums[index];
if r < sum {
i = index;
break;
}
}
}
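// The block above is a weighted pick: vertex i is selected with probability
// distance_sums[i] / distance_total, so vertices with large distance sums
// get moved more often.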
let w = rng.gen::<usize>() % 40 + 5;
let next_solution =
random_move_one_point(i, w, &solution, &input, &mut rng, &out_edges, &orders);
if next_solution.is_none() {
continue;
}
move_count += 1;
let next_solution = next_solution.unwrap();
// calculate score. FIXME: slow
let new_score = tscore(&next_solution, &input);
let accept = {
let current = ascore(current_score, progress);
let new = ascore(new_score, progress);
if new < current {
true
} else {
// new_score >= current_score
let delta = new - current;
let accept_prob = (-delta / temperature).exp();
rng.gen::<f64>() < accept_prob
}
};
if accept {
// accept candidate
current_score = new_score;
solution = next_solution;
}
} else {
let i = rng.gen::<usize>() % n;
let candidate = make_next_candidates(
i,
original_vertices,
&input.hole,
input.epsilon,
&solution,
&out_edges,
&mut rng,
);
if candidate != original_vertices[i] {
move_count += 1;
}
// calculate score. FIXME: slow
let old = solution[i];
solution[i] = candidate;
let new_score = tscore(&solution, &input);
let accept = {
let current = ascore(current_score, progress);
let new = ascore(new_score, progress);
if new < current {
true
} else {
// new_score >= current_score
let delta = new - current;
let accept_prob = (-delta / temperature).exp();
rng.gen::<f64>() < accept_prob
}
};
if accept {
// accept candidate
current_score = new_score;
} else {
// reject candidate
solution[i] = old;
}
}
if current_score < best_score {
best_score = current_score;
best_solution = solution.clone();
}
}
}
fn make_next_candidates(
i: usize,
original_vertices: &[Point],
hole: &Polygon,
epsilon: i64,
solution: &[Point],
out_edges: &[Vec<usize>],
rng: &mut SmallRng,
) -> Point {
let some_neighbor = out_edges[i][0];
let original_squared_distance =
squared_distance(&original_vertices[i], &original_vertices[some_neighbor]);
if original_squared_distance < 100.0 || epsilon < 100000 {
let ring = Ring::from_epsilon(solution[some_neighbor], epsilon, original_squared_distance);
let mut points = ring_points(&ring);
points.shuffle(rng);
for &p in points.iter() {
if !is_valid_point_move(i, &p, solution, original_vertices, out_edges, hole, epsilon) {
continue;
}
return p;
}
} else {
let od = original_squared_distance.sqrt();
let low = od * (1.0 - epsilon as f64 / 1000000.0).sqrt();
let high = od * (1.0 + epsilon as f64 / 1000000.0).sqrt();
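// These bounds follow from the squared-distance constraint
// |d^2 / od^2 - 1| <= epsilon / 1_000_000; taking square roots of
// (1 ± eps) and scaling by od gives the allowed radius band.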
for _iter in 0..100 {
let d = low + (high - low) * rng.gen::<f64>();
let theta = 2.0 * std::f64::consts::PI * rng.gen::<f64>();
let vect = Point::new(
(theta.cos() * d + 0.5).floor(),
(theta.sin() * d + 0.5).floor(),
);
let p = solution[some_neighbor] + vect;
if !is_valid_point_move(i, &p, solution, original_vertices, out_edges, hole, epsilon) {
continue;
}
return p;
}
return solution[i];
}
unreachable!()
}
fn is_valid_point_move(
index: usize,
p: &Point,
solution: &[Point],
original_vertices: &[Point],
out_edges: &[Vec<usize>],
hole: &Polygon,
epsilon: i64,
) -> bool {
let ok1 = out_edges[index].iter().all(|&dst| {
is_allowed_distance(
&p,
&solution[dst],
&original_vertices[index],
&original_vertices[dst],
epsilon,
false,
)
});
if !ok1 {
return false;
}
let ok2 = out_edges[index]
.iter()
.all(|&dst| does_line_fit_in_hole(&p, &solution[dst], hole));
if !ok2 {
return false;
}
return true;
}
fn random_move_one_point(
from: usize,
w: usize,
solution: &Vec<Point>,
input: &Input,
rng: &mut SmallRng,
out_edges: &Vec<Vec<usize>>,
orders: &Vec<Vec<usize>>,
) -> Option<Vec<Point>> {
let mut gx: f64 = 0.0;
let mut gy: f64 = 0.0;
for p in solution.iter() {
gx += p.x();
gy += p.y();
}
gx /= solution.len() as f64;
gy /= solution.len() as f64;
let g = Point::new(g | {
let dislike = calculate_dislike(&solution, &input.hole);
let mut gx: f64 = 0.0;
let mut gy: f64 = 0.0;
for p in solution.iter() {
gx += p.x();
gy += p.y();
}
gx /= solution.len() as f64;
gy /= solution.len() as f64;
let mut vx: f64 = 0.0;
let mut vy: f64 = 0.0;
for p in solution.iter() {
vx += pow2(p.x() - gx);
vy += pow2(p.y() - gy);
}
vx /= solution.len() as f64;
vy /= solution.len() as f64; | identifier_body |
annealing3.rs | , 0xe4, 0xaa, 0x35, 0x07, 0x99, 0xe3, 0x2b, 0x9d, 0xc6,
];
fn tscore(solution: &Vec<Point>, input: &Input) -> (f64, f64) {
let dislike = calculate_dislike(&solution, &input.hole);
let mut gx: f64 = 0.0;
let mut gy: f64 = 0.0;
for p in solution.iter() {
gx += p.x();
gy += p.y();
}
gx /= solution.len() as f64;
gy /= solution.len() as f64;
let mut vx: f64 = 0.0;
let mut vy: f64 = 0.0;
for p in solution.iter() {
vx += pow2(p.x() - gx);
vy += pow2(p.y() - gy);
}
vx /= solution.len() as f64;
vy /= solution.len() as f64;
(
dislike / (input.hole.exterior().coords_count() as f64),
-(vx + vy),
)
}
fn ascore(value: (f64, f64), progress: f64) -> f64 {
value.0 * progress + (1.0 - progress) * value.1
}
pub fn solve(
input: &Input,
mut solution: Vec<Point>,
time_limit: Duration,
fix_seed: bool,
initial_temperature: f64,
) -> (Vec<Point>, f64) {
let n = solution.len();
let mut rng = if fix_seed {
SmallRng::from_seed(SEED)
} else {
SmallRng::from_entropy()
};
let mut current_score = tscore(&solution, &input);
let out_edges = make_out_edges(&input.figure.edges, n);
let original_vertices = &input.figure.vertices;
let mut orders = vec![vec![]; n];
for i in 0..n {
orders[i] = make_determined_order(&out_edges, Some(i));
}
let start_at = Instant::now();
let mut best_solution = solution.clone();
let mut best_score = current_score;
let mut progress = 0.0;
let mut temperature = initial_temperature;
eprintln!("initial_temperature = {}", initial_temperature);
let distance_sums = calc_distance_sums(&out_edges, original_vertices.len());
let distance_total: usize = distance_sums.iter().sum();
// eprintln!("{} {:?}", distance_total, distance_sums);
let mut iter = 0;
let mut move_count = 0;
loop {
// check time limit
iter += 1;
if iter % 100 == 0 {
let elapsed = Instant::now() - start_at;
if best_score.0 == 0.0 || elapsed >= time_limit {
eprintln!("iter = {}, move_count = {}", iter, move_count);
let dislike = calculate_dislike(&best_solution, &input.hole);
return (best_solution, dislike);
}
| temperature = initial_temperature * (1.0 - progress) * (-progress).exp2();
}
// move to neighbor
let r = rng.gen::<f64>();
if r > progress {
let mut i = 0;
{
let r = rng.gen::<usize>() % distance_total;
let mut sum = 0;
for index in 0..n {
sum += distance_sums[index];
if r < sum {
i = index;
break;
}
}
}
let w = rng.gen::<usize>() % 40 + 5;
let next_solution =
random_move_one_point(i, w, &solution, &input, &mut rng, &out_edges, &orders);
if next_solution.is_none() {
continue;
}
move_count += 1;
let next_solution = next_solution.unwrap();
// calculate score. FIXME: slow
let new_score = tscore(&next_solution, &input);
let accept = {
let current = ascore(current_score, progress);
let new = ascore(new_score, progress);
if new < current {
true
} else {
// new_score >= current_score
let delta = new - current;
let accept_prob = (-delta / temperature).exp();
rng.gen::<f64>() < accept_prob
}
};
if accept {
// accept candidate
current_score = new_score;
solution = next_solution;
}
} else {
let i = rng.gen::<usize>() % n;
let candidate = make_next_candidates(
i,
original_vertices,
&input.hole,
input.epsilon,
&solution,
&out_edges,
&mut rng,
);
if candidate != original_vertices[i] {
move_count += 1;
}
// calculate score. FIXME: slow
let old = solution[i];
solution[i] = candidate;
let new_score = tscore(&solution, &input);
let accept = {
let current = ascore(current_score, progress);
let new = ascore(new_score, progress);
if new < current {
true
} else {
// new_score >= current_score
let delta = new - current;
let accept_prob = (-delta / temperature).exp();
rng.gen::<f64>() < accept_prob
}
};
if accept {
// accept candidate
current_score = new_score;
} else {
// reject candidate
solution[i] = old;
}
}
if current_score < best_score {
best_score = current_score;
best_solution = solution.clone();
}
}
}
fn make_next_candidates(
i: usize,
original_vertices: &[Point],
hole: &Polygon,
epsilon: i64,
solution: &[Point],
out_edges: &[Vec<usize>],
rng: &mut SmallRng,
) -> Point {
let some_neighbor = out_edges[i][0];
let original_squared_distance =
squared_distance(&original_vertices[i], &original_vertices[some_neighbor]);
if original_squared_distance < 100.0 || epsilon < 100000 {
let ring = Ring::from_epsilon(solution[some_neighbor], epsilon, original_squared_distance);
let mut points = ring_points(&ring);
points.shuffle(rng);
for &p in points.iter() {
if !is_valid_point_move(i, &p, solution, original_vertices, out_edges, hole, epsilon) {
continue;
}
return p;
}
} else {
let od = original_squared_distance.sqrt();
let low = od * (1.0 - epsilon as f64 / 1000000.0).sqrt();
let high = od * (1.0 + epsilon as f64 / 1000000.0).sqrt();
for _iter in 0..100 {
let d = low + (high - low) * rng.gen::<f64>();
let theta = 2.0 * std::f64::consts::PI * rng.gen::<f64>();
let vect = Point::new(
(theta.cos() * d + 0.5).floor(),
(theta.sin() * d + 0.5).floor(),
);
let p = solution[some_neighbor] + vect;
if !is_valid_point_move(i, &p, solution, original_vertices, out_edges, hole, epsilon) {
continue;
}
return p;
}
return solution[i];
}
unreachable!()
}
fn is_valid_point_move(
index: usize,
p: &Point,
solution: &[Point],
original_vertices: &[Point],
out_edges: &[Vec<usize>],
hole: &Polygon,
epsilon: i64,
) -> bool {
let ok1 = out_edges[index].iter().all(|&dst| {
is_allowed_distance(
&p,
&solution[dst],
&original_vertices[index],
&original_vertices[dst],
epsilon,
false,
)
});
if !ok1 {
return false;
}
let ok2 = out_edges[index]
.iter()
.all(|&dst| does_line_fit_in_hole(&p, &solution[dst], hole));
if !ok2 {
return false;
}
return true;
}
fn random_move_one_point(
from: usize,
w: usize,
solution: &Vec<Point>,
input: &Input,
rng: &mut SmallRng,
out_edges: &Vec<Vec<usize>>,
orders: &Vec<Vec<usize>>,
) -> Option<Vec<Point>> {
let mut gx: f64 = 0.0;
let mut gy: f64 = 0.0;
for p in solution.iter() {
gx += p.x();
gy += p.y();
}
gx /= solution.len() as f64;
gy /= solution.len() as f64;
let g = Point::new(gx, | // tweak temperature
progress = elapsed.as_secs_f64() / time_limit.as_secs_f64(); | random_line_split |
annealing3.rs | , 0xe4, 0xaa, 0x35, 0x07, 0x99, 0xe3, 0x2b, 0x9d, 0xc6,
];
fn tscore(solution: &Vec<Point>, input: &Input) -> (f64, f64) {
let dislike = calculate_dislike(&solution, &input.hole);
let mut gx: f64 = 0.0;
let mut gy: f64 = 0.0;
for p in solution.iter() {
gx += p.x();
gy += p.y();
}
gx /= solution.len() as f64;
gy /= solution.len() as f64;
let mut vx: f64 = 0.0;
let mut vy: f64 = 0.0;
for p in solution.iter() {
vx += pow2(p.x() - gx);
vy += pow2(p.y() - gy);
}
vx /= solution.len() as f64;
vy /= solution.len() as f64;
(
dislike / (input.hole.exterior().coords_count() as f64),
-(vx + vy),
)
}
fn ascore(value: (f64, f64), progress: f64) -> f64 {
value.0 * progress + (1.0 - progress) * value.1
}
pub fn solve(
input: &Input,
mut solution: Vec<Point>,
time_limit: Duration,
fix_seed: bool,
initial_temperature: f64,
) -> (Vec<Point>, f64) {
let n = solution.len();
let mut rng = if fix_seed {
SmallRng::from_seed(SEED)
} else {
SmallRng::from_entropy()
};
let mut current_score = tscore(&solution, &input);
let out_edges = make_out_edges(&input.figure.edges, n);
let original_vertices = &input.figure.vertices;
let mut orders = vec![vec![]; n];
for i in 0..n {
orders[i] = make_determined_order(&out_edges, Some(i));
}
let start_at = Instant::now();
let mut best_solution = solution.clone();
let mut best_score = current_score;
let mut progress = 0.0;
let mut temperature = initial_temperature;
eprintln!("initial_temperature = {}", initial_temperature);
let distance_sums = calc_distance_sums(&out_edges, original_vertices.len());
let distance_total: usize = distance_sums.iter().sum();
// eprintln!("{} {:?}", distance_total, distance_sums);
let mut iter = 0;
let mut move_count = 0;
loop {
// check time limit
iter += 1;
if iter % 100 == 0 {
let elapsed = Instant::now() - start_at;
if best_score.0 == 0.0 || elapsed >= time_limit {
eprintln!("iter = {}, move_count = {}", iter, move_count);
let dislike = calculate_dislike(&best_solution, &input.hole);
return (best_solution, dislike);
}
// tweak temperature
progress = elapsed.as_secs_f64() / time_limit.as_secs_f64();
temperature = initial_temperature * (1.0 - progress) * (-progress).exp2();
}
// move to neighbor
let r = rng.gen::<f64>();
if r > progress {
let mut i = 0;
{
let r = rng.gen::<usize>() % distance_total;
let mut sum = 0;
for index in 0..n {
sum += distance_sums[index];
if r < sum {
i = index;
break;
}
}
}
let w = rng.gen::<usize>() % 40 + 5;
let next_solution =
random_move_one_point(i, w, &solution, &input, &mut rng, &out_edges, &orders);
if next_solution.is_none() {
continue;
}
move_count += 1;
let next_solution = next_solution.unwrap();
// calculate score. FIXME: slow
let new_score = tscore(&next_solution, &input);
let accept = {
let current = ascore(current_score, progress);
let new = ascore(new_score, progress);
if new < current {
true
} else {
// new_score >= current_score
let delta = new - current;
let accept_prob = (-delta / temperature).exp();
rng.gen::<f64>() < accept_prob
}
};
if accept {
// accept candidate
current_score = new_score;
solution = next_solution;
}
} else {
let i = rng.gen::<usize>() % n;
let candidate = make_next_candidates(
i,
original_vertices,
&input.hole,
input.epsilon,
&solution,
&out_edges,
&mut rng,
);
if candidate != original_vertices[i] {
move_count += 1;
}
// calculate score. FIXME: slow
let old = solution[i];
solution[i] = candidate;
let new_score = tscore(&solution, &input);
let accept = {
let current = ascore(current_score, progress);
let new = ascore(new_score, progress);
if new < current {
true
} else {
// new_score >= current_score
let delta = new - current;
let accept_prob = (-delta / temperature).exp();
rng.gen::<f64>() < accept_prob
}
};
if accept {
// accept candidate
current_score = new_score;
} else {
// reject candidate
solution[i] = old;
}
}
if current_score < best_score {
best_score = current_score;
best_solution = solution.clone();
}
}
}
fn make_next_candidates(
i: usize,
original_vertices: &[Point],
hole: &Polygon,
epsilon: i64,
solution: &[Point],
out_edges: &[Vec<usize>],
rng: &mut SmallRng,
) -> Point {
let some_neighbor = out_edges[i][0];
let original_squared_distance =
squared_distance(&original_vertices[i], &original_vertices[some_neighbor]);
if original_squared_distance < 100.0 || epsilon < 100000 {
let ring = Ring::from_epsilon(solution[some_neighbor], epsilon, original_squared_distance);
let mut points = ring_points(&ring);
points.shuffle(rng);
for &p in points.iter() {
if !is_valid_point_move(i, &p, solution, original_vertices, out_edges, hole, epsilon) {
continue;
}
return p;
}
} else {
let od = original_squared_distance.sqrt();
let low = od * (1.0 - epsilon as f64 / 1000000.0).sqrt();
let high = od * (1.0 + epsilon as f64 / 1000000.0).sqrt();
for _iter in 0..100 {
let d = low + (high - low) * rng.gen::<f64>();
let theta = 2.0 * std::f64::consts::PI * rng.gen::<f64>();
let vect = Point::new(
(theta.cos() * d + 0.5).floor(),
(theta.sin() * d + 0.5).floor(),
);
let p = solution[some_neighbor] + vect;
if !is_valid_point_move(i, &p, solution, original_vertices, out_edges, hole, epsilon) |
return p;
}
return solution[i];
}
unreachable!()
}
fn is_valid_point_move(
index: usize,
p: &Point,
solution: &[Point],
original_vertices: &[Point],
out_edges: &[Vec<usize>],
hole: &Polygon,
epsilon: i64,
) -> bool {
let ok1 = out_edges[index].iter().all(|&dst| {
is_allowed_distance(
&p,
&solution[dst],
&original_vertices[index],
&original_vertices[dst],
epsilon,
false,
)
});
if !ok1 {
return false;
}
let ok2 = out_edges[index]
.iter()
.all(|&dst| does_line_fit_in_hole(&p, &solution[dst], hole));
if !ok2 {
return false;
}
return true;
}
fn random_move_one_point(
from: usize,
w: usize,
solution: &Vec<Point>,
input: &Input,
rng: &mut SmallRng,
out_edges: &Vec<Vec<usize>>,
orders: &Vec<Vec<usize>>,
) -> Option<Vec<Point>> {
let mut gx: f64 = 0.0;
let mut gy: f64 = 0.0;
for p in solution.iter() {
gx += p.x();
gy += p.y();
}
gx /= solution.len() as f64;
gy /= solution.len() as f64;
let g = Point::new(g | {
continue;
} | conditional_block |
annealing3.rs | , 0xe4, 0xaa, 0x35, 0x07, 0x99, 0xe3, 0x2b, 0x9d, 0xc6,
];
fn tscore(solution: &Vec<Point>, input: &Input) -> (f64, f64) {
let dislike = calculate_dislike(&solution, &input.hole);
let mut gx: f64 = 0.0;
let mut gy: f64 = 0.0;
for p in solution.iter() {
gx += p.x();
gy += p.y();
}
gx /= solution.len() as f64;
gy /= solution.len() as f64;
let mut vx: f64 = 0.0;
let mut vy: f64 = 0.0;
for p in solution.iter() {
vx += pow2(p.x() - gx);
vy += pow2(p.y() - gy);
}
vx /= solution.len() as f64;
vy /= solution.len() as f64;
(
dislike / (input.hole.exterior().coords_count() as f64),
-(vx + vy),
)
}
fn ascore(value: (f64, f64), progress: f64) -> f64 {
value.0 * progress + (1.0 - progress) * value.1
}
pub fn solve(
input: &Input,
mut solution: Vec<Point>,
time_limit: Duration,
fix_seed: bool,
initial_temperature: f64,
) -> (Vec<Point>, f64) {
let n = solution.len();
let mut rng = if fix_seed {
SmallRng::from_seed(SEED)
} else {
SmallRng::from_entropy()
};
let mut current_score = tscore(&solution, &input);
let out_edges = make_out_edges(&input.figure.edges, n);
let original_vertices = &input.figure.vertices;
let mut orders = vec![vec![]; n];
for i in 0..n {
orders[i] = make_determined_order(&out_edges, Some(i));
}
let start_at = Instant::now();
let mut best_solution = solution.clone();
let mut best_score = current_score;
let mut progress = 0.0;
let mut temperature = initial_temperature;
eprintln!("initial_temperature = {}", initial_temperature);
let distance_sums = calc_distance_sums(&out_edges, original_vertices.len());
let distance_total: usize = distance_sums.iter().sum();
// eprintln!("{} {:?}", distance_total, distance_sums);
let mut iter = 0;
let mut move_count = 0;
loop {
// check time limit
iter += 1;
if iter % 100 == 0 {
let elapsed = Instant::now() - start_at;
if best_score.0 == 0.0 || elapsed >= time_limit {
eprintln!("iter = {}, move_count = {}", iter, move_count);
let dislike = calculate_dislike(&best_solution, &input.hole);
return (best_solution, dislike);
}
// tweak temperature
progress = elapsed.as_secs_f64() / time_limit.as_secs_f64();
temperature = initial_temperature * (1.0 - progress) * (-progress).exp2();
}
// move to neighbor
let r = rng.gen::<f64>();
if r > progress {
let mut i = 0;
{
let r = rng.gen::<usize>() % distance_total;
let mut sum = 0;
for index in 0..n {
sum += distance_sums[index];
if r < sum {
i = index;
break;
}
}
}
let w = rng.gen::<usize>() % 40 + 5;
let next_solution =
random_move_one_point(i, w, &solution, &input, &mut rng, &out_edges, &orders);
if next_solution.is_none() {
continue;
}
move_count += 1;
let next_solution = next_solution.unwrap();
// calculate score. FIXME: slow
let new_score = tscore(&next_solution, &input);
let accept = {
let current = ascore(current_score, progress);
let new = ascore(new_score, progress);
if new < current {
true
} else {
// new_score >= current_score
let delta = new - current;
let accept_prob = (-delta / temperature).exp();
rng.gen::<f64>() < accept_prob
}
};
if accept {
// accept candidate
current_score = new_score;
solution = next_solution;
}
} else {
let i = rng.gen::<usize>() % n;
let candidate = make_next_candidates(
i,
original_vertices,
&input.hole,
input.epsilon,
&solution,
&out_edges,
&mut rng,
);
if candidate != original_vertices[i] {
move_count += 1;
}
// calculate score. FIXME: slow
let old = solution[i];
solution[i] = candidate;
let new_score = tscore(&solution, &input);
let accept = {
let current = ascore(current_score, progress);
let new = ascore(new_score, progress);
if new < current {
true
} else {
// new_score >= current_score
let delta = new - current;
let accept_prob = (-delta / temperature).exp();
rng.gen::<f64>() < accept_prob
}
};
if accept {
// accept candidate
current_score = new_score;
} else {
// reject candidate
solution[i] = old;
}
}
if current_score < best_score {
best_score = current_score;
best_solution = solution.clone();
}
}
}
fn make_next_candidates(
i: usize,
original_vertices: &[Point],
hole: &Polygon,
epsilon: i64,
solution: &[Point],
out_edges: &[Vec<usize>],
rng: &mut SmallRng,
) -> Point {
let some_neighbor = out_edges[i][0];
let original_squared_distance =
squared_distance(&original_vertices[i], &original_vertices[some_neighbor]);
if original_squared_distance < 100.0 || epsilon < 100000 {
let ring = Ring::from_epsilon(solution[some_neighbor], epsilon, original_squared_distance);
let mut points = ring_points(&ring);
points.shuffle(rng);
for &p in points.iter() {
if !is_valid_point_move(i, &p, solution, original_vertices, out_edges, hole, epsilon) {
continue;
}
return p;
}
} else {
let od = original_squared_distance.sqrt();
let low = od * (1.0 - epsilon as f64 / 1000000.0).sqrt();
let high = od * (1.0 + epsilon as f64 / 1000000.0).sqrt();
for _iter in 0..100 {
let d = low + (high - low) * rng.gen::<f64>();
let theta = 2.0 * std::f64::consts::PI * rng.gen::<f64>();
let vect = Point::new(
(theta.cos() * d + 0.5).floor(),
(theta.sin() * d + 0.5).floor(),
);
let p = solution[some_neighbor] + vect;
if !is_valid_point_move(i, &p, solution, original_vertices, out_edges, hole, epsilon) {
continue;
}
return p;
}
return solution[i];
}
unreachable!()
}
fn is_valid_point_move(
index: usize,
p: &Point,
solution: &[Point],
original_vertices: &[Point],
out_edges: &[Vec<usize>],
hole: &Polygon,
epsilon: i64,
) -> bool {
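// A move is valid iff every incident edge keeps an epsilon-allowed length
// and still fits inside the hole polygon.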
let ok1 = out_edges[index].iter().all(|&dst| {
is_allowed_distance(
&p,
&solution[dst],
&original_vertices[index],
&original_vertices[dst],
epsilon,
false,
)
});
if !ok1 {
return false;
}
let ok2 = out_edges[index]
.iter()
.all(|&dst| does_line_fit_in_hole(&p, &solution[dst], hole));
if !ok2 {
return false;
}
return true;
}
fn | (
from: usize,
w: usize,
solution: &Vec<Point>,
input: &Input,
rng: &mut SmallRng,
out_edges: &Vec<Vec<usize>>,
orders: &Vec<Vec<usize>>,
) -> Option<Vec<Point>> {
let mut gx: f64 = 0.0;
let mut gy: f64 = 0.0;
for p in solution.iter() {
gx += p.x();
gy += p.y();
}
gx /= solution.len() as f64;
gy /= solution.len() as f64;
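// (gx, gy) is the centroid of the current pose.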
let g = Point::new(gx | random_move_one_point | identifier_name |
mod.rs | ,
};
use tokio::io::AsyncReadExt;
mod hash_writer;
use hash_writer::HashWriter;
mod error{
use snafu::Snafu;
#[derive(Debug,Snafu)]
#[snafu(visibility(pub))]
pub enum Error{
#[snafu(display("Invalid uri: {}", source))]
BadUri{
source: http::uri::InvalidUri,
},
#[snafu(display("Invalid url: {}", source))]
BadUrl{
source: url::ParseError,
},
}
}
#[derive(Debug)]
pub enum VerifyResult {
Good,
Bad,
NotInCache,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash)]
pub struct Artifact {
pub group: String,
pub artifact: String,
pub version: String,
pub classifier: Option<String>,
pub extension: Option<String>,
}
#[derive(Debug, Clone)]
pub struct ResolvedArtifact {
pub artifact: Artifact,
pub repo: Uri,
}
pub struct Cache;
impl Cacheable for ResolvedArtifact {
type Cache = crate::cache::FileCache;
fn cached_path(&self) -> PathBuf {
let mut p = PathBuf::new();
p.push(app_dirs::app_dir(app_dirs::AppDataType::UserCache, crate::APP_INFO, "maven_cache").expect("Cache directory must be accessible"));
p.push(&self.artifact.to_path());
p
}
fn uri(&self) -> crate::cache::Result<Uri> {
crate::cache::ResultExt::erased(self.artifact.get_uri_on(&self.repo))
}
}
impl cache::Cache<ResolvedArtifact> for Cache {
fn with(
artifact: ResolvedArtifact,
manager: download::Manager,
log: Logger,
) -> crate::cache::BoxFuture<PathBuf> {
let cached_path = artifact.cached_path();
let log = log.new(
o!("artifact"=>artifact.artifact.to_string(),"repo"=>artifact.repo.to_string(),"cached_path"=>cached_path.as_path().to_string_lossy().into_owned()),
);
Box::pin(async move{
info!(log, "caching maven artifact");
if !Self::is_cached(&artifact) {
info!(log, "artifact is not cached, downloading now");
let uri = artifact.uri()?;
manager
.download(uri.clone(), cached_path.clone(), false, &log).await.context(crate::cache::error::Downloading{uri})?;
}
Ok(cached_path)
})
}
}
impl Cache {
pub async fn verify_cached(
resolved: ResolvedArtifact,
manager: download::Manager,
) -> download::Result<VerifyResult> {
if Self::is_cached(&resolved) {
let cached_path = resolved.cached_path();
let sha_url_res = resolved.sha_uri();
let mut cached_file = tokio::fs::File::open(cached_path).await.context(download::error::Io)?;
let mut sha = HashWriter::new();
cached_file.copy(&mut sha).await.context(download::error::Io)?;
let cached_sha = sha.digest();
let sha_uri = sha_url_res?;
let (res,_) = manager.get(sha_uri)?.await?;
let hash_str = res.into_body().map_ok(hyper::Chunk::into_bytes).try_concat().await.context(download::error::Hyper)?;
if hash_str == format!("{}", cached_sha) {
Ok(VerifyResult::Good)
} else {
Ok(VerifyResult::Bad)
}
} else {
Ok(VerifyResult::NotInCache)
}
}
}
impl Artifact {
fn to_path(&self) -> PathBuf {
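// Standard Maven repository layout: <group as dirs>/<artifact>/<version>/<filename>.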
let mut p = PathBuf::new();
p.push(&self.group_path());
p.push(&self.artifact);
p.push(&self.version);
p.push(&self.artifact_filename());
p
}
pub fn get_uri_on(&self, base: &Uri) -> Result<Uri,error::Error> {
let base = crate::util::uri_to_url(base).context(error::BadUrl)?;
let path = self.to_path();
let url = base.join(path.to_str().expect("non unicode path encountered")).context(error::BadUrl)?;
crate::util::url_to_uri(&url).context(error::BadUri)
}
fn group_path(&self) -> PathBuf {
PathBuf::from_iter(self.group.split('.'))
}
fn artifact_filename(&self) -> String {
let classifier_fmt = match self.classifier {
Some(ref class) => format!("-{classifier}", classifier = class),
None => "".to_string(),
};
let extension_fmt = match self.extension {
Some(ref extension) => extension.clone(),
None => "jar".to_string(),
};
format!(
"{artifact}-{version}{classifier}.{extension}",
artifact = self.artifact,
version = self.version,
classifier = classifier_fmt,
extension = extension_fmt
)
}
pub fn | (&self, repo_uri: Uri) -> ResolvedArtifact {
ResolvedArtifact {
artifact: self.clone(),
repo: repo_uri,
}
}
pub fn download_from(
&self,
location: &Path,
repo_uri: Uri,
manager: download::Manager,
log: Logger,
) -> impl Future<Output=Result<(), crate::cache::Error>> + Send {
Cache::install_at(self.resolve(repo_uri), location.to_owned(), manager, log)
}
}
impl ResolvedArtifact {
pub fn to_path(&self) -> PathBuf {
self.artifact.to_path()
}
pub fn sha_uri(&self) -> crate::download::Result<Uri> {
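// Maven convention: the checksum sits next to the artifact at the same path plus ".sha1".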
let mut url = crate::util::uri_to_url(&self.uri().context(download::error::Cached)?).context(download::error::BadUrl)?;
let mut path = url.path().to_owned();
path.push_str(".sha1");
url.set_path(path.as_ref());
crate::util::url_to_uri(&url).context(download::error::BadUri)
}
pub fn install_at_no_classifier(
self,
location: PathBuf,
manager: download::Manager,
log: Logger,
) -> impl Future<Output=crate::cache::Result<()>> + Send {
async move{
let cached_path_no_classifier = Self {
artifact: Artifact {
classifier: None,
..self.artifact.clone()
},
repo: self.repo.clone(),
}.cached_path();
let filename = cached_path_no_classifier.file_name().expect("Maven artifact should have a filename");
<Self as Cacheable>::install_at_custom_filename(self, location, filename.to_os_string(), manager, log).await
}
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum ArtifactParseError {
BadNumberOfParts,
}
impl ToString for Artifact {
fn to_string(&self) -> String {
let mut strn = String::new();
strn.push_str(&self.group);
strn.push(':');
strn.push_str(&self.artifact);
strn.push(':');
strn.push_str(&self.version);
if let Some(ref classifier) = self.classifier {
strn.push(':');
strn.push_str(classifier);
}
if let Some(ref ext) = self.extension {
strn.push('@');
strn.push_str(ext);
}
strn
}
}
impl FromStr for Artifact {
type Err = ArtifactParseError;
fn from_str(s: &str) -> ::std::result::Result<Self, Self::Err> {
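// Accepted formats: "group:artifact:version[:classifier]" with an optional
// "@extension" suffix, e.g. "com.foo:bar:1.0:sources@zip".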
let parts: Vec<&str> = s.split('@').collect();
let (s, ext): (&str, Option<String>) = match *parts.as_slice() {
[s, ext] => (s, Some(ext.to_string())),
_ => (s, None),
};
let parts = s.split(':');
let parts: Vec<&str> = parts.collect();
match *parts.as_slice() {
[grp, art, ver] => Ok(Self {
group: grp.into(),
artifact: art.into(),
version: ver.into(),
classifier: None,
extension: ext,
}),
[grp, art, ver, class] => Ok(Self {
group: grp.into(),
artifact: art.into(),
version: ver.into(),
classifier: Some(class.into()),
extension: ext,
}),
_ => Err(ArtifactParseError::BadNumberOfParts),
}
}
}
#[cfg(test)]
mod test {
use super::Artifact;
#[test]
fn parses_simple() {
assert_eq!(
"net.minecraftforge.forge:some-jar:some-version".parse(),
Ok(Artifact {
group: "net.minecraftforge.forge".into(),
artifact: "some-jar".into(),
version: "some-version".into(),
classifier: None,
extension: None,
})
)
}
#[test]
fn parses_with_ext() {
assert_eq!(
"net.minecraftforge.forge:some-jar:some-version@zip".parse(),
Ok(Artifact {
group: "net.minecraftforge.forge".into(),
artifact: "some-jar".into(),
version: "some-version".into(),
classifier: None,
extension: Some("zip".into()),
})
)
}
#[test]
fn parses_with_classifier() {
assert_eq!(
"net.minecraftforge.forge:some-jar:some-version:universal".parse(),
Ok | resolve | identifier_name |
mod.rs | ,
};
use tokio::io::AsyncReadExt;
mod hash_writer;
use hash_writer::HashWriter;
mod error{
use snafu::Snafu;
#[derive(Debug,Snafu)]
#[snafu(visibility(pub))]
pub enum Error{
#[snafu(display("Invalid uri: {}", source))]
BadUri{
source: http::uri::InvalidUri,
},
#[snafu(display("Invalid url: {}", source))]
BadUrl{
source: url::ParseError,
},
}
}
#[derive(Debug)]
pub enum VerifyResult {
Good,
Bad,
NotInCache,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash)]
pub struct Artifact {
pub group: String,
pub artifact: String,
pub version: String,
pub classifier: Option<String>,
pub extension: Option<String>,
}
#[derive(Debug, Clone)]
pub struct ResolvedArtifact {
pub artifact: Artifact,
pub repo: Uri,
}
pub struct Cache;
impl Cacheable for ResolvedArtifact {
type Cache = crate::cache::FileCache;
fn cached_path(&self) -> PathBuf {
let mut p = PathBuf::new();
p.push(app_dirs::app_dir(app_dirs::AppDataType::UserCache, crate::APP_INFO, "maven_cache").expect("Cache directory must be accessible"));
p.push(&self.artifact.to_path());
p
}
fn uri(&self) -> crate::cache::Result<Uri> {
crate::cache::ResultExt::erased(self.artifact.get_uri_on(&self.repo))
}
}
impl cache::Cache<ResolvedArtifact> for Cache {
fn with(
artifact: ResolvedArtifact,
manager: download::Manager,
log: Logger,
) -> crate::cache::BoxFuture<PathBuf> {
let cached_path = artifact.cached_path();
let log = log.new(
o!("artifact"=>artifact.artifact.to_string(),"repo"=>artifact.repo.to_string(),"cached_path"=>cached_path.as_path().to_string_lossy().into_owned()),
);
Box::pin(async move{
info!(log, "caching maven artifact");
if !Self::is_cached(&artifact) {
info!(log, "artifact is not cached, downloading now");
let uri = artifact.uri()?;
manager
.download(uri.clone(), cached_path.clone(), false, &log).await.context(crate::cache::error::Downloading{uri})?;
}
Ok(cached_path)
})
}
}
impl Cache {
pub async fn verify_cached(
resolved: ResolvedArtifact,
manager: download::Manager,
) -> download::Result<VerifyResult> {
if Self::is_cached(&resolved) {
let cached_path = resolved.cached_path();
let sha_url_res = resolved.sha_uri();
let mut cached_file = tokio::fs::File::open(cached_path).await.context(download::error::Io)?;
let mut sha = HashWriter::new();
cached_file.copy(&mut sha).await.context(download::error::Io)?;
let cached_sha = sha.digest();
let sha_uri = sha_url_res?;
let (res,_) = manager.get(sha_uri)?.await?;
let hash_str = res.into_body().map_ok(hyper::Chunk::into_bytes).try_concat().await.context(download::error::Hyper)?;
if hash_str == format!("{}", cached_sha) {
Ok(VerifyResult::Good)
} else {
Ok(VerifyResult::Bad)
}
} else {
Ok(VerifyResult::NotInCache)
}
}
}
impl Artifact {
fn to_path(&self) -> PathBuf {
let mut p = PathBuf::new();
p.push(&self.group_path());
p.push(&self.artifact);
p.push(&self.version);
p.push(&self.artifact_filename());
p
}
pub fn get_uri_on(&self, base: &Uri) -> Result<Uri,error::Error> {
let base = crate::util::uri_to_url(base).context(error::BadUrl)?;
let path = self.to_path();
let url = base.join(path.to_str().expect("non unicode path encountered")).context(error::BadUrl)?;
crate::util::url_to_uri(&url).context(error::BadUri)
}
fn group_path(&self) -> PathBuf {
PathBuf::from_iter(self.group.split('.'))
}
fn artifact_filename(&self) -> String {
let classifier_fmt = match self.classifier {
Some(ref class) => format!("-{classifier}", classifier = class),
None => "".to_string(),
};
let extension_fmt = match self.extension {
Some(ref extension) => extension.clone(),
None => "jar".to_string(),
};
format!(
"{artifact}-{version}{classifier}.{extension}",
artifact = self.artifact,
version = self.version,
classifier = classifier_fmt,
extension = extension_fmt
)
}
pub fn resolve(&self, repo_uri: Uri) -> ResolvedArtifact {
ResolvedArtifact {
artifact: self.clone(),
repo: repo_uri,
}
}
pub fn download_from(
&self,
location: &Path,
repo_uri: Uri,
manager: download::Manager,
log: Logger,
) -> impl Future<Output=Result<(), crate::cache::Error>> + Send {
Cache::install_at(self.resolve(repo_uri), location.to_owned(), manager, log)
}
}
impl ResolvedArtifact {
pub fn to_path(&self) -> PathBuf {
self.artifact.to_path()
}
pub fn sha_uri(&self) -> crate::download::Result<Uri> {
let mut url = crate::util::uri_to_url(&self.uri().context(download::error::Cached)?).context(download::error::BadUrl)?;
let mut path = url.path().to_owned();
path.push_str(".sha1");
url.set_path(path.as_ref());
crate::util::url_to_uri(&url).context(download::error::BadUri)
}
pub fn install_at_no_classifier(
self,
location: PathBuf,
manager: download::Manager,
log: Logger,
) -> impl Future<Output=crate::cache::Result<()>> + Send {
async move{
let cached_path_no_classifier = Self {
artifact: Artifact {
classifier: None,
..self.artifact.clone()
},
repo: self.repo.clone(),
}.cached_path();
let filename = cached_path_no_classifier.file_name().expect("Maven artifact should have a filename");
<Self as Cacheable>::install_at_custom_filename(self, location, filename.to_os_string(), manager, log).await
}
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum ArtifactParseError {
BadNumberOfParts,
}
impl ToString for Artifact {
fn to_string(&self) -> String {
let mut strn = String::new();
strn.push_str(&self.group);
strn.push(':');
strn.push_str(&self.artifact);
strn.push(':');
strn.push_str(&self.version);
if let Some(ref classifier) = self.classifier |
if let Some(ref ext) = self.extension {
strn.push('@');
strn.push_str(ext);
}
strn
}
}
impl FromStr for Artifact {
type Err = ArtifactParseError;
fn from_str(s: &str) -> ::std::result::Result<Self, Self::Err> {
let parts: Vec<&str> = s.split('@').collect();
let (s, ext): (&str, Option<String>) = match *parts.as_slice() {
[s, ext] => (s, Some(ext.to_string())),
_ => (s, None),
};
let parts = s.split(':');
let parts: Vec<&str> = parts.collect();
match *parts.as_slice() {
[grp, art, ver] => Ok(Self {
group: grp.into(),
artifact: art.into(),
version: ver.into(),
classifier: None,
extension: ext,
}),
[grp, art, ver, class] => Ok(Self {
group: grp.into(),
artifact: art.into(),
version: ver.into(),
classifier: Some(class.into()),
extension: ext,
}),
_ => Err(ArtifactParseError::BadNumberOfParts),
}
}
}
#[cfg(test)]
mod test {
use super::Artifact;
#[test]
fn parses_simple() {
assert_eq!(
"net.minecraftforge.forge:some-jar:some-version".parse(),
Ok(Artifact {
group: "net.minecraftforge.forge".into(),
artifact: "some-jar".into(),
version: "some-version".into(),
classifier: None,
extension: None,
})
)
}
#[test]
fn parses_with_ext() {
assert_eq!(
"net.minecraftforge.forge:some-jar:some-version@zip".parse(),
Ok(Artifact {
group: "net.minecraftforge.forge".into(),
artifact: "some-jar".into(),
version: "some-version".into(),
classifier: None,
extension: Some("zip".into()),
})
)
}
#[test]
fn parses_with_classifier() {
assert_eq!(
"net.minecraftforge.forge:some-jar:some-version:universal".parse(),
| {
strn.push(':');
strn.push_str(classifier);
} | conditional_block |
mod.rs | ::*,
};
use tokio::io::AsyncReadExt;
mod hash_writer;
use hash_writer::HashWriter;
mod error{
use snafu::Snafu;
#[derive(Debug,Snafu)]
#[snafu(visibility(pub))]
pub enum Error{
#[snafu(display("Invalid uri: {}", source))]
BadUri{
source: http::uri::InvalidUri,
},
#[snafu(display("Invalid url: {}", source))]
BadUrl{
source: url::ParseError,
},
}
}
#[derive(Debug)]
pub enum VerifyResult {
Good,
Bad,
NotInCache,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash)]
pub struct Artifact {
pub group: String,
pub artifact: String,
pub version: String,
pub classifier: Option<String>,
pub extension: Option<String>,
}
#[derive(Debug, Clone)]
pub struct ResolvedArtifact {
pub artifact: Artifact,
pub repo: Uri,
}
pub struct Cache;
impl Cacheable for ResolvedArtifact {
type Cache = crate::cache::FileCache;
fn cached_path(&self) -> PathBuf {
let mut p = PathBuf::new();
p.push(app_dirs::app_dir(app_dirs::AppDataType::UserCache, crate::APP_INFO, "maven_cache").expect("Cache directory must be accessible"));
p.push(&self.artifact.to_path());
p
}
fn uri(&self) -> crate::cache::Result<Uri> {
crate::cache::ResultExt::erased(self.artifact.get_uri_on(&self.repo))
}
}
impl cache::Cache<ResolvedArtifact> for Cache {
fn with(
artifact: ResolvedArtifact,
manager: download::Manager,
log: Logger,
) -> crate::cache::BoxFuture<PathBuf> {
let cached_path = artifact.cached_path();
let log = log.new(
o!("artifact"=>artifact.artifact.to_string(),"repo"=>artifact.repo.to_string(),"cached_path"=>cached_path.as_path().to_string_lossy().into_owned()),
);
Box::pin(async move{
info!(log, "caching maven artifact");
if !Self::is_cached(&artifact) {
info!(log, "artifact is not cached, downloading now");
let uri = artifact.uri()?;
manager
.download(uri.clone(), cached_path.clone(), false, &log).await.context(crate::cache::error::Downloading{uri})?;
}
Ok(cached_path)
})
}
}
impl Cache {
pub async fn verify_cached(
resolved: ResolvedArtifact,
manager: download::Manager,
) -> download::Result<VerifyResult> {
if Self::is_cached(&resolved) {
let cached_path = resolved.cached_path();
let sha_url_res = resolved.sha_uri();
let mut cached_file = tokio::fs::File::open(cached_path).await.context(download::error::Io)?;
let mut sha = HashWriter::new();
cached_file.copy(&mut sha).await.context(download::error::Io)?;
let cached_sha = sha.digest();
let sha_uri = sha_url_res?;
let (res,_) = manager.get(sha_uri)?.await?;
let hash_str = res.into_body().map_ok(hyper::Chunk::into_bytes).try_concat().await.context(download::error::Hyper)?;
if hash_str == format!("{}", cached_sha) {
Ok(VerifyResult::Good)
} else {
Ok(VerifyResult::Bad)
}
} else {
Ok(VerifyResult::NotInCache)
}
}
}
impl Artifact {
fn to_path(&self) -> PathBuf {
let mut p = PathBuf::new();
p.push(&self.group_path());
p.push(&self.artifact);
p.push(&self.version);
p.push(&self.artifact_filename());
p
}
pub fn get_uri_on(&self, base: &Uri) -> Result<Uri,error::Error> {
let base = crate::util::uri_to_url(base).context(error::BadUrl)?;
let path = self.to_path();
let url = base.join(path.to_str().expect("non unicode path encountered")).context(error::BadUrl)?;
crate::util::url_to_uri(&url).context(error::BadUri)
}
fn group_path(&self) -> PathBuf {
PathBuf::from_iter(self.group.split('.'))
}
fn artifact_filename(&self) -> String {
let classifier_fmt = match self.classifier {
Some(ref class) => format!("-{classifier}", classifier = class),
None => "".to_string(),
};
let extension_fmt = match self.extension {
Some(ref extension) => extension.clone(),
None => "jar".to_string(),
};
format!(
"{artifact}-{version}{classifier}.{extension}",
artifact = self.artifact,
version = self.version, | classifier = classifier_fmt,
extension = extension_fmt
)
}
pub fn resolve(&self, repo_uri: Uri) -> ResolvedArtifact {
ResolvedArtifact {
artifact: self.clone(),
repo: repo_uri,
}
}
pub fn download_from(
&self,
location: &Path,
repo_uri: Uri,
manager: download::Manager,
log: Logger,
) -> impl Future<Output=Result<(), crate::cache::Error>> + Send {
Cache::install_at(self.resolve(repo_uri), location.to_owned(), manager, log)
}
}
impl ResolvedArtifact {
pub fn to_path(&self) -> PathBuf {
self.artifact.to_path()
}
pub fn sha_uri(&self) -> crate::download::Result<Uri> {
let mut url = crate::util::uri_to_url(&self.uri().context(download::error::Cached)?).context(download::error::BadUrl)?;
let mut path = url.path().to_owned();
path.push_str(".sha1");
url.set_path(path.as_ref());
crate::util::url_to_uri(&url).context(download::error::BadUri)
}
pub fn install_at_no_classifier(
self,
location: PathBuf,
manager: download::Manager,
log: Logger,
) -> impl Future<Output=crate::cache::Result<()>> + Send {
async move{
let cached_path_no_classifier = Self {
artifact: Artifact {
classifier: None,
..self.artifact.clone()
},
repo: self.repo.clone(),
}.cached_path();
let filename = cached_path_no_classifier.file_name().expect("Maven artifact should have a filename");
<Self as Cacheable>::install_at_custom_filename(self, location, filename.to_os_string(), manager, log).await
}
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum ArtifactParseError {
BadNumberOfParts,
}
impl ToString for Artifact {
fn to_string(&self) -> String {
let mut strn = String::new();
strn.push_str(&self.group);
strn.push(':');
strn.push_str(&self.artifact);
strn.push(':');
strn.push_str(&self.version);
if let Some(ref classifier) = self.classifier {
strn.push(':');
strn.push_str(classifier);
}
if let Some(ref ext) = self.extension {
strn.push('@');
strn.push_str(ext);
}
strn
}
}
impl FromStr for Artifact {
type Err = ArtifactParseError;
fn from_str(s: &str) -> ::std::result::Result<Self, Self::Err> {
let parts: Vec<&str> = s.split('@').collect();
let (s, ext): (&str, Option<String>) = match *parts.as_slice() {
[s, ext] => (s, Some(ext.to_string())),
_ => (s, None),
};
let parts = s.split(':');
let parts: Vec<&str> = parts.collect();
match *parts.as_slice() {
[grp, art, ver] => Ok(Self {
group: grp.into(),
artifact: art.into(),
version: ver.into(),
classifier: None,
extension: ext,
}),
[grp, art, ver, class] => Ok(Self {
group: grp.into(),
artifact: art.into(),
version: ver.into(),
classifier: Some(class.into()),
extension: ext,
}),
_ => Err(ArtifactParseError::BadNumberOfParts),
}
}
}
#[cfg(test)]
mod test {
use super::Artifact;
#[test]
fn parses_simple() {
assert_eq!(
"net.minecraftforge.forge:some-jar:some-version".parse(),
Ok(Artifact {
group: "net.minecraftforge.forge".into(),
artifact: "some-jar".into(),
version: "some-version".into(),
classifier: None,
extension: None,
})
)
}
#[test]
fn parses_with_ext() {
assert_eq!(
"net.minecraftforge.forge:some-jar:some-version@zip".parse(),
Ok(Artifact {
group: "net.minecraftforge.forge".into(),
artifact: "some-jar".into(),
version: "some-version".into(),
classifier: None,
extension: Some("zip".into()),
})
)
}
#[test]
fn parses_with_classifier() {
assert_eq!(
"net.minecraftforge.forge:some-jar:some-version:universal".parse(),
Ok | random_line_split |
|
mod.rs | ,
};
use tokio::io::AsyncReadExt;
mod hash_writer;
use hash_writer::HashWriter;
mod error{
use snafu::Snafu;
#[derive(Debug,Snafu)]
#[snafu(visibility(pub))]
pub enum Error{
#[snafu(display("Invalid uri: {}", source))]
BadUri{
source: http::uri::InvalidUri,
},
#[snafu(display("Invalid url: {}", source))]
BadUrl{
source: url::ParseError,
},
}
}
#[derive(Debug)]
pub enum VerifyResult {
Good,
Bad,
NotInCache,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash)]
pub struct Artifact {
pub group: String,
pub artifact: String,
pub version: String,
pub classifier: Option<String>,
pub extension: Option<String>,
}
#[derive(Debug, Clone)]
pub struct ResolvedArtifact {
pub artifact: Artifact,
pub repo: Uri,
}
pub struct Cache;
impl Cacheable for ResolvedArtifact {
type Cache = crate::cache::FileCache;
fn cached_path(&self) -> PathBuf {
let mut p = PathBuf::new();
p.push(app_dirs::app_dir(app_dirs::AppDataType::UserCache, crate::APP_INFO, "maven_cache").expect("Cache directory must be accessible"));
p.push(&self.artifact.to_path());
p
}
fn uri(&self) -> crate::cache::Result<Uri> {
crate::cache::ResultExt::erased(self.artifact.get_uri_on(&self.repo))
}
}
impl cache::Cache<ResolvedArtifact> for Cache {
fn with(
artifact: ResolvedArtifact,
manager: download::Manager,
log: Logger,
) -> crate::cache::BoxFuture<PathBuf> {
let cached_path = artifact.cached_path();
let log = log.new(
o!("artifact"=>artifact.artifact.to_string(),"repo"=>artifact.repo.to_string(),"cached_path"=>cached_path.as_path().to_string_lossy().into_owned()),
);
Box::pin(async move{
info!(log, "caching maven artifact");
if !Self::is_cached(&artifact) {
info!(log, "artifact is not cached, downloading now");
let uri = artifact.uri()?;
manager
.download(uri.clone(), cached_path.clone(), false, &log).await.context(crate::cache::error::Downloading{uri})?;
}
Ok(cached_path)
})
}
}
impl Cache {
pub async fn verify_cached(
resolved: ResolvedArtifact,
manager: download::Manager,
) -> download::Result<VerifyResult> {
if Self::is_cached(&resolved) {
let cached_path = resolved.cached_path();
let sha_url_res = resolved.sha_uri();
let mut cached_file = tokio::fs::File::open(cached_path).await.context(download::error::Io)?;
let mut sha = HashWriter::new();
cached_file.copy(&mut sha).await.context(download::error::Io)?;
let cached_sha = sha.digest();
let sha_uri = sha_url_res?;
let (res,_) = manager.get(sha_uri)?.await?;
let hash_str = res.into_body().map_ok(hyper::Chunk::into_bytes).try_concat().await.context(download::error::Hyper)?;
if hash_str == format!("{}", cached_sha) {
Ok(VerifyResult::Good)
} else {
Ok(VerifyResult::Bad)
}
} else {
Ok(VerifyResult::NotInCache)
}
}
}
impl Artifact {
fn to_path(&self) -> PathBuf |
pub fn get_uri_on(&self, base: &Uri) -> Result<Uri,error::Error> {
let base = crate::util::uri_to_url(base).context(error::BadUrl)?;
let path = self.to_path();
let url = base.join(path.to_str().expect("non unicode path encountered")).context(error::BadUrl)?;
crate::util::url_to_uri(&url).context(error::BadUri)
}
fn group_path(&self) -> PathBuf {
PathBuf::from_iter(self.group.split('.'))
}
fn artifact_filename(&self) -> String {
let classifier_fmt = match self.classifier {
Some(ref class) => format!("-{classifier}", classifier = class),
None => "".to_string(),
};
let extension_fmt = match self.extension {
Some(ref extension) => extension.clone(),
None => "jar".to_string(),
};
format!(
"{artifact}-{version}{classifier}.{extension}",
artifact = self.artifact,
version = self.version,
classifier = classifier_fmt,
extension = extension_fmt
)
}
pub fn resolve(&self, repo_uri: Uri) -> ResolvedArtifact {
ResolvedArtifact {
artifact: self.clone(),
repo: repo_uri,
}
}
pub fn download_from(
&self,
location: &Path,
repo_uri: Uri,
manager: download::Manager,
log: Logger,
) -> impl Future<Output=Result<(), crate::cache::Error>> + Send {
Cache::install_at(self.resolve(repo_uri), location.to_owned(), manager, log)
}
}
impl ResolvedArtifact {
pub fn to_path(&self) -> PathBuf {
self.artifact.to_path()
}
pub fn sha_uri(&self) -> crate::download::Result<Uri> {
let mut url = crate::util::uri_to_url(&self.uri().context(download::error::Cached)?).context(download::error::BadUrl)?;
let mut path = url.path().to_owned();
path.push_str(".sha1");
url.set_path(path.as_ref());
crate::util::url_to_uri(&url).context(download::error::BadUri)
}
pub fn install_at_no_classifier(
self,
location: PathBuf,
manager: download::Manager,
log: Logger,
) -> impl Future<Output=crate::cache::Result<()>> + Send {
async move{
let cached_path_no_classifier = Self {
artifact: Artifact {
classifier: None,
..self.artifact.clone()
},
repo: self.repo.clone(),
}.cached_path();
let filename = cached_path_no_classifier.file_name().expect("Maven artifact should have a filename");
<Self as Cacheable>::install_at_custom_filename(self, location, filename.to_os_string(), manager, log).await
}
}
}
#[derive(Debug, PartialEq, Eq)]
pub enum ArtifactParseError {
BadNumberOfParts,
}
impl ToString for Artifact {
fn to_string(&self) -> String {
let mut strn = String::new();
strn.push_str(&self.group);
strn.push(':');
strn.push_str(&self.artifact);
strn.push(':');
strn.push_str(&self.version);
if let Some(ref classifier) = self.classifier {
strn.push(':');
strn.push_str(classifier);
}
if let Some(ref ext) = self.extension {
strn.push('@');
strn.push_str(ext);
}
strn
}
}
impl FromStr for Artifact {
type Err = ArtifactParseError;
fn from_str(s: &str) -> ::std::result::Result<Self, Self::Err> {
let parts: Vec<&str> = s.split('@').collect();
let (s, ext): (&str, Option<String>) = match *parts.as_slice() {
[s, ext] => (s, Some(ext.to_string())),
_ => (s, None),
};
let parts = s.split(':');
let parts: Vec<&str> = parts.collect();
match *parts.as_slice() {
[grp, art, ver] => Ok(Self {
group: grp.into(),
artifact: art.into(),
version: ver.into(),
classifier: None,
extension: ext,
}),
[grp, art, ver, class] => Ok(Self {
group: grp.into(),
artifact: art.into(),
version: ver.into(),
classifier: Some(class.into()),
extension: ext,
}),
_ => Err(ArtifactParseError::BadNumberOfParts),
}
}
}
#[cfg(test)]
mod test {
use super::Artifact;
#[test]
fn parses_simple() {
assert_eq!(
"net.minecraftforge.forge:some-jar:some-version".parse(),
Ok(Artifact {
group: "net.minecraftforge.forge".into(),
artifact: "some-jar".into(),
version: "some-version".into(),
classifier: None,
extension: None,
})
)
}
#[test]
fn parses_with_ext() {
assert_eq!(
"net.minecraftforge.forge:some-jar:some-version@zip".parse(),
Ok(Artifact {
group: "net.minecraftforge.forge".into(),
artifact: "some-jar".into(),
version: "some-version".into(),
classifier: None,
extension: Some("zip".into()),
})
)
}
#[test]
fn parses_with_classifier() {
assert_eq!(
"net.minecraftforge.forge:some-jar:some-version:universal".parse(),
| {
let mut p = PathBuf::new();
p.push(&self.group_path());
p.push(&self.artifact);
p.push(&self.version);
p.push(&self.artifact_filename());
p
} | identifier_body |
plots.py | group:
List of the demographics to adjust for.
quant:
a list of event time quantiles at which the models are to be evaluated.
strat:
Specifies how the bins are computed. One of:
"quantile": Equal sized bins.
"uniform": Uniformly stratified.
adj (str):
Determines if IPCW adjustment is carried out on a population or subgroup
level.
One of "IPCWpop", "IPCWcon" (not implemented).
Returns:
A plotted matplotlib calibration curve.
"""
allscores = np.ones_like(t).astype('float')
for fold in set(folds):
allscores[folds == fold] = scores[fold]
scores = allscores
b_fc = (0, 0, 1, .4)
r_fc = (1, 0, 0, .2)
b_ec = (0, 0, 1, .8)
r_ec = (1, 0, 0, .8)
n_bins = 20
hatch = '//'
fs = 16
prob_true_n, _, outbins, ece = calibration_curve(
scores,
e,
t,
a,
group,
quant,
typ=adj,
ret_bins=True,
strat=strat,
n_bins=n_bins)
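# Reliability diagram: blue bars show the IPCW-adjusted observed rate per bin;
# red hatched bars mark the gap to the diagonal (perfect calibration).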
for d in range(len(prob_true_n)):
binsize = outbins[d + 1] - outbins[d]
binloc = (outbins[d + 1] + outbins[d]) / 2
gap = (prob_true_n[d] - binloc)
if gap < 0:
bottom = prob_true_n[d]
else:
bottom = prob_true_n[d] - abs(gap)
if d == len(prob_true_n) - 1:
lbl1 = 'Score'
lbl2 = 'Gap'
else:
lbl1 = None
lbl2 = None
if plot:
ax.bar(
binloc,
prob_true_n[d],
width=binsize,
facecolor=b_fc,
edgecolor=b_ec,
linewidth=2.5,
label=lbl1)
ax.bar(
binloc,
abs(gap),
bottom=bottom,
width=binsize,
facecolor=r_fc,
edgecolor=r_ec,
linewidth=2.5,
hatch=hatch,
label=lbl2)
d += 1
if plot:
ax.plot([0, 1], [0, 1], c='k', ls='--', lw=2, zorder=100)
ax.set_xlabel('Predicted Score', fontsize=fs)
ax.set_ylabel('True Score', fontsize=fs)
ax.legend(fontsize=fs)
ax.set_title(str(group), fontsize=fs)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.grid(ls=':', lw=2, zorder=-100, color='grey')
ax.set_axisbelow(True)
ax.text(
x=0.030,
y=.7,
s='ECE=' + str(round(ece, 3)),
size=fs,
bbox=dict(boxstyle='round', fc='white', ec='grey', pad=0.2))
return ece
def plot_roc_curve(ax,
scores,
e,
t,
a,
folds,
groups,
quant,
plot=True):
"""Function to plot ROC at a specified time horizon.
Accepts a matplotlib figure instance, risk scores from a trained survival
analysis model, and quantiles of event interest and generates an IPCW
adjusted ROC curve.
Args:
ax:
a matplotlib subfigure object.
scores:
risk scores P(T>t) issued by a trained survival analysis model.
e:
a numpy array of event indicators.
t:
a numpy array of event/censoring times.
a:
a numpy vector of protected attributes.
folds:
a numpy vector of cv folds.
groups:
List of the demographics to adjust for.
quant:
a list of event time quantiles at which the models are to be evaluated.
Returns:
A plotted matplotlib ROC curve.
"""
fs = 16
fprs, tprs, tprs_std, ctds, brss = {}, {}, {}, {}, {}
fprs['all'] = {}
tprs['all'] = {}
ctds['all'] = {}
brss['all'] = {}
for group in groups:
fprs[group] = {}
tprs[group] = {}
ctds[group] = {}
brss[group] = {}
for fold in set(folds):
ate = a[folds == fold]
str_test = baseline_models.structure_for_eval_(t[folds == fold],
e[folds == fold])
if len(set(folds)) == 1:
atr = ate
str_train = str_test
else:
atr = a[folds != fold]
str_train = baseline_models.structure_for_eval_(t[folds != fold],
e[folds != fold])
t_tr_max = np.max([t_[1] for t_ in str_train])
t_ = np.array([t_[1] for t_ in str_test])
clean = (t_<=t_tr_max)
str_test = str_test[t_<=t_tr_max]
ate = ate[t_<=t_tr_max]
scores_f = scores[fold][clean]
for group in groups:
te_protg = (ate == group)
tr_protg = (atr == group)
try:
roc_m = cumulative_dynamic_auc(str_train[tr_protg], str_test[te_protg],
-scores_f[te_protg], [quant])
brs_m = brier_score(str_train[tr_protg], str_test[te_protg],
scores_f[te_protg], quant)
ctd_m = concordance_index_ipcw(str_train[tr_protg], str_test[te_protg],
-scores_f[te_protg], quant)[0]
except Exception:  # subgroup-level IPCW failed; fall back to population-level estimates
roc_m = cumulative_dynamic_auc(str_train, str_test[te_protg],
-scores_f[te_protg], [quant])
brs_m = brier_score(str_train, str_test[te_protg],
scores_f[te_protg], quant)
ctd_m = concordance_index_ipcw(str_train, str_test[te_protg],
-scores_f[te_protg], quant)[0]
fprs[group][fold] = roc_m[0][0][1]
tprs[group][fold] = roc_m[0][0][0]
ctds[group][fold] = ctd_m
brss[group][fold] = brs_m[1][0]
roc_m = cumulative_dynamic_auc(str_train, str_test, -scores_f, [quant])
ctd_m = concordance_index_ipcw(str_train, str_test, -scores_f, quant)[0]
brs_m = brier_score(str_train, str_test, scores_f, quant)
fprs['all'][fold], tprs['all'][fold] = roc_m[0][0][1], roc_m[0][0][0]
ctds['all'][fold] = ctd_m
brss['all'][fold] = brs_m[1][0]
cols = ['b', 'r', 'g']
roc_auc = {}
ctds_mean = {}
brss_mean = {}
j = 0
for group in list(groups) + ['all']:
all_fpr = np.unique(np.concatenate([fprs[group][i] for i in set(folds)]))
# The ROC curves are interpolated at these points.
mean_tprs = []
for i in set(folds):
mean_tprs.append(np.interp(all_fpr, fprs[group][i], tprs[group][i]))
# Finally the interpolated curves are averaged over to compute AUC.
mean_tpr = np.mean(mean_tprs, axis=0)
std_tpr = 1.96 * np.std(mean_tprs, axis=0) / np.sqrt(10)
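# 1.96 / sqrt(10) gives an approximate 95% CI half-width across the 10 CV folds.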
fprs[group]['macro'] = all_fpr
tprs[group]['macro'] = mean_tpr
tprs_std[group] = std_tpr
roc_auc[group] = auc(fprs[group]['macro'], tprs[group]['macro'])
ctds_mean[group] = np | """Function to plot Calibration Curve at a specified time horizon.
Accepts a matplotlib figure instance, risk scores from a trained survival
analysis model, and quantiles of event interest and generates an IPCW
adjusted calibration curve.
Args:
ax:
a matplotlib subfigure object.
scores:
risk scores P(T>t) issued by a trained survival analysis model
(output of deep_cox_mixtures.models.predict_survival).
e:
a numpy array of event indicators.
t:
a numpy array of event/censoring times.
a:
a numpy vector of protected attributes.
folds:
a numpy vector of cv folds. | identifier_body |
|
plots.py | subfigure object.
scores:
risk scores P(T>t) issued by a trained survival analysis model
(output of deep_cox_mixtures.models.predict_survival).
e:
a numpy array of event indicators.
t:
a numpy array of event/censoring times.
a:
a numpy vector of protected attributes.
folds:
a numpy vector of cv folds.
group:
List of the demographics to adjust for.
quant:
a list of event time quantiles at which the models are to be evaluated.
strat:
Specifies how the bins are computed. One of:
"quantile": Equal sized bins.
"uniform": Uniformly stratified.
adj (str):
Determines if IPCW adjustment is carried out on a population or subgroup
level.
One of "IPCWpop", "IPCWcon" (not implemented).
Returns:
A plotted matplotlib calibration curve.
"""
allscores = np.ones_like(t).astype('float')
for fold in set(folds):
allscores[folds == fold] = scores[fold]
scores = allscores
b_fc = (0, 0, 1, .4)
r_fc = (1, 0, 0, .2)
b_ec = (0, 0, 1, .8)
r_ec = (1, 0, 0, .8)
n_bins = 20
hatch = '//'
fs = 16
prob_true_n, _, outbins, ece = calibration_curve(
scores,
e,
t,
a,
group,
quant,
typ=adj,
ret_bins=True,
strat=strat,
n_bins=n_bins)
for d in range(len(prob_true_n)):
binsize = outbins[d + 1] - outbins[d]
binloc = (outbins[d + 1] + outbins[d]) / 2
gap = (prob_true_n[d] - binloc)
if gap < 0:
bottom = prob_true_n[d]
else:
bottom = prob_true_n[d] - abs(gap)
if d == len(prob_true_n) - 1:
lbl1 = 'Score'
lbl2 = 'Gap'
else:
lbl1 = None
lbl2 = None
if plot:
ax.bar(
binloc,
prob_true_n[d],
width=binsize,
facecolor=b_fc,
edgecolor=b_ec,
linewidth=2.5,
label=lbl1)
ax.bar(
binloc,
abs(gap),
bottom=bottom,
width=binsize,
facecolor=r_fc,
edgecolor=r_ec,
linewidth=2.5,
hatch=hatch,
label=lbl2)
d += 1
if plot:
ax.plot([0, 1], [0, 1], c='k', ls='--', lw=2, zorder=100)
ax.set_xlabel('Predicted Score', fontsize=fs)
ax.set_ylabel('True Score', fontsize=fs)
ax.legend(fontsize=fs)
ax.set_title(str(group), fontsize=fs)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.grid(ls=':', lw=2, zorder=-100, color='grey')
ax.set_axisbelow(True)
ax.text(
x=0.030,
y=.7,
s='ECE=' + str(round(ece, 3)),
size=fs,
bbox=dict(boxstyle='round', fc='white', ec='grey', pad=0.2))
return ece
def plot_roc_curve(ax,
scores,
e,
t,
a,
folds,
groups,
quant,
plot=True):
"""Function to plot ROC at a specified time horizon.
Accepts a matplotlib figure instance, risk scores from a trained survival
analysis model, and quantiles of event interest and generates an IPCW
adjusted ROC curve.
Args:
ax:
a matplotlib subfigure object.
scores:
risk scores P(T>t) issued by a trained survival analysis model.
e:
a numpy array of event indicators.
t:
a numpy array of event/censoring times.
a:
a numpy vector of protected attributes.
folds:
a numpy vector of cv folds.
groups:
List of the demographics to adjust for.
quant:
a list of event time quantiles at which the models are to be evaluated.
Returns:
A plotted matplotlib ROC curve.
"""
fs = 16
fprs, tprs, tprs_std, ctds, brss = {}, {}, {}, {}, {}
fprs['all'] = {}
tprs['all'] = {}
ctds['all'] = {}
brss['all'] = {}
for group in groups:
fprs[group] = {}
tprs[group] = {}
ctds[group] = {}
brss[group] = {}
for fold in set(folds):
ate = a[folds == fold]
str_test = baseline_models.structure_for_eval_(t[folds == fold],
e[folds == fold])
if len(set(folds)) == 1:
atr = ate
str_train = str_test
else:
atr = a[folds != fold]
str_train = baseline_models.structure_for_eval_(t[folds != fold],
e[folds != fold])
t_tr_max = np.max([t_[1] for t_ in str_train])
t_ = np.array([t_[1] for t_ in str_test])
clean = (t_<=t_tr_max)
str_test = str_test[t_<=t_tr_max]
ate = ate[t_<=t_tr_max]
scores_f = scores[fold][clean]
for group in groups:
te_protg = (ate == group)
tr_protg = (atr == group)
try:
roc_m = cumulative_dynamic_auc(str_train[tr_protg], str_test[te_protg],
-scores_f[te_protg], [quant])
brs_m = brier_score(str_train[tr_protg], str_test[te_protg],
scores_f[te_protg], quant)
ctd_m = concordance_index_ipcw(str_train[tr_protg], str_test[te_protg],
-scores_f[te_protg], quant)[0]
except Exception:  # subgroup-level IPCW failed; fall back to population-level estimates
roc_m = cumulative_dynamic_auc(str_train, str_test[te_protg],
-scores_f[te_protg], [quant])
brs_m = brier_score(str_train, str_test[te_protg],
scores_f[te_protg], quant)
ctd_m = concordance_index_ipcw(str_train, str_test[te_protg],
-scores_f[te_protg], quant)[0]
fprs[group][fold] = roc_m[0][0][1]
tprs[group][fold] = roc_m[0][0][0]
ctds[group][fold] = ctd_m
brss[group][fold] = brs_m[1][0]
roc_m = cumulative_dynamic_auc(str_train, str_test, -scores_f, [quant])
ctd_m = concordance_index_ipcw(str_train, str_test, -scores_f, quant)[0]
brs_m = brier_score(str_train, str_test, scores_f, quant)
fprs['all'][fold], tprs['all'][fold] = roc_m[0][0][1], roc_m[0][0][0]
ctds['all'][fold] = ctd_m
brss['all'][fold] = brs_m[1][0]
cols = ['b', 'r', 'g']
roc_auc = {}
ctds_mean = {}
brss_mean = {}
j = 0
for group in list(groups) + ['all']:
all_fpr = np.unique(np.concatenate([fprs[group][i] for i in set(folds)]))
# The ROC curves are interpolated at these points.
mean_tprs = []
for i in set(folds):
|
# Finally the interpolated curves are averaged over to compute AUC.
mean_tpr = np.mean(mean_tprs, axis=0)
std_tpr = 1.96 * np.std(mean_tprs, axis=0) / np.sqrt(10)
fprs[group]['macro'] = all_fpr
tprs[group]['macro'] = mean_tpr
tprs_std[group] = std_tpr
roc_auc[group] = auc(fprs[group]['macro'], tprs[group]['macro'])
ctds_mean[group] = np.mean([ctds[group][fold] for fold in folds])
brss_mean[group] = np.mean([brss[group][fold] for fold in folds])
lbl = str(group)
lbl += ' AUC:' + str(round(roc_auc[group | mean_tprs.append(np.interp(all_fpr, fprs[group][i], tprs[group][i])) | conditional_block |
plots.py | matplotlib subfigure object.
scores:
risk scores P(T>t) issued by a trained survival analysis model
(output of deep_cox_mixtures.models.predict_survival).
e:
a numpy array of event indicators.
t:
a numpy array of event/censoring times.
a:
a numpy vector of protected attributes.
folds:
a numpy vector of cv folds.
group:
List of the demographics to adjust for.
quant:
a list of event time quantiles at which the models are to be evaluated.
strat:
Specifies how the bins are computed. One of:
"quantile": Equal sized bins.
"uniform": Uniformly stratified.
adj (str):
Determines if IPCW adjustment is carried out on a population or subgroup
level.
One of "IPCWpop", "IPCWcon" (not implemented).
Returns:
A plotted matplotlib calibration curve.
"""
allscores = np.ones_like(t).astype('float')
for fold in set(folds):
allscores[folds == fold] = scores[fold]
scores = allscores
b_fc = (0, 0, 1, .4)
r_fc = (1, 0, 0, .2)
b_ec = (0, 0, 1, .8)
r_ec = (1, 0, 0, .8)
n_bins = 20
hatch = '//'
fs = 16
prob_true_n, _, outbins, ece = calibration_curve(
scores,
e,
t,
a,
group,
quant,
typ=adj,
ret_bins=True,
strat=strat,
n_bins=n_bins)
for d in range(len(prob_true_n)):
binsize = outbins[d + 1] - outbins[d]
binloc = (outbins[d + 1] + outbins[d]) / 2
gap = (prob_true_n[d] - binloc)
if gap < 0:
bottom = prob_true_n[d]
else:
bottom = prob_true_n[d] - abs(gap)
if d == len(prob_true_n) - 1:
lbl1 = 'Score'
lbl2 = 'Gap'
else:
lbl1 = None
lbl2 = None
if plot:
ax.bar(
binloc,
prob_true_n[d],
width=binsize,
facecolor=b_fc,
edgecolor=b_ec,
linewidth=2.5,
label=lbl1)
ax.bar(
binloc,
abs(gap),
bottom=bottom,
width=binsize,
facecolor=r_fc,
edgecolor=r_ec,
linewidth=2.5,
hatch=hatch,
label=lbl2)
d += 1
if plot:
ax.plot([0, 1], [0, 1], c='k', ls='--', lw=2, zorder=100)
ax.set_xlabel('Predicted Score', fontsize=fs)
ax.set_ylabel('True Score', fontsize=fs)
ax.legend(fontsize=fs)
ax.set_title(str(group), fontsize=fs)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.grid(ls=':', lw=2, zorder=-100, color='grey')
ax.set_axisbelow(True)
ax.text(
x=0.030,
y=.7,
s='ECE=' + str(round(ece, 3)),
size=fs,
bbox=dict(boxstyle='round', fc='white', ec='grey', pad=0.2))
return ece
def plot_roc_curve(ax,
scores,
e,
t,
a,
folds,
groups,
quant,
plot=True):
"""Function to plot ROC at a specified time horizon.
Accepts a matplotlib figure instance, risk scores from a trained survival
analysis model, and quantiles of event interest and generates an IPCW
adjusted ROC curve.
Args:
ax:
a matplotlib subfigure object.
scores:
risk scores P(T>t) issued by a trained survival analysis model.
e:
a numpy array of event indicators.
t:
a numpy array of event/censoring times.
a:
a numpy vector of protected attributes.
folds:
a numpy vector of cv folds.
groups:
List of the demographics to adjust for.
quant:
a list of event time quantiles at which the models are to be evaluated.
Returns:
A plotted matplotlib ROC curve.
"""
fs = 16
fprs, tprs, tprs_std, ctds, brss = {}, {}, {}, {}, {}
fprs['all'] = {}
tprs['all'] = {}
ctds['all'] = {}
brss['all'] = {}
for group in groups:
fprs[group] = {}
tprs[group] = {}
ctds[group] = {}
brss[group] = {}
for fold in set(folds):
ate = a[folds == fold]
str_test = baseline_models.structure_for_eval_(t[folds == fold],
e[folds == fold])
if len(set(folds)) == 1:
atr = ate
str_train = str_test
else:
atr = a[folds != fold]
str_train = baseline_models.structure_for_eval_(t[folds != fold],
e[folds != fold])
t_tr_max = np.max([t_[1] for t_ in str_train])
t_ = np.array([t_[1] for t_ in str_test])
clean = (t_<=t_tr_max)
str_test = str_test[t_<=t_tr_max]
ate = ate[t_<=t_tr_max]
scores_f = scores[fold][clean]
for group in groups:
te_protg = (ate == group)
tr_protg = (atr == group)
try:
roc_m = cumulative_dynamic_auc(str_train[tr_protg], str_test[te_protg],
-scores_f[te_protg], [quant])
brs_m = brier_score(str_train[tr_protg], str_test[te_protg],
scores_f[te_protg], quant)
ctd_m = concordance_index_ipcw(str_train[tr_protg], str_test[te_protg],
-scores_f[te_protg], quant)[0]
except Exception:  # subgroup-level IPCW failed; fall back to population-level estimates
roc_m = cumulative_dynamic_auc(str_train, str_test[te_protg],
-scores_f[te_protg], [quant])
brs_m = brier_score(str_train, str_test[te_protg],
scores_f[te_protg], quant)
ctd_m = concordance_index_ipcw(str_train, str_test[te_protg],
-scores_f[te_protg], quant)[0]
fprs[group][fold] = roc_m[0][0][1]
tprs[group][fold] = roc_m[0][0][0]
ctds[group][fold] = ctd_m
brss[group][fold] = brs_m[1][0]
roc_m = cumulative_dynamic_auc(str_train, str_test, -scores_f, [quant])
ctd_m = concordance_index_ipcw(str_train, str_test, -scores_f, quant)[0]
brs_m = brier_score(str_train, str_test, scores_f, quant)
fprs['all'][fold], tprs['all'][fold] = roc_m[0][0][1], roc_m[0][0][0]
ctds['all'][fold] = ctd_m
brss['all'][fold] = brs_m[1][0]
cols = ['b', 'r', 'g']
roc_auc = {}
ctds_mean = {}
brss_mean = {}
j = 0
for group in list(groups) + ['all']:
all_fpr = np.unique(np.concatenate([fprs[group][i] for i in set(folds)]))
# The ROC curves are interpolated at these points. | mean_tpr = np.mean(mean_tprs, axis=0)
std_tpr = 1.96 * np.std(mean_tprs, axis=0) / np.sqrt(10)
fprs[group]['macro'] = all_fpr
tprs[group]['macro'] = mean_tpr
tprs_std[group] = std_tpr
roc_auc[group] = auc(fprs[group]['macro'], tprs[group]['macro'])
ctds_mean[group] = np.mean([ctds[group][fold] for fold in folds])
brss_mean[group] = np.mean([brss[group][fold] for fold in folds])
lbl = str(group)
lbl += ' AUC:' + str(round(roc_auc[group | mean_tprs = []
for i in set(folds):
mean_tprs.append(np.interp(all_fpr, fprs[group][i], tprs[group][i]))
# Finally the interpolated curves are averaged over to compute AUC. | random_line_split |
plots.py | subfigure object.
scores:
risk scores P(T>t) issued by a trained survival analysis model
(output of deep_cox_mixtures.models.predict_survival).
e:
a numpy array of event indicators.
t:
a numpy array of event/censoring times.
a:
a numpy vector of protected attributes.
folds:
a numpy vector of cv folds.
group:
List of the demographics to adjust for.
quant:
a list of event time quantiles at which the models are to be evaluated.
strat:
Specifies how the bins are computed. One of:
"quantile": Equal sized bins.
"uniform": Uniformly stratified.
adj (str):
Determines if IPCW adjustment is carried out on a population or subgroup
level.
One of "IPCWpop", "IPCWcon" (not implemented).
Returns:
A plotted matplotlib calibration curve.
"""
allscores = np.ones_like(t).astype('float')
for fold in set(folds):
allscores[folds == fold] = scores[fold]
scores = allscores
b_fc = (0, 0, 1, .4)
r_fc = (1, 0, 0, .2)
b_ec = (0, 0, 1, .8)
r_ec = (1, 0, 0, .8)
n_bins = 20
hatch = '//'
fs = 16
prob_true_n, _, outbins, ece = calibration_curve(
scores,
e,
t,
a,
group,
quant,
typ=adj,
ret_bins=True,
strat=strat,
n_bins=n_bins)
for d in range(len(prob_true_n)):
binsize = outbins[d + 1] - outbins[d]
binloc = (outbins[d + 1] + outbins[d]) / 2
gap = (prob_true_n[d] - binloc)
if gap < 0:
bottom = prob_true_n[d]
else:
bottom = prob_true_n[d] - abs(gap)
if d == len(prob_true_n) - 1:
lbl1 = 'Score'
lbl2 = 'Gap'
else:
lbl1 = None
lbl2 = None
if plot:
ax.bar(
binloc,
prob_true_n[d],
width=binsize,
facecolor=b_fc,
edgecolor=b_ec,
linewidth=2.5,
label=lbl1)
ax.bar(
binloc,
abs(gap),
bottom=bottom,
width=binsize,
facecolor=r_fc,
edgecolor=r_ec,
linewidth=2.5,
hatch=hatch,
label=lbl2)
d += 1
if plot:
ax.plot([0, 1], [0, 1], c='k', ls='--', lw=2, zorder=100)
ax.set_xlabel('Predicted Score', fontsize=fs)
ax.set_ylabel('True Score', fontsize=fs)
ax.legend(fontsize=fs)
ax.set_title(str(group), fontsize=fs)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.grid(ls=':', lw=2, zorder=-100, color='grey')
ax.set_axisbelow(True)
ax.text(
x=0.030,
y=.7,
s='ECE=' + str(round(ece, 3)),
size=fs,
bbox=dict(boxstyle='round', fc='white', ec='grey', pad=0.2))
return ece
def | (ax,
scores,
e,
t,
a,
folds,
groups,
quant,
plot=True):
"""Function to plot ROC at a specified time horizon.
Accepts a matplotlib figure instance, risk scores from a trained survival
analysis model, and quantiles of event interest and generates an IPCW
adjusted ROC curve.
Args:
ax:
a matplotlib subfigure object.
scores:
risk scores P(T>t) issued by a trained survival analysis model.
e:
a numpy array of event indicators.
t:
a numpy array of event/censoring times.
a:
a numpy vector of protected attributes.
folds:
a numpy vector of cv folds.
groups:
List of the demographics to adjust for.
quant:
a list of event time quantiles at which the models are to be evaluated.
Returns:
A plotted matplotlib ROC curve.
"""
fs = 16
fprs, tprs, tprs_std, ctds, brss = {}, {}, {}, {}, {}
fprs['all'] = {}
tprs['all'] = {}
ctds['all'] = {}
brss['all'] = {}
for group in groups:
fprs[group] = {}
tprs[group] = {}
ctds[group] = {}
brss[group] = {}
for fold in set(folds):
ate = a[folds == fold]
str_test = baseline_models.structure_for_eval_(t[folds == fold],
e[folds == fold])
if len(set(folds)) == 1:
atr = ate
str_train = str_test
else:
atr = a[folds != fold]
str_train = baseline_models.structure_for_eval_(t[folds != fold],
e[folds != fold])
t_tr_max = np.max([t_[1] for t_ in str_train])
t_ = np.array([t_[1] for t_ in str_test])
clean = (t_<=t_tr_max)
str_test = str_test[t_<=t_tr_max]
ate = ate[t_<=t_tr_max]
scores_f = scores[fold][clean]
for group in groups:
te_protg = (ate == group)
tr_protg = (atr == group)
try:
roc_m = cumulative_dynamic_auc(str_train[tr_protg], str_test[te_protg],
-scores_f[te_protg], [quant])
brs_m = brier_score(str_train[tr_protg], str_test[te_protg],
scores_f[te_protg], quant)
ctd_m = concordance_index_ipcw(str_train[tr_protg], str_test[te_protg],
-scores_f[te_protg], quant)[0]
except Exception:  # subgroup-level IPCW failed; fall back to population-level estimates
roc_m = cumulative_dynamic_auc(str_train, str_test[te_protg],
-scores_f[te_protg], [quant])
brs_m = brier_score(str_train, str_test[te_protg],
scores_f[te_protg], quant)
ctd_m = concordance_index_ipcw(str_train, str_test[te_protg],
-scores_f[te_protg], quant)[0]
fprs[group][fold] = roc_m[0][0][1]
tprs[group][fold] = roc_m[0][0][0]
ctds[group][fold] = ctd_m
brss[group][fold] = brs_m[1][0]
roc_m = cumulative_dynamic_auc(str_train, str_test, -scores_f, [quant])
ctd_m = concordance_index_ipcw(str_train, str_test, -scores_f, quant)[0]
brs_m = brier_score(str_train, str_test, scores_f, quant)
fprs['all'][fold], tprs['all'][fold] = roc_m[0][0][1], roc_m[0][0][0]
ctds['all'][fold] = ctd_m
brss['all'][fold] = brs_m[1][0]
cols = ['b', 'r', 'g']
roc_auc = {}
ctds_mean = {}
brss_mean = {}
j = 0
for group in list(groups) + ['all']:
all_fpr = np.unique(np.concatenate([fprs[group][i] for i in set(folds)]))
# The ROC curves are interpolated at these points.
mean_tprs = []
for i in set(folds):
mean_tprs.append(np.interp(all_fpr, fprs[group][i], tprs[group][i]))
# Finally the interpolated curves are averaged over to compute AUC.
mean_tpr = np.mean(mean_tprs, axis=0)
std_tpr = 1.96 * np.std(mean_tprs, axis=0) / np.sqrt(10)
fprs[group]['macro'] = all_fpr
tprs[group]['macro'] = mean_tpr
tprs_std[group] = std_tpr
roc_auc[group] = auc(fprs[group]['macro'], tprs[group]['macro'])
ctds_mean[group] = np.mean([ctds[group][fold] for fold in folds])
brss_mean[group] = np.mean([brss[group][fold] for fold in folds])
lbl = str(group)
lbl += ' AUC:' + str(round(roc_auc[group | plot_roc_curve | identifier_name |
main.rs | }
fn bar() -> Result<i32, String> {
Ok(2)
}
fn foo_bar() -> Result<i32, String> {
let res = foo(2)? + bar()?;
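// `?` returns early with the Err on failure, otherwise it unwraps the Ok value.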
Ok(res)
}
let fb = foo_bar();
assert!(fb.is_ok());
}
fn _apuntadores_a_funcion() {
fn mas_uno(i: i32) -> i32 {
i + 1
}
let f: fn(i32) -> i32 = mas_uno;
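// A plain fn item coerces to the function-pointer type fn(i32) -> i32.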
assert_eq!(2, f(1));
}
fn _primitivos() {
let _a: bool = false;
let _b: char = 'x';
let _c: i32 = 42; //i8, i16, i32, i64, u8, u16, u32, u64, isize, usize, f32, f64
}
fn _arreglos() {
let mut m: [i32; 3] = [1, 2, 3];
m[2] = 5;
assert_eq!(5, m[2]);
}
fn _slices() {
let a: [i32; 5] = [0, 1, 2, 3, 4];
let middle: &[i32] = &a[1..4];
assert_eq!(1, middle[0]);
}
fn _tuplas() {
let (x, y) = (1, "Hello");
assert_eq!(1, x);
assert_eq!("Hello", y);
let z = (1, "Hello");
assert_eq!(1, z.0);
}
fn _expresiones() {
let x = 5;
let y = if x == 5 { 10 } else { 15 };
assert_eq!(10, y)
}
fn _while() {
let mut x = 0;
while x < 10 {
x += 1;
}
assert_eq!(10, x)
}
fn _for() {
for x in 0..10 {
println!("{}", x);
}
}
fn _loop() {
let mut x = 0;
loop {
x += 1;
if x >= 10 {
break
}
}
assert_eq!(10, x)
}
fn _etiquetas_loop() {
'exterior: for x in 0..10 {
'interior: for y in 0..10 {
if x % 2 == 0 { continue 'exterior; } // continue the loop labeled 'exterior (skip this x)
if y % 2 == 0 { continue 'interior; } // continue the loop labeled 'interior (skip this y)
println!("x: {}, y: {}", x, y);
}
}
}
fn _enumerate() {
for (i,j) in (5..10).enumerate() {
println!("i = {} y j = {}", i, j);
}
let lineas = "hola\nmundo".lines();
for (numero_linea, linea) in lineas.enumerate() {
println!("{}: {}", numero_linea, linea);
}
}
fn _pertenencia() {
let v = vec![1, 2, 3];
let v2 = v;
println!("v2[0] es: {}", v2[0]);
//println!("v[0] es: {}", v[0]); // Error borrow of moved value: `v`
}
fn _pertenencia_funcion() {
fn tomar(_v: Vec<i32>) {
// Do something
}
let v = vec![1, 2, 3];
tomar(v);
//println!("v[0] es: {}", v[0]); // Error borrow of moved value: `v`
}
fn _copy() {
// i32: all primitive types implement the Copy trait
// A copy is made, so ownership is not moved
let v: i32 = 1;
let _v2 = v;
println!("v es: {}", v); // =)
}
fn _devolver_pertenencia() {
fn _foo(v: Vec<i32>) -> Vec<i32> {
v
}
fn foo(v1: Vec<i32>, v2: Vec<i32>) -> (Vec<i32>, Vec<i32>, i32) {
(v1, v2, 42)
}
let v1 = vec![1, 2, 3];
let v2 = vec![1, 2, 3];
let (v1, _v2, _r) = foo(v1, v2);
assert_eq!(1, v1[0]);
}
fn _prestamo() {
fn foo(_v1: &Vec<i32>, _v2: &Vec<i32>) -> i32 {
42
}
let v1 = vec![1, 2, 3];
let _v2 = vec![1, 2, 3];
let _r = foo(&v1, &_v2);
// we can still use v1 and v2 here
assert_eq!(1, v1[0]);
}
fn _mutabilidad() {
let mut x = 5;
assert_eq!(5, x);
x = 6;
assert_eq!(6, x);
}
fn _estructuras() {
struct Punto {
x: i32,
y: i32,
}
let origen = Punto { x: 0, y: 0 };
assert_eq!(0, origen.x);
assert_eq!(0, origen.y);
}
fn _sintaxis_de_actualizacion() {
struct Punto3d {
_x: i32,
_y: i32,
_z: i32,
}
let origen = Punto3d { _x: 1, _y: 2, _z: 3 };
let punto = Punto3d { _y: 1, .. origen };
assert_eq!(3, punto._z);
}
fn _estructuras_pertenencia() {
struct Punto {
x: i32,
y: i32,
}
fn foo(punto: Punto) -> i32 {
punto.x + punto.y
}
let origen = Punto { x: 1, y: 2 };
let suma = foo(origen);
println!("{}", suma);
//println!("Punto x {}", origen.x); // Error borrow of moved value: `origen`
}
fn _estructuras_prestamo() {
struct Punto {
x: i32,
y: i32,
}
fn foo(punto: &Punto) -> i32 {
punto.x + punto.y
}
let origen = Punto { x: 1, y: 2 };
let suma = foo(&origen);
assert_eq!(3, suma);
assert_eq!(1, origen.x);
}
fn _tupla_estructuras() {
struct Color(i32, i32, i32);
let azul = Color(0, 0, 255);
assert_eq!(255, azul.2);
}
fn _estructuras_tipo_unitario() {
struct Electron;
let _e = Electron;
}
fn _enumeraciones() {
enum Mensaje {
Salir,
CambiarColor(i32, i32, i32),
Mover { _x: i32, _y: i32 },
Escribir(String),
}
let _salir = Mensaje::Salir;
let _cambiar_color = Mensaje::CambiarColor(0, 0, 255);
use Mensaje::{Mover};
let _mover = Mover {_x: 0, _y: 2};
let _escribir = Mensaje::Escribir("Hello".to_string());
}
fn _match_en_enums() {
enum _Mensaje {
Salir,
CambiarColor(i32, i32, i32),
Mover { x: i32, _y: i32 },
Escribir(String),
}
fn _salir() { /* ... */ }
fn _cambiar_color(_r: i32, _g: i32, _b: i32) { /* ... */ }
fn _mover_cursor(_x: i32, _y: i32) { /* ... */ }
fn _procesar_mensaje(msj: _Mensaje) {
match msj {
_Mensaje::Salir => _salir(),
_Mensaje::CambiarColor(r, g, b) => _cambiar_color(r, g, b),
_Mensaje::Mover { x, _y: y } => _mover_cursor(x, y),
_Mensaje::Escribir(s) => println!("{}", s), | };
}
}
fn _multiples_patrones() { | random_line_split |
|
main.rs | , number);
}
fn _result_funciones() {
enum Error {
Tecnico
}
let f: fn(i32) -> Result<i32, Error> = |num: i32| match num {
1 => Ok(num + 1),
_ => Err(Error::Tecnico)
};
/*fn f(num: i32) -> Result<i32, Error> {
match num {
1 => Ok(num + 1),
_ => Err(Error::Tecnico)
}
}*/
assert!(f(1).is_ok());
assert!(f(2).is_err());
let result: Result<i32, &str> = f(2)
.map(|ok| ok)
.map_err(|_err| "Error =(");
match result {
Ok(n) => println!("{}", n),
Err(e) => println!("{}", e)
};
}
fn _panic_result() {
let result: Result<i32, &str> = Ok(1);
//let result: Result<i32, &str> = Err("Error =(");
let valor = result.ok().expect("Error!");
assert_eq!(1, valor)
}
fn _try() {
fn _parser(num: &str) -> Result<i32, ParseIntError> {
num.parse()
}
fn f(x: &str, y: &str) -> Result<i32, ParseIntError> {
let num1 = _parser(x);
let num2 = _parser(y);
//let resultado = _parser(x) ? + _parser(y)?;
let resultado = num1? + num2?;
Ok(resultado)
}
assert!(f("1", "2").is_ok());
assert!(f("1P", "2").is_err());
match f("1P", "2") {
Ok(n) => println!("Ok: {}", n),
Err(e) => println!("Error: {}", e)
}
}
fn _try_azucar_sintactico() {
fn foo(n: i32) -> Result<i32, String> {
if n % 2 == 0 {
Ok(1)
} else { Err(String::from("Error")) }
}
fn bar() -> Result<i32, String> {
Ok(2)
}
fn foo_bar() -> Result<i32, String> {
let res = foo(2)? + bar()?;
Ok(res)
}
let fb = foo_bar();
assert!(fb.is_ok());
}
fn _apuntadores_a_funcion() {
fn mas_uno(i: i32) -> i32 {
i + 1
}
let f: fn(i32) -> i32 = mas_uno;
assert_eq!(2, f(1));
}
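// Added sketch (not in the original): function pointers can be passed to other
// functions just like closures that capture nothing.
fn _apuntadores_como_argumento() {
    fn mas_uno(i: i32) -> i32 {
        i + 1
    }
    fn aplicar(f: fn(i32) -> i32, v: i32) -> i32 {
        f(v)
    }
    assert_eq!(2, aplicar(mas_uno, 1));
}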
fn _primitivos() {
let _a: bool = false;
let _b: char = 'x';
let _c: i32 = 42; //i8, i16, i32, i64, u8, u16, u32, u64, isize, usize, f32, f64
}
fn _arreglos() {
let mut m: [i32; 3] = [1, 2, 3];
m[2] = 5;
assert_eq!(5, m[2]);
}
fn _slices() {
let a: [i32; 5] = [0, 1, 2, 3, 4];
let middle: &[i32] = &a[1..4];
assert_eq!(1, middle[0]);
}
fn _tuplas() {
let (x, y) = (1, "Hello");
assert_eq!(1, x);
assert_eq!("Hello", y);
let z = (1, "Hello");
assert_eq!(1, z.0);
}
fn _expresiones() {
let x = 5;
let y = if x == 5 { 10 } else { 15 };
assert_eq!(10, y)
}
fn _while() {
let mut x = 0;
while x < 10 {
x += 1;
}
assert_eq!(10, x)
}
fn _for() {
for x in 0..10 {
println!("{}", x);
}
}
fn _loop() {
let mut x = 0;
loop {
x += 1;
if x >= 10 {
break
}
}
assert_eq!(10, x)
}
fn _etiquetas_loop() {
'exterior: for x in 0..10 {
'interior: for y in 0..10 {
if x % 2 == 0 { continue 'exterior; } // continues the loop over x
if y % 2 == 0 { continue 'interior; } // continues the loop over y
println!("x: {}, y: {}", x, y);
}
}
}
fn _enumerate() {
for (i,j) in (5..10).enumerate() {
println!("i = {} y j = {}", i, j);
}
let lineas = "hola\nmundo".lines();
for (numero_linea, linea) in lineas.enumerate() {
println!("{}: {}", numero_linea, linea);
}
}
fn _pertenencia() {
let v = vec![1, 2, 3];
let v2 = v;
println!("v2[0] es: {}", v2[0]);
//println!("v[0] es: {}", v[0]); // Error borrow of moved value: `v`
}
fn _pertenencia_funcion() {
fn tomar(_v: Vec<i32>) {
// Something
}
let v = vec![1, 2, 3];
tomar(v);
//println!("v[0] es: {}", v[0]); // Error borrow of moved value: `v`
}
fn _copy() {
// i32: all primitive types implement the Copy trait
// a copy is made, so ownership is not moved
let v: i32 = 1;
let _v2 = v;
println!("v es: {}", v); // =)
}
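// Added contrast sketch (not in the original): Vec<i32> does not implement Copy,
// so the same assignment moves ownership instead of copying.
fn _copy_vs_move() {
    let a: i32 = 1;
    let _b = a; // i32 is Copy: `a` stays usable
    assert_eq!(1, a);
    let v = vec![1];
    let _w = v; // Vec is not Copy: `v` is moved
    // println!("{:?}", v); // would not compile: borrow of moved value
}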
fn _devolver_pertenencia() {
fn _foo(v: Vec<i32>) -> Vec<i32> {
v
}
fn foo(v1: Vec<i32>, v2: Vec<i32>) -> (Vec<i32>, Vec<i32>, i32) {
(v1, v2, 42)
}
let v1 = vec![1, 2, 3];
let v2 = vec![1, 2, 3];
let (v1, _v2, _r) = foo(v1, v2);
assert_eq!(1, v1[0]);
}
fn _prestamo() {
fn foo(_v1: &Vec<i32>, _v2: &Vec<i32>) -> i32 {
42
}
let v1 = vec![1, 2, 3];
let _v2 = vec![1, 2, 3];
let _r = foo(&v1, &_v2);
// we can still use v1 and v2 here
assert_eq!(1, v1[0]);
}
fn _mutabilidad() {
let mut x = 5;
assert_eq!(5, x);
x = 6;
assert_eq!(6, x);
}
fn _estructuras() {
struct Punto {
x: i32,
y: i32,
}
let origen = Punto { x: 0, y: 0 };
assert_eq!(0, origen.x);
assert_eq!(0, origen.y);
}
fn _sintaxis_de_actualizacion() {
struct Punto3d {
_x: i32,
_y: i32,
_z: i32,
}
let origen = Punto3d { _x: 1, _y: 2, _z: 3 };
let punto = Punto3d { _y: 1, .. origen };
assert_eq!(3, punto._z);
}
fn _estructuras_pertenencia() {
struct Punto {
x: i32,
y: i32,
}
fn foo(punto: Punto) -> i32 {
punto.x + punto.y
}
let origen = Punto { x: 1, y: 2 };
let suma = foo(origen);
println!("{}", suma);
//println!("Punto x {}", origen.x); // Error borrow of moved value: `origen`
}
fn _estructuras_prestamo() {
struct Punto {
x: i32,
y: i32,
}
fn foo(punto: &Punto) -> i32 {
punto.x + punto.y
}
// ===== dataset row boundary: fim_type = identifier_body; next row file_name = main.rs (window resumes mid-file) =====
fn bar() -> Result<i32, String> {
Ok(2)
}
fn foo_bar() -> Result<i32, String> {
let res = foo(2)? + bar()?;
Ok(res)
}
let fb = foo_bar();
assert!(fb.is_ok());
}
fn _apuntadores_a_funcion() {
fn mas_uno(i: i32) -> i32 {
i + 1
}
let f: fn(i32) -> i32 = mas_uno;
assert_eq!(2, f(1));
}
fn _primitivos() {
let _a: bool = false;
let _b: char = 'x';
let _c: i32 = 42; //i8, i16, i32, i64, u8, u16, u32, u64, isize, usize, f32, f64
}
fn _arreglos() {
let mut m: [i32; 3] = [1, 2, 3];
m[2] = 5;
assert_eq!(5, m[2]);
}
fn _slices() {
let a: [i32; 5] = [0, 1, 2, 3, 4];
let middle: &[i32] = &a[1..4];
assert_eq!(1, middle[0]);
}
fn _tuplas() {
let (x, y) = (1, "Hello");
assert_eq!(1, x);
assert_eq!("Hello", y);
let z = (1, "Hello");
assert_eq!(1, z.0);
}
fn _expresiones() {
let x = 5;
let y = if x == 5 { 10 } else { 15 };
assert_eq!(10, y)
}
fn _while() {
let mut x = 0;
while x < 10 {
x += 1;
}
assert_eq!(10, x)
}
fn _for() {
for x in 0..10 {
println!("{}", x);
}
}
fn _loop() {
let mut x = 0;
loop {
x += 1;
if x >= 10 {
break
}
}
assert_eq!(10, x)
}
fn _etiquetas_loop() {
'exterior: for x in 0..10 {
'interior: for y in 0..10 {
if x % 2 == 0 { continue 'exterior; } // continues the loop over x
if y % 2 == 0 { continue 'interior; } // continues the loop over y
println!("x: {}, y: {}", x, y);
}
}
}
fn _enumerate() {
for (i,j) in (5..10).enumerate() {
println!("i = {} y j = {}", i, j);
}
let lineas = "hola\nmundo".lines();
for (numero_linea, linea) in lineas.enumerate() {
println!("{}: {}", numero_linea, linea);
}
}
fn _pertenencia() {
let v = vec![1, 2, 3];
let v2 = v;
println!("v2[0] es: {}", v2[0]);
//println!("v[0] es: {}", v[0]); // Error borrow of moved value: `v`
}
fn _pertenencia_funcion() {
fn tomar(_v: Vec<i32>) {
// Something
}
let v = vec![1, 2, 3];
tomar(v);
//println!("v[0] es: {}", v[0]); // Error borrow of moved value: `v`
}
fn _copy() {
// i32: all primitive types implement the Copy trait
// a copy is made, so ownership is not moved
let v: i32 = 1;
let _v2 = v;
println!("v es: {}", v); // =)
}
fn _devolver_pertenencia() {
fn _foo(v: Vec<i32>) -> Vec<i32> {
v
}
fn foo(v1: Vec<i32>, v2: Vec<i32>) -> (Vec<i32>, Vec<i32>, i32) {
(v1, v2, 42)
}
let v1 = vec![1, 2, 3];
let v2 = vec![1, 2, 3];
let (v1, _v2, _r) = foo(v1, v2);
assert_eq!(1, v1[0]);
}
fn _prestamo() {
fn foo(_v1: &Vec<i32>, _v2: &Vec<i32>) -> i32 {
42
}
let v1 = vec![1, 2, 3];
let _v2 = vec![1, 2, 3];
let _r = foo(&v1, &_v2);
// we can still use v1 and v2 here
assert_eq!(1, v1[0]);
}
fn _mutabilidad() {
let mut x = 5;
assert_eq!(5, x);
x = 6;
assert_eq!(6, x);
}
fn _estructuras() {
struct Punto {
x: i32,
y: i32,
}
let origen = Punto { x: 0, y: 0 };
assert_eq!(0, origen.x);
assert_eq!(0, origen.y);
}
fn _sintaxis_de_actualizacion() {
struct Punto3d {
_x: i32,
_y: i32,
_z: i32,
}
let origen = Punto3d { _x: 1, _y: 2, _z: 3 };
let punto = Punto3d { _y: 1, .. origen };
assert_eq!(3, punto._z);
}
fn _estructuras_pertenencia() {
struct Punto {
x: i32,
y: i32,
}
fn foo(punto: Punto) -> i32 {
punto.x + punto.y
}
let origen = Punto { x: 1, y: 2 };
let suma = foo(origen);
println!("{}", suma);
//println!("Punto x {}", origen.x); // Error borrow of moved value: `origen`
}
fn _estructuras_prestamo() {
struct Punto {
x: i32,
y: i32,
}
fn foo(punto: &Punto) -> i32 {
punto.x + punto.y
}
let origen = Punto { x: 1, y: 2 };
let suma = foo(&origen);
assert_eq!(3, suma);
assert_eq!(1, origen.x);
}
fn _tupla_estructuras() {
struct Color(i32, i32, i32);
let azul = Color(0, 0, 255);
assert_eq!(255, azul.2);
}
fn _estructuras_tipo_unitario() {
struct Electron;
let _e = Electron;
}
fn _enumeraciones() {
enum Mensaje {
Salir,
CambiarColor(i32, i32, i32),
Mover { _x: i32, _y: i32 },
Escribir(String),
}
let _salir = Mensaje::Salir;
let _cambiar_color = Mensaje::CambiarColor(0, 0, 255);
use Mensaje::{Mover};
let _mover = Mover {_x: 0, _y: 2};
let _escribir = Mensaje::Escribir("Hello".to_string());
}
fn _match_en_enums() {
enum _Mensaje {
Salir,
CambiarColor(i32, i32, i32),
Mover { x: i32, _y: i32 },
Escribir(String),
}
fn _salir() { /* ... */ }
fn _cambiar_color(_r: i32, _g: i32, _b: i32) { /* ... */ }
fn _mover_cursor(_x: i32, _y: i32) { /* ... */ }
fn _procesar_mensaje(msj: _Mensaje) {
match msj {
_Mensaje::Salir => _salir(),
_Mensaje::CambiarColor(r, g, b) => _cambiar_color(r, g, b),
_Mensaje::Mover { x, _y: y } => _mover_cursor(x, y),
_Mensaje::Escribir(s) => println!("{}", s),
};
}
}
fn _multiples_patrones() {
// ===== dataset row boundary: fim_type = identifier_name; next row file_name = main.rs (window resumes mid-match) =====
Some(x) => x,
None => 0,
};
assert_eq!(5, number);
}
fn _result_funciones() {
enum Error {
Tecnico
}
let f: fn(i32) -> Result<i32, Error> = |num: i32| match num {
1 => Ok(num + 1),
_ => Err(Error::Tecnico)
};
/*fn f(num: i32) -> Result<i32, Error> {
match num {
1 => Ok(num + 1),
_ => Err(Error::Tecnico)
}
}*/
assert!(f(1).is_ok());
assert!(f(2).is_err());
let result: Result<i32, &str> = f(2)
.map(|ok| ok)
.map_err(|_err| "Error =(");
match result {
Ok(n) => println!("{}", n),
Err(e) => println!("{}", e)
};
}
fn _panic_result() {
let result: Result<i32, &str> = Ok(1);
//let result: Result<i32, &str> = Err("Error =(");
let valor = result.ok().expect("Error!");
assert_eq!(1, valor)
}
fn _try() {
fn _parser(num: &str) -> Result<i32, ParseIntError> {
num.parse()
}
fn f(x: &str, y: &str) -> Result<i32, ParseIntError> {
let num1 = _parser(x);
let num2 = _parser(y);
//let resultado = _parser(x) ? + _parser(y)?;
let resultado = num1? + num2?;
Ok(resultado)
}
assert!(f("1", "2").is_ok());
assert!(f("1P", "2").is_err());
match f("1P", "2") {
Ok(n) => println!("Ok: {}", n),
Err(e) => println!("Error: {}", e)
}
}
fn _try_azucar_sintactico() {
fn foo(n: i32) -> Result<i32, String> {
if n % 2 == 0 {
Ok(1)
} else { Err(String::from("Error")) }
}
fn bar() -> Result<i32, String> {
Ok(2)
}
fn foo_bar() -> Result<i32, String> {
let res = foo(2)? + bar()?;
Ok(res)
}
let fb = foo_bar();
assert!(fb.is_ok());
}
fn _apuntadores_a_funcion() {
fn mas_uno(i: i32) -> i32 {
i + 1
}
let f: fn(i32) -> i32 = mas_uno;
assert_eq!(2, f(1));
}
fn _primitivos() {
let _a: bool = false;
let _b: char = 'x';
let _c: i32 = 42; //i8, i16, i32, i64, u8, u16, u32, u64, isize, usize, f32, f64
}
fn _arreglos() {
let mut m: [i32; 3] = [1, 2, 3];
m[2] = 5;
assert_eq!(5, m[2]);
}
fn _slices() {
let a: [i32; 5] = [0, 1, 2, 3, 4];
let middle: &[i32] = &a[1..4];
assert_eq!(1, middle[0]);
}
fn _tuplas() {
let (x, y) = (1, "Hello");
assert_eq!(1, x);
assert_eq!("Hello", y);
let z = (1, "Hello");
assert_eq!(1, z.0);
}
fn _expresiones() {
let x = 5;
let y = if x == 5 { 10 } else { 15 };
assert_eq!(10, y)
}
fn _while() {
let mut x = 0;
while x < 10 {
x += 1;
}
assert_eq!(10, x)
}
fn _for() {
for x in 0..10 {
println!("{}", x);
}
}
fn _loop() {
let mut x = 0;
loop {
x += 1;
if x >= 10 {
break
}
}
assert_eq!(10, x)
}
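// Added sketch (not in the original file): `loop` is also an expression, so
// `break` can return a value directly instead of mutating a counter outside.
fn _loop_con_valor() {
    let mut n = 0;
    let x = loop {
        n += 1;
        if n >= 10 {
            break n;
        }
    };
    assert_eq!(10, x);
}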
fn _etiquetas_loop() {
'exterior: for x in 0..10 {
'interior: for y in 0..10 {
if x % 2 == 0 { continue 'exterior; } // continues the loop over x
if y % 2 == 0 { continue 'interior; } // continues the loop over y
println!("x: {}, y: {}", x, y);
}
}
}
fn _enumerate() {
for (i,j) in (5..10).enumerate() {
println!("i = {} y j = {}", i, j);
}
let lineas = "hola\nmundo".lines();
for (numero_linea, linea) in lineas.enumerate() {
println!("{}: {}", numero_linea, linea);
}
}
fn _pertenencia() {
let v = vec![1, 2, 3];
let v2 = v;
println!("v2[0] es: {}", v2[0]);
//println!("v[0] es: {}", v[0]); // Error borrow of moved value: `v`
}
fn _pertenencia_funcion() {
fn tomar(_v: Vec<i32>) {
// Something
}
let v = vec![1, 2, 3];
tomar(v);
//println!("v[0] es: {}", v[0]); // Error borrow of moved value: `v`
}
fn _copy() {
// i32: all primitive types implement the Copy trait
// a copy is made, so ownership is not moved
let v: i32 = 1;
let _v2 = v;
println!("v es: {}", v); // =)
}
fn _devolver_pertenencia() {
fn _foo(v: Vec<i32>) -> Vec<i32> {
v
}
fn foo(v1: Vec<i32>, v2: Vec<i32>) -> (Vec<i32>, Vec<i32>, i32) {
(v1, v2, 42)
}
let v1 = vec![1, 2, 3];
let v2 = vec![1, 2, 3];
let (v1, _v2, _r) = foo(v1, v2);
assert_eq!(1, v1[0]);
}
fn _prestamo() {
fn foo(_v1: &Vec<i32>, _v2: &Vec<i32>) -> i32 {
42
}
let v1 = vec![1, 2, 3];
let _v2 = vec![1, 2, 3];
let _r = foo(&v1, &_v2);
// we can still use v1 and v2 here
assert_eq!(1, v1[0]);
}
fn _mutabilidad() {
let mut x = 5;
assert_eq!(5, x);
x = 6;
assert_eq!(6, x);
}
fn _estructuras() {
struct Punto {
x: i32,
y: i32,
}
let origen = Punto { x: 0, y: 0 };
assert_eq!(0, origen.x);
assert_eq!(0, origen.y);
}
fn _sintaxis_de_actualizacion() {
struct Punto3d {
_x: i32,
_y: i32,
_z: i32,
}
let origen = Punto3d { _x: 1, _y: 2, _z: 3 };
let punto = Punto3d { _y: 1, .. origen };
assert_eq!(3, punto._z);
}
fn _estructuras_pertenencia() {
struct Punto {
x: i32,
y: i32,
}
fn foo(punto: Punto) -> i32 {
punto.x + punto.y
}
let origen = Punto { x: 1, y: 2 };
let suma = foo(origen);
println!("{}", suma);
//println!("Punto x {}", origen.x); // Error borrow of moved value: `origen`
}
fn _estructuras_prestamo() {
struct Punto {
x: i32,
y: i32,
}
fn foo(punto: &Punto) -> i32 {
// ===== dataset row boundary: fim_type = conditional_block; next row file_name = main.go (window resumes inside func main) =====
func main() {
player := NewPlayer()
treasure := NewTreasure()
treasureMap := NewTreasureMap(mapSize)
treasureMap.createMap(listCustomObstacle)
treasureMap.setEntity(entity_player, player.Position)
for {
treasure.randomizePosition(mapSize[0], mapSize[1])
if treasureMap.setEntity(entity_treasure, treasure.Position) {
break
}
}
treasureMap.render() // display initial condition with treasure
fmt.Println("Initial Condition, treasure hid in:", treasure.Position, "Wait for it..")
time.Sleep(pause_time * time.Millisecond)
treasureMap.setPossibleTreasure()
treasureMap.render() // display map with possible treasure location
fmt.Println("Now it's hidden! Let's go find it!")
time.Sleep(pause_time * time.Millisecond)
for !player.FoundTreasure {
// the player sees along unobstructed paths and determines which cell is the treasure and which is a path
treasurePositionXY, listPathPosition := player.see(treasureMap)
for _, pathPosition := range listPathPosition {
treasureMap.setEntity(entity_path, pathPosition)
treasureMap.updatePossibleTreasureLocation(listPathPosition)
}
if !player.FoundTreasure {
// keep moving until the treasure is found
newPosition, _ := player.move(treasureMap.Mapping)
oldPosition := player.Position
// stop when the player cannot move any longer
if newPosition == oldPosition {
break
}
// move the player to the new position, leaving a path at the old position
treasureMap.setEntity(entity_path, oldPosition)
treasureMap.setEntity(entity_player, newPosition)
treasureMap.render()
// update player position
player.setPosition(newPosition)
} else {
treasureMap.clearPossibleTreasureLocation()
treasureMap.setTreasureLocation(treasurePositionXY)
treasureMap.revealMap(treasurePositionXY)
treasureMap.render()
break
}
}
}
// NewPlayer creates a new player at the initial position
func NewPlayer() Player {
return Player{
Position: playerStartPosition,
Range: make(map[int]int),
}
}
// setPosition updates the player's position
func (p *Player) setPosition(newPosition [2]int) {
p.Position = newPosition
}
// move updates the entity_player coordinates, limited to the predefined directions
func (p *Player) move(treasureMap map[[2]int]int) ([2]int, bool) {
if p.DirectionTaken == up {
newPlayerPositionXY := [2]int{p.Position[0], p.Position[1] + 1}
if treasureMap[newPlayerPositionXY] == entity_obstacle {
p.DirectionTaken = right
} else {
return newPlayerPositionXY, true
}
}
if p.DirectionTaken == right {
newPlayerPositionXY := [2]int{p.Position[0] + 1, p.Position[1]}
if treasureMap[newPlayerPositionXY] == entity_obstacle {
p.DirectionTaken = down
} else {
return newPlayerPositionXY, true
}
}
if p.DirectionTaken == down {
newPlayerPositionXY := [2]int{p.Position[0], p.Position[1] - 1}
if treasureMap[newPlayerPositionXY] == entity_obstacle {
p.DirectionTaken = stuck
} else {
return newPlayerPositionXY, true
}
}
return p.Position, false
}
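// Added usage sketch (not part of the original file). It assumes the entity_* and
// direction constants defined elsewhere in this package, with the Player zero
// value for DirectionTaken meaning "up" and entity_obstacle being non-zero.
func _exampleMove() {
	p := NewPlayer()
	m := map[[2]int]int{
		{p.Position[0], p.Position[1] + 1}: entity_obstacle, // wall directly above
	}
	next, moved := p.move(m)
	fmt.Println(next, moved) // expected: one step to the right, true
}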
// see checks every unobstructed line along X and Y from the entity_player position
func (p *Player) see(treasureMap TreasureMap) ([2]int, [][2]int) {
var (
startX, startY = p.Position[0], p.Position[1]
treasurePosition, treasureFound [2]int
listPathPosition, pathFound [][2]int
)
// see all entity in x axis with same y axis / right direction ->
treasurePosition, pathFound = checkMap(treasureMap, startX+1, startY, 1, axis_x)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[right] = len(pathFound)
// see all entity in -x axis with same y axis / left direction <-
treasurePosition, pathFound = checkMap(treasureMap, startX-1, startY, -1, axis_x)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[left] = len(pathFound)
// see all entity in y axis with same x axis / up direction ^
treasurePosition, pathFound = checkMap(treasureMap, startY+1, startX, 1, axis_y)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[up] = len(pathFound)
// see all entity in -y axis with same x axis / down direction v
treasurePosition, pathFound = checkMap(treasureMap, startY-1, startX, -1, axis_y)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[down] = len(pathFound)
if treasureMap.OriginalMapping[treasureFound] == entity_treasure {
p.FoundTreasure = true
}
// check for a possible path intersection, preferring the direction most likely to explore more of the map
if p.DirectionTaken == up && p.Range[right] > p.Range[up] {
p.DirectionTaken = right
} else if p.DirectionTaken == right && p.Range[down] > p.Range[right] {
p.DirectionTaken = down
}
return treasureFound, listPathPosition
}
// checkMap is a shorthand that validates an unobstructed line of sight in the original mapping; it returns the treasure location and the list of clear path positions in sight
func checkMap(treasureMap TreasureMap, startAxis int, staticAxis int, addValue int, typeAxis int) ([2]int, [][2]int) {
var (
check = true
treasurePosition [2]int
pathPosition [][2]int
currentPosition [2]int
)
for check {
if typeAxis == axis_x {
currentPosition = [2]int{startAxis, staticAxis}
} else {
currentPosition = [2]int{staticAxis, startAxis}
}
if check {
switch treasureMap.OriginalMapping[currentPosition] {
case entity_path:
pathPosition = append(pathPosition, currentPosition)
case entity_treasure:
treasurePosition = currentPosition
case entity_obstacle:
check = false
default:
check = false
}
}
startAxis += addValue
}
return treasurePosition, pathPosition
}
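// Added usage sketch (not part of the original): scanning right along the x axis
// from (2,1) on a tiny hand-built original mapping. It assumes the entity
// constants are non-zero so unset cells fall through to the default case.
func _exampleCheckMap() {
	tm := NewTreasureMap([2]int{5, 5})
	tm.OriginalMapping[[2]int{2, 1}] = entity_path
	tm.OriginalMapping[[2]int{3, 1}] = entity_treasure
	treasurePos, paths := checkMap(tm, 2, 1, 1, axis_x)
	fmt.Println(treasurePos, paths) // expected: (3,1) and [(2,1)]
}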
// NewTreasure creates a new blank Treasure
func NewTreasure() Treasure {
return Treasure{}
}
// randomizePosition places the entity_treasure on the map at random. It may take several attempts to land the entity_treasure on a clear entity_path
func (t *Treasure) randomizePosition(sizeX, sizeY int) {
var (
xMin, xMax = 1, sizeX
yMin, yMax = 1, sizeY
treasurePositionX, treasurePositionY int
treasurePositionXY [2]int
)
rand.Seed(time.Now().UnixNano())
treasurePositionX = rand.Intn(xMax-xMin) + xMin
treasurePositionY = rand.Intn(yMax-yMin) + yMin
treasurePositionXY = [2]int{treasurePositionX, treasurePositionY}
t.Position = treasurePositionXY
}
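// Added note (not in the original): rand.Seed is deprecated in newer Go releases;
// a local generator gives equivalent behaviour without touching global state.
func _randomPositionModern(sizeX, sizeY int) [2]int {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return [2]int{r.Intn(sizeX-1) + 1, r.Intn(sizeY-1) + 1}
}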
// NewTreasureMap creates a new blank treasure map
func NewTreasureMap(size [2]int) TreasureMap {
return TreasureMap{
Size: size,
Mapping: make(map[[2]int]int),
OriginalMapping: make(map[[2]int]int),
ListPossibleTreasureLocation: make(map[[2]int]bool),
}
}
// render displays the mapping (not the original mapping). It also prints the list of possible treasure locations.
func (tm *TreasureMap) render() {
var (
treasureMapDrawPerLine, treasureMapDrawComplete, treasureMapAdditional string
)
for y := 1; y <= tm.Size[1]; y++ {
treasureMapDrawPerLine = ""
if y < tm.Size[1] {
treasureMapDrawPerLine = "\n"
}
for x := 1; x <= tm.Size[0]; x++ {
treasureMapDrawPerLine = treasureMapDrawPerLine + convertIntToEntity(tm.Mapping[[2]int{x, y}])
}
treasureMapDrawComplete = treasureMapDrawPerLine + treasureMapDrawComplete
}
// ===== dataset row boundary; next row file_name = main.go (window resumes mid-file) =====
if treasureMap.setEntity(entity_treasure, treasure.Position) {
break
}
}
treasureMap.render() // display initial condition with treasure
fmt.Println("Initial Condition, treasure hid in:", treasure.Position, "Wait for it..")
time.Sleep(pause_time * time.Millisecond)
treasureMap.setPossibleTreasure()
treasureMap.render() // display map with possible treasure location
fmt.Println("Now it's hidden! Let's go find it!")
time.Sleep(pause_time * time.Millisecond)
for !player.FoundTreasure {
// the player sees along unobstructed paths and determines which cell is the treasure and which is a path
treasurePositionXY, listPathPosition := player.see(treasureMap)
for _, pathPosition := range listPathPosition {
treasureMap.setEntity(entity_path, pathPosition)
treasureMap.updatePossibleTreasureLocation(listPathPosition)
}
if !player.FoundTreasure {
// keep moving until the treasure is found
newPosition, _ := player.move(treasureMap.Mapping)
oldPosition := player.Position
// stop when the player cannot move any longer
if newPosition == oldPosition {
break
}
// move the player to the new position, leaving a path at the old position
treasureMap.setEntity(entity_path, oldPosition)
treasureMap.setEntity(entity_player, newPosition)
treasureMap.render()
// update player position
player.setPosition(newPosition)
} else {
treasureMap.clearPossibleTreasureLocation()
treasureMap.setTreasureLocation(treasurePositionXY)
treasureMap.revealMap(treasurePositionXY)
treasureMap.render()
break
}
}
}
// NewPlayer creates a new player at the initial position
func NewPlayer() Player {
return Player{
Position: playerStartPosition,
Range: make(map[int]int),
}
}
// setPosition updates the player's position
func (p *Player) setPosition(newPosition [2]int) {
p.Position = newPosition
}
// move updates the entity_player coordinates, limited to the predefined directions
func (p *Player) move(treasureMap map[[2]int]int) ([2]int, bool) {
if p.DirectionTaken == up {
newPlayerPositionXY := [2]int{p.Position[0], p.Position[1] + 1}
if treasureMap[newPlayerPositionXY] == entity_obstacle {
p.DirectionTaken = right
} else {
return newPlayerPositionXY, true
}
}
if p.DirectionTaken == right {
newPlayerPositionXY := [2]int{p.Position[0] + 1, p.Position[1]}
if treasureMap[newPlayerPositionXY] == entity_obstacle {
p.DirectionTaken = down
} else {
return newPlayerPositionXY, true
}
}
if p.DirectionTaken == down {
newPlayerPositionXY := [2]int{p.Position[0], p.Position[1] - 1}
if treasureMap[newPlayerPositionXY] == entity_obstacle {
p.DirectionTaken = stuck
} else {
return newPlayerPositionXY, true
}
}
return p.Position, false
}
// see checks every unobstructed line along X and Y from the entity_player position
func (p *Player) see(treasureMap TreasureMap) ([2]int, [][2]int) {
var (
startX, startY = p.Position[0], p.Position[1]
treasurePosition, treasureFound [2]int
listPathPosition, pathFound [][2]int
)
// see all entity in x axis with same y axis / right direction ->
treasurePosition, pathFound = checkMap(treasureMap, startX+1, startY, 1, axis_x)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[right] = len(pathFound)
// see all entity in -x axis with same y axis / left direction <-
treasurePosition, pathFound = checkMap(treasureMap, startX-1, startY, -1, axis_x)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[left] = len(pathFound)
// see all entity in y axis with same x axis / up direction ^
treasurePosition, pathFound = checkMap(treasureMap, startY+1, startX, 1, axis_y)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[up] = len(pathFound)
// see all entity in -y axis with same x axis / down direction v
treasurePosition, pathFound = checkMap(treasureMap, startY-1, startX, -1, axis_y)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[down] = len(pathFound)
| p.FoundTreasure = true
}
// check possibility of path intersection with best probability to get the most explored map
if p.DirectionTaken == up && p.Range[right] > p.Range[up] {
p.DirectionTaken = right
} else if p.DirectionTaken == right && p.Range[down] > p.Range[right] {
p.DirectionTaken = down
}
return treasureFound, listPathPosition
}
// checkMap is a shorthand that validates an unobstructed line of sight in the original mapping; it returns the treasure location and the list of clear path positions in sight
func checkMap(treasureMap TreasureMap, startAxis int, staticAxis int, addValue int, typeAxis int) ([2]int, [][2]int) {
var (
check = true
treasurePosition [2]int
pathPosition [][2]int
currentPosition [2]int
)
for check {
if typeAxis == axis_x {
currentPosition = [2]int{startAxis, staticAxis}
} else {
currentPosition = [2]int{staticAxis, startAxis}
}
if check {
switch treasureMap.OriginalMapping[currentPosition] {
case entity_path:
pathPosition = append(pathPosition, currentPosition)
case entity_treasure:
treasurePosition = currentPosition
case entity_obstacle:
check = false
default:
check = false
}
}
startAxis += addValue
}
return treasurePosition, pathPosition
}
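// Added for readability (not part of the original file): the entity, direction
// and axis identifiers used above are assumed to be package-level constants
// roughly like this sketch; the real definitions live outside this excerpt.
const (
	_upAssumed = iota // placeholder names so as not to clash with the real consts
	_rightAssumed
	_downAssumed
	_stuckAssumed
)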
// NewTreasure creates a new blank Treasure
func NewTreasure() Treasure {
return Treasure{}
}
// randomizePosition places the entity_treasure on the map at random. It may take several attempts to land the entity_treasure on a clear entity_path
func (t *Treasure) randomizePosition(sizeX, sizeY int) {
var (
xMin, xMax = 1, sizeX
yMin, yMax = 1, sizeY
treasurePositionX, treasurePositionY int
treasurePositionXY [2]int
)
rand.Seed(time.Now().UnixNano())
treasurePositionX = rand.Intn(xMax-xMin) + xMin
treasurePositionY = rand.Intn(yMax-yMin) + yMin
treasurePositionXY = [2]int{treasurePositionX, treasurePositionY}
t.Position = treasurePositionXY
}
// NewTreasureMap creates a new blank treasure map
func NewTreasureMap(size [2]int) TreasureMap {
return TreasureMap{
Size: size,
Mapping: make(map[[2]int]int),
OriginalMapping: make(map[[2]int]int),
ListPossibleTreasureLocation: make(map[[2]int]bool),
}
}
// render displays the mapping (not the original mapping). It also prints the list of possible treasure locations.
func (tm *TreasureMap) render() {
var (
treasureMapDrawPerLine, treasureMapDrawComplete, treasureMapAdditional string
)
for y := 1; y <= tm.Size[1]; y++ {
treasureMapDrawPerLine = ""
if y < tm.Size[1] {
treasureMapDrawPerLine = "\n"
}
for x := 1; x <= tm.Size[0]; x++ {
treasureMapDrawPerLine = treasureMapDrawPerLine + convertIntToEntity(tm.Mapping[[2]int{x, y}])
}
treasureMapDrawComplete = treasureMapDrawPerLine + treasureMapDrawComplete
}
if len(tm.ListPossibleTreasureLocation) > 0 {
for coordinate, possibleLocation := range tm.ListPossibleTreasureLocation {
coordinateString := strconv.Itoa(coordinate[0]) + "," + strconv.Itoa(coordinate[1])
if possibleLocation {
treasureMapAdditional = treasureMapAdditional + fmt.Sprintf("{%s},", coordinateString)
// ===== dataset row boundary; next row file_name = main.go (window resumes mid-file) =====
if treasureMap.setEntity(entity_treasure, treasure.Position) {
break
}
}
treasureMap.render() // display initial condition with treasure
fmt.Println("Initial Condition, treasure hid in:", treasure.Position, "Wait for it..")
time.Sleep(pause_time * time.Millisecond)
treasureMap.setPossibleTreasure()
treasureMap.render() // display map with possible treasure location
fmt.Println("Now it's hidden! Let's go find it!")
time.Sleep(pause_time * time.Millisecond)
for !player.FoundTreasure {
// the player sees along unobstructed paths and determines which cell is the treasure and which is a path
treasurePositionXY, listPathPosition := player.see(treasureMap)
for _, pathPosition := range listPathPosition {
treasureMap.setEntity(entity_path, pathPosition)
treasureMap.updatePossibleTreasureLocation(listPathPosition)
}
if !player.FoundTreasure {
// keep moving until the treasure is found
newPosition, _ := player.move(treasureMap.Mapping)
oldPosition := player.Position
// stop when the player cannot move any longer
if newPosition == oldPosition {
break
}
// move the player to the new position, leaving a path at the old position
treasureMap.render()
// update player position
player.setPosition(newPosition)
} else {
treasureMap.clearPossibleTreasureLocation()
treasureMap.setTreasureLocation(treasurePositionXY)
treasureMap.revealMap(treasurePositionXY)
treasureMap.render()
break
}
}
}
// NewPlayer creates a new player at the initial position
func NewPlayer() Player {
return Player{
Position: playerStartPosition,
Range: make(map[int]int),
}
}
// setPosition updates the player's position
func (p *Player) setPosition(newPosition [2]int) {
p.Position = newPosition
}
// move updates the entity_player coordinates, limited to the predefined directions
func (p *Player) move(treasureMap map[[2]int]int) ([2]int, bool) {
if p.DirectionTaken == up {
newPlayerPositionXY := [2]int{p.Position[0], p.Position[1] + 1}
if treasureMap[newPlayerPositionXY] == entity_obstacle {
p.DirectionTaken = right
} else {
return newPlayerPositionXY, true
}
}
if p.DirectionTaken == right {
newPlayerPositionXY := [2]int{p.Position[0] + 1, p.Position[1]}
if treasureMap[newPlayerPositionXY] == entity_obstacle {
p.DirectionTaken = down
} else {
return newPlayerPositionXY, true
}
}
if p.DirectionTaken == down {
newPlayerPositionXY := [2]int{p.Position[0], p.Position[1] - 1}
if treasureMap[newPlayerPositionXY] == entity_obstacle {
p.DirectionTaken = stuck
} else {
return newPlayerPositionXY, true
}
}
return p.Position, false
}
// see checks every unobstructed line along X and Y from the entity_player position
func (p *Player) see(treasureMap TreasureMap) ([2]int, [][2]int) {
var (
startX, startY = p.Position[0], p.Position[1]
treasurePosition, treasureFound [2]int
listPathPosition, pathFound [][2]int
)
// see all entity in x axis with same y axis / right direction ->
treasurePosition, pathFound = checkMap(treasureMap, startX+1, startY, 1, axis_x)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[right] = len(pathFound)
// see all entity in -x axis with same y axis / left direction <-
treasurePosition, pathFound = checkMap(treasureMap, startX-1, startY, -1, axis_x)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[left] = len(pathFound)
// see all entity in y axis with same x axis / up direction ^
treasurePosition, pathFound = checkMap(treasureMap, startY+1, startX, 1, axis_y)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[up] = len(pathFound)
// see all entity in -y axis with same x axis / down direction v
treasurePosition, pathFound = checkMap(treasureMap, startY-1, startX, -1, axis_y)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[down] = len(pathFound)
if treasureMap.OriginalMapping[treasureFound] == entity_treasure {
p.FoundTreasure = true
}
// check for a possible path intersection, preferring the direction most likely to explore more of the map
if p.DirectionTaken == up && p.Range[right] > p.Range[up] {
p.DirectionTaken = right
} else if p.DirectionTaken == right && p.Range[down] > p.Range[right] {
p.DirectionTaken = down
}
return treasureFound, listPathPosition
}
// checkMap is a shorthand that validates an unobstructed line of sight in the original mapping; it returns the treasure location and the list of clear path positions in sight
func checkMap(treasureMap TreasureMap, startAxis int, staticAxis int, addValue int, typeAxis int) ([2]int, [][2]int) {
var (
check = true
treasurePosition [2]int
pathPosition [][2]int
currentPosition [2]int
)
for check {
if typeAxis == axis_x {
currentPosition = [2]int{startAxis, staticAxis}
} else {
currentPosition = [2]int{staticAxis, startAxis}
}
if check {
switch treasureMap.OriginalMapping[currentPosition] {
case entity_path:
pathPosition = append(pathPosition, currentPosition)
case entity_treasure:
treasurePosition = currentPosition
case entity_obstacle:
check = false
default:
check = false
}
}
startAxis += addValue
}
return treasurePosition, pathPosition
}
// NewTreasure creates a new blank Treasure
func NewTreasure() Treasure {
return Treasure{}
}
// randomizePosition places the entity_treasure on the map at random. It may take several attempts to land the entity_treasure on a clear entity_path
func (t *Treasure) randomizePosition(sizeX, sizeY int) {
var (
xMin, xMax = 1, sizeX
yMin, yMax = 1, sizeY
treasurePositionX, treasurePositionY int
treasurePositionXY [2]int
)
rand.Seed(time.Now().UnixNano())
treasurePositionX = rand.Intn(xMax-xMin) + xMin
treasurePositionY = rand.Intn(yMax-yMin) + yMin
treasurePositionXY = [2]int{treasurePositionX, treasurePositionY}
t.Position = treasurePositionXY
}
// NewTreasureMap creates a new blank treasure map
func NewTreasureMap(size [2]int) TreasureMap {
return TreasureMap{
Size: size,
Mapping: make(map[[2]int]int),
OriginalMapping: make(map[[2]int]int),
ListPossibleTreasureLocation: make(map[[2]int]bool),
}
}
// render displays the mapping (not the original mapping). It also prints the list of possible treasure locations.
func (tm *TreasureMap) render() {
var (
treasureMapDrawPerLine, treasureMapDrawComplete, treasureMapAdditional string
)
for y := 1; y <= tm.Size[1]; y++ {
treasureMapDrawPerLine = ""
if y < tm.Size[1] {
treasureMapDrawPerLine = "\n"
}
for x := 1; x <= tm.Size[0]; x++ {
treasureMapDrawPerLine = treasureMapDrawPerLine + convertIntToEntity(tm.Mapping[[2]int{x, y}])
}
treasureMapDrawComplete = treasureMapDrawPerLine + treasureMapDrawComplete
}
if len(tm.ListPossibleTreasureLocation) > 0 {
for coordinate, possibleLocation := range tm.ListPossibleTreasureLocation {
coordinateString := strconv.Itoa(coordinate[0]) + "," + strconv.Itoa(coordinate[1])
if possibleLocation {
treasureMapAdditional = treasureMapAdditional + fmt.Sprintf("{%s},", coordinateString)
// ===== dataset row boundary: middle cell was the conditional block "{ break }"; next row file_name = main.go (window resumes mid-file) =====
if treasureMap.setEntity(entity_treasure, treasure.Position) {
break
}
}
treasureMap.render() // display initial condition with treasure
fmt.Println("Initial Condition, treasure hid in:", treasure.Position, "Wait for it..")
time.Sleep(pause_time * time.Millisecond)
treasureMap.setPossibleTreasure()
treasureMap.render() // display map with possible treasure location
fmt.Println("Now it's hidden! Let's go find it!")
time.Sleep(pause_time * time.Millisecond)
for !player.FoundTreasure {
// the player sees along unobstructed paths and determines which cell is the treasure and which is a path
treasurePositionXY, listPathPosition := player.see(treasureMap)
for _, pathPosition := range listPathPosition {
treasureMap.setEntity(entity_path, pathPosition)
treasureMap.updatePossibleTreasureLocation(listPathPosition)
}
if !player.FoundTreasure {
// keep moving until the treasure is found
newPosition, _ := player.move(treasureMap.Mapping)
oldPosition := player.Position
// stop when the player cannot move any longer
if newPosition == oldPosition {
break
}
// move the player to the new position, leaving a path at the old position
treasureMap.setEntity(entity_path, oldPosition)
treasureMap.setEntity(entity_player, newPosition)
treasureMap.render()
// update player position
player.setPosition(newPosition)
} else {
treasureMap.clearPossibleTreasureLocation()
treasureMap.setTreasureLocation(treasurePositionXY)
treasureMap.revealMap(treasurePositionXY)
treasureMap.render()
break
}
}
}
// NewPlayer creates a new player at the initial position
func NewPlayer() Player {
return Player{
Position: playerStartPosition,
Range: make(map[int]int),
}
}
// setPosition updates the player's position
func (p *Player) setPosition(newPosition [2]int) {
p.Position = newPosition
}
// move updates the entity_player coordinates, limited to the predefined directions
func (p *Player) move(treasureMap map[[2]int]int) ([2]int, bool) {
if p.DirectionTaken == up {
newPlayerPositionXY := [2]int{p.Position[0], p.Position[1] + 1}
if treasureMap[newPlayerPositionXY] == entity_obstacle {
p.DirectionTaken = right
} else {
return newPlayerPositionXY, true
}
}
if p.DirectionTaken == right {
newPlayerPositionXY := [2]int{p.Position[0] + 1, p.Position[1]}
if treasureMap[newPlayerPositionXY] == entity_obstacle {
p.DirectionTaken = down
} else {
return newPlayerPositionXY, true
}
}
if p.DirectionTaken == down {
newPlayerPositionXY := [2]int{p.Position[0], p.Position[1] - 1}
if treasureMap[newPlayerPositionXY] == entity_obstacle {
p.DirectionTaken = stuck
} else {
return newPlayerPositionXY, true
}
}
return p.Position, false
}
// see checks every unobstructed line along X and Y from the entity_player position
func (p *Player) see(treasureMap TreasureMap) ([2]int, [][2]int) {
var (
startX, startY = p.Position[0], p.Position[1]
treasurePosition, treasureFound [2]int
listPathPosition, pathFound [][2]int
)
// see all entity in x axis with same y axis / right direction ->
treasurePosition, pathFound = checkMap(treasureMap, startX+1, startY, 1, axis_x)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[right] = len(pathFound)
// see all entity in -x axis with same y axis / left direction <-
treasurePosition, pathFound = checkMap(treasureMap, startX-1, startY, -1, axis_x)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[left] = len(pathFound)
// see all entity in y axis with same x axis / up direction ^
treasurePosition, pathFound = checkMap(treasureMap, startY+1, startX, 1, axis_y)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[up] = len(pathFound)
// see all entity in -y axis with same x axis / down direction v
treasurePosition, pathFound = checkMap(treasureMap, startY-1, startX, -1, axis_y)
if treasureMap.OriginalMapping[treasurePosition] == entity_treasure {
treasureFound = treasurePosition
}
listPathPosition = append(listPathPosition, pathFound...)
p.Range[down] = len(pathFound)
if treasureMap.OriginalMapping[treasureFound] == entity_treasure {
p.FoundTreasure = true
}
// check for a possible path intersection, preferring the direction most likely to explore more of the map
if p.DirectionTaken == up && p.Range[right] > p.Range[up] {
p.DirectionTaken = right
} else if p.DirectionTaken == right && p.Range[down] > p.Range[right] {
p.DirectionTaken = down
}
return treasureFound, listPathPosition
}
// checkMap is a shorthand that validates an unobstructed line of sight in the original mapping; it returns the treasure location and the list of clear path positions in sight
func checkMap(treasureMap TreasureMap, startAxis int, staticAxis int, addValue int, typeAxis int) ([2]int, [][2]int) {
var (
check = true
treasurePosition [2]int
pathPosition [][2]int
currentPosition [2]int
)
for check {
if typeAxis == axis_x {
currentPosition = [2]int{startAxis, staticAxis}
} else {
currentPosition = [2]int{staticAxis, startAxis}
}
if check {
switch treasureMap.OriginalMapping[currentPosition] {
case entity_path:
pathPosition = append(pathPosition, currentPosition)
case entity_treasure:
treasurePosition = currentPosition
case entity_obstacle:
check = false
default:
check = false
}
}
startAxis += addValue
}
return treasurePosition, pathPosition
}
// NewTreasure creates a new blank Treasure
func NewTreasure() Treasure {
return Treasure{}
}
// randomizePosition places the entity_treasure on the map at random. It may take several attempts to land the entity_treasure on a clear entity_path
func (t *Treasure) randomizePosition(sizeX, sizeY int) {
var (
xMin, xMax = 1, sizeX
yMin, yMax = 1, sizeY
treasurePositionX, treasurePositionY int
treasurePositionXY [2]int
)
rand.Seed(time.Now().UnixNano())
treasurePositionX = rand.Intn(xMax-xMin) + xMin
treasurePositionY = rand.Intn(yMax-yMin) + yMin
treasurePositionXY = [2]int{treasurePositionX, treasurePositionY}
t.Position = treasurePositionXY
}
// NewTreasureMap creates a new blank treasure map
func NewTreasureMap(size [2]int) TreasureMap {
return TreasureMap{
Size: size,
Mapping: make(map[[2]int]int),
OriginalMapping: make(map[[2]int]int),
ListPossibleTreasureLocation: make(map[[2]int]bool),
}
}
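// Added aside (not in the original): [2]int array keys are comparable in Go,
// which is what lets these mappings index cells by coordinate directly.
func _exampleCoordinateKey() {
	cells := make(map[[2]int]int)
	cells[[2]int{3, 4}] = 7
	fmt.Println(cells[[2]int{3, 4}]) // 7
}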
// render displays the mapping (not the original mapping). It also prints the list of possible treasure locations.
func (tm *TreasureMap) render() {
var (
treasureMapDrawPerLine, treasureMapDrawComplete, treasureMapAdditional string
)
for y := 1; y <= tm.Size[1]; y++ {
treasureMapDrawPerLine = ""
if y < tm.Size[1] {
treasureMapDrawPerLine = "\n"
}
for x := 1; x <= tm.Size[0]; x++ {
treasureMapDrawPerLine = treasureMapDrawPerLine + convertIntToEntity(tm.Mapping[[2]int{x, y}])
}
treasureMapDrawComplete = treasureMapDrawPerLine + treasureMapDrawComplete
}
if len(tm.ListPossibleTreasureLocation) > 0 {
for coordinate, possibleLocation := range tm.ListPossibleTreasureLocation {
coordinateString := strconv.Itoa(coordinate[0]) + "," + strconv.Itoa(coordinate[1])
if possibleLocation {
treasureMapAdditional = treasureMapAdditional + fmt.Sprintf("{%s},", coordinateString)
// ===== dataset row boundary: the middle cell repeated the see function body, restored inline above; next row file_name = vessel.pb.go (window resumes mid-file) =====
return xxx_messageInfo_Vessel.Marshal(b, m, deterministic)
}
func (m *Vessel) XXX_Merge(src proto.Message) {
xxx_messageInfo_Vessel.Merge(m, src)
}
func (m *Vessel) XXX_Size() int {
return xxx_messageInfo_Vessel.Size(m)
}
func (m *Vessel) XXX_DiscardUnknown() {
xxx_messageInfo_Vessel.DiscardUnknown(m)
}
var xxx_messageInfo_Vessel proto.InternalMessageInfo
func (m *Vessel) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Vessel) GetCapacity() int32 {
if m != nil {
return m.Capacity
}
return 0
}
func (m *Vessel) GetMaxWeight() int32 {
if m != nil {
return m.MaxWeight
}
return 0
}
func (m *Vessel) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Vessel) GetAvailable() bool {
if m != nil {
return m.Available
}
return false
}
func (m *Vessel) GetOwnerId() string {
if m != nil {
return m.OwnerId
}
return ""
}
type Specification struct {
Capacity int32 `protobuf:"varint,1,opt,name=capacity,proto3" json:"capacity,omitempty"`
MaxWeight int32 `protobuf:"varint,2,opt,name=max_weight,json=maxWeight,proto3" json:"max_weight,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Specification) Reset() { *m = Specification{} }
func (m *Specification) String() string { return proto.CompactTextString(m) }
func (*Specification) ProtoMessage() {}
func (*Specification) Descriptor() ([]byte, []int) {
return fileDescriptor_04ef66862bb50716, []int{1}
}
func (m *Specification) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Specification.Unmarshal(m, b)
}
func (m *Specification) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Specification.Marshal(b, m, deterministic)
}
func (m *Specification) XXX_Merge(src proto.Message) {
xxx_messageInfo_Specification.Merge(m, src)
}
func (m *Specification) XXX_Size() int {
return xxx_messageInfo_Specification.Size(m)
}
func (m *Specification) XXX_DiscardUnknown() {
xxx_messageInfo_Specification.DiscardUnknown(m)
}
var xxx_messageInfo_Specification proto.InternalMessageInfo
func (m *Specification) GetCapacity() int32 {
if m != nil {
return m.Capacity
}
return 0
}
func (m *Specification) GetMaxWeight() int32 {
if m != nil {
return m.MaxWeight
}
return 0
}
type Response struct {
Vessel *Vessel `protobuf:"bytes,1,opt,name=vessel,proto3" json:"vessel,omitempty"`
Vessels []*Vessel `protobuf:"bytes,2,rep,name=vessels,proto3" json:"vessels,omitempty"`
Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Response) Reset() { *m = Response{} }
func (m *Response) String() string { return proto.CompactTextString(m) }
func (*Response) ProtoMessage() {}
func (*Response) Descriptor() ([]byte, []int) {
return fileDescriptor_04ef66862bb50716, []int{2}
}
func (m *Response) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Response.Unmarshal(m, b)
}
func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Response.Marshal(b, m, deterministic)
}
func (m *Response) XXX_Merge(src proto.Message) {
xxx_messageInfo_Response.Merge(m, src)
}
func (m *Response) XXX_Size() int {
return xxx_messageInfo_Response.Size(m)
}
func (m *Response) XXX_DiscardUnknown() {
xxx_messageInfo_Response.DiscardUnknown(m)
}
var xxx_messageInfo_Response proto.InternalMessageInfo
func (m *Response) GetVessel() *Vessel {
if m != nil {
return m.Vessel
}
return nil
}
func (m *Response) GetVessels() []*Vessel {
if m != nil {
return m.Vessels
}
return nil
}
func (m *Response) GetCreated() bool {
if m != nil {
return m.Created
}
return false
}
func init() {
proto.RegisterType((*Vessel)(nil), "vessel.Vessel")
proto.RegisterType((*Specification)(nil), "vessel.Specification")
proto.RegisterType((*Response)(nil), "vessel.Response")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ client.Option
var _ server.Option
// Client API for VesselService service
type VesselServiceClient interface {
FindAvailable(ctx context.Context, in *Specification, opts ...client.CallOption) (*Response, error)
Create(ctx context.Context, in *Vessel, opts ...client.CallOption) (*Response, error)
}
type vesselServiceClient struct {
c client.Client
serviceName string
}
func NewVesselServiceClient(serviceName string, c client.Client) VesselServiceClient {
if c == nil {
c = client.NewClient()
}
if len(serviceName) == 0 {
serviceName = "vessel"
}
return &vesselServiceClient{
c: c,
serviceName: serviceName,
}
}
func (c *vesselServiceClient) FindAvailable(ctx context.Context, in *Specification, opts ...client.CallOption) (*Response, error) {
req := c.c.NewRequest(c.serviceName, "VesselService.FindAvailable", in)
out := new(Response)
err := c.c.Call(ctx, req, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
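// Added usage sketch (not part of the generated file); the service name and
// specification values below are illustrative assumptions.
func _exampleFindAvailable(c client.Client) {
	vesselClient := NewVesselServiceClient("vessel", c)
	res, err := vesselClient.FindAvailable(context.Background(), &Specification{
		Capacity:  500,
		MaxWeight: 200000,
	})
	if err != nil {
		return
	}
	_ = res.GetVessel()
}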
func (c *vesselServiceClient) Create(ctx context.Context, in *Vessel, opts ...client.CallOption) (*Response, error) {
req := c.c.NewRequest(c.serviceName, "VesselService.Create", in)
out := new(Response)
err := c.c.Call(ctx, req, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for VesselService service
type VesselServiceHandler interface {
FindAvailable(context.Context, *Specification, *Response) error
Create(context.Context, *Vessel, *Response) error
}
func RegisterVesselServiceHandler(s server.Server, hdlr VesselServiceHandler, opts ...server.HandlerOption) {
s.Handle(s.NewHandler(&VesselService{hdlr}, opts...))
}
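// Added sketch (not part of the generated file): a minimal stub implementation of
// VesselServiceHandler, suitable for wiring up via RegisterVesselServiceHandler,
// e.g. RegisterVesselServiceHandler(srv, &stubVesselHandler{}).
type stubVesselHandler struct{}

func (h *stubVesselHandler) FindAvailable(ctx context.Context, spec *Specification, rsp *Response) error {
	// the lookup logic is a stub; a real handler would query storage here
	rsp.Vessel = &Vessel{Id: "stub", Capacity: spec.GetCapacity(), Available: true}
	return nil
}

func (h *stubVesselHandler) Create(ctx context.Context, v *Vessel, rsp *Response) error {
	rsp.Vessel = v
	rsp.Created = true
	return nil
}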
type VesselService struct {
VesselServiceHandler
}
func (h *VesselService) FindAvailable(ctx context.Context, in *Specification, out *Response) error {
return h.VesselServiceHandler.FindAvailable(ctx, in, out)
}
func (h *VesselService) Create(ctx context.Context, in *Vessel, out *Response) error {
return h.VesselServiceHandler.Create(ctx, in, out)
}
func init() { proto.RegisterFile("proto/vessel/vessel.proto", fileDescriptor_04ef66862bb50716) }
var fileDescriptor_04ef66862bb50716 = []byte{
// 300 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x51, 0x4d, 0x4b, 0xc3, 0x40,
0x10, 0x75, 0xd3, 0x36, 0x4d, 0x47, 0x5a, 0x64, 0x40, 0xd8, 0x16, 0x85, 0x90, 0x83, 0xe4, 0x20,
0x15, 0xea, 0xc5, 0xab, 0x08, 0x82, 0x1e, 0xb7, 0xa0, 0xc7, 0xb2, 0xdd, 0x1d, 0x75, 0xa1, 0x4d,
0x42, 0x12, 0xd2, 0xfa, 0x6f, 0xfc, 0xa9, // (remaining descriptor bytes truncated in the dump)
// ===== dataset row boundary: the middle cell repeated the NewVesselServiceClient defaults, restored inline above; next row file_name = vessel.pb.go =====
return xxx_messageInfo_Vessel.Marshal(b, m, deterministic)
}
func (m *Vessel) XXX_Merge(src proto.Message) {
xxx_messageInfo_Vessel.Merge(m, src)
}
func (m *Vessel) XXX_Size() int {
return xxx_messageInfo_Vessel.Size(m)
}
func (m *Vessel) XXX_DiscardUnknown() {
xxx_messageInfo_Vessel.DiscardUnknown(m)
}
var xxx_messageInfo_Vessel proto.InternalMessageInfo
func (m *Vessel) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Vessel) GetCapacity() int32 {
if m != nil {
return m.Capacity
}
return 0
}
func (m *Vessel) GetMaxWeight() int32 {
if m != nil {
return m.MaxWeight
}
return 0
}
func (m *Vessel) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Vessel) GetAvailable() bool {
if m != nil {
return m.Available
}
return false
}
func (m *Vessel) GetOwnerId() string {
if m != nil {
return m.OwnerId
}
return ""
}
type Specification struct {
Capacity int32 `protobuf:"varint,1,opt,name=capacity,proto3" json:"capacity,omitempty"`
MaxWeight int32 `protobuf:"varint,2,opt,name=max_weight,json=maxWeight,proto3" json:"max_weight,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Specification) Reset() { *m = Specification{} }
func (m *Specification) String() string { return proto.CompactTextString(m) }
func (*Specification) ProtoMessage() {}
func (*Specification) Descriptor() ([]byte, []int) {
return fileDescriptor_04ef66862bb50716, []int{1}
}
func (m *Specification) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Specification.Unmarshal(m, b)
}
func (m *Specification) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Specification.Marshal(b, m, deterministic)
}
func (m *Specification) XXX_Merge(src proto.Message) {
xxx_messageInfo_Specification.Merge(m, src)
}
func (m *Specification) XXX_Size() int {
return xxx_messageInfo_Specification.Size(m)
}
func (m *Specification) XXX_DiscardUnknown() {
xxx_messageInfo_Specification.DiscardUnknown(m)
}
var xxx_messageInfo_Specification proto.InternalMessageInfo
func (m *Specification) GetCapacity() int32 {
if m != nil {
return m.Capacity
}
return 0
}
func (m *Specification) GetMaxWeight() int32 {
if m != nil {
return m.MaxWeight
}
return 0
}
type Response struct {
Vessel *Vessel `protobuf:"bytes,1,opt,name=vessel,proto3" json:"vessel,omitempty"`
Vessels []*Vessel `protobuf:"bytes,2,rep,name=vessels,proto3" json:"vessels,omitempty"`
Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Response) Reset() { *m = Response{} }
func (m *Response) String() string { return proto.CompactTextString(m) }
func (*Response) ProtoMessage() {}
func (*Response) Descriptor() ([]byte, []int) {
return fileDescriptor_04ef66862bb50716, []int{2}
}
func (m *Response) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Response.Unmarshal(m, b)
}
func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Response.Marshal(b, m, deterministic)
}
func (m *Response) XXX_Merge(src proto.Message) {
xxx_messageInfo_Response.Merge(m, src)
}
func (m *Response) XXX_Size() int {
return xxx_messageInfo_Response.Size(m)
}
func (m *Response) XXX_DiscardUnknown() {
xxx_messageInfo_Response.DiscardUnknown(m)
}
var xxx_messageInfo_Response proto.InternalMessageInfo
func (m *Response) GetVessel() *Vessel {
if m != nil {
return m.Vessel
}
return nil
}
func (m *Response) GetVessels() []*Vessel {
if m != nil {
return m.Vessels
}
return nil
}
func (m *Response) GetCreated() bool {
if m != nil {
return m.Created
}
return false
}
func init() {
proto.RegisterType((*Vessel)(nil), "vessel.Vessel")
proto.RegisterType((*Specification)(nil), "vessel.Specification")
proto.RegisterType((*Response)(nil), "vessel.Response")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ client.Option
var _ server.Option
// Client API for VesselService service
type VesselServiceClient interface {
FindAvailable(ctx context.Context, in *Specification, opts ...client.CallOption) (*Response, error)
Create(ctx context.Context, in *Vessel, opts ...client.CallOption) (*Response, error)
}
type vesselServiceClient struct {
c client.Client
serviceName string
}
func NewVesselServiceClient(serviceName string, c client.Client) VesselServiceClient {
if c == nil {
c = client.NewClient()
}
if len(serviceName) == 0 {
serviceName = "vessel"
}
return &vesselServiceClient{
c: c,
serviceName: serviceName,
}
}
func (c *vesselServiceClient) FindAvailable(ctx context.Context, in *Specification, opts ...client.CallOption) (*Response, error) {
req := c.c.NewRequest(c.serviceName, "VesselService.FindAvailable", in)
out := new(Response)
err := c.c.Call(ctx, req, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *vesselServiceClient) Create(ctx context.Context, in *Vessel, opts ...client.CallOption) (*Response, error) {
req := c.c.NewRequest(c.serviceName, "VesselService.Create", in)
out := new(Response)
err := c.c.Call(ctx, req, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for VesselService service
type VesselServiceHandler interface {
FindAvailable(context.Context, *Specification, *Response) error
Create(context.Context, *Vessel, *Response) error
}
func RegisterVesselServiceHandler(s server.Server, hdlr VesselServiceHandler, opts ...server.HandlerOption) {
s.Handle(s.NewHandler(&VesselService{hdlr}, opts...))
}
type VesselService struct {
VesselServiceHandler
}
func (h *VesselService) FindAvailable(ctx context.Context, in *Specification, out *Response) error {
return h.VesselServiceHandler.FindAvailable(ctx, in, out)
}
func (h *VesselService) Create(ctx context.Context, in *Vessel, out *Response) error {
return h.VesselServiceHandler.Create(ctx, in, out)
}
func init() { proto.RegisterFile("proto/vessel/vessel.proto", fileDescriptor_04ef66862bb50716) }
var fileDescriptor_04ef66862bb50716 = []byte{
// 300 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x51, 0x4d, 0x4b, 0xc3, 0x40,
0x10, 0x75, 0xd3, 0x36, 0x4d, 0x47, 0x5a, 0x64, 0x40, 0xd8, 0x16, 0x85, 0x90, 0x83, 0xe4, 0x20,
0x15, 0xea, 0xc5, 0xab, 0x08, 0x82, 0x1e, 0xb7, 0xa0, 0xc7, 0xb2, 0xdd, 0x1d, 0x75, 0xa1, 0x4d,
0x42, 0x12, 0xd2, 0xfa, 0x6f, 0xfc, 0xa9, // (remaining descriptor bytes truncated in the dump)
// ===== dataset row boundary: the middle cell repeated the Specification XXX_Merge body, restored inline above; next row file_name = vessel.pb.go =====
return xxx_messageInfo_Vessel.Marshal(b, m, deterministic)
}
func (m *Vessel) XXX_Merge(src proto.Message) {
xxx_messageInfo_Vessel.Merge(m, src)
}
func (m *Vessel) XXX_Size() int {
return xxx_messageInfo_Vessel.Size(m)
}
func (m *Vessel) XXX_DiscardUnknown() {
xxx_messageInfo_Vessel.DiscardUnknown(m)
}
var xxx_messageInfo_Vessel proto.InternalMessageInfo
func (m *Vessel) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Vessel) GetCapacity() int32 {
if m != nil {
return m.Capacity
}
return 0
}
func (m *Vessel) GetMaxWeight() int32 {
if m != nil {
return m.MaxWeight
}
return 0
}
func (m *Vessel) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Vessel) GetAvailable() bool {
if m != nil {
return m.Available
}
return false
}
func (m *Vessel) GetOwnerId() string {
if m != nil {
return m.OwnerId
}
return ""
}
type Specification struct {
Capacity int32 `protobuf:"varint,1,opt,name=capacity,proto3" json:"capacity,omitempty"`
MaxWeight int32 `protobuf:"varint,2,opt,name=max_weight,json=maxWeight,proto3" json:"max_weight,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Specification) Reset() { *m = Specification{} }
func (m *Specification) String() string { return proto.CompactTextString(m) }
func (*Specification) ProtoMessage() {}
func (*Specification) Descriptor() ([]byte, []int) {
return fileDescriptor_04ef66862bb50716, []int{1}
}
func (m *Specification) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Specification.Unmarshal(m, b)
}
func (m *Specification) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Specification.Marshal(b, m, deterministic)
}
func (m *Specification) XXX_Merge(src proto.Message) {
xxx_messageInfo_Specification.Merge(m, src)
}
func (m *Specification) XXX_Size() int {
return xxx_messageInfo_Specification.Size(m)
}
func (m *Specification) XXX_DiscardUnknown() {
xxx_messageInfo_Specification.DiscardUnknown(m)
}
var xxx_messageInfo_Specification proto.InternalMessageInfo
func (m *Specification) GetCapacity() int32 {
if m != nil {
return m.Capacity
}
return 0
}
func (m *Specification) GetMaxWeight() int32 {
if m != nil {
return m.MaxWeight
}
return 0
}
type Response struct {
Vessel *Vessel `protobuf:"bytes,1,opt,name=vessel,proto3" json:"vessel,omitempty"`
Vessels []*Vessel `protobuf:"bytes,2,rep,name=vessels,proto3" json:"vessels,omitempty"`
Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Response) Reset() { *m = Response{} }
func (m *Response) String() string { return proto.CompactTextString(m) }
func (*Response) ProtoMessage() {}
func (*Response) Descriptor() ([]byte, []int) {
return fileDescriptor_04ef66862bb50716, []int{2}
}
func (m *Response) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Response.Unmarshal(m, b)
}
func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Response.Marshal(b, m, deterministic)
}
func (m *Response) XXX_Merge(src proto.Message) {
xxx_messageInfo_Response.Merge(m, src)
}
func (m *Response) XXX_Size() int {
return xxx_messageInfo_Response.Size(m)
}
func (m *Response) XXX_DiscardUnknown() {
xxx_messageInfo_Response.DiscardUnknown(m)
}
var xxx_messageInfo_Response proto.InternalMessageInfo
func (m *Response) GetVessel() *Vessel {
if m != nil {
return m.Vessel
}
return nil
}
func (m *Response) GetVessels() []*Vessel {
if m != nil {
return m.Vessels
}
return nil
}
func (m *Response) GetCreated() bool {
if m != nil {
return m.Created
}
return false
}
func init() {
proto.RegisterType((*Vessel)(nil), "vessel.Vessel")
proto.RegisterType((*Specification)(nil), "vessel.Specification")
proto.RegisterType((*Response)(nil), "vessel.Response")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ client.Option
var _ server.Option
// Client API for VesselService service
type VesselServiceClient interface {
FindAvailable(ctx context.Context, in *Specification, opts ...client.CallOption) (*Response, error)
Create(ctx context.Context, in *Vessel, opts ...client.CallOption) (*Response, error)
}
type vesselServiceClient struct {
c client.Client
serviceName string
}
func NewVesselServiceClient(serviceName string, c client.Client) VesselServiceClient {
if c == nil {
c = client.NewClient()
}
if len(serviceName) == 0 {
serviceName = "vessel"
}
return &vesselServiceClient{
c: c,
serviceName: serviceName,
}
}
func (c *vesselServiceClient) FindAvailable(ctx context.Context, in *Specification, opts ...client.CallOption) (*Response, error) {
req := c.c.NewRequest(c.serviceName, "VesselService.FindAvailable", in)
out := new(Response)
err := c.c.Call(ctx, req, out, opts...)
if err != nil |
return out, nil
}
func (c *vesselServiceClient) Create(ctx context.Context, in *Vessel, opts ...client.CallOption) (*Response, error) {
req := c.c.NewRequest(c.serviceName, "VesselService.Create", in)
out := new(Response)
err := c.c.Call(ctx, req, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
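// --- Editor's example (not generated code): a minimal sketch of calling the
// generated client from application code. The service name "vessel" matches
// the constructor's fallback; the Specification values are invented.
func exampleFindAvailable(ctx context.Context) (*Response, error) {
	// Passing nil lets the constructor fall back to client.NewClient().
	vesselClient := NewVesselServiceClient("vessel", nil)
	spec := &Specification{Capacity: 3, MaxWeight: 100}
	return vesselClient.FindAvailable(ctx, spec)
}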
// Server API for VesselService service
type VesselServiceHandler interface {
FindAvailable(context.Context, *Specification, *Response) error
Create(context.Context, *Vessel, *Response) error
}
func RegisterVesselServiceHandler(s server.Server, hdlr VesselServiceHandler, opts ...server.HandlerOption) {
s.Handle(s.NewHandler(&VesselService{hdlr}, opts...))
}
type VesselService struct {
VesselServiceHandler
}
func (h *VesselService) FindAvailable(ctx context.Context, in *Specification, out *Response) error {
return h.VesselServiceHandler.FindAvailable(ctx, in, out)
}
func (h *VesselService) Create(ctx context.Context, in *Vessel, out *Response) error {
return h.VesselServiceHandler.Create(ctx, in, out)
}
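// --- Editor's example (not generated code): a stub VesselServiceHandler and
// its registration. The canned vessel is invented; a real service would back
// this with a datastore.
type stubVesselHandler struct{}

func (h *stubVesselHandler) FindAvailable(ctx context.Context, req *Specification, res *Response) error {
	// Always "find" one vessel that satisfies the requested spec.
	res.Vessel = &Vessel{Id: "vessel001", Name: "Example Vessel", Capacity: req.Capacity, MaxWeight: req.MaxWeight, Available: true}
	return nil
}

func (h *stubVesselHandler) Create(ctx context.Context, req *Vessel, res *Response) error {
	res.Vessel = req
	res.Created = true
	return nil
}

// Wiring it up, assuming a go-micro server value named srv:
//   RegisterVesselServiceHandler(srv, &stubVesselHandler{})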
func init() { proto.RegisterFile("proto/vessel/vessel.proto", fileDescriptor_04ef66862bb50716) }
var fileDescriptor_04ef66862bb50716 = []byte{
// 300 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x51, 0x4d, 0x4b, 0xc3, 0x40,
0x10, 0x75, 0xd3, 0x36, 0x4d, 0x47, 0x5a, 0x64, 0x40, 0xd8, 0x16, 0x85, 0x90, 0x83, 0xe4, 0x20,
0x15, 0xea, 0xc5, 0xab, 0x08, 0x82, 0x1e, 0xb7, 0xa0, 0xc7, 0xb2, 0xdd, 0x1d, 0x75, 0xa1, 0x4d,
0x42, 0x12, 0xd2, 0xfa, 0x6f, 0xfc, 0xa9, 0 | {
return nil, err
} | conditional_block |
vessel.pb.go | Info_Vessel.Marshal(b, m, deterministic)
}
func (m *Vessel) XXX_Merge(src proto.Message) {
xxx_messageInfo_Vessel.Merge(m, src)
}
func (m *Vessel) XXX_Size() int {
return xxx_messageInfo_Vessel.Size(m)
}
func (m *Vessel) XXX_DiscardUnknown() {
xxx_messageInfo_Vessel.DiscardUnknown(m)
}
var xxx_messageInfo_Vessel proto.InternalMessageInfo
func (m *Vessel) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Vessel) GetCapacity() int32 {
if m != nil {
return m.Capacity
}
return 0
}
func (m *Vessel) GetMaxWeight() int32 {
if m != nil {
return m.MaxWeight
}
return 0
}
func (m *Vessel) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Vessel) GetAvailable() bool {
if m != nil {
return m.Available
}
return false
}
func (m *Vessel) GetOwnerId() string {
if m != nil {
return m.OwnerId
}
return ""
}
type Specification struct {
Capacity int32 `protobuf:"varint,1,opt,name=capacity,proto3" json:"capacity,omitempty"`
MaxWeight int32 `protobuf:"varint,2,opt,name=max_weight,json=maxWeight,proto3" json:"max_weight,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Specification) Reset() { *m = Specification{} }
func (m *Specification) String() string { return proto.CompactTextString(m) }
func (*Specification) ProtoMessage() {}
func (*Specification) Descriptor() ([]byte, []int) {
return fileDescriptor_04ef66862bb50716, []int{1}
}
func (m *Specification) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Specification.Unmarshal(m, b)
}
func (m *Specification) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Specification.Marshal(b, m, deterministic)
}
func (m *Specification) XXX_Merge(src proto.Message) {
xxx_messageInfo_Specification.Merge(m, src)
}
func (m *Specification) XXX_Size() int {
return xxx_messageInfo_Specification.Size(m)
}
func (m *Specification) XXX_DiscardUnknown() {
xxx_messageInfo_Specification.DiscardUnknown(m)
}
var xxx_messageInfo_Specification proto.InternalMessageInfo
func (m *Specification) GetCapacity() int32 {
if m != nil {
return m.Capacity
}
return 0
}
func (m *Specification) GetMaxWeight() int32 {
if m != nil {
return m.MaxWeight
}
return 0
}
type Response struct {
Vessel *Vessel `protobuf:"bytes,1,opt,name=vessel,proto3" json:"vessel,omitempty"`
Vessels []*Vessel `protobuf:"bytes,2,rep,name=vessels,proto3" json:"vessels,omitempty"`
Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Response) Reset() { *m = Response{} }
func (m *Response) String() string { return proto.CompactTextString(m) }
func (*Response) ProtoMessage() {}
func (*Response) Descriptor() ([]byte, []int) {
return fileDescriptor_04ef66862bb50716, []int{2}
}
func (m *Response) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Response.Unmarshal(m, b)
}
func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Response.Marshal(b, m, deterministic)
}
func (m *Response) | (src proto.Message) {
xxx_messageInfo_Response.Merge(m, src)
}
func (m *Response) XXX_Size() int {
return xxx_messageInfo_Response.Size(m)
}
func (m *Response) XXX_DiscardUnknown() {
xxx_messageInfo_Response.DiscardUnknown(m)
}
var xxx_messageInfo_Response proto.InternalMessageInfo
func (m *Response) GetVessel() *Vessel {
if m != nil {
return m.Vessel
}
return nil
}
func (m *Response) GetVessels() []*Vessel {
if m != nil {
return m.Vessels
}
return nil
}
func (m *Response) GetCreated() bool {
if m != nil {
return m.Created
}
return false
}
func init() {
proto.RegisterType((*Vessel)(nil), "vessel.Vessel")
proto.RegisterType((*Specification)(nil), "vessel.Specification")
proto.RegisterType((*Response)(nil), "vessel.Response")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ client.Option
var _ server.Option
// Client API for VesselService service
type VesselServiceClient interface {
FindAvailable(ctx context.Context, in *Specification, opts ...client.CallOption) (*Response, error)
Create(ctx context.Context, in *Vessel, opts ...client.CallOption) (*Response, error)
}
type vesselServiceClient struct {
c client.Client
serviceName string
}
func NewVesselServiceClient(serviceName string, c client.Client) VesselServiceClient {
if c == nil {
c = client.NewClient()
}
if len(serviceName) == 0 {
serviceName = "vessel"
}
return &vesselServiceClient{
c: c,
serviceName: serviceName,
}
}
func (c *vesselServiceClient) FindAvailable(ctx context.Context, in *Specification, opts ...client.CallOption) (*Response, error) {
req := c.c.NewRequest(c.serviceName, "VesselService.FindAvailable", in)
out := new(Response)
err := c.c.Call(ctx, req, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *vesselServiceClient) Create(ctx context.Context, in *Vessel, opts ...client.CallOption) (*Response, error) {
req := c.c.NewRequest(c.serviceName, "VesselService.Create", in)
out := new(Response)
err := c.c.Call(ctx, req, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for VesselService service
type VesselServiceHandler interface {
FindAvailable(context.Context, *Specification, *Response) error
Create(context.Context, *Vessel, *Response) error
}
func RegisterVesselServiceHandler(s server.Server, hdlr VesselServiceHandler, opts ...server.HandlerOption) {
s.Handle(s.NewHandler(&VesselService{hdlr}, opts...))
}
type VesselService struct {
VesselServiceHandler
}
func (h *VesselService) FindAvailable(ctx context.Context, in *Specification, out *Response) error {
return h.VesselServiceHandler.FindAvailable(ctx, in, out)
}
func (h *VesselService) Create(ctx context.Context, in *Vessel, out *Response) error {
return h.VesselServiceHandler.Create(ctx, in, out)
}
func init() { proto.RegisterFile("proto/vessel/vessel.proto", fileDescriptor_04ef66862bb50716) }
var fileDescriptor_04ef66862bb50716 = []byte{
// 300 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x51, 0x4d, 0x4b, 0xc3, 0x40,
0x10, 0x75, 0xd3, 0x36, 0x4d, 0x47, 0x5a, 0x64, 0x40, 0xd8, 0x16, 0x85, 0x90, 0x83, 0xe4, 0x20,
0x15, 0xea, 0xc5, 0xab, 0x08, 0x82, 0x1e, 0xb7, 0xa0, 0xc7, 0xb2, 0xdd, 0x1d, 0x75, 0xa1, 0x4d,
0x42, 0x12, 0xd2, 0xfa, 0x6f, 0xfc, 0xa9, 0xc | XXX_Merge | identifier_name |
download.go | etheus.GaugeOpts{
Name: "last_data_refresh_failure",
Help: "Unix timestamp of the most recent failure to refresh data",
}, []string{"source"})
lastDataRefreshCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "last_data_refresh_count",
Help: "Count of records for a given sanction or entity list",
}, []string{"source"})
)
func init() {
prometheus.MustRegister(lastDataRefreshSuccess)
prometheus.MustRegister(lastDataRefreshCount)
prometheus.MustRegister(lastDataRefreshFailure)
}
// Download holds counts for each type of list data parsed from files and a
// timestamp of when the download happened.
type Download struct {
Timestamp time.Time `json:"timestamp"`
// US Office of Foreign Assets Control (OFAC)
SDNs int `json:"SDNs"`
Alts int `json:"altNames"`
Addresses int `json:"addresses"`
SectoralSanctions int `json:"sectoralSanctions"`
// US Bureau of Industry and Security (BIS)
DeniedPersons int `json:"deniedPersons"`
BISEntities int `json:"bisEntities"`
}
type downloadStats struct {
// US Office of Foreign Assets Control (OFAC)
SDNs int `json:"SDNs"`
Alts int `json:"altNames"`
Addresses int `json:"addresses"`
SectoralSanctions int `json:"sectoralSanctions"`
// US Bureau of Industry and Security (BIS)
DeniedPersons int `json:"deniedPersons"`
BISEntities int `json:"bisEntities"`
RefreshedAt time.Time `json:"timestamp"`
}
// periodicDataRefresh will forever block for interval's duration and then download and reparse the data.
// Download stats are recorded as part of a successful re-download and parse.
func (s *searcher) periodicDataRefresh(interval time.Duration, downloadRepo downloadRepository, updates chan *downloadStats) {
if interval == 0*time.Second {
s.logger.Log("download", fmt.Sprintf("not scheduling periodic refreshing duration=%v", interval))
return
}
for {
time.Sleep(interval)
stats, err := s.refreshData("")
if err != nil {
if s.logger != nil {
s.logger.Log("main", fmt.Sprintf("ERROR: refreshing data: %v", err))
}
} else {
downloadRepo.recordStats(stats)
if s.logger != nil {
s.logger.Log(
"main", fmt.Sprintf("data refreshed %v ago", time.Since(stats.RefreshedAt)),
"SDNs", stats.SDNs, "AltNames", stats.Alts, "Addresses", stats.Addresses, "SSI", stats.SectoralSanctions,
"DPL", stats.DeniedPersons, "BISEntities", stats.BISEntities,
)
}
updates <- stats // send stats for re-search and watch notifications
}
}
}
func ofacRecords(logger log.Logger, initialDir string) (*ofac.Results, error) {
files, err := ofac.Download(logger, initialDir)
if err != nil {
return nil, fmt.Errorf("download: %v", err)
}
if len(files) == 0 {
return nil, errors.New("no OFAC Results")
}
var res *ofac.Results
for i := range files {
if i == 0 {
rr, err := ofac.Read(files[i])
if err != nil {
return nil, fmt.Errorf("read: %v", err)
}
if rr != nil {
res = rr
}
} else {
rr, err := ofac.Read(files[i])
if err != nil {
return nil, fmt.Errorf("read and replace: %v", err)
}
if rr != nil {
res.Addresses = append(res.Addresses, rr.Addresses...)
res.AlternateIdentities = append(res.AlternateIdentities, rr.AlternateIdentities...)
res.SDNs = append(res.SDNs, rr.SDNs...)
res.SDNComments = append(res.SDNComments, rr.SDNComments...)
}
}
}
return res, err
}
func dplRecords(logger log.Logger, initialDir string) ([]*dpl.DPL, error) {
file, err := dpl.Download(logger, initialDir)
if err != nil {
return nil, err
}
return dpl.Read(file)
}
func cslRecords(logger log.Logger, initialDir string) (*csl.CSL, error) {
file, err := csl.Download(logger, initialDir)
if err != nil {
logger.Log("download", "WARN: skipping CSL download", "description", err)
return &csl.CSL{}, nil
}
cslRecords, err := csl.Read(file)
if err != nil {
return nil, err
}
return cslRecords, err
}
// refreshData reaches out to the various websites to download the latest
// files, runs each list's parser, and index data for searches.
func (s *searcher) refreshData(initialDir string) (*downloadStats, error) {
if s.logger != nil {
s.logger.Log("download", "Starting refresh of data")
if initialDir != "" {
s.logger.Log("download", fmt.Sprintf("reading files from %s", initialDir))
}
}
results, err := ofacRecords(s.logger, initialDir)
if err != nil {
lastDataRefreshFailure.WithLabelValues("SDNs").Set(float64(time.Now().Unix()))
return nil, fmt.Errorf("OFAC records: %v", err)
} | alts := precomputeAlts(results.AlternateIdentities)
deniedPersons, err := dplRecords(s.logger, initialDir)
if err != nil {
lastDataRefreshFailure.WithLabelValues("DPs").Set(float64(time.Now().Unix()))
return nil, fmt.Errorf("DPL records: %v", err)
}
dps := precomputeDPs(deniedPersons, s.pipe)
consolidatedLists, err := cslRecords(s.logger, initialDir)
if err != nil {
lastDataRefreshFailure.WithLabelValues("CSL").Set(float64(time.Now().Unix()))
return nil, fmt.Errorf("CSL records: %v", err)
}
ssis := precomputeSSIs(consolidatedLists.SSIs, s.pipe)
els := precomputeBISEntities(consolidatedLists.ELs, s.pipe)
stats := &downloadStats{
// OFAC
SDNs: len(sdns),
Alts: len(alts),
Addresses: len(adds),
SectoralSanctions: len(ssis),
// BIS
BISEntities: len(els),
DeniedPersons: len(dps),
}
stats.RefreshedAt = lastRefresh(initialDir)
// record prometheus metrics
lastDataRefreshCount.WithLabelValues("SDNs").Set(float64(len(sdns)))
lastDataRefreshCount.WithLabelValues("SSIs").Set(float64(len(ssis)))
lastDataRefreshCount.WithLabelValues("BISEntities").Set(float64(len(els)))
lastDataRefreshCount.WithLabelValues("DPs").Set(float64(len(dps)))
// Set new records after precomputation (to minimize lock contention)
s.Lock()
// OFAC
s.SDNs = sdns
s.Addresses = adds
s.Alts = alts
s.SSIs = ssis
// BIS
s.DPs = dps
s.BISEntities = els
// metadata
s.lastRefreshedAt = stats.RefreshedAt
s.Unlock()
if s.logger != nil {
s.logger.Log("download", "Finished refresh of data")
}
// record successful data refresh
lastDataRefreshSuccess.WithLabelValues().Set(float64(time.Now().Unix()))
return stats, nil
}
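// Editor's note: a plausible startup call (the directory is an assumption):
//
//   stats, err := searcher.refreshData("./data") // or "" to force fresh downloads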
// lastRefresh returns a time.Time for the oldest file in dir or the current time if empty.
func lastRefresh(dir string) time.Time {
if dir == "" {
return time.Now()
}
infos, err := ioutil.ReadDir(dir)
if len(infos) == 0 || err != nil {
return time.Time{} // zero time because there's no initial data
}
oldest := infos[0].ModTime()
for i := range infos[1:] {
if t := infos[i+1].ModTime(); t.Before(oldest) { // i ranges over infos[1:], so offset by one
oldest = t
}
}
return oldest
}
func addDownloadRoutes(logger log.Logger, r *mux.Router, repo downloadRepository) {
r.Methods("GET").Path("/downloads").HandlerFunc(getLatestDownloads(logger, repo))
}
func getLatestDownloads(logger log.Logger, repo downloadRepository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w = wrapResponseWriter(logger, w, r)
limit := extractSearchLimit(r)
downloads, err := repo.latestDownloads(limit)
if err != nil {
moovhttp.Problem(w, err)
return
}
logger.Log("download |
sdns := precomputeSDNs(results.SDNs, results.Addresses, s.pipe)
adds := precomputeAddresses(results.Addresses) | random_line_split |
download.go | etheus.GaugeOpts{
Name: "last_data_refresh_failure",
Help: "Unix timestamp of the most recent failure to refresh data",
}, []string{"source"})
lastDataRefreshCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "last_data_refresh_count",
Help: "Count of records for a given sanction or entity list",
}, []string{"source"})
)
func init() {
prometheus.MustRegister(lastDataRefreshSuccess)
prometheus.MustRegister(lastDataRefreshCount)
prometheus.MustRegister(lastDataRefreshFailure)
}
// Download holds counts for each type of list data parsed from files and a
// timestamp of when the download happened.
type Download struct {
Timestamp time.Time `json:"timestamp"`
// US Office of Foreign Assets Control (OFAC)
SDNs int `json:"SDNs"`
Alts int `json:"altNames"`
Addresses int `json:"addresses"`
SectoralSanctions int `json:"sectoralSanctions"`
// US Bureau of Industry and Security (BIS)
DeniedPersons int `json:"deniedPersons"`
BISEntities int `json:"bisEntities"`
}
type downloadStats struct {
// US Office of Foreign Assets Control (OFAC)
SDNs int `json:"SDNs"`
Alts int `json:"altNames"`
Addresses int `json:"addresses"`
SectoralSanctions int `json:"sectoralSanctions"`
// US Bureau of Industry and Security (BIS)
DeniedPersons int `json:"deniedPersons"`
BISEntities int `json:"bisEntities"`
RefreshedAt time.Time `json:"timestamp"`
}
// periodicDataRefresh will forever block for interval's duration and then download and reparse the data.
// Download stats are recorded as part of a successful re-download and parse.
func (s *searcher) periodicDataRefresh(interval time.Duration, downloadRepo downloadRepository, updates chan *downloadStats) {
if interval == 0*time.Second {
s.logger.Log("download", fmt.Sprintf("not scheduling periodic refreshing duration=%v", interval))
return
}
for {
time.Sleep(interval)
stats, err := s.refreshData("")
if err != nil {
if s.logger != nil {
s.logger.Log("main", fmt.Sprintf("ERROR: refreshing data: %v", err))
}
} else {
downloadRepo.recordStats(stats)
if s.logger != nil |
updates <- stats // send stats for re-search and watch notifications
}
}
}
func ofacRecords(logger log.Logger, initialDir string) (*ofac.Results, error) {
files, err := ofac.Download(logger, initialDir)
if err != nil {
return nil, fmt.Errorf("download: %v", err)
}
if len(files) == 0 {
return nil, errors.New("no OFAC Results")
}
var res *ofac.Results
for i := range files {
if i == 0 {
rr, err := ofac.Read(files[i])
if err != nil {
return nil, fmt.Errorf("read: %v", err)
}
if rr != nil {
res = rr
}
} else {
rr, err := ofac.Read(files[i])
if err != nil {
return nil, fmt.Errorf("read and replace: %v", err)
}
if rr != nil {
res.Addresses = append(res.Addresses, rr.Addresses...)
res.AlternateIdentities = append(res.AlternateIdentities, rr.AlternateIdentities...)
res.SDNs = append(res.SDNs, rr.SDNs...)
res.SDNComments = append(res.SDNComments, rr.SDNComments...)
}
}
}
return res, err
}
func dplRecords(logger log.Logger, initialDir string) ([]*dpl.DPL, error) {
file, err := dpl.Download(logger, initialDir)
if err != nil {
return nil, err
}
return dpl.Read(file)
}
func cslRecords(logger log.Logger, initialDir string) (*csl.CSL, error) {
file, err := csl.Download(logger, initialDir)
if err != nil {
logger.Log("download", "WARN: skipping CSL download", "description", err)
return &csl.CSL{}, nil
}
cslRecords, err := csl.Read(file)
if err != nil {
return nil, err
}
return cslRecords, err
}
// refreshData reaches out to the various websites to download the latest
// files, runs each list's parser, and index data for searches.
func (s *searcher) refreshData(initialDir string) (*downloadStats, error) {
if s.logger != nil {
s.logger.Log("download", "Starting refresh of data")
if initialDir != "" {
s.logger.Log("download", fmt.Sprintf("reading files from %s", initialDir))
}
}
results, err := ofacRecords(s.logger, initialDir)
if err != nil {
lastDataRefreshFailure.WithLabelValues("SDNs").Set(float64(time.Now().Unix()))
return nil, fmt.Errorf("OFAC records: %v", err)
}
sdns := precomputeSDNs(results.SDNs, results.Addresses, s.pipe)
adds := precomputeAddresses(results.Addresses)
alts := precomputeAlts(results.AlternateIdentities)
deniedPersons, err := dplRecords(s.logger, initialDir)
if err != nil {
lastDataRefreshFailure.WithLabelValues("DPs").Set(float64(time.Now().Unix()))
return nil, fmt.Errorf("DPL records: %v", err)
}
dps := precomputeDPs(deniedPersons, s.pipe)
consolidatedLists, err := cslRecords(s.logger, initialDir)
if err != nil {
lastDataRefreshFailure.WithLabelValues("CSL").Set(float64(time.Now().Unix()))
return nil, fmt.Errorf("CSL records: %v", err)
}
ssis := precomputeSSIs(consolidatedLists.SSIs, s.pipe)
els := precomputeBISEntities(consolidatedLists.ELs, s.pipe)
stats := &downloadStats{
// OFAC
SDNs: len(sdns),
Alts: len(alts),
Addresses: len(adds),
SectoralSanctions: len(ssis),
// BIS
BISEntities: len(els),
DeniedPersons: len(dps),
}
stats.RefreshedAt = lastRefresh(initialDir)
// record prometheus metrics
lastDataRefreshCount.WithLabelValues("SDNs").Set(float64(len(sdns)))
lastDataRefreshCount.WithLabelValues("SSIs").Set(float64(len(ssis)))
lastDataRefreshCount.WithLabelValues("BISEntities").Set(float64(len(els)))
lastDataRefreshCount.WithLabelValues("DPs").Set(float64(len(dps)))
// Set new records after precomputation (to minimize lock contention)
s.Lock()
// OFAC
s.SDNs = sdns
s.Addresses = adds
s.Alts = alts
s.SSIs = ssis
// BIS
s.DPs = dps
s.BISEntities = els
// metadata
s.lastRefreshedAt = stats.RefreshedAt
s.Unlock()
if s.logger != nil {
s.logger.Log("download", "Finished refresh of data")
}
// record successful data refresh
lastDataRefreshSuccess.WithLabelValues().Set(float64(time.Now().Unix()))
return stats, nil
}
// lastRefresh returns a time.Time for the oldest file in dir or the current time if empty.
func lastRefresh(dir string) time.Time {
if dir == "" {
return time.Now()
}
infos, err := ioutil.ReadDir(dir)
if len(infos) == 0 || err != nil {
return time.Time{} // zero time because there's no initial data
}
oldest := infos[0].ModTime()
for i := range infos[1:] {
if t := infos[i+1].ModTime(); t.Before(oldest) { // i ranges over infos[1:], so offset by one
oldest = t
}
}
return oldest
}
func addDownloadRoutes(logger log.Logger, r *mux.Router, repo downloadRepository) {
r.Methods("GET").Path("/downloads").HandlerFunc(getLatestDownloads(logger, repo))
}
func getLatestDownloads(logger log.Logger, repo downloadRepository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w = wrapResponseWriter(logger, w, r)
limit := extractSearchLimit(r)
downloads, err := repo.latestDownloads(limit)
if err != nil {
moovhttp.Problem(w, err)
return
}
logger.Log(" | {
s.logger.Log(
"main", fmt.Sprintf("data refreshed %v ago", time.Since(stats.RefreshedAt)),
"SDNs", stats.SDNs, "AltNames", stats.Alts, "Addresses", stats.Addresses, "SSI", stats.SectoralSanctions,
"DPL", stats.DeniedPersons, "BISEntities", stats.BISEntities,
)
} | conditional_block |
download.go | etheus.GaugeOpts{
Name: "last_data_refresh_failure",
Help: "Unix timestamp of the most recent failure to refresh data",
}, []string{"source"})
lastDataRefreshCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "last_data_refresh_count",
Help: "Count of records for a given sanction or entity list",
}, []string{"source"})
)
func init() {
prometheus.MustRegister(lastDataRefreshSuccess)
prometheus.MustRegister(lastDataRefreshCount)
prometheus.MustRegister(lastDataRefreshFailure)
}
// Download holds counts for each type of list data parsed from files and a
// timestamp of when the download happened.
type Download struct {
Timestamp time.Time `json:"timestamp"`
// US Office of Foreign Assets Control (OFAC)
SDNs int `json:"SDNs"`
Alts int `json:"altNames"`
Addresses int `json:"addresses"`
SectoralSanctions int `json:"sectoralSanctions"`
// US Bureau of Industry and Security (BIS)
DeniedPersons int `json:"deniedPersons"`
BISEntities int `json:"bisEntities"`
}
type downloadStats struct {
// US Office of Foreign Assets Control (OFAC)
SDNs int `json:"SDNs"`
Alts int `json:"altNames"`
Addresses int `json:"addresses"`
SectoralSanctions int `json:"sectoralSanctions"`
// US Bureau of Industry and Security (BIS)
DeniedPersons int `json:"deniedPersons"`
BISEntities int `json:"bisEntities"`
RefreshedAt time.Time `json:"timestamp"`
}
// periodicDataRefresh will forever block for interval's duration and then download and reparse the data.
// Download stats are recorded as part of a successful re-download and parse.
func (s *searcher) periodicDataRefresh(interval time.Duration, downloadRepo downloadRepository, updates chan *downloadStats) {
if interval == 0*time.Second {
s.logger.Log("download", fmt.Sprintf("not scheduling periodic refreshing duration=%v", interval))
return
}
for {
time.Sleep(interval)
stats, err := s.refreshData("")
if err != nil {
if s.logger != nil {
s.logger.Log("main", fmt.Sprintf("ERROR: refreshing data: %v", err))
}
} else {
downloadRepo.recordStats(stats)
if s.logger != nil {
s.logger.Log(
"main", fmt.Sprintf("data refreshed %v ago", time.Since(stats.RefreshedAt)),
"SDNs", stats.SDNs, "AltNames", stats.Alts, "Addresses", stats.Addresses, "SSI", stats.SectoralSanctions,
"DPL", stats.DeniedPersons, "BISEntities", stats.BISEntities,
)
}
updates <- stats // send stats for re-search and watch notifications
}
}
}
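// --- Editor's sketch (not in the original file): one way main() might wire up
// the refresher. The interval value is the caller's choice.
func startRefresher(s *searcher, repo downloadRepository, interval time.Duration) chan *downloadStats {
	updates := make(chan *downloadStats) // consumers re-run searches / notify watchers
	go s.periodicDataRefresh(interval, repo, updates)
	return updates
}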
func ofacRecords(logger log.Logger, initialDir string) (*ofac.Results, error) {
files, err := ofac.Download(logger, initialDir)
if err != nil {
return nil, fmt.Errorf("download: %v", err)
}
if len(files) == 0 {
return nil, errors.New("no OFAC Results")
}
var res *ofac.Results
for i := range files {
if i == 0 {
rr, err := ofac.Read(files[i])
if err != nil {
return nil, fmt.Errorf("read: %v", err)
}
if rr != nil {
res = rr
}
} else {
rr, err := ofac.Read(files[i])
if err != nil {
return nil, fmt.Errorf("read and replace: %v", err)
}
if rr != nil {
res.Addresses = append(res.Addresses, rr.Addresses...)
res.AlternateIdentities = append(res.AlternateIdentities, rr.AlternateIdentities...)
res.SDNs = append(res.SDNs, rr.SDNs...)
res.SDNComments = append(res.SDNComments, rr.SDNComments...)
}
}
}
return res, err
}
func dplRecords(logger log.Logger, initialDir string) ([]*dpl.DPL, error) {
file, err := dpl.Download(logger, initialDir)
if err != nil {
return nil, err
}
return dpl.Read(file)
}
func cslRecords(logger log.Logger, initialDir string) (*csl.CSL, error) {
file, err := csl.Download(logger, initialDir)
if err != nil {
logger.Log("download", "WARN: skipping CSL download", "description", err)
return &csl.CSL{}, nil
}
cslRecords, err := csl.Read(file)
if err != nil {
return nil, err
}
return cslRecords, err
}
// refreshData reaches out to the various websites to download the latest
// files, runs each list's parser, and index data for searches.
func (s *searcher) refreshData(initialDir string) (*downloadStats, error) {
if s.logger != nil {
s.logger.Log("download", "Starting refresh of data")
if initialDir != "" {
s.logger.Log("download", fmt.Sprintf("reading files from %s", initialDir))
}
}
results, err := ofacRecords(s.logger, initialDir)
if err != nil {
lastDataRefreshFailure.WithLabelValues("SDNs").Set(float64(time.Now().Unix()))
return nil, fmt.Errorf("OFAC records: %v", err)
}
sdns := precomputeSDNs(results.SDNs, results.Addresses, s.pipe)
adds := precomputeAddresses(results.Addresses)
alts := precomputeAlts(results.AlternateIdentities)
deniedPersons, err := dplRecords(s.logger, initialDir)
if err != nil {
lastDataRefreshFailure.WithLabelValues("DPs").Set(float64(time.Now().Unix()))
return nil, fmt.Errorf("DPL records: %v", err)
}
dps := precomputeDPs(deniedPersons, s.pipe)
consolidatedLists, err := cslRecords(s.logger, initialDir)
if err != nil {
lastDataRefreshFailure.WithLabelValues("CSL").Set(float64(time.Now().Unix()))
return nil, fmt.Errorf("CSL records: %v", err)
}
ssis := precomputeSSIs(consolidatedLists.SSIs, s.pipe)
els := precomputeBISEntities(consolidatedLists.ELs, s.pipe)
stats := &downloadStats{
// OFAC
SDNs: len(sdns),
Alts: len(alts),
Addresses: len(adds),
SectoralSanctions: len(ssis),
// BIS
BISEntities: len(els),
DeniedPersons: len(dps),
}
stats.RefreshedAt = lastRefresh(initialDir)
// record prometheus metrics
lastDataRefreshCount.WithLabelValues("SDNs").Set(float64(len(sdns)))
lastDataRefreshCount.WithLabelValues("SSIs").Set(float64(len(ssis)))
lastDataRefreshCount.WithLabelValues("BISEntities").Set(float64(len(els)))
lastDataRefreshCount.WithLabelValues("DPs").Set(float64(len(dps)))
// Set new records after precomputation (to minimize lock contention)
s.Lock()
// OFAC
s.SDNs = sdns
s.Addresses = adds
s.Alts = alts
s.SSIs = ssis
// BIS
s.DPs = dps
s.BISEntities = els
// metadata
s.lastRefreshedAt = stats.RefreshedAt
s.Unlock()
if s.logger != nil {
s.logger.Log("download", "Finished refresh of data")
}
// record successful data refresh
lastDataRefreshSuccess.WithLabelValues().Set(float64(time.Now().Unix()))
return stats, nil
}
// lastRefresh returns a time.Time for the oldest file in dir or the current time if empty.
func lastRefresh(dir string) time.Time {
if dir == "" {
return time.Now()
}
infos, err := ioutil.ReadDir(dir)
if len(infos) == 0 || err != nil {
return time.Time{} // zero time because there's no initial data
}
oldest := infos[0].ModTime()
for i := range infos[1:] {
if t := infos[i+1].ModTime(); t.Before(oldest) { // i ranges over infos[1:], so offset by one
oldest = t
}
}
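// Editor's note: lastRefresh intentionally reports the *oldest* mtime, so the
// recorded refresh time is never newer than the stalest downloaded list.
// Hypothetical illustration:
//
//   data/sdn.csv modified 09:00  <- returned
//   data/dpl.txt modified 09:05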
return oldest
}
func addDownloadRoutes(logger log.Logger, r *mux.Router, repo downloadRepository) |
func getLatestDownloads(logger log.Logger, repo downloadRepository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w = wrapResponseWriter(logger, w, r)
limit := extractSearchLimit(r)
downloads, err := repo.latestDownloads(limit)
if err != nil {
moovhttp.Problem(w, err)
return
}
logger.Log(" | {
r.Methods("GET").Path("/downloads").HandlerFunc(getLatestDownloads(logger, repo))
} | identifier_body |
download.go | {
Timestamp time.Time `json:"timestamp"`
// US Office of Foreign Assets Control (OFAC)
SDNs int `json:"SDNs"`
Alts int `json:"altNames"`
Addresses int `json:"addresses"`
SectoralSanctions int `json:"sectoralSanctions"`
// US Bureau of Industry and Security (BIS)
DeniedPersons int `json:"deniedPersons"`
BISEntities int `json:"bisEntities"`
}
type downloadStats struct {
// US Office of Foreign Assets Control (OFAC)
SDNs int `json:"SDNs"`
Alts int `json:"altNames"`
Addresses int `json:"addresses"`
SectoralSanctions int `json:"sectoralSanctions"`
// US Bureau of Industry and Security (BIS)
DeniedPersons int `json:"deniedPersons"`
BISEntities int `json:"bisEntities"`
RefreshedAt time.Time `json:"timestamp"`
}
// periodicDataRefresh will forever block for interval's duration and then download and reparse the data.
// Download stats are recorded as part of a successful re-download and parse.
func (s *searcher) periodicDataRefresh(interval time.Duration, downloadRepo downloadRepository, updates chan *downloadStats) {
if interval == 0*time.Second {
s.logger.Log("download", fmt.Sprintf("not scheduling periodic refreshing duration=%v", interval))
return
}
for {
time.Sleep(interval)
stats, err := s.refreshData("")
if err != nil {
if s.logger != nil {
s.logger.Log("main", fmt.Sprintf("ERROR: refreshing data: %v", err))
}
} else {
downloadRepo.recordStats(stats)
if s.logger != nil {
s.logger.Log(
"main", fmt.Sprintf("data refreshed %v ago", time.Since(stats.RefreshedAt)),
"SDNs", stats.SDNs, "AltNames", stats.Alts, "Addresses", stats.Addresses, "SSI", stats.SectoralSanctions,
"DPL", stats.DeniedPersons, "BISEntities", stats.BISEntities,
)
}
updates <- stats // send stats for re-search and watch notifications
}
}
}
func ofacRecords(logger log.Logger, initialDir string) (*ofac.Results, error) {
files, err := ofac.Download(logger, initialDir)
if err != nil {
return nil, fmt.Errorf("download: %v", err)
}
if len(files) == 0 {
return nil, errors.New("no OFAC Results")
}
var res *ofac.Results
for i := range files {
if i == 0 {
rr, err := ofac.Read(files[i])
if err != nil {
return nil, fmt.Errorf("read: %v", err)
}
if rr != nil {
res = rr
}
} else {
rr, err := ofac.Read(files[i])
if err != nil {
return nil, fmt.Errorf("read and replace: %v", err)
}
if rr != nil {
res.Addresses = append(res.Addresses, rr.Addresses...)
res.AlternateIdentities = append(res.AlternateIdentities, rr.AlternateIdentities...)
res.SDNs = append(res.SDNs, rr.SDNs...)
res.SDNComments = append(res.SDNComments, rr.SDNComments...)
}
}
}
return res, err
}
func dplRecords(logger log.Logger, initialDir string) ([]*dpl.DPL, error) {
file, err := dpl.Download(logger, initialDir)
if err != nil {
return nil, err
}
return dpl.Read(file)
}
func cslRecords(logger log.Logger, initialDir string) (*csl.CSL, error) {
file, err := csl.Download(logger, initialDir)
if err != nil {
logger.Log("download", "WARN: skipping CSL download", "description", err)
return &csl.CSL{}, nil
}
cslRecords, err := csl.Read(file)
if err != nil {
return nil, err
}
return cslRecords, err
}
// refreshData reaches out to the various websites to download the latest
// files, runs each list's parser, and index data for searches.
func (s *searcher) refreshData(initialDir string) (*downloadStats, error) {
if s.logger != nil {
s.logger.Log("download", "Starting refresh of data")
if initialDir != "" {
s.logger.Log("download", fmt.Sprintf("reading files from %s", initialDir))
}
}
results, err := ofacRecords(s.logger, initialDir)
if err != nil {
lastDataRefreshFailure.WithLabelValues("SDNs").Set(float64(time.Now().Unix()))
return nil, fmt.Errorf("OFAC records: %v", err)
}
sdns := precomputeSDNs(results.SDNs, results.Addresses, s.pipe)
adds := precomputeAddresses(results.Addresses)
alts := precomputeAlts(results.AlternateIdentities)
deniedPersons, err := dplRecords(s.logger, initialDir)
if err != nil {
lastDataRefreshFailure.WithLabelValues("DPs").Set(float64(time.Now().Unix()))
return nil, fmt.Errorf("DPL records: %v", err)
}
dps := precomputeDPs(deniedPersons, s.pipe)
consolidatedLists, err := cslRecords(s.logger, initialDir)
if err != nil {
lastDataRefreshFailure.WithLabelValues("CSL").Set(float64(time.Now().Unix()))
return nil, fmt.Errorf("CSL records: %v", err)
}
ssis := precomputeSSIs(consolidatedLists.SSIs, s.pipe)
els := precomputeBISEntities(consolidatedLists.ELs, s.pipe)
stats := &downloadStats{
// OFAC
SDNs: len(sdns),
Alts: len(alts),
Addresses: len(adds),
SectoralSanctions: len(ssis),
// BIS
BISEntities: len(els),
DeniedPersons: len(dps),
}
stats.RefreshedAt = lastRefresh(initialDir)
// record prometheus metrics
lastDataRefreshCount.WithLabelValues("SDNs").Set(float64(len(sdns)))
lastDataRefreshCount.WithLabelValues("SSIs").Set(float64(len(ssis)))
lastDataRefreshCount.WithLabelValues("BISEntities").Set(float64(len(els)))
lastDataRefreshCount.WithLabelValues("DPs").Set(float64(len(dps)))
// Set new records after precomputation (to minimize lock contention)
s.Lock()
// OFAC
s.SDNs = sdns
s.Addresses = adds
s.Alts = alts
s.SSIs = ssis
// BIS
s.DPs = dps
s.BISEntities = els
// metadata
s.lastRefreshedAt = stats.RefreshedAt
s.Unlock()
if s.logger != nil {
s.logger.Log("download", "Finished refresh of data")
}
// record successful data refresh
lastDataRefreshSuccess.WithLabelValues().Set(float64(time.Now().Unix()))
return stats, nil
}
// lastRefresh returns a time.Time for the oldest file in dir or the current time if empty.
func lastRefresh(dir string) time.Time {
if dir == "" {
return time.Now()
}
infos, err := ioutil.ReadDir(dir)
if len(infos) == 0 || err != nil {
return time.Time{} // zero time because there's no initial data
}
oldest := infos[0].ModTime()
for i := range infos[1:] {
if t := infos[i+1].ModTime(); t.Before(oldest) { // i ranges over infos[1:], so offset by one
oldest = t
}
}
return oldest
}
func addDownloadRoutes(logger log.Logger, r *mux.Router, repo downloadRepository) {
r.Methods("GET").Path("/downloads").HandlerFunc(getLatestDownloads(logger, repo))
}
func getLatestDownloads(logger log.Logger, repo downloadRepository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w = wrapResponseWriter(logger, w, r)
limit := extractSearchLimit(r)
downloads, err := repo.latestDownloads(limit)
if err != nil {
moovhttp.Problem(w, err)
return
}
logger.Log("download", "get latest downloads", "requestID", moovhttp.GetRequestID(r))
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.WriteHeader(http.StatusOK)
if err := json.NewEncoder(w).Encode(downloads); err != nil {
moovhttp.Problem(w, err)
return
}
}
}
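// Editor's note: the route registered above answers GET /downloads. A
// hypothetical invocation (the port and limit are assumptions):
//
//   curl "http://localhost:8084/downloads?limit=5"
//
// which returns a JSON array of Download records.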
type downloadRepository interface {
latestDownloads(limit int) ([]Download, error)
recordStats(stats *downloadStats) error
}
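// --- Editor's sketch (hypothetical, e.g. for tests): an in-memory
// implementation of downloadRepository. Not part of this file.
type memDownloadRepository struct {
	downloads []Download
}

func (r *memDownloadRepository) latestDownloads(limit int) ([]Download, error) {
	if limit > len(r.downloads) {
		limit = len(r.downloads)
	}
	return r.downloads[:limit], nil
}

func (r *memDownloadRepository) recordStats(stats *downloadStats) error {
	d := Download{
		Timestamp:         stats.RefreshedAt,
		SDNs:              stats.SDNs,
		Alts:              stats.Alts,
		Addresses:         stats.Addresses,
		SectoralSanctions: stats.SectoralSanctions,
		DeniedPersons:     stats.DeniedPersons,
		BISEntities:       stats.BISEntities,
	}
	r.downloads = append([]Download{d}, r.downloads...) // newest first
	return nil
}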
type sqliteDownloadRepository struct {
db *sql.DB
logger log.Logger
}
func (r *sqliteDownloadRepository) close() error {
return r.db.Close()
}
func (r *sqliteDownloadRepository) | recordStats | identifier_name |
|
basics.py | # print('Hello World')
# myName = input("What's your name?\n")
# print("It is good to meet you, " + myName)
# print('The length of your name is: ')
# print(len(myName))
# myAge = input("What's your age?\n")
# print('you will be ' + str(int(myAge) + 1) + ' in a year.')
# Flow Controls
# example 1
# name = 'Msry'
# password = 'swordfish'
# if name == 'Mary':
# print('Hello Mary')
# if password == 'swordfish':
# print('Access Granted')
# else:
# print('Wrong Pass')
# else:
# print('Program Terminated!')
# example 2
# name = 'Alisce'
# age = 11
# if name == 'Alice':
# print('Hi Alice')
# elif age < 12:
# print('Not Allice')
# elif age > 2000:
# print('you are immortal')
# else:
# print('hello alice')
# example 3
# with if
import pprint
from random import randint, sample
import re
import sys
import random
from typing import Type
spam = 0
# if spam < 5:
# print ('Hello')
# spam = spam + 1
# with while
# while spam < 5:
# print('hello')
# spam = spam + 1
# name = ''
# while name != 'your name':
# print('Please type your name.')
# name = input()
# print('thanks')
# example 4
# while True:
# print('Who are you?')
# name = input()
# if name != 'Parth':
# continue
# print('Hello ' + name + ' What\'s your password')
# password = input()
# if password == 'swordfish':
# break
# print('Access Granted')
# example 5
# for loop
# print('My name is')
# for i in range(5):
# print('Parth five times (' + str(i) + ')')
# total = 0
# for num in range(101):
# total = total + num
# print (total)
# normal for loop Start - Stop
# for i in range(12, 16):
# for loop with Start - Stop - Step
# for i in range(0, 10, 2):
# for loop to count down
# for i in range(5, -1, -2):
# print(i)
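# Editor's sketch: the three range() forms above, active so the output is visible.
for i in range(12, 16):
    print(i, end=" ")   # 12 13 14 15
print()
for i in range(0, 10, 2):
    print(i, end=" ")   # 0 2 4 6 8
print()
for i in range(5, -1, -2):
    print(i, end=" ")   # 5 3 1
print()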
# importing modules
# import random
# for i in range(5):
# print(random.randint(1, 10))
# import sys
# while True:
# print('Type exit to exit.')
# response = input()
# if response == 'exit':
# sys.exit()
# print('You typed ' + response + '.')
# functions
# def hello():
# print('Howdy!')
# print('Hwdy!')
# print('Hello There')
# hello()
# hello()
# hello()
# with params
# def hello(name):
# print('Hello ' + name)
# hello('Alice')
# hello('Bob')
# def getAnswer(answerNum):
# if answerNum == 1:
# return 'It is certain'
# elif answerNum == 2:
# return 'It is decidedly so'
# elif answerNum == 3:
# return 'Yes'
# elif answerNum == 4:
# return 'Reply hazy try again'
# elif answerNum == 5:
# return 'Ask again later'
# elif answerNum == 6:
# return 'Concentrate and ask again'
# elif answerNum == 7:
# return 'My reply is no'
# elif answerNum == 8:
# return 'Outlook not so good'
# elif answerNum == 9:
# return 'very doubtful'
# r = random.randint(1, 9)
# print(r)
# fortune = getAnswer(r)
# print(fortune)
# test = None
# print(test)
# Exception handling
# def spam(divideBy):
# try:
# return 42 / divideBy
# except ZeroDivisionError:
# print('Error: Invalid Argument')
# print(spam(2))
# print(spam(12))
# print(spam(0))
# print(spam(1))
# Example: Guess my number game
# secretNum = random.randint(1, 20)
# print('I am thinking of a number between 1 and 20')
# # Ask player to guess 6 times
# for guessTaken in range(1, 7):
# guess = int(input('Take a guess: '))
# if guess < secretNum:
# print('Your guess is too low')
# elif guess > secretNum:
# print('Your guess is too high')
# else:
break # this condition is the correct guess
# if guess == secretNum:
# print('Good job! You guessed my number in ' +
# str(guessTaken) + ' guesses!')
# else:
# print('Nope. the number i was thinking of was ' + str(secretNum))
# Practice 1
# def collatz(number):
# if number % 2 == 0:
# print(number // 2)
# return number // 2
# elif number % 2 == 1:
# result = 3 * number + 1
# print(result)
# return result
# try:
# userInput = int(input("Enter Num: "))
# while (userInput != 1):
# userInput = collatz(int(userInput))
# except ValueError:
# print("Please enter integer")
# list
# catNames = []
# while True:
# print("Enter name of cat " + str(len(catNames) + 1) + '(or nothing to stop.)')
# name = input()
# if name == '':
# break
# catNames = catNames + [name]
# print('The cat names are: ')
# for name in catNames:
# print(' ' + name)
someList = ['cat', 'dog', 'mat']
# for someVal in range(len(someList)):
# print('Index ' + str(someVal) + ' in someList is: ' + someList[someVal])
# 'in' and 'not in'
# 'howdy' in ['hello', 'hi', 'howdy', 'heyas']
# 'hey' not in ['hello', 'hi', 'howdy', 'heyas']
# myPets = ['jacob', 'john', 'lucifer']
# print("Enter a pet name: ")
# name = input()
# if name not in myPets:
# print('No pet named ' + name)
# else:
# print(name + ' is my pet')
# dog = ['fat', 'black', 'loud']
# size, color, feature = dog
# print(dog)
# Magic 8 Ball with a list
# messages = ['It is certain',
# 'It is decidedly so',
# 'Yes definitely',
# 'Reply hazy try again',
# 'Ask again later',
# 'Concentrate and ask again',
# 'My reply is no',
# 'Outlook not so good',
# 'Very doubtful']
# print(messages[random.randint(0, len(messages) - 1)])
# passing references
# def eggs(params):
# params.append('Hello')
# spam = [1, 2, 3]
# eggs(spam)
# print(spam)
# spam = [['A', 'B', 'C', 'D'], ['a', 'b', 'c', 'd']]
# cheese = copy.deepcopy(spam)
# # cheese[1] = 42
# print(spam)
# print(cheese)
# spam = ['a', 'b', 'c', 'd']
# print(spam[int(int('3' * 2) / 11)])
# print(spam[:2])
# bacon = [3.14, 'cat', 11, 'cat', True]
# print(bacon.remove('cat'))
# print(bacon)
# list1 = [1, 2, 3]
# list2 = [4, 5, 6]
# list3 = list1 + list1
# list3 = list1 * 3  # sequence repetition takes an int, not another list
# print(list3)
# practice of list
# spam = ['apples', 'bananas', 'tofu', 'cat']
# newSpam = [1, 3, 'hello']
# def makeStringFromList(aList):
# for i in aList:
# print(i, end=", ")
# makeStringFromList(newSpam)
# grid = [['.', '.', '.', '.', '.', '.'],
# ['.', 'O', 'O', '.', '.', '.'],
# ['O', 'O', 'O', 'O', '.', '.'],
# ['O', 'O', 'O', 'O', 'O', '.'],
# ['.', 'O', 'O', 'O', 'O', 'O'],
# ['O', 'O', 'O', 'O | random_line_split |
||
profile.pb.go | images" json:"images,omitempty"`
}
func (m *Hotel) Reset() { *m = Hotel{} }
func (m *Hotel) String() string { return proto.CompactTextString(m) }
func (*Hotel) ProtoMessage() {}
func (*Hotel) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Hotel) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Hotel) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Hotel) GetPhoneNumber() string {
if m != nil {
return m.PhoneNumber
}
return ""
}
func (m *Hotel) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *Hotel) GetAddress() *Address {
if m != nil {
return m.Address
}
return nil
}
func (m *Hotel) GetImages() []*Image {
if m != nil {
return m.Images
}
return nil
}
type Address struct {
StreetNumber string `protobuf:"bytes,1,opt,name=streetNumber" json:"streetNumber,omitempty"`
StreetName string `protobuf:"bytes,2,opt,name=streetName" json:"streetName,omitempty"`
City string `protobuf:"bytes,3,opt,name=city" json:"city,omitempty"`
State string `protobuf:"bytes,4,opt,name=state" json:"state,omitempty"`
Country string `protobuf:"bytes,5,opt,name=country" json:"country,omitempty"`
PostalCode string `protobuf:"bytes,6,opt,name=postalCode" json:"postalCode,omitempty"`
}
func (m *Address) Reset() { *m = Address{} }
func (m *Address) String() string { return proto.CompactTextString(m) }
func (*Address) ProtoMessage() {}
func (*Address) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *Address) GetStreetNumber() string {
if m != nil {
return m.StreetNumber
}
return ""
}
func (m *Address) GetStreetName() string {
if m != nil {
return m.StreetName
}
return ""
}
func (m *Address) GetCity() string {
if m != nil {
return m.City
}
return ""
}
func (m *Address) GetState() string {
if m != nil {
return m.State
}
return ""
}
func (m *Address) GetCountry() string {
if m != nil {
return m.Country
}
return ""
}
func (m *Address) GetPostalCode() string {
if m != nil {
return m.PostalCode
}
return ""
}
type Image struct {
Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
Default bool `protobuf:"varint,2,opt,name=default" json:"default,omitempty"`
}
func (m *Image) Reset() { *m = Image{} }
func (m *Image) String() string { return proto.CompactTextString(m) }
func (*Image) ProtoMessage() {}
func (*Image) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Image) GetUrl() string {
if m != nil {
return m.Url
}
return ""
}
func (m *Image) GetDefault() bool {
if m != nil {
return m.Default
}
return false
}
func init() {
proto.RegisterType((*Request)(nil), "profile.Request")
proto.RegisterType((*Result)(nil), "profile.Result")
proto.RegisterType((*Hotel)(nil), "profile.Hotel")
proto.RegisterType((*Address)(nil), "profile.Address")
proto.RegisterType((*Image)(nil), "profile.Image")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Profile service
type ProfileClient interface {
GetProfiles(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Result, error)
}
type profileClient struct {
cc *grpc.ClientConn
}
func | (cc *grpc.ClientConn) ProfileClient {
return &profileClient{cc}
}
func (c *profileClient) GetProfiles(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Result, error) {
out := new(Result)
err := grpc.Invoke(ctx, "/profile.Profile/GetProfiles", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Profile service
type ProfileServer interface {
GetProfiles(context.Context, *Request) (*Result, error)
}
func RegisterProfileServer(s *grpc.Server, srv ProfileServer) {
s.RegisterService(&_Profile_serviceDesc, srv)
}
func _Profile_GetProfiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Request)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ProfileServer).GetProfiles(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/profile.Profile/GetProfiles",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ProfileServer).GetProfiles(ctx, req.(*Request))
}
return interceptor(ctx, in, info, handler)
}
var _Profile_serviceDesc = grpc.ServiceDesc{
ServiceName: "profile.Profile",
HandlerType: (*ProfileServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetProfiles",
Handler: _Profile_GetProfiles_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "github.com/micro/examples/booking/srv/profile/proto/profile.proto",
}
func init() {
proto.RegisterFile("github.com/micro/examples/booking/srv/profile/proto/profile.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 397 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x92, 0xc1, 0x6e, 0xd4, 0x30,
0x10, 0x86, 0x95, 0xdd, 0x6e, 0xd2, 0x9d, 0x45, 0xa5, 0x1a, 0x21, 0x64, 0xf5, 0x80, 0x56, 0x39,
0xa0, 0x15, 0x87, 0x4d, 0xb5, 0x3d, 0x22, 0x0e, 0x15, 0x07, 0xe8, 0x05, 0x21, 0xbf, 0x81, 0x13,
0x4f, 0x77, 0x2d, 0x9c, 0x38, 0xd8, 0x0e, 0xa2, 0x8f, 0xc5, 0x33, 0xf0, 0x62, 0xc8, 0x8e, 0xd3,
0x0d, 0x3d, 0x79, 0xfe, 0x6f, 0xc6, 0x9e, 0xf9, 0xad, 0x81, 0xfb, 0xa3, 0xf2, 0xa7, 0xa1, 0xde,
0x37, 0xa6, 0xad, 0x5a, 0xd5, 0x58, 0x53, 0xd1, 0x6f, 0xd1, 0xf6, 0x9a, 0x5c, 0x55, 0x1b, 0xf3,
0x43, 0x75, 0xc7, 0xca, 0xd9, 0x5f, 0x55, 0x6f, 0xcd, 0xa3, 0xd2, 0x14, 0x4e, 0x6f, 0x26, 0xb5,
0x8f, 0x0a, 0x8b, 0x24, 0xcb, 0x4f, 0x50, 0x70, 0 | NewProfileClient | identifier_name |
profile.pb.go | images" json:"images,omitempty"`
}
func (m *Hotel) Reset() { *m = Hotel{} }
func (m *Hotel) String() string { return proto.CompactTextString(m) }
func (*Hotel) ProtoMessage() {}
func (*Hotel) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Hotel) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Hotel) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Hotel) GetPhoneNumber() string {
if m != nil {
return m.PhoneNumber
}
return ""
}
func (m *Hotel) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *Hotel) GetAddress() *Address {
if m != nil {
return m.Address
}
return nil
}
func (m *Hotel) GetImages() []*Image {
if m != nil {
return m.Images
}
return nil
}
type Address struct {
StreetNumber string `protobuf:"bytes,1,opt,name=streetNumber" json:"streetNumber,omitempty"`
StreetName string `protobuf:"bytes,2,opt,name=streetName" json:"streetName,omitempty"`
City string `protobuf:"bytes,3,opt,name=city" json:"city,omitempty"`
State string `protobuf:"bytes,4,opt,name=state" json:"state,omitempty"`
Country string `protobuf:"bytes,5,opt,name=country" json:"country,omitempty"`
PostalCode string `protobuf:"bytes,6,opt,name=postalCode" json:"postalCode,omitempty"`
}
func (m *Address) Reset() { *m = Address{} }
func (m *Address) String() string { return proto.CompactTextString(m) }
func (*Address) ProtoMessage() {}
func (*Address) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *Address) GetStreetNumber() string {
if m != nil {
return m.StreetNumber
}
return ""
}
func (m *Address) GetStreetName() string {
if m != nil {
return m.StreetName
}
return ""
}
func (m *Address) GetCity() string {
if m != nil {
return m.City
}
return ""
}
func (m *Address) GetState() string {
if m != nil {
return m.State
}
return ""
}
func (m *Address) GetCountry() string {
if m != nil {
return m.Country
}
return ""
}
func (m *Address) GetPostalCode() string {
if m != nil {
return m.PostalCode
}
return ""
}
type Image struct {
Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
Default bool `protobuf:"varint,2,opt,name=default" json:"default,omitempty"`
}
func (m *Image) Reset() { *m = Image{} }
func (m *Image) String() string { return proto.CompactTextString(m) }
func (*Image) ProtoMessage() {}
func (*Image) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Image) GetUrl() string {
if m != nil {
return m.Url
}
return ""
}
func (m *Image) GetDefault() bool {
if m != nil {
return m.Default
}
return false
}
func init() {
proto.RegisterType((*Request)(nil), "profile.Request")
proto.RegisterType((*Result)(nil), "profile.Result")
proto.RegisterType((*Hotel)(nil), "profile.Hotel")
proto.RegisterType((*Address)(nil), "profile.Address")
proto.RegisterType((*Image)(nil), "profile.Image")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Profile service
type ProfileClient interface {
GetProfiles(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Result, error)
}
| return &profileClient{cc}
}
func (c *profileClient) GetProfiles(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Result, error) {
out := new(Result)
err := grpc.Invoke(ctx, "/profile.Profile/GetProfiles", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Profile service
type ProfileServer interface {
GetProfiles(context.Context, *Request) (*Result, error)
}
func RegisterProfileServer(s *grpc.Server, srv ProfileServer) {
s.RegisterService(&_Profile_serviceDesc, srv)
}
func _Profile_GetProfiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Request)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ProfileServer).GetProfiles(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/profile.Profile/GetProfiles",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ProfileServer).GetProfiles(ctx, req.(*Request))
}
return interceptor(ctx, in, info, handler)
}
var _Profile_serviceDesc = grpc.ServiceDesc{
ServiceName: "profile.Profile",
HandlerType: (*ProfileServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetProfiles",
Handler: _Profile_GetProfiles_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "github.com/micro/examples/booking/srv/profile/proto/profile.proto",
}
func init() {
proto.RegisterFile("github.com/micro/examples/booking/srv/profile/proto/profile.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 397 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x92, 0xc1, 0x6e, 0xd4, 0x30,
0x10, 0x86, 0x95, 0xdd, 0x6e, 0xd2, 0x9d, 0x45, 0xa5, 0x1a, 0x21, 0x64, 0xf5, 0x80, 0x56, 0x39,
0xa0, 0x15, 0x87, 0x4d, 0xb5, 0x3d, 0x22, 0x0e, 0x15, 0x07, 0xe8, 0x05, 0x21, 0xbf, 0x81, 0x13,
0x4f, 0x77, 0x2d, 0x9c, 0x38, 0xd8, 0x0e, 0xa2, 0x8f, 0xc5, 0x33, 0xf0, 0x62, 0xc8, 0x8e, 0xd3,
0x0d, 0x3d, 0x79, 0xfe, 0x6f, 0xc6, 0x9e, 0xf9, 0xad, 0x81, 0xfb, 0xa3, 0xf2, 0xa7, 0xa1, 0xde,
0x37, 0xa6, 0xad, 0x5a, 0xd5, 0x58, 0x53, 0xd1, 0x6f, 0xd1, 0xf6, 0x9a, 0x5c, 0x55, 0x1b, 0xf3,
0x43, 0x75, 0xc7, 0xca, 0xd9, 0x5f, 0x55, 0x6f, 0xcd, 0xa3, 0xd2, 0x14, 0x4e, 0x6f, 0x26, 0xb5,
0x8f, 0x0a, 0x8b, 0x24, 0xcb, 0x4f, 0x50, 0x70, 0xfa | type profileClient struct {
cc *grpc.ClientConn
}
func NewProfileClient(cc *grpc.ClientConn) ProfileClient { | random_line_split |
profile.pb.go | images" json:"images,omitempty"`
}
func (m *Hotel) Reset() { *m = Hotel{} }
func (m *Hotel) String() string { return proto.CompactTextString(m) }
func (*Hotel) ProtoMessage() {}
func (*Hotel) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Hotel) GetId() string {
if m != nil |
return ""
}
func (m *Hotel) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Hotel) GetPhoneNumber() string {
if m != nil {
return m.PhoneNumber
}
return ""
}
func (m *Hotel) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *Hotel) GetAddress() *Address {
if m != nil {
return m.Address
}
return nil
}
func (m *Hotel) GetImages() []*Image {
if m != nil {
return m.Images
}
return nil
}
type Address struct {
StreetNumber string `protobuf:"bytes,1,opt,name=streetNumber" json:"streetNumber,omitempty"`
StreetName string `protobuf:"bytes,2,opt,name=streetName" json:"streetName,omitempty"`
City string `protobuf:"bytes,3,opt,name=city" json:"city,omitempty"`
State string `protobuf:"bytes,4,opt,name=state" json:"state,omitempty"`
Country string `protobuf:"bytes,5,opt,name=country" json:"country,omitempty"`
PostalCode string `protobuf:"bytes,6,opt,name=postalCode" json:"postalCode,omitempty"`
}
func (m *Address) Reset() { *m = Address{} }
func (m *Address) String() string { return proto.CompactTextString(m) }
func (*Address) ProtoMessage() {}
func (*Address) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *Address) GetStreetNumber() string {
if m != nil {
return m.StreetNumber
}
return ""
}
func (m *Address) GetStreetName() string {
if m != nil {
return m.StreetName
}
return ""
}
func (m *Address) GetCity() string {
if m != nil {
return m.City
}
return ""
}
func (m *Address) GetState() string {
if m != nil {
return m.State
}
return ""
}
func (m *Address) GetCountry() string {
if m != nil {
return m.Country
}
return ""
}
func (m *Address) GetPostalCode() string {
if m != nil {
return m.PostalCode
}
return ""
}
type Image struct {
Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
Default bool `protobuf:"varint,2,opt,name=default" json:"default,omitempty"`
}
func (m *Image) Reset() { *m = Image{} }
func (m *Image) String() string { return proto.CompactTextString(m) }
func (*Image) ProtoMessage() {}
func (*Image) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Image) GetUrl() string {
if m != nil {
return m.Url
}
return ""
}
func (m *Image) GetDefault() bool {
if m != nil {
return m.Default
}
return false
}
func init() {
proto.RegisterType((*Request)(nil), "profile.Request")
proto.RegisterType((*Result)(nil), "profile.Result")
proto.RegisterType((*Hotel)(nil), "profile.Hotel")
proto.RegisterType((*Address)(nil), "profile.Address")
proto.RegisterType((*Image)(nil), "profile.Image")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Profile service
type ProfileClient interface {
GetProfiles(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Result, error)
}
type profileClient struct {
cc *grpc.ClientConn
}
func NewProfileClient(cc *grpc.ClientConn) ProfileClient {
return &profileClient{cc}
}
func (c *profileClient) GetProfiles(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Result, error) {
out := new(Result)
err := grpc.Invoke(ctx, "/profile.Profile/GetProfiles", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Profile service
type ProfileServer interface {
GetProfiles(context.Context, *Request) (*Result, error)
}
func RegisterProfileServer(s *grpc.Server, srv ProfileServer) {
s.RegisterService(&_Profile_serviceDesc, srv)
}
func _Profile_GetProfiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Request)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ProfileServer).GetProfiles(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/profile.Profile/GetProfiles",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ProfileServer).GetProfiles(ctx, req.(*Request))
}
return interceptor(ctx, in, info, handler)
}
var _Profile_serviceDesc = grpc.ServiceDesc{
ServiceName: "profile.Profile",
HandlerType: (*ProfileServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetProfiles",
Handler: _Profile_GetProfiles_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "github.com/micro/examples/booking/srv/profile/proto/profile.proto",
}
func init() {
proto.RegisterFile("github.com/micro/examples/booking/srv/profile/proto/profile.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 397 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x92, 0xc1, 0x6e, 0xd4, 0x30,
0x10, 0x86, 0x95, 0xdd, 0x6e, 0xd2, 0x9d, 0x45, 0xa5, 0x1a, 0x21, 0x64, 0xf5, 0x80, 0x56, 0x39,
0xa0, 0x15, 0x87, 0x4d, 0xb5, 0x3d, 0x22, 0x0e, 0x15, 0x07, 0xe8, 0x05, 0x21, 0xbf, 0x81, 0x13,
0x4f, 0x77, 0x2d, 0x9c, 0x38, 0xd8, 0x0e, 0xa2, 0x8f, 0xc5, 0x33, 0xf0, 0x62, 0xc8, 0x8e, 0xd3,
0x0d, 0x3d, 0x79, 0xfe, 0x6f, 0xc6, 0x9e, 0xf9, 0xad, 0x81, 0xfb, 0xa3, 0xf2, 0xa7, 0xa1, 0xde,
0x37, 0xa6, 0xad, 0x5a, 0xd5, 0x58, 0x53, 0xd1, 0x6f, 0xd1, 0xf6, 0x9a, 0x5c, 0x55, 0x1b, 0xf3,
0x43, 0x75, 0xc7, 0xca, 0xd9, 0x5f, 0x55, 0x6f, 0xcd, 0xa3, 0xd2, 0x14, 0x4e, 0x6f, 0x26, 0xb5,
0x8f, 0x0a, 0x8b, 0x24, 0xcb, 0x4f, 0x50, 0x70, | {
return m.Id
} | conditional_block |
profile.pb.go | images" json:"images,omitempty"`
}
func (m *Hotel) Reset() { *m = Hotel{} }
func (m *Hotel) String() string { return proto.CompactTextString(m) }
func (*Hotel) ProtoMessage() {}
func (*Hotel) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Hotel) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Hotel) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Hotel) GetPhoneNumber() string {
if m != nil {
return m.PhoneNumber
}
return ""
}
func (m *Hotel) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *Hotel) GetAddress() *Address {
if m != nil {
return m.Address
}
return nil
}
func (m *Hotel) GetImages() []*Image {
if m != nil {
return m.Images
}
return nil
}
type Address struct {
StreetNumber string `protobuf:"bytes,1,opt,name=streetNumber" json:"streetNumber,omitempty"`
StreetName string `protobuf:"bytes,2,opt,name=streetName" json:"streetName,omitempty"`
City string `protobuf:"bytes,3,opt,name=city" json:"city,omitempty"`
State string `protobuf:"bytes,4,opt,name=state" json:"state,omitempty"`
Country string `protobuf:"bytes,5,opt,name=country" json:"country,omitempty"`
PostalCode string `protobuf:"bytes,6,opt,name=postalCode" json:"postalCode,omitempty"`
}
func (m *Address) Reset() { *m = Address{} }
func (m *Address) String() string { return proto.CompactTextString(m) }
func (*Address) ProtoMessage() {}
func (*Address) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *Address) GetStreetNumber() string {
if m != nil {
return m.StreetNumber
}
return ""
}
func (m *Address) GetStreetName() string {
if m != nil {
return m.StreetName
}
return ""
}
func (m *Address) GetCity() string |
func (m *Address) GetState() string {
if m != nil {
return m.State
}
return ""
}
func (m *Address) GetCountry() string {
if m != nil {
return m.Country
}
return ""
}
func (m *Address) GetPostalCode() string {
if m != nil {
return m.PostalCode
}
return ""
}
type Image struct {
Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
Default bool `protobuf:"varint,2,opt,name=default" json:"default,omitempty"`
}
func (m *Image) Reset() { *m = Image{} }
func (m *Image) String() string { return proto.CompactTextString(m) }
func (*Image) ProtoMessage() {}
func (*Image) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Image) GetUrl() string {
if m != nil {
return m.Url
}
return ""
}
func (m *Image) GetDefault() bool {
if m != nil {
return m.Default
}
return false
}
func init() {
proto.RegisterType((*Request)(nil), "profile.Request")
proto.RegisterType((*Result)(nil), "profile.Result")
proto.RegisterType((*Hotel)(nil), "profile.Hotel")
proto.RegisterType((*Address)(nil), "profile.Address")
proto.RegisterType((*Image)(nil), "profile.Image")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Profile service
type ProfileClient interface {
GetProfiles(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Result, error)
}
type profileClient struct {
cc *grpc.ClientConn
}
func NewProfileClient(cc *grpc.ClientConn) ProfileClient {
return &profileClient{cc}
}
func (c *profileClient) GetProfiles(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Result, error) {
out := new(Result)
err := grpc.Invoke(ctx, "/profile.Profile/GetProfiles", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Profile service
type ProfileServer interface {
GetProfiles(context.Context, *Request) (*Result, error)
}
func RegisterProfileServer(s *grpc.Server, srv ProfileServer) {
s.RegisterService(&_Profile_serviceDesc, srv)
}
func _Profile_GetProfiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Request)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ProfileServer).GetProfiles(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/profile.Profile/GetProfiles",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ProfileServer).GetProfiles(ctx, req.(*Request))
}
return interceptor(ctx, in, info, handler)
}
var _Profile_serviceDesc = grpc.ServiceDesc{
ServiceName: "profile.Profile",
HandlerType: (*ProfileServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetProfiles",
Handler: _Profile_GetProfiles_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "github.com/micro/examples/booking/srv/profile/proto/profile.proto",
}
func init() {
proto.RegisterFile("github.com/micro/examples/booking/srv/profile/proto/profile.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 397 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x92, 0xc1, 0x6e, 0xd4, 0x30,
0x10, 0x86, 0x95, 0xdd, 0x6e, 0xd2, 0x9d, 0x45, 0xa5, 0x1a, 0x21, 0x64, 0xf5, 0x80, 0x56, 0x39,
0xa0, 0x15, 0x87, 0x4d, 0xb5, 0x3d, 0x22, 0x0e, 0x15, 0x07, 0xe8, 0x05, 0x21, 0xbf, 0x81, 0x13,
0x4f, 0x77, 0x2d, 0x9c, 0x38, 0xd8, 0x0e, 0xa2, 0x8f, 0xc5, 0x33, 0xf0, 0x62, 0xc8, 0x8e, 0xd3,
0x0d, 0x3d, 0x79, 0xfe, 0x6f, 0xc6, 0x9e, 0xf9, 0xad, 0x81, 0xfb, 0xa3, 0xf2, 0xa7, 0xa1, 0xde,
0x37, 0xa6, 0xad, 0x5a, 0xd5, 0x58, 0x53, 0xd1, 0x6f, 0xd1, 0xf6, 0x9a, 0x5c, 0x55, 0x1b, 0xf3,
0x43, 0x75, 0xc7, 0xca, 0xd9, 0x5f, 0x55, 0x6f, 0xcd, 0xa3, 0xd2, 0x14, 0x4e, 0x6f, 0x26, 0xb5,
0x8f, 0x0a, 0x8b, 0x24, 0xcb, 0x4f, 0x50, 0x70, | {
if m != nil {
return m.City
}
return ""
} | identifier_body |
default.py | True
"""
publicaciones = LOAD(r=request,c='default',f='publicaciones.load',args=request.args,ajax=True,content='Cargando bloques...')
return dict(publicaciones=publicaciones)
#return dict()
#@cache(request.env.path_info, time_expire=150, cache_model=cache.disk)
def publicaciones():
i | .ajax: return ''
from gluon.tools import prettydate
from datetime import datetime
if request.args:
catslug_data = db(db.categoria.slug == request.args(0)).select(db.categoria.slug)
for cat in catslug_data:
catslug = cat.slug
else:
catslug = 'noticias'
publicaciones = DIV()
# obteniendo los feeds categorizados bajo el slug solicitado desde la url
### 1 categoría por feed
"""
for feedincat in db((db.categoria.slug == catslug) & (db.feed.categoria == db.categoria.id)
#& (db.feed_categoria.feed == db.feed.id)
#& (db.feed_categoria.is_active == True)
& (db.feed.is_active == True)
& (db.categoria.is_active == True)
).select(db.feed.ALL):
"""
feedincat_data = db((db.categoria.slug == catslug)
& (db.feed.categoria == db.categoria.id)
& (db.feed.is_active == True)
& (db.categoria.is_active == True)
).select(db.feed.id,db.feed.title,db.feed.source)
for feedincat in feedincat_data:
# armando feed_bloque y la noticia de cada feed
feedbox = DIV(DIV(A(feedincat.title,_href=feedincat.source,_target='_blank',_class='ui-widget-header-a'), _class = 'feed_titulo ui-widget-header ui-corner-all'), _class = 'feedbox feed_bloque izq ui-widget ui-corner-all')
for n in db(db.noticia.feed == feedincat.id).select(db.noticia.ALL, orderby =~ db.noticia.id, limitby=(0,4)):
try:
actualizado = datetime.strptime(str(n.updated),'%Y-%m-%d %H:%M:%S')
except:
actualizado = n.created_on
# armando la url que va en el rss
#localurl = 'http://' + request.env.http_host + URL(c = 'default', f = 'blog.html', args = [n.slug,n.id], extension='html')
# armando el título y enlace a la publicación; armando los bloques de publicación
feedbox.append(DIV(DIV(A(n.title.lower()+'...', _name = n.slug,
_href = URL(r = request, f = 'blog', args = [catslug,n.slug,n.id], extension=False),
_class = 'noticia_link ui-widget-content-a', _target='_blank',extension='html'),
DIV(prettydate(actualizado,T),
_class='noticia_meta'),
_class = 'noticia_contenido ui-widget-content ui-corner-all'),
_class = 'noticia ui-widget ui-corner-all')
)
#entradas.append(dict(title =unicode(n.title,'utf8'), link = localurl, description = unicode('%s (%s)' % (n.description, n.feed.title),'utf8'), created_on = request.now))
publicaciones.append(feedbox)
response.js = XML('''function filtro(){
jQuery("#filtrando").keyup(function () {
var filter = jQuery(this).val(), count = 0;
jQuery(".feedbox .noticia, .feed_titulo").each(function () {
if (jQuery(this).text().search(new RegExp(filter, "i")) < 0) {
jQuery(this).addClass("hidden");
} else {
jQuery(this).removeClass("hidden");
count++;
}
});
jQuery("#filtrado").text(count);
});
}
jQuery(document).ready(filtro);
''')
d = dict(publicaciones=publicaciones)
return response.render(d)
#return dict(publicaciones=publicaciones)
def elimina_tildes(s):
"""
Esta función sirve para eliminar las tildes del string que
se le pase como parámetro.
"""
import unicodedata
normalizado = ''.join((c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn'))
return str(normalizado)
#@cache(request.env.path_info, time_expire=1200, cache_model=cache.disk)
def blog():
if request.extension!='html':
request.extension = 'html'
if not request.args:
redirect(URL('default','index.html'))
response.files.append(URL('static','css/blog.css'))
#response.files.append(URL('static','js/jquery.iframe.js'))
catslug = request.args(0)
slugnoticia = request.args(1) #para mostrar la noticia en la url; SEO
nid = request.args(2)
#nid = int(request.args[len(request.args)-1])
#titulo = db.noticia[nid].title
#print(type(nid))
titulo = slugnoticia.replace('-',' ')
categoria = catslug
response.title='%s: %s' % (categoria.capitalize(),titulo.capitalize())
#response.meta.description = '%s %s' % (response.title,db.noticia[nid].feed.title)
if db.noticia(nid):
shorturl = db.noticia(nid).shorturl
else:
shorturl = 'http://lmddgtfy.net/?q=%s, %s' % (request.args(0).title().replace('-',' '),request.args(1).title().replace('-',' '))
if 'http://lmddgtfy' in shorturl:
response.flash = 'El enlace se ha perdido. Te dirigiré a una búsqueda PRIVADA usando DuckDuckGo.com. Disculpa las molestias.'
if request.env.http_referer!=None:
goback = A(SPAN(_class = 'icon leftarrow'), 'Regresar', _title='Volver a la página anterior', _class = 'pill button izq',
_href = request.env.http_referer)
else:
goback = A(SPAN(_class = 'icon home'), 'Blogchile.cl', _class = 'positive primary button izq',
_href = 'http://blogchile.cl/')
cerrarmarco = A(SPAN(_class = 'icon rightarrow'), 'Ir al Blog', _class = 'pill negative button der', _href = shorturl, _title='Cerrar este marco y visitar el artículo en el blog de su fuente original')
referer = goback
#referer = DIV(goback, class='izq')
#go = DIV(IFRAME(_src = shorturl, _style = 'height:90%;width:inherit;border:0;'), _id = 'godiv', _style = 'display:block;height:100%;width:100%;')
blog = IFRAME(_src = shorturl, _id='blogiframe', _style='width:inherit;border:0;')
d = dict(blog=blog,shorturl=shorturl,referer=referer,cerrarmarco=cerrarmarco)
return response.render(d)
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
"""
return dict(form = auth())
def sitemap():
del response.headers['Cache-Control']
del response.headers['Pragma']
del response.headers['Expires']
response.headers['Cache-Control'] = 'max-age=300'
if request.extension == 'xml':
sm = [str('<?xml version="1.0" encoding="UTF-8" ?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">')]
prefix = request.env.wsgi_url_scheme+'://'+request.env.http_host
for cat in db((db.categoria.id>0) & (db.categoria.is_active == True)).select(db.categoria.id,db.categoria.title,db.categoria.slug):
sm.append(str(TAG.url(
TAG.loc(prefix,URL(r=request,c='default',f='index.html',args=[cat.slug])),
TAG.changefreq('always')
)))
sm.append(str(TAG.url(
TAG.loc(prefix,URL(r=request,c='default',f='feed.rss',args=[cat.slug])),
TAG.changefreq('always')
)))
sm.append('</urlset>')
return sm
elif request.extension == 'html':
#response.view = 'plantilla.html'
sm = DIV(_id='sitemap')
for cat in db((db.categoria.id>0) & (db.categoria.is_active==True)).select(db.categoria.id,db.categoria.title,db.categoria.slug):
| f not request | identifier_name |
default.py | jQuery("#filtrado").text(count);
});
}
jQuery(document).ready(filtro);
''')
d = dict(publicaciones=publicaciones)
return response.render(d)
#return dict(publicaciones=publicaciones)
def elimina_tildes(s):
"""
Esta función sirve para eliminar las tildes del string que
se le pase como parámetro.
"""
import unicodedata
normalizado = ''.join((c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn'))
return str(normalizado)
#@cache(request.env.path_info, time_expire=1200, cache_model=cache.disk)
def blog():
if request.extension!='html':
request.extension = 'html'
if not request.args:
redirect(URL('default','index.html'))
response.files.append(URL('static','css/blog.css'))
#response.files.append(URL('static','js/jquery.iframe.js'))
catslug = request.args(0)
slugnoticia = request.args(1) #para mostrar la noticia en la url; SEO
nid = request.args(2)
#nid = int(request.args[len(request.args)-1])
#titulo = db.noticia[nid].title
#print(type(nid))
titulo = slugnoticia.replace('-',' ')
categoria = catslug
response.title='%s: %s' % (categoria.capitalize(),titulo.capitalize())
#response.meta.description = '%s %s' % (response.title,db.noticia[nid].feed.title)
if db.noticia(nid):
shorturl = db.noticia(nid).shorturl
else:
shorturl = 'http://lmddgtfy.net/?q=%s, %s' % (request.args(0).title().replace('-',' '),request.args(1).title().replace('-',' '))
if 'http://lmddgtfy' in shorturl:
response.flash = 'El enlace se ha perdido. Te dirigiré a una búsqueda PRIVADA usando DuckDuckGo.com. Disculpa las molestias.'
if request.env.http_referer!=None:
goback = A(SPAN(_class = 'icon leftarrow'), 'Regresar', _title='Volver a la página anterior', _class = 'pill button izq',
_href = request.env.http_referer)
else:
goback = A(SPAN(_class = 'icon home'), 'Blogchile.cl', _class = 'positive primary button izq',
_href = 'http://blogchile.cl/')
cerrarmarco = A(SPAN(_class = 'icon rightarrow'), 'Ir al Blog', _class = 'pill negative button der', _href = shorturl, _title='Cerrar este marco y visitar el artículo en el blog de su fuente original')
referer = goback
#referer = DIV(goback, class='izq')
#go = DIV(IFRAME(_src = shorturl, _style = 'height:90%;width:inherit;border:0;'), _id = 'godiv', _style = 'display:block;height:100%;width:100%;')
blog = IFRAME(_src = shorturl, _id='blogiframe', _style='width:inherit;border:0;')
d = dict(blog=blog,shorturl=shorturl,referer=referer,cerrarmarco=cerrarmarco)
return response.render(d)
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
"""
return dict(form = auth())
def sitemap():
del response.headers['Cache-Control']
del response.headers['Pragma']
del response.headers['Expires']
response.headers['Cache-Control'] = 'max-age=300'
if request.extension == 'xml':
sm = [str('<?xml version="1.0" encoding="UTF-8" ?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">')]
prefix = request.env.wsgi_url_scheme+'://'+request.env.http_host
for cat in db((db.categoria.id>0) & (db.categoria.is_active == True)).select(db.categoria.id,db.categoria.title,db.categoria.slug):
sm.append(str(TAG.url(
TAG.loc(prefix,URL(r=request,c='default',f='index.html',args=[cat.slug])),
TAG.changefreq('always')
)))
sm.append(str(TAG.url(
TAG.loc(prefix,URL(r=request,c='default',f='feed.rss',args=[cat.slug])),
TAG.changefreq('always')
)))
sm.append('</urlset>')
return sm
elif request.extension == 'html':
#response.view = 'plantilla.html'
sm = DIV(_id='sitemap')
for cat in db((db.categoria.id>0) & (db.categoria.is_active==True)).select(db.categoria.id,db.categoria.title,db.categoria.slug):
categorias = DIV(H2(A(cat.title.capitalize(),_href=URL(r=request,c='default',f='index.html',args=[cat.slug]))))
noticias = UL()
data = db((db.feed.categoria == cat.id)& (db.noticia.feed == db.feed.id)).select(db.noticia.id, db.noticia.title, db.noticia.slug, distinct=True, orderby=~db.noticia.id, limitby=(0,4))
for noti in data:
noticias.append(LI(A(noti.title, _href=URL(c='default',f='blog',args=[noti.slug,noti.id]))))
categorias.append(noticias)
sm.append(categorias)
return dict(sm=sm)
def sitemapindex():
sm = [str('<?xml version="1.0" encoding="UTF-8" ?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">')]
prefix = request.env.wsgi_url_scheme+'://'+request.env.http_host
for i in xrange(1,5):
sm.append(str(TAG.sitemap(
TAG.loc(prefix,URL(c='default',f='sitemap%s.xml' % i))
)))
sm.append('</sitemapindex>')
return sm
def sitemap1():
sm = [str('<?xml version="1.0" encoding="UTF-8" ?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">')]
prefix = request.env.wsgi_url_scheme+'://'+request.env.http_host
data = db(db.noticia.id>0).select(db.noticia.id, db.noticia.created_on, db.noticia.title, db.noticia.slug, orderby=~db.noticia.id, limitby=(0,200))
for noti in data:
sm.append(str(TAG.url(
TAG.loc(prefix,URL(c='default',f='blog',args=[noti.slug,noti.id],extension='')),
TAG.lastmod(noti.created_on.date()),
TAG.changefreq('always')
)))
sm.append('</urlset>')
return sm
def sitemap2():
sm = [str('<?xml version="1.0" encoding="UTF-8" ?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">')]
prefix = request.env.wsgi_url_scheme+'://'+request.env.http_host
data = db(db.noticia.id>0).select(db.noticia.id, db.noticia.created_on, db.noticia.title, db.noticia.slug, distinct=True, orderby=~db.noticia.id, limitby=(200,400))
for noti in data:
sm.append(str(TAG.url(
TAG.loc(prefix,URL(c='default',f='blog',args=[noti.slug,noti.id],extension='')),
TAG.lastmod(noti.created_on.date()),
TAG.changefreq('always')
)))
sm.append('</urlset>')
return sm
def sitemap3():
sm = [str('<?xml version="1.0" encoding="UTF-8" ?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">')]
prefix = request.env.wsgi_url_scheme+'://'+request.env.http_host
data = db(db.noticia.id>0).select(db.noticia.id, db.noticia.created_on, db.noticia.title, db.noticia.slug, distinct=True, orderby=~db.noticia.id, limitby=(400,600))
for noti in data:
sm.append(str(TAG.url(
| TAG.loc(prefix,URL(c='default',f='blog',args=[noti.slug,noti.id],extension='')),
TAG.lastmod(noti.created_on.date()),
TAG.changefreq('always')
)))
sm.append('</urlset>')
| conditional_block |
|
default.py | else:
cat = request.args(0)
return redirect('http://feeds.feedburner.com/blogchile%s' % cat)
# verificamos si pasó por c=default f=mobile y activó el bit de sesión
if session.mobile:
response.view = 'default/index.mobi'
response.files.append(URL('static','css/blogchilemobile.css'))
else:
''' si no hay bit de sesión mobile, establece el caché del browserl
esto es por que sino el caché impediría cambiar al modo mobile (bug de flojo)'''
response.files.append(URL('static','js/jquery.cycle.all.min.js'))
if request.args(0):
catslug = request.args(0)
response.title = 'Blog Chile: %s' % catslug.capitalize().replace('-',' ')
response.meta.keywords = catslug.replace('-',' ')
response.meta.description = "Blog de %s en Chile, Blogósfera Chilena, Blogs Chilenos," % catslug.capitalize().replace('-',' ')
if catslug in ['medio-ambiente','animales']:
return redirect(URL(r=request,f='index',args='naturaleza'),301)
else:
response.title = 'Blog Chile: Portada'
response.meta.description = 'Blogs de Chile: Últimas publicaciones de noticias, tecnología, opinión, deporte, diseño, ocio, música, política, arte y más en la blogósfera chilena'
response.meta.keywords = 'blogs chile, turismo chile, blogs chilenos'
#if request.extension == 'rss':
# return redirect('http://feeds.feedburner.com/blogosfera/dDKt')
try:
# muestra un response.flash con la descripción de cada categoría, si es que la hay (en db.feed)
if request.args:
descrip = db(db.categoria.slug == request.args(0)).select(db.categoria.description)[0].description
if descrip != None:
response.flash = descrip
except:
pass
# aviso temporal de WIP. chk según sessión de conexión en el sitio
"""
if session.avisado == False:
response.flash = XML('El Sitio está temporalmente bajo algunos ajustes extraordinarios; disculpa si te ocasionan alguna molestia: %s ' % session.avisado)
session.avisado = True
"""
publicaciones = LOAD(r=request,c='default',f='publicaciones.load',args=request.args,ajax=True,content='Cargando bloques...')
return dict(publicaciones=publicaciones)
#return dict()
#@cache(request.env.path_info, time_expire=150, cache_model=cache.disk)
def publicaciones():
if not request.ajax: return ''
from gluon.tools import prettydate
from datetime import datetime
if request.args:
catslug_data = db(db.categoria.slug == request.args(0)).select(db.categoria.slug)
for cat in catslug_data:
catslug = cat.slug
else:
catslug = 'noticias'
publicaciones = DIV()
# obteniendo los feeds categorizados bajo el slug solicitado desde la url
### 1 categoría por feed
"""
for feedincat in db((db.categoria.slug == catslug) & (db.feed.categoria == db.categoria.id)
#& (db.feed_categoria.feed == db.feed.id)
#& (db.feed_categoria.is_active == True)
& (db.feed.is_active == True)
& (db.categoria.is_active == True)
).select(db.feed.ALL):
"""
feedincat_data = db((db.categoria.slug == catslug)
& (db.feed.categoria == db.categoria.id)
& (db.feed.is_active == True)
& (db.categoria.is_active == True)
).select(db.feed.id,db.feed.title,db.feed.source)
for feedincat in feedincat_data:
# armando feed_bloque y la noticia de cada feed
feedbox = DIV(DIV(A(feedincat.title,_href=feedincat.source,_target='_blank',_class='ui-widget-header-a'), _class = 'feed_titulo ui-widget-header ui-corner-all'), _class = 'feedbox feed_bloque izq ui-widget ui-corner-all')
for n in db(db.noticia.feed == feedincat.id).select(db.noticia.ALL, orderby =~ db.noticia.id, limitby=(0,4)):
try:
actualizado = datetime.strptime(str(n.updated),'%Y-%m-%d %H:%M:%S')
except:
actualizado = n.created_on
# armando la url que va en el rss
#localurl = 'http://' + request.env.http_host + URL(c = 'default', f = 'blog.html', args = [n.slug,n.id], extension='html')
# armando el título y enlace a la publicación; armando los bloques de publicación
feedbox.append(DIV(DIV(A(n.title.lower()+'...', _name = n.slug,
_href = URL(r = request, f = 'blog', args = [catslug,n.slug,n.id], extension=False),
_class = 'noticia_link ui-widget-content-a', _target='_blank',extension='html'),
DIV(prettydate(actualizado,T),
_class='noticia_meta'),
_class = 'noticia_contenido ui-widget-content ui-corner-all'),
_class = 'noticia ui-widget ui-corner-all')
)
#entradas.append(dict(title =unicode(n.title,'utf8'), link = localurl, description = unicode('%s (%s)' % (n.description, n.feed.title),'utf8'), created_on = request.now))
publicaciones.append(feedbox)
response.js = XML('''function filtro(){
jQuery("#filtrando").keyup(function () {
var filter = jQuery(this).val(), count = 0;
jQuery(".feedbox .noticia, .feed_titulo").each(function () {
if (jQuery(this).text().search(new RegExp(filter, "i")) < 0) {
jQuery(this).addClass("hidden");
} else {
jQuery(this).removeClass("hidden");
count++;
}
});
jQuery("#filtrado").text(count);
});
}
jQuery(document).ready(filtro);
''')
d = dict(publicaciones=publicaciones)
return response.render(d)
#return dict(publicaciones=publicaciones)
def elimina_tildes(s):
"""
Esta función sirve para eliminar las tildes del string que
se le pase como parámetro.
"""
import unicodedata
normalizado = ''.join((c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn'))
return str(normalizado)
#@cache(request.env.path_info, time_expire=1200, cache_model=cache.disk)
def blog():
if request.extension!='html':
request.extension = 'html'
if not request.args:
redirect(URL('default','index.html'))
response.files.append(URL('static','css/blog.css'))
#response.files.append(URL('static','js/jquery.iframe.js'))
catslug = request.args(0)
slugnoticia = request.args(1) #para mostrar la noticia en la url; SEO
nid = request.args(2)
#nid = int(request.args[len(request.args)-1])
#titulo = db.noticia[nid].title
#print(type(nid))
titulo = slugnoticia.replace('-',' ')
categoria = catslug
response.title='%s: %s' % (categoria.capitalize(),titulo.capitalize())
#response.meta.description = '%s %s' % (response.title,db.noticia[nid].feed.title)
if db.noticia(nid):
shorturl = db.noticia(nid).shorturl
else:
shorturl = 'http://lmddgtfy.net/?q=%s, %s' % (request.args(0).title().replace('-',' '),request.args(1).title().replace('-',' '))
if 'http://lmddgtfy' in shorturl:
response.flash = 'El enlace se ha perdido. Te dirigiré a una búsqueda PRIVADA usando DuckDuckGo.com. Disculpa las molestias.'
if request.env.http_referer!=None:
goback = A(SPAN(_class = 'icon leftarrow'), 'Regresar', _title='Volver a la página anterior', _class = 'pill button izq',
_href = request.env.http_referer)
else:
goback = A(SPAN(_class = 'icon home'), 'Blogchile.cl', _class = 'positive primary button izq',
_href = 'http://blogchile.cl/')
cerrarmarco = A(SPAN(_class = 'icon rightarrow'), 'Ir al Blog', _class = 'pill negative button der', _href = shorturl, _title='Cerrar este marco y visitar el artículo en el blog de su fuente original')
referer = goback
#referer = DIV(goback, class='izq')
#go = DIV(IFRAME(_src = shorturl, _style = 'height:90%;width:inherit;border:0;'), _id | cat = '' | random_line_split |
|
default.py | True
"""
publicaciones = LOAD(r=request,c='default',f='publicaciones.load',args=request.args,ajax=True,content='Cargando bloques...')
return dict(publicaciones=publicaciones)
#return dict()
#@cache(request.env.path_info, time_expire=150, cache_model=cache.disk)
def publicaciones():
if not request.ajax: return ''
from gluon.tools import prettydate
from datetime import datetime
if request.args:
catslug_data = db(db.categoria.slug == request.args(0)).select(db.categoria.slug)
for cat in catslug_data:
catslug = cat.slug
else:
catslug = 'noticias'
publicaciones = DIV()
# obteniendo los feeds categorizados bajo el slug solicitado desde la url
### 1 categoría por feed
"""
for feedincat in db((db.categoria.slug == catslug) & (db.feed.categoria == db.categoria.id)
#& (db.feed_categoria.feed == db.feed.id)
#& (db.feed_categoria.is_active == True)
& (db.feed.is_active == True)
& (db.categoria.is_active == True)
).select(db.feed.ALL):
"""
feedincat_data = db((db.categoria.slug == catslug)
& (db.feed.categoria == db.categoria.id)
& (db.feed.is_active == True)
& (db.categoria.is_active == True)
).select(db.feed.id,db.feed.title,db.feed.source)
for feedincat in feedincat_data:
# armando feed_bloque y la noticia de cada feed
feedbox = DIV(DIV(A(feedincat.title,_href=feedincat.source,_target='_blank',_class='ui-widget-header-a'), _class = 'feed_titulo ui-widget-header ui-corner-all'), _class = 'feedbox feed_bloque izq ui-widget ui-corner-all')
for n in db(db.noticia.feed == feedincat.id).select(db.noticia.ALL, orderby =~ db.noticia.id, limitby=(0,4)):
try:
actualizado = datetime.strptime(str(n.updated),'%Y-%m-%d %H:%M:%S')
except:
actualizado = n.created_on
# armando la url que va en el rss
#localurl = 'http://' + request.env.http_host + URL(c = 'default', f = 'blog.html', args = [n.slug,n.id], extension='html')
# armando el título y enlace a la publicación; armando los bloques de publicación
feedbox.append(DIV(DIV(A(n.title.lower()+'...', _name = n.slug,
_href = URL(r = request, f = 'blog', args = [catslug,n.slug,n.id], extension=False),
_class = 'noticia_link ui-widget-content-a', _target='_blank',extension='html'),
DIV(prettydate(actualizado,T),
_class='noticia_meta'),
_class = 'noticia_contenido ui-widget-content ui-corner-all'),
_class = 'noticia ui-widget ui-corner-all')
)
#entradas.append(dict(title =unicode(n.title,'utf8'), link = localurl, description = unicode('%s (%s)' % (n.description, n.feed.title),'utf8'), created_on = request.now))
publicaciones.append(feedbox)
response.js = XML('''function filtro(){
jQuery("#filtrando").keyup(function () {
var filter = jQuery(this).val(), count = 0;
jQuery(".feedbox .noticia, .feed_titulo").each(function () {
if (jQuery(this).text().search(new RegExp(filter, "i")) < 0) {
jQuery(this).addClass("hidden");
} else {
jQuery(this).removeClass("hidden");
count++;
}
});
jQuery("#filtrado").text(count);
});
}
jQuery(document).ready(filtro);
''')
d = dict(publicaciones=publicaciones)
return response.render(d)
#return dict(publicaciones=publicaciones)
def elimina_tildes(s):
"""
Esta función sirve para eliminar las tildes del string que
se le pase como parámetro.
"""
import unicodedata
normalizado = ''.join((c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn'))
return str(normalizado)
#@cache(request.env.path_info, time_expire=1200, cache_model=cache.disk)
def blog():
if request.extension!='html':
request.extension = 'html'
if not request.args:
redirect(URL('default','index.html'))
response.files.append(URL('static','css/blog.css'))
#response.files.append(URL('static','js/jquery.iframe.js'))
catslug = request.args(0)
slugnoticia = request.args(1) #para mostrar la noticia en la url; SEO
nid = request.args(2)
#nid = int(request.args[len(request.args)-1])
#titulo = db.noticia[nid].title
#print(type(nid))
titulo = slugnoticia.replace('-',' ')
categoria = catslug
response.title='%s: %s' % (categoria.capitalize(),titulo.capitalize())
#response.meta.description = '%s %s' % (response.title,db.noticia[nid].feed.title)
if db.noticia(nid):
shorturl = db.noticia(nid).shorturl
else:
shorturl = 'http://lmddgtfy.net/?q=%s, %s' % (request.args(0).title().replace('-',' '),request.args(1).title().replace('-',' '))
if 'http://lmddgtfy' in shorturl:
response.flash = 'El enlace se ha perdido. Te dirigiré a una búsqueda PRIVADA usando DuckDuckGo.com. Disculpa las molestias.'
if request.env.http_referer!=None:
goback = A(SPAN(_class = 'icon leftarrow'), 'Regresar', _title='Volver a la página anterior', _class = 'pill button izq',
_href = request.env.http_referer)
else:
goback = A(SPAN(_class = 'icon home'), 'Blogchile.cl', _class = 'positive primary button izq',
_href = 'http://blogchile.cl/')
cerrarmarco = A(SPAN(_class = 'icon rightarrow'), 'Ir al Blog', _class = 'pill negative button der', _href = shorturl, _title='Cerrar este marco y visitar el artículo en el blog de su fuente original')
referer = goback
#referer = DIV(goback, class='izq')
#go = DIV(IFRAME(_src = shorturl, _style = 'height:90%;width:inherit;border:0;'), _id = 'godiv', _style = 'display:block;height:100%;width:100%;')
blog = IFRAME(_src = shorturl, _id='blogiframe', _style='width:inherit;border:0;')
d = dict(blog=blog,shorturl=shorturl,referer=referer,cerrarmarco=cerrarmarco)
return response.render(d)
def user():
"""
exposes:
http://.... | e.headers['Cache-Control']
del response.headers['Pragma']
del response.headers['Expires']
response.headers['Cache-Control'] = 'max-age=300'
if request.extension == 'xml':
sm = [str('<?xml version="1.0" encoding="UTF-8" ?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">')]
prefix = request.env.wsgi_url_scheme+'://'+request.env.http_host
for cat in db((db.categoria.id>0) & (db.categoria.is_active == True)).select(db.categoria.id,db.categoria.title,db.categoria.slug):
sm.append(str(TAG.url(
TAG.loc(prefix,URL(r=request,c='default',f='index.html',args=[cat.slug])),
TAG.changefreq('always')
)))
sm.append(str(TAG.url(
TAG.loc(prefix,URL(r=request,c='default',f='feed.rss',args=[cat.slug])),
TAG.changefreq('always')
)))
sm.append('</urlset>')
return sm
elif request.extension == 'html':
#response.view = 'plantilla.html'
sm = DIV(_id='sitemap')
for cat in db((db.categoria.id>0) & (db.categoria.is_active==True)).select(db.categoria.id,db.categoria.title,db.categoria.slug):
| /[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
"""
return dict(form = auth())
def sitemap():
del respons | identifier_body |
fixed.rs | () -> Vec<Route> {
routes![
get_index,
resume::get,
links::get,
contacts::get,
projects::get,
projects::project::get,
]
}
/// Functions generating my home page.
pub mod htmlgen {
use maud::{html, Markup, Render};
use page_client::{data, partials};
/// Create a basic menu.
pub fn menu() -> Option<data::Menu<'static>> {
Some(data::Menu(&[data::MenuItem {
text: "Blog",
link: Some("/blog"),
children: None,
}]))
}
/// Returns a list of links as [`Markup`].
fn link_group() -> Markup {
let links = vec![
data::LogoLink {
url: "https://github.com/AlterionX/",
logo: "public/img/icon/github.png",
alt_text: "Github",
text: "AlterionX",
},
data::LogoLink {
url: "mailto:[email protected]",
logo: "public/img/icon/email.svg",
alt_text: "Email",
text: "[email protected]",
},
data::LogoLink {
url: "public/resume/resume.pdf",
logo: "public/img/icon/resume.svg",
alt_text: "Resume",
text: "Resume",
},
];
html! {
.link-group {
@for link in links.iter() {
(link)
}
}
}
}
/// Returns the slide show as [`Markup`].
fn slides() -> Markup {
html! {
.slides {
(my_intro())
(my_story())
(my_work())
(my_interests())
(my_passion())
(my_reading_time())
(my_gaming_time())
}
.slide-attachments {
img#slide-prev.slide-attachment src="public/img/left-simple-arrow.svg";
.slide-markers.slide-attachment {
(slide_markers(7))
}
img#slide-next.slide-attachment src="public/img/right-simple-arrow.svg";
}
}
}
/// Returns a slide as [`Markup`].
fn slide<T: Render, U: Render>(title: T, text: U, cls: Option<&str>) -> Markup {
html! {
div class={ "slide" @if let Some(cls) = cls { " " (cls) } } {
h2.slide-heading { (title) }
.slide-text { (text) }
}
}
}
/// Returns the slide_markers as [`Markup`].
fn slide_markers(slide_cnt: u8) -> Markup {
html! {
@for i in 0..slide_cnt {
(slide_marker(i))
}
}
}
/// Returns the slide_marker as [`Markup`].
fn slide_marker(idx: u8) -> Markup {
html! {
div id = { "slide-marker-"(idx) } class={"slide-marker" @if idx == 0 { (" active-slide-marker") }} {}
}
}
/// Returns the first slide as [`Markup`].
fn my_intro() -> Markup {
slide(
"Nice to meet you",
html! {
p { "My name is Ben. I am a developer, but I am also:" }
ul {
li {
"a reader; I love to read. But that can get long, so let's save the details for later."
}
li {
"a writer; " a href="https://www.nanowrimo.org/participants/alterionx/novels" { "NaNoWriMo" } " \
(a.k.a. November) is simultaneously my favorite and most hated month of the year."
}
li {
"a gamer; still waiting for " a href="https://robertsspaceindustries.com/" { "Star Citizen." }
}
li {
"a linguist: I technically know Chinese, and am studying Japanese."
}
}
p {"\
But mostly, I just enjoy watching pretty colors scroll really " span.italic.bold { "really" } " fast down \
my terminal screen while I run my programs and blabber endlessly about my interests.\
"}
},
Some("intro active-slide"),
)
}
/// Returns the second slide as [`Markup`].
fn my_interests() -> Markup {
slide(
"Everything is fascinating",
html! {
p {"\
C, C++, and Rust are my favorite languages. I have worked in both OpenGl and Vulkan. \
I've dabbled with Unity, Godot, and Unreal; Amethyst sounds interesting as well. \
However, I also enjoy gaming and reading in my spare time, as well as learning even more about \
tech and interesting projects such as WASM, xi, TypeScript, Fuschia, and AR glasses.\
"}
p {"\
As a note, just for fun, this entire website is built with Rust + WASM \
(Such a fun word. Anyways...). I don't know how many browsers it runs on, \
but it was definitely fun. \
"}
},
None,
)
}
/// Returns the third slide as [`Markup`].
fn my_story() -> Markup {
slide(
"Improve a little, day by day",
html! {
p {"\
There was a day in 10th grade, when one of my friends introduced me to Java. I was \
enamored the moment I touched the keyboard. The actual program was cute little \
thing, reading and adding two numbers.\
"}
p {"\
It blew my mind.
"}
p {"\
Now that I think about it, it fits; I had been enchanted by the power of words so I wanted to be a novelist,\
but then I found something even more powerful.\
"}
p {"\
Either way, I had decided then and there that I knew that I wanted to program for \
a living. And now I'm here, seeking to live a life programming and architecting solutions.\
"}
},
None,
)
}
/// Returns the fourth slide as [`Markup`].
fn my_work() -> Markup {
slide(
"Learning to code",
html! {
p {"\
I've picked up a lot of different skills since that day. I developed a custom Wordpress theme and wrote \
a chatlog for my English class. In my last year of high school, I learned about automata theory.\
"}
p {"\
When I came to college, I wrote part of an OS in no-std C++ and a Python frontend for connecting to a server and testing. \
I fell in love with writing tools and performance-critical programming.\
"}
p {"\
I've written (with a friend) a ray tracer, a fluid simulation, and a shattering simulation. I am in the \
middle of writing a simulation in Rust that combines a majority of these concepts. I ended up devoting \
enough time to it that I will make it my thesis project.\
"}
},
None,
)
}
/// Returns the fifth slide as [`Markup`].
fn my_passion() -> Markup {
slide(
"Programming and Writing",
html! {
p {"\
I focus on systems development, rendering, and physical simulation. I think I've already said \
enough about that. But I also have a string interest in game development and story writing.\
"}
p {"\
In fact, by virtue of NaNoWriMo, I have the first version of my novel finished!\
"}
},
None,
)
}
/// Returns the sixth slide as [`Markup`].
fn my_reading_time() -> Markup {
slide(
"Breaktime: Reading!",
html! {
p {"\
Speaking of wriing, I love to read as well. " a href="https://brandonsanderson.com/" { "Brandon Sanderson" } "'s my favorite author, \
but " a href="https://www.patrickrothfuss.com/content/index.asp" { "Patrick Rothfuss" } " is the most \
inspirational one—still waiting for " span.underline { "The Doors of Stone" } ". (It's alright. We've only waited for a decade-ish.)\
"}
p {"\
Rothfuss is the one who inspired me to write, so I aim to take just as long as him to finish my stories. \
But, actually, the subtelty and detailed foreshadowing in his work is mind boggling. As I attempt to do \
the same, I realize this all the more.\
"}
},
None,
)
}
/// Returns the seventh slide as [`Markup`].
fn my_gaming_time() -> Markup {
slide(
"Breaktime: Gaming!",
html! {
p {"\
Games are the other half of my free time. Shooters are good as stress relief but my favorites are RPGs. \
My favorites, however, is The Last of Us. It is a work of art. Nier: Automata comes in at a close second; it's only lower \
due to the PC port -- as a developer, its poor performance was obvious.\
"}
p {"\
The favorites I'd listed are RPGs, | routes | identifier_name |
|
fixed.rs | .
pub mod htmlgen {
use maud::{html, Markup, Render};
use page_client::{data, partials};
/// Create a basic menu.
pub fn menu() -> Option<data::Menu<'static>> {
Some(data::Menu(&[data::MenuItem {
text: "Blog",
link: Some("/blog"),
children: None,
}]))
}
/// Returns a list of links as [`Markup`].
fn link_group() -> Markup {
let links = vec![
data::LogoLink {
url: "https://github.com/AlterionX/",
logo: "public/img/icon/github.png",
alt_text: "Github",
text: "AlterionX",
},
data::LogoLink {
url: "mailto:[email protected]",
logo: "public/img/icon/email.svg",
alt_text: "Email",
text: "[email protected]",
},
data::LogoLink {
url: "public/resume/resume.pdf",
logo: "public/img/icon/resume.svg",
alt_text: "Resume",
text: "Resume",
},
];
html! {
.link-group {
@for link in links.iter() {
(link)
}
}
}
}
/// Returns the slide show as [`Markup`].
fn slides() -> Markup {
html! {
.slides {
(my_intro())
(my_story())
(my_work())
(my_interests())
(my_passion())
(my_reading_time())
(my_gaming_time())
}
.slide-attachments {
img#slide-prev.slide-attachment src="public/img/left-simple-arrow.svg";
.slide-markers.slide-attachment {
(slide_markers(7))
}
img#slide-next.slide-attachment src="public/img/right-simple-arrow.svg";
}
}
}
/// Returns a slide as [`Markup`].
fn slide<T: Render, U: Render>(title: T, text: U, cls: Option<&str>) -> Markup {
html! {
div class={ "slide" @if let Some(cls) = cls { " " (cls) } } {
h2.slide-heading { (title) }
.slide-text { (text) }
}
}
}
/// Returns the slide_markers as [`Markup`].
fn slide_markers(slide_cnt: u8) -> Markup {
html! {
@for i in 0..slide_cnt {
(slide_marker(i))
}
}
}
/// Returns the slide_marker as [`Markup`].
fn slide_marker(idx: u8) -> Markup |
/// Returns the first slide as [`Markup`].
fn my_intro() -> Markup {
slide(
"Nice to meet you",
html! {
p { "My name is Ben. I am a developer, but I am also:" }
ul {
li {
"a reader; I love to read. But that can get long, so let's save the details for later."
}
li {
"a writer; " a href="https://www.nanowrimo.org/participants/alterionx/novels" { "NaNoWriMo" } " \
(a.k.a. November) is simultaneously my favorite and most hated month of the year."
}
li {
"a gamer; still waiting for " a href="https://robertsspaceindustries.com/" { "Star Citizen." }
}
li {
"a linguist: I technically know Chinese, and am studying Japanese."
}
}
p {"\
But mostly, I just enjoy watching pretty colors scroll really " span.italic.bold { "really" } " fast down \
my terminal screen while I run my programs and blabber endlessly about my interests.\
"}
},
Some("intro active-slide"),
)
}
/// Returns the second slide as [`Markup`].
fn my_interests() -> Markup {
slide(
"Everything is fascinating",
html! {
p {"\
C, C++, and Rust are my favorite languages. I have worked in both OpenGl and Vulkan. \
I've dabbled with Unity, Godot, and Unreal; Amethyst sounds interesting as well. \
However, I also enjoy gaming and reading in my spare time, as well as learning even more about \
tech and interesting projects such as WASM, xi, TypeScript, Fuschia, and AR glasses.\
"}
p {"\
As a note, just for fun, this entire website is built with Rust + WASM \
(Such a fun word. Anyways...). I don't know how many browsers it runs on, \
but it was definitely fun. \
"}
},
None,
)
}
/// Returns the third slide as [`Markup`].
fn my_story() -> Markup {
slide(
"Improve a little, day by day",
html! {
p {"\
There was a day in 10th grade, when one of my friends introduced me to Java. I was \
enamored the moment I touched the keyboard. The actual program was cute little \
thing, reading and adding two numbers.\
"}
p {"\
It blew my mind.
"}
p {"\
Now that I think about it, it fits; I had been enchanted by the power of words so I wanted to be a novelist,\
but then I found something even more powerful.\
"}
p {"\
Either way, I had decided then and there that I knew that I wanted to program for \
a living. And now I'm here, seeking to live a life programming and architecting solutions.\
"}
},
None,
)
}
/// Returns the fourth slide as [`Markup`].
fn my_work() -> Markup {
slide(
"Learning to code",
html! {
p {"\
I've picked up a lot of different skills since that day. I developed a custom Wordpress theme and wrote \
a chatlog for my English class. In my last year of high school, I learned about automata theory.\
"}
p {"\
When I came to college, I wrote part of an OS in no-std C++ and a Python frontend for connecting to a server and testing. \
I fell in love with writing tools and performance-critical programming.\
"}
p {"\
I've written (with a friend) a ray tracer, a fluid simulation, and a shattering simulation. I am in the \
middle of writing a simulation in Rust that combines a majority of these concepts. I ended up devoting \
enough time to it that I will make it my thesis project.\
"}
},
None,
)
}
/// Returns the fifth slide as [`Markup`].
fn my_passion() -> Markup {
slide(
"Programming and Writing",
html! {
p {"\
I focus on systems development, rendering, and physical simulation. I think I've already said \
enough about that. But I also have a string interest in game development and story writing.\
"}
p {"\
In fact, by virtue of NaNoWriMo, I have the first version of my novel finished!\
"}
},
None,
)
}
/// Returns the sixth slide as [`Markup`].
fn my_reading_time() -> Markup {
slide(
"Breaktime: Reading!",
html! {
p {"\
Speaking of wriing, I love to read as well. " a href="https://brandonsanderson.com/" { "Brandon Sanderson" } "'s my favorite author, \
but " a href="https://www.patrickrothfuss.com/content/index.asp" { "Patrick Rothfuss" } " is the most \
inspirational one—still waiting for " span.underline { "The Doors of Stone" } ". (It's alright. We've only waited for a decade-ish.)\
"}
p {"\
Rothfuss is the one who inspired me to write, so I aim to take just as long as him to finish my stories. \
But, actually, the subtelty and detailed foreshadowing in his work is mind boggling. As I attempt to do \
the same, I realize this all the more.\
"}
},
None,
)
}
/// Returns the seventh slide as [`Markup`].
fn my_gaming_time() -> Markup {
slide(
"Breaktime: Gaming!",
html! {
p {"\
Games are the other half of my free time. Shooters are good as stress relief but my favorites are RPGs. \
My favorites, however, is The Last of Us. It is a work of art. Nier: Automata comes in at a close second; it's only lower \
due to the PC port -- as a developer, its poor performance was obvious.\
"}
p {"\
The favorites I'd listed are RPGs, but I find myself more engrossed in Terraria and Stellaris than RPGs since they leave a lot of room to \
establish a character and role play despite not being an RPG. Dungeons and Dragons (DnD) is pretty | {
html! {
div id = { "slide-marker-"(idx) } class={"slide-marker" @if idx == 0 { (" active-slide-marker") }} {}
}
} | identifier_body |
fixed.rs | page.
pub mod htmlgen {
use maud::{html, Markup, Render};
use page_client::{data, partials};
/// Create a basic menu.
pub fn menu() -> Option<data::Menu<'static>> {
Some(data::Menu(&[data::MenuItem {
text: "Blog",
link: Some("/blog"),
children: None,
}]))
}
/// Returns a list of links as [`Markup`].
fn link_group() -> Markup {
let links = vec![
data::LogoLink {
url: "https://github.com/AlterionX/",
logo: "public/img/icon/github.png",
alt_text: "Github",
text: "AlterionX",
},
data::LogoLink {
url: "mailto:[email protected]",
logo: "public/img/icon/email.svg",
alt_text: "Email",
text: "[email protected]",
},
data::LogoLink {
url: "public/resume/resume.pdf",
logo: "public/img/icon/resume.svg",
alt_text: "Resume",
text: "Resume",
},
];
html! {
.link-group {
@for link in links.iter() {
(link)
}
}
}
}
/// Returns the slide show as [`Markup`].
fn slides() -> Markup {
html! {
.slides {
(my_intro())
(my_story())
(my_work())
(my_interests())
(my_passion())
(my_reading_time())
(my_gaming_time())
}
.slide-attachments {
img#slide-prev.slide-attachment src="public/img/left-simple-arrow.svg";
.slide-markers.slide-attachment {
(slide_markers(7))
}
img#slide-next.slide-attachment src="public/img/right-simple-arrow.svg";
}
}
}
/// Returns a slide as [`Markup`].
fn slide<T: Render, U: Render>(title: T, text: U, cls: Option<&str>) -> Markup {
html! {
div class={ "slide" @if let Some(cls) = cls { " " (cls) } } {
h2.slide-heading { (title) }
.slide-text { (text) }
}
}
} | @for i in 0..slide_cnt {
(slide_marker(i))
}
}
}
/// Returns the slide_marker as [`Markup`].
fn slide_marker(idx: u8) -> Markup {
html! {
div id = { "slide-marker-"(idx) } class={"slide-marker" @if idx == 0 { (" active-slide-marker") }} {}
}
}
/// Returns the first slide as [`Markup`].
fn my_intro() -> Markup {
slide(
"Nice to meet you",
html! {
p { "My name is Ben. I am a developer, but I am also:" }
ul {
li {
"a reader; I love to read. But that can get long, so let's save the details for later."
}
li {
"a writer; " a href="https://www.nanowrimo.org/participants/alterionx/novels" { "NaNoWriMo" } " \
(a.k.a. November) is simultaneously my favorite and most hated month of the year."
}
li {
"a gamer; still waiting for " a href="https://robertsspaceindustries.com/" { "Star Citizen." }
}
li {
"a linguist: I technically know Chinese, and am studying Japanese."
}
}
p {"\
But mostly, I just enjoy watching pretty colors scroll really " span.italic.bold { "really" } " fast down \
my terminal screen while I run my programs and blabber endlessly about my interests.\
"}
},
Some("intro active-slide"),
)
}
/// Returns the second slide as [`Markup`].
fn my_interests() -> Markup {
slide(
"Everything is fascinating",
html! {
p {"\
C, C++, and Rust are my favorite languages. I have worked in both OpenGl and Vulkan. \
I've dabbled with Unity, Godot, and Unreal; Amethyst sounds interesting as well. \
However, I also enjoy gaming and reading in my spare time, as well as learning even more about \
tech and interesting projects such as WASM, xi, TypeScript, Fuschia, and AR glasses.\
"}
p {"\
As a note, just for fun, this entire website is built with Rust + WASM \
(Such a fun word. Anyways...). I don't know how many browsers it runs on, \
but it was definitely fun. \
"}
},
None,
)
}
/// Returns the third slide as [`Markup`].
fn my_story() -> Markup {
slide(
"Improve a little, day by day",
html! {
p {"\
There was a day in 10th grade, when one of my friends introduced me to Java. I was \
enamored the moment I touched the keyboard. The actual program was cute little \
thing, reading and adding two numbers.\
"}
p {"\
It blew my mind.
"}
p {"\
Now that I think about it, it fits; I had been enchanted by the power of words so I wanted to be a novelist,\
but then I found something even more powerful.\
"}
p {"\
Either way, I had decided then and there that I knew that I wanted to program for \
a living. And now I'm here, seeking to live a life programming and architecting solutions.\
"}
},
None,
)
}
/// Returns the fourth slide as [`Markup`].
fn my_work() -> Markup {
slide(
"Learning to code",
html! {
p {"\
I've picked up a lot of different skills since that day. I developed a custom Wordpress theme and wrote \
a chatlog for my English class. In my last year of high school, I learned about automata theory.\
"}
p {"\
When I came to college, I wrote part of an OS in no-std C++ and a Python frontend for connecting to a server and testing. \
I fell in love with writing tools and performance-critical programming.\
"}
p {"\
I've written (with a friend) a ray tracer, a fluid simulation, and a shattering simulation. I am in the \
middle of writing a simulation in Rust that combines a majority of these concepts. I ended up devoting \
enough time to it that I will make it my thesis project.\
"}
},
None,
)
}
/// Returns the fifth slide as [`Markup`].
fn my_passion() -> Markup {
slide(
"Programming and Writing",
html! {
p {"\
I focus on systems development, rendering, and physical simulation. I think I've already said \
enough about that. But I also have a string interest in game development and story writing.\
"}
p {"\
In fact, by virtue of NaNoWriMo, I have the first version of my novel finished!\
"}
},
None,
)
}
/// Returns the sixth slide as [`Markup`].
fn my_reading_time() -> Markup {
slide(
"Breaktime: Reading!",
html! {
p {"\
Speaking of wriing, I love to read as well. " a href="https://brandonsanderson.com/" { "Brandon Sanderson" } "'s my favorite author, \
but " a href="https://www.patrickrothfuss.com/content/index.asp" { "Patrick Rothfuss" } " is the most \
inspirational one—still waiting for " span.underline { "The Doors of Stone" } ". (It's alright. We've only waited for a decade-ish.)\
"}
p {"\
Rothfuss is the one who inspired me to write, so I aim to take just as long as him to finish my stories. \
But, actually, the subtelty and detailed foreshadowing in his work is mind boggling. As I attempt to do \
the same, I realize this all the more.\
"}
},
None,
)
}
/// Returns the seventh slide as [`Markup`].
fn my_gaming_time() -> Markup {
slide(
"Breaktime: Gaming!",
html! {
p {"\
Games are the other half of my free time. Shooters are good stress relief, but my favorites are RPGs. \
My overall favorite, however, is The Last of Us. It is a work of art. Nier: Automata comes in at a close second; it's only lower \
due to the PC port -- as a developer, its poor performance was obvious.\
"}
p {"\
The favorites I'd listed are RPGs, but I find myself more engrossed in Terraria and Stellaris, since they leave a lot of room to \
establish a character and role-play despite not being RPGs. Dungeons and Dragons (DnD) is pretty fun | /// Returns the slide_markers as [`Markup`].
fn slide_markers(slide_cnt: u8) -> Markup {
html! { | random_line_split |
attention_refine.py | context_c = tf.reduce_sum(tf.multiply(tf.expand_dims(attention_weight, axis=2), encoder_output), axis=1)
step_in = tf.concat((step_in, context_c), axis=-1)
in_concat = tf.concat((step_in, last_state), axis=-1)
gate_inputs = tf.sigmoid(tf.matmul(in_concat, self.de_w_r_z) + self.de_b_r_z)
r, z = tf.split(value=gate_inputs, num_or_size_splits=2, axis=1)
h_ = tf.tanh(tf.matmul(tf.concat((step_in, r * last_state), axis=-1), self.de_w_h) + self.de_b_h)
h = z * last_state + (1 - z) * h_
de_gru_output = tf.concat((de_gru_output, tf.expand_dims(h, axis=1)), axis=1)
i = i + 1
return i, de_embeded, de_gru_output, encoder_output
def __call__(self, de_embeded, de_gru_output, encoder_output, *args, **kwargs):
"""
de_gru_output can be created inside this method; doing so does not raise an uninitialized-variable error
:param de_embeded:
:param args:
:param kwargs:
:return:
"""
i0 = tf.constant(0)
_, _, decoder_output, _ = tf.while_loop(self.de_cond, self.de_gru,
loop_vars=[i0, de_embeded, de_gru_output, encoder_output],
shape_invariants=[i0.get_shape(), de_embeded.get_shape(),
tf.TensorShape([None, None, self.units]),
encoder_output.get_shape()])
return decoder_output
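# --- illustrative sketch (not part of the original script) ---------------------
# The dot-product attention step used in de_gru above, written out in NumPy so the
# shapes are explicit; B = batch, T = encoder steps, U = units (all assumed names).
def _attention_sketch(encoder_output, last_state):
    """encoder_output: [B, T, U]; last_state: [B, U] -> context vector [B, U]."""
    import numpy as np
    scores = np.einsum("btu,bu->bt", encoder_output, last_state)  # [B, T]
    weights = np.exp(scores - scores.max(axis=1, keepdims=True))
    weights /= weights.sum(axis=1, keepdims=True)                 # softmax over T
    return np.einsum("bt,btu->bu", weights, encoder_output)       # weighted sum
# -------------------------------------------------------------------------------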
sess = tf.Session()
# encoder
en_input = tf.placeholder(tf.int32, shape=[None, None])
en_embedding_variable = tf.Variable(tf.truncated_normal(shape=[DIC_SIZE, EMBEDDING_SIZE]))
en_embeded = tf.nn.embedding_lookup(en_embedding_variable, en_input)
en_gru_cell_1 = GruCell(units=GRU_UNITS, step_dimension=EMBEDDING_SIZE)
gru_init_state_1 = tf.zeros(shape=[BATCH_SIZE, 1, en_gru_cell_1.units])  # this should not be defined as a tf.Variable
encoder_output_1 = en_gru_cell_1(en_embeded, gru_init_state_1)
en_gru_cell_2 = GruCell(units=GRU_UNITS, step_dimension=en_gru_cell_1.units)
gru_init_state_2 = tf.zeros(shape=[BATCH_SIZE, 1, en_gru_cell_2.units])  # this should not be defined as a tf.Variable
encoder_output_2 = en_gru_cell_2(encoder_output_1[:, 1:], gru_init_state_2)
# decoder
de_in_label = tf.placeholder(tf.int32, shape=[None, None]) # batch_size,seq_len
de_embedding_variable = tf.Variable(tf.truncated_normal(shape=[LABEL_SIZE, EMBEDDING_SIZE]))
de_embeded = tf.nn.embedding_lookup(de_embedding_variable, de_in_label)
de_gru_cell_1 = GruCellAttentionDecoder(GRU_UNITS, step_dimension=EMBEDDING_SIZE)
de_init_state_1 = tf.expand_dims(encoder_output_1[:, -1], axis=1) # init state
decoder_output_1 = de_gru_cell_1(de_embeded, de_init_state_1, encoder_output_2[:, 1:])
de_gru_cell_2 = GruCell(GRU_UNITS, step_dimension=de_gru_cell_1.units)
de_init_state_2 = tf.expand_dims(encoder_output_2[:, -1], axis=1) # init state
decoder_output_2 = de_gru_cell_2(decoder_output_1[:, 1:], de_init_state_2)
# fully connected layers
dense_w_1 = tf.Variable(tf.truncated_normal(shape=[GRU_UNITS, GRU_UNITS // 2]))
dense_b_1 = tf.Variable(tf.truncated_normal(shape=[GRU_UNITS // 2, ]))
dense_w_2 = tf.Variable(tf.truncated_normal(shape=[GRU_UNITS // 2, LABEL_SIZE]))
dense_b_2 = tf.Variable(tf.truncated_normal(shape=[LABEL_SIZE, ]))
dense_1 = tf.nn.leaky_relu(tf.tensordot(decoder_output_2, dense_w_1, [[2], [0]]) + dense_b_1)
output = tf.nn.leaky_relu(tf.tensordot(dense_1, dense_w_2, [[2], [0]]) + dense_b_2)
loss = tf.losses.sparse_softmax_cross_entropy(labels=de_in_label[:, 1:], logits=output[:, 1:-1])
optimizer = tf.train.AdamOptimizer(learning_rate=0.002).minimize(loss)
decoder_start = np.zeros(shape=[BATCH_SIZE, 1]) + LABEL_SIZE - 1
saver = tf.train.Saver()
ds = input_fn("../../../data/translate/train_data.tfrecord").make_one_shot_iterator().get_next()
# sess.run(tf.global_variables_initializer())
saver.restore(sess, save_path=model_path + "ner.model-1")
def save_model():
np.savetxt("./params_v3/en_embeding", sess.run(en_embedding_variable))
np.savetxt("./params_v3/de_embedding", sess.run(de_embedding_variable))
np.savetxt("./params_v3/en_grucell_1_w_r_z", sess.run(en_gru_cell_1.en_w_r_z))
np.savetxt("./params_v3/en_grucell_1_b_r_z", sess.run(en_gru_cell_1.en_b_r_z))
np.savetxt("./params_v3/en_grucell_1_w_h", sess.run(en_gru_cell_1.en_w_h))
np.savetxt("./params_v3/en_grucell_1_b_h", sess.run(en_gru_cell_1.en_b_h))
# --
np.savetxt("./params_v3/en_grucell_2_w_r_z", sess.run(en_gru_cell_2.en_w_r_z))
np.savetxt("./params_v3/en_grucell_2_b_r_z", sess.run(en_gru_cell_2.en_b_r_z))
np.savetxt("./params_v3/en_grucell_2_w_h", sess.run(en_gru_cell_2.en_w_h))
np.savetxt("./params_v3/en_grucell_2_b_h", sess.run(en_gru_cell_2.en_b_h))
# --
np.savetxt("./params_v3/de_grucell_1_w_r_z", sess.run(de_gru_cell_1.de_w_r_z))
np.savetxt("./params_v3/de_grucell_1_b_r_z", sess.run(de_gru_cell_1.de_b_r_z))
np.savetxt("./params_v3/de_grucell_1_w_h", sess.run(de_gru_cell_1.de_w_h))
np.savetxt("./params_v3/de_grucell_1_b_h", sess.run(de_gru_cell_1.de_b_h))
np.savetxt("./params_v3/de_grucell_2_w_r_z", sess.run(de_gru_cell_2.en_w_r_z))
np.savetxt("./params_v3/de_grucell_2_b_r_z", sess.run(de_gru_cell_2.en_b_r_z))
np.savetxt("./params_v3/de_grucell_2_w_h", sess.run(de_gru_cell_2.en_w_h))
np.savetxt("./params_v3/de_grucell_2_b_h", sess.run(de_gru_cell_2.en_b_h))
np.savetxt("./params_v3/dense_w_1", sess.run(dense_w_1))
np.savetxt("./params_v3/dense_b_1", sess.run(dense_b_1))
np.savetxt("./params_v3/dense_w_2", sess.run(dense_w_2))
np.savetxt("./params_v3/dense_b_2", sess.run(dense_b_2))
min_v = 0.1
for i in range(100000):
features, labels, f_lengths, l_lengths = sess.run(ds)
"""
During training, the maximum sequence length differs from batch to batch. This speeds up
training but has some impact on accuracy; in this translation setting, moderately increasing
the padding at prediction time has a somewhat positive effect on the final result.
A follow-up optimisation is to re-organise the training data into length buckets
(a sketch, _bucket_by_length, appears just before this loop): samples shorter than 10
padded to length 10, samples of 10-20 padded to length 20. Training with a single fixed
padded length could increase the compute requirement by tens of times.
"""
f_max_len = f_lengths[np.argmax(f_lengths, axis=-1)]
l_max_len = l_lengths[np.argmax(l_lengths, axis=-1)]
loss_value, _ = sess.run((loss, optimizer), feed_dict={en_input: features[:, :f_max_len + 1],
de_in_label: np.concatenate(
(decoder_start, labels[:, :l_max_len + 1]),
axis=-1)[:,
0:-1]})
print(i, loss_value)
if i % 10 == 0 and (min_v > loss_value):
saver.save(sess, model_path + "ner.model", global_step=int(loss_value * 1000))
min_v = loss_value
"""
if i % 1 == 0:
# save_model() | # saver.save(sess, model_path + "ner.model", global_step=int(loss_value * 1000))
| random_line_split |
|
attention_refine.py | 0000)
self.en_b_r_z = tf.Variable(tf.truncated_normal(shape=[units * 2, ]) / 10000)
self.en_w_h = tf.Variable(tf.truncated_normal(shape=[step_dimension + self.units, self.units]) / 10000)
self.en_b_h = tf.Variable(tf.truncated_normal(shape=[units, ]) / 10000)
def en_cond(self, i, en_embeded, en_gru_output):
return i < tf.shape(en_embeded)[1]
def en_gru(self, i, en_embeded, en_gru_output):
step_in = en_embeded[:, i]
last_state = en_gru_output[:, i]
in_concat = tf.concat((step_in, last_state), axis=-1)
gate_inputs = tf.sigmoid(tf.matmul(in_concat, self.en_w_r_z) + self.en_b_r_z)
r, z = tf.split(value=gate_inputs, num_or_size_splits=2, axis=1)
h_ = tf.tanh(tf.matmul(tf.concat((step_in, r * last_state), axis=-1), self.en_w_h) + self.en_b_h)
h = z * last_state + (1 - z) * h_
en_gru_output = tf.concat((en_gru_output, tf.expand_dims(h, axis=1)), axis=1)
i = i + 1
return i, en_embeded, en_gru_output
def __call__(self, seqs, en_gru_output, *args, **kwargs):
"""
Creating en_gru_output inside __call__ is problematic: decoding reports an uninitialized variable, so the tensor is created outside and passed in. The likely cause is that the last state of this tensor is later used to initialise other variables.
:param seqs:
:param en_gru_output:
:param args:
:param kwargs:
:return:
"""
i0 = tf.constant(0)
_, _, encoder_output = tf.while_loop(self.en_cond, self.en_gru, loop_vars=[i0, seqs, en_gru_output],
shape_invariants=[i0.get_shape(), seqs.get_shape(),
tf.TensorShape([None, None, self.units])])
return encoder_output
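# Illustrative usage, mirroring the calls further below: the zero initial state is
# created by the caller and passed in (see the docstring above for the reason), e.g.
#   cell = GruCell(units=GRU_UNITS, step_dimension=EMBEDDING_SIZE)
#   init = tf.zeros(shape=[BATCH_SIZE, 1, cell.units])
#   outputs = cell(embedded_inputs, init)  # outputs[:, 0] is the initial state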
class GruCellAttentionDecoder:
def __init__(self, units, step_dimension):
"""
:param units: output dimension of each time step
:param step_dimension: input dimension of each time step
"""
self.units = units
self.de_w_r | Variable(
tf.truncated_normal(shape=[step_dimension + self.units * 2, self.units * 2]) / 10000)
self.de_b_r_z = tf.Variable(tf.truncated_normal(shape=[self.units * 2, ]) / 10000)
self.de_w_h = tf.Variable(tf.truncated_normal(shape=[step_dimension + self.units * 2, self.units]) / 10000)
self.de_b_h = tf.Variable(tf.truncated_normal(shape=[self.units, ]) / 10000)
def de_cond(self, i, de_embeded, de_gru_output, encoder_output):
return i < tf.shape(de_embeded)[1]
def de_gru(self, i, de_embeded, de_gru_output, encoder_output):
step_in = de_embeded[:, i]
last_state = de_gru_output[:, i]
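# attention scores: score[b, t] = <encoder_output[b, t, :], last_state[b, :]>,
# softmax-normalised over the encoder time axis. Note tf.squeeze removes every
# size-1 axis, so with a batch of one it would also drop the batch dimension.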
attention_weight = tf.nn.softmax(tf.squeeze(tf.matmul(encoder_output, tf.expand_dims(last_state, axis=2))))
context_c = tf.reduce_sum(tf.multiply(tf.expand_dims(attention_weight, axis=2), encoder_output), axis=1)
step_in = tf.concat((step_in, context_c), axis=-1)
in_concat = tf.concat((step_in, last_state), axis=-1)
gate_inputs = tf.sigmoid(tf.matmul(in_concat, self.de_w_r_z) + self.de_b_r_z)
r, z = tf.split(value=gate_inputs, num_or_size_splits=2, axis=1)
h_ = tf.tanh(tf.matmul(tf.concat((step_in, r * last_state), axis=-1), self.de_w_h) + self.de_b_h)
h = z * last_state + (1 - z) * h_
de_gru_output = tf.concat((de_gru_output, tf.expand_dims(h, axis=1)), axis=1)
i = i + 1
return i, de_embeded, de_gru_output, encoder_output
def __call__(self, de_embeded, de_gru_output, encoder_output, *args, **kwargs):
"""
de_gru_output can be created inside this method; doing so does not raise an uninitialized-variable error
:param de_embeded:
:param args:
:param kwargs:
:return:
"""
i0 = tf.constant(0)
_, _, decoder_output, _ = tf.while_loop(self.de_cond, self.de_gru,
loop_vars=[i0, de_embeded, de_gru_output, encoder_output],
shape_invariants=[i0.get_shape(), de_embeded.get_shape(),
tf.TensorShape([None, None, self.units]),
encoder_output.get_shape()])
return decoder_output
sess = tf.Session()
# encoder
en_input = tf.placeholder(tf.int32, shape=[None, None])
en_embedding_variable = tf.Variable(tf.truncated_normal(shape=[DIC_SIZE, EMBEDDING_SIZE]))
en_embeded = tf.nn.embedding_lookup(en_embedding_variable, en_input)
en_gru_cell_1 = GruCell(units=GRU_UNITS, step_dimension=EMBEDDING_SIZE)
gru_init_state_1 = tf.zeros(shape=[BATCH_SIZE, 1, en_gru_cell_1.units])  # this should not be defined as a tf.Variable
encoder_output_1 = en_gru_cell_1(en_embeded, gru_init_state_1)
en_gru_cell_2 = GruCell(units=GRU_UNITS, step_dimension=en_gru_cell_1.units)
gru_init_state_2 = tf.zeros(shape=[BATCH_SIZE, 1, en_gru_cell_2.units])  # this should not be defined as a tf.Variable
encoder_output_2 = en_gru_cell_2(encoder_output_1[:, 1:], gru_init_state_2)
# decoder
de_in_label = tf.placeholder(tf.int32, shape=[None, None]) # batch_size,seq_len
de_embedding_variable = tf.Variable(tf.truncated_normal(shape=[LABEL_SIZE, EMBEDDING_SIZE]))
de_embeded = tf.nn.embedding_lookup(de_embedding_variable, de_in_label)
de_gru_cell_1 = GruCellAttentionDecoder(GRU_UNITS, step_dimension=EMBEDDING_SIZE)
de_init_state_1 = tf.expand_dims(encoder_output_1[:, -1], axis=1) # init state
decoder_output_1 = de_gru_cell_1(de_embeded, de_init_state_1, encoder_output_2[:, 1:])
de_gru_cell_2 = GruCell(GRU_UNITS, step_dimension=de_gru_cell_1.units)
de_init_state_2 = tf.expand_dims(encoder_output_2[:, -1], axis=1) # init state
decoder_output_2 = de_gru_cell_2(decoder_output_1[:, 1:], de_init_state_2)
# fully connected layers
dense_w_1 = tf.Variable(tf.truncated_normal(shape=[GRU_UNITS, GRU_UNITS // 2]))
dense_b_1 = tf.Variable(tf.truncated_normal(shape=[GRU_UNITS // 2, ]))
dense_w_2 = tf.Variable(tf.truncated_normal(shape=[GRU_UNITS // 2, LABEL_SIZE]))
dense_b_2 = tf.Variable(tf.truncated_normal(shape=[LABEL_SIZE, ]))
dense_1 = tf.nn.leaky_relu(tf.tensordot(decoder_output_2, dense_w_1, [[2], [0]]) + dense_b_1)
output = tf.nn.leaky_relu(tf.tensordot(dense_1, dense_w_2, [[2], [0]]) + dense_b_2)
loss = tf.losses.sparse_softmax_cross_entropy(labels=de_in_label[:, 1:], logits=output[:, 1:-1])
optimizer = tf.train.AdamOptimizer(learning_rate=0.002).minimize(loss)
decoder_start = np.zeros(shape=[BATCH_SIZE, 1]) + LABEL_SIZE - 1
saver = tf.train.Saver()
ds = input_fn("../../../data/translate/train_data.tfrecord").make_one_shot_iterator().get_next()
# sess.run(tf.global_variables_initializer())
saver.restore(sess, save_path=model_path + "ner.model-1")
def save_model():
np.savetxt("./params_v3/en_embeding", sess.run(en_embedding_variable))
np.savetxt("./params_v3/de_embedding", sess.run(de_embedding_variable))
np.savetxt("./params_v3/en_grucell_1_w_r_z", sess.run(en_gru_cell_1.en_w_r_z))
np.savetxt("./params_v3/en_grucell_1_b_r_z", sess.run(en_gru_cell_1.en_b_r_z))
np.savetxt("./params_v3/en_grucell_1_w_h", sess.run(en_gru_cell_1.en_w_h))
np.savetxt("./params_v3/en_grucell_1_b_h", sess.run(en_gru_cell_1.en_b_h))
# --
np.savetxt | _z = tf. | identifier_name |
attention_refine.py | (tf.expand_dims(attention_weight, axis=2), encoder_output), axis=1)
step_in = tf.concat((step_in, context_c), axis=-1)
in_concat = tf.concat((step_in, last_state), axis=-1)
gate_inputs = tf.sigmoid(tf.matmul(in_concat, self.de_w_r_z) + self.de_b_r_z)
r, z = tf.split(value=gate_inputs, num_or_size_splits=2, axis=1)
h_ = tf.tanh(tf.matmul(tf.concat((step_in, r * last_state), axis=-1), self.de_w_h) + self.de_b_h)
h = z * last_state + (1 - z) * h_
de_gru_output = tf.concat((de_gru_output, tf.expand_dims(h, axis=1)), axis=1)
i = i + 1
return i, de_embeded, de_gru_output, encoder_output
def __call__(self, de_embeded, de_gru_output, encoder_output, *args, **kwargs):
"""
de_gru_output can be created inside this method; doing so does not raise an uninitialized-variable error
:param de_embeded:
:param args:
:param kwargs:
:return:
"""
i0 = tf.constant(0)
_, _, decoder_output, _ = tf.while_loop(self.de_cond, self.de_gru,
loop_vars=[i0, de_embeded, de_gru_output, encoder_output],
shape_invariants=[i0.get_shape(), de_embeded.get_shape(),
tf.TensorShape([None, None, self.units]),
encoder_output.get_shape()])
return decoder_output
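# --- illustrative sketch (not part of the original script) ---------------------
# One GRU step as implemented by en_gru/de_gru above, in NumPy: the update gate z
# keeps the old state and (1 - z) admits the candidate state.
def _gru_step_sketch(x, h_prev, w_rz, b_rz, w_h, b_h):
    import numpy as np
    sigmoid = lambda a: 1.0 / (1.0 + np.exp(-a))
    gates = sigmoid(np.concatenate([x, h_prev], axis=1) @ w_rz + b_rz)
    r, z = np.split(gates, 2, axis=1)
    h_cand = np.tanh(np.concatenate([x, r * h_prev], axis=1) @ w_h + b_h)
    return z * h_prev + (1.0 - z) * h_cand
# -------------------------------------------------------------------------------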
sess = tf.Session()
# encoder
en_input = tf.placeholder(tf.int32, shape=[None, None])
en_embedding_variable = tf.Variable(tf.truncated_normal(shape=[DIC_SIZE, EMBEDDING_SIZE]))
en_embeded = tf.nn.embedding_lookup(en_embedding_variable, en_input)
en_gru_cell_1 = GruCell(units=GRU_UNITS, step_dimension=EMBEDDING_SIZE)
gru_init_state_1 = tf.zeros(shape=[BATCH_SIZE, 1, en_gru_cell_1.units])  # this should not be defined as a tf.Variable
encoder_output_1 = en_gru_cell_1(en_embeded, gru_init_state_1)
en_gru_cell_2 = GruCell(units=GRU_UNITS, step_dimension=en_gru_cell_1.units)
gru_init_state_2 = tf.zeros(shape=[BATCH_SIZE, 1, en_gru_cell_2.units])  # this should not be defined as a tf.Variable
encoder_output_2 = en_gru_cell_2(encoder_output_1[:, 1:], gru_init_state_2)
# decoder
de_in_label = tf.placeholder(tf.int32, shape=[None, None]) # batch_size,seq_len
de_embedding_variable = tf.Variable(tf.truncated_normal(shape=[LABEL_SIZE, EMBEDDING_SIZE]))
de_embeded = tf.nn.embedding_lookup(de_embedding_variable, de_in_label)
de_gru_cell_1 = GruCellAttentionDecoder(GRU_UNITS, step_dimension=EMBEDDING_SIZE)
de_init_state_1 = tf.expand_dims(encoder_output_1[:, -1], axis=1) # init state
decoder_output_1 = de_gru_cell_1(de_embeded, de_init_state_1, encoder_output_2[:, 1:])
de_gru_cell_2 = GruCell(GRU_UNITS, step_dimension=de_gru_cell_1.units)
de_init_state_2 = tf.expand_dims(encoder_output_2[:, -1], axis=1) # init state
decoder_output_2 = de_gru_cell_2(decoder_output_1[:, 1:], de_init_state_2)
# fully connected layers
dense_w_1 = tf.Variable(tf.truncated_normal(shape=[GRU_UNITS, GRU_UNITS // 2]))
dense_b_1 = tf.Variable(tf.truncated_normal(shape=[GRU_UNITS // 2, ]))
dense_w_2 = tf.Variable(tf.truncated_normal(shape=[GRU_UNITS // 2, LABEL_SIZE]))
dense_b_2 = tf.Variable(tf.truncated_normal(shape=[LABEL_SIZE, ]))
dense_1 = tf.nn.leaky_relu(tf.tensordot(decoder_output_2, dense_w_1, [[2], [0]]) + dense_b_1)
output = tf.nn.leaky_relu(tf.tensordot(dense_1, dense_w_2, [[2], [0]]) + dense_b_2)
loss = tf.losses.sparse_softmax_cross_entropy(labels=de_in_label[:, 1:], logits=output[:, 1:-1])
optimizer = tf.train.AdamOptimizer(learning_rate=0.002).minimize(loss)
decoder_start = np.zeros(shape=[BATCH_SIZE, 1]) + LABEL_SIZE - 1
saver = tf.train.Saver()
ds = input_fn("../../../data/translate/train_data.tfrecord").make_one_shot_iterator().get_next()
# sess.run(tf.global_variables_initializer())
saver.restore(sess, save_path=model_path + "ner.model-1")
def save_model():
np.savetxt("./params_v3/en_embeding", sess.run(en_embedding_variable))
np.savetxt("./params_v3/de_embedding", sess.run(de_embedding_variable))
np.savetxt("./params_v3/en_grucell_1_w_r_z", sess.run(en_gru_cell_1.en_w_r_z))
np.savetxt("./params_v3/en_grucell_1_b_r_z", sess.run(en_gru_cell_1.en_b_r_z))
np.savetxt("./params_v3/en_grucell_1_w_h", sess.run(en_gru_cell_1.en_w_h))
np.savetxt("./params_v3/en_grucell_1_b_h", sess.run(en_gru_cell_1.en_b_h))
# --
np.savetxt("./params_v3/en_grucell_2_w_r_z", sess.run(en_gru_cell_2.en_w_r_z))
np.savetxt("./params_v3/en_grucell_2_b_r_z", sess.run(en_gru_cell_2.en_b_r_z))
np.savetxt("./params_v3/en_grucell_2_w_h", sess.run(en_gru_cell_2.en_w_h))
np.savetxt("./params_v3/en_grucell_2_b_h", sess.run(en_gru_cell_2.en_b_h))
# --
np.savetxt("./params_v3/de_grucell_1_w_r_z", sess.run(de_gru_cell_1.de_w_r_z))
np.savetxt("./params_v3/de_grucell_1_b_r_z", sess.run(de_gru_cell_1.de_b_r_z))
np.savetxt("./params_v3/de_grucell_1_w_h", sess.run(de_gru_cell_1.de_w_h))
np.savetxt("./params_v3/de_grucell_1_b_h", sess.run(de_gru_cell_1.de_b_h))
np.savetxt("./params_v3/de_grucell_2_w_r_z", sess.run(de_gru_cell_2.en_w_r_z))
np.savetxt("./params_v3/de_grucell_2_b_r_z", sess.run(de_gru_cell_2.en_b_r_z))
np.savetxt("./params_v3/de_grucell_2_w_h", sess.run(de_gru_cell_2.en_w_h))
np.savetxt("./params_v3/de_grucell_2_b_h", sess.run(de_gru_cell_2.en_b_h))
np.savetxt("./params_v3/dense_w_1", sess.run(dense_w_1))
np.savetxt("./params_v3/dense_b_1", sess.run(dense_b_1))
np.savetxt("./params_v3/dense_w_2", sess.run(dense_w_2))
np.savetxt("./params_v3/dense_b_2", sess.run(dense_b_2))
min_v = 0.1
for i in range(100000):
features, labels, f_lengths, l_lengths = sess.run(ds)
"""
During training, the maximum sequence length differs from batch to batch. This speeds up
training but has some impact on accuracy; in this translation setting, moderately increasing
the padding at prediction time has a somewhat positive effect on the final result.
A follow-up optimisation is to re-organise the training data into length buckets, e.g.
samples shorter than 10 padded to length 10, samples of 10-20 padded to length 20.
Training with a single fixed padded length could increase the compute requirement by tens of times.
"""
f_max_len = f_lengths[np.argmax( | f_lengths, axis=-1)]
l_max_len = l_lengths[np.argmax(l_lengths, axis=-1)]
loss_value, _ = sess.run((loss, optimizer), feed_dict={en_input: features[:, :f_max_len + 1],
de_in_label: np.concatenate(
(decoder_start, labels[:, :l_max_len + 1]),
axis=-1)[:,
0:-1]})
print(i, loss_value)
if i % 10 == 0 and (min_v > loss_value):
saver.save(sess, model_path + "ner.model", global_step=int(loss_value * 1000))
min_v = loss_value
"""
if i % 1 == 0:
# save_model()
# saver.save(sess, model_path + "ner.model", global_step=int(loss_value * 1000))
pred = sess.run(output, | conditional_block |
|
attention_refine.py | 0000)
self.en_b_r_z = tf.Variable(tf.truncated_normal(shape=[units * 2, ]) / 10000)
self.en_w_h = tf.Variable(tf.truncated_normal(shape=[step_dimension + self.units, self.units]) / 10000)
self.en_b_h = tf.Variable(tf.truncated_normal(shape=[units, ]) / 10000)
def en_cond(self, i, en_embeded, en_gru_output):
return i < tf.shape(en_embeded)[1]
def en_gru(self, i, en_embeded, en_gru_output):
step_in = en_embeded[:, i]
last_state = en_gru_output[:, i]
in_concat = tf.concat((step_in, last_state), axis=-1)
gate_inputs = tf.sigmoid(tf.matmul(in_concat, self.en_w_r_z) + self.en_b_r_z)
r, z = tf.split(value=gate_inputs, num_or_size_splits=2, axis=1)
h_ = tf.tanh(tf.matmul(tf.concat((step_in, r * last_state), axis=-1), self.en_w_h) + self.en_b_h)
h = z * last_state + (1 - z) * h_
en_gru_output = tf.concat((en_gru_output, tf.expand_dims(h, axis=1)), axis=1)
i = i + 1
return i, en_embeded, en_gru_output
def __call__(self, seqs, en_gru_output, *args, **kwargs):
"""
Creating en_gru_output inside __call__ is problematic: decoding reports an uninitialized variable, so the tensor is created outside and passed in. The likely cause is that the last state of this tensor is later used to initialise other variables.
:param seqs:
:param en_gru_output:
:param args:
:param kwargs:
:return:
"""
i0 = tf.constant(0)
_, _, encoder_output = tf.while_loop(self.en_cond, self.en_gru, loop_vars=[i0, seqs, en_gru_output],
shape_invariants=[i0.get_shape(), seqs.get_shape(),
tf.TensorShape([None, None, self.units])])
return encoder_output
class GruCellAttentionDecoder:
def __init__(self, units, step_dimension):
"""
:param units: output dimension of each time step
:param step_dimension: input dimension of each time step
"""
self.units = units
self.de_w_r_z = tf.Variable(
tf.truncated_norm | last_state = de_gru_output[:, i]
attention_weight = tf.nn.softmax(tf.squeeze(tf.matmul(encoder_output, tf.expand_dims(last_state, axis=2))))
context_c = tf.reduce_sum(tf.multiply(tf.expand_dims(attention_weight, axis=2), encoder_output), axis=1)
step_in = tf.concat((step_in, context_c), axis=-1)
in_concat = tf.concat((step_in, last_state), axis=-1)
gate_inputs = tf.sigmoid(tf.matmul(in_concat, self.de_w_r_z) + self.de_b_r_z)
r, z = tf.split(value=gate_inputs, num_or_size_splits=2, axis=1)
h_ = tf.tanh(tf.matmul(tf.concat((step_in, r * last_state), axis=-1), self.de_w_h) + self.de_b_h)
h = z * last_state + (1 - z) * h_
de_gru_output = tf.concat((de_gru_output, tf.expand_dims(h, axis=1)), axis=1)
i = i + 1
return i, de_embeded, de_gru_output, encoder_output
def __call__(self, de_embeded, de_gru_output, encoder_output, *args, **kwargs):
"""
可以在内部创建de_gru_output,并不会抛出未初始化异常
:param de_embeded:
:param args:
:param kwargs:
:return:
"""
i0 = tf.constant(0)
_, _, decoder_output, _ = tf.while_loop(self.de_cond, self.de_gru,
loop_vars=[i0, de_embeded, de_gru_output, encoder_output],
shape_invariants=[i0.get_shape(), de_embeded.get_shape(),
tf.TensorShape([None, None, self.units]),
encoder_output.get_shape()])
return decoder_output
sess = tf.Session()
# encoder
en_input = tf.placeholder(tf.int32, shape=[None, None])
en_embedding_variable = tf.Variable(tf.truncated_normal(shape=[DIC_SIZE, EMBEDDING_SIZE]))
en_embeded = tf.nn.embedding_lookup(en_embedding_variable, en_input)
en_gru_cell_1 = GruCell(units=GRU_UNITS, step_dimension=EMBEDDING_SIZE)
gru_init_state_1 = tf.zeros(shape=[BATCH_SIZE, 1, en_gru_cell_1.units])  # this should not be defined as a tf.Variable
encoder_output_1 = en_gru_cell_1(en_embeded, gru_init_state_1)
en_gru_cell_2 = GruCell(units=GRU_UNITS, step_dimension=en_gru_cell_1.units)
gru_init_state_2 = tf.zeros(shape=[BATCH_SIZE, 1, en_gru_cell_2.units])  # this should not be defined as a tf.Variable
encoder_output_2 = en_gru_cell_2(encoder_output_1[:, 1:], gru_init_state_2)
# decoder
de_in_label = tf.placeholder(tf.int32, shape=[None, None]) # batch_size,seq_len
de_embedding_variable = tf.Variable(tf.truncated_normal(shape=[LABEL_SIZE, EMBEDDING_SIZE]))
de_embeded = tf.nn.embedding_lookup(de_embedding_variable, de_in_label)
de_gru_cell_1 = GruCellAttentionDecoder(GRU_UNITS, step_dimension=EMBEDDING_SIZE)
de_init_state_1 = tf.expand_dims(encoder_output_1[:, -1], axis=1) # init state
decoder_output_1 = de_gru_cell_1(de_embeded, de_init_state_1, encoder_output_2[:, 1:])
de_gru_cell_2 = GruCell(GRU_UNITS, step_dimension=de_gru_cell_1.units)
de_init_state_2 = tf.expand_dims(encoder_output_2[:, -1], axis=1) # init state
decoder_output_2 = de_gru_cell_2(decoder_output_1[:, 1:], de_init_state_2)
# fully connected layers
dense_w_1 = tf.Variable(tf.truncated_normal(shape=[GRU_UNITS, GRU_UNITS // 2]))
dense_b_1 = tf.Variable(tf.truncated_normal(shape=[GRU_UNITS // 2, ]))
dense_w_2 = tf.Variable(tf.truncated_normal(shape=[GRU_UNITS // 2, LABEL_SIZE]))
dense_b_2 = tf.Variable(tf.truncated_normal(shape=[LABEL_SIZE, ]))
dense_1 = tf.nn.leaky_relu(tf.tensordot(decoder_output_2, dense_w_1, [[2], [0]]) + dense_b_1)
output = tf.nn.leaky_relu(tf.tensordot(dense_1, dense_w_2, [[2], [0]]) + dense_b_2)
loss = tf.losses.sparse_softmax_cross_entropy(labels=de_in_label[:, 1:], logits=output[:, 1:-1])
optimizer = tf.train.AdamOptimizer(learning_rate=0.002).minimize(loss)
decoder_start = np.zeros(shape=[BATCH_SIZE, 1]) + LABEL_SIZE - 1
saver = tf.train.Saver()
ds = input_fn("../../../data/translate/train_data.tfrecord").make_one_shot_iterator().get_next()
# sess.run(tf.global_variables_initializer())
saver.restore(sess, save_path=model_path + "ner.model-1")
def save_model():
np.savetxt("./params_v3/en_embeding", sess.run(en_embedding_variable))
np.savetxt("./params_v3/de_embedding", sess.run(de_embedding_variable))
np.savetxt("./params_v3/en_grucell_1_w_r_z", sess.run(en_gru_cell_1.en_w_r_z))
np.savetxt("./params_v3/en_grucell_1_b_r_z", sess.run(en_gru_cell_1.en_b_r_z))
np.savetxt("./params_v3/en_grucell_1_w_h", sess.run(en_gru_cell_1.en_w_h))
np.savetxt("./params_v3/en_grucell_1_b_h", sess.run(en_gru_cell_1.en_b_h))
# --
np.savetxt | al(shape=[step_dimension + self.units * 2, self.units * 2]) / 10000)
self.de_b_r_z = tf.Variable(tf.truncated_normal(shape=[self.units * 2, ]) / 10000)
self.de_w_h = tf.Variable(tf.truncated_normal(shape=[step_dimension + self.units * 2, self.units]) / 10000)
self.de_b_h = tf.Variable(tf.truncated_normal(shape=[self.units, ]) / 10000)
def de_cond(self, i, de_embeded, de_gru_output, encoder_output):
return i < tf.shape(de_embeded)[1]
def de_gru(self, i, de_embeded, de_gru_output, encoder_output):
step_in = de_embeded[:, i]
| identifier_body |
client.go | -collector-docker/fs"
"github.com/intelsdi-x/snap-plugin-collector-docker/network"
"github.com/intelsdi-x/snap-plugin-collector-docker/wrapper"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
const (
endpoint string = "unix:///var/run/docker.sock"
dockerVersionKey string = "Version"
)
// DockerClientInterface provides methods i.a. for interaction with the docker API.
type DockerClientInterface interface {
ListContainersAsMap() (map[string]docker.APIContainers, error)
GetStatsFromContainer(string, bool) (*wrapper.Statistics, error)
InspectContainer(string) (*docker.Container, error)
FindCgroupMountpoint(string) (string, error)
}
// DockerClient holds a fsouza go-dockerclient instance ready for communication with the server endpoint `unix:///var/run/docker.sock`,
// a cache instance which stores the output of docker container inspect (to avoid executing the inspect request multiple times, it is called only once per container),
// and a diskUsageCollector which is responsible for collecting container disk usage (based on the `du -u` command) in the background
type DockerClient struct {
cl *docker.Client
inspectCache map[string]*docker.Container
inspectMutex sync.Mutex
diskUsageCollector fs.DiskUsageCollector
}
type deviceInfo struct {
device string
major string
minor string
}
// NewDockerClient returns dockerClient instance ready for communication with the server endpoint `unix:///var/run/docker.sock`
func NewDockerClient() (*DockerClient, error) {
client, err := docker.NewClient(endpoint)
if err != nil {
return nil, fmt.Errorf("Cannot initialize docker client instance with the given server endpoint `%s`, err=%v", endpoint, err)
}
dc := &DockerClient{
cl: client,
inspectCache: map[string]*docker.Container{},
diskUsageCollector: fs.DiskUsageCollector{},
}
dc.diskUsageCollector.Init()
// get version of docker engine
version, err := dc.version()
if err != nil {
return nil, err
}
config.DockerVersion = version
return dc, nil
}
// FindCgroupMountpoint returns cgroup mountpoint of a given subsystem
func (dc *DockerClient) FindCgroupMountpoint(subsystem string) (string, error) |
// GetShortID returns short container ID (12 chars)
func GetShortID(dockerID string) (string, error) {
if len(dockerID) < 12 {
return "", fmt.Errorf("Docker id %v is too short (the length of id should equal at least 12)", dockerID)
}
return dockerID[:12], nil
}
// GetStatsFromContainer returns docker container stats: cgroup stats (cpu usage, memory usage, etc.) and network stats (tx_bytes, rx_bytes, etc.);
// note that the incoming container id has to be full-length to be able to inspect the container
func (dc *DockerClient) GetStatsFromContainer(id string, collectFs bool) (*wrapper.Statistics, error) {
var (
err error
pid int
workingSet uint64
container = &docker.Container{}
groupWrap = wrapper.Cgroups2Stats // wrapper for cgroup name and interface for stats extraction
stats = wrapper.NewStatistics()
)
if !isHost(id) {
if !isFullLengthID(id) {
return nil, fmt.Errorf("Container id %+v is not fully-length - cannot inspect container", id)
}
// inspect container based only on fully-length container id.
container, err = dc.InspectContainer(id)
if err != nil {
return nil, err
}
// take docker container PID
pid = container.State.Pid
}
for cg, stat := range groupWrap {
groupPath, err := getSubsystemPath(cg, id)
if err != nil {
fmt.Fprintln(os.Stderr, "Cannot find subsystem path for cgroup=", cg, " for container id=", container)
continue
}
// get cgroup stats for given docker
err = stat.GetStats(groupPath, stats.CgroupStats)
if err != nil {
// just log about it
if isHost(id) {
fmt.Fprintln(os.Stderr, "Cannot obtain cgroups statistics for host, err=", err)
} else {
fmt.Fprintln(os.Stderr, "Cannot obtain cgroups statistics for container: id=", id, ", image=", container.Image, ", name=", container.Name, ", err=", err)
}
continue
}
}
// calculate additional stats memory:working_set based on memory_stats
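// working_set = max(0, usage - total_inactive_anon - total_inactive_file):
// memory usage minus the pages the kernel has marked inactive, clamped at zero.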
if totalInactiveAnon, ok := stats.CgroupStats.MemoryStats.Stats["total_inactive_anon"]; ok {
workingSet = stats.CgroupStats.MemoryStats.Usage.Usage
if workingSet < totalInactiveAnon {
workingSet = 0
} else {
workingSet -= totalInactiveAnon
}
if totalInactiveFile, ok := stats.CgroupStats.MemoryStats.Stats["total_inactive_file"]; ok {
if workingSet < totalInactiveFile {
workingSet = 0
} else {
workingSet -= totalInactiveFile
}
}
}
stats.CgroupStats.MemoryStats.Stats["working_set"] = workingSet
if !isHost(id) {
rootFs := "/"
stats.Network, err = network.NetworkStatsFromProc(rootFs, pid)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get network stats, containerID=%+v, pid %d: %v", container.ID, pid, err)
}
stats.Connection.Tcp, err = network.TcpStatsFromProc(rootFs, pid)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get tcp stats from pid %d: %v", pid, err)
}
stats.Connection.Tcp6, err = network.Tcp6StatsFromProc(rootFs, pid)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get tcp6 stats from pid %d: %v", pid, err)
}
} else {
stats.Network, err = network.NetworkStatsFromRoot()
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get network stats, containerID=%v, %v", id, err)
}
}
if collectFs {
stats.Filesystem, err = fs.GetFsStats(container)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get filesystem stats for docker: %v, err=%v", id, err)
}
}
return stats, nil
}
// InspectContainer returns information about the container with given ID
func (dc *DockerClient) InspectContainer(id string) (*docker.Container, error) {
dc.inspectMutex.Lock()
defer dc.inspectMutex.Unlock()
// check if the inspect info is already stored in inspectCache
if info, haveInfo := dc.inspectCache[id]; haveInfo {
return info, nil
}
info, err := dc.cl.InspectContainer(id)
if err != nil {
return nil, err
}
dc.inspectCache[id] = info
return info, nil
}
// ListContainersAsMap returns list of all available docker containers and base information about them (status, uptime, etc.)
func (dc *DockerClient) ListContainersAsMap() (map[string]docker.APIContainers, error) {
containers := make(map[string]docker.APIContainers)
containerList, err := dc.cl.ListContainers(docker.ListContainersOptions{})
if err != nil {
return nil, err
}
for _, cont := range containerList {
shortID, err := GetShortID(cont.ID)
if err != nil {
return nil, err
}
containers[shortID] = cont
}
containers["root"] = docker.APIContainers{ID: "/"}
if len(containers) == 0 {
return nil, errors.New("No docker container found")
}
return containers, nil
}
func getSubsystemPath(subsystem string, id string) (string, error) {
var subsystemPath string
systemSlice := "system.slice"
groupPath, err := cgroups.FindCgroupMountpoint(subsystem)
if err != nil {
fmt.Fprintf(os.Stderr, "[WARNING] Could not find mount point for %v\n", subsystem)
return "", err
}
if isHost(id) {
if isRunningSystemd() {
subsystemPath = filepath.Join(groupPath, systemSlice)
} else {
subsystemPath = groupPath
}
return subsystemPath, nil
}
if isFsCgroupParent(groupPath) {
// default cgroupfs parent is used for container
subsystemPath = filepath.Join(groupPath, "docker", id)
} else {
// cgroup is created under systemd.slice
subsystemPath = filepath.Join(groupPath, systemSlice, "docker-"+id+".scope")
}
return subsystemPath, nil
}
// isFullLengthID returns true if docker ID is a full-length (64 chars)
func isFullLengthID(dockerID string) bool {
if | {
return cgroups.FindCgroupMountpoint(subsystem)
} | identifier_body |
client.go | -collector-docker/fs"
"github.com/intelsdi-x/snap-plugin-collector-docker/network"
"github.com/intelsdi-x/snap-plugin-collector-docker/wrapper"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
const (
endpoint string = "unix:///var/run/docker.sock"
dockerVersionKey string = "Version"
)
// DockerClientInterface provides methods i.a. for interaction with the docker API.
type DockerClientInterface interface {
ListContainersAsMap() (map[string]docker.APIContainers, error)
GetStatsFromContainer(string, bool) (*wrapper.Statistics, error)
InspectContainer(string) (*docker.Container, error)
FindCgroupMountpoint(string) (string, error)
}
// DockerClient holds a fsouza go-dockerclient instance ready for communication with the server endpoint `unix:///var/run/docker.sock`,
// a cache instance which stores the output of docker container inspect (to avoid executing the inspect request multiple times, it is called only once per container),
// and a diskUsageCollector which is responsible for collecting container disk usage (based on the `du -u` command) in the background
type DockerClient struct {
cl *docker.Client
inspectCache map[string]*docker.Container
inspectMutex sync.Mutex
diskUsageCollector fs.DiskUsageCollector
}
type deviceInfo struct {
device string
major string
minor string
}
// NewDockerClient returns dockerClient instance ready for communication with the server endpoint `unix:///var/run/docker.sock`
func NewDockerClient() (*DockerClient, error) {
client, err := docker.NewClient(endpoint)
if err != nil {
return nil, fmt.Errorf("Cannot initialize docker client instance with the given server endpoint `%s`, err=%v", endpoint, err)
}
dc := &DockerClient{
cl: client,
inspectCache: map[string]*docker.Container{},
diskUsageCollector: fs.DiskUsageCollector{},
}
dc.diskUsageCollector.Init()
// get version of docker engine
version, err := dc.version()
if err != nil {
return nil, err
}
config.DockerVersion = version
return dc, nil
}
// FindCgroupMountpoint returns cgroup mountpoint of a given subsystem
func (dc *DockerClient) FindCgroupMountpoint(subsystem string) (string, error) {
return cgroups.FindCgroupMountpoint(subsystem)
}
// GetShortID returns short container ID (12 chars)
func GetShortID(dockerID string) (string, error) {
if len(dockerID) < 12 {
return "", fmt.Errorf("Docker id %v is too short (the length of id should equal at least 12)", dockerID)
}
return dockerID[:12], nil
}
// GetStatsFromContainer returns docker containers stats: cgroups stats (cpu usage, memory usage, etc.) and network stats (tx_bytes, rx_bytes etc.); | func (dc *DockerClient) GetStatsFromContainer(id string, collectFs bool) (*wrapper.Statistics, error) {
var (
err error
pid int
workingSet uint64
container = &docker.Container{}
groupWrap = wrapper.Cgroups2Stats // wrapper for cgroup name and interface for stats extraction
stats = wrapper.NewStatistics()
)
if !isHost(id) {
if !isFullLengthID(id) {
return nil, fmt.Errorf("Container id %+v is not fully-length - cannot inspect container", id)
}
// inspect container based only on fully-length container id.
container, err = dc.InspectContainer(id)
if err != nil {
return nil, err
}
// take docker container PID
pid = container.State.Pid
}
for cg, stat := range groupWrap {
groupPath, err := getSubsystemPath(cg, id)
if err != nil {
fmt.Fprintln(os.Stderr, "Cannot find subsystem path for cgroup=", cg, " for container id=", container)
continue
}
// get cgroup stats for given docker
err = stat.GetStats(groupPath, stats.CgroupStats)
if err != nil {
// just log about it
if isHost(id) {
fmt.Fprintln(os.Stderr, "Cannot obtain cgroups statistics for host, err=", err)
} else {
fmt.Fprintln(os.Stderr, "Cannot obtain cgroups statistics for container: id=", id, ", image=", container.Image, ", name=", container.Name, ", err=", err)
}
continue
}
}
// calculate additional stats memory:working_set based on memory_stats
if totalInactiveAnon, ok := stats.CgroupStats.MemoryStats.Stats["total_inactive_anon"]; ok {
workingSet = stats.CgroupStats.MemoryStats.Usage.Usage
if workingSet < totalInactiveAnon {
workingSet = 0
} else {
workingSet -= totalInactiveAnon
}
if totalInactiveFile, ok := stats.CgroupStats.MemoryStats.Stats["total_inactive_file"]; ok {
if workingSet < totalInactiveFile {
workingSet = 0
} else {
workingSet -= totalInactiveFile
}
}
}
stats.CgroupStats.MemoryStats.Stats["working_set"] = workingSet
if !isHost(id) {
rootFs := "/"
stats.Network, err = network.NetworkStatsFromProc(rootFs, pid)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get network stats, containerID=%+v, pid %d: %v", container.ID, pid, err)
}
stats.Connection.Tcp, err = network.TcpStatsFromProc(rootFs, pid)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get tcp stats from pid %d: %v", pid, err)
}
stats.Connection.Tcp6, err = network.Tcp6StatsFromProc(rootFs, pid)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get tcp6 stats from pid %d: %v", pid, err)
}
} else {
stats.Network, err = network.NetworkStatsFromRoot()
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get network stats, containerID=%v, %v", id, err)
}
}
if collectFs {
stats.Filesystem, err = fs.GetFsStats(container)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get filesystem stats for docker: %v, err=%v", id, err)
}
}
return stats, nil
}
// InspectContainer returns information about the container with given ID
func (dc *DockerClient) InspectContainer(id string) (*docker.Container, error) {
dc.inspectMutex.Lock()
defer dc.inspectMutex.Unlock()
// check if the inspect info is already stored in inspectCache
if info, haveInfo := dc.inspectCache[id]; haveInfo {
return info, nil
}
info, err := dc.cl.InspectContainer(id)
if err != nil {
return nil, err
}
dc.inspectCache[id] = info
return info, nil
}
// ListContainersAsMap returns list of all available docker containers and base information about them (status, uptime, etc.)
func (dc *DockerClient) ListContainersAsMap() (map[string]docker.APIContainers, error) {
containers := make(map[string]docker.APIContainers)
containerList, err := dc.cl.ListContainers(docker.ListContainersOptions{})
if err != nil {
return nil, err
}
for _, cont := range containerList {
shortID, err := GetShortID(cont.ID)
if err != nil {
return nil, err
}
containers[shortID] = cont
}
containers["root"] = docker.APIContainers{ID: "/"}
if len(containers) == 0 {
return nil, errors.New("No docker container found")
}
return containers, nil
}
func getSubsystemPath(subsystem string, id string) (string, error) {
var subsystemPath string
systemSlice := "system.slice"
groupPath, err := cgroups.FindCgroupMountpoint(subsystem)
if err != nil {
fmt.Fprintf(os.Stderr, "[WARNING] Could not find mount point for %v\n", subsystem)
return "", err
}
if isHost(id) {
if isRunningSystemd() {
subsystemPath = filepath.Join(groupPath, systemSlice)
} else {
subsystemPath = groupPath
}
return subsystemPath, nil
}
if isFsCgroupParent(groupPath) {
// default cgroupfs parent is used for container
subsystemPath = filepath.Join(groupPath, "docker", id)
} else {
// cgroup is created under systemd.slice
subsystemPath = filepath.Join(groupPath, systemSlice, "docker-"+id+".scope")
}
return subsystemPath, nil
}
// isFullLengthID returns true if docker ID is a full-length (64 chars)
func isFullLengthID(dockerID string) bool {
if len(d | // notes that incoming container id has to be full-length to be able to inspect container | random_line_split |
client.go | -collector-docker/fs"
"github.com/intelsdi-x/snap-plugin-collector-docker/network"
"github.com/intelsdi-x/snap-plugin-collector-docker/wrapper"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
const (
endpoint string = "unix:///var/run/docker.sock"
dockerVersionKey string = "Version"
)
// DockerClientInterface provides methods i.a. for interaction with the docker API.
type DockerClientInterface interface {
ListContainersAsMap() (map[string]docker.APIContainers, error)
GetStatsFromContainer(string, bool) (*wrapper.Statistics, error)
InspectContainer(string) (*docker.Container, error)
FindCgroupMountpoint(string) (string, error)
}
// DockerClient holds a fsouza go-dockerclient instance ready for communication with the server endpoint `unix:///var/run/docker.sock`,
// a cache instance which stores the output of docker container inspect (to avoid executing the inspect request multiple times, it is called only once per container),
// and a diskUsageCollector which is responsible for collecting container disk usage (based on the `du -u` command) in the background
type DockerClient struct {
cl *docker.Client
inspectCache map[string]*docker.Container
inspectMutex sync.Mutex
diskUsageCollector fs.DiskUsageCollector
}
type deviceInfo struct {
device string
major string
minor string
}
// NewDockerClient returns dockerClient instance ready for communication with the server endpoint `unix:///var/run/docker.sock`
func NewDockerClient() (*DockerClient, error) {
client, err := docker.NewClient(endpoint)
if err != nil {
return nil, fmt.Errorf("Cannot initialize docker client instance with the given server endpoint `%s`, err=%v", endpoint, err)
}
dc := &DockerClient{
cl: client,
inspectCache: map[string]*docker.Container{},
diskUsageCollector: fs.DiskUsageCollector{},
}
dc.diskUsageCollector.Init()
// get version of docker engine
version, err := dc.version()
if err != nil {
return nil, err
}
config.DockerVersion = version
return dc, nil
}
// FindCgroupMountpoint returns cgroup mountpoint of a given subsystem
func (dc *DockerClient) FindCgroupMountpoint(subsystem string) (string, error) {
return cgroups.FindCgroupMountpoint(subsystem)
}
// GetShortID returns short container ID (12 chars)
func GetShortID(dockerID string) (string, error) {
if len(dockerID) < 12 {
return "", fmt.Errorf("Docker id %v is too short (the length of id should equal at least 12)", dockerID)
}
return dockerID[:12], nil
}
// GetStatsFromContainer returns docker container stats: cgroup stats (cpu usage, memory usage, etc.) and network stats (tx_bytes, rx_bytes, etc.);
// note that the incoming container id has to be full-length to be able to inspect the container
func (dc *DockerClient) GetStatsFromContainer(id string, collectFs bool) (*wrapper.Statistics, error) {
var (
err error
pid int
workingSet uint64
container = &docker.Container{}
groupWrap = wrapper.Cgroups2Stats // wrapper for cgroup name and interface for stats extraction
stats = wrapper.NewStatistics()
)
if !isHost(id) {
if !isFullLengthID(id) {
return nil, fmt.Errorf("Container id %+v is not fully-length - cannot inspect container", id)
}
// inspect container based only on fully-length container id.
container, err = dc.InspectContainer(id)
if err != nil {
return nil, err
}
// take docker container PID
pid = container.State.Pid
}
for cg, stat := range groupWrap {
groupPath, err := getSubsystemPath(cg, id)
if err != nil {
fmt.Fprintln(os.Stderr, "Cannot find subsystem path for cgroup=", cg, " for container id=", container)
continue
}
// get cgroup stats for given docker
err = stat.GetStats(groupPath, stats.CgroupStats)
if err != nil {
// just log about it
if isHost(id) {
fmt.Fprintln(os.Stderr, "Cannot obtain cgroups statistics for host, err=", err)
} else {
fmt.Fprintln(os.Stderr, "Cannot obtain cgroups statistics for container: id=", id, ", image=", container.Image, ", name=", container.Name, ", err=", err)
}
continue
}
}
// calculate additional stats memory:working_set based on memory_stats
if totalInactiveAnon, ok := stats.CgroupStats.MemoryStats.Stats["total_inactive_anon"]; ok {
workingSet = stats.CgroupStats.MemoryStats.Usage.Usage
if workingSet < totalInactiveAnon {
workingSet = 0
} else {
workingSet -= totalInactiveAnon
}
if totalInactiveFile, ok := stats.CgroupStats.MemoryStats.Stats["total_inactive_file"]; ok {
if workingSet < totalInactiveFile {
workingSet = 0
} else {
workingSet -= totalInactiveFile
}
}
}
stats.CgroupStats.MemoryStats.Stats["working_set"] = workingSet
if !isHost(id) {
rootFs := "/"
stats.Network, err = network.NetworkStatsFromProc(rootFs, pid)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get network stats, containerID=%+v, pid %d: %v", container.ID, pid, err)
}
stats.Connection.Tcp, err = network.TcpStatsFromProc(rootFs, pid)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get tcp stats from pid %d: %v", pid, err)
}
stats.Connection.Tcp6, err = network.Tcp6StatsFromProc(rootFs, pid)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get tcp6 stats from pid %d: %v", pid, err)
}
} else {
stats.Network, err = network.NetworkStatsFromRoot()
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get network stats, containerID=%v, %v", id, err)
}
}
if collectFs {
stats.Filesystem, err = fs.GetFsStats(container)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get filesystem stats for docker: %v, err=%v", id, err)
}
}
return stats, nil
}
// InspectContainer returns information about the container with given ID
func (dc *DockerClient) InspectContainer(id string) (*docker.Container, error) {
dc.inspectMutex.Lock()
defer dc.inspectMutex.Unlock()
// check if the inspect info is already stored in inspectCache
if info, haveInfo := dc.inspectCache[id]; haveInfo {
return info, nil
}
info, err := dc.cl.InspectContainer(id)
if err != nil |
dc.inspectCache[id] = info
return info, nil
}
// ListContainersAsMap returns list of all available docker containers and base information about them (status, uptime, etc.)
func (dc *DockerClient) ListContainersAsMap() (map[string]docker.APIContainers, error) {
containers := make(map[string]docker.APIContainers)
containerList, err := dc.cl.ListContainers(docker.ListContainersOptions{})
if err != nil {
return nil, err
}
for _, cont := range containerList {
shortID, err := GetShortID(cont.ID)
if err != nil {
return nil, err
}
containers[shortID] = cont
}
containers["root"] = docker.APIContainers{ID: "/"}
if len(containers) == 0 {
return nil, errors.New("No docker container found")
}
return containers, nil
}
func getSubsystemPath(subsystem string, id string) (string, error) {
var subsystemPath string
systemSlice := "system.slice"
groupPath, err := cgroups.FindCgroupMountpoint(subsystem)
if err != nil {
fmt.Fprintf(os.Stderr, "[WARNING] Could not find mount point for %v\n", subsystem)
return "", err
}
if isHost(id) {
if isRunningSystemd() {
subsystemPath = filepath.Join(groupPath, systemSlice)
} else {
subsystemPath = groupPath
}
return subsystemPath, nil
}
if isFsCgroupParent(groupPath) {
// default cgroupfs parent is used for container
subsystemPath = filepath.Join(groupPath, "docker", id)
} else {
// cgroup is created under systemd.slice
subsystemPath = filepath.Join(groupPath, systemSlice, "docker-"+id+".scope")
}
return subsystemPath, nil
}
// isFullLengthID returns true if docker ID is a full-length (64 chars)
func isFullLengthID(dockerID string) bool {
if | {
return nil, err
} | conditional_block |
client.go | () (map[string]docker.APIContainers, error)
GetStatsFromContainer(string, bool) (*wrapper.Statistics, error)
InspectContainer(string) (*docker.Container, error)
FindCgroupMountpoint(string) (string, error)
}
// DockerClient holds a fsouza go-dockerclient instance ready for communication with the server endpoint `unix:///var/run/docker.sock`,
// a cache instance which stores the output of docker container inspect (to avoid executing the inspect request multiple times, it is called only once per container),
// and a diskUsageCollector which is responsible for collecting container disk usage (based on the `du -u` command) in the background
type DockerClient struct {
cl *docker.Client
inspectCache map[string]*docker.Container
inspectMutex sync.Mutex
diskUsageCollector fs.DiskUsageCollector
}
type deviceInfo struct {
device string
major string
minor string
}
// NewDockerClient returns dockerClient instance ready for communication with the server endpoint `unix:///var/run/docker.sock`
func NewDockerClient() (*DockerClient, error) {
client, err := docker.NewClient(endpoint)
if err != nil {
return nil, fmt.Errorf("Cannot initialize docker client instance with the given server endpoint `%s`, err=%v", endpoint, err)
}
dc := &DockerClient{
cl: client,
inspectCache: map[string]*docker.Container{},
diskUsageCollector: fs.DiskUsageCollector{},
}
dc.diskUsageCollector.Init()
// get version of docker engine
version, err := dc.version()
if err != nil {
return nil, err
}
config.DockerVersion = version
return dc, nil
}
// FindCgroupMountpoint returns cgroup mountpoint of a given subsystem
func (dc *DockerClient) FindCgroupMountpoint(subsystem string) (string, error) {
return cgroups.FindCgroupMountpoint(subsystem)
}
// GetShortID returns short container ID (12 chars)
func GetShortID(dockerID string) (string, error) {
if len(dockerID) < 12 {
return "", fmt.Errorf("Docker id %v is too short (the length of id should equal at least 12)", dockerID)
}
return dockerID[:12], nil
}
// GetStatsFromContainer returns docker container stats: cgroup stats (cpu usage, memory usage, etc.) and network stats (tx_bytes, rx_bytes, etc.);
// note that the incoming container id has to be full-length to be able to inspect the container
func (dc *DockerClient) GetStatsFromContainer(id string, collectFs bool) (*wrapper.Statistics, error) {
var (
err error
pid int
workingSet uint64
container = &docker.Container{}
groupWrap = wrapper.Cgroups2Stats // wrapper for cgroup name and interface for stats extraction
stats = wrapper.NewStatistics()
)
if !isHost(id) {
if !isFullLengthID(id) {
return nil, fmt.Errorf("Container id %+v is not fully-length - cannot inspect container", id)
}
// inspect container based only on fully-length container id.
container, err = dc.InspectContainer(id)
if err != nil {
return nil, err
}
// take docker container PID
pid = container.State.Pid
}
for cg, stat := range groupWrap {
groupPath, err := getSubsystemPath(cg, id)
if err != nil {
fmt.Fprintln(os.Stderr, "Cannot find subsystem path for cgroup=", cg, " for container id=", container)
continue
}
// get cgroup stats for given docker
err = stat.GetStats(groupPath, stats.CgroupStats)
if err != nil {
// just log about it
if isHost(id) {
fmt.Fprintln(os.Stderr, "Cannot obtain cgroups statistics for host, err=", err)
} else {
fmt.Fprintln(os.Stderr, "Cannot obtain cgroups statistics for container: id=", id, ", image=", container.Image, ", name=", container.Name, ", err=", err)
}
continue
}
}
// calculate additional stats memory:working_set based on memory_stats
if totalInactiveAnon, ok := stats.CgroupStats.MemoryStats.Stats["total_inactive_anon"]; ok {
workingSet = stats.CgroupStats.MemoryStats.Usage.Usage
if workingSet < totalInactiveAnon {
workingSet = 0
} else {
workingSet -= totalInactiveAnon
}
if totalInactiveFile, ok := stats.CgroupStats.MemoryStats.Stats["total_inactive_file"]; ok {
if workingSet < totalInactiveFile {
workingSet = 0
} else {
workingSet -= totalInactiveFile
}
}
}
stats.CgroupStats.MemoryStats.Stats["working_set"] = workingSet
if !isHost(id) {
rootFs := "/"
stats.Network, err = network.NetworkStatsFromProc(rootFs, pid)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get network stats, containerID=%+v, pid %d: %v", container.ID, pid, err)
}
stats.Connection.Tcp, err = network.TcpStatsFromProc(rootFs, pid)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get tcp stats from pid %d: %v", pid, err)
}
stats.Connection.Tcp6, err = network.Tcp6StatsFromProc(rootFs, pid)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get tcp6 stats from pid %d: %v", pid, err)
}
} else {
stats.Network, err = network.NetworkStatsFromRoot()
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get network stats, containerID=%v, %v", id, err)
}
}
if collectFs {
stats.Filesystem, err = fs.GetFsStats(container)
if err != nil {
// only log error message
fmt.Fprintf(os.Stderr, "Unable to get filesystem stats for docker: %v, err=%v", id, err)
}
}
return stats, nil
}
// InspectContainer returns information about the container with given ID
func (dc *DockerClient) InspectContainer(id string) (*docker.Container, error) {
dc.inspectMutex.Lock()
defer dc.inspectMutex.Unlock()
// check if the inspect info is already stored in inspectCache
if info, haveInfo := dc.inspectCache[id]; haveInfo {
return info, nil
}
info, err := dc.cl.InspectContainer(id)
if err != nil {
return nil, err
}
dc.inspectCache[id] = info
return info, nil
}
// ListContainersAsMap returns list of all available docker containers and base information about them (status, uptime, etc.)
func (dc *DockerClient) ListContainersAsMap() (map[string]docker.APIContainers, error) {
containers := make(map[string]docker.APIContainers)
containerList, err := dc.cl.ListContainers(docker.ListContainersOptions{})
if err != nil {
return nil, err
}
for _, cont := range containerList {
shortID, err := GetShortID(cont.ID)
if err != nil {
return nil, err
}
containers[shortID] = cont
}
containers["root"] = docker.APIContainers{ID: "/"}
if len(containers) == 0 {
return nil, errors.New("No docker container found")
}
return containers, nil
}
func getSubsystemPath(subsystem string, id string) (string, error) {
var subsystemPath string
systemSlice := "system.slice"
groupPath, err := cgroups.FindCgroupMountpoint(subsystem)
if err != nil {
fmt.Fprintf(os.Stderr, "[WARNING] Could not find mount point for %v\n", subsystem)
return "", err
}
if isHost(id) {
if isRunningSystemd() {
subsystemPath = filepath.Join(groupPath, systemSlice)
} else {
subsystemPath = groupPath
}
return subsystemPath, nil
}
if isFsCgroupParent(groupPath) {
// default cgroupfs parent is used for container
subsystemPath = filepath.Join(groupPath, "docker", id)
} else {
// cgroup is created under systemd.slice
subsystemPath = filepath.Join(groupPath, systemSlice, "docker-"+id+".scope")
}
return subsystemPath, nil
}
// isFullLengthID returns true if docker ID is a full-length (64 chars)
func isFullLengthID(dockerID string) bool {
return len(dockerID) == 64
}
// isFsCgroupParent returns true if the docker was run with default cgroup parent
func isFsCgroupParent(groupPath string) bool {
fi, err := os.Lstat(filepath.Join(groupPath, "docker"))
if err != nil {
return false
}
return fi.IsDir()
}
// isRunningSystemd returns true if the host was booted with systemd
func | isRunningSystemd | identifier_name |
|
M130104.py | = 1
else:
flagp = 0
if i == '<':
flag = flag + 1
elif i == '>':
flag = flag - 1
else:
if flag == 0:
s = s+i
if flagp == 1:
if newline:
s = s + '\n'
pass
# print(s)
return s
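# A rough standard-library alternative (a sketch, not a drop-in replacement: it
# does not reproduce the <p>-to-newline handling above):
#   import re
#   def strip_tags(html_text):
#       return re.sub(r'<[^>]*>', '', html_text)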
def getMuseumData():
datadict = {}  # stores the scraped page information
datadict["M_ID"] = "130104"
datadict["M_CName"] = "中国闽台缘博物馆"
datadict["M_EName"] = "China Museum for Fujian Taiwan kinship"
datadict["M_Batch"] = 1
datadict["M_Address"] = "福建泉州北清东路212号"
# content from the official homepage
baseurl = "http://www.mtybwg.org.cn/"  # URL of the page to scrape
datadict["M_Web"] = baseurl
html = askURL(baseurl) # 保存获取到的网页源码
soup = BeautifulSoup(html, "html.parser")
datadict["M_Logo"] = "http://www.mtybwg.org.cn/templates/mty/images/logo.jpg"
# museum opening hours and tickets
i = 0
time = []
item = soup.find("div", class_="top").find("ul",class_="notice").find("p")
item = item.find("span").text
# print(item)
# time = item.split()
# print(time)
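# note: the pattern below appears to end with a full-width ")" (U+FF09) to match
# the Chinese closing bracket in the page text, not an unbalanced ASCII paren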
time0 = re.findall(r'开放时间:(.*))', item)
# print(time0)
# exit()
datadict["M_Openingtime"] = time0[0]
datadict["M_Ticket"] = "免费开放"
# ticket information
url = "http://www.mtybwg.org.cn/about/detail/249.aspx"
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
# print(soup)
item = soup.find("ul",class_="detailcon")
# print(item)
# item = str(item)
time = []
# time = re.findall(r'<(.*。)', string)
for pi in item.find_all(style="white-space:normal;line-height:32px;margin:0cm 0cm 0pt;"):
pi = getText(pi.text)
time.append(pi)
# print(time)
datadict["M_OpeningInformation"] = time[0:2]
datadict["M_Booking"] = time[17:20]
datadict["M_TicketInformation"] = time[16]
datadict["M_Triffic"] = time[10:14]
# museum pictures (list)
url = "http://www.mtybwg.org.cn/about/924.aspx"
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
src = []
for item in soup.find("ul", class_="detailcon").find_all("img"):
src.append(item["src"])
p = []
for pi in src:
pi = baseurl[0:-1] + pi
p.append(pi)
# print(p)
datadict["M_Pictures"] = p
# print(p)
# museum introduction
src.clear()
for item in soup.find("ul", class_="detailcon").find_all("p",class_="MsoNormal"):
# print("===========")
item = getText(item.text)
src.append(item)
# print(src)
p = []
for pi in src:
if len(pi) >= 10:
p.append(pi)
# srcs = re.findall('<img src="(.*?)"/>', str(src))
datadict["M_Introduction"] = p
jsondata = json.dumps(datadict, ensure_ascii=False,indent = 4)
with open("./museums/M130104.json", 'w', encoding='utf-8') as f:
f.write(jsondata)
return datadict
exit()
def getCollectionsData():
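# Walk the collections index, follow every category page to its item detail
# pages, and write one ./collections/C<id>.json file per collection item.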
baseurl = "http://www.mtybwg.org.cn/"
index = "http://www.mtybwg.org.cn/cangpin.aspx"
html = askURL(index)
soup = BeautifulSoup(html,"html.parser")
# print(soup)
# exit()
href = []
for item in soup | # print(type(href))
collectiondict = {}
collectiondict["CRM_In"] = ID
Id = re.findall(r'http.*/(.*?).aspx', url)
# print(Id)
collectiondict["C_ID"] = ID + '-' + str(Id[0])
item = soup.find("ul", class_="infolist")
# print(item)
title = re.findall(r'<h1>(.*)</h1>', str(item))
title = str(title)
src = str(item.find_all("img"))
src = re.findall(r'<img.*src="(.*?)"', src)
txt0 = item.find("div",class_="pluscon").find("ul", class_="con").text
txt0 = getText(txt0)
# txt0 = txt0.split()
# # print(txt0)
txt = []
txt.append("藏品描述:")
txt.append(txt0)
collectiondict["C_Name"] = title
collectiondict["C_Pictures"] = baseurl[0:-1] + src[0]
collectiondict["C_Introduction"] = txt
jsondata = json.dumps(collectiondict, ensure_ascii=False,indent = 4 )
with open("./collections/C"+collectiondict["C_ID"]+".json", 'w', encoding='utf-8') as f:
f.write(jsondata)
# exit()
pass
pass
def getActivitiesData():
baseurl = "http://www.mtybwg.org.cn/"
# exhibitions
index = "http://www.mtybwg.org.cn/zhanlan.aspx"
html = askURL(index)
soup = BeautifulSoup(html,"html.parser")
# print(soup)
# exit()
href = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist animated").find_all("li"):
href0 = item.find("a",class_="pic")["href"]
href.append(href0)
# print(href)
# exit()
n = len(href) - 1
for href1 in href:
if href1 == "http://vr1.mtybwg.org.cn/20160316/":
break
url = href1
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
hrefa = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist animated").find_all("li"):
href0 = item.find("a")["href"]
hrefa.append(baseurl[0:-1] + href0)
# print(hrefa)
for href2 in hrefa:
url = href2
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
# print(type(href))
activityDict = {}
activityDict["ARM_In"] = ID
Id = re.findall(r'http.*/(.*?).aspx', url)
activityDict["A_ID"] = ID + '-' + str(Id[0])
item = soup.find("ul", class_="infolist")
# print(item)
title = re.findall(r'<h1>(.*)</h1>', str(item))
title = str(title)
src = str(item.find_all("img"))
src = re.findall(r'<img.*src="(.*?)"', src)
txt0 = item.find("ul",class_="detailcon").find("p", class_="MsoNormal").text
txt0 = getText(txt0)
# txt0 = txt0.split()
# # print(txt0)
txt = []
txt.append("活动描述:")
txt.append(txt0)
activityDict["A_Name"] = title
activityDict["A_Type"] | .find("div", class_="rightcon").find("ul",class_="falllist animated").find_all("li"):
href0 = item.find("a",class_="pic")["href"]
href.append(href0)
# print(href)
# exit()
n = len(href)
for href1 in href:
url = href1
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
hrefa = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist falllist2 animated").find_all("li"):
href0 = item.find("a")["href"]
hrefa.append(baseurl[0:-1] + href0)
# print(hrefa)
for href2 in hrefa:
url = href2
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
| identifier_body |
M130104.py | = 1
else:
flagp = 0
if i == '<':
flag = flag + 1
elif i == '>':
flag = flag - 1
else:
if flag == 0:
s = s+i
if flagp == 1:
if newline:
s = s + '\n'
pass
# print(s)
return s
def getMuseumData():
datadict = {} # dict that stores the scraped page data
d | "] = "130104"
datadict["M_CName"] = "中国闽台缘博物馆"
datadict["M_EName"] = "China Museum for Fujian Taiwan kinship"
datadict["M_Batch"] = 1
datadict["M_Address"] = "福建泉州北清东路212号"
# content from the official homepage
baseurl = "http://www.mtybwg.org.cn/" # URL of the page to scrape
datadict["M_Web"] = baseurl
html = askURL(baseurl) # save the fetched page source
soup = BeautifulSoup(html, "html.parser")
datadict["M_Logo"] = "http://www.mtybwg.org.cn/templates/mty/images/logo.jpg"
# museum opening hours and tickets
i = 0
time = []
item = soup.find("div", class_="top").find("ul",class_="notice").find("p")
item = item.find("span").text
# print(item)
# time = item.split()
# print(time)
time0 = re.findall(r'开放时间:(.*))', item)
# print(time0)
# exit()
datadict["M_Openingtime"] = time0[0]
datadict["M_Ticket"] = "免费开放"
# ticket information
url = "http://www.mtybwg.org.cn/about/detail/249.aspx"
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
# print(soup)
item = soup.find("ul",class_="detailcon")
# print(item)
# item = str(item)
time = []
# time = re.findall(r'<(.*。)', string)
for pi in item.find_all(style="white-space:normal;line-height:32px;margin:0cm 0cm 0pt;"):
pi = getText(pi.text)
time.append(pi)
# print(time)
datadict["M_OpeningInformation"] = time[0:2]
datadict["M_Booking"] = time[17:20]
datadict["M_TicketInformation"] = time[16]
datadict["M_Triffic"] = time[10:14]
# museum pictures (list)
url = "http://www.mtybwg.org.cn/about/924.aspx"
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
src = []
for item in soup.find("ul", class_="detailcon").find_all("img"):
src.append(item["src"])
p = []
for pi in src:
pi = baseurl[0:-1] + pi
p.append(pi)
# print(p)
datadict["M_Pictures"] = p
# print(p)
# museum introduction
src.clear()
for item in soup.find("ul", class_="detailcon").find_all("p",class_="MsoNormal"):
# print("===========")
item = getText(item.text)
src.append(item)
# print(src)
p = []
for pi in src:
if len(pi) >= 10:
p.append(pi)
# srcs = re.findall('<img src="(.*?)"/>', str(src))
datadict["M_Introduction"] = p
jsondata = json.dumps(datadict, ensure_ascii=False,indent = 4)
with open("./museums/M130104.json", 'w', encoding='utf-8') as f:
f.write(jsondata)
return datadict
exit()
def getCollectionsData():
baseurl = "http://www.mtybwg.org.cn/"
index = "http://www.mtybwg.org.cn/cangpin.aspx"
html = askURL(index)
soup = BeautifulSoup(html,"html.parser")
# print(soup)
# exit()
href = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist animated").find_all("li"):
href0 = item.find("a",class_="pic")["href"]
href.append(href0)
# print(href)
# exit()
n = len(href)
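# Two-level crawl: the outer loop visits each category page collected above,
# the inner loop visits every item detail page linked from that category.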
for href1 in href:
url = href1
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
hrefa = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist falllist2 animated").find_all("li"):
href0 = item.find("a")["href"]
hrefa.append(baseurl[0:-1] + href0)
# print(hrefa)
for href2 in hrefa:
url = href2
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
# print(type(href))
collectiondict = {}
collectiondict["CRM_In"] = ID
Id = re.findall(r'http.*/(.*?).aspx', url)
# print(Id)
collectiondict["C_ID"] = ID + '-' + str(Id[0])
item = soup.find("ul", class_="infolist")
# print(item)
title = re.findall(r'<h1>(.*)</h1>', str(item))
title = str(title)
src = str(item.find_all("img"))
src = re.findall(r'<img.*src="(.*?)"', src)
txt0 = item.find("div",class_="pluscon").find("ul", class_="con").text
txt0 = getText(txt0)
# txt0 = txt0.split()
# # print(txt0)
txt = []
txt.append("藏品描述:")
txt.append(txt0)
collectiondict["C_Name"] = title
collectiondict["C_Pictures"] = baseurl[0:-1] + src[0]
collectiondict["C_Introduction"] = txt
jsondata = json.dumps(collectiondict, ensure_ascii=False,indent = 4 )
with open("./collections/C"+collectiondict["C_ID"]+".json", 'w', encoding='utf-8') as f:
f.write(jsondata)
# exit()
pass
pass
def getActivitiesData():
baseurl = "http://www.mtybwg.org.cn/"
# exhibitions
index = "http://www.mtybwg.org.cn/zhanlan.aspx"
html = askURL(index)
soup = BeautifulSoup(html,"html.parser")
# print(soup)
# exit()
href = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist animated").find_all("li"):
href0 = item.find("a",class_="pic")["href"]
href.append(href0)
# print(href)
# exit()
n = len(href) - 1
for href1 in href:
if href1 == "http://vr1.mtybwg.org.cn/20160316/":
break
url = href1
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
hrefa = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist animated").find_all("li"):
href0 = item.find("a")["href"]
hrefa.append(baseurl[0:-1] + href0)
# print(hrefa)
for href2 in hrefa:
url = href2
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
# print(type(href))
activityDict = {}
activityDict["ARM_In"] = ID
Id = re.findall(r'http.*/(.*?).aspx', url)
activityDict["A_ID"] = ID + '-' + str(Id[0])
item = soup.find("ul", class_="infolist")
# print(item)
title = re.findall(r'<h1>(.*)</h1>', str(item))
title = str(title)
src = str(item.find_all("img"))
src = re.findall(r'<img.*src="(.*?)"', src)
txt0 = item.find("ul",class_="detailcon").find("p", class_="MsoNormal").text
txt0 = getText(txt0)
# txt0 = txt0.split()
# # print(txt0)
txt = []
txt.append("活动描述:")
txt.append(txt0)
activityDict["A_Name"] = title
activityDict["A_Type"] = " | atadict["M_ID | identifier_name |
M130104.py | = 1
else:
flagp = 0
if i == '<':
flag = flag + 1
elif i == '>':
flag = flag - 1
else:
if flag == 0:
s = s+i
if flagp == 1:
if newline:
s = s + '\n'
pass
# print(s)
return s
def getMuseumData():
datadict = {} # dict that stores the scraped page data
datadict["M_ID"] = "130104"
datadict["M_CName"] = "中国闽台缘博物馆"
datadict["M_EName"] = "China Museum for Fujian Taiwan kinship"
datadict["M_Batch"] = 1
datadict["M_Address"] = "福建泉州北清东路212号"
# content from the official homepage
baseurl = "http://www.mtybwg.org.cn/" # URL of the page to scrape
datadict["M_Web"] = baseurl
html = askURL(baseurl) # save the fetched page source
soup = BeautifulSoup(html, "html.parser")
datadict["M_Logo"] = "http://www.mtybwg.org.cn/templates/mty/images/logo.jpg"
# museum opening hours and tickets
i = 0
time = []
item = soup.find("div", class_="top").find("ul",class_="notice").find("p")
item = item.find("span").text
# print(item)
# time = item.split()
# print(time)
time0 = re.findall(r'开放时间:(.*))', item)
# print(time0)
# exit()
datadict["M_Openingtime"] = time0[0]
datadict["M_Ticket"] = "免费开放"
# ticket information
url = "http://www.mtybwg.org.cn/about/detail/249.aspx"
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
# print(soup)
item = soup.find("ul",class_="detailcon")
# print(item)
# item = str(item)
time = []
# time = re.findall(r'<(.*。)', string)
for pi in item.find_all(style="white-space:normal;line-height:32px;margin:0cm 0cm 0pt;"):
pi = getText(pi.text)
time.append(pi)
# print(time)
datadict["M_OpeningInformation"] = time[0:2]
datadict["M_Booking"] = time[17:20]
datadict["M_TicketInformation"] = time[16]
datadict["M_Triffic"] = time[10:14]
# museum pictures (list)
url = "http://www.mtybwg.org.cn/about/924.aspx"
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
src = []
for item in soup.find("ul", class_="detailcon").find_all("img"):
src.append(item["src"])
p = []
for pi in src:
pi = baseurl[0:-1] + pi
p.append(pi)
# print(p)
datadict["M_Pictures"] = p
# print(p)
# museum introduction
src.clear()
for item in soup.find("ul", class_="detailcon").find_all("p",class_="MsoNormal"):
# print("===========")
item = getText(item.text)
src.append(item)
# print(src)
p = []
for pi in src:
if len(pi) >= 10:
p.append(pi)
# srcs = re.findall('<img src="(.*?)"/>', str(src))
datadict["M_Introduction"] = p
jsondata = json.dumps(datadict, ensure_ascii=False,indent = 4)
with open("./museums/M130104.json", 'w', encoding='utf-8') as f:
f.write(jsondata)
return datadict
exit()
def getCollectionsData():
baseurl = "http://www.mtybwg.org.cn/"
index = "http://www.mtybwg.org.cn/cangpin.aspx"
html = askURL(index)
soup = BeautifulSoup(html,"html.parser")
# print(soup)
# exit()
href = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist animated").find_all("li"):
href0 = item.find("a",class_="pic")["href"]
href.append(href0)
# print(href)
# exit()
n = len(href)
for href1 in href:
url = href1
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
hrefa = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist falllist2 animated").find_all("li"):
href0 = item.find("a")["href"]
hrefa.append(baseurl[0:-1] + href0)
# print(hrefa)
for href2 in hrefa:
url = href2
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
# print(type(href))
collectiondict = {}
collectiondict["CRM_In"] = ID
Id = re.findall(r'http.*/(.*?).aspx', url)
# print(Id)
collectiondict["C_ID"] = ID + '-' + str(Id[0])
item = soup.find("ul", class_="infolist")
# print(item)
title = re.findall(r'<h1>(.*)</h1>', str(item))
title = str(title)
src = str(item.find_all("img"))
src = re.findall(r'<img.*src="(.*?)"', src)
txt0 = item.find("div",class_="pluscon").find("ul", class_="con").text
txt0 = getText(txt0)
# txt0 = txt0.split()
# # print(txt0)
txt = []
txt.append("藏品描述:")
txt.append(txt0)
collectiondict["C_Name"] = title
collectiondict["C_Pictures"] = baseurl[0:-1] + src[0]
collectiondict["C_Introduction"] = txt
jsondata = json.dumps(collectiondict, ensure_ascii=False,indent = 4 )
with open("./collections/C"+collectiondict["C_ID"]+".json", 'w', encoding='utf-8') as f:
f.write(jsondata)
# exit()
pass
pass
def getActivitiesData():
baseurl = "http://www.mtybwg.org.cn/"
| html = askURL(index)
soup = BeautifulSoup(html,"html.parser")
# print(soup)
# exit()
href = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist animated").find_all("li"):
href0 = item.find("a",class_="pic")["href"]
href.append(href0)
# print(href)
# exit()
n = len(href) - 1
for href1 in href:
if href1 == "http://vr1.mtybwg.org.cn/20160316/":
break
url = href1
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
hrefa = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist animated").find_all("li"):
href0 = item.find("a")["href"]
hrefa.append(baseurl[0:-1] + href0)
# print(hrefa)
for href2 in hrefa:
url = href2
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
# print(type(href))
activityDict = {}
activityDict["ARM_In"] = ID
Id = re.findall(r'http.*/(.*?).aspx', url)
activityDict["A_ID"] = ID + '-' + str(Id[0])
item = soup.find("ul", class_="infolist")
# print(item)
title = re.findall(r'<h1>(.*)</h1>', str(item))
title = str(title)
src = str(item.find_all("img"))
src = re.findall(r'<img.*src="(.*?)"', src)
txt0 = item.find("ul",class_="detailcon").find("p", class_="MsoNormal").text
txt0 = getText(txt0)
# txt0 = txt0.split()
# # print(txt0)
txt = []
txt.append("活动描述:")
txt.append(txt0)
activityDict["A_Name"] = title
activityDict["A_Type"] = " | # 展览
index = "http://www.mtybwg.org.cn/zhanlan.aspx"
| random_line_split |
M130104.py | _Pictures"] = p
# print(p)
# museum introduction
src.clear()
for item in soup.find("ul", class_="detailcon").find_all("p",class_="MsoNormal"):
# print("===========")
item = getText(item.text)
src.append(item)
# print(src)
p = []
for pi in src:
if len(pi) >= 10:
p.append(pi)
# srcs = re.findall('<img src="(.*?)"/>', str(src))
datadict["M_Introduction"] = p
jsondata = json.dumps(datadict, ensure_ascii=False,indent = 4)
with open("./museums/M130104.json", 'w', encoding='utf-8') as f:
f.write(jsondata)
return datadict
exit()
def getCollectionsData():
baseurl = "http://www.mtybwg.org.cn/"
index = "http://www.mtybwg.org.cn/cangpin.aspx"
html = askURL(index)
soup = BeautifulSoup(html,"html.parser")
# print(soup)
# exit()
href = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist animated").find_all("li"):
href0 = item.find("a",class_="pic")["href"]
href.append(href0)
# print(href)
# exit()
n = len(href)
for href1 in href:
url = href1
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
hrefa = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist falllist2 animated").find_all("li"):
href0 = item.find("a")["href"]
hrefa.append(baseurl[0:-1] + href0)
# print(hrefa)
for href2 in hrefa:
url = href2
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
# print(type(href))
collectiondict = {}
collectiondict["CRM_In"] = ID
Id = re.findall(r'http.*/(.*?).aspx', url)
# print(Id)
collectiondict["C_ID"] = ID + '-' + str(Id[0])
item = soup.find("ul", class_="infolist")
# print(item)
title = re.findall(r'<h1>(.*)</h1>', str(item))
title = str(title)
src = str(item.find_all("img"))
src = re.findall(r'<img.*src="(.*?)"', src)
txt0 = item.find("div",class_="pluscon").find("ul", class_="con").text
txt0 = getText(txt0)
# txt0 = txt0.split()
# # print(txt0)
txt = []
txt.append("藏品描述:")
txt.append(txt0)
collectiondict["C_Name"] = title
collectiondict["C_Pictures"] = baseurl[0:-1] + src[0]
collectiondict["C_Introduction"] = txt
jsondata = json.dumps(collectiondict, ensure_ascii=False,indent = 4 )
with open("./collections/C"+collectiondict["C_ID"]+".json", 'w', encoding='utf-8') as f:
f.write(jsondata)
# exit()
pass
pass
def getActivitiesData():
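# Scrape exhibition pages into ./activities/A<id>.json files; education and
# academic activities further below are stored as bare links only.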
baseurl = "http://www.mtybwg.org.cn/"
# exhibitions
index = "http://www.mtybwg.org.cn/zhanlan.aspx"
html = askURL(index)
soup = BeautifulSoup(html,"html.parser")
# print(soup)
# exit()
href = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist animated").find_all("li"):
href0 = item.find("a",class_="pic")["href"]
href.append(href0)
# print(href)
# exit()
n = len(href) - 1
for href1 in href:
if href1 == "http://vr1.mtybwg.org.cn/20160316/":
break
url = href1
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
hrefa = []
for item in soup.find("div", class_="rightcon").find("ul",class_="falllist animated").find_all("li"):
href0 = item.find("a")["href"]
hrefa.append(baseurl[0:-1] + href0)
# print(hrefa)
for href2 in hrefa:
url = href2
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
# print(type(href))
activityDict = {}
activityDict["ARM_In"] = ID
Id = re.findall(r'http.*/(.*?).aspx', url)
activityDict["A_ID"] = ID + '-' + str(Id[0])
item = soup.find("ul", class_="infolist")
# print(item)
title = re.findall(r'<h1>(.*)</h1>', str(item))
title = str(title)
src = str(item.find_all("img"))
src = re.findall(r'<img.*src="(.*?)"', src)
txt0 = item.find("ul",class_="detailcon").find("p", class_="MsoNormal").text
txt0 = getText(txt0)
# txt0 = txt0.split()
# # print(txt0)
txt = []
txt.append("活动描述:")
txt.append(txt0)
activityDict["A_Name"] = title
activityDict["A_Type"] = "1"
activityDict["A_Pictures"] = baseurl[0:-1] + src[0]
activityDict["A_Information"] = txt
jsondata = json.dumps(activityDict, ensure_ascii=False,indent = 4)
with open("./activities/A"+activityDict["A_ID"]+".json", 'w', encoding='utf-8') as f:
f.write(jsondata)
# exit()
pass
pass
# Education and academic activities (WeChat-post format, so only the links are stored)
baseurl = "http://www.mtybwg.org.cn/"
index = "http://www.mtybwg.org.cn/{}/0-1.aspx"
# html = askURL(index)
for pi in {"xueshu","xuanjiao"}:
index0 = index.format(pi)
html = askURL(index0)
# print(index0)
soup = BeautifulSoup(html, "html.parser")
# print("hhh")
item = soup.find("ul", class_="infolist").find("ul",class_="iflist")
href = []
title = []
if pi == "xuanjiao":
type = "3"
else:
type = "2"
for li in item.find_all("li"):
# print("=3=3=3=3=3=3=")
# print(li)
if li.text == "":
pass
else:
href.append(li.find("a")["href"])
title.append(li.text)
# print(title)
# print(href)
n = len(title)
for i in range(n):
activityDict = {}
activityDict["ARM_In"] = ID
activityDict["A_ID"] = ID + "-" + str(i+1)
activityDict["A_Name"] = title[i]
activityDict["A_Type"] = type
activityDict["A_Information"] = baseurl[0:-1]+href[i]
jsondata = json.dumps(activityDict, ensure_ascii=False,indent = 4)
with open("./activities/A"+activityDict["A_ID"]+".json", 'w', encoding='utf-8') as f:
f.write(jsondata)
exit()
def askURL(url):
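# Fetch a URL with a desktop browser User-Agent; returns the decoded HTML,
# or an empty string if the request fails.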
# head = headers[0]
head = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36(KHTML, like Gecko) Chrome / 80.0.3987.122 Safari / 537.36'
}
html = ""
try:
res = requests.get(url, headers=head)
res.raise_for_status()
res.encoding = res.apparent_encoding
html = res.text
except requests.RequestException as e:
print(e)
return html
def askPic(url):
head = {
'User-Agent':'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)'
}
res = None
try:
res = requests.get(url, headers=head)
res.raise_for_status()
res.encoding = res.apparent_encoding
except requests.RequestException as e:
print(e)
return res
if __name__ == "__main__":
getMuseumData()
getCollectionsData()
getActivitiesData() | conditional_block |
||
routes.rs | Preferences, TimeSlotRating};
use config;
use db::Db;
use dict::{self, Locale};
use errors::*;
use state::PreparationState;
use template::{NavItem, Page};
use user::{AuthUser, Role, User};
use timeslot::Rating;
fn | (locale: Locale) -> Vec<NavItem> {
// TODO: pass `Dict` once possible
let dict = dict::new(locale).prep;
vec![
NavItem::new(dict.nav_overview_title(), "/prep"),
NavItem::new(dict.nav_timeslots_title(), "/prep/timeslots"),
]
}
#[get("/prep")]
pub fn overview(
auth_user: AuthUser,
locale: Locale,
db: State<Db>,
_state: PreparationState,
) -> Result<Page> {
let dict = dict::new(locale).prep;
match auth_user.role() {
// ===== Student ======================================================
Role::Student => {
let student = auth_user.into_user().into_student().unwrap();
let pref = StudentPreferences::load_for(&student, &db)?;
let partner = pref.partner.as_ref()
.map_or(Ok(None), |name| User::load_by_username(name, &db))?
.and_then(|u| u.into_student().ok())
.filter(|s| s.id() != student.id());
Page::empty()
.with_title(dict.overview_title())
.add_nav_items(nav_items(locale))
.with_active_nav_route("/prep")
.with_content(html::student_overview(
locale,
&pref,
&partner,
))
}
// ===== Tutor or admin ===============================================
Role::Tutor | Role::Admin => {
use diesel::prelude::*;
use diesel::expression::sql;
use db::schema::{timeslot_ratings, users};
let conn = &*db.conn()?;
let stats = {
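// Aggregate the scheduling stats in SQL: total students, students with at
// least one non-bad slot, and the average good/ok slot counts per student.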
let num_students = users::table
.filter(sql("role = 'student'"))
.count()
.get_result::<i64>(conn)?;
let num_students_with_slots = users::table
.inner_join(timeslot_ratings::table)
.filter(sql("rating <> 'bad' AND role = 'student'"))
.select(sql("count(distinct user_id) as count"))
.get_result::<i64>(conn)?;
let avg_good_rating_per_student = sql("
select cast(avg(count) as float) from (
select count(*) as count, user_id
from timeslot_ratings
inner join users
on users.id = user_id
where rating = 'good' and role = 'student'
group by user_id
) as counts
").get_result::<f64>(conn)?;
let avg_ok_rating_per_student = sql("
select cast(avg(count) as float) from (
select count(*) as count, user_id
from timeslot_ratings
inner join users
on users.id = user_id
where rating <> 'bad' and role = 'student'
group by user_id
) as counts
").get_result::<f64>(conn)?;
html::TutorAdminStats {
num_students: num_students as u64,
num_students_with_slots: num_students_with_slots as u64,
avg_good_rating_per_student,
avg_ok_rating_per_student,
}
};
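// Per-tutor totals of "good" and non-"bad" slot ratings, grouped by user.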
let tutors = users::table
.inner_join(timeslot_ratings::table)
.filter(sql("role = 'tutor'"))
.group_by(users::columns::id)
.select(sql("
username,
name,
sum(case when rating='good' then 1 else 0 end) as num_good,
sum(case when rating<>'bad' then 1 else 0 end) as num_ok
"))
.load::<(String, Option<String>, i64, i64)>(conn)?;
let content = html::tutor_admin_overview(
locale,
auth_user.is_tutor(),
stats,
&tutors,
);
Page::empty()
.with_title(dict.overview_title())
.add_nav_items(nav_items(locale))
.with_active_nav_route("/prep")
.with_content(content)
}
}.make_ok()
}
#[post("/prep_student_settings", data = "<form>")]
pub fn set_general_settings(
auth_user: AuthUser,
form: Form<GeneralStudentSettings>,
db: State<Db>,
_state: PreparationState,
locale: Locale,
) -> Result<Flash<Redirect>> {
fn err<S: AsRef<str>>(msg: S) -> Result<Flash<Redirect>> {
Ok(Flash::error(Redirect::to("/prep"), msg))
}
let dict = dict::new(locale).prep;
// The auth_user needs to be a student. Tutors and admins should not be
// forwarded to this route.
let student = match auth_user.into_user().into_student() {
Ok(s) => s,
Err(_) => {
return err(bad_request(locale));
}
};
let mut pref = StudentPreferences::load_for(&student, &db)?;
let form = form.into_inner();
// Set partner
match form.partner.as_ref() {
"random" => {
pref.partner = None;
}
"chosen" => {
if let Some(id) = form.partner_id {
match User::load_by_username(&id, &db)? {
Some(ref u) if u.is_student() => {
pref.partner = Some(id);
}
Some(ref u) => {
return Ok(Flash::error(
Redirect::to("/prep"),
dict.flash_err_partner_not_a_student(u.username()),
));
}
None => {
return Ok(Flash::error(
Redirect::to("/prep"),
dict.flash_err_user_not_found(),
));
}
}
} else {
return err(bad_request(locale));
}
}
_ => return err(bad_request(locale)),
}
// Set preferred language
match form.language.as_ref() {
"de" => pref.prefers_english = false,
"en" => pref.prefers_english = true,
_ => return err(bad_request(locale)),
}
// Finally, store the changes in the database.
pref.update(&db)?;
Ok(Flash::success(Redirect::to("/prep"), dict.flash_success_storing_preferences()))
}
#[derive(Debug, Clone, FromForm)]
pub struct GeneralStudentSettings {
partner: String,
partner_id: Option<String>,
language: String,
}
#[get("/prep/timeslots")]
pub fn timeslots(
auth_user: AuthUser,
locale: Locale,
db: State<Db>,
_state: PreparationState,
) -> Result<Page> {
let dict = dict::new(locale).prep;
// Load all ratings of the user.
let ratings = TimeSlotRating::load_all_of_user(&auth_user, &db)?;
match auth_user.role() {
Role::Student | Role::Tutor => {
let (explanation, min_good, min_ok) = match auth_user.role() {
Role::Student => (
dict.timeslots_student_explanation(),
config::MIN_GOOD_SLOTS_STUDENT,
config::MIN_OK_SLOTS_STUDENT,
),
Role::Tutor => (
dict.timeslots_tutor_explanation(),
config::MIN_GOOD_SLOTS_TUTOR,
config::MIN_OK_SLOTS_TUTOR,
),
_ => unreachable!(),
};
let content = html::timeslots(
&explanation,
min_good,
min_ok,
&ratings,
locale,
);
Page::empty()
.with_title(dict.timeslots_title())
.add_nav_items(nav_items(locale))
.with_active_nav_route("/prep/timeslots")
.with_content(content)
.make_ok()
}
Role::Admin => {
Page::unimplemented().make_ok()
}
}
}
/// Stores a list of (timeslot_id, rating).
#[derive(Debug)]
pub struct TimeSlotForm {
slots: Vec<(i16, Rating)>,
}
impl<'f> FromForm<'f> for TimeSlotForm {
type Error = TimeSlotFormError;
fn from_form(items: &mut FormItems<'f>, _: bool) -> StdResult<Self, Self::Error> {
let slots = items.into_iter().map(|(key, value)| {
// The keys come in the form `slot-34` and we want this number.
if !key.starts_with("slot-") {
return Err(TimeSlotFormError::InvalidId);
}
let id = match key[5..].parse() {
Err(_) => return Err(TimeSlotFormError::InvalidId),
Ok(id) => id,
};
// The value should only be one of those three values.
let rating = match value.as_str() {
"good" => Rating::Good,
"tolerable" => Rating::Tolerable,
"bad" => Rating::Bad,
_ => return Err(TimeSlotFormError::InvalidRating),
};
Ok((id, rating))
}).collect::<StdResult<Vec<_>, _>>()?;
Ok(Self { slots })
}
}
#[derive(Debug)]
pub enum TimeSlotFormError {
InvalidRating,
InvalidId,
}
#[post("/prep/update_timeslots", data = "<form>")]
fn update_timeslots(
auth_user: AuthUser,
form: | nav_items | identifier_name |
routes.rs | StudentPreferences, TimeSlotRating};
use config;
use db::Db;
use dict::{self, Locale};
use errors::*;
use state::PreparationState;
use template::{NavItem, Page};
use user::{AuthUser, Role, User};
use timeslot::Rating;
fn nav_items(locale: Locale) -> Vec<NavItem> {
// TODO: pass `Dict` once possible
let dict = dict::new(locale).prep;
vec![
NavItem::new(dict.nav_overview_title(), "/prep"),
NavItem::new(dict.nav_timeslots_title(), "/prep/timeslots"),
]
}
#[get("/prep")]
pub fn overview(
auth_user: AuthUser,
locale: Locale,
db: State<Db>,
_state: PreparationState,
) -> Result<Page> {
let dict = dict::new(locale).prep;
match auth_user.role() {
// ===== Student ======================================================
Role::Student => {
let student = auth_user.into_user().into_student().unwrap();
let pref = StudentPreferences::load_for(&student, &db)?;
let partner = pref.partner.as_ref()
.map_or(Ok(None), |name| User::load_by_username(name, &db))?
.and_then(|u| u.into_student().ok())
.filter(|s| s.id() != student.id());
Page::empty()
.with_title(dict.overview_title())
.add_nav_items(nav_items(locale))
.with_active_nav_route("/prep")
.with_content(html::student_overview(
locale,
&pref,
&partner,
))
}
// ===== Tutor or admin ===============================================
Role::Tutor | Role::Admin => {
use diesel::prelude::*;
use diesel::expression::sql;
use db::schema::{timeslot_ratings, users};
let conn = &*db.conn()?;
let stats = {
let num_students = users::table
.filter(sql("role = 'student'"))
.count()
.get_result::<i64>(conn)?;
let num_students_with_slots = users::table
.inner_join(timeslot_ratings::table)
.filter(sql("rating <> 'bad' AND role = 'student'"))
.select(sql("count(distinct user_id) as count"))
.get_result::<i64>(conn)?;
let avg_good_rating_per_student = sql("
select cast(avg(count) as float) from (
select count(*) as count, user_id
from timeslot_ratings
inner join users
on users.id = user_id | ").get_result::<f64>(conn)?;
let avg_ok_rating_per_student = sql("
select cast(avg(count) as float) from (
select count(*) as count, user_id
from timeslot_ratings
inner join users
on users.id = user_id
where rating <> 'bad' and role = 'student'
group by user_id
) as counts
").get_result::<f64>(conn)?;
html::TutorAdminStats {
num_students: num_students as u64,
num_students_with_slots: num_students_with_slots as u64,
avg_good_rating_per_student,
avg_ok_rating_per_student,
}
};
let tutors = users::table
.inner_join(timeslot_ratings::table)
.filter(sql("role = 'tutor'"))
.group_by(users::columns::id)
.select(sql("
username,
name,
sum(case when rating='good' then 1 else 0 end) as num_good,
sum(case when rating<>'bad' then 1 else 0 end) as num_ok
"))
.load::<(String, Option<String>, i64, i64)>(conn)?;
let content = html::tutor_admin_overview(
locale,
auth_user.is_tutor(),
stats,
&tutors,
);
Page::empty()
.with_title(dict.overview_title())
.add_nav_items(nav_items(locale))
.with_active_nav_route("/prep")
.with_content(content)
}
}.make_ok()
}
#[post("/prep_student_settings", data = "<form>")]
pub fn set_general_settings(
auth_user: AuthUser,
form: Form<GeneralStudentSettings>,
db: State<Db>,
_state: PreparationState,
locale: Locale,
) -> Result<Flash<Redirect>> {
fn err<S: AsRef<str>>(msg: S) -> Result<Flash<Redirect>> {
Ok(Flash::error(Redirect::to("/prep"), msg))
}
let dict = dict::new(locale).prep;
// The auth_user needs to be a student. Tutors and admins should not be
// forwarded to this route.
let student = match auth_user.into_user().into_student() {
Ok(s) => s,
Err(_) => {
return err(bad_request(locale));
}
};
let mut pref = StudentPreferences::load_for(&student, &db)?;
let form = form.into_inner();
// Set partner
match form.partner.as_ref() {
"random" => {
pref.partner = None;
}
"chosen" => {
if let Some(id) = form.partner_id {
match User::load_by_username(&id, &db)? {
Some(ref u) if u.is_student() => {
pref.partner = Some(id);
}
Some(ref u) => {
return Ok(Flash::error(
Redirect::to("/prep"),
dict.flash_err_partner_not_a_student(u.username()),
));
}
None => {
return Ok(Flash::error(
Redirect::to("/prep"),
dict.flash_err_user_not_found(),
));
}
}
} else {
return err(bad_request(locale));
}
}
_ => return err(bad_request(locale)),
}
// Set preferred language
match form.language.as_ref() {
"de" => pref.prefers_english = false,
"en" => pref.prefers_english = true,
_ => return err(bad_request(locale)),
}
// Finally, store the changes in the database.
pref.update(&db)?;
Ok(Flash::success(Redirect::to("/prep"), dict.flash_success_storing_preferences()))
}
#[derive(Debug, Clone, FromForm)]
pub struct GeneralStudentSettings {
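// Mirrors the overview form: partner is "random" or "chosen", partner_id the
// chosen partner's username, and language either "de" or "en".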
partner: String,
partner_id: Option<String>,
language: String,
}
#[get("/prep/timeslots")]
pub fn timeslots(
auth_user: AuthUser,
locale: Locale,
db: State<Db>,
_state: PreparationState,
) -> Result<Page> {
let dict = dict::new(locale).prep;
// Load all ratings of the user.
let ratings = TimeSlotRating::load_all_of_user(&auth_user, &db)?;
match auth_user.role() {
Role::Student | Role::Tutor => {
let (explanation, min_good, min_ok) = match auth_user.role() {
Role::Student => (
dict.timeslots_student_explanation(),
config::MIN_GOOD_SLOTS_STUDENT,
config::MIN_OK_SLOTS_STUDENT,
),
Role::Tutor => (
dict.timeslots_tutor_explanation(),
config::MIN_GOOD_SLOTS_TUTOR,
config::MIN_OK_SLOTS_TUTOR,
),
_ => unreachable!(),
};
let content = html::timeslots(
&explanation,
min_good,
min_ok,
&ratings,
locale,
);
Page::empty()
.with_title(dict.timeslots_title())
.add_nav_items(nav_items(locale))
.with_active_nav_route("/prep/timeslots")
.with_content(content)
.make_ok()
}
Role::Admin => {
Page::unimplemented().make_ok()
}
}
}
/// Stores a list of (timeslot_id, rating).
#[derive(Debug)]
pub struct TimeSlotForm {
slots: Vec<(i16, Rating)>,
}
impl<'f> FromForm<'f> for TimeSlotForm {
type Error = TimeSlotFormError;
fn from_form(items: &mut FormItems<'f>, _: bool) -> StdResult<Self, Self::Error> {
let slots = items.into_iter().map(|(key, value)| {
// The keys come in the form `slot-34` and we want this number.
if !key.starts_with("slot-") {
return Err(TimeSlotFormError::InvalidId);
}
let id = match key[5..].parse() {
Err(_) => return Err(TimeSlotFormError::InvalidId),
Ok(id) => id,
};
// The value should only be one of those three values.
let rating = match value.as_str() {
"good" => Rating::Good,
"tolerable" => Rating::Tolerable,
"bad" => Rating::Bad,
_ => return Err(TimeSlotFormError::InvalidRating),
};
Ok((id, rating))
}).collect::<StdResult<Vec<_>, _>>()?;
Ok(Self { slots })
}
}
#[derive(Debug)]
pub enum TimeSlotFormError {
InvalidRating,
InvalidId,
}
#[post("/prep/update_timeslots", data = "<form>")]
fn update_timeslots(
auth_user: AuthUser,
form: Form< | where rating = 'good' and role = 'student'
group by user_id
) as counts | random_line_split |
handlers.go | , request *http.Request) {
ctx, cancel := context.WithTimeout(request.Context(), timeout)
defer cancel()
delegate.ServeHTTP(response, request.WithContext(ctx))
})
}
}
// IDFromRequest is a strategy type for extracting the device identifier from an HTTP request
type IDFromRequest func(*http.Request) (ID, error)
// UseID is a collection of Alice-style constructors that all insert the device ID
// into the delegate's request Context using various strategies.
var UseID = struct {
// F is a configurable constructor that allows an arbitrary IDFromRequest strategy
F func(IDFromRequest) func(http.Handler) http.Handler
// FromHeader uses the device name header to extract the device identifier.
// This constructor isn't configurable, and is used as-is: device.UseID.FromHeader.
FromHeader func(http.Handler) http.Handler
// FromPath is a configurable constructor that extracts the device identifier
// from the URI path using the supplied variable name. This constructor is
// configurable: device.UseID.FromPath("deviceId").
FromPath func(string) func(http.Handler) http.Handler
}{
F: useID,
FromHeader: useID(
func(request *http.Request) (ID, error) {
deviceName := request.Header.Get(DeviceNameHeader)
if len(deviceName) == 0 {
return invalidID, ErrorMissingDeviceNameHeader
}
return ParseID(deviceName)
},
),
FromPath: func(variableName string) func(http.Handler) http.Handler {
return useID(
func(request *http.Request) (ID, error) {
vars := mux.Vars(request)
if vars == nil {
return invalidID, ErrorMissingPathVars
}
deviceName := vars[variableName]
if len(deviceName) == 0 {
return invalidID, ErrorMissingDeviceNameVar
}
return ParseID(deviceName)
},
)
},
}
// useID is the general purpose creator for an Alice-style constructor that passes the ID
// to the delegate via the request Context. This internal function is exported via UseID.F.
func useID(f IDFromRequest) func(http.Handler) http.Handler {
return func(delegate http.Handler) http.Handler {
return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
id, err := f(request)
if err != nil {
httperror.Formatf(
response,
http.StatusBadRequest,
"Could extract device id: %s",
err,
)
return
}
ctx := WithID(id, request.Context())
delegate.ServeHTTP(response, request.WithContext(ctx))
})
}
}
// MessageHandler is a configurable http.Handler which handles inbound WRP traffic
// to be sent to devices.
type MessageHandler struct {
// Logger is the sink for logging output. If not set, logging will be sent to logging.DefaultLogger().
Logger logging.Logger
// Decoders is the pool of wrp.Decoder objects used to decode http.Request bodies
// sent to this handler. This field is required.
Decoders *wrp.DecoderPool
// Encoders is the optional pool of wrp.Encoder objects used to encode wrp messages sent
// as HTTP responses. If not supplied, this handler assumes the format returned by the Router
// is the format to be sent back in the HTTP response.
Encoders *wrp.EncoderPool
// Router is the device message Router to use. This field is required.
Router Router
}
func (mh *MessageHandler) logger() logging.Logger {
if mh.Logger != nil {
return mh.Logger
}
return logging.DefaultLogger()
}
// decodeRequest transforms an HTTP request into a device request.
func (mh *MessageHandler) decodeRequest(httpRequest *http.Request) (deviceRequest *Request, err error) {
deviceRequest, err = DecodeRequest(httpRequest.Body, mh.Decoders)
if err == nil {
deviceRequest = deviceRequest.WithContext(httpRequest.Context())
}
return
}
func (mh *MessageHandler) ServeHTTP(httpResponse http.ResponseWriter, httpRequest *http.Request) | code = http.StatusNotFound
case ErrorNonUniqueID:
code = http.StatusBadRequest
case ErrorInvalidTransactionKey:
code = http.StatusBadRequest
case ErrorTransactionAlreadyRegistered:
code = http.StatusBadRequest
}
httperror.Formatf(
httpResponse,
code,
"Could not process device request: %s",
err,
)
} else if deviceResponse != nil {
if err := EncodeResponse(httpResponse, deviceResponse, mh.Encoders); err != nil {
mh.logger().Error("Error while writing transaction response: %s", err)
}
}
// if deviceResponse == nil, that just means the request was not something that represented
// the start of a transaction. For example, events do not carry a transaction key because
// they do not expect responses.
}
type ConnectHandler struct {
Logger logging.Logger
Connector Connector
ResponseHeader http.Header
}
func (ch *ConnectHandler) logger() logging.Logger {
if ch.Logger != nil {
return ch.Logger
}
return logging.DefaultLogger()
}
func (ch *ConnectHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
if device, err := ch.Connector.Connect(response, request, ch.ResponseHeader); err != nil {
ch.logger().Error("Failed to connect device: %s", err)
} else {
ch.logger().Debug("Connected device: %s", device.ID())
}
}
// ConnectedDeviceListener listens for connection and disconnection events and produces
// a JSON document containing information about connected devices. It produces this document
// on a certain interval.
type ConnectedDeviceListener struct {
// RefreshInterval is the time interval at which the cached JSON device list is updated.
// If this field is nonpositive, DefaultRefreshInterval is used.
RefreshInterval time.Duration
// Tick is a factory function that produces a ticker channel and a stop function.
// If not set, time.Ticker is used and the stop function is ticker.Stop.
Tick func(time.Duration) (<-chan time.Time, func())
lock sync.Mutex
initializeOnce sync.Once
devices map[Key][]byte
changeCount uint32
updates chan []byte
shutdown chan struct{}
}
func (l *ConnectedDeviceListener) refreshInterval() time.Duration {
if l.RefreshInterval > 0 {
return l.RefreshInterval
}
return DefaultRefreshInterval
}
// newTick returns a ticker channel and a stop function for cleanup. If Tick is set,
// that function is used. Otherwise, a time.Ticker is created and (ticker.C, ticker.Stop) is returned.
func (l *ConnectedDeviceListener) newTick() (<-chan time.Time, func()) {
refreshInterval := l.refreshInterval()
if l.Tick != nil {
return l.Tick(refreshInterval)
}
ticker := time.NewTicker(refreshInterval)
return ticker.C, ticker.Stop
}
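// onDeviceEvent maintains the cached device set on connect/disconnect events
// and bumps the change counter so the next tick rebuilds the JSON document.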
func (l *ConnectedDeviceListener) onDeviceEvent(e *Event) {
switch e.Type {
case Connect:
l.lock.Lock()
defer l.lock.Unlock()
l.changeCount++
l.devices[e.Device.Key()] = []byte(e.Device.String())
case Disconnect:
l.lock.Lock()
defer l.lock.Unlock()
l.changeCount++
delete(l.devices, e.Device.Key())
}
}
func (l *ConnectedDeviceListener) refresh() {
l.lock.Lock()
defer l.lock.Unlock()
if l.changeCount > 0 {
l.changeCount = 0
var (
output = bytes.NewBufferString(`{"devices":[`)
needsComma bool
comma = []byte(`,`)
)
for _, deviceJSON := range l.devices {
if needsComma {
output.Write(comma)
}
output.Write(deviceJSON)
needsComma = true
}
output.WriteString(`]}`)
l.updates <- output.Bytes()
}
}
// Stop stops updates coming from this listener.
func (l *ConnectedDeviceListener) Stop() {
l.lock.Lock()
defer l.lock.Unlock()
if l.shutdown != nil {
close(l.shutdown)
close(l.updates)
l.shutdown = nil
l.updates = nil
}
}
// Listen starts listening for changes to the set of connected devices. The returned Listener may
// be placed into an Options. This method is idempotent, and may be called to restart this handler
// after Stop is called. If this method is called multiple times without calling Stop, it simply
// returns the same Listener and output channel.
//
// The returned channel will receive updated JSON device list documents. This channel can be
// used with ListHandler.Consume.
func (l *ConnectedDeviceListener) Listen() (Listener, <-chan []byte) {
l.lock.Lock()
defer l.lock.Unlock()
l.initializeOnce.Do(func() {
l.devices = make(map[Key | {
deviceRequest, err := mh.decodeRequest(httpRequest)
if err != nil {
httperror.Formatf(
httpResponse,
http.StatusBadRequest,
"Could not decode WRP message: %s",
err,
)
return
}
// deviceRequest carries the context through the routing infrastructure
if deviceResponse, err := mh.Router.Route(deviceRequest); err != nil {
code := http.StatusInternalServerError
switch err {
case ErrorInvalidDeviceName:
code = http.StatusBadRequest
case ErrorDeviceNotFound: | identifier_body |
handlers.go | request *http.Request) {
ctx, cancel := context.WithTimeout(request.Context(), timeout)
defer cancel()
delegate.ServeHTTP(response, request.WithContext(ctx))
})
}
}
// IDFromRequest is a strategy type for extracting the device identifier from an HTTP request
type IDFromRequest func(*http.Request) (ID, error)
// UseID is a collection of Alice-style constructors that all insert the device ID
// into the delegate's request Context using various strategies.
var UseID = struct {
// F is a configurable constructor that allows an arbitrary IDFromRequest strategy
F func(IDFromRequest) func(http.Handler) http.Handler
// FromHeader uses the device name header to extract the device identifier.
// This constructor isn't configurable, and is used as-is: device.UseID.FromHeader.
FromHeader func(http.Handler) http.Handler
// FromPath is a configurable constructor that extracts the device identifier
// from the URI path using the supplied variable name. This constructor is
// configurable: device.UseID.FromPath("deviceId").
FromPath func(string) func(http.Handler) http.Handler
}{
F: useID,
FromHeader: useID(
func(request *http.Request) (ID, error) {
deviceName := request.Header.Get(DeviceNameHeader)
if len(deviceName) == 0 {
return invalidID, ErrorMissingDeviceNameHeader
}
return ParseID(deviceName)
},
),
FromPath: func(variableName string) func(http.Handler) http.Handler {
return useID(
func(request *http.Request) (ID, error) {
vars := mux.Vars(request)
if vars == nil {
return invalidID, ErrorMissingPathVars
}
deviceName := vars[variableName]
if len(deviceName) == 0 {
return invalidID, ErrorMissingDeviceNameVar
}
return ParseID(deviceName)
},
)
},
}
// useID is the general purpose creator for an Alice-style constructor that passes the ID
// to the delegate via the request Context. This internal function is exported via UseID.F.
func useID(f IDFromRequest) func(http.Handler) http.Handler {
return func(delegate http.Handler) http.Handler {
return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
id, err := f(request)
if err != nil {
httperror.Formatf(
response,
http.StatusBadRequest,
"Could extract device id: %s",
err,
)
return
}
ctx := WithID(id, request.Context())
delegate.ServeHTTP(response, request.WithContext(ctx))
})
}
}
// MessageHandler is a configurable http.Handler which handles inbound WRP traffic
// to be sent to devices.
type MessageHandler struct {
// Logger is the sink for logging output. If not set, logging will be sent to logging.DefaultLogger().
Logger logging.Logger
// Decoders is the pool of wrp.Decoder objects used to decode http.Request bodies
// sent to this handler. This field is required.
Decoders *wrp.DecoderPool
// Encoders is the optional pool of wrp.Encoder objects used to encode wrp messages sent
// as HTTP responses. If not supplied, this handler assumes the format returned by the Router
// is the format to be sent back in the HTTP response.
Encoders *wrp.EncoderPool
// Router is the device message Router to use. This field is required.
Router Router
}
func (mh *MessageHandler) logger() logging.Logger {
if mh.Logger != nil {
return mh.Logger
}
return logging.DefaultLogger()
}
// decodeRequest transforms an HTTP request into a device request.
func (mh *MessageHandler) decodeRequest(httpRequest *http.Request) (deviceRequest *Request, err error) {
deviceRequest, err = DecodeRequest(httpRequest.Body, mh.Decoders)
if err == nil |
return
}
func (mh *MessageHandler) ServeHTTP(httpResponse http.ResponseWriter, httpRequest *http.Request) {
deviceRequest, err := mh.decodeRequest(httpRequest)
if err != nil {
httperror.Formatf(
httpResponse,
http.StatusBadRequest,
"Could not decode WRP message: %s",
err,
)
return
}
// deviceRequest carries the context through the routing infrastructure
if deviceResponse, err := mh.Router.Route(deviceRequest); err != nil {
code := http.StatusInternalServerError
switch err {
case ErrorInvalidDeviceName:
code = http.StatusBadRequest
case ErrorDeviceNotFound:
code = http.StatusNotFound
case ErrorNonUniqueID:
code = http.StatusBadRequest
case ErrorInvalidTransactionKey:
code = http.StatusBadRequest
case ErrorTransactionAlreadyRegistered:
code = http.StatusBadRequest
}
httperror.Formatf(
httpResponse,
code,
"Could not process device request: %s",
err,
)
} else if deviceResponse != nil {
if err := EncodeResponse(httpResponse, deviceResponse, mh.Encoders); err != nil {
mh.logger().Error("Error while writing transaction response: %s", err)
}
}
// if deviceResponse == nil, that just means the request was not something that represented
// the start of a transaction. For example, events do not carry a transaction key because
// they do not expect responses.
}
type ConnectHandler struct {
Logger logging.Logger
Connector Connector
ResponseHeader http.Header
}
func (ch *ConnectHandler) logger() logging.Logger {
if ch.Logger != nil {
return ch.Logger
}
return logging.DefaultLogger()
}
func (ch *ConnectHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
if device, err := ch.Connector.Connect(response, request, ch.ResponseHeader); err != nil {
ch.logger().Error("Failed to connect device: %s", err)
} else {
ch.logger().Debug("Connected device: %s", device.ID())
}
}
// ConnectedDeviceListener listens for connection and disconnection events and produces
// a JSON document containing information about connected devices. It produces this document
// on a certain interval.
type ConnectedDeviceListener struct {
// RefreshInterval is the time interval at which the cached JSON device list is updated.
// If this field is nonpositive, DefaultRefreshInterval is used.
RefreshInterval time.Duration
// Tick is a factory function that produces a ticker channel and a stop function.
// If not set, time.Ticker is used and the stop function is ticker.Stop.
Tick func(time.Duration) (<-chan time.Time, func())
lock sync.Mutex
initializeOnce sync.Once
devices map[Key][]byte
changeCount uint32
updates chan []byte
shutdown chan struct{}
}
func (l *ConnectedDeviceListener) refreshInterval() time.Duration {
if l.RefreshInterval > 0 {
return l.RefreshInterval
}
return DefaultRefreshInterval
}
// newTick returns a ticker channel and a stop function for cleanup. If Tick is set,
// that function is used. Otherwise, a time.Ticker is created and (ticker.C, ticker.Stop) is returned.
func (l *ConnectedDeviceListener) newTick() (<-chan time.Time, func()) {
refreshInterval := l.refreshInterval()
if l.Tick != nil {
return l.Tick(refreshInterval)
}
ticker := time.NewTicker(refreshInterval)
return ticker.C, ticker.Stop
}
func (l *ConnectedDeviceListener) onDeviceEvent(e *Event) {
switch e.Type {
case Connect:
l.lock.Lock()
defer l.lock.Unlock()
l.changeCount++
l.devices[e.Device.Key()] = []byte(e.Device.String())
case Disconnect:
l.lock.Lock()
defer l.lock.Unlock()
l.changeCount++
delete(l.devices, e.Device.Key())
}
}
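// refresh rebuilds the JSON device-list document and publishes it on the
// updates channel, but only if the device set changed since the last tick.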
func (l *ConnectedDeviceListener) refresh() {
l.lock.Lock()
defer l.lock.Unlock()
if l.changeCount > 0 {
l.changeCount = 0
var (
output = bytes.NewBufferString(`{"devices":[`)
needsComma bool
comma = []byte(`,`)
)
for _, deviceJSON := range l.devices {
if needsComma {
output.Write(comma)
}
output.Write(deviceJSON)
needsComma = true
}
output.WriteString(`]}`)
l.updates <- output.Bytes()
}
}
// Stop stops updates coming from this listener.
func (l *ConnectedDeviceListener) Stop() {
l.lock.Lock()
defer l.lock.Unlock()
if l.shutdown != nil {
close(l.shutdown)
close(l.updates)
l.shutdown = nil
l.updates = nil
}
}
// Listen starts listening for changes to the set of connected devices. The returned Listener may
// be placed into an Options. This method is idempotent, and may be called to restart this handler
// after Stop is called. If this method is called multiple times without calling Stop, it simply
// returns the same Listener and output channel.
//
// The returned channel will receive updated JSON device list documents. This channel can be
// used with ListHandler.Consume.
func (l *ConnectedDeviceListener) Listen() (Listener, <-chan []byte) {
l.lock.Lock()
defer l.lock.Unlock()
l.initializeOnce.Do(func() {
l.devices = make(map[Key | {
deviceRequest = deviceRequest.WithContext(httpRequest.Context())
} | conditional_block |
handlers.go | strategy
F func(IDFromRequest) func(http.Handler) http.Handler
// FromHeader uses the device name header to extract the device identifier.
// This constructor isn't configurable, and is used as-is: device.UseID.FromHeader.
FromHeader func(http.Handler) http.Handler
// FromPath is a configurable constructor that extracts the device identifier
// from the URI path using the supplied variable name. This constructor is
// configurable: device.UseID.FromPath("deviceId").
FromPath func(string) func(http.Handler) http.Handler
}{
F: useID,
FromHeader: useID(
func(request *http.Request) (ID, error) {
deviceName := request.Header.Get(DeviceNameHeader)
if len(deviceName) == 0 {
return invalidID, ErrorMissingDeviceNameHeader
}
return ParseID(deviceName)
},
),
FromPath: func(variableName string) func(http.Handler) http.Handler {
return useID(
func(request *http.Request) (ID, error) {
vars := mux.Vars(request)
if vars == nil {
return invalidID, ErrorMissingPathVars
}
deviceName := vars[variableName]
if len(deviceName) == 0 {
return invalidID, ErrorMissingDeviceNameVar
}
return ParseID(deviceName)
},
)
},
}
// useID is the general purpose creator for an Alice-style constructor that passes the ID
// to the delegate via the request Context. This internal function is exported via UseID.F.
func useID(f IDFromRequest) func(http.Handler) http.Handler {
return func(delegate http.Handler) http.Handler {
return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
id, err := f(request)
if err != nil {
httperror.Formatf(
response,
http.StatusBadRequest,
"Could extract device id: %s",
err,
)
return
}
ctx := WithID(id, request.Context())
delegate.ServeHTTP(response, request.WithContext(ctx))
})
}
}
// MessageHandler is a configurable http.Handler which handles inbound WRP traffic
// to be sent to devices.
type MessageHandler struct {
// Logger is the sink for logging output. If not set, logging will be sent to logging.DefaultLogger().
Logger logging.Logger
// Decoders is the pool of wrp.Decoder objects used to decode http.Request bodies
// sent to this handler. This field is required.
Decoders *wrp.DecoderPool
// Encoders is the optional pool of wrp.Encoder objects used to encode wrp messages sent
// as HTTP responses. If not supplied, this handler assumes the format returned by the Router
// is the format to be sent back in the HTTP response.
Encoders *wrp.EncoderPool
// Router is the device message Router to use. This field is required.
Router Router
}
func (mh *MessageHandler) logger() logging.Logger {
if mh.Logger != nil {
return mh.Logger
}
return logging.DefaultLogger()
}
// decodeRequest transforms an HTTP request into a device request.
func (mh *MessageHandler) decodeRequest(httpRequest *http.Request) (deviceRequest *Request, err error) {
deviceRequest, err = DecodeRequest(httpRequest.Body, mh.Decoders)
if err == nil {
deviceRequest = deviceRequest.WithContext(httpRequest.Context())
}
return
}
func (mh *MessageHandler) ServeHTTP(httpResponse http.ResponseWriter, httpRequest *http.Request) {
deviceRequest, err := mh.decodeRequest(httpRequest)
if err != nil {
httperror.Formatf(
httpResponse,
http.StatusBadRequest,
"Could not decode WRP message: %s",
err,
)
return
}
// deviceRequest carries the context through the routing infrastructure
if deviceResponse, err := mh.Router.Route(deviceRequest); err != nil {
code := http.StatusInternalServerError
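// Map well-known routing failures to 4xx status codes; anything
// unrecognized remains an internal server error.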
switch err {
case ErrorInvalidDeviceName:
code = http.StatusBadRequest
case ErrorDeviceNotFound:
code = http.StatusNotFound
case ErrorNonUniqueID:
code = http.StatusBadRequest
case ErrorInvalidTransactionKey:
code = http.StatusBadRequest
case ErrorTransactionAlreadyRegistered:
code = http.StatusBadRequest
}
httperror.Formatf(
httpResponse,
code,
"Could not process device request: %s",
err,
)
} else if deviceResponse != nil {
if err := EncodeResponse(httpResponse, deviceResponse, mh.Encoders); err != nil {
mh.logger().Error("Error while writing transaction response: %s", err)
}
}
// if deviceResponse == nil, that just means the request was not something that represented
// the start of a transaction. For example, events do not carry a transaction key because
// they do not expect responses.
}
type ConnectHandler struct {
Logger logging.Logger
Connector Connector
ResponseHeader http.Header
}
func (ch *ConnectHandler) logger() logging.Logger {
if ch.Logger != nil {
return ch.Logger
}
return logging.DefaultLogger()
}
func (ch *ConnectHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
if device, err := ch.Connector.Connect(response, request, ch.ResponseHeader); err != nil {
ch.logger().Error("Failed to connect device: %s", err)
} else {
ch.logger().Debug("Connected device: %s", device.ID())
}
}
// ConnectedDeviceListener listens for connection and disconnection events and produces
// a JSON document containing information about connected devices. It produces this document
// on a certain interval.
type ConnectedDeviceListener struct {
// RefreshInterval is the time interval at which the cached JSON device list is updated.
// If this field is nonpositive, DefaultRefreshInterval is used.
RefreshInterval time.Duration
// Tick is a factory function that produces a ticker channel and a stop function.
// If not set, time.Ticker is used and the stop function is ticker.Stop.
Tick func(time.Duration) (<-chan time.Time, func())
lock sync.Mutex
initializeOnce sync.Once
devices map[Key][]byte
changeCount uint32
updates chan []byte
shutdown chan struct{}
}
func (l *ConnectedDeviceListener) refreshInterval() time.Duration {
if l.RefreshInterval > 0 {
return l.RefreshInterval
}
return DefaultRefreshInterval
}
// newTick returns a ticker channel and a stop function for cleanup. If Tick is set,
// that function is used. Otherwise, a time.Ticker is created and (ticker.C, ticker.Stop) is returned.
func (l *ConnectedDeviceListener) newTick() (<-chan time.Time, func()) {
refreshInterval := l.refreshInterval()
if l.Tick != nil {
return l.Tick(refreshInterval)
}
ticker := time.NewTicker(refreshInterval)
return ticker.C, ticker.Stop
}
func (l *ConnectedDeviceListener) onDeviceEvent(e *Event) {
switch e.Type {
case Connect:
l.lock.Lock()
defer l.lock.Unlock()
l.changeCount++
l.devices[e.Device.Key()] = []byte(e.Device.String())
case Disconnect:
l.lock.Lock()
defer l.lock.Unlock()
l.changeCount++
delete(l.devices, e.Device.Key())
}
}
func (l *ConnectedDeviceListener) refresh() {
l.lock.Lock()
defer l.lock.Unlock()
if l.changeCount > 0 {
l.changeCount = 0
var (
output = bytes.NewBufferString(`{"devices":[`)
needsComma bool
comma = []byte(`,`)
)
for _, deviceJSON := range l.devices {
if needsComma {
output.Write(comma)
}
output.Write(deviceJSON)
needsComma = true
}
output.WriteString(`]}`)
l.updates <- output.Bytes()
}
}
// Stop stops updates coming from this listener.
func (l *ConnectedDeviceListener) Stop() {
l.lock.Lock()
defer l.lock.Unlock()
if l.shutdown != nil {
close(l.shutdown)
close(l.updates)
l.shutdown = nil
l.updates = nil
}
}
// Listen starts listening for changes to the set of connected devices. The returned Listener may
// be placed into an Options. This method is idempotent, and may be called to restart this handler
// after Stop is called. If this method is called multiple times without calling Stop, it simply
// returns the same Listener and output channel.
//
// The returned channel will received updated JSON device list documents. This channel can be
// used with ListHandler.Consume.
func (l *ConnectedDeviceListener) Listen() (Listener, <-chan []byte) {
l.lock.Lock()
defer l.lock.Unlock()
l.initializeOnce.Do(func() {
l.devices = make(map[Key][]byte, 1000)
})
if l.shutdown == nil {
l.shutdown = make(chan struct{})
l.updates = make(chan []byte, 1)
// spawn the monitor goroutine
go func(shutdown <-chan struct{}) {
refreshC, refreshStop := l.newTick()
defer refreshStop()
for {
select {
case <-shutdown:
return
case <-refreshC:
l.refresh()
}
}
}(l.shutdown)
}
return l.onDeviceEvent, l.updates
} | random_line_split |
||
// handlers.go
// NOTE: this fragment opens inside an Alice-style timeout constructor whose
// enclosing function signatures were truncated in the source; only the
// innermost closure is reconstructed here, from its body and closing braces.
return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
ctx, cancel := context.WithTimeout(request.Context(), timeout)
defer cancel()
delegate.ServeHTTP(response, request.WithContext(ctx))
})
}
}
// IDFromRequest is a strategy type for extracting the device identifier from an HTTP request
type IDFromRequest func(*http.Request) (ID, error)
// UseID is a collection of Alice-style constructors that all insert the device ID
// into the delegate's request Context using various strategies.
var UseID = struct {
// F is a configurable constructor that allows an arbitrary IDFromRequest strategy
F func(IDFromRequest) func(http.Handler) http.Handler
// FromHeader uses the device name header to extract the device identifier.
// This constructor isn't configurable, and is used as-is: device.UseID.FromHeader.
FromHeader func(http.Handler) http.Handler
// FromPath is a configurable constructor that extracts the device identifier
// from the URI path using the supplied variable name. This constructor is
// configurable: device.UseID.FromPath("deviceId").
FromPath func(string) func(http.Handler) http.Handler
}{
F: useID,
FromHeader: useID(
func(request *http.Request) (ID, error) {
deviceName := request.Header.Get(DeviceNameHeader)
if len(deviceName) == 0 {
return invalidID, ErrorMissingDeviceNameHeader
}
return ParseID(deviceName)
},
),
FromPath: func(variableName string) func(http.Handler) http.Handler {
return useID(
func(request *http.Request) (ID, error) {
vars := mux.Vars(request)
if vars == nil {
return invalidID, ErrorMissingPathVars
}
deviceName := vars[variableName]
if len(deviceName) == 0 {
return invalidID, ErrorMissingDeviceNameVar
}
return ParseID(deviceName)
},
)
},
}
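// Wiring sketch (assumes gorilla/mux for routing and justinas/alice for
// chaining; messageHandler stands in for any http.Handler, e.g. a *MessageHandler):
//
// router := mux.NewRouter()
// chain := alice.New(UseID.FromPath("deviceId"))
// router.Handle("/device/{deviceId}/message", chain.Then(messageHandler))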
// useID is the general purpose creator for an Alice-style constructor that passes the ID
// to the delegate via the request Context. This internal function is exported via UseID.F.
func useID(f IDFromRequest) func(http.Handler) http.Handler {
return func(delegate http.Handler) http.Handler {
return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
id, err := f(request)
if err != nil {
httperror.Formatf(
response,
http.StatusBadRequest,
"Could extract device id: %s",
err,
)
return
}
ctx := WithID(id, request.Context())
delegate.ServeHTTP(response, request.WithContext(ctx))
})
}
}
// MessageHandler is a configurable http.Handler which handles inbound WRP traffic
// to be sent to devices.
type MessageHandler struct {
// Logger is the sink for logging output. If not set, logging will be sent to logging.DefaultLogger().
Logger logging.Logger
// Decoders is the pool of wrp.Decoder objects used to decode http.Request bodies
// sent to this handler. This field is required.
Decoders *wrp.DecoderPool
// Encoders is the optional pool of wrp.Encoder objects used to encode wrp messages sent
// as HTTP responses. If not supplied, this handler assumes the format returned by the Router
// is the format to be sent back in the HTTP response.
Encoders *wrp.EncoderPool
// Router is the device message Router to use. This field is required.
Router Router
}
func (mh *MessageHandler) logger() logging.Logger {
if mh.Logger != nil {
return mh.Logger
}
return logging.DefaultLogger()
}
// decodeRequest transforms an HTTP request into a device request.
func (mh *MessageHandler) decodeRequest(httpRequest *http.Request) (deviceRequest *Request, err error) {
deviceRequest, err = DecodeRequest(httpRequest.Body, mh.Decoders)
if err == nil {
deviceRequest = deviceRequest.WithContext(httpRequest.Context())
}
return
}
func (mh *MessageHandler) ServeHTTP(httpResponse http.ResponseWriter, httpRequest *http.Request) {
deviceRequest, err := mh.decodeRequest(httpRequest)
if err != nil {
httperror.Formatf(
httpResponse,
http.StatusBadRequest,
"Could not decode WRP message: %s",
err,
)
return
}
// deviceRequest carries the context through the routing infrastructure
if deviceResponse, err := mh.Router.Route(deviceRequest); err != nil {
code := http.StatusInternalServerError
switch err {
case ErrorInvalidDeviceName:
code = http.StatusBadRequest
case ErrorDeviceNotFound:
code = http.StatusNotFound
case ErrorNonUniqueID:
code = http.StatusBadRequest
case ErrorInvalidTransactionKey:
code = http.StatusBadRequest
case ErrorTransactionAlreadyRegistered:
code = http.StatusBadRequest
}
httperror.Formatf(
httpResponse,
code,
"Could not process device request: %s",
err,
)
} else if deviceResponse != nil {
if err := EncodeResponse(httpResponse, deviceResponse, mh.Encoders); err != nil {
mh.logger().Error("Error while writing transaction response: %s", err)
}
}
// if deviceResponse == nil, that just means the request was not something that represented
// the start of a transaction. For example, events do not carry a transaction key because
// they do not expect responses.
}
type ConnectHandler struct {
Logger logging.Logger
Connector Connector
ResponseHeader http.Header
}
func (ch *ConnectHandler) logger() logging.Logger {
if ch.Logger != nil {
return ch.Logger
}
return logging.DefaultLogger()
}
func (ch *ConnectHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
if device, err := ch.Connector.Connect(response, request, ch.ResponseHeader); err != nil {
ch.logger().Error("Failed to connect device: %s", err)
} else {
ch.logger().Debug("Connected device: %s", device.ID())
}
}
// ConnectedDeviceListener listens for connection and disconnection events and produces
// a JSON document containing information about connected devices. It refreshes this
// document at a regular interval.
type ConnectedDeviceListener struct {
// RefreshInterval is the time interval at which the cached JSON device list is updated.
// If this field is nonpositive, DefaultRefreshInterval is used.
RefreshInterval time.Duration
// Tick is a factory function that produces a ticker channel and a stop function.
// If not set, time.Ticker is used and the stop function is ticker.Stop.
Tick func(time.Duration) (<-chan time.Time, func())
lock sync.Mutex
initializeOnce sync.Once
devices map[Key][]byte
changeCount uint32
updates chan []byte
shutdown chan struct{}
}
func (l *ConnectedDeviceListener) refreshInterval() time.Duration {
if l.RefreshInterval > 0 {
return l.RefreshInterval
}
return DefaultRefreshInterval
}
// newTick returns a ticker channel and a stop function for cleanup. If the Tick factory
// is set, that function is used. Otherwise, a time.Ticker is created and (ticker.C, ticker.Stop) is returned.
func (l *ConnectedDeviceListener) newTick() (<-chan time.Time, func()) {
refreshInterval := l.refreshInterval()
if l.Tick != nil {
return l.Tick(refreshInterval)
}
ticker := time.NewTicker(refreshInterval)
return ticker.C, ticker.Stop
}
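// Test sketch (illustrative): injecting a manual tick so refreshes can be
// driven deterministically instead of waiting on a real time.Ticker.
//
// tickC := make(chan time.Time, 1)
// l := &ConnectedDeviceListener{
// 	Tick: func(time.Duration) (<-chan time.Time, func()) { return tickC, func() {} },
// }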
func (l *ConnectedDeviceListener) onDeviceEvent(e *Event) {
switch e.Type {
case Connect:
l.lock.Lock()
defer l.lock.Unlock()
l.changeCount++
l.devices[e.Device.Key()] = []byte(e.Device.String())
case Disconnect:
l.lock.Lock()
defer l.lock.Unlock()
l.changeCount++
delete(l.devices, e.Device.Key())
}
}
func (l *ConnectedDeviceListener) refresh() {
l.lock.Lock()
defer l.lock.Unlock()
if l.changeCount > 0 {
l.changeCount = 0
var (
output = bytes.NewBufferString(`{"devices":[`)
needsComma bool
comma = []byte(`,`)
)
for _, deviceJSON := range l.devices {
if needsComma {
output.Write(comma)
}
output.Write(deviceJSON)
needsComma = true
}
output.WriteString(`]}`)
l.updates <- output.Bytes()
}
}
// Stop stops updates coming from this listener.
func (l *ConnectedDeviceListener) Stop() {
l.lock.Lock()
defer l.lock.Unlock()
if l.shutdown != nil {
close(l.shutdown)
close(l.updates)
l.shutdown = nil
l.updates = nil
}
}
// Listen starts listening for changes to the set of connected devices. The returned Listener may
// be placed into an Options. This method is idempotent, and may be called to restart this handler
// after Stop is called. If this method is called multiple times without calling Stop, it simply
// returns the same Listener and output channel.
//
// The returned channel will receive updated JSON device list documents. This channel can be
// used with ListHandler.Consume.
func (l *ConnectedDeviceListener) Listen() (Listener, <-chan []byte) {
l.lock.Lock()
defer l.lock.Unlock()
l.initializeOnce.Do(func() {
l.devices = make(map[Key][]byte, 1000)
})
if l.shutdown == nil {
l.shutdown = make(chan struct{})
l.updates = make(chan []byte, 1)
// spawn the monitor goroutine
go func(shutdown <-chan struct{}) {
refreshC, refreshStop := l.newTick()
defer refreshStop()
for {
select {
case <-shutdown:
return
case <-refreshC:
l.refresh()
}
}
}(l.shutdown)
}
return l.onDeviceEvent, l.updates
}
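// Consumption sketch (names illustrative): the Listener is placed into the
// surrounding package's connection-event options, while the channel feeds
// whatever serves the cached document (cf. the ListHandler.Consume note above).
//
// listener, updates := new(ConnectedDeviceListener).Listen()
// go func() {
// 	for document := range updates {
// 		_ = document // e.g. hand the JSON to a ListHandler.Consume-style consumer
// 	}
// }()
// _ = listener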
# server.py
# NOTE: this fragment starts after the original import/constant block; it relies
# on names defined there (flask, flask_sqlalchemy, request, current_app, Map,
# Rule, sqlalchemy's text, configparser, os, json, datetime, argparse, logging,
# httperror-style helpers aside, plus LANG, MAX_RESULTS, re_integer_arg and
# re_normalize_headword).
class MySQLEngine (object):
""" Database Interface """
def __init__ (self, **kwargs):
args = self.get_connection_params (kwargs)
self.url = 'mysql+pymysql://{user}:{password}@{host}:{port}/{database}'.format (**args)
logger.log (logging.INFO,
'MySQLEngine: Connecting to mysql+pymysql://{user}:password@{host}:{port}/{database}'.format (**args))
self.engine = sqlalchemy.create_engine (self.url + '?charset=utf8mb4&sql_mode=ANSI',
pool_recycle = 300)
def get_connection_params (self, kwargs = {}):
""" Get connection parameters from .my.cnf file. """
config = configparser.ConfigParser ()
if 'MYSQL_CONF' in kwargs:
config.read (('/etc/my.cnf', os.path.expanduser (kwargs['MYSQL_CONF'])))
else:
config.read (('/etc/my.cnf', os.path.expanduser ('~/.my.cnf')))
section = config[kwargs.get ('MYSQL_GROUP', 'mysql')]
from_my_cnf = {
'host' : section.get ('host', 'localhost').strip ('"'),
'port' : section.get ('port', '3306').strip ('"'),
'database' : section.get ('database', '').strip ('"'),
'user' : section.get ('user', '').strip ('"'),
'password' : section.get ('password', '').strip ('"'),
}
return from_my_cnf
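# Usage sketch (reads the [mysql] group of ~/.my.cnf unless MYSQL_CONF /
# MYSQL_GROUP are passed; unrelated keyword arguments are simply ignored):
# engine = MySQLEngine (MYSQL_CONF = '~/.my.cnf', MYSQL_GROUP = 'mysql')
# with engine.engine.begin () as conn: ...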
def execute (conn, sql, parameters, debug_level = logging.DEBUG):
start_time = datetime.datetime.now ()
result = conn.execute (text (sql.strip ()), parameters)
logger.log (debug_level, '%d rows in %.3fs',
result.rowcount, (datetime.datetime.now () - start_time).total_seconds ())
return result
def clip (i, min_, max_):
return max (min (int (i), max_), min_)
def arg (name, default, regex, msg = None):
arg = request.args.get (name, default)
if not regex.match (arg):
if msg is None:
msg = 'Invalid %s parameter' % name
# abort() expects an HTTP status code; passing only the message string raises a LookupError
flask.abort (400, msg)
return arg
cpd_iso_trans = str.maketrans ('âêîôû', 'aeiou')
def normalize_iso (text):
"""Normalize to ISO 15919
CPD transliteration is almost ISO 15919, but uses uppercase for proper names
and 'â' instead of 'a' to signal a syncope 'a' + 'a'.
We have to replace all 'â's because they definitely do not conform to ISO.
We get away with serving uppercase letters in proper names because it is an
easy fix on the client's side.
"""
return text.translate (cpd_iso_trans)
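# e.g. (illustrative form, not from the dictionary): normalize_iso ('sammâdiṭṭhi')
# yields 'sammadiṭṭhi'; uppercase proper-name letters are deliberately left alone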
def make_headword (row, lang = LANG):
""" row is: headword_id, text, article_id """
normalized = text = normalize_iso (row[1])
m = re_normalize_headword.match (normalized)
if m:
normalized = m.group (1).lower ()
return {
'articles_url' : 'v1/articles/%d' % row[2],
'headwords_url' : 'v1/headwords/%d' % row[0],
'lang' : lang,
'normalized_text' : normalized,
'text' : text,
}
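# Illustrative result (values made up):
# { 'articles_url' : 'v1/articles/42', 'headwords_url' : 'v1/headwords/7',
# 'lang' : LANG, 'normalized_text' : 'kamma', 'text' : 'kamma' }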
def make_json_response (obj):
resp = flask.Response (json.dumps (obj, indent=2, sort_keys=True), mimetype='application/json')
resp.headers['Access-Control-Allow-Origin'] = '*'
return resp
def make_headwords_response (res, limit = MAX_RESULTS, lang = LANG):
return make_json_response ({
'limit' : limit,
'data' : [ make_headword (row, lang) for row in res ]
})
# need this before first @app.endpoint declaration
app = flask.Flask (__name__)
@app.endpoint ('info')
def info ():
""" Endpoint. The root of the application. """
info = {
'name' : app.config['APPLICATION_NAME'],
'short_name' : app.config['APPLICATION_SHORT_NAME'],
'main_page_url' : app.config['APPLICATION_MAIN_URL'],
# 'css_url' : app.config.get ('APPLICATION_CSS_URL', ''),
'css' : 'span.smalltext { font-size: smaller }',
'supported_langs_query' : [ LANG ],
}
return make_json_response (info)
@app.endpoint ('headwords')
def headwords ():
""" Endpoint. Retrieve a list of headword IDs.
This implements the search query and wordlist.
"""
q = request.args.get ('q')
fulltext = request.args.get ('fulltext')
offset = int (arg ('offset', '0', re_integer_arg))
limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)
where = ''
if (not q) and (not fulltext):
# Retrieve full list of headwords
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, r"""
SELECT id, webkeyword, no
FROM keyword
ORDER BY sortkeyword, n, no
LIMIT :limit
OFFSET :offset
""", { 'offset' : offset, 'limit' : limit })
return make_headwords_response (res, limit)
if q:
q = q.replace ('-', '')
q = q.replace ('%', '')
q = q.replace ('?', '_')
q = q.replace ('*', '%')
where = "(keyword LIKE :q) AND"
if not fulltext:
# easy out
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, r"""
SELECT id, webkeyword, no
FROM keyword
WHERE keyword LIKE :q
ORDER BY sortkeyword, n, no
LIMIT :limit
OFFSET :offset
""", { 'q' : q, 'offset' : offset, 'limit' : limit })
return make_headwords_response (res, limit)
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, r"""
SELECT DISTINCT
k.id,
k.webkeyword COLLATE utf8mb4_bin AS webkeyword,
k.no
FROM keyword k,
article a
WHERE {where} (MATCH (a.idxtext) AGAINST (:fulltext IN BOOLEAN MODE))
AND a.no = k.no
ORDER BY k.sortkeyword, k.n, k.no
LIMIT :limit
OFFSET :offset
""".format (where = where), { 'q' : q, 'fulltext' : fulltext,
'offset' : offset, 'limit' : limit })
return make_headwords_response (res, limit)
@app.endpoint ('headwords_id')
def headwords_id (_id):
""" Retrieve a headword. """
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, """
SELECT id, webkeyword, no
FROM keyword
WHERE id = :id
""", { 'id' : _id })
return make_headwords_response (res)
@app.endpoint ('headwords_id_context')
def headwords_id_context (_id):
""" Retrieve a list of headwords around a given headword. """
limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, """
SELECT keyword, sortkeyword
FROM keyword
WHERE id = :id
""", { 'id' : _id })
keyword, sortkeyword = res.fetchone ()
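# Context window technique: fetch up to `limit` headwords sorting strictly
# below the target (descending, then reversed) and `limit + 1` at or above it,
# so the target headword lands inside the combined slice.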
res1 = execute (conn, """
SELECT id, webkeyword, no
FROM keyword
WHERE sortkeyword < :sortkeyword
ORDER BY sortkeyword DESC, n DESC, no DESC
LIMIT :limit
""", { 'sortkeyword' : sortkeyword, 'limit' : limit })
res2 = execute (conn, """
SELECT id, webkeyword, no
FROM keyword
WHERE sortkeyword >= :sortkeyword
ORDER BY sortkeyword, n, no
LIMIT :limit
""", { 'sortkeyword' : sortkeyword, 'limit' : limit + 1 })
res = []
for row in reversed (res1.fetchall ()):
res.append (row[:3])
for row in res2:
res.append (row[:3])
return make_headwords_response (res, limit)
def make_article (row, lang = LANG):
""" row is: article_id """
return {
'articles_url' : 'v1/articles/%d' % row[0],
}
def make_articles_response (res, limit = MAX_RESULTS, lang = LANG):
return make_json_response ({
'limit' : limit,
'data' : [ make_article (row, lang) for row in res ]
})
@app.endpoint ('articles')
def articles ():
""" Endpoint. Retrieve a list of articles. """
offset = int (arg ('offset', '0', re_integer_arg))
limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, r"""
SELECT no
FROM article
ORDER BY no
LIMIT :limit
OFFSET :offset
""", { 'offset' : offset, 'limit' : limit })
return make_articles_response (res, limit)
@app.endpoint ('articles_id')
def articles_id (_id = None):
""" Endpoint. Retrieve an article. """
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, r"""
SELECT no
FROM article
WHERE no = :id
""", { 'id' : _id })
return make_articles_response (res)
@app.endpoint ('articles_id_formats')
def articles_id_formats (_id):
""" Endpoint. Retrieve an article's available formats. """
canonical_url = app.config['APPLICATION_MAIN_URL'] + 'search?article_id='
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, r"""
SELECT webtext FROM article WHERE no=:no
""", { 'no' : _id })
return make_json_response ([
{
'mimetype' : 'text/x-html-literal',
'lang' : LANG,
'embeddable' : True,
'text' : normalize_iso ('<div>%s</div>' % res.fetchone ()[0]),
},
{
'mimetype' : 'text/html',
'lang' : LANG,
'canonical' : True,
'urls' : [ canonical_url + str (_id) ],
}
])
@app.endpoint ('articles_id_headwords')
def articles_id_headwords (_id):
""" Endpoint. Retrieve the list of headwords for an article. """
offset = int (arg ('offset', '0', re_integer_arg))
limit = clip (arg ('limit', str (MAX_RESULTS), re_integer_arg), 1, MAX_RESULTS)
with current_app.config.dba.engine.begin () as conn:
res = execute (conn, r"""
SELECT id, webkeyword, no
FROM keyword
WHERE no = :id
ORDER BY sortkeyword
LIMIT :limit
OFFSET :offset
""", { 'id' : _id, 'offset' : offset, 'limit' : limit })
return make_headwords_response (res, limit)
#
# main
#
parser = argparse.ArgumentParser (description='A simple API for dictionaries')
parser.add_argument ('-v', '--verbose', dest='verbose', action='count',
help='increase output verbosity', default=0)
parser.add_argument ('-c', '--config-file', dest='config_file', action='append',
required=True, metavar='CONFIG_FILE',
help="a config file (repeat for more than one, later ones overwrite)")
args = parser.parse_args ()
args.start_time = datetime.datetime.now ()
LOG_LEVELS = {
0: logging.CRITICAL,
1: logging.ERROR,
2: logging.WARN,
3: logging.INFO,
4: logging.DEBUG
}
args.log_level = LOG_LEVELS.get (args.verbose + 1, logging.CRITICAL)
logging.basicConfig (format = '%(asctime)s - %(levelname)s - %(message)s')
logging.getLogger ('sqlalchemy.engine').setLevel (args.log_level)
logging.getLogger ('server').setLevel (args.log_level)
logger = logging.getLogger ('server')
for config_file in args.config_file:
app.config.from_pyfile (config_file)
app.config.dba = MySQLEngine (**app.config)
app.config['SQLALCHEMY_DATABASE_URI'] = app.config.dba.url
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['server_start_time'] = str (int (args.start_time.timestamp ()))
app.url_map = Map ([
Rule ('/v1', endpoint = 'info'),
Rule ('/v1/headwords', endpoint = 'headwords'),
Rule ('/v1/headwords/<int:_id>', endpoint = 'headwords_id'),
Rule ('/v1/headwords/<int:_id>/context', endpoint = 'headwords_id_context'),
Rule ('/v1/articles', endpoint = 'articles'),
Rule ('/v1/articles/<int:_id>', endpoint = 'articles_id'),
Rule ('/v1/articles/<int:_id>/formats', endpoint = 'articles_id_formats'),
Rule ('/v1/articles/<int:_id>/headwords', endpoint = 'articles_id_headwords'),
])
dba = flask_sqlalchemy.SQLAlchemy ()
dba.init_app (app)
port = app.config.get ('APPLICATION_PORT', 5000)
path = app.config.get ('APPLICATION_ROOT', '')
logger.log (logging.INFO, "'{name}' is now served from localhost:{port}{path}/v1".format (
name = app.config['APPLICATION_NAME'],
port = port,
path = path))
if __name__ == "__main__":
from werkzeug.serving import run_simple
if path == '':
run_simple ('localhost', port, app)
else:
from werkzeug.wsgi import DispatcherMiddleware
application = DispatcherMiddleware (flask.Flask ('dummy_app_for_root'), {
app.config['APPLICATION_ROOT'] : app,
})
# the source truncates here; a run_simple ('localhost', port, application)
# call presumably followed
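# Example requests against the endpoints above (host/port illustrative):
# curl 'http://localhost:5000/v1/headwords?q=dhamm*&limit=5'
# curl 'http://localhost:5000/v1/articles/42/formats'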
# Train.py
# Imports restored for the fragment below (the original preamble was
# truncated); `classes`, the id-to-label sequence used further down, was
# defined in that lost preamble and is not reconstructed here.
import os
import csv
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from torch.utils.data import Dataset
def image_loader(path):
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def load_labels(path):
with open(path, newline='') as csvfile: | headers = next(reader)
return [{
headers[column_index]: row[column_index]
for column_index in range(len(row))
}
for row in reader]
class CustomDataset(Dataset):
def __init__(self, root, split='train', incr=None, transform=None):
self.root = root = os.path.expanduser(root)
category = 'id'
self.category = category
self.split = split
self.incr = incr
if incr is None:
labels = load_labels(os.path.join(root, f'cleaned_{split}.csv'))
else:
labels = load_labels(os.path.join(root, split+str(incr)+'.csv'))
self.entries = [
(label_entry['Image'], int(label_entry[category]))
for label_entry in labels
if os.path.exists(
os.path.join(self.root, f'{split}/{split}', label_entry['Image']))
]
self.transform = transform
def __len__(self):
return len(self.entries)
def __getitem__(self, index):
image_filename, label = self.entries[index]
image_filepath = os.path.join(self.root, f'{self.split}/{self.split}', image_filename)
image = image_loader(image_filepath)
if self.transform is not None:
image = self.transform(image)
return image, label
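# Usage sketch: besides cleaned_train.csv, the oversampling loop further down
# expects split files train1.csv ... train9.csv in the project root, e.g.:
# ds = CustomDataset(root=os.getcwd() + '/', split='train', incr=3,
#                    transform=transforms_oversampling)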
"""#### Data Augmentation & Data Normalization"""
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
num_workers = 2
# Data augmentation and normalization for training
# Just normalization for validation
transforms_train = transforms.Compose([
transforms.Resize([224,224]), # Resizing the image
transforms.RandomHorizontalFlip(), # Flip the data horizontally
transforms.RandomVerticalFlip(), # Flip the data vertically
transforms.Grayscale(num_output_channels=3),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
transforms_val = transforms.Compose([
transforms.Resize([224,224]),
transforms.Grayscale(num_output_channels=3),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
transforms_oversampling = transforms.Compose([
transforms.Resize([230,230]),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomRotation(15),
transforms.CenterCrop(224),
transforms.Grayscale(num_output_channels=3),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])
train_set = CustomDataset(root = os.getcwd()+'/', transform=transforms_train)
for i in range(1,10):
for j in range(i,10):
train_set += CustomDataset(root = os.getcwd()+'/', incr=i, transform=transforms_oversampling)
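# Note: the unused inner variable j drives the oversampling -- split file
# train{i}.csv is appended (10 - i) times, so lower-numbered splits are
# duplicated more often under the heavier augmentation defined above.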
train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True, num_workers=num_workers)
val_set = CustomDataset(root = os.getcwd()+'/', transform=transforms_val)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=128, shuffle=True, num_workers=num_workers)
print(len(train_set))  # bare expressions from the original notebook, wrapped for script use
print(len(classes))
def imshow(inp):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, labels = next(iter(train_loader))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs[:4])
# print labels
print(' '.join('{:>10}'.format(classes[labels[j]]) for j in range(4)))
# image show
imshow(out)
"""#### Models"""
def _weights_init(m):
classname = m.__class__.__name__
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A'):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
For CIFAR10, the ResNet paper uses option A (a parameter-free shortcut).
"""
self.shortcut = LambdaLayer(lambda x:
F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=4246):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.linear = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
import torchvision.models as models
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# net = models.resnet152(pretrained=False) #Method 2
# net = models.resnet50(pretrained=False) #Method 3
net = models.resnet50(pretrained=True) #Method 4, best one
num_ftrs = net.fc.in_features
net.fc = nn.Linear(num_ftrs, 4246)
# net = ResNet(BasicBlock, [3, 3, 3]) #Method 1
net = net.to(device)
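# Optional variant (a sketch, not used above): freeze the pretrained backbone
# and train only the freshly created classifier head.
# for p in net.parameters():
#     p.requires_grad = False
# net.fc = nn.Linear(num_ftrs, 4246)  # the new head's parameters stay trainable
# net = net.to(device)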
"""#### Loss function & Optimizer"""
import torch.optim as optim
def createLossAndOptimizer(net, learning_rate):
# it combines softmax with negative log likelihood loss
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=1e-4)
#optimizer = optim.Adam(net.parameters(), lr=learning_rate)
return criterion, optimizer
"""#### Training Model
Batch size = 128, number of epochs = 20, starting learning rate = 0.01
Save the trained model
"""
def train(net, batch_size, n_epochs, learning_rate):
"""
Train a neural network and print statistics of the training
:param net: (PyTorch Neural Network)
:param batch_size: (int)
:param n_epochs: (int) Number of iterations on the training set
:param learning_rate: (float) learning rate used by the optimizer
"""
print("===== HYPERPARAMETERS =====")
print("batch_size=", batch_size)
print("n_epochs=", n_epochs)
print("Starting learning_rate=", learning_rate)
print("=" * 30)
n_minibatches = len(train_loader)
criterion, optimizer = createLossAndOptimizer(net, learning_rate)
# Init variables used for plotting the loss
train_history = []
val_history = []
# (the source truncates here; the epoch loop and model saving follow)
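# A minimal sketch of the truncated remainder -- a standard PyTorch epoch loop
# with checkpointing. The original's exact validation/logging details are
# unknown, so this is illustrative rather than the author's code:
#
# for epoch in range(n_epochs):
#     net.train()
#     running_loss = 0.0
#     for inputs, labels in train_loader:
#         inputs, labels = inputs.to(device), labels.to(device)
#         optimizer.zero_grad()
#         loss = criterion(net(inputs), labels)
#         loss.backward()
#         optimizer.step()
#         running_loss += loss.item()
#     train_history.append(running_loss / n_minibatches)
# torch.save(net.state_dict(), 'resnet50_finetuned.pth')
#
# train(net, batch_size=128, n_epochs=20, learning_rate=0.01)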
Train.py | rb') as f:
img = Image.open(f)
return img.convert('RGB')
def load_labels(path):
with open(path, newline='') as csvfile:
reader = csv.reader(csvfile)
headers = next(reader)
return [{
headers[column_index]: row[column_index]
for column_index in range(len(row))
}
for row in reader]
class CustomDataset(Dataset):
def __init__(self, root, split='train', incr=None, transform=None):
self.root = root = os.path.expanduser(root)
category = 'id'
self.category = category
self.split = split
self.incr = incr
if incr is None:
labels = load_labels(os.path.join(root, f'cleaned_{split}.csv'))
else:
labels = load_labels(os.path.join(root, split+str(incr)+'.csv'))
self.entries = [
(label_entry['Image'], int(label_entry[category]))
for label_entry in labels
if os.path.exists(
os.path.join(self.root, f'{split}/{split}', label_entry['Image']))
]
self.transform = transform
def __len__(self):
return len(self.entries)
def __getitem__(self, index):
image_filename, label = self.entries[index]
image_filepath = os.path.join(self.root, f'{self.split}/{self.split}', image_filename)
image = image_loader(image_filepath)
if self.transform is not None:
image = self.transform(image)
return image, label
"""#### Data Augmentation & Data Normalization"""
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
num_workers = 2
# Data augmentation and normalization for training
# Just normalization for validation
transforms_train = transforms.Compose([
transforms.Resize([224,224]), # Resizing the image
transforms.RandomHorizontalFlip(), # Flip the data horizontally
transforms.RandomVerticalFlip(), # Flip the data vertically
transforms.Grayscale(num_output_channels=3),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
transforms_val = transforms.Compose([
transforms.Resize([224,224]),
transforms.Grayscale(num_output_channels=3),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
transforms_oversampling = transforms.Compose([
transforms.Resize([230,230]),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomRotation(15),
transforms.CenterCrop(224),
transforms.Grayscale(num_output_channels=3),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])
train_set = CustomDataset(root = os.getcwd()+'/', transform=transforms_train)
for i in range(1,10):
for j in range(i,10):
train_set += CustomDataset(root = os.getcwd()+'/', incr=i, transform=transforms_oversampling)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True, num_workers=num_workers)
val_set = CustomDataset(root = os.getcwd()+'/', transform=transforms_val)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=128, shuffle=True, num_workers=num_workers)
len(train_set)
len(classes)
def imshow(inp):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, labels = next(iter(train_loader))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs[:4])
# print labels
print(' '.join('{:>10}'.format(classes[labels[j]]) for j in range(4)))
# image show
imshow(out)
"""#### Models"""
def _weights_init(m):
classname = m.__class__.__name__
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A'):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(lambda x:
F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=4246):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.linear = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
import torchvision.models as models
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# net = models.resnet152(pretrained=False) #Method 2
# net = models.resnet50(pretrained=False) #Method 3
net = models.resnet50(pretrained=True) #Method 4, best one
num_ftrs = net.fc.in_features
net.fc = nn.Linear(num_ftrs, 4246)
# net = ResNet(BasicBlock, [3, 3, 3]) #Method 1
net = net.to(device)
"""#### Loss function & Optimizer"""
import torch.optim as optim
def createLossAndOptimizer(net, learning_rate):
# it combines softmax with negative log likelihood loss
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=1e-4)
#optimizer = optim.Adam(net.parameters(), lr=learning_rate)
return criterion, optimizer
"""#### Training Model
Batch size = 128, number of epochs = 20, starting learning rate = 0.01
Save the trained model
"""
def | (net, batch_size, n_epochs, | train | identifier_name |
PAD.py | bound_orbitals : list of callables
`bound_orbitals[b](x,y,z)` should evaluate the `b`-th orbital
on a cartesian grid
pke : np.1darray (shape (npke,))
photokinetic energies (in Hartree) for which the PADs should be calculated
pol : int, optional
polarization of light, 0 (linear), -1 (left), +1 (right)
pad_file : str, optional
path to file for storing table with PADs
units : str, optional
units of energies and cross sections in PAD table
* 'eV-Mb' : PKE in eV, sigma in megabarn
* 'au' : PKE in Hartree, sigma in bohr^2
Returns
-------
pad : np.ndarray (shape (npke,norb,3))
`pad[k,b,:]` contains the three parameters
`sigma`,`beta1` and `beta2` for ionization from orbital `b` into
the continuum at energy `pke[k]`.
"""
print( " " )
print( " *******************" )
print( " * PADs *" )
print( " *******************" )
print( " " )
# number of bound orbitals
norb = len(bound_orbitals)
# number of energies
npke = len(pke)
# find values of bound orbitals on a Becke grid for numerical integration
grid, orbs = self.muffin.evaluate_orbitals(bound_orbitals)
# compute orientation-averaged PAD for each energy
pad = np.zeros((npke,norb,3))
for i,energy in enumerate(pke):
if (self.muffin.debug > -1):
print( "%3.1d of %3.1d PKE = %e Hartree ( %e eV )" % (i+1,npke, energy, energy * AtomicData.hartree_to_eV) )
pad[i,:,:] = self.muffin.photoelectron_distribution(energy, grid, orbs,
pol=pol)
plot_pads(pke, pad, pol)
save_pads(pke, pad, pol, pad_file, units=units)
# save intermediate variables for locating resonances
self._pke = pke
self._pad = pad
self._pol = pol
self._grid = grid
self._orbs = orbs
return pad
def find_resonances(self, sigma_thresh=1.0):
"""
identify resonances as peaks in the photoionization cross section
Resonances are highly peaked local maxima of sigma(E) which exceed a certain threshold.
First the local maxima are identified in the curve sigma(E) that was calculated in the
a previous call to `calculate_pads(...)`. The energetic positions of the maxima are
refined by bisection. The kinetic energy grid use to compute sigma(E) has to be fine
enough to obtain the initial guesses
Parameters
----------
sigma_thresh : float, optional
Maxima in the photoionization cross section are considered to be resonances
if they exceed a threshold, sigma > sigma_thresh (in magebarn)
Returns
-------
resonances : dict
`resonances[i]` contains a list of continuum orbitals at the resonances for ionization
from initial orbital `i`. The energy of the resonance can be accessed `resonances[i].energy`.
"""
assert hasattr(self, "_pke"), "`find_resonances(...)` must be preceded by call to `calculate_pads(...)`."
# retrieve data from previous PAD calculation
pke = self._pke
pad = self._pad
pol = self._pol
grid = self._grid
orbs = self._orbs
print( " " )
print( " **********************" )
print( " * Resonances *" )
print( " **********************" )
print( " " )
npke, norb, dummy = pad.shape
# `energies[i]` is a list of photoelectron kinetic energy at resonance
# for ionization from orbital `i`
energies = {}
# `sigmas[i]` contains list of values of sigma at resonances
sigmas = {}
# `resonances[i] contains list of continuum orbitals at resonances
# (instances of `CMSWavefunction`)
resonances = {}
for i in range(0, norb):
# Instead of maximizing sigma we minimize (-1)*sigma.
# find indices of local minima
minima = signal.argrelmin(-pad[:,i,0])[0].tolist()
# and indices of local maxima
maxima = signal.argrelmax(-pad[:,i,0])[0].tolist()
if len(minima) == 0:
# No local maximum of sigma, which is a local minimum of (-1)*sigma,
# so no resonance
continue
if len(maxima) == 0:
# No maximum was found, bracket minimum by end points
maxima += [0,-1]
# Each local minimum should be bracketed by two local maxima
if pke[minima[0]] < pke[maxima[0]]:
# first extremum is a minimum, so
# maxima[j-1] < minima[j] < maxima[j]
maxima = [0] + maxima
# After prepending the first point, we have
# maxima[j ] < minima[j] < maxima[j+1]
if pke[minima[-1]] > pke[maxima[-1]]:
# last extremum is a minimum, which is not bracketed
# by two maxima
maxima = maxima + [-1]
# After appending last point, we have
# maxima[i ] < minima[i] < maxima[i+1]
# for all minima
assert len(minima) == len(maxima)-1
def func(energy):
# compute (-1) x photoionization cross section for initial orbital
# with index `i` at energy `energy`
pad_i = self.muffin.photoelectron_distribution(energy, grid, orbs[i:i+1,:],
pol=pol)
sigma_i = pad_i[0,0]
return (-1)*sigma_i
# list of photoelectron kinetic energy at resonance
energies[i] = []
# values of sigma at resonances
sigmas[i] = []
# list of continuum orbitals at resonances (instances of `CMSWavefunction`)
resonances[i] = []
# Refine energy at each local minimum of func(E) (= maximum of sigma)
for j in range(0, len(minima)):
# initial guess
emin0 = pke[minima[j]]
# maxima that bracket this local minimum
emax_lower = pke[maxima[j] ]
emax_upper = pke[maxima[j+1]]
assert emax_lower < emin0 < emax_upper
# We search for a minimum around emin0 by Golden search
# (https://en.wikipedia.org/wiki/Golden-section_search)
# which assumes that there is a single minimum in the interval [l,u]
alpha = 0.2
l = (1.0-alpha)*emin0 + alpha*emax_lower
u = (1.0-alpha)*emin0 + alpha*emax_upper
# find minimum of func(E) = -log(cond(M(E))) in the interval [l,u]
try:
emin = minimize_golden(func, l, u)
except StopIteration:
continue
fmin = func(emin)
assert self.muffin.energy == emin
sigma_max = -fmin
if (sigma_max < sigma_thresh):
# sigma at maximum is too small to classify as a resonance
continue
resonances[i] += self.muffin.eigenchannel_analysis()
energies[i].append(emin)
sigmas[i].append(sigma_max)
if len(resonances.keys()) > 0:
print( " -----------------------------------------------------------------------------" )
print( " Orbital Resonance Energy Sigma " )
print( " Hartree eV Mb " )
print( " -----------------------------------------------------------------------------" )
else:
print( " no resonances found with sigma > %e Mb | """
Parameters
----------
muffin : instance of `MuffinTinPotential`
muffin tin potential, which provides the continuum orbitals
"""
def __init__(self, muffin):
self.muffin = muffin
def compute_pads(self, bound_orbitals, pke,
pol=0, pad_file="/tmp/pad.dat", units="eV-Mb"):
"""
compute photoelectron angular distributions (PADs) for isotropic ensemble
as a function of kinetic energy of the photoelectron using the CMS method
Parameters
---------- | identifier_body |
|
PAD.py |
the continuum at energy `pke[k]`.
"""
print( " " )
print( " *******************" )
print( " * PADs *" )
print( " *******************" )
print( " " )
# number of bound orbitals
norb = len(bound_orbitals)
# number of energies
npke = len(pke)
# find values of bound orbitals on a Becke grid for numerical integration
grid, orbs = self.muffin.evaluate_orbitals(bound_orbitals)
# compute orientation-averaged PAD for each energy
pad = np.zeros((npke,norb,3))
for i,energy in enumerate(pke):
if (self.muffin.debug > -1):
print( "%3.1d of %3.1d PKE = %e Hartree ( %e eV )" % (i+1,npke, energy, energy * AtomicData.hartree_to_eV) )
pad[i,:,:] = self.muffin.photoelectron_distribution(energy, grid, orbs,
pol=pol)
plot_pads(pke, pad, pol)
save_pads(pke, pad, pol, pad_file, units=units)
# save intermediate variables for locating resonances
self._pke = pke
self._pad = pad
self._pol = pol
self._grid = grid
self._orbs = orbs
return pad
def find_resonances(self, sigma_thresh=1.0):
"""
identify resonances as peaks in the photoionization cross section
Resonances are highly peaked local maxima of sigma(E) which exceed a certain threshold.
First the local maxima are identified in the curve sigma(E) that was calculated in the
a previous call to `calculate_pads(...)`. The energetic positions of the maxima are
refined by bisection. The kinetic energy grid use to compute sigma(E) has to be fine
enough to obtain the initial guesses
Parameters
----------
sigma_thresh : float, optional
Maxima in the photoionization cross section are considered to be resonances
if they exceed a threshold, sigma > sigma_thresh (in magebarn)
Returns
-------
resonances : dict
`resonances[i]` contains a list of continuum orbitals at the resonances for ionization
from initial orbital `i`. The energy of the resonance can be accessed `resonances[i].energy`.
"""
assert hasattr(self, "_pke"), "`find_resonances(...)` must be preceded by call to `calculate_pads(...)`."
# retrieve data from previous PAD calculation
pke = self._pke
pad = self._pad
pol = self._pol
grid = self._grid
orbs = self._orbs
print( " " )
print( " **********************" )
print( " * Resonances *" )
print( " **********************" )
print( " " )
npke, norb, dummy = pad.shape
# `energies[i]` is a list of photoelectron kinetic energy at resonance
# for ionization from orbital `i`
energies = {}
# `sigmas[i]` contains list of values of sigma at resonances
sigmas = {}
# `resonances[i] contains list of continuum orbitals at resonances
# (instances of `CMSWavefunction`)
resonances = {}
for i in range(0, norb):
# Instead of maximizing sigma we minimize (-1)*sigma.
# find indices of local minima
minima = signal.argrelmin(-pad[:,i,0])[0].tolist()
# and indices of local maxima
maxima = signal.argrelmax(-pad[:,i,0])[0].tolist()
if len(minima) == 0:
# No local maximum of sigma, which is a local minimum of (-1)*sigma,
# so no resonance
continue
if len(maxima) == 0:
# No maximum was found, bracket minimum by end points
maxima += [0,-1]
# Each local minimum should be bracketed by two local maxima
if pke[minima[0]] < pke[maxima[0]]:
# first extremum is a minimum, so
# maxima[j-1] < minima[j] < maxima[j]
maxima = [0] + maxima
# After prepending the first point, we have | # last extremum is a minimum, which is not bracketed
# by two maxima
maxima = maxima + [-1]
# After appending last point, we have
# maxima[i ] < minima[i] < maxima[i+1]
# for all minima
assert len(minima) == len(maxima)-1
def func(energy):
# compute (-1) x photoionization cross section for initial orbital
# with index `i` at energy `energy`
pad_i = self.muffin.photoelectron_distribution(energy, grid, orbs[i:i+1,:],
pol=pol)
sigma_i = pad_i[0,0]
return (-1)*sigma_i
# list of photoelectron kinetic energy at resonance
energies[i] = []
# values of sigma at resonances
sigmas[i] = []
# list of continuum orbitals at resonances (instances of `CMSWavefunction`)
resonances[i] = []
# Refine energy at each local minimum of func(E) (= maximum of sigma)
for j in range(0, len(minima)):
# initial guess
emin0 = pke[minima[j]]
# maxima that bracket this local minimum
emax_lower = pke[maxima[j] ]
emax_upper = pke[maxima[j+1]]
assert emax_lower < emin0 < emax_upper
# We search for a minimum around emin0 by Golden search
# (https://en.wikipedia.org/wiki/Golden-section_search)
# which assumes that there is a single minimum in the interval [l,u]
alpha = 0.2
l = (1.0-alpha)*emin0 + alpha*emax_lower
u = (1.0-alpha)*emin0 + alpha*emax_upper
# find minimum of func(E) = -log(cond(M(E))) in the interval [l,u]
try:
emin = minimize_golden(func, l, u)
except StopIteration:
continue
fmin = func(emin)
assert self.muffin.energy == emin
sigma_max = -fmin
if (sigma_max < sigma_thresh):
# sigma at maximum is too small to classify as a resonance
continue
resonances[i] += self.muffin.eigenchannel_analysis()
energies[i].append(emin)
sigmas[i].append(sigma_max)
if len(resonances.keys()) > 0:
print( " -----------------------------------------------------------------------------" )
print( " Orbital Resonance Energy Sigma " )
print( " Hartree eV Mb " )
print( " -----------------------------------------------------------------------------" )
else:
print( " no resonances found with sigma > %e Mb" % sigma_thresh )
for i in sorted(resonances.keys()):
for j,res in enumerate(resonances[i]):
print( " %4.1d %4.1d %12.8f %12.8f %6.4e" %
(i+1, j+1,
res.energy,
res.energy * AtomicData.hartree_to_eV,
res.tags["sigma"] * AtomicData.bohr2_to_megabarn) )
print( "" )
return resonances
def save_pads(pke,pad, pol, tbl_file, units="eV-Mb"):
"""
A table with the PAD is written to `tbl_file`.
It contains the 4 columns PKE SIGMA BETA1 BETA_2
which define the PAD(th) at each energy according to
.. code-block:: none
PAD(th) = SIMGA/(4pi) [ 1 + BETA P (cos(th)) + BETA P (cos(th)) ]
1 1 2 2
For each orbital a block separated by a newline is written.
"""
npke,norb,dummy = pad.shape
sigma = pad[:,:,0]
beta1 = pad[:,:,1]
beta2 = pad[:,:,2]
fh = open(tbl_file, "w")
pol2str = {0 : "0 (linear)", -1 : "-1 (left)", +1: "+1 (right)"}
print( """
#
# photoelectron angular distributions (PAD) for an isotropic ensemble
#
# PAD(th) = SIMGA/(4pi) [ 1 + | # maxima[j ] < minima[j] < maxima[j+1]
if pke[minima[-1]] > pke[maxima[-1]]: | random_line_split |
PAD.py |
the continuum at energy `pke[k]`.
"""
print( " " )
print( " *******************" )
print( " * PADs *" )
print( " *******************" )
print( " " )
# number of bound orbitals
norb = len(bound_orbitals)
# number of energies
npke = len(pke)
# find values of bound orbitals on a Becke grid for numerical integration
grid, orbs = self.muffin.evaluate_orbitals(bound_orbitals)
# compute orientation-averaged PAD for each energy
pad = np.zeros((npke,norb,3))
for i,energy in enumerate(pke):
if (self.muffin.debug > -1):
print( "%3.1d of %3.1d PKE = %e Hartree ( %e eV )" % (i+1,npke, energy, energy * AtomicData.hartree_to_eV) )
pad[i,:,:] = self.muffin.photoelectron_distribution(energy, grid, orbs,
pol=pol)
plot_pads(pke, pad, pol)
save_pads(pke, pad, pol, pad_file, units=units)
# save intermediate variables for locating resonances
self._pke = pke
self._pad = pad
self._pol = pol
self._grid = grid
self._orbs = orbs
return pad
def find_resonances(self, sigma_thresh=1.0):
"""
identify resonances as peaks in the photoionization cross section
Resonances are highly peaked local maxima of sigma(E) which exceed a certain threshold.
First the local maxima are identified in the curve sigma(E) that was calculated in the
a previous call to `calculate_pads(...)`. The energetic positions of the maxima are
refined by bisection. The kinetic energy grid use to compute sigma(E) has to be fine
enough to obtain the initial guesses
Parameters
----------
sigma_thresh : float, optional
Maxima in the photoionization cross section are considered to be resonances
if they exceed a threshold, sigma > sigma_thresh (in magebarn)
Returns
-------
resonances : dict
`resonances[i]` contains a list of continuum orbitals at the resonances for ionization
from initial orbital `i`. The energy of the resonance can be accessed `resonances[i].energy`.
"""
assert hasattr(self, "_pke"), "`find_resonances(...)` must be preceded by call to `calculate_pads(...)`."
# retrieve data from previous PAD calculation
pke = self._pke
pad = self._pad
pol = self._pol
grid = self._grid
orbs = self._orbs
print( " " )
print( " **********************" )
print( " * Resonances *" )
print( " **********************" )
print( " " )
npke, norb, dummy = pad.shape
# `energies[i]` is a list of photoelectron kinetic energy at resonance
# for ionization from orbital `i`
energies = {}
# `sigmas[i]` contains list of values of sigma at resonances
sigmas = {}
# `resonances[i] contains list of continuum orbitals at resonances
# (instances of `CMSWavefunction`)
resonances = {}
for i in range(0, norb):
# Instead of maximizing sigma we minimize (-1)*sigma.
# find indices of local minima
minima = signal.argrelmin(-pad[:,i,0])[0].tolist()
# and indices of local maxima
maxima = signal.argrelmax(-pad[:,i,0])[0].tolist()
if len(minima) == 0:
# No local maximum of sigma, which is a local minimum of (-1)*sigma,
# so no resonance
continue
if len(maxima) == 0:
# No maximum was found, bracket minimum by end points
maxima += [0,-1]
# Each local minimum should be bracketed by two local maxima
if pke[minima[0]] < pke[maxima[0]]:
# first extremum is a minimum, so
# maxima[j-1] < minima[j] < maxima[j]
maxima = [0] + maxima
# After prepending the first point, we have
# maxima[j ] < minima[j] < maxima[j+1]
if pke[minima[-1]] > pke[maxima[-1]]:
# last extremum is a minimum, which is not bracketed
# by two maxima
maxima = maxima + [-1]
# After appending last point, we have
# maxima[i ] < minima[i] < maxima[i+1]
# for all minima
assert len(minima) == len(maxima)-1
def | (energy):
# compute (-1) x photoionization cross section for initial orbital
# with index `i` at energy `energy`
pad_i = self.muffin.photoelectron_distribution(energy, grid, orbs[i:i+1,:],
pol=pol)
sigma_i = pad_i[0,0]
return (-1)*sigma_i
# list of photoelectron kinetic energy at resonance
energies[i] = []
# values of sigma at resonances
sigmas[i] = []
# list of continuum orbitals at resonances (instances of `CMSWavefunction`)
resonances[i] = []
# Refine energy at each local minimum of func(E) (= maximum of sigma)
for j in range(0, len(minima)):
# initial guess
emin0 = pke[minima[j]]
# maxima that bracket this local minimum
emax_lower = pke[maxima[j] ]
emax_upper = pke[maxima[j+1]]
assert emax_lower < emin0 < emax_upper
# We search for a minimum around emin0 by Golden search
# (https://en.wikipedia.org/wiki/Golden-section_search)
# which assumes that there is a single minimum in the interval [l,u]
alpha = 0.2
l = (1.0-alpha)*emin0 + alpha*emax_lower
u = (1.0-alpha)*emin0 + alpha*emax_upper
# find minimum of func(E) = -log(cond(M(E))) in the interval [l,u]
try:
emin = minimize_golden(func, l, u)
except StopIteration:
continue
fmin = func(emin)
assert self.muffin.energy == emin
sigma_max = -fmin
if (sigma_max < sigma_thresh):
# sigma at maximum is too small to classify as a resonance
continue
resonances[i] += self.muffin.eigenchannel_analysis()
energies[i].append(emin)
sigmas[i].append(sigma_max)
if len(resonances.keys()) > 0:
print( " -----------------------------------------------------------------------------" )
print( " Orbital Resonance Energy Sigma " )
print( " Hartree eV Mb " )
print( " -----------------------------------------------------------------------------" )
else:
print( " no resonances found with sigma > %e Mb" % sigma_thresh )
for i in sorted(resonances.keys()):
for j,res in enumerate(resonances[i]):
print( " %4.1d %4.1d %12.8f %12.8f %6.4e" %
(i+1, j+1,
res.energy,
res.energy * AtomicData.hartree_to_eV,
res.tags["sigma"] * AtomicData.bohr2_to_megabarn) )
print( "" )
return resonances
def save_pads(pke,pad, pol, tbl_file, units="eV-Mb"):
"""
A table with the PAD is written to `tbl_file`.
It contains the 4 columns PKE SIGMA BETA1 BETA_2
which define the PAD(th) at each energy according to
.. code-block:: none
PAD(th) = SIMGA/(4pi) [ 1 + BETA P (cos(th)) + BETA P (cos(th)) ]
1 1 2 2
For each orbital a block separated by a newline is written.
"""
npke,norb,dummy = pad.shape
sigma = pad[:,:,0]
beta1 = pad[:,:,1]
beta2 = pad[:,:,2]
fh = open(tbl_file, "w")
pol2str = {0 : "0 (linear)", -1 : "-1 (left)", +1: "+1 (right)"}
print( """
#
# photoelectron angular distributions (PAD) for an isotropic ensemble
#
# PAD(th) = SIMGA/(4pi) [ 1 | func | identifier_name |
PAD.py |
the continuum at energy `pke[k]`.
"""
print( " " )
print( " *******************" )
print( " * PADs *" )
print( " *******************" )
print( " " )
# number of bound orbitals
norb = len(bound_orbitals)
# number of energies
npke = len(pke)
# find values of bound orbitals on a Becke grid for numerical integration
grid, orbs = self.muffin.evaluate_orbitals(bound_orbitals)
# compute orientation-averaged PAD for each energy
pad = np.zeros((npke,norb,3))
for i,energy in enumerate(pke):
if (self.muffin.debug > -1):
print( "%3.1d of %3.1d PKE = %e Hartree ( %e eV )" % (i+1,npke, energy, energy * AtomicData.hartree_to_eV) )
pad[i,:,:] = self.muffin.photoelectron_distribution(energy, grid, orbs,
pol=pol)
plot_pads(pke, pad, pol)
save_pads(pke, pad, pol, pad_file, units=units)
# save intermediate variables for locating resonances
self._pke = pke
self._pad = pad
self._pol = pol
self._grid = grid
self._orbs = orbs
return pad
def find_resonances(self, sigma_thresh=1.0):
"""
identify resonances as peaks in the photoionization cross section
Resonances are highly peaked local maxima of sigma(E) which exceed a certain threshold.
First the local maxima are identified in the curve sigma(E) that was calculated in the
a previous call to `calculate_pads(...)`. The energetic positions of the maxima are
refined by bisection. The kinetic energy grid use to compute sigma(E) has to be fine
enough to obtain the initial guesses
Parameters
----------
sigma_thresh : float, optional
Maxima in the photoionization cross section are considered to be resonances
if they exceed a threshold, sigma > sigma_thresh (in magebarn)
Returns
-------
resonances : dict
`resonances[i]` contains a list of continuum orbitals at the resonances for ionization
from initial orbital `i`. The energy of the resonance can be accessed `resonances[i].energy`.
"""
assert hasattr(self, "_pke"), "`find_resonances(...)` must be preceded by call to `calculate_pads(...)`."
# retrieve data from previous PAD calculation
pke = self._pke
pad = self._pad
pol = self._pol
grid = self._grid
orbs = self._orbs
print( " " )
print( " **********************" )
print( " * Resonances *" )
print( " **********************" )
print( " " )
npke, norb, dummy = pad.shape
# `energies[i]` is a list of photoelectron kinetic energy at resonance
# for ionization from orbital `i`
energies = {}
# `sigmas[i]` contains list of values of sigma at resonances
sigmas = {}
# `resonances[i] contains list of continuum orbitals at resonances
# (instances of `CMSWavefunction`)
resonances = {}
for i in range(0, norb):
# Instead of maximizing sigma we minimize (-1)*sigma.
# find indices of local minima
minima = signal.argrelmin(-pad[:,i,0])[0].tolist()
# and indices of local maxima
maxima = signal.argrelmax(-pad[:,i,0])[0].tolist()
if len(minima) == 0:
# No local maximum of sigma, which is a local minimum of (-1)*sigma,
# so no resonance
continue
if len(maxima) == 0:
# No maximum was found, bracket minimum by end points
maxima += [0,-1]
# Each local minimum should be bracketed by two local maxima
if pke[minima[0]] < pke[maxima[0]]:
# first extremum is a minimum, so
# maxima[j-1] < minima[j] < maxima[j]
maxima = [0] + maxima
# After prepending the first point, we have
# maxima[j ] < minima[j] < maxima[j+1]
if pke[minima[-1]] > pke[maxima[-1]]:
# last extremum is a minimum, which is not bracketed
# by two maxima
maxima = maxima + [-1]
# After appending last point, we have
# maxima[i ] < minima[i] < maxima[i+1]
# for all minima
assert len(minima) == len(maxima)-1
def func(energy):
# compute (-1) x photoionization cross section for initial orbital
# with index `i` at energy `energy`
pad_i = self.muffin.photoelectron_distribution(energy, grid, orbs[i:i+1,:],
pol=pol)
sigma_i = pad_i[0,0]
return (-1)*sigma_i
# list of photoelectron kinetic energy at resonance
energies[i] = []
# values of sigma at resonances
sigmas[i] = []
# list of continuum orbitals at resonances (instances of `CMSWavefunction`)
resonances[i] = []
# Refine energy at each local minimum of func(E) (= maximum of sigma)
for j in range(0, len(minima)):
# initial guess
emin0 = pke[minima[j]]
# maxima that bracket this local minimum
emax_lower = pke[maxima[j] ]
emax_upper = pke[maxima[j+1]]
assert emax_lower < emin0 < emax_upper
# We search for a minimum around emin0 by Golden search
# (https://en.wikipedia.org/wiki/Golden-section_search)
# which assumes that there is a single minimum in the interval [l,u]
alpha = 0.2
l = (1.0-alpha)*emin0 + alpha*emax_lower
u = (1.0-alpha)*emin0 + alpha*emax_upper
# find minimum of func(E) = -log(cond(M(E))) in the interval [l,u]
try:
emin = minimize_golden(func, l, u)
except StopIteration:
continue
fmin = func(emin)
assert self.muffin.energy == emin
sigma_max = -fmin
if (sigma_max < sigma_thresh):
# sigma at maximum is too small to classify as a resonance
continue
resonances[i] += self.muffin.eigenchannel_analysis()
energies[i].append(emin)
sigmas[i].append(sigma_max)
if len(resonances.keys()) > 0:
print( " -----------------------------------------------------------------------------" )
print( " Orbital Resonance Energy Sigma " )
print( " Hartree eV Mb " )
print( " -----------------------------------------------------------------------------" )
else:
print( " no resonances found with sigma > %e Mb" % sigma_thresh )
for i in sorted(resonances.keys()):
for j,res in enumerate(resonances[i]):
|
print( "" )
return resonances
def save_pads(pke,pad, pol, tbl_file, units="eV-Mb"):
"""
A table with the PAD is written to `tbl_file`.
It contains the 4 columns PKE SIGMA BETA1 BETA_2
which define the PAD(th) at each energy according to
.. code-block:: none
PAD(th) = SIMGA/(4pi) [ 1 + BETA P (cos(th)) + BETA P (cos(th)) ]
1 1 2 2
For each orbital a block separated by a newline is written.
"""
npke,norb,dummy = pad.shape
sigma = pad[:,:,0]
beta1 = pad[:,:,1]
beta2 = pad[:,:,2]
fh = open(tbl_file, "w")
pol2str = {0 : "0 (linear)", -1 : "-1 (left)", +1: "+1 (right)"}
print( """
#
# photoelectron angular distributions (PAD) for an isotropic ensemble
#
# PAD(th) = SIMGA/(4pi) [ 1 | print( " %4.1d %4.1d %12.8f %12.8f %6.4e" %
(i+1, j+1,
res.energy,
res.energy * AtomicData.hartree_to_eV,
res.tags["sigma"] * AtomicData.bohr2_to_megabarn) ) | conditional_block |
IssuanceHelpers.ts | SignatureToken.encode(header, claims);
return new ClaimToken(TokenType.selfIssued, jwt, '');
}
/**
* Create a verifiable credential
* @param claims Token claims
*/
public static async createVc(setup: TestSetup, credentialSubject: TokenPayload, configuration: string, jwkPrivate: any, jwkPublic: any): Promise<ClaimToken> {
// Set the mock because we will resolve the signing key as did
await this.resolverMock(setup, setup.defaultIssuerDid, jwkPrivate, jwkPublic);
const statusUrl = 'https://portableidentitycards.azure-api.net/42b39d9d-0cdd-4ae0-b251-b7b39a561f91/api/portable/v1.0/status';
// Status mock
setup.fetchMock.post(statusUrl, {}, { overwriteRoutes: true });
let vcTemplate = {
"jti": "urn:pic:80a509d2-99d4-4d6c-86a7-7b2636944080",
"vc": {
"@context": [
"https://www.w3.org/2018/credentials/v1",
"https://portableidentitycards.azure-api.net/42b39d9d-0cdd-4ae0-b251-b7b39a561f91/api/portable/v1.0/contracts/test/schema"
],
"type": [
"VerifiableCredential",
"DrivingLicense"
],
"credentialSubject": {
},
"credentialStatus": {
"id": `${statusUrl}`,
"type": "PortableIdentityCardServiceCredentialStatus2020"
}
},
iss: `${setup.defaultIssuerDid}`,
sub: `${setup.defaultUserDid}`
};
vcTemplate.vc.credentialSubject = credentialSubject;
return IssuanceHelpers.signAToken(setup, vcTemplate, configuration, jwkPrivate);
}
/**
* Create a verifiable presentation
* @param claims Token claims
*/
public static async createVp(setup: TestSetup, vcs: ClaimToken[], jwkPrivate: any): Promise<ClaimToken> {
let vpTemplate = {
"jti": "baab2cdccb38408d8f1179071fe37dbe",
"scope": "openid did_authn verify",
"vp": {
"@context": [
"https://www.w3.org/2018/credentials/v1"
],
"type": [
"VerifiablePresentation"
],
"verifiableCredential": []
},
iss: `${setup.defaultUserDid}`,
aud: `${setup.defaultIssuerDid}`,
};
for (let inx = 0; inx < vcs.length; inx++) {
(vpTemplate.vp.verifiableCredential as string[]).push(<string>vcs[inx].rawToken);
}
return IssuanceHelpers.signAToken(setup, vpTemplate, '', jwkPrivate);
}
/**
* Generate a signing keys and set the configuration mock
*/
public static async generateSigningKey(_setup: TestSetup, kid: string): Promise<[any, any]> {
const generator = new Subtle();
const key: any = await generator.generateKey(
<any>{
name: "RSASSA-PKCS1-v1_5",
modulusLength: 2048,
publicExponent: new Uint8Array([0x01, 0x00, 0x01]),
hash: { name: "SHA-256" },
},
true,
["sign", "verify"]);
const jwkPublic = await generator.exportKey('jwk', key.publicKey);
const jwkPrivate = await generator.exportKey('jwk', key.privateKey);
(<any>jwkPrivate).kid = (<any>jwkPublic).kid = kid;
return [jwkPrivate, jwkPublic];
}
// Generate a signing keys and set the configuration mock
public static async generateSigningKeyAndSetConfigurationMock(setup: TestSetup, kid: string, configuration?: string, issuer?: string): Promise<[any, any, string]> {
// setup http mock
configuration = configuration || setup.defaultIdTokenConfiguration;
issuer = issuer || setup.tokenIssuer;
const jwks = setup.defaultIdTokenJwksConfiguration;
setup.fetchMock.get(configuration, { "jwks_uri": `${jwks}`, "issuer": `${issuer}` }, { overwriteRoutes: true });
const [jwkPrivate, jwkPublic] = await IssuanceHelpers.generateSigningKey(setup, kid);
setup.fetchMock.get(jwks, `{"keys": [${JSON.stringify(jwkPublic)}]}`, { overwriteRoutes: true });
return [jwkPrivate, jwkPublic, configuration];
}
// Set resolver mock
public static async resolverMock(setup: TestSetup, did: string, jwkPrivate?: any, jwkPublic?: any): Promise<[DidDocument, any, any]> {
// setup http mock
if (!jwkPrivate && !jwkPublic) {
[jwkPrivate, jwkPublic] = await IssuanceHelpers.generateSigningKey(setup, `${did}#signing`);
}
const didDocument = {
didDocument: new DidDocument({
"@context": "https://w3id.org/did/v1",
id: did,
publicKey: <any>[{
id: jwkPublic.kid,
type: 'RsaVerificationKey2018',
controller: did,
publicKeyJwk: jwkPublic
}]
})
};
(didDocument.didDocument as any)['@context'] = 'https://w3id.org/did/v1';
// Resolver mock
const resolverUrl = `${setup.resolverUrl}/${did}`;
setup.fetchMock.get(resolverUrl, didDocument, { overwriteRoutes: true });
return [didDocument.didDocument, jwkPrivate, jwkPublic];
}
// Sign a token
public static async signAToken(setup: TestSetup, payload: object, configuration: string, jwkPrivate: any): Promise<ClaimToken> {
const keyId = new KeyReference(jwkPrivate.kid);
await setup.keyStore.save(keyId, <any>jwkPrivate);
setup.validatorOptions.crypto.builder.useSigningKeyReference(keyId);
setup.validatorOptions.crypto.signingProtocol(JoseBuilder.JWT).builder.useKid(keyId.keyReference);
const signature = await setup.validatorOptions.crypto.signingProtocol(JoseBuilder.JWT).sign(payload);
const token = await setup.validatorOptions.crypto.signingProtocol(JoseBuilder.JWT).serialize();
let claimToken = ClaimToken.create(token, configuration);
return claimToken;
}
public static async createRequest(
setup: TestSetup,
tokenDescription: TokenType,
issuance: boolean,
idTokenIssuer?: string,
idTokenAudience?: string,
idTokenExp?: number): Promise<[ClaimToken, ValidationOptions, any]> {
const options = new ValidationOptions(setup.validatorOptions, tokenDescription);
const [didJwkPrivate, didJwkPublic] = await IssuanceHelpers.generateSigningKey(setup, setup.defaulUserDidKid);
const [tokenJwkPrivate, tokenJwkPublic, tokenConfiguration] = await IssuanceHelpers.generateSigningKeyAndSetConfigurationMock(setup, setup.defaulIssuerDidKid);
const [didDocument, jwkPrivate2, jwkPublic2] = await IssuanceHelpers.resolverMock(setup, setup.defaultUserDid, didJwkPrivate, didJwkPublic);
const idTokenPayload = {
upn: '[email protected]',
name: 'Jules Winnfield',
iss: idTokenIssuer ?? setup.tokenIssuer,
aud: idTokenAudience ?? setup.tokenAudience,
exp: idTokenExp ?? Math.trunc(Date.now() / 1000) + 10000,
};
const idToken = await IssuanceHelpers.signAToken(
setup,
idTokenPayload,
tokenConfiguration,
tokenJwkPrivate);
const vcConfiguration = 'https://vcexample.com/schema';
const vcPayload = {
givenName: 'Jules',
familyName: 'Winnfield'
};
const vc = await IssuanceHelpers.createVc(
setup,
vcPayload,
vcConfiguration,
tokenJwkPrivate,
tokenJwkPublic);
const vp = await IssuanceHelpers.createVp(setup, [vc], didJwkPrivate);
const si = IssuanceHelpers.createSelfIssuedToken({ name: 'jules', birthDate: new Date().toString() });
let attestations: { [claim: string]: any };
if (issuance) {
attestations = {
selfIssued: si.rawToken,
idTokens: {},
presentations: {}
};
attestations.idTokens[setup.defaultIdTokenConfiguration] = idToken.rawToken;
attestations.presentations['DrivingLicense'] = vp.rawToken;
} else | {
attestations = {
presentations: {}
};
attestations.presentations['DrivingLicense'] = vp.rawToken;
} | conditional_block |
|
IssuanceHelpers.ts | contract,
attestations,
iss: 'https://self-issued.me',
aud: setup.AUDIENCE,
jti: IssuanceHelpers.jti,
sub_jwk: key,
sub: createJwkThumbprint(key),
did: setup.defaultUserDid
}
if(setup.siopMutator){
siop = setup.siopMutator(siop);
}
return IssuanceHelpers.createSiopRequestWithPayload(setup, siop, key);
}
/**
* Create a verifiable credentiaL
* @param claims Credential claims
*/
public static createSelfIssuedToken(claims: TokenPayload): ClaimToken {
const header = {
alg: "none",
typ: 'JWT'
};
const jwt = JsonWebSignatureToken.encode(header, claims);
return new ClaimToken(TokenType.selfIssued, jwt, '');
}
/**
* Create a verifiable credential
* @param claims Token claims
*/
public static async createVc(setup: TestSetup, credentialSubject: TokenPayload, configuration: string, jwkPrivate: any, jwkPublic: any): Promise<ClaimToken> {
// Set the mock because we will resolve the signing key as did
await this.resolverMock(setup, setup.defaultIssuerDid, jwkPrivate, jwkPublic);
const statusUrl = 'https://portableidentitycards.azure-api.net/42b39d9d-0cdd-4ae0-b251-b7b39a561f91/api/portable/v1.0/status';
// Status mock
setup.fetchMock.post(statusUrl, {}, { overwriteRoutes: true });
let vcTemplate = {
"jti": "urn:pic:80a509d2-99d4-4d6c-86a7-7b2636944080",
"vc": {
"@context": [
"https://www.w3.org/2018/credentials/v1",
"https://portableidentitycards.azure-api.net/42b39d9d-0cdd-4ae0-b251-b7b39a561f91/api/portable/v1.0/contracts/test/schema"
],
"type": [
"VerifiableCredential",
"DrivingLicense"
],
"credentialSubject": {
},
"credentialStatus": {
"id": `${statusUrl}`,
"type": "PortableIdentityCardServiceCredentialStatus2020"
}
},
iss: `${setup.defaultIssuerDid}`,
sub: `${setup.defaultUserDid}`
};
vcTemplate.vc.credentialSubject = credentialSubject;
return IssuanceHelpers.signAToken(setup, vcTemplate, configuration, jwkPrivate);
}
/**
* Create a verifiable presentation
* @param claims Token claims
*/
public static async createVp(setup: TestSetup, vcs: ClaimToken[], jwkPrivate: any): Promise<ClaimToken> {
let vpTemplate = {
"jti": "baab2cdccb38408d8f1179071fe37dbe",
"scope": "openid did_authn verify",
"vp": {
"@context": [
"https://www.w3.org/2018/credentials/v1"
],
"type": [
"VerifiablePresentation"
],
"verifiableCredential": []
},
iss: `${setup.defaultUserDid}`,
aud: `${setup.defaultIssuerDid}`,
};
for (let inx = 0; inx < vcs.length; inx++) {
(vpTemplate.vp.verifiableCredential as string[]).push(<string>vcs[inx].rawToken);
}
return IssuanceHelpers.signAToken(setup, vpTemplate, '', jwkPrivate);
}
/**
* Generate a signing keys and set the configuration mock
*/
public static async generateSigningKey(_setup: TestSetup, kid: string): Promise<[any, any]> {
const generator = new Subtle();
const key: any = await generator.generateKey(
<any>{
name: "RSASSA-PKCS1-v1_5",
modulusLength: 2048,
publicExponent: new Uint8Array([0x01, 0x00, 0x01]),
hash: { name: "SHA-256" },
},
true,
["sign", "verify"]);
const jwkPublic = await generator.exportKey('jwk', key.publicKey);
const jwkPrivate = await generator.exportKey('jwk', key.privateKey);
(<any>jwkPrivate).kid = (<any>jwkPublic).kid = kid;
return [jwkPrivate, jwkPublic];
}
// Generate a signing keys and set the configuration mock
public static async generateSigningKeyAndSetConfigurationMock(setup: TestSetup, kid: string, configuration?: string, issuer?: string): Promise<[any, any, string]> {
// setup http mock
configuration = configuration || setup.defaultIdTokenConfiguration;
issuer = issuer || setup.tokenIssuer;
const jwks = setup.defaultIdTokenJwksConfiguration;
setup.fetchMock.get(configuration, { "jwks_uri": `${jwks}`, "issuer": `${issuer}` }, { overwriteRoutes: true });
const [jwkPrivate, jwkPublic] = await IssuanceHelpers.generateSigningKey(setup, kid);
setup.fetchMock.get(jwks, `{"keys": [${JSON.stringify(jwkPublic)}]}`, { overwriteRoutes: true });
return [jwkPrivate, jwkPublic, configuration];
}
// Set resolver mock
public static async resolverMock(setup: TestSetup, did: string, jwkPrivate?: any, jwkPublic?: any): Promise<[DidDocument, any, any]> {
// setup http mock
if (!jwkPrivate && !jwkPublic) {
[jwkPrivate, jwkPublic] = await IssuanceHelpers.generateSigningKey(setup, `${did}#signing`);
}
const didDocument = {
didDocument: new DidDocument({
"@context": "https://w3id.org/did/v1",
id: did,
publicKey: <any>[{
id: jwkPublic.kid,
type: 'RsaVerificationKey2018',
controller: did,
publicKeyJwk: jwkPublic
}]
})
};
(didDocument.didDocument as any)['@context'] = 'https://w3id.org/did/v1';
// Resolver mock
const resolverUrl = `${setup.resolverUrl}/${did}`;
setup.fetchMock.get(resolverUrl, didDocument, { overwriteRoutes: true });
return [didDocument.didDocument, jwkPrivate, jwkPublic];
}
// Sign a token
public static async signAToken(setup: TestSetup, payload: object, configuration: string, jwkPrivate: any): Promise<ClaimToken> {
const keyId = new KeyReference(jwkPrivate.kid);
await setup.keyStore.save(keyId, <any>jwkPrivate);
setup.validatorOptions.crypto.builder.useSigningKeyReference(keyId);
setup.validatorOptions.crypto.signingProtocol(JoseBuilder.JWT).builder.useKid(keyId.keyReference);
const signature = await setup.validatorOptions.crypto.signingProtocol(JoseBuilder.JWT).sign(payload);
const token = await setup.validatorOptions.crypto.signingProtocol(JoseBuilder.JWT).serialize();
let claimToken = ClaimToken.create(token, configuration);
return claimToken;
}
public static async createRequest(
setup: TestSetup,
tokenDescription: TokenType,
issuance: boolean,
idTokenIssuer?: string,
idTokenAudience?: string,
idTokenExp?: number): Promise<[ClaimToken, ValidationOptions, any]> {
const options = new ValidationOptions(setup.validatorOptions, tokenDescription);
const [didJwkPrivate, didJwkPublic] = await IssuanceHelpers.generateSigningKey(setup, setup.defaulUserDidKid);
const [tokenJwkPrivate, tokenJwkPublic, tokenConfiguration] = await IssuanceHelpers.generateSigningKeyAndSetConfigurationMock(setup, setup.defaulIssuerDidKid);
const [didDocument, jwkPrivate2, jwkPublic2] = await IssuanceHelpers.resolverMock(setup, setup.defaultUserDid, didJwkPrivate, didJwkPublic);
const idTokenPayload = {
upn: '[email protected]',
name: 'Jules Winnfield',
iss: idTokenIssuer ?? setup.tokenIssuer,
aud: idTokenAudience ?? setup.tokenAudience,
exp: idTokenExp ?? Math.trunc(Date.now() / 1000) + 10000,
};
const idToken = await IssuanceHelpers.signAToken(
setup,
idTokenPayload,
tokenConfiguration,
tokenJwkPrivate);
const vcConfiguration = 'https://vcexample.com/schema';
const vcPayload = {
givenName: 'Jules',
familyName: 'Winnfield' | };
const vc = await IssuanceHelpers.createVc(
setup,
vcPayload,
vcConfiguration, | random_line_split |
|
IssuanceHelpers.ts | , '', key);
return claimToken;
}
/**
* Create siop request
*/
public static async createSiopRequest(setup: TestSetup, key: any, contract: string | undefined, nonce: string, attestations: any): Promise<ClaimToken> {
let siop: TokenPayload = {
nonce,
contract,
attestations,
iss: 'https://self-issued.me',
aud: setup.AUDIENCE,
jti: IssuanceHelpers.jti,
sub_jwk: key,
sub: createJwkThumbprint(key),
did: setup.defaultUserDid
}
if(setup.siopMutator){
siop = setup.siopMutator(siop);
}
return IssuanceHelpers.createSiopRequestWithPayload(setup, siop, key);
}
/**
* Create a verifiable credentiaL
* @param claims Credential claims
*/
public static createSelfIssuedToken(claims: TokenPayload): ClaimToken {
const header = {
alg: "none",
typ: 'JWT'
};
const jwt = JsonWebSignatureToken.encode(header, claims);
return new ClaimToken(TokenType.selfIssued, jwt, '');
}
/**
* Create a verifiable credential
* @param claims Token claims
*/
public static async createVc(setup: TestSetup, credentialSubject: TokenPayload, configuration: string, jwkPrivate: any, jwkPublic: any): Promise<ClaimToken> {
// Set the mock because we will resolve the signing key as did
await this.resolverMock(setup, setup.defaultIssuerDid, jwkPrivate, jwkPublic);
const statusUrl = 'https://portableidentitycards.azure-api.net/42b39d9d-0cdd-4ae0-b251-b7b39a561f91/api/portable/v1.0/status';
// Status mock
setup.fetchMock.post(statusUrl, {}, { overwriteRoutes: true });
let vcTemplate = {
"jti": "urn:pic:80a509d2-99d4-4d6c-86a7-7b2636944080",
"vc": {
"@context": [
"https://www.w3.org/2018/credentials/v1",
"https://portableidentitycards.azure-api.net/42b39d9d-0cdd-4ae0-b251-b7b39a561f91/api/portable/v1.0/contracts/test/schema"
],
"type": [
"VerifiableCredential",
"DrivingLicense"
],
"credentialSubject": {
},
"credentialStatus": {
"id": `${statusUrl}`,
"type": "PortableIdentityCardServiceCredentialStatus2020"
}
},
iss: `${setup.defaultIssuerDid}`,
sub: `${setup.defaultUserDid}`
};
vcTemplate.vc.credentialSubject = credentialSubject;
return IssuanceHelpers.signAToken(setup, vcTemplate, configuration, jwkPrivate);
}
/**
* Create a verifiable presentation
* @param claims Token claims
*/
public static async createVp(setup: TestSetup, vcs: ClaimToken[], jwkPrivate: any): Promise<ClaimToken> {
let vpTemplate = {
"jti": "baab2cdccb38408d8f1179071fe37dbe",
"scope": "openid did_authn verify",
"vp": {
"@context": [
"https://www.w3.org/2018/credentials/v1"
],
"type": [
"VerifiablePresentation"
],
"verifiableCredential": []
},
iss: `${setup.defaultUserDid}`,
aud: `${setup.defaultIssuerDid}`,
};
for (let inx = 0; inx < vcs.length; inx++) {
(vpTemplate.vp.verifiableCredential as string[]).push(<string>vcs[inx].rawToken);
}
return IssuanceHelpers.signAToken(setup, vpTemplate, '', jwkPrivate);
}
/**
* Generate a signing keys and set the configuration mock
*/
public static async generateSigningKey(_setup: TestSetup, kid: string): Promise<[any, any]> {
const generator = new Subtle();
const key: any = await generator.generateKey(
<any>{
name: "RSASSA-PKCS1-v1_5",
modulusLength: 2048,
publicExponent: new Uint8Array([0x01, 0x00, 0x01]),
hash: { name: "SHA-256" },
},
true,
["sign", "verify"]);
const jwkPublic = await generator.exportKey('jwk', key.publicKey);
const jwkPrivate = await generator.exportKey('jwk', key.privateKey);
(<any>jwkPrivate).kid = (<any>jwkPublic).kid = kid;
return [jwkPrivate, jwkPublic];
}
// Generate a signing keys and set the configuration mock
public static async generateSigningKeyAndSetConfigurationMock(setup: TestSetup, kid: string, configuration?: string, issuer?: string): Promise<[any, any, string]> {
// setup http mock
configuration = configuration || setup.defaultIdTokenConfiguration;
issuer = issuer || setup.tokenIssuer;
const jwks = setup.defaultIdTokenJwksConfiguration;
setup.fetchMock.get(configuration, { "jwks_uri": `${jwks}`, "issuer": `${issuer}` }, { overwriteRoutes: true });
const [jwkPrivate, jwkPublic] = await IssuanceHelpers.generateSigningKey(setup, kid);
setup.fetchMock.get(jwks, `{"keys": [${JSON.stringify(jwkPublic)}]}`, { overwriteRoutes: true });
return [jwkPrivate, jwkPublic, configuration];
}
// Set resolver mock
public static async resolverMock(setup: TestSetup, did: string, jwkPrivate?: any, jwkPublic?: any): Promise<[DidDocument, any, any]> {
// setup http mock
if (!jwkPrivate && !jwkPublic) {
[jwkPrivate, jwkPublic] = await IssuanceHelpers.generateSigningKey(setup, `${did}#signing`);
}
const didDocument = {
didDocument: new DidDocument({
"@context": "https://w3id.org/did/v1",
id: did,
publicKey: <any>[{
id: jwkPublic.kid,
type: 'RsaVerificationKey2018',
controller: did,
publicKeyJwk: jwkPublic
}]
})
};
(didDocument.didDocument as any)['@context'] = 'https://w3id.org/did/v1';
// Resolver mock
const resolverUrl = `${setup.resolverUrl}/${did}`;
setup.fetchMock.get(resolverUrl, didDocument, { overwriteRoutes: true });
return [didDocument.didDocument, jwkPrivate, jwkPublic];
}
// Sign a token
public static async signAToken(setup: TestSetup, payload: object, configuration: string, jwkPrivate: any): Promise<ClaimToken> |
public static async createRequest(
setup: TestSetup,
tokenDescription: TokenType,
issuance: boolean,
idTokenIssuer?: string,
idTokenAudience?: string,
idTokenExp?: number): Promise<[ClaimToken, ValidationOptions, any]> {
const options = new ValidationOptions(setup.validatorOptions, tokenDescription);
const [didJwkPrivate, didJwkPublic] = await IssuanceHelpers.generateSigningKey(setup, setup.defaulUserDidKid);
const [tokenJwkPrivate, tokenJwkPublic, tokenConfiguration] = await IssuanceHelpers.generateSigningKeyAndSetConfigurationMock(setup, setup.defaulIssuerDidKid);
const [didDocument, jwkPrivate2, jwkPublic2] = await IssuanceHelpers.resolverMock(setup, setup.defaultUserDid, didJwkPrivate, didJwkPublic);
const idTokenPayload = {
upn: '[email protected]',
name: 'Jules Winnfield',
iss: idTokenIssuer ?? setup.tokenIssuer,
aud: idTokenAudience ?? setup.tokenAudience,
exp: idTokenExp ?? Math.trunc(Date.now() / 1000) + 10000,
};
const idToken = await IssuanceHelpers.signAToken(
setup,
| {
const keyId = new KeyReference(jwkPrivate.kid);
await setup.keyStore.save(keyId, <any>jwkPrivate);
setup.validatorOptions.crypto.builder.useSigningKeyReference(keyId);
setup.validatorOptions.crypto.signingProtocol(JoseBuilder.JWT).builder.useKid(keyId.keyReference);
const signature = await setup.validatorOptions.crypto.signingProtocol(JoseBuilder.JWT).sign(payload);
const token = await setup.validatorOptions.crypto.signingProtocol(JoseBuilder.JWT).serialize();
let claimToken = ClaimToken.create(token, configuration);
return claimToken;
} | identifier_body |
IssuanceHelpers.ts | , '', key);
return claimToken;
}
/**
* Create siop request
*/
public static async createSiopRequest(setup: TestSetup, key: any, contract: string | undefined, nonce: string, attestations: any): Promise<ClaimToken> {
let siop: TokenPayload = {
nonce,
contract,
attestations,
iss: 'https://self-issued.me',
aud: setup.AUDIENCE,
jti: IssuanceHelpers.jti,
sub_jwk: key,
sub: createJwkThumbprint(key),
did: setup.defaultUserDid
}
if(setup.siopMutator){
siop = setup.siopMutator(siop);
}
return IssuanceHelpers.createSiopRequestWithPayload(setup, siop, key);
}
/**
* Create a verifiable credentiaL
* @param claims Credential claims
*/
public static createSelfIssuedToken(claims: TokenPayload): ClaimToken {
const header = {
alg: "none",
typ: 'JWT'
};
const jwt = JsonWebSignatureToken.encode(header, claims);
return new ClaimToken(TokenType.selfIssued, jwt, '');
}
/**
* Create a verifiable credential
* @param claims Token claims
*/
public static async createVc(setup: TestSetup, credentialSubject: TokenPayload, configuration: string, jwkPrivate: any, jwkPublic: any): Promise<ClaimToken> {
// Set the mock because we will resolve the signing key as did
await this.resolverMock(setup, setup.defaultIssuerDid, jwkPrivate, jwkPublic);
const statusUrl = 'https://portableidentitycards.azure-api.net/42b39d9d-0cdd-4ae0-b251-b7b39a561f91/api/portable/v1.0/status';
// Status mock
setup.fetchMock.post(statusUrl, {}, { overwriteRoutes: true });
let vcTemplate = {
"jti": "urn:pic:80a509d2-99d4-4d6c-86a7-7b2636944080",
"vc": {
"@context": [
"https://www.w3.org/2018/credentials/v1",
"https://portableidentitycards.azure-api.net/42b39d9d-0cdd-4ae0-b251-b7b39a561f91/api/portable/v1.0/contracts/test/schema"
],
"type": [
"VerifiableCredential",
"DrivingLicense"
],
"credentialSubject": {
},
"credentialStatus": {
"id": `${statusUrl}`,
"type": "PortableIdentityCardServiceCredentialStatus2020"
}
},
iss: `${setup.defaultIssuerDid}`,
sub: `${setup.defaultUserDid}`
};
vcTemplate.vc.credentialSubject = credentialSubject;
return IssuanceHelpers.signAToken(setup, vcTemplate, configuration, jwkPrivate);
}
/**
* Create a verifiable presentation
* @param claims Token claims
*/
public static async createVp(setup: TestSetup, vcs: ClaimToken[], jwkPrivate: any): Promise<ClaimToken> {
let vpTemplate = {
"jti": "baab2cdccb38408d8f1179071fe37dbe",
"scope": "openid did_authn verify",
"vp": {
"@context": [
"https://www.w3.org/2018/credentials/v1"
],
"type": [
"VerifiablePresentation"
],
"verifiableCredential": []
},
iss: `${setup.defaultUserDid}`,
aud: `${setup.defaultIssuerDid}`,
};
for (let inx = 0; inx < vcs.length; inx++) {
(vpTemplate.vp.verifiableCredential as string[]).push(<string>vcs[inx].rawToken);
}
return IssuanceHelpers.signAToken(setup, vpTemplate, '', jwkPrivate);
}
/**
* Generate a signing keys and set the configuration mock
*/
public static async generateSigningKey(_setup: TestSetup, kid: string): Promise<[any, any]> {
const generator = new Subtle();
const key: any = await generator.generateKey(
<any>{
name: "RSASSA-PKCS1-v1_5",
modulusLength: 2048,
publicExponent: new Uint8Array([0x01, 0x00, 0x01]),
hash: { name: "SHA-256" },
},
true,
["sign", "verify"]);
const jwkPublic = await generator.exportKey('jwk', key.publicKey);
const jwkPrivate = await generator.exportKey('jwk', key.privateKey);
(<any>jwkPrivate).kid = (<any>jwkPublic).kid = kid;
return [jwkPrivate, jwkPublic];
}
// Generate a signing keys and set the configuration mock
public static async generateSigningKeyAndSetConfigurationMock(setup: TestSetup, kid: string, configuration?: string, issuer?: string): Promise<[any, any, string]> {
// setup http mock
configuration = configuration || setup.defaultIdTokenConfiguration;
issuer = issuer || setup.tokenIssuer;
const jwks = setup.defaultIdTokenJwksConfiguration;
setup.fetchMock.get(configuration, { "jwks_uri": `${jwks}`, "issuer": `${issuer}` }, { overwriteRoutes: true });
const [jwkPrivate, jwkPublic] = await IssuanceHelpers.generateSigningKey(setup, kid);
setup.fetchMock.get(jwks, `{"keys": [${JSON.stringify(jwkPublic)}]}`, { overwriteRoutes: true });
return [jwkPrivate, jwkPublic, configuration];
}
// Set resolver mock
public static async resolverMock(setup: TestSetup, did: string, jwkPrivate?: any, jwkPublic?: any): Promise<[DidDocument, any, any]> {
// setup http mock
if (!jwkPrivate && !jwkPublic) {
[jwkPrivate, jwkPublic] = await IssuanceHelpers.generateSigningKey(setup, `${did}#signing`);
}
const didDocument = {
didDocument: new DidDocument({
"@context": "https://w3id.org/did/v1",
id: did,
publicKey: <any>[{
id: jwkPublic.kid,
type: 'RsaVerificationKey2018',
controller: did,
publicKeyJwk: jwkPublic
}]
})
};
(didDocument.didDocument as any)['@context'] = 'https://w3id.org/did/v1';
// Resolver mock
const resolverUrl = `${setup.resolverUrl}/${did}`;
setup.fetchMock.get(resolverUrl, didDocument, { overwriteRoutes: true });
return [didDocument.didDocument, jwkPrivate, jwkPublic];
}
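// For illustration (hypothetical values, not from the source): with a
// resolverUrl of 'https://resolver.example/1.0/identifiers', calling
// resolverMock(setup, 'did:test:user') registers a GET mock at
// 'https://resolver.example/1.0/identifiers/did:test:user' that returns the
// DID document built above.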
// Sign a token
public static async | (setup: TestSetup, payload: object, configuration: string, jwkPrivate: any): Promise<ClaimToken> {
const keyId = new KeyReference(jwkPrivate.kid);
await setup.keyStore.save(keyId, <any>jwkPrivate);
setup.validatorOptions.crypto.builder.useSigningKeyReference(keyId);
setup.validatorOptions.crypto.signingProtocol(JoseBuilder.JWT).builder.useKid(keyId.keyReference);
const signature = await setup.validatorOptions.crypto.signingProtocol(JoseBuilder.JWT).sign(payload);
const token = await setup.validatorOptions.crypto.signingProtocol(JoseBuilder.JWT).serialize();
let claimToken = ClaimToken.create(token, configuration);
return claimToken;
}
public static async createRequest(
setup: TestSetup,
tokenDescription: TokenType,
issuance: boolean,
idTokenIssuer?: string,
idTokenAudience?: string,
idTokenExp?: number): Promise<[ClaimToken, ValidationOptions, any]> {
const options = new ValidationOptions(setup.validatorOptions, tokenDescription);
const [didJwkPrivate, didJwkPublic] = await IssuanceHelpers.generateSigningKey(setup, setup.defaulUserDidKid);
const [tokenJwkPrivate, tokenJwkPublic, tokenConfiguration] = await IssuanceHelpers.generateSigningKeyAndSetConfigurationMock(setup, setup.defaulIssuerDidKid);
const [didDocument, jwkPrivate2, jwkPublic2] = await IssuanceHelpers.resolverMock(setup, setup.defaultUserDid, didJwkPrivate, didJwkPublic);
const idTokenPayload = {
upn: '[email protected]',
name: 'Jules Winnfield',
iss: idTokenIssuer ?? setup.tokenIssuer,
aud: idTokenAudience ?? setup.tokenAudience,
exp: idTokenExp ?? Math.trunc(Date.now() / 1000) + 10000,
};
const idToken = await IssuanceHelpers.signAToken(
setup,
id | signAToken | identifier_name |
ui.rs | _pair()`.
InitPair,
/// Describes a possible error during call to `noecho()`.
Noecho,
/// Describes a possible error during call to `start_color()`.
StartColor,
/// Describes a possible error during call to `use_default_colors()`.
UseDefaultColors,
/// Describes a possible error during call to `waddch()`.
Waddch,
/// Describes a possible error during call to `waddstr()`.
Waddstr,
/// Describes a possible error during call to `wchgat()`.
Wchgat,
/// Describes a possible error during call to `wclear()`.
Wclear,
/// Describes a possible error during call to `wcleartoeol()`.
Wcleartoeol,
/// Describes a possible error during call to `wdelch()`.
Wdelch,
/// Describes a possible error during call to `winsch()`.
Winsch,
/// Describes a possible error during call to `wmove()`.
Wmove,
/// Describes a possible error during call to `nodelay()`.
Nodelay,
}
impl Error {
/// Returns the function that caused the current `Error`.
fn get_function(&self) -> &str {
match self {
Error::Endwin => "endwin",
Error::Flash => "flash",
Error::InitPair => "init_pair",
Error::Noecho => "noecho",
Error::StartColor => "start_color",
Error::UseDefaultColors => "use_default_colors",
Error::Waddch => "waddch",
Error::Waddstr => "waddstr",
Error::Wchgat => "wchgat",
Error::Wclear => "wclear",
Error::Wcleartoeol => "wcleartoeol",
Error::Wdelch => "wdelch",
Error::Winsch => "winsch",
Error::Wmove => "wmove",
Error::Nodelay => "nodelay",
Error::NoUi => "",
}
}
}
impl Display for Error {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Error::NoUi => write!(f, "No UserInterface was created."),
_ => write!(f, "Failed while calling {}().", self.get_function()),
}
}
}
impl error::Error for Error {}
/// Signifies a specific cell in the grid.
#[derive(Clone, Copy, Eq, Debug, Default, Hash, Ord, PartialEq, PartialOrd)]
pub struct Address {
/// The index of the row that contains the cell (starts at 0).
row: Index,
/// The index of the column that contains the cell (starts at 0).
column: Index,
}
impl Address {
/// Creates a new `Address` with a given row and column.
#[inline]
pub fn new(row: Index, column: Index) -> Self {
Self { row, column }
}
/// Returns the column of `self`.
///
/// Used with [`pancurses`].
///
/// [`pancurses`]: ../../pancurses/index.html
fn x(self) -> IndexType {
IndexType::from(self.column)
}
/// Returns the row of `self`.
///
/// Used with [`pancurses`].
///
/// [`pancurses`]: ../../pancurses/index.html
fn y(self) -> IndexType {
IndexType::from(self.row)
}
}
impl Display for Address {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "({}, {})", self.row, self.column)
}
}
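// Illustrative only (not in the original source): `Address::new(2, 5)` names
// row 2, column 5 and formats as "(2, 5)"; `y()` returns the row and `x()`
// the column, matching the (y, x) argument order that pancurses expects.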
/// Signifies a modification to the grid.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub enum Change {
/// Removes the previous cell, moving all subsequent cells to the left.
Backspace,
/// Clears all cells.
Clear,
/// Sets the color of a given number of cells.
Format(Length, Color),
/// Inserts a cell containing a character, moving all subsequent cells to the right.
Insert(char),
/// Does nothing.
Nothing,
/// Writes the characters of a string in sequence and clears all subsequent cells.
Row(String),
/// Flashes the display.
Flash,
}
impl Default for Change {
#[inline]
fn default() -> Self {
Change::Nothing
}
}
impl Display for Change { | Change::Clear => write!(f, "Clear"),
Change::Format(n, color) => write!(f, "Format {} cells to {}", n, color),
Change::Insert(input) => write!(f, "Insert '{}'", input),
Change::Nothing => write!(f, "Nothing"),
Change::Row(row_str) => write!(f, "Write row '{}'", row_str),
Change::Flash => write!(f, "Flash"),
}
}
}
/// Signifies a color.
// Order must be kept as defined to match pancurses.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum Color {
/// The default foreground on the default background.
Default,
/// The default foreground on a red background.
Red,
/// The default foreground on a green background.
Green,
/// The default foreground on a yellow background.
Yellow,
/// The default foreground on a blue background.
Blue,
}
impl Color {
/// Converts `self` to a `color-pair` as specified in [`pancurses`].
fn cp(self) -> i16 {
self as i16
}
}
impl Display for Color {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Color::Default => write!(f, "Default"),
Color::Red => write!(f, "Red"),
Color::Green => write!(f, "Green"),
Color::Yellow => write!(f, "Yellow"),
Color::Blue => write!(f, "Blue"),
}
}
}
/// Signifies a [`Change`] to make to an [`Address`].
///
/// [`Change`]: enum.Change.html
/// [`Address`]: struct.Address.html
#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)]
pub struct Edit {
/// The [`Change`] to be made.
change: Change,
/// The [`Address`] on which the [`Change`] is intended.
address: Option<Address>,
}
impl Edit {
/// Creates a new `Edit`.
#[inline]
pub fn new(address: Option<Address>, change: Change) -> Self {
Self { address, change }
}
}
/// The interface between the user and the application.
///
/// All output is displayed in a grid of cells. Each cell contains one character and can change its
/// background color.
pub trait UserInterface: Debug {
/// Sets up the user interface for use.
fn init(&self) -> Outcome;
/// Closes the user interface.
fn close(&self) -> Outcome;
/// Returns the number of cells that make up the height of the grid.
fn grid_height(&self) -> Result<Index, TryFromIntError>;
/// Applies the edit to the output.
fn apply(&self, edit: Edit) -> Outcome;
/// Flashes the output.
fn flash(&self) -> Outcome;
/// Returns the input from the user.
///
/// Returns [`None`] if no character input is provided.
fn receive_input(&self) -> Option<Input>;
}
/// The user interface provided by a terminal.
#[derive(Debug)]
pub struct Terminal {
/// The window that interfaces with the application.
window: pancurses::Window,
}
impl Terminal {
/// Creates a new `Terminal`.
#[inline]
pub fn new() -> Mrc<Self> {
Rc::new(RefCell::new(Self {
// Must call initscr() first.
window: pancurses::initscr(),
}))
}
/// Converts the given result of a UI function to an [`Outcome`].
fn process(result: i32, error: Error) -> Outcome {
if result == pancurses::OK {
Ok(())
} else {
Err(error)
}
}
/// Overwrites the block at the cursor with a character.
fn add_char(&self, c: char) -> Outcome {
Self::process(self.window.addch(c), Error::Waddch)
}
/// Writes a string starting at the cursor.
fn add_str(&self, s: String) -> Outcome {
Self::process(self.window.addstr(s), Error::Waddstr)
}
/// Clears the entire window.
fn clear_all(&self) -> Outcome {
Self::process(self.window.clear(), Error::Wclear)
}
/// Clears all blocks from the cursor to the end of the row.
fn clear_to_row_end(&self) -> Outcome {
Self::process(self.window.clrtoeol(), Error::Wcleartoeol)
}
/// Defines [`Color`] as having a background color.
fn define_color(&self, color: Color, background: i16) -> Outcome {
Self::process(
panc | #[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Change::Backspace => write!(f, "Backspace"), | random_line_split |
ui.rs | ()`.
InitPair,
/// Describes a possible error during call to `noecho()`.
Noecho,
/// Describes a possible error during call to `start_color()`.
StartColor,
/// Describes a possible error during call to `use_default_colors()`.
UseDefaultColors,
/// Describes a possible error during call to `waddch()`.
Waddch,
/// Describes a possible error during call to `waddstr()`.
Waddstr,
/// Describes a possible error during call to `wchgat()`.
Wchgat,
/// Describes a possible error during call to `wclear()`.
Wclear,
/// Describes a possible error during call to `wcleartoeol()`.
Wcleartoeol,
/// Describes a possible error during call to `wdelch()`.
Wdelch,
/// Describes a possible error during call to `winsch()`.
Winsch,
/// Describes a possible error during call to `wmove()`.
Wmove,
/// Describes a possible error during call to `nodelay()`.
Nodelay,
}
impl Error {
/// Returns the function that caused the current `Error`.
fn get_function(&self) -> &str {
match self {
Error::Endwin => "endwin",
Error::Flash => "flash",
Error::InitPair => "init_pair",
Error::Noecho => "noecho",
Error::StartColor => "start_color",
Error::UseDefaultColors => "use_default_colors",
Error::Waddch => "waddch",
Error::Waddstr => "waddstr",
Error::Wchgat => "wchgat",
Error::Wclear => "wclear",
Error::Wcleartoeol => "wcleartoeol",
Error::Wdelch => "wdelch",
Error::Winsch => "winsch",
Error::Wmove => "wmove",
Error::Nodelay => "nodelay",
Error::NoUi => "",
}
}
}
impl Display for Error {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Error::NoUi => write!(f, "No UserInterface was created."),
_ => write!(f, "Failed while calling {}().", self.get_function()),
}
}
}
impl error::Error for Error {}
/// Signifies a specific cell in the grid.
#[derive(Clone, Copy, Eq, Debug, Default, Hash, Ord, PartialEq, PartialOrd)]
pub struct Address {
/// The index of the row that contains the cell (starts at 0).
row: Index,
/// The index of the column that contains the cell (starts at 0).
column: Index,
}
impl Address {
/// Creates a new `Address` with a given row and column.
#[inline]
pub fn new(row: Index, column: Index) -> Self {
Self { row, column }
}
/// Returns the column of `self`.
///
/// Used with [`pancurses`].
///
/// [`pancurses`]: ../../pancurses/index.html
fn x(self) -> IndexType {
IndexType::from(self.column)
}
/// Returns the row of `self`.
///
/// Used with [`pancurses`].
///
/// [`pancurses`]: ../../pancurses/index.html
fn y(self) -> IndexType {
IndexType::from(self.row)
}
}
impl Display for Address {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "({}, {})", self.row, self.column)
}
}
/// Signifies a modification to the grid.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub enum Change {
/// Removes the previous cell, moving all subsequent cells to the left.
Backspace,
/// Clears all cells.
Clear,
/// Sets the color of a given number of cells.
Format(Length, Color),
/// Inserts a cell containing a character, moving all subsequent cells to the right.
Insert(char),
/// Does nothing.
Nothing,
/// Writes the characters of a string in sequence and clears all subsequent cells.
Row(String),
/// Flashes the display.
Flash,
}
impl Default for Change {
#[inline]
fn default() -> Self {
Change::Nothing
}
}
impl Display for Change {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Change::Backspace => write!(f, "Backspace"),
Change::Clear => write!(f, "Clear"),
Change::Format(n, color) => write!(f, "Format {} cells to {}", n, color),
Change::Insert(input) => write!(f, "Insert '{}'", input),
Change::Nothing => write!(f, "Nothing"),
Change::Row(row_str) => write!(f, "Write row '{}'", row_str),
Change::Flash => write!(f, "Flash"),
}
}
}
/// Signifies a color.
// Order must be kept as defined to match pancurses.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum Color {
/// The default foreground on the default background.
Default,
/// The default foreground on a red background.
Red,
/// The default foreground on a green background.
Green,
/// The default foreground on a yellow background.
Yellow,
/// The default foreground on a blue background.
Blue,
}
impl Color {
/// Converts `self` to a `color-pair` as specified in [`pancurses`].
fn cp(self) -> i16 {
self as i16
}
}
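// Sketch (not in the original source): because the variants are declared in
// pancurses' color order, the `as i16` cast in `cp()` makes each variant its
// own color-pair index, e.g. `Color::Default.cp() == 0` and
// `Color::Red.cp() == 1`.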
impl Display for Color {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result |
}
/// Signifies a [`Change`] to make to an [`Address`].
///
/// [`Change`]: enum.Change.html
/// [`Address`]: struct.Address.html
#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)]
pub struct Edit {
/// The [`Change`] to be made.
change: Change,
/// The [`Address`] on which the [`Change`] is intended.
address: Option<Address>,
}
impl Edit {
/// Creates a new `Edit`.
#[inline]
pub fn new(address: Option<Address>, change: Change) -> Self {
Self { address, change }
}
}
/// The interface between the user and the application.
///
/// All output is displayed in a grid of cells. Each cell contains one character and can change its
/// background color.
pub trait UserInterface: Debug {
/// Sets up the user interface for use.
fn init(&self) -> Outcome;
/// Closes the user interface.
fn close(&self) -> Outcome;
/// Returns the number of cells that make up the height of the grid.
fn grid_height(&self) -> Result<Index, TryFromIntError>;
/// Applies the edit to the output.
fn apply(&self, edit: Edit) -> Outcome;
/// Flashes the output.
fn flash(&self) -> Outcome;
/// Returns the input from the user.
///
/// Returns [`None`] if no character input is provided.
fn receive_input(&self) -> Option<Input>;
}
/// The user interface provided by a terminal.
#[derive(Debug)]
pub struct Terminal {
/// The window that interfaces with the application.
window: pancurses::Window,
}
impl Terminal {
/// Creates a new `Terminal`.
#[inline]
pub fn new() -> Mrc<Self> {
Rc::new(RefCell::new(Self {
// Must call initscr() first.
window: pancurses::initscr(),
}))
}
/// Converts the given result of a UI function to an [`Outcome`].
fn process(result: i32, error: Error) -> Outcome {
if result == pancurses::OK {
Ok(())
} else {
Err(error)
}
}
/// Overwrites the block at the cursor with a character.
fn add_char(&self, c: char) -> Outcome {
Self::process(self.window.addch(c), Error::Waddch)
}
/// Writes a string starting at the cursor.
fn add_str(&self, s: String) -> Outcome {
Self::process(self.window.addstr(s), Error::Waddstr)
}
/// Clears the entire window.
fn clear_all(&self) -> Outcome {
Self::process(self.window.clear(), Error::Wclear)
}
/// Clears all blocks from the cursor to the end of the row.
fn clear_to_row_end(&self) -> Outcome {
Self::process(self.window.clrtoeol(), Error::Wcleartoeol)
}
/// Defines [`Color`] as having a background color.
fn define_color(&self, color: Color, background: i16) -> Outcome {
Self::process(
| {
match self {
Color::Default => write!(f, "Default"),
Color::Red => write!(f, "Red"),
Color::Green => write!(f, "Green"),
Color::Yellow => write!(f, "Yellow"),
Color::Blue => write!(f, "Blue"),
}
} | identifier_body |
ui.rs | `.
UseDefaultColors,
/// Describes a possible error during call to `waddch()`.
Waddch,
/// Describes a possible error during call to `waddstr()`.
Waddstr,
/// Describes a possible error during call to `wchgat()`.
Wchgat,
/// Describes a possible error during call to `wclear()`.
Wclear,
/// Describes a possible error during call to `wcleartoeol()`.
Wcleartoeol,
/// Describes a possible error during call to `wdelch()`.
Wdelch,
/// Describes a possible error during call to `winsch()`.
Winsch,
/// Describes a possible error during call to `wmove()`.
Wmove,
/// Describes a possible error during call to `nodelay()`.
Nodelay,
}
impl Error {
/// Returns the function that caused the current `Error`.
fn get_function(&self) -> &str {
match self {
Error::Endwin => "endwin",
Error::Flash => "flash",
Error::InitPair => "init_pair",
Error::Noecho => "noecho",
Error::StartColor => "start_color",
Error::UseDefaultColors => "use_default_colors",
Error::Waddch => "waddch",
Error::Waddstr => "waddstr",
Error::Wchgat => "wchgat",
Error::Wclear => "wclear",
Error::Wcleartoeol => "wcleartoeol",
Error::Wdelch => "wdelch",
Error::Winsch => "winsch",
Error::Wmove => "wmove",
Error::Nodelay => "nodelay",
Error::NoUi => "",
}
}
}
impl Display for Error {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Error::NoUi => write!(f, "No UserInterface was created."),
_ => write!(f, "Failed while calling {}().", self.get_function()),
}
}
}
impl error::Error for Error {}
/// Signifies a specific cell in the grid.
#[derive(Clone, Copy, Eq, Debug, Default, Hash, Ord, PartialEq, PartialOrd)]
pub struct Address {
/// The index of the row that contains the cell (starts at 0).
row: Index,
/// The index of the column that contains the cell (starts at 0).
column: Index,
}
impl Address {
/// Creates a new `Address` with a given row and column.
#[inline]
pub fn new(row: Index, column: Index) -> Self {
Self { row, column }
}
/// Returns the column of `self`.
///
/// Used with [`pancurses`].
///
/// [`pancurses`]: ../../pancurses/index.html
fn x(self) -> IndexType {
IndexType::from(self.column)
}
/// Returns the row of `self`.
///
/// Used with [`pancurses`].
///
/// [`pancurses`]: ../../pancurses/index.html
fn y(self) -> IndexType {
IndexType::from(self.row)
}
}
impl Display for Address {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "({}, {})", self.row, self.column)
}
}
/// Signifies a modification to the grid.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub enum Change {
/// Removes the previous cell, moving all subsequent cells to the left.
Backspace,
/// Clears all cells.
Clear,
/// Sets the color of a given number of cells.
Format(Length, Color),
/// Inserts a cell containing a character, moving all subsequent cells to the right.
Insert(char),
/// Does nothing.
Nothing,
/// Writes the characters of a string in sequence and clears all subsequent cells.
Row(String),
/// Flashes the display.
Flash,
}
impl Default for Change {
#[inline]
fn default() -> Self {
Change::Nothing
}
}
impl Display for Change {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Change::Backspace => write!(f, "Backspace"),
Change::Clear => write!(f, "Clear"),
Change::Format(n, color) => write!(f, "Format {} cells to {}", n, color),
Change::Insert(input) => write!(f, "Insert '{}'", input),
Change::Nothing => write!(f, "Nothing"),
Change::Row(row_str) => write!(f, "Write row '{}'", row_str),
Change::Flash => write!(f, "Flash"),
}
}
}
/// Signifies a color.
// Order must be kept as defined to match pancurses.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum Color {
/// The default foreground on the default background.
Default,
/// The default foreground on a red background.
Red,
/// The default foreground on a green background.
Green,
/// The default foreground on a yellow background.
Yellow,
/// The default foreground on a blue background.
Blue,
}
impl Color {
/// Converts `self` to a `color-pair` as specified in [`pancurses`].
fn cp(self) -> i16 {
self as i16
}
}
impl Display for Color {
#[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Color::Default => write!(f, "Default"),
Color::Red => write!(f, "Red"),
Color::Green => write!(f, "Green"),
Color::Yellow => write!(f, "Yellow"),
Color::Blue => write!(f, "Blue"),
}
}
}
/// Signifies a [`Change`] to make to an [`Address`].
///
/// [`Change`]: enum.Change.html
/// [`Address`]: struct.Address.html
#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)]
pub struct Edit {
/// The [`Change`] to be made.
change: Change,
/// The [`Address`] on which the [`Change`] is intended.
address: Option<Address>,
}
impl Edit {
/// Creates a new `Edit`.
#[inline]
pub fn new(address: Option<Address>, change: Change) -> Self {
Self { address, change }
}
}
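// Illustrative only (not from the source): an `Edit` that rewrites the top
// row could be built as
// `Edit::new(Some(Address::new(0, 0)), Change::Row("Hello".to_string()))`,
// while `Edit::new(None, Change::Clear)` would clear the whole grid.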
/// The interface between the user and the application.
///
/// All output is displayed in a grid of cells. Each cell contains one character and can change its
/// background color.
pub trait UserInterface: Debug {
/// Sets up the user interface for use.
fn init(&self) -> Outcome;
/// Closes the user interface.
fn close(&self) -> Outcome;
/// Returns the number of cells that make up the height of the grid.
fn grid_height(&self) -> Result<Index, TryFromIntError>;
/// Applies the edit to the output.
fn apply(&self, edit: Edit) -> Outcome;
/// Flashes the output.
fn flash(&self) -> Outcome;
/// Returns the input from the user.
///
/// Returns [`None`] if no character input is provided.
fn receive_input(&self) -> Option<Input>;
}
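// A hypothetical caller (sketch, not from the source) drives any
// `UserInterface` the same way: call `init()` once, apply `Edit`s and poll
// `receive_input()` in the main loop, then call `close()` on shutdown.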
/// The user interface provided by a terminal.
#[derive(Debug)]
pub struct Terminal {
/// The window that interfaces with the application.
window: pancurses::Window,
}
impl Terminal {
/// Creates a new `Terminal`.
#[inline]
pub fn new() -> Mrc<Self> {
Rc::new(RefCell::new(Self {
// Must call initscr() first.
window: pancurses::initscr(),
}))
}
/// Converts the given result of a UI function to an [`Outcome`].
fn process(result: i32, error: Error) -> Outcome {
if result == pancurses::OK {
Ok(())
} else {
Err(error)
}
}
/// Overwrites the block at the cursor with a character.
fn add_char(&self, c: char) -> Outcome {
Self::process(self.window.addch(c), Error::Waddch)
}
/// Writes a string starting at the cursor.
fn add_str(&self, s: String) -> Outcome {
Self::process(self.window.addstr(s), Error::Waddstr)
}
/// Clears the entire window.
fn clear_all(&self) -> Outcome {
Self::process(self.window.clear(), Error::Wclear)
}
/// Clears all blocks from the cursor to the end of the row.
fn clear_to_row_end(&self) -> Outcome {
Self::process(self.window.clrtoeol(), Error::Wcleartoeol)
}
/// Defines [`Color`] as having a background color.
fn define_color(&self, color: Color, background: i16) -> Outcome {
Self::process(
pancurses::init_pair(color.cp(), DEFAULT_COLOR, background),
Error::InitPair,
)
}
/// Deletes the character at the cursor.
///
/// All subsequent characters are shifted to the left and a blank block is added at the end.
fn | delete_char | identifier_name |
|
command.py | User
from ZeroBot.common.enums import HelpType
from ZeroBot.exceptions import CommandAlreadyRegistered, CommandParseError
from ZeroBot.module import Module
from ZeroBot.util import gen_repr
__all__ = ["CommandHelp", "CommandParser", "ParsedCommand"]
class _NoExitArgumentParser(ArgumentParser):
"""Modified `argparse.ArgumentParser` that doesn't exit on errors."""
# NOTE: Python 3.9 will add an `exit_on_error` parameter that will stop
# argparse from exiting instead of having to override exit and error.
def exit(self, status=0, message=None):
pass
def error(self, message):
raise CommandParseError(message, cmd_name=self.prog)
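# Illustrative only (not part of the module): bad input raises
# CommandParseError instead of terminating the interpreter, so callers can
# report the error back to the invoking user (this assumes the exception
# keeps the cmd_name it was constructed with).
#
#     try:
#         parser.parse_args(['--no-such-option'])
#     except CommandParseError as exc:
#         print(f"{exc.cmd_name}: {exc}")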
class CommandParser(_NoExitArgumentParser):
"""Definition and parser for ZeroBot commands.
Creation of a `CommandParser` object necessarily entails defining the
command itself: its name, what arguments and options it accepts, how they
behave, etc. It is both the blueprint and interpreter for a command.
Attributes
----------
name : str, optional
The name of the command, i.e. how the command will be invoked. May be
omitted, but this only makes sense when creating a parent parser for
another parser.
description : str, optional
A short description of the command. May be omitted.
usage : str, optional
The text shown as the "usage" line in the command's help text. If
omitted, it will be automatically generated by `argparse`.
kwargs
Any extra keyword arguments are passed to the underlying
`argparse.ArgumentParser` constructor.
Notes
-----
Under the hood, `CommandParser` is simply a wrapper around an
`argparse.ArgumentParser` with some ZeroBot-related members.
"""
def __init__(
self, name: Optional[str] = None, description: Optional[str] = None, usage: Optional[str] = None, **kwargs
):
# NOTE: Might be able to make use of formatter_class if need be
if not name:
name = kwargs.get("name", kwargs.get("prog"))
kwargs.update(
{
"prog": name,
"description": description,
"usage": usage,
"add_help": False,
}
)
super().__init__(**kwargs)
self.name = name
self._module = None
# More minimal default argument grouping
blank_group = self.add_argument_group()
self._optionals = blank_group
self._positionals = blank_group
def __repr__(self):
attrs = ["name", "description", "module"]
return gen_repr(self, attrs)
def __str__(self):
return self.name
def make_adder(self, *args, **kwargs):
"""Helper shortcut for creating subcommands.
Accepts arguments for `add_subparsers`, creating a new subparser and
returning a partial function wrapping `add_subcommand` for the new
subparser. If the `dest` argument isn't specified, it defaults to
`'subcmd'`.
Example
-------
cmd_foo = CommandParser('foo', 'Does foo stuff')
foo_adder = cmd_foo.make_adder(metavar='OPERATION', required=True)
bar_subcmd = foo_adder('bar', description='Does bar stuff to foo')
"""
kwargs.setdefault("dest", "subcmd")
subp = self.add_subparsers(*args, **kwargs)
return partial(self.add_subcommand, subp)
@staticmethod
def add_subcommand(
subp: _SubParsersAction, name: str, description: Optional[str] = None, **kwargs
) -> "CommandParser":
"""Helper method for adding subcommands.
Wrapper around `add_parser` that simplifies adding subcommands to
ZeroBot commands. The same string is used for both the `description`
and `help` parameters of `add_parser`.
Parameters
----------
subp : _SubParsersAction
The subparser object returned from the `add_subparsers` method.
name : str
The name of the subcommand.
description : str, optional
A short description of the command. May be omitted. The `help`
parameter will be set to this value automatically.
kwargs
Extra arguments to pass to the `CommandParser` constructor.
"""
desc_help = {"description": description, "help": description}
return subp.add_parser(name, **desc_help, **kwargs)
@property
def module(self) -> Optional[Module]:
"""The module that this command is registered to.
Will return `None` if this command has not yet been registered.
"""
return self._module
@dataclass
class ParsedCommand:
"""A successfully parsed command with invoker and destination info.
ZeroBot's `Core` will send these as the payload of `module_command_*`
events.
Attributes
----------
name : str
The command name.
args : dict
A dictionary of the resultant parsed arguments and options and their
values.
parser : CommandParser
The parser that created this instance.
msg : Message
The original message encompassing the command.
invoker
source
subcmd
"""
name: str
args: dict[str, Any]
parser: CommandParser
msg: Message
def __post_init__(self):
# pylint: disable=protected-access
try:
action = self.parser._actions[0]
if isinstance(action, _SubParsersAction):
name_map = action.choices
canon_parser = name_map[self.args[action.dest]]
self._subcmd = canon_parser.name.split()[-1]
else:
self._subcmd = None
except (KeyError, IndexError):
self._subcmd = None
@property
def invoker(self) -> User:
"""The User that invoked the command."""
return self.msg.source
@property
def source(self) -> Union[User, Channel]:
"""Where the command was sent from.
Can be either directly from a user, or from a user within a channel.
"""
return self.msg.destination
@property
def subcmd(self) -> Optional[str]:
"""The invoked subcommand name, if one was invoked.
For subcommands with aliases, the name returned is always the canonical
name that the aliases are associated with. For this reason, this
attribute should be preferred to extracting the subcommand name from
`ParsedCommand.args`.
"""
return self._subcmd
def nested_subcmd(self, depth: int = 2) -> Optional[str]:
"""Get the name of a nested subcommand.
Like the `subcmd` property, the name returned is always the canonical
name for the subcommand. The `depth` parameter determines how many
levels of nesting to traverse; the default of ``2`` gets the first
nested subcommand. As a consequence, a value of ``1`` is the same as
`subcmd`.
"""
# pylint: disable=protected-access
current = 0
subparser = self.parser
try:
while current < depth:
|
return subparser.name.split()[-1]
except (IndexError, KeyError, TypeError):
return None
@dataclass
class CommandHelp:
"""Encapsulates the result of a command help request.
ZeroBot's `Core` will create and pass these to `core_command_help`
callbacks.
Attributes
----------
type : HelpType
An enum type representing the type of help request.
name : str, optional
The command or module name that the help is about.
aliases : list, optional
If applicable, a list of aliases for this command.
description : str, optional
The command or module description
usage : str, optional
The "usage" string for the command
args : dict, optional
A dictionary mapping each positional argument name and a tuple of their
help strings and a boolean flag denoting whether or not the argument
represents a subcommand.
Only set when `type` is `CMD`.
opts : dict, optional
A dictionary mapping a tuple of option names representing a particular
option to a tuple of the option's value name and its help strings.
cmds : dict, optional
A dictionary mapping module names to another dictionary of command
names and their help strings. Only set when `type` is `MOD` or `ALL`.
subcmds : dict, optional
If applicable, a dictionary of subcommand names and their own
`CommandHelp` objects.
parent : CommandHelp
Only set when `type` is `NO_SUCH_SUBCMD`, and refers to the parent
`CommandHelp` object.
"""
type: HelpType
name: str = None
description: str = None
usage: str = None
aliases: list[str] = field(default_factory=list)
args: dict[str, tuple[Optional[str], bool]] = field(default_factory=dict)
opts: dict[tuple[str, ...], Optional[tuple[str, Optional[str]] | action = subparser._actions[0]
if isinstance(action, _SubParsersAction):
subparser = action.choices[self.args[action.dest]]
current += 1
else:
return None | conditional_block |
command.py | ArgumentParser(ArgumentParser):
"""Modified `argparse.ArgumentParser` that doesn't exit on errors."""
# NOTE: Python 3.9 will add an `exit_on_error` parameter that will stop
# argparse from exiting instead of having to override exit and error.
def exit(self, status=0, message=None):
pass
def error(self, message):
raise CommandParseError(message, cmd_name=self.prog)
class CommandParser(_NoExitArgumentParser):
"""Definition and parser for ZeroBot commands.
Creation of a `CommandParser` object necessarily entails defining the
command itself: its name, what arguments and options it accepts, how they
behave, etc. It is both the blueprint and interpreter for a command.
Attributes
----------
name : str, optional
The name of the command, i.e. how the command will be invoked. May be
omitted, but this only makes sense when creating a parent parser for
another parser.
description : str, optional
A short description of the command. May be omitted.
usage : str, optional
The text shown as the "usage" line in the command's help text. If
omitted, it will be automatically generated by `argparse`.
kwargs
Any extra keyword arguments are passed to the underlying
`argparse.ArgumentParser` constructor.
Notes
-----
Under the hood, `CommandParser` is simply a wrapper around an
`argparse.ArgumentParser` with some ZeroBot-related members.
"""
def __init__(
self, name: Optional[str] = None, description: Optional[str] = None, usage: Optional[str] = None, **kwargs
):
# NOTE: Might be able to make use of formatter_class if need be
if not name:
name = kwargs.get("name", kwargs.get("prog"))
kwargs.update(
{
"prog": name,
"description": description,
"usage": usage,
"add_help": False,
}
)
super().__init__(**kwargs)
self.name = name
self._module = None
# More minimal default argument grouping
blank_group = self.add_argument_group()
self._optionals = blank_group
self._positionals = blank_group
def __repr__(self):
attrs = ["name", "description", "module"]
return gen_repr(self, attrs)
def __str__(self):
return self.name
def make_adder(self, *args, **kwargs):
"""Helper shortcut for creating subcommands.
Accepts arguments for `add_subparsers`, creating a new subparser and
returning a partial function wrapping `add_subcommand` for the new
subparser. If the `dest` argument isn't specified, it defaults to
`'subcmd'`.
Example
-------
cmd_foo = CommandParser('foo', 'Does foo stuff')
foo_adder = cmd_foo.make_adder(metavar='OPERATION', required=True)
bar_subcmd = foo_adder('bar', description='Does bar stuff to foo')
"""
kwargs.setdefault("dest", "subcmd")
subp = self.add_subparsers(*args, **kwargs)
return partial(self.add_subcommand, subp)
@staticmethod
def add_subcommand(
subp: _SubParsersAction, name: str, description: Optional[str] = None, **kwargs
) -> "CommandParser":
"""Helper method for adding subcommands.
Wrapper around `add_parser` that simplifies adding subcommands to
ZeroBot commands. The same string is used for both the `description`
and `help` parameters of `add_parser`.
Parameters
----------
subp : _SubParsersAction
The subparser object returned from the `add_subparsers` method.
name : str
The name of the subcommand.
description : str, optional
A short description of the command. May be omitted. The `help`
parameter will be set to this value automatically.
kwargs
Extra arguments to pass to the `CommandParser` constructor.
"""
desc_help = {"description": description, "help": description}
return subp.add_parser(name, **desc_help, **kwargs)
@property
def module(self) -> Optional[Module]:
"""The module that this command is registered to.
Will return `None` if this command has not yet been registered.
"""
return self._module
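# Sketch of a typical definition (hypothetical command, not from the source):
#
#     cmd_greet = CommandParser('greet', 'Greets somebody')
#     cmd_greet.add_argument('name', help='Who to greet')
#     cmd_greet.add_argument('-l', '--loud', action='store_true',
#                            help='Greet in all caps')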
@dataclass
class ParsedCommand:
"""A successfully parsed command with invoker and destination info.
ZeroBot's `Core` will send these as the payload of `module_command_*`
events.
Attributes
----------
name : str
The command name.
args : dict
A dictionary of the resultant parsed arguments and options and their
values.
parser : CommandParser
The parser that created this instance.
msg : Message
The original message encompassing the command.
invoker
source
subcmd
"""
name: str
args: dict[str, Any]
parser: CommandParser
msg: Message
def __post_init__(self):
# pylint: disable=protected-access
try:
action = self.parser._actions[0]
if isinstance(action, _SubParsersAction):
name_map = action.choices
canon_parser = name_map[self.args[action.dest]]
self._subcmd = canon_parser.name.split()[-1]
else:
self._subcmd = None
except (KeyError, IndexError):
self._subcmd = None
@property
def invoker(self) -> User:
"""The User that invoked the command."""
return self.msg.source
@property
def source(self) -> Union[User, Channel]:
"""Where the command was sent from.
Can be either directly from a user, or from a user within a channel.
"""
return self.msg.destination
@property
def subcmd(self) -> Optional[str]:
"""The invoked subcommand name, if one was invoked.
For subcommands with aliases, the name returned is always the canonical
name that the aliases are associated with. For this reason, this
attribute should be preferred to extracting the subcommand name from
`ParsedCommand.args`.
"""
return self._subcmd
def nested_subcmd(self, depth: int = 2) -> Optional[str]:
"""Get the name of a nested subcommand.
Like the `subcmd` property, the name returned is always the canonical
name for the subcommand. The `depth` parameter determines how many
levels of nesting to traverse; the default of ``2`` gets the first
nested subcommand. As a consequence, a value of ``1`` is the same as
`subcmd`.
"""
# pylint: disable=protected-access
current = 0
subparser = self.parser
try:
while current < depth:
action = subparser._actions[0]
if isinstance(action, _SubParsersAction):
subparser = action.choices[self.args[action.dest]]
current += 1
else:
return None
return subparser.name.split()[-1]
except (IndexError, KeyError, TypeError):
return None
@dataclass
class CommandHelp:
"""Encapsulates the result of a command help request.
ZeroBot's `Core` will create and pass these to `core_command_help`
callbacks.
Attributes
----------
type : HelpType
An enum type representing the type of help request.
name : str, optional
The command or module name that the help is about.
aliases : list, optional
If applicable, a list of aliases for this command.
description : str, optional
The command or module description
usage : str, optional
The "usage" string for the command
args : dict, optional
A dictionary mapping each positional argument name and a tuple of their
help strings and a boolean flag denoting whether or not the argument
represents a subcommand.
Only set when `type` is `CMD`.
opts : dict, optional
A dictionary mapping a tuple of option names representing a particular
option to a tuple of the option's value name and its help strings.
cmds : dict, optional
A dictionary mapping module names to another dictionary of command
names and their help strings. Only set when `type` is `MOD` or `ALL`.
subcmds : dict, optional
If applicable, a dictionary of subcommand names and their own
`CommandHelp` objects.
parent : CommandHelp
Only set when `type` is `NO_SUCH_SUBCMD`, and refers to the parent
`CommandHelp` object.
"""
type: HelpType
name: str = None
description: str = None
usage: str = None
aliases: list[str] = field(default_factory=list)
args: dict[str, tuple[Optional[str], bool]] = field(default_factory=dict)
opts: dict[tuple[str, ...], Optional[tuple[str, Optional[str]]]] = field(default_factory=dict)
cmds: dict[str, dict[str, str]] = field(default_factory=dict)
subcmds: dict[str, "CommandHelp"] = field(default_factory=dict, repr=False)
parent: "CommandHelp" = None
def | get_subcmd | identifier_name |
|
command.py | User
from ZeroBot.common.enums import HelpType
from ZeroBot.exceptions import CommandAlreadyRegistered, CommandParseError
from ZeroBot.module import Module
from ZeroBot.util import gen_repr
__all__ = ["CommandHelp", "CommandParser", "ParsedCommand"]
class _NoExitArgumentParser(ArgumentParser):
"""Modified `argparse.ArgumentParser` that doesn't exit on errors."""
# NOTE: Python 3.9 will add an `exit_on_error` parameter that will stop
# argparse from exiting instead of having to override exit and error.
def exit(self, status=0, message=None):
pass
def error(self, message):
raise CommandParseError(message, cmd_name=self.prog)
class CommandParser(_NoExitArgumentParser):
"""Definition and parser for ZeroBot commands.
Creation of a `CommandParser` object necessarily entails defining the
command itself: its name, what arguments and options it accepts, how they
behave, etc. It is both the blueprint and interpreter for a command.
Attributes
----------
name : str, optional
The name of the command, i.e. how the command will be invoked. May be
omitted, but this only makes sense when creating a parent parser for
another parser.
description : str, optional
A short description of the command. May be omitted.
usage : str, optional
The text shown as the "usage" line in the command's help text. If
omitted, it will be automatically generated by `argparse`.
kwargs
Any extra keyword arguments are passed to the underlying
`argparse.ArgumentParser` constructor.
Notes
-----
Under the hood, `CommandParser` is simply a wrapper around an
`argparse.ArgumentParser` with some ZeroBot-related members.
"""
def __init__(
self, name: Optional[str] = None, description: Optional[str] = None, usage: Optional[str] = None, **kwargs
):
# NOTE: Might be able to make use of formatter_class if need be
if not name:
name = kwargs.get("name", kwargs.get("prog"))
kwargs.update(
{
"prog": name,
"description": description,
"usage": usage,
"add_help": False,
}
)
super().__init__(**kwargs)
self.name = name
self._module = None
# More minimal default argument grouping
blank_group = self.add_argument_group()
self._optionals = blank_group
self._positionals = blank_group
def __repr__(self):
attrs = ["name", "description", "module"]
return gen_repr(self, attrs)
def __str__(self):
return self.name
def make_adder(self, *args, **kwargs):
"""Helper shortcut for creating subcommands.
Accepts arguments for `add_subparsers`, creating a new subparser and
returning a partial function wrapping `add_subcommand` for the new
subparser. If the `dest` argument isn't specified, it defaults to
`'subcmd'`.
Example
-------
cmd_foo = CommandParser('foo', 'Does foo stuff')
foo_adder = cmd_foo.make_adder(metavar='OPERATION', required=True)
bar_subcmd = foo_adder('bar', description='Does bar stuff to foo')
"""
kwargs.setdefault("dest", "subcmd")
subp = self.add_subparsers(*args, **kwargs)
return partial(self.add_subcommand, subp)
@staticmethod
def add_subcommand(
subp: _SubParsersAction, name: str, description: Optional[str] = None, **kwargs
) -> "CommandParser":
"""Helper method for adding subcommands.
Wrapper around `add_parser` that simplifies adding subcommands to
ZeroBot commands. The same string is used for both the `description`
and `help` parameters of `add_parser`.
Parameters
----------
subp : _SubParsersAction
The subparser object returned from the `add_subparsers` method.
name : str
The name of the subcommand.
description : str, optional
A short description of the command. May be omitted. The `help`
parameter will be set to this value automatically.
kwargs
Extra arguments to pass to the `CommandParser` constructor.
"""
desc_help = {"description": description, "help": description}
return subp.add_parser(name, **desc_help, **kwargs)
@property
def module(self) -> Optional[Module]:
"""The module that this command is registered to.
Will return `None` if this command has not yet been registered.
"""
return self._module
@dataclass
class ParsedCommand:
"""A successfully parsed command with invoker and destination info.
ZeroBot's `Core` will send these as the payload of `module_command_*`
events.
Attributes
----------
name : str
The command name.
args : dict
A dictionary of the resultant parsed arguments and options and their
values.
parser : CommandParser
The parser that created this instance.
msg : Message
The original message encompassing the command.
invoker
source
subcmd
"""
name: str
args: dict[str, Any]
parser: CommandParser
msg: Message
def __post_init__(self):
# pylint: disable=protected-access
try:
action = self.parser._actions[0]
if isinstance(action, _SubParsersAction): | except (KeyError, IndexError):
self._subcmd = None
@property
def invoker(self) -> User:
"""The User that invoked the command."""
return self.msg.source
@property
def source(self) -> Union[User, Channel]:
"""Where the command was sent from.
Can be either directly from a user, or from a user within a channel.
"""
return self.msg.destination
@property
def subcmd(self) -> Optional[str]:
"""The invoked subcommand name, if one was invoked.
For subcommands with aliases, the name returned is always the canonical
name that the aliases are associated with. For this reason, this
attribute should be preferred to extracting the subcommand name from
`ParsedCommand.args`.
"""
return self._subcmd
def nested_subcmd(self, depth: int = 2) -> Optional[str]:
"""Get the name of a nested subcommand.
Like the `subcmd` property, the name returned is always the canonical
name for the subcommand. The `depth` parameter determines how many
levels of nesting to traverse; the default of ``2`` gets the first
nested subcommand. As a consequence, a value of ``1`` is the same as
`subcmd`.
"""
# pylint: disable=protected-access
current = 0
subparser = self.parser
try:
while current < depth:
action = subparser._actions[0]
if isinstance(action, _SubParsersAction):
subparser = action.choices[self.args[action.dest]]
current += 1
else:
return None
return subparser.name.split()[-1]
except (IndexError, KeyError, TypeError):
return None
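# Illustrative only: for a command line like "db query stats", `subcmd` would
# be "query" and `nested_subcmd(2)` would return the canonical name of
# "stats", assuming both levels were registered via
# make_adder()/add_subcommand().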
@dataclass
class CommandHelp:
"""Encapsulates the result of a command help request.
ZeroBot's `Core` will create and pass these to `core_command_help`
callbacks.
Attributes
----------
type : HelpType
An enum type representing the type of help request.
name : str, optional
The command or module name that the help is about.
aliases : list, optional
If applicable, a list of aliases for this command.
description : str, optional
The command or module description
usage : str, optional
The "usage" string for the command
args : dict, optional
A dictionary mapping each positional argument name and a tuple of their
help strings and a boolean flag denoting whether or not the argument
represents a subcommand.
Only set when `type` is `CMD`.
opts : dict, optional
A dictionary mapping a tuple of option names representing a particular
option to a tuple of the option's value name and its help strings.
cmds : dict, optional
A dictionary mapping module names to another dictionary of command
names and their help strings. Only set when `type` is `MOD` or `ALL`.
subcmds : dict, optional
If applicable, a dictionary of subcommand names and their own
`CommandHelp` objects.
parent : CommandHelp
Only set when `type` is `NO_SUCH_SUBCMD`, and refers to the parent
`CommandHelp` object.
"""
type: HelpType
name: str = None
description: str = None
usage: str = None
aliases: list[str] = field(default_factory=list)
args: dict[str, tuple[Optional[str], bool]] = field(default_factory=dict)
opts: dict[tuple[str, ...], Optional[tuple[str, Optional[str]]]] | name_map = action.choices
canon_parser = name_map[self.args[action.dest]]
self._subcmd = canon_parser.name.split()[-1]
else:
self._subcmd = None | random_line_split |
command.py | User
from ZeroBot.common.enums import HelpType
from ZeroBot.exceptions import CommandAlreadyRegistered, CommandParseError
from ZeroBot.module import Module
from ZeroBot.util import gen_repr
__all__ = ["CommandHelp", "CommandParser", "ParsedCommand"]
class _NoExitArgumentParser(ArgumentParser):
"""Modified `argparse.ArgumentParser` that doesn't exit on errors."""
# NOTE: Python 3.9 will add an `exit_on_error` parameter that will stop
# argparse from exiting instead of having to override exit and error.
def exit(self, status=0, message=None):
pass
def error(self, message):
raise CommandParseError(message, cmd_name=self.prog)
class CommandParser(_NoExitArgumentParser):
"""Definition and parser for ZeroBot commands.
Creation of a `CommandParser` object necessarily entails defining the
command itself: its name, what arguments and options it accepts, how they
behave, etc. It is both the blueprint and interpreter for a command.
Attributes
----------
name : str, optional
The name of the command, i.e. how the command will be invoked. May be
omitted, but this only makes sense when creating a parent parser for
another parser.
description : str, optional
A short description of the command. May be omitted.
usage : str, optional
The text shown as the "usage" line in the command's help text. If
omitted, it will be automatically generated by `argparse`.
kwargs
Any extra keyword arguments are passed to the underlying
`argparse.ArgumentParser` constructor.
Notes
-----
Under the hood, `CommandParser` is simply a wrapper around an
`argparse.ArgumentParser` with some ZeroBot-related members.
"""
def __init__(
self, name: Optional[str] = None, description: Optional[str] = None, usage: Optional[str] = None, **kwargs
):
# NOTE: Might be able to make use of formatter_class if need be
if not name:
name = kwargs.get("name", kwargs.get("prog"))
kwargs.update(
{
"prog": name,
"description": description,
"usage": usage,
"add_help": False,
}
)
super().__init__(**kwargs)
self.name = name
self._module = None
# More minimal default argument grouping
blank_group = self.add_argument_group()
self._optionals = blank_group
self._positionals = blank_group
def __repr__(self):
attrs = ["name", "description", "module"]
return gen_repr(self, attrs)
def __str__(self):
return self.name
def make_adder(self, *args, **kwargs):
"""Helper shortcut for creating subcommands.
Accepts arguments for `add_subparsers`, creating a new subparser and
returning a partial function wrapping `add_subcommand` for the new
subparser. If the `dest` argument isn't specified, it defaults to
`'subcmd'`.
Example
-------
cmd_foo = CommandParser('foo', 'Does foo stuff')
foo_adder = cmd_foo.make_adder(metavar='OPERATION', required=True)
bar_subcmd = foo_adder('bar', description='Does bar stuff to foo')
"""
kwargs.setdefault("dest", "subcmd")
subp = self.add_subparsers(*args, **kwargs)
return partial(self.add_subcommand, subp)
@staticmethod
def add_subcommand(
subp: _SubParsersAction, name: str, description: Optional[str] = None, **kwargs
) -> "CommandParser":
"""Helper method for adding subcommands.
Wrapper around `add_parser` that simplifies adding subcommands to
ZeroBot commands. The same string is used for both the `description`
and `help` parameters of `add_parser`.
Parameters
----------
subp : _SubParsersAction
The subparser object returned from the `add_subparsers` method.
name : str
The name of the subcommand.
description : str, optional
A short description of the command. May be omitted. The `help`
parameter will be set to this value automatically.
kwargs
Extra arguments to pass to the `CommandParser` constructor.
"""
desc_help = {"description": description, "help": description}
return subp.add_parser(name, **desc_help, **kwargs)
@property
def module(self) -> Optional[Module]:
"""The module that this command is registered to.
Will return `None` if this command has not yet been registered.
"""
return self._module
@dataclass
class ParsedCommand:
"""A successfully parsed command with invoker and destination info.
ZeroBot's `Core` will send these as the payload of `module_command_*`
events.
Attributes
----------
name : str
The command name.
args : dict
A dictionary of the resultant parsed arguments and options and their
values.
parser : CommandParser
The parser that created this instance.
msg : Message
The original message encompassing the command.
invoker
source
subcmd
"""
name: str
args: dict[str, Any]
parser: CommandParser
msg: Message
def __post_init__(self):
# pylint: disable=protected-access
try:
action = self.parser._actions[0]
if isinstance(action, _SubParsersAction):
name_map = action.choices
canon_parser = name_map[self.args[action.dest]]
self._subcmd = canon_parser.name.split()[-1]
else:
self._subcmd = None
except (KeyError, IndexError):
self._subcmd = None
@property
def invoker(self) -> User:
"""The User that invoked the command."""
return self.msg.source
@property
def source(self) -> Union[User, Channel]:
"""Where the command was sent from.
Can be either directly from a user, or from a user within a channel.
"""
return self.msg.destination
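# Note (not from the source): in a channel context `source` is the Channel
# the message arrived in, while `invoker` is always the User who typed the
# command; for direct messages the two refer to the same conversation.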
@property
def subcmd(self) -> Optional[str]:
|
def nested_subcmd(self, depth: int = 2) -> Optional[str]:
"""Get the name of a nested subcommand.
Like the `subcmd` property, the name returned is always the canonical
name for the subcommand. The `depth` parameter determines how many
levels of nesting to traverse; the default of ``2`` gets the first
nested subcommand. As a consequence, a value of ``1`` is the same as
`subcmd`.
"""
# pylint: disable=protected-access
current = 0
subparser = self.parser
try:
while current < depth:
action = subparser._actions[0]
if isinstance(action, _SubParsersAction):
subparser = action.choices[self.args[action.dest]]
current += 1
else:
return None
return subparser.name.split()[-1]
except (IndexError, KeyError, TypeError):
return None
@dataclass
class CommandHelp:
"""Encapsulates the result of a command help request.
ZeroBot's `Core` will create and pass these to `core_command_help`
callbacks.
Attributes
----------
type : HelpType
An enum type representing the type of help request.
name : str, optional
The command or module name that the help is about.
aliases : list, optional
If applicable, a list of aliases for this command.
description : str, optional
The command or module description
usage : str, optional
The "usage" string for the command
args : dict, optional
A dictionary mapping each positional argument name and a tuple of their
help strings and a boolean flag denoting whether or not the argument
represents a subcommand.
Only set when `type` is `CMD`.
opts : dict, optional
A dictionary mapping a tuple of option names representing a particular
option to a tuple of the option's value name and its help strings.
cmds : dict, optional
A dictionary mapping module names to another dictionary of command
names and their help strings. Only set when `type` is `MOD` or `ALL`.
subcmds : dict, optional
If applicable, a dictionary of subcommand names and their own
`CommandHelp` objects.
parent : CommandHelp
Only set when `type` is `NO_SUCH_SUBCMD`, and refers to the parent
`CommandHelp` object.
"""
type: HelpType
name: str = None
description: str = None
usage: str = None
aliases: list[str] = field(default_factory=list)
args: dict[str, tuple[Optional[str], bool]] = field(default_factory=dict)
opts: dict[tuple[str, ...], Optional[tuple[str, Optional[str]] | """The invoked subcommand name, if one was invoked.
For subcommands with aliases, the name returned is always the canonical
name that the aliases are associated with. For this reason, this
attribute should be preferred to extracting the subcommand name from
`ParsedCommand.args`.
"""
return self._subcmd | identifier_body |
store.go | _SOURCE_UPLOAD:
if dbginfo.Upload == nil {
return nil, status.Error(codes.Internal, "metadata inconsistency: upload is nil")
}
switch dbginfo.Upload.State {
case debuginfopb.DebuginfoUpload_STATE_UPLOADING:
if s.uploadIsStale(dbginfo.Upload) {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonUploadStale,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonUploadInProgress,
}, nil
case debuginfopb.DebuginfoUpload_STATE_UPLOADED:
if dbginfo.Quality == nil || !dbginfo.Quality.NotValidElf {
if req.Force {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfoAlreadyExistsButForced,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonDebuginfoAlreadyExists,
}, nil
}
if req.Hash == "" {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfoInvalid,
}, nil
}
if dbginfo.Upload.Hash == req.Hash {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonDebuginfoEqual,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfoNotEqual,
}, nil
default:
return nil, status.Error(codes.Internal, "metadata inconsistency: unknown upload state")
}
case debuginfopb.Debuginfo_SOURCE_DEBUGINFOD:
if dbginfo.Quality == nil || !dbginfo.Quality.NotValidElf {
// The stored debuginfo exists and is not marked invalid, so there is
// no need to upload it again.
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonDebuginfodSource,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfodInvalid,
}, nil
default:
return nil, status.Errorf(codes.Internal, "unknown debuginfo source %q", dbginfo.Source)
}
}
}
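// Illustrative summary (not part of the original file): an upload is
// (re-)initiated when a previous upload is stale, when the stored debuginfo
// is known to be invalid, when the client's hash differs from the stored one,
// or when the caller forces it; otherwise the existing artifact is reused.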
func (s *Store) InitiateUpload(ctx context.Context, req *debuginfopb.InitiateUploadRequest) (*debuginfopb.InitiateUploadResponse, error) {
ctx, span := s.tracer.Start(ctx, "InitiateUpload")
defer span.End()
span.SetAttributes(attribute.String("build_id", req.BuildId))
if req.Hash == "" {
return nil, status.Error(codes.InvalidArgument, "hash must be set")
}
if req.Size == 0 {
return nil, status.Error(codes.InvalidArgument, "size must be set")
}
// We don't want to blindly accept upload initiation requests that
// shouldn't have happened.
shouldInitiateResp, err := s.ShouldInitiateUpload(ctx, &debuginfopb.ShouldInitiateUploadRequest{
BuildId: req.BuildId,
Hash: req.Hash,
Force: req.Force,
Type: req.Type,
})
if err != nil {
return nil, err
}
if !shouldInitiateResp.ShouldInitiateUpload {
if shouldInitiateResp.Reason == ReasonDebuginfoEqual {
return nil, status.Error(codes.AlreadyExists, ReasonDebuginfoEqual)
}
return nil, status.Errorf(codes.FailedPrecondition, "upload should not have been attempted to be initiated, a previous check should have failed with: %s", shouldInitiateResp.Reason)
}
if req.Size > s.maxUploadSize {
return nil, status.Errorf(codes.InvalidArgument, "upload size %d exceeds maximum allowed size %d", req.Size, s.maxUploadSize)
}
uploadID := uuid.New().String()
uploadStarted := s.timeNow()
uploadExpiry := uploadStarted.Add(s.maxUploadDuration)
if !s.signedUpload.Enabled {
if err := s.metadata.MarkAsUploading(ctx, req.BuildId, uploadID, req.Hash, req.Type, timestamppb.New(uploadStarted)); err != nil {
return nil, fmt.Errorf("mark debuginfo upload as uploading via gRPC: %w", err)
}
return &debuginfopb.InitiateUploadResponse{
UploadInstructions: &debuginfopb.UploadInstructions{
BuildId: req.BuildId,
UploadId: uploadID,
UploadStrategy: debuginfopb.UploadInstructions_UPLOAD_STRATEGY_GRPC,
Type: req.Type,
},
}, nil
}
signedURL, err := s.signedUpload.Client.SignedPUT(ctx, objectPath(req.BuildId, req.Type), req.Size, uploadExpiry)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if err := s.metadata.MarkAsUploading(ctx, req.BuildId, uploadID, req.Hash, req.Type, timestamppb.New(uploadStarted)); err != nil {
return nil, fmt.Errorf("mark debuginfo upload as uploading via signed URL: %w", err)
}
return &debuginfopb.InitiateUploadResponse{
UploadInstructions: &debuginfopb.UploadInstructions{
BuildId: req.BuildId,
UploadId: uploadID,
UploadStrategy: debuginfopb.UploadInstructions_UPLOAD_STRATEGY_SIGNED_URL,
SignedUrl: signedURL,
Type: req.Type,
},
}, nil
}
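// Illustrative client-side sketch, not part of the original source: when
// InitiateUpload returns UPLOAD_STRATEGY_SIGNED_URL, the client is expected
// to PUT the raw debuginfo bytes to the returned SignedUrl. A minimal
// stdlib-only version (assumes "context", "fmt", "io", "net/http" imports);
// the exact headers required depend on the object-store provider that signed
// the URL.
//
//	func putSignedURL(ctx context.Context, signedURL string, r io.Reader, size int64) error {
//		req, err := http.NewRequestWithContext(ctx, http.MethodPut, signedURL, r)
//		if err != nil {
//			return err
//		}
//		req.ContentLength = size // providers typically require an exact Content-Length
//		resp, err := http.DefaultClient.Do(req)
//		if err != nil {
//			return err
//		}
//		defer resp.Body.Close()
//		if resp.StatusCode/100 != 2 {
//			return fmt.Errorf("signed upload failed: %s", resp.Status)
//		}
//		return nil
//	}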
func (s *Store) MarkUploadFinished(ctx context.Context, req *debuginfopb.MarkUploadFinishedRequest) (*debuginfopb.MarkUploadFinishedResponse, error) {
ctx, span := s.tracer.Start(ctx, "MarkUploadFinished")
defer span.End()
span.SetAttributes(attribute.String("build_id", req.BuildId))
span.SetAttributes(attribute.String("upload_id", req.UploadId))
buildID := req.BuildId
if err := validateInput(buildID); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
err := s.metadata.MarkAsUploaded(ctx, buildID, req.UploadId, req.Type, timestamppb.New(s.timeNow()))
if errors.Is(err, ErrDebuginfoNotFound) {
return nil, status.Error(codes.NotFound, "no debuginfo metadata found for build id")
}
if errors.Is(err, ErrUploadMetadataNotFound) {
return nil, status.Error(codes.NotFound, "no debuginfo upload metadata found for build id")
}
if errors.Is(err, ErrUploadIDMismatch) {
return nil, status.Error(codes.InvalidArgument, "upload id mismatch")
}
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &debuginfopb.MarkUploadFinishedResponse{}, nil
}
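// The errors.Is chain above maps storage-layer sentinel errors onto gRPC
// status codes. The same mapping as a reusable helper — an illustrative
// sketch, not part of the original source:
//
//	func uploadErrToStatus(err error) error {
//		switch {
//		case err == nil:
//			return nil
//		case errors.Is(err, ErrDebuginfoNotFound), errors.Is(err, ErrUploadMetadataNotFound):
//			return status.Error(codes.NotFound, err.Error())
//		case errors.Is(err, ErrUploadIDMismatch):
//			return status.Error(codes.InvalidArgument, err.Error())
//		default:
//			return status.Error(codes.Internal, err.Error())
//		}
//	}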
func (s *Store) Upload(stream debuginfopb.DebuginfoService_UploadServer) error {
if s.signedUpload.Enabled {
return status.Error(codes.Unimplemented, "signed URL uploads are the only supported upload strategy for this service")
}
req, err := stream.Recv()
if err != nil {
return status.Errorf(codes.Unknown, "failed to receive upload info: %q", err)
}
var (
buildID = req.GetInfo().BuildId
uploadID = req.GetInfo().UploadId
r = &UploadReader{stream: stream}
typ = req.GetInfo().Type
)
ctx, span := s.tracer.Start(stream.Context(), "Upload")
defer span.End()
span.SetAttributes(attribute.String("build_id", buildID))
span.SetAttributes(attribute.String("upload_id", uploadID))
if err := s.upload(ctx, buildID, uploadID, typ, r); err != nil {
return err
}
return stream.SendAndClose(&debuginfopb.UploadResponse{
BuildId: buildID,
Size: r.size,
})
}
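// Upload above adapts the incoming client stream into an io.Reader
// (UploadReader) so the object store can consume it directly. The shape of
// that adapter, reduced to an illustrative sketch — the real implementation
// lives elsewhere in this package, and the GetChunkData accessor name is an
// assumption about the debuginfopb proto, not a confirmed API:
//
//	type streamReader struct {
//		stream debuginfopb.DebuginfoService_UploadServer
//		buf    []byte
//		size   int64
//	}
//
//	func (r *streamReader) Read(p []byte) (int, error) {
//		if len(r.buf) == 0 {
//			req, err := r.stream.Recv() // returns io.EOF once the client closes
//			if err != nil {
//				return 0, err
//			}
//			r.buf = req.GetChunkData() // assumed accessor
//		}
//		n := copy(p, r.buf)
//		r.buf = r.buf[n:]
//		r.size += int64(n)
//		return n, nil
//	}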
func (s *Store) upload(ctx context.Context, buildID, uploadID string, typ debuginfopb.DebuginfoType, r io.Reader) error | {
if err := validateInput(buildID); err != nil {
return status.Errorf(codes.InvalidArgument, "invalid build ID: %q", err)
}
dbginfo, err := s.metadata.Fetch(ctx, buildID, typ)
if err != nil {
if errors.Is(err, ErrMetadataNotFound) {
return status.Error(codes.FailedPrecondition, "metadata not found, this indicates that the upload was not previously initiated")
}
return status.Error(codes.Internal, err.Error())
}
if dbginfo.Upload == nil {
return status.Error(codes.FailedPrecondition, "upload metadata not found, this indicates that the upload was not previously initiated")
}
if dbginfo.Upload.Id != uploadID {
return status.Error(codes.InvalidArgument, "the upload ID does not match the one returned by the InitiateUpload call")
} | identifier_body |
|
store.go | nil || !dbginfo.Quality.NotValidElf {
if req.Force {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfoAlreadyExistsButForced,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonDebuginfoAlreadyExists,
}, nil
}
if req.Hash == "" {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfoInvalid,
}, nil
}
if dbginfo.Upload.Hash == req.Hash {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonDebuginfoEqual,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfoNotEqual,
}, nil
default:
return nil, status.Error(codes.Internal, "metadata inconsistency: unknown upload state")
}
case debuginfopb.Debuginfo_SOURCE_DEBUGINFOD:
if dbginfo.Quality == nil || !dbginfo.Quality.NotValidElf {
// We already have debuginfo and it is not marked as invalid, so we
// don't need to upload it again.
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonDebuginfodSource,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfodInvalid,
}, nil
default:
return nil, status.Errorf(codes.Internal, "unknown debuginfo source %q", dbginfo.Source)
}
}
}
func (s *Store) InitiateUpload(ctx context.Context, req *debuginfopb.InitiateUploadRequest) (*debuginfopb.InitiateUploadResponse, error) {
ctx, span := s.tracer.Start(ctx, "InitiateUpload")
defer span.End()
span.SetAttributes(attribute.String("build_id", req.BuildId))
if req.Hash == "" {
return nil, status.Error(codes.InvalidArgument, "hash must be set")
}
if req.Size == 0 {
return nil, status.Error(codes.InvalidArgument, "size must be set")
}
// We don't want to blindly accept upload initiation requests that
// shouldn't have happened.
shouldInitiateResp, err := s.ShouldInitiateUpload(ctx, &debuginfopb.ShouldInitiateUploadRequest{
BuildId: req.BuildId,
Hash: req.Hash,
Force: req.Force,
Type: req.Type,
})
if err != nil {
return nil, err
}
if !shouldInitiateResp.ShouldInitiateUpload {
if shouldInitiateResp.Reason == ReasonDebuginfoEqual {
return nil, status.Error(codes.AlreadyExists, ReasonDebuginfoEqual)
}
return nil, status.Errorf(codes.FailedPrecondition, "upload should not have been attempted to be initiated, a previous check should have failed with: %s", shouldInitiateResp.Reason)
}
if req.Size > s.maxUploadSize {
return nil, status.Errorf(codes.InvalidArgument, "upload size %d exceeds maximum allowed size %d", req.Size, s.maxUploadSize)
}
uploadID := uuid.New().String()
uploadStarted := s.timeNow()
uploadExpiry := uploadStarted.Add(s.maxUploadDuration)
if !s.signedUpload.Enabled {
if err := s.metadata.MarkAsUploading(ctx, req.BuildId, uploadID, req.Hash, req.Type, timestamppb.New(uploadStarted)); err != nil {
return nil, fmt.Errorf("mark debuginfo upload as uploading via gRPC: %w", err)
}
return &debuginfopb.InitiateUploadResponse{
UploadInstructions: &debuginfopb.UploadInstructions{
BuildId: req.BuildId,
UploadId: uploadID,
UploadStrategy: debuginfopb.UploadInstructions_UPLOAD_STRATEGY_GRPC,
Type: req.Type,
},
}, nil
}
signedURL, err := s.signedUpload.Client.SignedPUT(ctx, objectPath(req.BuildId, req.Type), req.Size, uploadExpiry)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if err := s.metadata.MarkAsUploading(ctx, req.BuildId, uploadID, req.Hash, req.Type, timestamppb.New(uploadStarted)); err != nil {
return nil, fmt.Errorf("mark debuginfo upload as uploading via signed URL: %w", err)
}
return &debuginfopb.InitiateUploadResponse{
UploadInstructions: &debuginfopb.UploadInstructions{
BuildId: req.BuildId,
UploadId: uploadID,
UploadStrategy: debuginfopb.UploadInstructions_UPLOAD_STRATEGY_SIGNED_URL,
SignedUrl: signedURL,
Type: req.Type,
},
}, nil
}
func (s *Store) MarkUploadFinished(ctx context.Context, req *debuginfopb.MarkUploadFinishedRequest) (*debuginfopb.MarkUploadFinishedResponse, error) {
ctx, span := s.tracer.Start(ctx, "MarkUploadFinished")
defer span.End()
span.SetAttributes(attribute.String("build_id", req.BuildId))
span.SetAttributes(attribute.String("upload_id", req.UploadId))
buildID := req.BuildId
if err := validateInput(buildID); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
err := s.metadata.MarkAsUploaded(ctx, buildID, req.UploadId, req.Type, timestamppb.New(s.timeNow()))
if errors.Is(err, ErrDebuginfoNotFound) {
return nil, status.Error(codes.NotFound, "no debuginfo metadata found for build id")
}
if errors.Is(err, ErrUploadMetadataNotFound) {
return nil, status.Error(codes.NotFound, "no debuginfo upload metadata found for build id")
}
if errors.Is(err, ErrUploadIDMismatch) {
return nil, status.Error(codes.InvalidArgument, "upload id mismatch")
}
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &debuginfopb.MarkUploadFinishedResponse{}, nil
}
func (s *Store) Upload(stream debuginfopb.DebuginfoService_UploadServer) error {
if s.signedUpload.Enabled {
return status.Error(codes.Unimplemented, "signed URL uploads are the only supported upload strategy for this service")
}
req, err := stream.Recv()
if err != nil {
return status.Errorf(codes.Unknown, "failed to receive upload info: %q", err)
}
var (
buildID = req.GetInfo().BuildId
uploadID = req.GetInfo().UploadId
r = &UploadReader{stream: stream}
typ = req.GetInfo().Type
)
ctx, span := s.tracer.Start(stream.Context(), "Upload")
defer span.End()
span.SetAttributes(attribute.String("build_id", buildID))
span.SetAttributes(attribute.String("upload_id", uploadID))
if err := s.upload(ctx, buildID, uploadID, typ, r); err != nil {
return err
}
return stream.SendAndClose(&debuginfopb.UploadResponse{
BuildId: buildID,
Size: r.size,
})
}
func (s *Store) upload(ctx context.Context, buildID, uploadID string, typ debuginfopb.DebuginfoType, r io.Reader) error {
if err := validateInput(buildID); err != nil {
return status.Errorf(codes.InvalidArgument, "invalid build ID: %q", err)
}
dbginfo, err := s.metadata.Fetch(ctx, buildID, typ)
if err != nil {
if errors.Is(err, ErrMetadataNotFound) {
return status.Error(codes.FailedPrecondition, "metadata not found, this indicates that the upload was not previously initiated")
}
return status.Error(codes.Internal, err.Error())
}
if dbginfo.Upload == nil {
return status.Error(codes.FailedPrecondition, "upload metadata not found, this indicates that the upload was not previously initiated")
}
if dbginfo.Upload.Id != uploadID {
return status.Error(codes.InvalidArgument, "the upload ID does not match the one returned by the InitiateUpload call")
}
if err := s.bucket.Upload(ctx, objectPath(buildID, typ), r); err != nil {
return status.Error(codes.Internal, fmt.Errorf("upload debuginfo: %w", err).Error())
}
return nil
}
func (s *Store) uploadIsStale(upload *debuginfopb.DebuginfoUpload) bool {
return upload.StartedAt.AsTime().Add(s.maxUploadDuration + 2*time.Minute).Before(s.timeNow())
}
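// Worked example (illustrative): with maxUploadDuration = 15*time.Minute, an
// upload whose StartedAt is 10:00:00 blocks retries until 10:17:00 — the
// allowed upload window plus a 2-minute grace period for the client's
// MarkUploadFinished call to arrive.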
func validateInput(id string) error {
_, err := hex.DecodeString(id)
if err != nil {
return fmt.Errorf("failed to validate input: %w", err)
}
if len(id) <= 2 {
return errors.New("unexpectedly short input")
}
return nil
}
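// Examples (illustrative): "aabbcc" passes; "ab" decodes but is rejected as
// unexpectedly short; "abc" fails hex decoding (odd length) and "xyz" fails
// it outright — note the decode check runs before the length check, so
// malformed short inputs surface as decode errors.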
func | objectPath | identifier_name |
|
store.go | odInvalid = "Debuginfo is available from debuginfod already but is marked as invalid, therefore a new upload is needed."
)
// ShouldInitiateUpload returns whether an upload should be initiated for the
// given build ID. Checking if an upload should even be initiated allows the
// parca-agent to avoid extracting debuginfos unnecessarily from a binary.
func (s *Store) ShouldInitiateUpload(ctx context.Context, req *debuginfopb.ShouldInitiateUploadRequest) (*debuginfopb.ShouldInitiateUploadResponse, error) {
ctx, span := s.tracer.Start(ctx, "ShouldInitiateUpload")
defer span.End()
span.SetAttributes(attribute.String("build_id", req.BuildId))
buildID := req.BuildId
if err := validateInput(buildID); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
dbginfo, err := s.metadata.Fetch(ctx, buildID, req.Type)
if err != nil && !errors.Is(err, ErrMetadataNotFound) {
return nil, status.Error(codes.Internal, err.Error())
} else if errors.Is(err, ErrMetadataNotFound) {
// First time we see this Build ID.
existsInDebuginfods, err := s.debuginfodClients.Exists(ctx, buildID)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if len(existsInDebuginfods) > 0 {
if err := s.metadata.MarkAsDebuginfodSource(ctx, existsInDebuginfods, buildID, req.Type); err != nil {
return nil, status.Error(codes.Internal, fmt.Errorf("mark Build ID to be available from debuginfod: %w", err).Error())
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonDebuginfoInDebuginfod,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonFirstTimeSeen,
}, nil
} else {
// We have seen this Build ID before and there is metadata for it.
switch dbginfo.Source {
case debuginfopb.Debuginfo_SOURCE_UPLOAD:
if dbginfo.Upload == nil {
return nil, status.Error(codes.Internal, "metadata inconsistency: upload is nil")
}
switch dbginfo.Upload.State {
case debuginfopb.DebuginfoUpload_STATE_UPLOADING:
if s.uploadIsStale(dbginfo.Upload) {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonUploadStale,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonUploadInProgress,
}, nil
case debuginfopb.DebuginfoUpload_STATE_UPLOADED:
if dbginfo.Quality == nil || !dbginfo.Quality.NotValidElf {
if req.Force {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfoAlreadyExistsButForced,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonDebuginfoAlreadyExists,
}, nil
}
if req.Hash == "" {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfoInvalid,
}, nil
}
if dbginfo.Upload.Hash == req.Hash {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonDebuginfoEqual,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfoNotEqual,
}, nil
default:
return nil, status.Error(codes.Internal, "metadata inconsistency: unknown upload state")
}
case debuginfopb.Debuginfo_SOURCE_DEBUGINFOD:
if dbginfo.Quality == nil || !dbginfo.Quality.NotValidElf {
// We already have debuginfo and it is not marked as invalid, so we
// don't need to upload it again.
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonDebuginfodSource,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfodInvalid,
}, nil
default:
return nil, status.Errorf(codes.Internal, "unknown debuginfo source %q", dbginfo.Source)
}
}
}
func (s *Store) InitiateUpload(ctx context.Context, req *debuginfopb.InitiateUploadRequest) (*debuginfopb.InitiateUploadResponse, error) {
ctx, span := s.tracer.Start(ctx, "InitiateUpload")
defer span.End()
span.SetAttributes(attribute.String("build_id", req.BuildId))
if req.Hash == "" {
return nil, status.Error(codes.InvalidArgument, "hash must be set")
}
if req.Size == 0 {
return nil, status.Error(codes.InvalidArgument, "size must be set")
}
// We don't want to blindly accept upload initiation requests that
// shouldn't have happened.
shouldInitiateResp, err := s.ShouldInitiateUpload(ctx, &debuginfopb.ShouldInitiateUploadRequest{
BuildId: req.BuildId,
Hash: req.Hash,
Force: req.Force,
Type: req.Type,
})
if err != nil {
return nil, err
}
if !shouldInitiateResp.ShouldInitiateUpload {
if shouldInitiateResp.Reason == ReasonDebuginfoEqual {
return nil, status.Error(codes.AlreadyExists, ReasonDebuginfoEqual)
}
return nil, status.Errorf(codes.FailedPrecondition, "upload should not have been attempted to be initiated, a previous check should have failed with: %s", shouldInitiateResp.Reason)
}
if req.Size > s.maxUploadSize {
return nil, status.Errorf(codes.InvalidArgument, "upload size %d exceeds maximum allowed size %d", req.Size, s.maxUploadSize)
}
uploadID := uuid.New().String()
uploadStarted := s.timeNow()
uploadExpiry := uploadStarted.Add(s.maxUploadDuration)
if !s.signedUpload.Enabled {
if err := s.metadata.MarkAsUploading(ctx, req.BuildId, uploadID, req.Hash, req.Type, timestamppb.New(uploadStarted)); err != nil {
return nil, fmt.Errorf("mark debuginfo upload as uploading via gRPC: %w", err)
}
return &debuginfopb.InitiateUploadResponse{
UploadInstructions: &debuginfopb.UploadInstructions{
BuildId: req.BuildId,
UploadId: uploadID,
UploadStrategy: debuginfopb.UploadInstructions_UPLOAD_STRATEGY_GRPC,
Type: req.Type,
},
}, nil
}
signedURL, err := s.signedUpload.Client.SignedPUT(ctx, objectPath(req.BuildId, req.Type), req.Size, uploadExpiry)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if err := s.metadata.MarkAsUploading(ctx, req.BuildId, uploadID, req.Hash, req.Type, timestamppb.New(uploadStarted)); err != nil {
return nil, fmt.Errorf("mark debuginfo upload as uploading via signed URL: %w", err)
}
return &debuginfopb.InitiateUploadResponse{
UploadInstructions: &debuginfopb.UploadInstructions{
BuildId: req.BuildId,
UploadId: uploadID,
UploadStrategy: debuginfopb.UploadInstructions_UPLOAD_STRATEGY_SIGNED_URL,
SignedUrl: signedURL,
Type: req.Type,
},
}, nil
}
func (s *Store) MarkUploadFinished(ctx context.Context, req *debuginfopb.MarkUploadFinishedRequest) (*debuginfopb.MarkUploadFinishedResponse, error) {
ctx, span := s.tracer.Start(ctx, "MarkUploadFinished")
defer span.End()
span.SetAttributes(attribute.String("build_id", req.BuildId))
span.SetAttributes(attribute.String("upload_id", req.UploadId))
buildID := req.BuildId
if err := validateInput(buildID); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
err := s.metadata.MarkAsUploaded(ctx, buildID, req.UploadId, req.Type, timestamppb.New(s.timeNow()))
if errors.Is(err, ErrDebuginfoNotFound) {
return nil, status.Error(codes.NotFound, "no debuginfo metadata found for build id")
}
if errors.Is(err, ErrUploadMetadataNotFound) {
return nil, status.Error(codes.NotFound, "no debuginfo upload metadata found for build id")
}
if errors.Is(err, ErrUploadIDMismatch) | {
return nil, status.Error(codes.InvalidArgument, "upload id mismatch")
} | conditional_block |
|
store.go | debuginfodClients DebuginfodClients
signedUpload SignedUpload
maxUploadDuration time.Duration
maxUploadSize int64
timeNow func() time.Time
}
type SignedUploadClient interface {
SignedPUT(ctx context.Context, objectKey string, size int64, expiry time.Time) (signedURL string, err error)
}
type SignedUpload struct {
Enabled bool
Client SignedUploadClient
}
// NewStore returns a new debug info store.
func NewStore(
tracer trace.Tracer,
logger log.Logger,
metadata MetadataManager,
bucket objstore.Bucket,
debuginfodClients DebuginfodClients,
signedUpload SignedUpload,
maxUploadDuration time.Duration,
maxUploadSize int64,
) (*Store, error) {
return &Store{
tracer: tracer,
logger: log.With(logger, "component", "debuginfo"),
bucket: bucket,
metadata: metadata,
debuginfodClients: debuginfodClients,
signedUpload: signedUpload,
maxUploadDuration: maxUploadDuration,
maxUploadSize: maxUploadSize,
timeNow: time.Now,
}, nil
}
const (
ReasonDebuginfoInDebuginfod = "Debuginfo exists in debuginfod, therefore no upload is necessary."
ReasonFirstTimeSeen = "First time we see this Build ID, and it does not exist in debuginfod, therefore please upload!"
ReasonUploadStale = "A previous upload was started but not finished and is now stale, so it can be retried."
ReasonUploadInProgress = "A previous upload is still in-progress and not stale yet (only stale uploads can be retried)."
ReasonDebuginfoAlreadyExists = "Debuginfo already exists and is not marked as invalid, therefore no new upload is needed."
ReasonDebuginfoAlreadyExistsButForced = "Debuginfo already exists and is not marked as invalid, therefore wouldn't have accepted a new upload, but accepting it because it's requested to be forced."
ReasonDebuginfoInvalid = "Debuginfo already exists but is marked as invalid, therefore a new upload is needed. Hash the debuginfo and initiate the upload."
ReasonDebuginfoEqual = "Debuginfo already exists and is marked as invalid, but the proposed hash is the same as the one already available, therefore the upload is not accepted as it would result in the same invalid debuginfos."
ReasonDebuginfoNotEqual = "Debuginfo already exists but is marked as invalid, therefore a new upload will be accepted."
ReasonDebuginfodSource = "Debuginfo is available from debuginfod already and not marked as invalid, therefore no new upload is needed."
ReasonDebuginfodInvalid = "Debuginfo is available from debuginfod already but is marked as invalid, therefore a new upload is needed."
)
// ShouldInitiateUpload returns whether an upload should be initiated for the
// given build ID. Checking if an upload should even be initiated allows the
// parca-agent to avoid extracting debuginfos unnecessarily from a binary.
func (s *Store) ShouldInitiateUpload(ctx context.Context, req *debuginfopb.ShouldInitiateUploadRequest) (*debuginfopb.ShouldInitiateUploadResponse, error) {
ctx, span := s.tracer.Start(ctx, "ShouldInitiateUpload")
defer span.End()
span.SetAttributes(attribute.String("build_id", req.BuildId))
buildID := req.BuildId
if err := validateInput(buildID); err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
dbginfo, err := s.metadata.Fetch(ctx, buildID, req.Type)
if err != nil && !errors.Is(err, ErrMetadataNotFound) {
return nil, status.Error(codes.Internal, err.Error())
} else if errors.Is(err, ErrMetadataNotFound) {
// First time we see this Build ID.
existsInDebuginfods, err := s.debuginfodClients.Exists(ctx, buildID)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if len(existsInDebuginfods) > 0 {
if err := s.metadata.MarkAsDebuginfodSource(ctx, existsInDebuginfods, buildID, req.Type); err != nil {
return nil, status.Error(codes.Internal, fmt.Errorf("mark Build ID to be available from debuginfod: %w", err).Error())
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonDebuginfoInDebuginfod,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonFirstTimeSeen,
}, nil
} else {
// We have seen this Build ID before and there is metadata for it.
switch dbginfo.Source {
case debuginfopb.Debuginfo_SOURCE_UPLOAD:
if dbginfo.Upload == nil {
return nil, status.Error(codes.Internal, "metadata inconsistency: upload is nil")
}
switch dbginfo.Upload.State {
case debuginfopb.DebuginfoUpload_STATE_UPLOADING:
if s.uploadIsStale(dbginfo.Upload) {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonUploadStale,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonUploadInProgress,
}, nil
case debuginfopb.DebuginfoUpload_STATE_UPLOADED:
if dbginfo.Quality == nil || !dbginfo.Quality.NotValidElf {
if req.Force {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfoAlreadyExistsButForced,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonDebuginfoAlreadyExists,
}, nil
}
if req.Hash == "" {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfoInvalid,
}, nil
}
if dbginfo.Upload.Hash == req.Hash {
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonDebuginfoEqual,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfoNotEqual,
}, nil
default:
return nil, status.Error(codes.Internal, "metadata inconsistency: unknown upload state")
}
case debuginfopb.Debuginfo_SOURCE_DEBUGINFOD:
if dbginfo.Quality == nil || !dbginfo.Quality.NotValidElf {
// We already have debuginfo and it is not marked as invalid, so we
// don't need to upload it again.
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: false,
Reason: ReasonDebuginfodSource,
}, nil
}
return &debuginfopb.ShouldInitiateUploadResponse{
ShouldInitiateUpload: true,
Reason: ReasonDebuginfodInvalid,
}, nil
default:
return nil, status.Errorf(codes.Internal, "unknown debuginfo source %q", dbginfo.Source)
}
}
}
func (s *Store) InitiateUpload(ctx context.Context, req *debuginfopb.InitiateUploadRequest) (*debuginfopb.InitiateUploadResponse, error) {
ctx, span := s.tracer.Start(ctx, "InitiateUpload")
defer span.End()
span.SetAttributes(attribute.String("build_id", req.BuildId))
if req.Hash == "" {
return nil, status.Error(codes.InvalidArgument, "hash must be set")
}
if req.Size == 0 {
return nil, status.Error(codes.InvalidArgument, "size must be set")
}
// We don't want to blindly accept upload initiation requests that
// shouldn't have happened.
shouldInitiateResp, err := s.ShouldInitiateUpload(ctx, &debuginfopb.ShouldInitiateUploadRequest{
BuildId: req.BuildId,
Hash: req.Hash,
Force: req.Force,
Type: req.Type, | if err != nil {
return nil, err
}
if !shouldInitiateResp.ShouldInitiateUpload {
if shouldInitiateResp.Reason == ReasonDebuginfoEqual {
return nil, status.Error(codes.AlreadyExists, ReasonDebuginfoEqual)
}
return nil, status.Errorf(codes.FailedPrecondition, "upload should not have been attempted to be initiated, a previous check should have failed with: %s", shouldInitiateResp.Reason)
}
if req.Size > s.maxUploadSize {
return nil, status.Errorf(codes.InvalidArgument, "upload size %d exceeds maximum allowed size %d", req.Size, s.maxUploadSize)
}
uploadID := uuid.New().String()
uploadStarted := s.timeNow()
uploadExpiry := uploadStarted.Add(s.maxUploadDuration)
if !s.signedUpload.Enabled {
if err := | }) | random_line_split |
server.go | .Code() != codes.Unavailable || errStatus.Message() != "transport is closing" {
errs <- xerrors.Errorf("failed to stop agent on host %s : %w", conn.Hostname, err)
}
}()
}
wg.Wait()
close(errs)
var multiErr *multierror.Error
for err := range errs {
multiErr = multierror.Append(multiErr, err)
}
return multiErr.ErrorOrNil()
}
func (s *Server) Stop(closeAgentConns bool) {
s.mu.Lock()
defer s.mu.Unlock()
// StopServices calls Stop(false) because it has already closed the agentConns
if closeAgentConns {
s.closeAgentConns()
}
if s.server != nil {
s.server.Stop()
<-s.stopped // block until it is OK to stop
}
// Mark this server stopped so that a concurrent Start() doesn't try to
// start things up again.
s.stopped = nil
}
func (s *Server) RestartAgents(ctx context.Context, in *idl.RestartAgentsRequest) (*idl.RestartAgentsReply, error) {
restartedHosts, err := RestartAgents(ctx, nil, AgentHosts(s.Source), s.AgentPort, s.StateDir)
return &idl.RestartAgentsReply{AgentHosts: restartedHosts}, err
}
func RestartAgents(ctx context.Context,
dialer func(context.Context, string) (net.Conn, error),
hostnames []string,
port int,
stateDir string) ([]string, error) {
var wg sync.WaitGroup
restartedHosts := make(chan string, len(hostnames))
errs := make(chan error, len(hostnames))
for _, host := range hostnames {
wg.Add(1)
go func(host string) {
defer wg.Done()
address := host + ":" + strconv.Itoa(port)
timeoutCtx, cancelFunc := context.WithTimeout(ctx, 3*time.Second)
opts := []grpc.DialOption{
grpc.WithBlock(),
grpc.WithInsecure(),
grpc.FailOnNonTempDialError(true),
}
if dialer != nil {
opts = append(opts, grpc.WithContextDialer(dialer))
}
conn, err := grpc.DialContext(timeoutCtx, address, opts...)
cancelFunc()
if err == nil {
err = conn.Close()
if err != nil {
gplog.Error("failed to close agent connection to %s: %+v", host, err)
}
return
}
gplog.Debug("failed to dial agent on %s: %+v", host, err)
gplog.Info("starting agent on %s", host)
agentPath, err := getAgentPath()
if err != nil {
errs <- err
return
}
cmd := execCommand("ssh", host,
fmt.Sprintf("bash -c \"%s agent --daemonize --port %d --state-directory %s\"", agentPath, port, stateDir))
stdout, err := cmd.Output()
if err != nil {
errs <- err
return
}
gplog.Debug(string(stdout))
restartedHosts <- host
}(host)
}
wg.Wait()
close(errs)
close(restartedHosts)
var hosts []string
for h := range restartedHosts {
hosts = append(hosts, h)
}
var multiErr *multierror.Error
for err := range errs {
multiErr = multierror.Append(multiErr, err)
}
return hosts, multiErr.ErrorOrNil()
}
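// RestartAgents (and the agent-stop path above) share one fan-out shape: a
// goroutine per host, channels buffered to len(hosts) so no sender can block,
// wg.Wait() before close, then a drain into a multierror. The pattern
// isolated as an illustrative sketch (assumes "sync" and
// "github.com/hashicorp/go-multierror" imports):
//
//	func fanOut(hosts []string, do func(host string) error) error {
//		var wg sync.WaitGroup
//		errs := make(chan error, len(hosts))
//		for _, host := range hosts {
//			wg.Add(1)
//			go func(host string) {
//				defer wg.Done()
//				if err := do(host); err != nil {
//					errs <- err
//				}
//			}(host)
//		}
//		wg.Wait()
//		close(errs)
//		var multiErr *multierror.Error
//		for err := range errs {
//			multiErr = multierror.Append(multiErr, err)
//		}
//		return multiErr.ErrorOrNil()
//	}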
func (s *Server) AgentConns() ([]*Connection, error) {
// Lock the mutex to protect against races with Server.Stop().
// XXX This is a *ridiculously* broad lock. Have fun waiting for the dial
// timeout when calling Stop() and AgentConns() at the same time, for
// instance. We should not lock around a network operation, but it seems
// like the AgentConns concept is not long for this world anyway.
s.mu.Lock()
defer s.mu.Unlock()
if s.agentConns != nil {
err := EnsureConnsAreReady(s.agentConns)
if err != nil {
gplog.Error("ensureConnsAreReady failed: %s", err)
return nil, err
}
return s.agentConns, nil
}
hostnames := AgentHosts(s.Source)
for _, host := range hostnames {
ctx, cancelFunc := context.WithTimeout(context.Background(), DialTimeout)
conn, err := s.grpcDialer(ctx,
host+":"+strconv.Itoa(s.AgentPort),
grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
err = xerrors.Errorf("grpcDialer failed: %w", err)
gplog.Error(err.Error())
cancelFunc()
return nil, err
}
s.agentConns = append(s.agentConns, &Connection{
Conn: conn,
AgentClient: idl.NewAgentClient(conn),
Hostname: host,
CancelContext: cancelFunc,
})
}
return s.agentConns, nil
}
func EnsureConnsAreReady(agentConns []*Connection) error {
hostnames := []string{}
for _, conn := range agentConns {
if conn.Conn.GetState() != connectivity.Ready {
hostnames = append(hostnames, conn.Hostname)
}
}
if len(hostnames) > 0 {
return fmt.Errorf("the connections to the following hosts were not ready: %s", strings.Join(hostnames, ","))
}
return nil
}
// Closes all s.agentConns. Callers must hold the Server's mutex.
// TODO: this function assumes that all s.agentConns are _not_ in a terminal
// state (e.g. already closed). If so, conn.Conn.WaitForStateChange() can block
// indefinitely.
func (s *Server) closeAgentConns() {
for _, conn := range s.agentConns {
defer conn.CancelContext()
currState := conn.Conn.GetState()
err := conn.Conn.Close()
if err != nil {
gplog.Info(fmt.Sprintf("Error closing hub to agent connection. host: %s, err: %s", conn.Hostname, err.Error()))
}
conn.Conn.WaitForStateChange(context.Background(), currState)
}
}
type InitializeConfig struct {
Standby greenplum.SegConfig
Master greenplum.SegConfig
Primaries []greenplum.SegConfig
Mirrors []greenplum.SegConfig
}
// Config contains all the information that will be persisted to/loaded from
// from disk during calls to Save() and Load().
type Config struct {
Source *greenplum.Cluster
Target *greenplum.Cluster
// TargetInitializeConfig contains all the info needed to initialize the
// target cluster's master, standby, primaries and mirrors.
TargetInitializeConfig InitializeConfig
Port int
AgentPort int
UseLinkMode bool
UpgradeID upgrade.ID
// Tablespaces contains the tablespace in the database keyed by
// dbid and tablespace oid
Tablespaces greenplum.Tablespaces
TablespacesMappingFilePath string
}
func (c *Config) Load(r io.Reader) error {
dec := json.NewDecoder(r)
return dec.Decode(c)
}
func (c *Config) Save(w io.Writer) error {
enc := json.NewEncoder(w)
enc.SetIndent("", " ")
return enc.Encode(c)
}
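// Round-trip sketch (illustrative): Save emits indented JSON and Load decodes
// it back, so any io.Reader/io.Writer pair works — e.g. a bytes.Buffer in
// tests:
//
//	var buf bytes.Buffer
//	if err := conf.Save(&buf); err != nil {
//		// handle error
//	}
//	var loaded Config
//	if err := loaded.Load(&buf); err != nil {
//		// handle error
//	}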
// SaveConfig persists the hub's configuration to disk.
func (s *Server) SaveConfig() (err error) {
// TODO: Switch to an atomic implementation like renameio. Consider what
// happens if Config.Save() panics: we'll have truncated the file
// on disk and the hub will be unable to recover. For now, since we normally
// only save the configuration during initialize and any configuration
// errors could be fixed by reinitializing, the risk seems small.
file, err := utils.System.Create(upgrade.GetConfigFile())
if err != nil {
return err
}
defer func() {
if cerr := file.Close(); cerr != nil {
cerr = xerrors.Errorf("closing hub configuration: %w", cerr)
err = multierror.Append(err, cerr).ErrorOrNil()
}
}()
err = s.Config.Save(file)
if err != nil {
return xerrors.Errorf("saving hub configuration: %w", err)
}
return nil
}
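// The deferred-close idiom above, isolated (illustrative): a named return
// value lets the deferred function fold a Close error into the function's
// result instead of dropping it on the floor (assumes "io", "os", and
// go-multierror imports):
//
//	func writeFile(path string, write func(io.Writer) error) (err error) {
//		f, err := os.Create(path)
//		if err != nil {
//			return err
//		}
//		defer func() {
//			if cerr := f.Close(); cerr != nil {
//				err = multierror.Append(err, cerr).ErrorOrNil()
//			}
//		}()
//		return write(f)
//	}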
func LoadConfig(conf *Config, path string) error {
file, err := os.Open(path)
if err != nil {
return xerrors.Errorf("opening configuration file: %w", err)
}
defer file.Close()
err = conf.Load(file)
if err != nil {
return xerrors.Errorf("reading configuration file: %w", err)
}
return nil
}
func AgentHosts(c *greenplum.Cluster) []string {
uniqueHosts := make(map[string]bool)
excludingMaster := func(seg *greenplum.SegConfig) bool {
return !seg.IsMaster()
}
for _, seg := range c.SelectSegments(excludingMaster) {
uniqueHosts[seg.Hostname] = true
}
hosts := make([]string, 0)
for host := range uniqueHosts {
hosts = append(hosts, host)
}
return hosts
}
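// Example (illustrative): a cluster with a master on mdw, primaries on sdw1
// and sdw2, and a mirror also on sdw1 yields []string{"sdw1", "sdw2"} — the
// master host is excluded and duplicates collapse. Ordering is unspecified,
// since Go map iteration order is randomized.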
func | MakeTargetClusterMessage | identifier_name |