file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
backend.rs | fn consolidate(&mut self, other: Self);
}
impl Consolidate for () {
fn consolidate(&mut self, _: Self) {
()
}
}
impl Consolidate for Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> {
fn consolidate(&mut self, mut other: Self) {
self.append(&mut other);
}
}
impl<H: Hasher, KF: trie::KeyFunction<H>> Consolidate for trie::GenericMemoryDB<H, KF> {
fn consolidate(&mut self, other: Self) {
trie::GenericMemoryDB::consolidate(self, other)
}
}
/// Error impossible.
// FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121
#[derive(Debug)]
pub enum Void {}
impl fmt::Display for Void {
fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {
match *self {}
}
}
impl error::Error for Void {
fn description(&self) -> &str { "unreachable error" }
}
/// In-memory backend. Fully recomputes tries on each commit but useful for
/// tests.
pub struct InMemory<H: Hasher> {
inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>,
trie: Option<TrieBackend<MemoryDB<H>, H>>,
_hasher: PhantomData<H>,
}
impl<H: Hasher> Default for InMemory<H> {
fn default() -> Self {
InMemory {
inner: Default::default(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> Clone for InMemory<H> {
fn clone(&self) -> Self {
InMemory {
inner: self.inner.clone(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> PartialEq for InMemory<H> {
fn eq(&self, other: &Self) -> bool {
self.inner.eq(&other.inner)
}
}
impl<H: Hasher> InMemory<H> {
/// Copy the state, with applied updates
pub fn update(&self, changes: <Self as Backend<H>>::Transaction) -> Self {
let mut inner: HashMap<_, _> = self.inner.clone();
for (storage_key, key, val) in changes {
match val {
Some(v) => { inner.entry(storage_key).or_default().insert(key, v); },
None => { inner.entry(storage_key).or_default().remove(&key); },
}
}
inner.into()
}
}
impl<H: Hasher> From<HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>> for InMemory<H> {
fn from(inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>) -> Self {
InMemory {
inner: inner,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> {
fn from(inner: HashMap<Vec<u8>, Vec<u8>>) -> Self {
let mut expanded = HashMap::new();
expanded.insert(None, inner);
InMemory {
inner: expanded,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>> for InMemory<H> {
fn from(inner: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>) -> Self {
let mut expanded: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>> = HashMap::new();
for (child_key, key, value) in inner {
if let Some(value) = value {
expanded.entry(child_key).or_default().insert(key, value);
}
}
expanded.into()
}
}
impl super::Error for Void {}
impl<H: Hasher> InMemory<H> {
/// child storage key iterator
pub fn child_storage_keys(&self) -> impl Iterator<Item=&[u8]> {
self.inner.iter().filter_map(|item| item.0.as_ref().map(|v|&v[..]))
}
}
impl<H: Hasher> Backend<H> for InMemory<H> {
type Error = Void;
type Transaction = Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>;
type TrieBackendStorage = MemoryDB<H>;
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone)))
}
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(&Some(storage_key.to_vec())).and_then(|map| map.get(key).map(Clone::clone)))
}
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false))
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
self.inner.get(&None).map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f));
}
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], mut f: F) {
self.inner.get(&Some(storage_key.to_vec())).map(|map| map.keys().for_each(|k| f(&k)));
}
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
<H as Hasher>::Out: Ord,
{
let existing_pairs = self.inner.get(&None)
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone()))));
let transaction: Vec<_> = delta.into_iter().collect();
let root = Layout::<H>::trie_root(existing_pairs.chain(transaction.iter().cloned())
.collect::<HashMap<_, _>>()
.into_iter()
.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
);
let full_transaction = transaction.into_iter().map(|(k, v)| (None, k, v)).collect();
(root, full_transaction)
}
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord
{
let storage_key = storage_key.to_vec();
let existing_pairs = self.inner.get(&Some(storage_key.clone()))
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone()))));
let transaction: Vec<_> = delta.into_iter().collect();
let root = child_trie_root::<Layout<H>, _, _, _>(
&storage_key,
existing_pairs.chain(transaction.iter().cloned())
.collect::<HashMap<_, _>>()
.into_iter()
.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
);
let full_transaction = transaction.into_iter().map(|(k, v)| (Some(storage_key.clone()), k, v)).collect();
let is_default = root == default_child_trie_root::<Layout<H>>(&storage_key);
(root, is_default, full_transaction)
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
self.inner.get(&None)
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone())))
.collect()
}
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
self.inner.get(&None)
.into_iter()
.flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned())
.collect()
}
fn child_keys(&self, storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> {
self.inner.get(&Some(storage_key.to_vec()))
.into_iter()
.flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned())
.collect()
}
fn as_trie_backend(&mut self)-> Option<&TrieBackend<Self::TrieBackendStorage, H>> {
let mut mdb = MemoryDB::default();
let mut root = None;
let mut new_child_roots = Vec::new();
let mut root_map = None;
for (storage_key, map) in &self.inner {
if let Some(storage_key) = storage_key.as_ref() | {
let ch = insert_into_memory_db::<H, _>(&mut mdb, map.clone().into_iter())?;
new_child_roots.push((storage_key.clone(), ch.as_ref().into()));
} | conditional_block |
|
backend.rs | fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.child_storage(storage_key, key)?.is_some())
}
/// Retrieve all entry keys of child storage and call `f` for each of those keys.
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F);
/// Retrieve all entry keys which start with the given prefix and
/// call `f` for each of those keys.
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F);
/// Calculate the storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit.
/// Does not include child storage updates.
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Calculate the child storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit. The second argument
/// is true if child storage root equals default storage root.
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Get all key/value pairs into a Vec.
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)>;
/// Get all keys with given prefix
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
let mut all = Vec::new();
self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec()));
all
}
/// Get all keys of child storage with given prefix
fn child_keys(&self, child_storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> {
let mut all = Vec::new();
self.for_keys_in_child_storage(child_storage_key, |k| {
if k.starts_with(prefix) {
all.push(k.to_vec());
}
});
all
}
/// Try to convert into a trie backend.
fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>>;
/// Calculate the storage root, with given delta over what is already stored
/// in the backend, and produce a "transaction" that can be used to commit.
/// Does include child storage updates.
fn full_storage_root<I1, I2i, I2>(
&self,
delta: I1,
child_deltas: I2)
-> (H::Out, Self::Transaction)
where
I1: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
I2i: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
I2: IntoIterator<Item=(Vec<u8>, I2i)>,
<H as Hasher>::Out: Ord,
{
let mut txs: Self::Transaction = Default::default();
let mut child_roots: Vec<_> = Default::default();
// child first
for (storage_key, child_delta) in child_deltas {
let (child_root, empty, child_txs) =
self.child_storage_root(&storage_key[..], child_delta);
txs.consolidate(child_txs);
if empty {
child_roots.push((storage_key, None));
} else {
child_roots.push((storage_key, Some(child_root)));
}
}
let (root, parent_txs) = self.storage_root(
delta.into_iter().chain(child_roots.into_iter())
);
txs.consolidate(parent_txs);
(root, txs)
}
}
/// Trait that allows consolidating two transactions together.
pub trait Consolidate {
/// Consolidate two transactions into one.
fn consolidate(&mut self, other: Self);
}
impl Consolidate for () {
fn consolidate(&mut self, _: Self) {
()
}
}
impl Consolidate for Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> {
fn consolidate(&mut self, mut other: Self) {
self.append(&mut other);
}
}
impl<H: Hasher, KF: trie::KeyFunction<H>> Consolidate for trie::GenericMemoryDB<H, KF> {
fn consolidate(&mut self, other: Self) {
trie::GenericMemoryDB::consolidate(self, other)
}
}
/// Error impossible.
// FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121
#[derive(Debug)]
pub enum Void {}
impl fmt::Display for Void {
fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {
match *self {}
}
}
impl error::Error for Void {
fn description(&self) -> &str { "unreachable error" }
}
/// In-memory backend. Fully recomputes tries on each commit but useful for
/// tests.
pub struct InMemory<H: Hasher> {
inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>,
trie: Option<TrieBackend<MemoryDB<H>, H>>,
_hasher: PhantomData<H>,
}
impl<H: Hasher> Default for InMemory<H> {
fn default() -> Self {
InMemory {
inner: Default::default(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> Clone for InMemory<H> {
fn clone(&self) -> Self {
InMemory {
inner: self.inner.clone(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> PartialEq for InMemory<H> {
fn eq(&self, other: &Self) -> bool {
self.inner.eq(&other.inner)
}
}
impl<H: Hasher> InMemory<H> {
/// Copy the state, with applied updates
pub fn update(&self, changes: <Self as Backend<H>>::Transaction) -> Self {
let mut inner: HashMap<_, _> = self.inner.clone();
for (storage_key, key, val) in changes {
match val {
Some(v) => { inner.entry(storage_key).or_default().insert(key, v); },
None => { inner.entry(storage_key).or_default().remove(&key); },
}
}
inner.into()
}
}
impl<H: Hasher> From<HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>> for InMemory<H> {
fn from(inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>) -> Self {
InMemory {
inner: inner,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> {
fn from(inner: HashMap<Vec<u8>, Vec<u8>>) -> Self {
let mut expanded = HashMap::new();
expanded.insert(None, inner);
InMemory {
inner: expanded,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>> for InMemory<H> {
fn from(inner: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>) -> Self {
let mut expanded: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>> = HashMap::new();
for (child_key, key, value) in inner {
if let Some(value) = value {
expanded.entry(child_key).or_default().insert(key, value);
}
}
expanded.into()
}
}
impl super::Error for Void {}
impl<H: Hasher> InMemory<H> {
/// child storage key iterator
pub fn child_storage_keys(&self) -> impl Iterator<Item=&[u8]> {
self.inner.iter().filter_map(|item| item.0.as_ref().map(|v|&v[..]))
}
}
impl<H: Hasher> Backend<H> for InMemory<H> {
type Error = Void;
type Transaction = Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>;
type TrieBackendStorage = MemoryDB<H>;
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone)))
}
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(&Some(storage_key.to_vec())).and_then(|map| map.get(key).map(Clone::clone)))
}
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> | {
Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false))
} | identifier_body |
|
manager.go | := m.Get()
if r != nil {
proto.UpdateTime = timeutil.Marshal64(r.updateTime)
proto.RankHero = make([]*server_proto.XuanyuanRankHeroProto, 0, len(r.rankHeros))
for _, v := range r.rankHeros {
_, mirror := v.GetMirror()
proto.RankHero = append(proto.RankHero, &server_proto.XuanyuanRankHeroProto{
HeroId: v.heroId,
Score: v.score.Load(),
RankScore: v.rankScore,
Win: v.win.Load(),
Lose: v.lose.Load(),
Mirror: mirror,
})
}
}
m.RLock()
defer m.RUnlock()
for _, v := range m.challengerMap {
proto.Challenger = append(proto.Challenger, &server_proto.XuanyuanRankHeroProto{
HeroId: v.heroId,
Score: v.score,
Win: v.win,
Lose: v.lose,
Mirror: v.combatMirror,
})
}
}
func (m *XuanyuanManager) Unmarshal(proto *server_proto.XuanyuanModuleProto) {
if proto == nil {
return
}
n := len(proto.RankHero)
newRo := &RoRank{
heroMap: make(map[int64]*XyRankHero, n),
rankHeros: make([]*XyRankHero, 0, n),
updateTime: timeutil.Unix64(proto.UpdateTime),
}
for i, v := range proto.RankHero {
rank := i + 1
newHero := newRankHero(v.HeroId,
u64.FromInt64(int64(v.Score)),
u64.FromInt64(int64(v.RankScore)),
v.Win, v.Lose, rank, v.Mirror)
newRo.heroMap[newHero.heroId] = newHero
newRo.rankHeros = append(newRo.rankHeros, newHero)
}
m.rrl.set(newRo)
m.Lock()
defer m.Unlock()
for _, v := range proto.Challenger {
m.addChallenger(v.HeroId, v.Score, v.Win, v.Lose, v.Mirror)
}
}
func (m *XuanyuanManager) Get() *RoRank {
return m.rrl.Get()
}
func (m *XuanyuanManager) getAndClearChallenger() map[int64]*XyHero {
m.Lock()
defer m.Unlock()
toReturn := m.challengerMap
m.challengerMap = make(map[int64]*XyHero)
return toReturn
}
func (m *XuanyuanManager) Update(updateTime time.Time, gmReset bool) bool {
prev := m.Get()
if !gmReset && prev != nil && !prev.updateTime.Before(updateTime) {
return false
}
heroMap := m.getAndClearChallenger()
m.rrl.update(heroMap, int(m.rankCount), updateTime, prev)
return true
}
func (m *XuanyuanManager) AddChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) {
m.Lock()
defer m.Unlock()
m.addChallenger(heroId, score, win, lose, player)
}
func (m *XuanyuanManager) addChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) {
m.challengerMap[heroId] = &XyHero{
heroId: heroId,
score: score,
win: win,
lose: lose,
combatMirror: player,
}
}
type RoRankList struct {
v atomic.Value
}
func (r *RoRankList) Get() *RoRank {
if rank := r.v.Load(); rank != nil {
return rank.(*RoRank)
}
return nil
}
func (r *RoRankList) set(toSet *RoRank) {
r.v.Store(toSet)
}
func (r *RoRankList) update(newHeroMap map[int64]*XyHero, rankCount int, updateTime time.Time, prev *RoRank) {
// Single-threaded update
//if len(newHeroMap) <= 0 && prev != nil {
// // Just update the time
// r.set(&RoRank{
// heroMap: prev.heroMap,
// rankHeros: prev.rankHeros,
// updateTime: updateTime,
// })
// return
//}
pa := make([]*sortkeys.U64K2V, 0, len(newHeroMap))
if prev != nil {
for heroId, v := range prev.heroMap {
// If already on the rank list, the rank-list entry takes precedence
delete(newHeroMap, heroId)
// Score + fight amount
_, m := v.GetMirror()
var fightAmount uint64
if m != nil {
fightAmount = u64.FromInt32(m.TotalFightAmount)
}
pa = append(pa, &sortkeys.U64K2V{
K1: v.score.Load(),
K2: fightAmount,
V: heroId,
})
}
}
for heroId, v := range newHeroMap {
var fightAmount uint64
if v.combatMirror != nil {
fightAmount = u64.FromInt32(v.combatMirror.TotalFightAmount)
}
pa = append(pa, &sortkeys.U64K2V{
K1: v.score,
K2: fightAmount,
V: heroId,
})
}
sort.Sort(sort.Reverse(sortkeys.U64K2VSlice(pa)))
n := imath.Min(len(pa), rankCount)
newRo := &RoRank{
heroMap: make(map[int64]*XyRankHero, n),
rankHeros: make([]*XyRankHero, 0, n),
updateTime: updateTime,
}
for i := 0; i < n; i++ {
rank := i + 1
p := pa[i]
score := p.K1
heroId := p.I64Value()
var newHero *XyRankHero
if prev != nil {
prevHero := prev.GetHero(heroId)
if prevHero != nil {
newHero = prevHero.copy(score, rank)
}
}
if newHero == nil {
challenger := newHeroMap[heroId]
newHero = challenger.newRankHero(rank)
}
newRo.heroMap[heroId] = newHero
newRo.rankHeros = append(newRo.rankHeros, newHero)
}
r.set(newRo)
}
type RoRank struct {
heroMap map[int64]*XyRankHero
rankHeros []*XyRankHero
updateTime time.Time
}
func (m *RoRank) GetUpdateTime() time.Time {
return m.updateTime
}
func (m *RoRank) RankCount() int {
return len(m.rankHeros)
}
func (m *RoRank) GetHero(heroId int64) *XyRankHero {
return m.heroMap[heroId]
}
func (m *RoRank) GetHeroByRank(rank int) *XyRankHero {
if rank > 0 && rank <= len(m.rankHeros) {
return m.rankHeros[rank-1]
}
return nil
}
func (m *RoRank) Range(f func(hero *XyRankHero) (toContinue bool)) {
for _, v := range m.rankHeros {
if !f(v) {
break
}
}
}
func newRankHero(heroId int64, score, rankScore, win, lose uint64, rank int, combatMirror *shared_proto.CombatPlayerProto) *XyRankHero {
newHero := &XyRankHero{
heroId: heroId,
score: atomic2.NewUint64(score),
rankScore: rankScore,
rank: rank,
win: atomic2.NewUint64(win),
lose: atomic2.NewUint64(lose),
combatMirrorRef: &atomic.Value{},
}
newHero.SetMirror(combatMirror, int64(rank))
return newHero
}
type XyRankHero struct {
// Player (hero) id
heroId int64
// Current score
score *atomic2.Uint64
// Ranking score
rankScore uint64
// Rank position
rank int
// Win count
win *atomic2.Uint64
// Loss count
lose *atomic2.Uint64
// Combat mirror for challenges
combatMirrorRef *atomic.Value
targetBytesCache atomic.Value
}
func (hero *XyRankHero) copy(rankScore uint64, rank int) *XyRankHero {
newHero := &XyRankHero{
heroId: hero.heroId,
score: hero.score | ) {
r | identifier_name |
|
manager.go | i, v := range proto.RankHero {
rank := i + 1
newHero := newRankHero(v.HeroId,
u64.FromInt64(int64(v.Score)),
u64.FromInt64(int64(v.RankScore)),
v.Win, v.Lose, rank, v.Mirror)
newRo.heroMap[newHero.heroId] = newHero
newRo.rankHeros = append(newRo.rankHeros, newHero)
}
m.rrl.set(newRo)
m.Lock()
defer m.Unlock()
for _, v := range proto.Challenger {
m.addChallenger(v.HeroId, v.Score, v.Win, v.Lose, v.Mirror)
}
}
func (m *XuanyuanManager) Get() *RoRank {
return m.rrl.Get()
}
func (m *XuanyuanManager) getAndClearChallenger() map[int64]*XyHero {
m.Lock()
defer m.Unlock()
toReturn := m.challengerMap
m.challengerMap = make(map[int64]*XyHero)
return toReturn
}
func (m *XuanyuanManager) Update(updateTime time.Time, gmReset bool) bool {
prev := m.Get()
if !gmReset && prev != nil && !prev.updateTime.Before(updateTime) {
return false
}
heroMap := m.getAndClearChallenger()
m.rrl.update(heroMap, int(m.rankCount), updateTime, prev)
return true
}
func (m *XuanyuanManager) AddChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) {
m.Lock()
defer m.Unlock()
m.addChallenger(heroId, score, win, lose, player)
}
func (m *XuanyuanManager) addChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) {
m.challengerMap[heroId] = &XyHero{
heroId: heroId,
score: score,
win: win,
lose: lose,
combatMirror: player,
}
}
type RoRankList struct {
v atomic.Value
}
func (r *RoRankList) Get() *RoRank {
if rank := r.v.Load(); rank != nil {
return rank.(*RoRank)
}
return nil
}
func (r *RoRankList) set(toSet *RoRank) {
r.v.Store(toSet)
}
func (r *RoRankList) update(newHeroMap map[int64]*XyHero, rankCount int, updateTime time.Time, prev *RoRank) {
// Single-threaded update
//if len(newHeroMap) <= 0 && prev != nil {
// // Just update the time
// r.set(&RoRank{
// heroMap: prev.heroMap,
// rankHeros: prev.rankHeros,
// updateTime: updateTime,
// })
// return
//}
pa := make([]*sortkeys.U64K2V, 0, len(newHeroMap))
if prev != nil {
for heroId, v := range prev.heroMap {
// If already on the rank list, the rank-list entry takes precedence
delete(newHeroMap, heroId)
// Score + fight amount
_, m := v.GetMirror()
var fightAmount uint64
if m != nil {
fightAmount = u64.FromInt32(m.TotalFightAmount)
}
pa = append(pa, &sortkeys.U64K2V{
K1: v.score.Load(),
K2: fightAmount,
V: heroId,
})
}
}
for heroId, v := range newHeroMap {
var fightAmount uint64
if v.combatMirror != nil {
fightAmount = u64.FromInt32(v.combatMirror.TotalFightAmount)
}
pa = append(pa, &sortkeys.U64K2V{
K1: v.score,
K2: fightAmount,
V: heroId,
})
}
sort.Sort(sort.Reverse(sortkeys.U64K2VSlice(pa)))
n := imath.Min(len(pa), rankCount)
newRo := &RoRank{
heroMap: make(map[int64]*XyRankHero, n),
rankHeros: make([]*XyRankHero, 0, n),
updateTime: updateTime,
}
for i := 0; i < n; i++ {
rank := i + 1
p := pa[i]
score := p.K1
heroId := p.I64Value()
var newHero *XyRankHero
if prev != nil {
prevHero := prev.GetHero(heroId)
if prevHero != nil {
newHero = prevHero.copy(score, rank)
}
}
if newHero == nil {
challenger := newHeroMap[heroId]
newHero = challenger.newRankHero(rank)
}
newRo.heroMap[heroId] = newHero
newRo.rankHeros = append(newRo.rankHeros, newHero)
}
r.set(newRo)
}
type RoRank struct {
heroMap map[int64]*XyRankHero
rankHeros []*XyRankHero
updateTime time.Time
}
func (m *RoRank) GetUpdateTime() time.Time {
return m.updateTime
}
func (m *RoRank) RankCount() int {
return len(m.rankHeros)
}
func (m *RoRank) GetHero(heroId int64) *XyRankHero {
return m.heroMap[heroId]
}
func (m *RoRank) GetHeroByRank(rank int) *XyRankHero {
if rank > 0 && rank <= len(m.rankHeros) {
return m.rankHeros[rank-1]
}
return nil
}
func (m *RoRank) Range(f func(hero *XyRankHero) (toContinue bool)) {
for _, v := range m.rankHeros {
if !f(v) {
break
}
}
}
func newRankHero(heroId int64, score, rankScore, win, lose uint64, rank int, combatMirror *shared_proto.CombatPlayerProto) *XyRankHero {
newHero := &XyRankHero{
heroId: heroId,
score: atomic2.NewUint64(score),
rankScore: rankScore,
rank: rank,
win: atomic2.NewUint64(win),
lose: atomic2.NewUint64(lose),
combatMirrorRef: &atomic.Value{},
}
newHero.SetMirror(combatMirror, int64(rank))
return newHero
}
type XyRankHero struct {
// Player (hero) id
heroId int64
// Current score
score *atomic2.Uint64
// Ranking score
rankScore uint64
// Rank position
rank int
// Win count
win *atomic2.Uint64
// Loss count
lose *atomic2.Uint64
// Combat mirror for challenges
combatMirrorRef *atomic.Value
targetBytesCache atomic.Value
}
func (hero *XyRankHero) copy(rankScore uint64, rank int) *XyRankHero {
newHero := &XyRankHero{
heroId: hero.heroId,
score: hero.score,
rankScore: rankScore,
rank: rank,
win: hero.win,
lose: hero.lose,
combatMirrorRef: hero.combatMirrorRef,
}
return newHero
}
func (hero *XyRankHero) Id() int64 {
return hero.heroId
}
func (hero *XyRankHero) Rank() int {
return hero.rank
}
func (hero *XyRankHero) GetScore() uint64 {
return hero.score.Load()
}
func (hero *XyRankHero) SetScore(toSet uint64) {
hero.score.Store(toSet)
}
func (hero *XyRankHero) GetWin() uint64 {
return hero.win.Load()
}
func (hero *XyRankHero) IncWin() uint64 {
amt := hero.win.Inc()
hero.clearTargetBytesCache()
return amt
}
func (hero *XyRankHero) GetLose() uint64 {
return hero.lose.Load()
}
func (hero *XyRankHero) IncLose() uint64 {
amt := hero.lose.Inc()
hero.clearTargetBytesCache()
return amt
}
func (hero *XyRankHero) EncodeTarget(getter func(int64) *snapshotdata.HeroSnapshot) []byte {
cache := hero.targetBytesCache.Load()
if cache != nil {
if b, ok := cache.([]byte); ok && len(b) > 0 {
return b
}
}
proto := hero.encodeTarget(getter)
protoBytes := must.Marshal(proto)
hero.targetBytesCache.Store(protoBytes) | return protoBytes
}
| random_line_split |
|
manager.go | Hero, &server_proto.XuanyuanRankHeroProto{
HeroId: v.heroId,
Score: v.score.Load(),
RankScore: v.rankScore,
Win: v.win.Load(),
Lose: v.lose.Load(),
Mirror: mirror,
})
}
}
m.RLock()
defer m.RUnlock()
for _, v := range m.challengerMap {
proto.Challenger = append(proto.Challenger, &server_proto.XuanyuanRankHeroProto{
HeroId: v.heroId,
Score: v.score,
Win: v.win,
Lose: v.lose,
Mirror: v.combatMirror,
})
}
}
func (m *XuanyuanManager) Unmarshal(proto *server_proto.XuanyuanModuleProto) {
if proto == nil {
return
}
n := len(proto.RankHero)
newRo := &RoRank{
heroMap: make(map[int64]*XyRankHero, n),
rankHeros: make([]*XyRankHero, 0, n),
updateTime: timeutil.Unix64(proto.UpdateTime),
}
for i, v := range proto.RankHero {
rank := i + 1
newHero := newRankHero(v.HeroId,
u64.FromInt64(int64(v.Score)),
u64.FromInt64(int64(v.RankScore)),
v.Win, v.Lose, rank, v.Mirror)
newRo.heroMap[newHero.heroId] = newHero
newRo.rankHeros = append(newRo.rankHeros, newHero)
}
m.rrl.set(newRo)
m.Lock()
defer m.Unlock()
for _, v := range proto.Challenger {
m.addChallenger(v.HeroId, v.Score, v.Win, v.Lose, v.Mirror)
}
}
func (m *XuanyuanManager) Get() *RoRank {
return m.rrl.Get()
}
func (m *XuanyuanManager) getAndClearChallenger() map[int64]*XyHero {
m.Lock()
defer m.Unlock()
toReturn := m.challengerMap
m.challengerMap = make(map[int64]*XyHero)
return toReturn
}
func (m *XuanyuanManager) Update(updateTime time.Time, gmReset bool) bool {
prev := m.Get()
if !gmReset && prev != nil && !prev.updateTime.Before(updateTime) {
return false
}
heroMap := m.getAndClearChallenger()
m.rrl.update(heroMap, int(m.rankCount), updateTime, prev)
return true
}
func (m *XuanyuanManager) AddChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) {
m.Lock()
defer m.Unlock()
m.addChallenger(heroId, score, win, lose, player)
}
func (m *XuanyuanManager) addChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) {
m.challengerMap[heroId] = &XyHero{
heroId: heroId,
score: score,
win: win,
lose: lose,
combatMirror: player,
}
}
type RoRankList struct {
v atomic.Value
}
func (r *RoRankList) Get() *RoRank {
if rank := r.v.Load(); rank != nil {
retu | .v.Store(toSet)
}
func (r *RoRankList) update(newHeroMap map[int64]*XyHero, rankCount int, updateTime time.Time, prev *RoRank) {
// Single-threaded update
//if len(newHeroMap) <= 0 && prev != nil {
// // Just update the time
// r.set(&RoRank{
// heroMap: prev.heroMap,
// rankHeros: prev.rankHeros,
// updateTime: updateTime,
// })
// return
//}
pa := make([]*sortkeys.U64K2V, 0, len(newHeroMap))
if prev != nil {
for heroId, v := range prev.heroMap {
// If already on the rank list, the rank-list entry takes precedence
delete(newHeroMap, heroId)
// Score + fight amount
_, m := v.GetMirror()
var fightAmount uint64
if m != nil {
fightAmount = u64.FromInt32(m.TotalFightAmount)
}
pa = append(pa, &sortkeys.U64K2V{
K1: v.score.Load(),
K2: fightAmount,
V: heroId,
})
}
}
for heroId, v := range newHeroMap {
var fightAmount uint64
if v.combatMirror != nil {
fightAmount = u64.FromInt32(v.combatMirror.TotalFightAmount)
}
pa = append(pa, &sortkeys.U64K2V{
K1: v.score,
K2: fightAmount,
V: heroId,
})
}
sort.Sort(sort.Reverse(sortkeys.U64K2VSlice(pa)))
n := imath.Min(len(pa), rankCount)
newRo := &RoRank{
heroMap: make(map[int64]*XyRankHero, n),
rankHeros: make([]*XyRankHero, 0, n),
updateTime: updateTime,
}
for i := 0; i < n; i++ {
rank := i + 1
p := pa[i]
score := p.K1
heroId := p.I64Value()
var newHero *XyRankHero
if prev != nil {
prevHero := prev.GetHero(heroId)
if prevHero != nil {
newHero = prevHero.copy(score, rank)
}
}
if newHero == nil {
challenger := newHeroMap[heroId]
newHero = challenger.newRankHero(rank)
}
newRo.heroMap[heroId] = newHero
newRo.rankHeros = append(newRo.rankHeros, newHero)
}
r.set(newRo)
}
type RoRank struct {
heroMap map[int64]*XyRankHero
rankHeros []*XyRankHero
updateTime time.Time
}
func (m *RoRank) GetUpdateTime() time.Time {
return m.updateTime
}
func (m *RoRank) RankCount() int {
return len(m.rankHeros)
}
func (m *RoRank) GetHero(heroId int64) *XyRankHero {
return m.heroMap[heroId]
}
func (m *RoRank) GetHeroByRank(rank int) *XyRankHero {
if rank > 0 && rank <= len(m.rankHeros) {
return m.rankHeros[rank-1]
}
return nil
}
func (m *RoRank) Range(f func(hero *XyRankHero) (toContinue bool)) {
for _, v := range m.rankHeros {
if !f(v) {
break
}
}
}
func newRankHero(heroId int64, score, rankScore, win, lose uint64, rank int, combatMirror *shared_proto.CombatPlayerProto) *XyRankHero {
newHero := &XyRankHero{
heroId: heroId,
score: atomic2.NewUint64(score),
rankScore: rankScore,
rank: rank,
win: atomic2.NewUint64(win),
lose: atomic2.NewUint64(lose),
combatMirrorRef: &atomic.Value{},
}
newHero.SetMirror(combatMirror, int64(rank))
return newHero
}
type XyRankHero struct {
// Player (hero) id
heroId int64
// Current score
score *atomic2.Uint64
// Ranking score
rankScore uint64
// Rank position
rank int
// Win count
win *atomic2.Uint64
// Loss count
lose *atomic2.Uint64
// Combat mirror for challenges
combatMirrorRef *atomic.Value
targetBytesCache atomic.Value
}
func (hero *XyRankHero) copy(rankScore uint64, rank int) *XyRankHero {
newHero := &XyRankHero{
heroId: hero.heroId,
score: hero.score,
rankScore: rankScore,
rank: rank,
win: hero.win,
lose: hero.lose,
combatMirrorRef: hero.combatMirrorRef,
}
return newHero
}
func (hero *XyRankHero) Id() int64 {
return hero.heroId
}
func (hero *XyRankHero) Rank() | rn rank.(*RoRank)
}
return nil
}
func (r *RoRankList) set(toSet *RoRank) {
r | identifier_body |
manager.go | ange m.challengerMap {
proto.Challenger = append(proto.Challenger, &server_proto.XuanyuanRankHeroProto{
HeroId: v.heroId,
Score: v.score,
Win: v.win,
Lose: v.lose,
Mirror: v.combatMirror,
})
}
}
func (m *XuanyuanManager) Unmarshal(proto *server_proto.XuanyuanModuleProto) {
if proto == nil {
return
}
n := len(proto.RankHero)
newRo := &RoRank{
heroMap: make(map[int64]*XyRankHero, n),
rankHeros: make([]*XyRankHero, 0, n),
updateTime: timeutil.Unix64(proto.UpdateTime),
}
for i, v := range proto.RankHero {
rank := i + 1
newHero := newRankHero(v.HeroId,
u64.FromInt64(int64(v.Score)),
u64.FromInt64(int64(v.RankScore)),
v.Win, v.Lose, rank, v.Mirror)
newRo.heroMap[newHero.heroId] = newHero
newRo.rankHeros = append(newRo.rankHeros, newHero)
}
m.rrl.set(newRo)
m.Lock()
defer m.Unlock()
for _, v := range proto.Challenger {
m.addChallenger(v.HeroId, v.Score, v.Win, v.Lose, v.Mirror)
}
}
func (m *XuanyuanManager) Get() *RoRank {
return m.rrl.Get()
}
func (m *XuanyuanManager) getAndClearChallenger() map[int64]*XyHero {
m.Lock()
defer m.Unlock()
toReturn := m.challengerMap
m.challengerMap = make(map[int64]*XyHero)
return toReturn
}
func (m *XuanyuanManager) Update(updateTime time.Time, gmReset bool) bool {
prev := m.Get()
if !gmReset && prev != nil && !prev.updateTime.Before(updateTime) {
return false
}
heroMap := m.getAndClearChallenger()
m.rrl.update(heroMap, int(m.rankCount), updateTime, prev)
return true
}
func (m *XuanyuanManager) AddChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) {
m.Lock()
defer m.Unlock()
m.addChallenger(heroId, score, win, lose, player)
}
func (m *XuanyuanManager) addChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) {
m.challengerMap[heroId] = &XyHero{
heroId: heroId,
score: score,
win: win,
lose: lose,
combatMirror: player,
}
}
type RoRankList struct {
v atomic.Value
}
func (r *RoRankList) Get() *RoRank {
if rank := r.v.Load(); rank != nil {
return rank.(*RoRank)
}
return nil
}
func (r *RoRankList) set(toSet *RoRank) {
r.v.Store(toSet)
}
func (r *RoRankList) update(newHeroMap map[int64]*XyHero, rankCount int, updateTime time.Time, prev *RoRank) {
// Single-threaded update
//if len(newHeroMap) <= 0 && prev != nil {
// // Just update the time
// r.set(&RoRank{
// heroMap: prev.heroMap,
// rankHeros: prev.rankHeros,
// updateTime: updateTime,
// })
// return
//}
pa := make([]*sortkeys.U64K2V, 0, len(newHeroMap))
if prev != nil {
for heroId, v := range prev.heroMap {
// If already on the rank list, the rank-list entry takes precedence
delete(newHeroMap, heroId)
// Score + fight amount
_, m := v.GetMirror()
var fightAmount uint64
if m != nil {
fightAmount = u64.FromInt32(m.TotalFightAmount)
}
pa = append(pa, &sortkeys.U64K2V{
K1: v.score.Load(),
K2: fightAmount,
V: heroId,
})
}
}
for heroId, v := range newHeroMap {
var fightAmount uint64
if v.combatMirror != nil {
fightAmount = u64.FromInt32(v.combatMirror.TotalFightAmount)
}
pa = append(pa, &sortkeys.U64K2V{
K1: v.score,
K2: fightAmount,
V: heroId,
})
}
sort.Sort(sort.Reverse(sortkeys.U64K2VSlice(pa)))
n := imath.Min(len(pa), rankCount)
newRo := &RoRank{
heroMap: make(map[int64]*XyRankHero, n),
rankHeros: make([]*XyRankHero, 0, n),
updateTime: updateTime,
}
for i := 0; i < n; i++ {
rank := i + 1
p := pa[i]
score := p.K1
heroId := p.I64Value()
var newHero *XyRankHero
if prev != nil {
prevHero := prev.GetHero(heroId)
if prevHero != nil {
newHero = prevHero.copy(score, rank)
}
}
if newHero == nil {
challenger := newHeroMap[heroId]
newHero = challenger.newRankHero(rank)
}
newRo.heroMap[heroId] = newHero
newRo.rankHeros = append(newRo.rankHeros, newHero)
}
r.set(newRo)
}
type RoRank struct {
heroMap map[int64]*XyRankHero
rankHeros []*XyRankHero
updateTime time.Time
}
func (m *RoRank) GetUpdateTime() time.Time {
return m.updateTime
}
func (m *RoRank) RankCount() int {
return len(m.rankHeros)
}
func (m *RoRank) GetHero(heroId int64) *XyRankHero {
return m.heroMap[heroId]
}
func (m *RoRank) GetHeroByRank(rank int) *XyRankHero {
if rank > 0 && rank <= len(m.rankHeros) {
return m.rankHeros[rank-1]
}
return nil
}
func (m *RoRank) Range(f func(hero *XyRankHero) (toContinue bool)) {
for _, v := range m.rankHeros {
if !f(v) {
break
}
}
}
func newRankHero(heroId int64, score, rankScore, win, lose uint64, rank int, combatMirror *shared_proto.CombatPlayerProto) *XyRankHero {
newHero := &XyRankHero{
heroId: heroId,
score: atomic2.NewUint64(score),
rankScore: rankScore,
rank: rank,
win: atomic2.NewUint64(win),
lose: atomic2.NewUint64(lose),
combatMirrorRef: &atomic.Value{},
}
newHero.SetMirror(combatMirror, int64(rank))
return newHero
}
type XyRankHero struct {
// Player (hero) id
heroId int64
// Current score
score *atomic2.Uint64
// Ranking score
rankScore uint64
// Rank position
rank int
// Win count
win *atomic2.Uint64
// Loss count
lose *atomic2.Uint64
// Combat mirror for challenges
combatMirrorRef *atomic.Value
targetBytesCache atomic.Value
}
func (hero *XyRankHero) copy(rankScore uint64, rank int) *XyRankHero {
newHero := &XyRankHero{
heroId: hero.heroId,
score: hero.score,
rankScore: rankScore,
rank: rank,
win: hero.win,
| dateTime)
proto.RankHero = make([]*server_proto.XuanyuanRankHeroProto, 0, len(r.rankHeros))
for _, v := range r.rankHeros {
_, mirror := v.GetMirror()
proto.RankHero = append(proto.RankHero, &server_proto.XuanyuanRankHeroProto{
HeroId: v.heroId,
Score: v.score.Load(),
RankScore: v.rankScore,
Win: v.win.Load(),
Lose: v.lose.Load(),
Mirror: mirror,
})
}
}
m.RLock()
defer m.RUnlock()
for _, v := r | conditional_block |
|
webpack.ts | ` binding library.
*/
new webpack.IgnorePlugin({
resourceRegExp: /^\.\/native$/,
contextRegExp: /node_modules\/pg\/lib$/,
}),
];
if (!devServer) {
// Generate some stats for the bundles
plugins.push(getBundleAnalyzerPlugin(analyze, path.resolve(stageDirPath, `report-backend.html`)));
}
// Entry points to be bundled
const entries: Record<string, string> = {
// Entry point for rendering the views on server-side
server: require.resolve(devServer ? './bootstrap/local-server' : './bootstrap/server'),
};
// Aliases that entry points will `require`
const aliases: Record<string, string> = {
_site: path.resolve(projectRootPath, sourceDir, siteFile),
};
// Modules excluded from the bundle
const externals = [
// No need to bundle AWS SDK for compilation, because it will be available in the Lambda node environment
'aws-sdk',
];
// If an API is defined, compile it as well
if (serverFile) {
// eslint-disable-next-line no-underscore-dangle
aliases._service = path.resolve(projectRootPath, sourceDir, serverFile);
} else {
// API not available. Let the bundle compile without it, but
// raise an error if attempting to `require`
externals.push('_service');
}
// If a database is defined, compile it as well
if (databaseFile) {
// eslint-disable-next-line no-underscore-dangle
aliases._db = path.resolve(projectRootPath, sourceDir, databaseFile);
} else {
// Database not available. Let the bundle compile without it, but
// raise an error if attempting to `require`
externals.push('_db');
}
// If a triggers file is defined, compile it as well
if (triggersFile) {
// eslint-disable-next-line no-underscore-dangle
aliases._triggers = path.resolve(projectRootPath, sourceDir, triggersFile);
} else {
// Triggers not available. Let the bundle compile without it, but
// raise an error if attempting to `require`
externals.push('_triggers');
}
return {
context: projectRootPath,
// Development or production build?
mode: devServer || debug ? 'development' : 'production',
optimization: {
// For better tracebacks, do not minify server-side code,
// even in production.
minimize: false,
},
// Build for running in node environment, instead of web browser
target: 'node',
// The main entry points for source files.
entry: entries,
output: {
// Output files are placed to this folder
path: buildDirPath,
// The file name template for the entry chunks
filename: devServer ? '[name].js' : '[name].[hash].js',
// The URL to the output directory resolved relative to the HTML page
publicPath: `${assetsRoot}/`,
// Export for use in a Lambda function
libraryTarget: 'commonjs2',
},
module: {
rules: getCommonRules({
devServer,
debug,
tsConfigPath,
compilerOptions,
assetsFilePrefix,
emitFile: false,
}),
},
externals,
resolve: {
// Add '.ts' and '.tsx' as resolvable extensions.
extensions: ['.ts', '.tsx', '.js'],
alias: aliases,
},
resolveLoader: {
// Look from this library's node modules!
modules: [ownModulesDirPath, modulesDirPath],
},
// Enable sourcemaps for debugging webpack's output.
devtool: 'source-map',
// Plugins
plugins,
};
}
function getCommonPlugins(options: {
frontend: boolean;
devServer: boolean;
assetsFilePrefix: string;
tsConfigPath: string;
sourceDirPath: string;
compilerOptions?: unknown;
}): webpack.Plugin[] {
const { frontend, devServer, assetsFilePrefix, tsConfigPath, sourceDirPath, compilerOptions } = options;
const cssFilePrefix = `${assetsFilePrefix}css/`;
return [
// https://github.com/faceyspacey/extract-css-chunks-webpack-plugin
new ExtractCssChunks({
filename: devServer ? `${cssFilePrefix}[name].css` : `${cssFilePrefix}[contenthash].css`,
chunkFilename: devServer ? `${cssFilePrefix}[id].css` : `${cssFilePrefix}[contenthash].css`,
ignoreOrder: false,
}),
// Perform type checking for TypeScript
new ForkTsCheckerWebpackPlugin({
typescript: {
configFile: tsConfigPath,
configOverwrite: {
compilerOptions,
},
},
// When running the dev server, the backend compilation will handle ESLint checks
eslint:
frontend && devServer
? undefined
: {
files: path.join(sourceDirPath, '**', '*.{ts,tsx,js,jsx}'),
},
}),
// Prevent all the MomentJS locales from being imported by default.
new webpack.ContextReplacementPlugin(
/\bmoment[/\\]locale\b/,
// Regular expression to match the files that should be imported
/\ben.js/,
),
];
}
function getCommonRules(options: {
assetsFilePrefix: string;
debug: boolean;
devServer: boolean;
tsConfigPath: string;
emitFile: boolean;
compilerOptions?: unknown;
}): webpack.RuleSetRule[] {
const { tsConfigPath, compilerOptions, debug, devServer, assetsFilePrefix, emitFile } = options;
return [
// Pre-process sourcemaps for scripts
{
test: /\.(jsx?|tsx?)$/,
loader: 'source-map-loader',
enforce: 'pre' as const,
},
// Compile TypeScript files ('.ts' or '.tsx')
{
test: /\.tsx?$/,
loader: 'ts-loader',
options: {
// Explicitly expect the tsconfig.json to be located at the project root
configFile: tsConfigPath,
// Disable type checker - use `fork-ts-checker-webpack-plugin` for that purpose instead
transpileOnly: true,
compilerOptions,
},
},
// Extract stylesheets as separate CSS files
{
test: /\.css$/i,
sideEffects: true,
use: [
{
loader: ExtractCssChunks.loader,
options: {
esModule: true,
},
},
{
loader: 'css-loader',
options: {
modules: {
mode: 'local',
// Auto-generated class names contain the original name on development
localIdentName: debug || devServer ? '[local]--[hash:base64:5]' : '[hash:base64]',
},
},
},
],
},
// Optimize image files and bundle them as files or data URIs
{
test: /\.(gif|png|jpe?g|svg)$/,
use: [
{
loader: 'url-loader',
options: {
// Max bytes to be converted to inline data URI
limit: 100,
// Asset files are not emitted in server-side compilation
emitFile,
// If larger, then convert to a file instead
name: `${assetsFilePrefix}images/[name].[hash].[ext]`,
},
},
{
loader: 'image-webpack-loader',
options: {
disable: debug || devServer,
optipng: {
optimizationLevel: 7,
},
},
},
],
},
// Include font files either as data URIs or separate files
{
test: /\.(eot|ttf|otf|woff2?|svg)($|\?|#)/,
loader: 'url-loader',
options: {
// Max bytes to be converted to inline data URI
limit: 100,
// Asset files are not emitted in server-side compilation
emitFile,
// If larger, then convert to a file instead
name: `${assetsFilePrefix}fonts/[name].[hash].[ext]`,
},
},
];
}
function getBundleAnalyzerPlugin(enabled: boolean, filename: string) | {
return new BundleAnalyzerPlugin({
// Can be `server`, `static` or `disabled`.
// In `server` mode analyzer will start HTTP server to show bundle report.
// In `static` mode single HTML file with bundle report will be generated.
// In `disabled` mode you can use this plugin to just generate Webpack Stats JSON file by setting `generateStatsFile` to `true`.
analyzerMode: enabled ? 'static' : 'disabled',
// Host that will be used in `server` mode to start HTTP server.
analyzerHost: '127.0.0.1',
// Port that will be used in `server` mode to start HTTP server.
analyzerPort: 8888,
// Path to bundle report file that will be generated in `static` mode.
// Relative to bundles output directory.
reportFilename: filename,
// Module sizes to show in report by default.
// Should be one of `stat`, `parsed` or `gzip`.
// See "Definitions" section for more information.
defaultSizes: 'parsed',
// Automatically open report in default browser
openAnalyzer: enabled, | identifier_body |
|
webpack.ts | string`
appName: title,
// TODO: Your application's description. `string`
appDescription: null,
// TODO: Your (or your developer's) name. `string`
developerName: null,
// TODO: Your (or your developer's) URL. `string`
developerURL: null,
// TODO: Your application's version string. `string`
version: null,
// Start URL when launching the application from a device. `string`
start_url: serverRoot,
// Print logs to console? `boolean`
logging: false,
/**
* Which icons should be generated.
* Platform Options:
* - offset - offset in percentage
* - shadow - drop shadow for Android icons, available online only
* - background:
* * false - use default
* * true - force use default, e.g. set background for Android icons
* * color - set background for the specified icons
*/
icons: {
// Create Android homescreen icon. `boolean` or `{ offset, background, shadow }`
android: !devServer && !debug,
// Create Apple touch icons. `boolean` or `{ offset, background }`
appleIcon: !devServer && !debug,
// Create Apple startup images. `boolean` or `{ offset, background }`
appleStartup: !devServer && !debug,
// Create Opera Coast icon with offset 25%. `boolean` or `{ offset, background }`
coast: false,
// Create regular favicons. `boolean`
favicons: true,
// Create Firefox OS icons. `boolean` or `{ offset, background }`
firefox: false,
// Create Windows 8 tile icons. `boolean` or `{ background }`
windows: !devServer && !debug,
// Create Yandex browser icon. `boolean` or `{ background }`
yandex: false,
},
},
}),
);
}
return {
context: projectRootPath,
// Development or production build?
mode: devServer || debug ? 'development' : 'production',
// The main entry points for source files.
entry: entries,
// Supposed to run in a browser
target: 'web',
output: {
// Output files are placed to this folder
path: buildDirPath,
// The file name template for the entry chunks
filename: devServer ? `${assetsFilePrefix}[name].js` : `${assetsFilePrefix}[name].[chunkhash].js`,
// The URL to the output directory resolved relative to the HTML page
// This will be the origin, not including the path, because that will be used as a subdirectory for files.
publicPath: `${assetsOrigin}/`,
// The name of the exported library, e.g. the global variable name
library: 'app',
// How the library is exported? E.g. 'var', 'this'
libraryTarget: 'var',
},
module: {
rules: getCommonRules({ tsConfigPath, debug, devServer, assetsFilePrefix, emitFile: true }),
},
resolve: {
// Add '.ts' and '.tsx' as resolvable extensions.
extensions: ['.ts', '.tsx', '.js'],
alias: {
// The entry point will `require` this module for finding the website component
_site: path.resolve(projectRootPath, sourceDir, siteFile),
},
},
resolveLoader: {
// Look from this library's node modules!
modules: [ownModulesDirPath, modulesDirPath],
},
// Behavior for polyfilling node modules
node: {
// The default value `true` seems not to work with RxJS
// TODO: Take a look if this can be enabled
setImmediate: false,
},
// Enable sourcemaps for debugging webpack's output.
devtool: devServer ? 'inline-source-map' : 'source-map',
// Plugins
plugins,
};
}
/**
* Creates the Webpack 2 configuration for the back-end code compilation.
* The options are documented at
* https://webpack.js.org/configuration/
*/
export function getBackendWebpackConfig(config: WebpackConfigOptions): webpack.Configuration {
const { serverFile, databaseFile, siteFile, triggersFile } = config;
const { sourceDir, buildDir, projectRootPath, devServer, debug, assetsRoot } = config;
const { analyze, stageDir } = config;
// Resolve modules, source, build and static paths
const sourceDirPath = path.resolve(projectRootPath, sourceDir);
const buildDirPath = path.resolve(projectRootPath, buildDir);
const modulesDirPath = path.resolve(projectRootPath, 'node_modules');
const ownModulesDirPath = path.resolve(__dirname, 'node_modules');
const stageDirPath = path.resolve(projectRootPath, stageDir);
// Use the tsconfig.json in the project folder (not in this library)
const tsConfigPath = path.resolve(projectRootPath, './tsconfig.json');
// Target backend always to ES2018
const compilerOptions = { target: 'ES2017' } as const;
// Determine the directory for the assets and the site
const assetsRootUrl = url.parse(assetsRoot);
const assetsPath = assetsRootUrl.pathname || '/';
const assetsDir = assetsPath.replace(/^\/+/, '');
const assetsFilePrefix = assetsDir && `${assetsDir}/`;
// Generate the plugins
const plugins: webpack.Plugin[] = [
// Perform type checking for TypeScript
...getCommonPlugins({
frontend: false,
devServer,
assetsFilePrefix,
tsConfigPath,
sourceDirPath,
compilerOptions,
}),
/**
* Prevent `pg` module to import `pg-native` binding library.
*/
new webpack.IgnorePlugin({
resourceRegExp: /^\.\/native$/,
contextRegExp: /node_modules\/pg\/lib$/,
}),
];
if (!devServer) {
// Generate some stats for the bundles
plugins.push(getBundleAnalyzerPlugin(analyze, path.resolve(stageDirPath, `report-backend.html`)));
}
// Entry points to be bundled
const entries: Record<string, string> = {
// Entry point for rendering the views on server-side
server: require.resolve(devServer ? './bootstrap/local-server' : './bootstrap/server'),
};
// Aliases that entry points will `require`
const aliases: Record<string, string> = {
_site: path.resolve(projectRootPath, sourceDir, siteFile),
};
// Modules excluded from the bundle
const externals = [
// No need to bundle AWS SDK for compilation, because it will be available in the Lambda node environment
'aws-sdk',
];
// If an API is defined, compile it as well
if (serverFile) {
// eslint-disable-next-line no-underscore-dangle
aliases._service = path.resolve(projectRootPath, sourceDir, serverFile);
} else {
// API not available. Let the bundle compile without it, but
// raise an error if attempting to `require`
externals.push('_service');
}
// If a database is defined, compile it as well
if (databaseFile) {
// eslint-disable-next-line no-underscore-dangle
aliases._db = path.resolve(projectRootPath, sourceDir, databaseFile);
} else {
// Database not available. Let the bundle compile without it, but
// raise an error if attempting to `require`
externals.push('_db');
}
// If a triggers file is defined, compile it as well
if (triggersFile) {
// eslint-disable-next-line no-underscore-dangle
aliases._triggers = path.resolve(projectRootPath, sourceDir, triggersFile);
} else {
// Triggers not available. Let the bundle compile without it, but
// raise an error if attempting to `require`
externals.push('_triggers');
}
return {
context: projectRootPath,
// Development or production build?
mode: devServer || debug ? 'development' : 'production',
optimization: {
// For better tracebacks, do not minify server-side code,
// even in production.
minimize: false,
},
// Build for running in node environment, instead of web browser
target: 'node',
// The main entry points for source files.
entry: entries,
output: {
// Output files are placed to this folder
path: buildDirPath,
// The file name template for the entry chunks
filename: devServer ? '[name].js' : '[name].[hash].js',
// The URL to the output directory resolved relative to the HTML page
publicPath: `${assetsRoot}/`,
// Export for use in a Lambda function
libraryTarget: 'commonjs2',
},
module: {
rules: getCommonRules({
devServer,
debug,
tsConfigPath,
compilerOptions,
assetsFilePrefix,
emitFile: false,
}),
},
externals,
resolve: {
// Add '.ts' and '.tsx' as resolvable extensions.
extensions: ['.ts', '.tsx', '.js'],
alias: aliases,
},
resolveLoader: { | // Look from this library's node modules!
modules: [ownModulesDirPath, modulesDirPath],
}, | random_line_split |
|
webpack.ts | Windows 8 tile icons. `boolean` or `{ background }`
windows: !devServer && !debug,
// Create Yandex browser icon. `boolean` or `{ background }`
yandex: false,
},
},
}),
);
}
return {
context: projectRootPath,
// Development or production build?
mode: devServer || debug ? 'development' : 'production',
// The main entry points for source files.
entry: entries,
// Supposed to run in a browser
target: 'web',
output: {
// Output files are placed to this folder
path: buildDirPath,
// The file name template for the entry chunks
filename: devServer ? `${assetsFilePrefix}[name].js` : `${assetsFilePrefix}[name].[chunkhash].js`,
// The URL to the output directory resolved relative to the HTML page
// This will be the origin, not including the path, because that will be used as a subdirectory for files.
publicPath: `${assetsOrigin}/`,
// The name of the exported library, e.g. the global variable name
library: 'app',
// How the library is exported? E.g. 'var', 'this'
libraryTarget: 'var',
},
module: {
rules: getCommonRules({ tsConfigPath, debug, devServer, assetsFilePrefix, emitFile: true }),
},
resolve: {
// Add '.ts' and '.tsx' as resolvable extensions.
extensions: ['.ts', '.tsx', '.js'],
alias: {
// The entry point will `require` this module for finding the website component
_site: path.resolve(projectRootPath, sourceDir, siteFile),
},
},
resolveLoader: {
// Look from this library's node modules!
modules: [ownModulesDirPath, modulesDirPath],
},
// Behavior for polyfilling node modules
node: {
// The default value `true` seems not to work with RxJS
// TODO: Take a look if this can be enabled
setImmediate: false,
},
// Enable sourcemaps for debugging webpack's output.
devtool: devServer ? 'inline-source-map' : 'source-map',
// Plugins
plugins,
};
}
/**
* Creates the Webpack 2 configuration for the back-end code compilation.
* The options are documented at
* https://webpack.js.org/configuration/
*/
export function getBackendWebpackConfig(config: WebpackConfigOptions): webpack.Configuration {
const { serverFile, databaseFile, siteFile, triggersFile } = config;
const { sourceDir, buildDir, projectRootPath, devServer, debug, assetsRoot } = config;
const { analyze, stageDir } = config;
// Resolve modules, source, build and static paths
const sourceDirPath = path.resolve(projectRootPath, sourceDir);
const buildDirPath = path.resolve(projectRootPath, buildDir);
const modulesDirPath = path.resolve(projectRootPath, 'node_modules');
const ownModulesDirPath = path.resolve(__dirname, 'node_modules');
const stageDirPath = path.resolve(projectRootPath, stageDir);
// Use the tsconfig.json in the project folder (not in this library)
const tsConfigPath = path.resolve(projectRootPath, './tsconfig.json');
// Target backend always to ES2018
const compilerOptions = { target: 'ES2017' } as const;
// Determine the directory for the assets and the site
const assetsRootUrl = url.parse(assetsRoot);
const assetsPath = assetsRootUrl.pathname || '/';
const assetsDir = assetsPath.replace(/^\/+/, '');
const assetsFilePrefix = assetsDir && `${assetsDir}/`;
// Generate the plugins
const plugins: webpack.Plugin[] = [
// Perform type checking for TypeScript
...getCommonPlugins({
frontend: false,
devServer,
assetsFilePrefix,
tsConfigPath,
sourceDirPath,
compilerOptions,
}),
/**
* Prevent `pg` module to import `pg-native` binding library.
*/
new webpack.IgnorePlugin({
resourceRegExp: /^\.\/native$/,
contextRegExp: /node_modules\/pg\/lib$/,
}),
];
if (!devServer) {
// Generate some stats for the bundles
plugins.push(getBundleAnalyzerPlugin(analyze, path.resolve(stageDirPath, `report-backend.html`)));
}
// Entry points to be bundled
const entries: Record<string, string> = {
// Entry point for rendering the views on server-side
server: require.resolve(devServer ? './bootstrap/local-server' : './bootstrap/server'),
};
// Aliases that entry points will `require`
const aliases: Record<string, string> = {
_site: path.resolve(projectRootPath, sourceDir, siteFile),
};
// Modules excluded from the bundle
const externals = [
// No need to bundle AWS SDK for compilation, because it will be available in the Lambda node environment
'aws-sdk',
];
// If an API is defined, compile it as well
if (serverFile) {
// eslint-disable-next-line no-underscore-dangle
aliases._service = path.resolve(projectRootPath, sourceDir, serverFile);
} else {
// API not available. Let the bundle compile without it, but
// raise an error if attempting to `require`
externals.push('_service');
}
// If a database is defined, compile it as well
if (databaseFile) {
// eslint-disable-next-line no-underscore-dangle
aliases._db = path.resolve(projectRootPath, sourceDir, databaseFile);
} else {
// Database not available. Let the bundle compile without it, but
// raise an error if attempting to `require`
externals.push('_db');
}
// If a triggers file is defined, compile it as well
if (triggersFile) {
// eslint-disable-next-line no-underscore-dangle
aliases._triggers = path.resolve(projectRootPath, sourceDir, triggersFile);
} else {
// Triggers not available. Let the bundle compile without it, but
// raise an error if attempting to `require`
externals.push('_triggers');
}
return {
context: projectRootPath,
// Development or production build?
mode: devServer || debug ? 'development' : 'production',
optimization: {
// For better tracebacks, do not minify server-side code,
// even in production.
minimize: false,
},
// Build for running in node environment, instead of web browser
target: 'node',
// The main entry points for source files.
entry: entries,
output: {
// Output files are placed to this folder
path: buildDirPath,
// The file name template for the entry chunks
filename: devServer ? '[name].js' : '[name].[hash].js',
// The URL to the output directory resolved relative to the HTML page
publicPath: `${assetsRoot}/`,
// Export for use in a Lambda function
libraryTarget: 'commonjs2',
},
module: {
rules: getCommonRules({
devServer,
debug,
tsConfigPath,
compilerOptions,
assetsFilePrefix,
emitFile: false,
}),
},
externals,
resolve: {
// Add '.ts' and '.tsx' as resolvable extensions.
extensions: ['.ts', '.tsx', '.js'],
alias: aliases,
},
resolveLoader: {
// Look from this library's node modules!
modules: [ownModulesDirPath, modulesDirPath],
},
// Enable sourcemaps for debugging webpack's output.
devtool: 'source-map',
// Plugins
plugins,
};
}
function getCommonPlugins(options: {
frontend: boolean;
devServer: boolean;
assetsFilePrefix: string;
tsConfigPath: string;
sourceDirPath: string;
compilerOptions?: unknown;
}): webpack.Plugin[] {
const { frontend, devServer, assetsFilePrefix, tsConfigPath, sourceDirPath, compilerOptions } = options;
const cssFilePrefix = `${assetsFilePrefix}css/`;
return [
// https://github.com/faceyspacey/extract-css-chunks-webpack-plugin
new ExtractCssChunks({
filename: devServer ? `${cssFilePrefix}[name].css` : `${cssFilePrefix}[contenthash].css`,
chunkFilename: devServer ? `${cssFilePrefix}[id].css` : `${cssFilePrefix}[contenthash].css`,
ignoreOrder: false,
}),
// Perform type checking for TypeScript
new ForkTsCheckerWebpackPlugin({
typescript: {
configFile: tsConfigPath,
configOverwrite: {
compilerOptions,
},
},
// When running the dev server, the backend compilation will handle ESlinting
eslint:
frontend && devServer
? undefined
: {
files: path.join(sourceDirPath, '**', '*.{ts,tsx,js,jsx}'),
},
}),
// Prevent all the MomentJS locales to be imported by default.
new webpack.ContextReplacementPlugin(
/\bmoment[/\\]locale\b/,
// Regular expression to match the files that should be imported
/\ben.js/,
),
];
}
function | getCommonRules | identifier_name |
|
webpack.ts | Server) {
plugins.push(
// Generate some stats for the bundles
getBundleAnalyzerPlugin(analyze, path.resolve(stageDirPath, `report-frontend.html`)),
);
}
// Define the entry for the app
const entries: Record<string, string[]> = {
app: [require.resolve(devServer ? './bootstrap/local-site' : './bootstrap/site')],
};
/**
* If icon source file is provided, generate icons for the app.
* For configuration, see https://github.com/jantimon/favicons-webpack-plugin
*/
if (iconFile) {
plugins.push(
new FaviconsWebpackPlugin({
// Your source logo
logo: path.resolve(sourceDirPath, iconFile),
// The prefix for all image files (might be a folder or a name)
prefix: devServer ? `${assetsFilePrefix}icons/` : `${assetsFilePrefix}icons/[hash]/`,
// Inject the html into the html-webpack-plugin
inject: true,
// Locate the cache folder inside the .broiler directory
cache: path.resolve(stageDirPath, '.fwp-cache'),
// The configuration passed to `favicons`:
// https://github.com/itgalaxy/favicons#usage
// NOTE: The most of the metadata is read automatically from package.json
favicons: {
// Your application's name. `string`
appName: title,
// TODO: Your application's description. `string`
appDescription: null,
// TODO: Your (or your developer's) name. `string`
developerName: null,
// TODO: Your (or your developer's) URL. `string`
developerURL: null,
// TODO: Your application's version string. `string`
version: null,
// Start URL when launching the application from a device. `string`
start_url: serverRoot,
// Print logs to console? `boolean`
logging: false,
/**
* Which icons should be generated.
* Platform Options:
* - offset - offset in percentage
* - shadow - drop shadow for Android icons, available online only
* - background:
* * false - use default
* * true - force use default, e.g. set background for Android icons
* * color - set background for the specified icons
*/
icons: {
// Create Android homescreen icon. `boolean` or `{ offset, background, shadow }`
android: !devServer && !debug,
// Create Apple touch icons. `boolean` or `{ offset, background }`
appleIcon: !devServer && !debug,
// Create Apple startup images. `boolean` or `{ offset, background }`
appleStartup: !devServer && !debug,
// Create Opera Coast icon with offset 25%. `boolean` or `{ offset, background }`
coast: false,
// Create regular favicons. `boolean`
favicons: true,
// Create Firefox OS icons. `boolean` or `{ offset, background }`
firefox: false,
// Create Windows 8 tile icons. `boolean` or `{ background }`
windows: !devServer && !debug,
// Create Yandex browser icon. `boolean` or `{ background }`
yandex: false,
},
},
}),
);
}
return {
context: projectRootPath,
// Development or production build?
mode: devServer || debug ? 'development' : 'production',
// The main entry points for source files.
entry: entries,
// Supposed to run in a browser
target: 'web',
output: {
// Output files are placed to this folder
path: buildDirPath,
// The file name template for the entry chunks
filename: devServer ? `${assetsFilePrefix}[name].js` : `${assetsFilePrefix}[name].[chunkhash].js`,
// The URL to the output directory resolved relative to the HTML page
// This will be the origin, not including the path, because that will be used as a subdirectory for files.
publicPath: `${assetsOrigin}/`,
// The name of the exported library, e.g. the global variable name
library: 'app',
// How the library is exported? E.g. 'var', 'this'
libraryTarget: 'var',
},
module: {
rules: getCommonRules({ tsConfigPath, debug, devServer, assetsFilePrefix, emitFile: true }),
},
resolve: {
// Add '.ts' and '.tsx' as resolvable extensions.
extensions: ['.ts', '.tsx', '.js'],
alias: {
// The entry point will `require` this module for finding the website component
_site: path.resolve(projectRootPath, sourceDir, siteFile),
},
},
resolveLoader: {
// Look from this library's node modules!
modules: [ownModulesDirPath, modulesDirPath],
},
// Behavior for polyfilling node modules
node: {
// The default value `true` seems not to work with RxJS
// TODO: Take a look if this can be enabled
setImmediate: false,
},
// Enable sourcemaps for debugging webpack's output.
devtool: devServer ? 'inline-source-map' : 'source-map',
// Plugins
plugins,
};
}
/**
* Creates the Webpack 2 configuration for the back-end code compilation.
* The options are documented at
* https://webpack.js.org/configuration/
*/
export function getBackendWebpackConfig(config: WebpackConfigOptions): webpack.Configuration {
const { serverFile, databaseFile, siteFile, triggersFile } = config;
const { sourceDir, buildDir, projectRootPath, devServer, debug, assetsRoot } = config;
const { analyze, stageDir } = config;
// Resolve modules, source, build and static paths
const sourceDirPath = path.resolve(projectRootPath, sourceDir);
const buildDirPath = path.resolve(projectRootPath, buildDir);
const modulesDirPath = path.resolve(projectRootPath, 'node_modules');
const ownModulesDirPath = path.resolve(__dirname, 'node_modules');
const stageDirPath = path.resolve(projectRootPath, stageDir);
// Use the tsconfig.json in the project folder (not in this library)
const tsConfigPath = path.resolve(projectRootPath, './tsconfig.json');
// Target backend always to ES2018
const compilerOptions = { target: 'ES2017' } as const;
// Determine the directory for the assets and the site
const assetsRootUrl = url.parse(assetsRoot);
const assetsPath = assetsRootUrl.pathname || '/';
const assetsDir = assetsPath.replace(/^\/+/, '');
const assetsFilePrefix = assetsDir && `${assetsDir}/`;
// Generate the plugins
const plugins: webpack.Plugin[] = [
// Perform type checking for TypeScript
...getCommonPlugins({
frontend: false,
devServer,
assetsFilePrefix,
tsConfigPath,
sourceDirPath,
compilerOptions,
}),
/**
* Prevent `pg` module to import `pg-native` binding library.
*/
new webpack.IgnorePlugin({
resourceRegExp: /^\.\/native$/,
contextRegExp: /node_modules\/pg\/lib$/,
}),
];
if (!devServer) {
// Generate some stats for the bundles
plugins.push(getBundleAnalyzerPlugin(analyze, path.resolve(stageDirPath, `report-backend.html`)));
}
// Entry points to be bundled
const entries: Record<string, string> = {
// Entry point for rendering the views on server-side
server: require.resolve(devServer ? './bootstrap/local-server' : './bootstrap/server'),
};
// Aliases that entry points will `require`
const aliases: Record<string, string> = {
_site: path.resolve(projectRootPath, sourceDir, siteFile),
};
// Modules excluded from the bundle
const externals = [
// No need to bundle AWS SDK for compilation, because it will be available in the Lambda node environment
'aws-sdk',
];
// If an API is defined, compile it as well
if (serverFile) | else {
// API not available. Let the bundle to compile without it, but
// raise error if attempting to `require`
externals.push('_service');
}
// If a database defined, compile it as well
if (databaseFile) {
// eslint-disable-next-line no-underscore-dangle
aliases._db = path.resolve(projectRootPath, sourceDir, databaseFile);
} else {
// Database not available. Let the bundle to compile without it, but
// raise error if attempting to `require`
externals.push('_db');
}
// If a triggers file is defined, compile it as well
if (triggersFile) {
// eslint-disable-next-line no-underscore-dangle
aliases._triggers = path.resolve(projectRootPath, sourceDir, triggersFile);
} else {
// Triggers not available. Let the bundle to compile without it, but
// raise error if attempting to `require`
externals.push('_triggers');
}
return {
context: projectRootPath,
// Development | {
// eslint-disable-next-line no-underscore-dangle
aliases._service = path.resolve(projectRootPath, sourceDir, serverFile);
} | conditional_block |
calibration.py | 1])] # 모든 x에서 가장 작은 blob
y_max_blob = blob_info[np.argmax(blob_info[::, 1])]
# int로 변경
x_min_blob = x_min_blob.astype(np.int)
x_max_blob = x_max_blob.astype(np.int)
y_min_blob = y_min_blob.astype(np.int)
y_max_blob = y_max_blob.astype(np.int)
print('x_min_blob : ', x_min_blob[0:2])
print('x_max_blob : ', x_max_blob[0:2])
print('y_min_blob : ', y_min_blob[0:2])
print('y_max_blob : ', y_max_blob[0:2])
# side blob point 표시
# cv2.circle(img_temp, (x_min_blob[0], x_min_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (x_max_blob[0], x_max_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (y_min_blob[0], y_min_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (y_max_blob[0], y_max_blob[1]), 1, (155, 155, 155), 10)
# 해당 side 포인트이 꼭지점을 이루는 사각형 그리기
pts = np.array([[x_max_blob[0],x_max_blob[1]],
[y_min_blob[0],y_min_blob[1]],
[x_min_blob[0],x_min_blob[1]],
[y_max_blob[0],y_max_blob[1]]], np.int32)
pts = pts.reshape((-1,1,2))
cv2.polylines(img_copy, [pts], isClosed = True, color = (155, 155, 155), thickness = 10) # 사각형 그리기
cv2.fillPoly(img_temp, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기
# cv2.fillPoly(img_copy, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기
# img_temp의 무게중심 구하기
# contours, hierarchy = cv2.findContours(img_temp, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
# for i in contours:
# M = cv2.moments(i)
# cX = int(M['m10'] / M['m00'])
# cY = int(M['m01'] / M['m00'])
# cv2.drawContours(img_temp, [i], 0, (100, 100, 100), 10)
## 두 선분의 교점으로 구하기
cX, cY = get_crosspt(x_min_blob[0:2], x_max_blob[0:2], y_min_blob[0:2], y_max_blob[0:2])
cX = int(cX)
cY = int(cY) |
print('Centroid : ', cX, cY)
# ref_square에 구하기 'ㄱ'부분 길이 구하기
ref_square_w = point2_distance(y_min_blob[0:2], x_max_blob[0:2]) # 'ㄱ'의 'ㅡ'부분
ref_square_h = point2_distance(y_max_blob[0:2], x_max_blob[0:2]) # 'ㄱ'의 '|'부분
print('ref_square_w : ', ref_square_w)
print('ref_square_h : ', ref_square_h)
plt.figure(figsize=(20,10))
plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Centroid Point')
plt.subplot(122), plt.imshow(img_temp, cmap='gray'), plt.title('Ref square from Side_Blob')
plt.show();
return ((int(cX), int(cY)), (int(ref_square_w), int(ref_square_h)), ((x_max_blob[0], x_max_blob[1]), (y_min_blob[0], y_min_blob[1]), (x_min_blob[0], x_min_blob[1]), (y_max_blob[0], y_max_blob[1]))) # 25개 blob의 무게중심(cX, cY) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보
# return[0] = (cX, cY) # 25 blob 사각형의 무게중심
# return[1] = (ref_square_w, ref_square_h) # 해당 사각형의 w,h
# return [2] = (xmax, ymin, xmin, ymax) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보
# for single camera
# 무게중심(CX,CY) 찾기, 및 BLOB 사각형의 'ㄱ'부분 길이 구하기 (ref_square_w, ref_square_h),
def find_centroid_for_singlecam(img, blob_info):
img_h, img_w = np.shape(img)
img_copy = np.copy(img) # 무게중심 표시를 위한 img
img_temp = np.zeros((img_h, img_w), dtype=np.uint8) # 25개의 blob중 가장자리 blob 으로 무게중심 찾기 위한 사각형
# blob_info = [x,y,diameter]
# find 5 ymin 5 ymax blob
sorted_y_blob = blob_info[blob_info[::, 1].argsort()] # y기준 sort
y_min_5_blob = sorted_y_blob[:5] # 모든 y에서 가장 작은 5개 blob 후보군
y_max_5_blob = sorted_y_blob[-5:] # 모든 y에서 가장 큰 5개 blob 후보군
x_max_blob_of_y_min = y_min_5_blob[np.argmax(y_min_5_blob[::, 0])] # (1)
x_min_blob_of_y_min = y_min_5_blob[np.argmin(y_min_5_blob[::, 0])] # y min 5 blob 중에서 가장 작은 x blob # (2)
x_min_blob_of_y_max = y_max_5_blob[np.argmin(y_max_5_blob[::, 0])] # y max 5 blob 중에서 가장 작은 x blob # (3)
x_max_blob_of_y_max = y_max_5_blob[np.argmax(y_max_5_blob[::, 0])] # (4)
# int로 변경
x_max_blob_of_y_min = x_max_blob_of_y_min.astype(np.int)
x_min_blob_of_y_min = x_min_blob_of_y_min.astype(np.int)
x_min_blob_of_y_max = x_min_blob_of_y_max.astype(np.int)
x_max_blob_of_y_max = x_max_blob_of_y_max.astype(np.int)
print('x_max_blob_of_y_min : ', x_max_blob_of_y_min[0:2])
print('x_min_blob_of_y_min : ', x_min_blob_of_y_min[0:2])
print('x_min_blob_of_y_max : ', x_min_blob_of_y_max[0:2])
print('x_max_blob_of_y_max : ', x_max_blob_of_y_max[0:2])
# side blob point 표시
# cv2.circle(img_temp, (x_min_blob[0], x_min_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (x_max_blob[0], x_max_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (y_min_blob[0], y_min_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (y_max_blob[0], y_max_blob[1]), 1, (155, 155, 155), 10)
# 해당 side 포인트이 꼭지점을 이루는 사각형 그리기
pts = np.array([[x_max_blob_of_y_min[0], x_max_blob_of_y_min[1]],
[x_min_blob_of_y_min[0], x_min_blob_of_y_min[1]],
|
cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1)
cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1)
| random_line_split |
calibration.py | plt.subplot(121), plt.imshow(img, cmap='gray'), plt.title('origin_binary_img')
plt.subplot(122), plt.imshow(img_copy, cmap='gray'), plt.title('Blob info')
plt.show();
return blob_info
# for quad camera
# 무게중심(CX,CY) 찾기, 및 BLOB 사각형의 'ㄱ'부분 길이 구하기 (ref_square_w, ref_square_h),
def find_centroid_for_quadcam(img, blob_info) :
img_h, img_w = np.shape(img)
img_copy = np.copy(img) # 무게중심 표시를 위한 img
img_temp = np.zeros((img_h, img_w), dtype=np.uint8) # 25개의 blob중 가장자리 blob 으로 무게중심 찾기 위한 사각형
x_min_blob = blob_info[np.argmin(blob_info[::, 0])] # 모든 x에서 가장 작은 blob
x_max_blob = blob_info[np.argmax(blob_info[::, 0])]
y_min_blob = blob_info[np.argmin(blob_info[::, 1])] # 모든 x에서 가장 작은 blob
y_max_blob = blob_info[np.argmax(blob_info[::, 1])]
# int로 변경
x_min_blob = x_min_blob.astype(np.int)
x_max_blob = x_max_blob.astype(np.int)
y_min_blob = y_min_blob.astype(np.int)
y_max_blob = y_max_blob.astype(np.int)
print('x_min_blob : ', x_min_blob[0:2])
print('x_max_blob : ', x_max_blob[0:2])
print('y_min_blob : ', y_min_blob[0:2])
print('y_max_blob : ', y_max_blob[0:2])
# side blob point 표시
# cv2.circle(img_temp, (x_min_blob[0], x_min_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (x_max_blob[0], x_max_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (y_min_blob[0], y_min_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (y_max_blob[0], y_max_blob[1]), 1, (155, 155, 155), 10)
# 해당 side 포인트이 꼭지점을 이루는 사각형 그리기
pts = np.array([[x_max_blob[0],x_max_blob[1]],
[y_min_blob[0],y_min_blob[1]],
[x_min_blob[0],x_min_blob[1]],
[y_max_blob[0],y_max_blob[1]]], np.int32)
pts = pts.reshape((-1,1,2))
cv2.polylines(img_copy, [pts], isClosed = True, color = (155, 155, 155), thickness = 10) # 사각형 그리기
cv2.fillPoly(img_temp, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기
# cv2.fillPoly(img_copy, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기
# img_temp의 무게중심 구하기
# contours, hierarchy = cv2.findContours(img_temp, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
# for i in contours:
# M = cv2.moments(i)
# cX = int(M['m10'] / M['m00'])
# cY = int(M['m01'] / M['m00'])
# cv2.drawContours(img_temp, [i], 0, (100, 100, 100), 10)
## 두 선분의 교점으로 구하기
cX, cY = get_crosspt(x_min_blob[0:2], x_max_blob[0:2], y_min_blob[0:2], y_max_blob[0:2])
cX = int(cX)
cY = int(cY)
cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1)
cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1)
print('Centroid : ', cX, cY)
# ref_square에 구하기 'ㄱ'부분 길이 구하기
ref_square_w = point2_distance(y_min_blob[0:2], x_max_blob[0:2]) # 'ㄱ'의 'ㅡ'부분
ref_square_h = point2_distance(y_max_blob[0:2], x_max_blob[0:2]) # 'ㄱ'의 '|'부분
print('ref_square_w : ', ref_square_w)
print('ref_square_h : ', ref_square_h)
plt.figure(figsize=(20,10))
plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Centroid Point')
plt.subplot(122), plt.imshow(img_temp, cmap='gray'), plt.title('Ref square from Side_Blob')
plt.show();
return ((int(cX), int(cY)), (int(ref_square_w), int(ref_square_h)), ((x_max_blob[0], x_max_blob[1]), (y_min_blob[0], y_min_blob[1]), (x_min_blob[0], x_min_blob[1]), (y_max_blob[0], y_max_blob[1]))) # 25개 blob의 무게중심(cX, cY) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보
# return[0] = (cX, cY) # 25 blob 사각형의 무게중심
# return[1] = (ref_square_w, ref_square_h) # 해당 사각형의 w,h
# return [2] = (xmax, ymin, xmin, ymax) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보
# for single camera
# 무게중심(CX,CY) 찾기, 및 BLOB 사각형의 'ㄱ'부분 길이 구하기 (ref_square_w, ref_square_h),
def find_centroid_for_singlecam(img, blob_info):
img_h, img_w = np.shape(img)
img_copy = np.copy(img) # 무게중심 표시를 위한 img
img_temp = np.zeros((img_h, img_w), dtype=np.uint8) # 25개의 blob중 가장자리 blob 으로 무게중심 찾기 위한 사각형
# blob_info = [x,y,diameter]
# find 5 ymin 5 ymax blob
sorted_y_blob = blob_info[blob_info[::, 1].argsort()] # y기준 sort
y_min_5_blob = sorted_y_blob[:5] # 모든 y에서 가장 작은 5개 blob 후보군
y_max_5_blob = sorted_y_blob[-5:] # 모든 y에서 가장 큰 5개 blob 후보군
x_max_blob_of_y_min = y_min_5_blob[np.argmax(y_min_5_blob[::, 0])] # (1)
x_min_blob_of_y_min = y_min_5_blob[np.argmin(y_min_5_blob[::, 0])] # y min 5 blob 중에서 가장 작은 x blob # (2)
x_min_blob_of_y_max = y_max_5_blob[np.argmin(y_max_5_blob[::, 0])] # y max 5 blob 중에서 가장 작은 x blob # (3)
x_max_blob_of_y_max = y_max_5_blob[np.argmax(y_max_5_blob[::, 0])] # (4)
# int로 변경
x_max_blob_of_y_min = x_max_blob_of_y_min.astype(np.int)
| d) # 추출결과의 중심, 추출결과의 diameter (blob의 직경x)
cv2.circle(img_copy, (x,y), 1, (155, 155, 155), 10)
cv2.circle(img_copy, (x,y), int(k.size/2), (155, 155, 155), 10)
blob_info.append([x,y,k.size]) # x,y, diameter 정보
blob_info = np.array(blob_info) # argmin, argmx 를 위해 numpy 사용
plt.figure(figsize=(15,15))
| conditional_block |
|
calibration.py | params.filterByConvexity = False
params.filterByInertia = True
params.minInertiaRatio = 0.7;
# 타원~원 = 0~1
# 줄 = 0
detector = cv2.SimpleBlobDetector_create(params)
keypoints = detector.detect(img_copy)
print('Detecting한 Blob개수 : ', len(keypoints))
# Blob labeling 수행
im_with_keypoints = cv2.drawKeypoints(img_copy, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
for k in keypoints :
x, y = k.pt
x,y = int(x), int(y)
print(k.pt, k.size,k.class_id) # 추출결과의 중심, 추출결과의 diameter (blob의 직경x)
cv2.circle(img_copy, (x,y), 1, (155, 155, 155), 10)
cv2.circle(img_copy, (x,y), int(k.size/2), (155, 155, 155), 10)
blob_info.append([x,y,k.size]) # x,y, diameter 정보
blob_info = np.array(blob_info) # argmin, argmx 를 위해 numpy 사용
plt.figure(figsize=(15,15))
plt.subplot(121), plt.imshow(img, cmap='gray'), plt.title('origin_binary_img')
plt.subplot(122), plt.imshow(img_copy, cmap='gray'), plt.title('Blob info')
plt.show();
return blob_info
# for quad camera
# 무게중심(CX,CY) 찾기, 및 BLOB 사각형의 'ㄱ'부분 길이 구하기 (ref_square_w, ref_square_h),
def find_centroid_for_quadc
am(img, blob_info) :
img_h, img_w = np.shape(img)
img_copy = np.copy(img) # 무게중심 표시를 위한 img
img_temp = np.zeros((img_h, img_w), dtype=np.uint8) # 25개의 blob중 가장자리 blob 으로 무게중심 찾기 위한 사각형
x_min_blob = blob_info[np.argmin(blob_info[::, 0])] # 모든 x에서 가장 작은 blob
x_max_blob = blob_info[np.argmax(blob_info[::, 0])]
y_min_blob = blob_info[np.argmin(blob_info[::, 1])] # 모든 x에서 가장 작은 blob
y_max_blob = blob_info[np.argmax(blob_info[::, 1])]
# int로 변경
x_min_blob = x_min_blob.astype(np.int)
x_max_blob = x_max_blob.astype(np.int)
y_min_blob = y_min_blob.astype(np.int)
y_max_blob = y_max_blob.astype(np.int)
print('x_min_blob : ', x_min_blob[0:2])
print('x_max_blob : ', x_max_blob[0:2])
print('y_min_blob : ', y_min_blob[0:2])
print('y_max_blob : ', y_max_blob[0:2])
# side blob point 표시
# cv2.circle(img_temp, (x_min_blob[0], x_min_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (x_max_blob[0], x_max_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (y_min_blob[0], y_min_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (y_max_blob[0], y_max_blob[1]), 1, (155, 155, 155), 10)
# 해당 side 포인트이 꼭지점을 이루는 사각형 그리기
pts = np.array([[x_max_blob[0],x_max_blob[1]],
[y_min_blob[0],y_min_blob[1]],
[x_min_blob[0],x_min_blob[1]],
[y_max_blob[0],y_max_blob[1]]], np.int32)
pts = pts.reshape((-1,1,2))
cv2.polylines(img_copy, [pts], isClosed = True, color = (155, 155, 155), thickness = 10) # 사각형 그리기
cv2.fillPoly(img_temp, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기
# cv2.fillPoly(img_copy, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기
# img_temp의 무게중심 구하기
# contours, hierarchy = cv2.findContours(img_temp, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
# for i in contours:
# M = cv2.moments(i)
# cX = int(M['m10'] / M['m00'])
# cY = int(M['m01'] / M['m00'])
# cv2.drawContours(img_temp, [i], 0, (100, 100, 100), 10)
## 두 선분의 교점으로 구하기
cX, cY = get_crosspt(x_min_blob[0:2], x_max_blob[0:2], y_min_blob[0:2], y_max_blob[0:2])
cX = int(cX)
cY = int(cY)
cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1)
cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1)
print('Centroid : ', cX, cY)
# ref_square에 구하기 'ㄱ'부분 길이 구하기
ref_square_w = point2_distance(y_min_blob[0:2], x_max_blob[0:2]) # 'ㄱ'의 'ㅡ'부분
ref_square_h = point2_distance(y_max_blob[0:2], x_max_blob[0:2]) # 'ㄱ'의 '|'부분
print('ref_square_w : ', ref_square_w)
print('ref_square_h : ', ref_square_h)
plt.figure(figsize=(20,10))
plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Centroid Point')
plt.subplot(122), plt.imshow(img_temp, cmap='gray'), plt.title('Ref square from Side_Blob')
plt.show();
return ((int(cX), int(cY)), (int(ref_square_w), int(ref_square_h)), ((x_max_blob[0], x_max_blob[1]), (y_min_blob[0], y_min_blob[1]), (x_min_blob[0], x_min_blob[1]), (y_max_blob[0], y_max_blob[1]))) # 25개 blob의 무게중심(cX, cY) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보
# return[0] = (cX, cY) # 25 blob 사각형의 무게중심
# return[1] = (ref_square_w, ref_square_h) # 해당 사각형의 w,h
# return [2] = (xmax, ymin, xmin, ymax) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보
# for single camera
# 무게중심(CX,CY) 찾기, 및 BLOB 사각형의 'ㄱ'부분 길이 구하기 (ref_square_w, ref_square_h),
def find_centroid_for_singlecam(img, blob_info):
img_h, img_w = np.shape(img)
img_copy = np.copy(img) # 무게중심 표시를 위한 img
img_temp = np.zeros((img_h, | ob_info = [] # 추출한 blob 정보
# blob detection
params = cv2.SimpleBlobDetector_Params()
params.blobColor = 255 # 밝은 얼룩 추출
# params.minThreshold = 240
# params.maxThreshold = 255
params.filterByArea = True
params.minArea = 10*10;
params.maxArea = 200*200
params.filterByCircularity = True
params.minCircularity = 0.8;
# 원 = 1.0
# 사각형 = 0.785
| identifier_body |
|
calibration.py | # find 5 ymin 5 ymax blob
sorted_y_blob = blob_info[blob_info[::, 1].argsort()] # y기준 sort
y_min_5_blob = sorted_y_blob[:5] # 모든 y에서 가장 작은 5개 blob 후보군
y_max_5_blob = sorted_y_blob[-5:] # 모든 y에서 가장 큰 5개 blob 후보군
x_max_blob_of_y_min = y_min_5_blob[np.argmax(y_min_5_blob[::, 0])] # (1)
x_min_blob_of_y_min = y_min_5_blob[np.argmin(y_min_5_blob[::, 0])] # y min 5 blob 중에서 가장 작은 x blob # (2)
x_min_blob_of_y_max = y_max_5_blob[np.argmin(y_max_5_blob[::, 0])] # y max 5 blob 중에서 가장 작은 x blob # (3)
x_max_blob_of_y_max = y_max_5_blob[np.argmax(y_max_5_blob[::, 0])] # (4)
# int로 변경
x_max_blob_of_y_min = x_max_blob_of_y_min.astype(np.int)
x_min_blob_of_y_min = x_min_blob_of_y_min.astype(np.int)
x_min_blob_of_y_max = x_min_blob_of_y_max.astype(np.int)
x_max_blob_of_y_max = x_max_blob_of_y_max.astype(np.int)
print('x_max_blob_of_y_min : ', x_max_blob_of_y_min[0:2])
print('x_min_blob_of_y_min : ', x_min_blob_of_y_min[0:2])
print('x_min_blob_of_y_max : ', x_min_blob_of_y_max[0:2])
print('x_max_blob_of_y_max : ', x_max_blob_of_y_max[0:2])
# side blob point 표시
# cv2.circle(img_temp, (x_min_blob[0], x_min_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (x_max_blob[0], x_max_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (y_min_blob[0], y_min_blob[1]), 1, (155, 155, 155), 10)
# cv2.circle(img_temp, (y_max_blob[0], y_max_blob[1]), 1, (155, 155, 155), 10)
# 해당 side 포인트이 꼭지점을 이루는 사각형 그리기
pts = np.array([[x_max_blob_of_y_min[0], x_max_blob_of_y_min[1]],
[x_min_blob_of_y_min[0], x_min_blob_of_y_min[1]],
[x_min_blob_of_y_max[0], x_min_blob_of_y_max[1]],
[x_max_blob_of_y_max[0], x_max_blob_of_y_max[1]]], np.int32)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(img_copy, [pts], isClosed=True, color=(155, 155, 155), thickness=10) # 사각형 그리기
cv2.fillPoly(img_temp, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기
# cv2.fillPoly(img_copy, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기
# img_temp의 무게중심 구하기
# contours, hierarchy = cv2.findContours(img_temp, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
# for i in contours:
# M = cv2.moments(i)
# cX = int(M['m10'] / M['m00'])
# cY = int(M['m01'] / M['m00'])
#
# cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1)
# cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1)
# cv2.drawContours(img_temp, [i], 0, (100, 100, 100), 10)
## 두 선분의 교점으로 구하기
cX, cY = get_crosspt(x_min_blob_of_y_min[0:2], x_max_blob_of_y_max[0:2], x_max_blob_of_y_min[0:2], x_min_blob_of_y_max[0:2])
cX = int(cX)
cY = int(cY)
cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1)
cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1)
print('Centroid : ', cX, cY)
# ref_square에 구하기 'ㄱ'부분 길이 구하기
ref_square_w = point2_distance(x_max_blob_of_y_min[0:2], x_min_blob_of_y_min[0:2]) # 'ㄱ'의 'ㅡ'부분 # 1 - 2
ref_square_h = point2_distance(x_max_blob_of_y_min[0:2], x_max_blob_of_y_max[0:2]) # 'ㄱ'의 '|'부분 # 1 - 4
print('ref_square_w : ', ref_square_w)
print('ref_square_h : ', ref_square_h)
plt.figure(figsize=(20, 10))
plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Centroid Point')
plt.subplot(122), plt.imshow(img_temp, cmap='gray'), plt.title('Ref square from Side_Blob')
plt.show();
return ((int(cX), int(cY)), (int(ref_square_w), int(ref_square_h)), (
(x_max_blob_of_y_min[0], x_max_blob_of_y_min[1]), (x_min_blob_of_y_min[0], x_min_blob_of_y_min[1]),
(x_min_blob_of_y_max[0], x_min_blob_of_y_max[1]),
(x_max_blob_of_y_max[0], x_max_blob_of_y_max[1]))) # 25개 blob의 무게중심(cX, cY) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보
# return[0] = (cX, cY) # 25 blob 사각형의 무게중심
# return[1] = (ref_square_w, ref_square_h) # 해당 사각형의 w,h
# return [2] = (xmax, ymin, xmin, ymax) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보
# 지정한 중점에서 theta만큼 이미지 돌리기
def img_affine(img, centroid, theta) : #### aFFINE 시 0~255 -> 0~1로 변경
img_copy = np.copy(img)
# 회전하기 전에 center 표시 하여 얼마나 돌아갔는지 확인
img_copy = cv2.circle(img_copy, centroid, 1, (220, 220, 0), 30)
img_copy = cv2.putText(img_copy, 'theta = ' + str(theta), centroid, cv2.FONT_HERSHEY_SIMPLEX, 4, (0, 0, 255), cv2.LINE_AA)
# 회전 opcn cv
'''
img_h, img_w = img.shape[0:2]
# matrix = cv2.getRotationMatrix2D((img_w/2, img_h/2), theta, 1)
matrix = cv2.getRotationMatrix2D(centroid, theta, 1)
dst = cv2.warpAffine(img, matrix, (img_w, img_h)) # 0~1로 변경됨
'''
# 회전 pil
img_h, img_w = img.shape[0:2]
# pil 객체로 변경
dst = Image.fromarray(img.astype('uint8'), 'L')
dst = dst.rotate(theta, center=centroid, expand=False, resample=Image.NEAREST) # theta만큼 회전
# 다시 numpy로 변경
dst = np.array(dst)
plt.figure(figsize=(10,10))
plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Before_affine')
pl | t.subplot( | identifier_name |
|
ml_cms_heights.py | idx, len(lat_lons)
# vals_pi.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\PI.tif',
# lon, lat, replace_ras=False))
# vals_di.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\DI.tif',
# lon, lat, replace_ras=False))
#
# df['DI'] = vals_di
# df['PI'] = vals_pi
df = df[cols]
data = df.as_matrix(columns=cols[1:])
target = df.as_matrix(columns=[self.var_target]).ravel()
# Get training and testing splits
splits = train_test_split(data, target, test_size=0.2)
return cols, splits
def train_ml_model(self):
"""
:return:
"""
logger.info('#########################################################################')
logger.info('train_ml_model')
logger.info('#########################################################################')
######################################################
# Load dataset
######################################################
cols, splits = self.get_data()
data_train, data_test, target_train, target_test = splits
# clf = ExtraTreesRegressor(500, n_jobs=constants.ncpu)
# #clf = SVR(kernel='rbf', C=1e3, gamma=0.1)
# #clf = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
# data = df_train.as_matrix(columns=cols[1:]) # convert dataframe column to matrix
# #data = preprocessing.scale(data)
# target = df_train.as_matrix(columns=[self.var_target]).ravel() # convert dataframe column to matrix
# clf.fit(data, target)
#
# predict_val = clf.predict(after.as_matrix(columns=cols[1:]))
# results = compute_stats.ols(predict_val.tolist(), after_target.tolist())
# print results.rsquared
# import matplotlib.pyplot as plt
# plt.scatter(after_target, predict_val)
# plt.show()
# pdb.set_trace()
if not os.path.isfile(self.path_pickle_model):
# For details in scikit workflow: See http://stackoverflow.com/questions/
# 35256876/ensuring-right-order-of-operations-in-random-forest-classification-in-scikit-lea
# TODO Separate out a dataset so that even the grid search cv can be tested
############################
# Select features from model
############################
logger.info('Selecting important features from model')
if self.classify:
rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu)
else:
rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu)
feat_selection = SelectFromModel(rf_feature_imp)
pipeline = Pipeline([
('fs', feat_selection),
('clf', self.model),
])
#################################
# Grid search for best parameters
#################################
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
logger.info('Tuning hyperparameters')
param_grid = {
'fs__threshold': ['mean', 'median'],
'fs__estimator__max_features': ['auto', 'log2'],
'clf__max_features': ['auto', 'log2'],
'clf__n_estimators': [1000, 2000]
#'clf__gamma': np.logspace(-9, 3, 13),
#'clf__C': np.logspace(-2, 10, 13)
}
gs = GridSearchCV(pipeline, param_grid=param_grid, verbose=2, n_jobs=constants.ncpu, error_score=np.nan)
# Fir the data before getting the best parameter combination. Different data sets will have
# different optimized parameter combinations, i.e. without data, there is no optimal parameter combination.
gs.fit(data_train, target_train)
logger.info(gs.best_params_)
data_test = pd.DataFrame(data_test, columns=cols[1:])
# Update features that should be used in model
selected_features = gs.best_estimator_.named_steps['fs'].transform([cols[1:]])
cols = selected_features[0]
data_test = data_test[cols]
# Update model with the best parameters learnt in the previous step
self.model = gs.best_estimator_.named_steps['clf']
predict_val = self.model.predict(data_test)
results = compute_stats.ols(predict_val.tolist(), target_test.tolist())
print results.rsquared
print cols
plt.scatter(target_test, predict_val)
plt.show()
pdb.set_trace()
###################################################################
# Output and plot importance of model features, and learning curves
###################################################################
self.output_model_importance(gs, 'clf', num_cols=len(cols[1:]))
if constants.plot_model_importance:
train_sizes, train_scores, test_scores = learning_curve(self.model, data, target, cv=k_fold,
n_jobs=constants.ncpu)
plot.plot_learning_curve(train_scores, test_scores, train_sizes=train_sizes, fname='learning_curve',
ylim=(0.0, 1.01), title='Learning curves', out_path=self.path_out_dir)
# Save the model to disk
logger.info('Saving model and features as pickle on disk')
with open(self.path_pickle_model, 'wb') as f:
cPickle.dump(self.model, f)
with open(self.path_pickle_features, 'wb') as f:
cPickle.dump(self.vars_features, f)
else:
# Read model from pickle on disk
with open(self.path_pickle_model, 'rb') as f:
logger.info('Reading model from pickle on disk')
self.model = cPickle.load(f)
logger.info('Reading features from pickle on disk')
self.vars_features = pd.read_pickle(self.path_pickle_features)
return df_cc
def do_forecasting(self, df_forecast, mon_names, available_target=False, name_target='yield'):
"""
1. Does classification/regression based on already built model.
2. Plots confusion matrix for classification tasks, scatter plot for regression
3. Plots accuracy statistics for classification/regression
:param df_forecast:
:param mon_names:
:param available_target: Is target array available?
:param name_target: Name of target array (defaults to yield)
:return:
"""
data = df_forecast.as_matrix(columns=self.vars_features) # convert dataframe column to matrix
predicted = self.model.predict(data)
if available_target:
expected = df_forecast.as_matrix(columns=[name_target]).ravel()
if not self.classify: # REGRESSION
# Compute stats
results = compute_stats.ols(predicted.tolist(), expected.tolist())
bias = compute_stats.bias(predicted, expected)
rmse = compute_stats.rmse(predicted, expected)
mae = compute_stats.mae(predicted, expected)
# Plot!
plot.plot_regression_scatter(expected, np.asarray(predicted),
annotate=r'$r^{2}$ ' + '{:0.2f}'.format(results.rsquared) + '\n' +
'peak NDVI date: ' + self.time_peak_ndvi.strftime('%b %d'),
xlabel='Expected yield',
ylabel='Predicted yield',
title=mon_names + ' ' + str(int(df_forecast[self.season].unique()[0])),
fname=self.task + '_' + '_'.join([mon_names]) + '_' + self.crop,
out_path=self.path_out_dir)
# global expected vs predicted
if self.debug:
# any non-existing index will add row
self.df_global.loc[len(self.df_global)] = [np.nanmean(expected), np.nanmean(predicted), mon_names,
self.forecast_yr]
return predicted, {'RMSE': rmse, 'MAE': mae, r'$r^{2}$': results.rsquared, 'Bias': bias}
else: # CLASSIFICATION
# Convert from crop condition class (e.g. 4) to string (e.g. exceptional)
| expected, predicted = compute_stats.remove_nans(expected, predicted)
cm = confusion_matrix(expected, predicted, labels=self.dict_cc.keys()).T
# Compute and plot class probabilities
proba_cc = self.model.predict_proba(data)
df_proba = pd.DataFrame(proba_cc, columns=self.dict_cc.values())
plot.plot_class_probabilities(df_proba, fname='proba_' + '_'.join([mon_names]) + '_' + self.crop,
out_path=self.path_out_dir)
# Plot confusion matrix
plot.plot_confusion_matrix(cm, normalized=False, fname='cm_' + '_'.join([mon_names]) + '_' + self.crop,
xlabel='True class', ylabel='Predicted class', ticks=self.dict_cc.values(),
out_path=self.path_out_dir)
# Normalize and plot confusion matrix
cm_normalized = normalize(cm.astype(float), axis=1, norm='l1')
plot.plot_confusion_matrix(cm_normalized, fname='norm_cm_' + '_'.join([mon_names]) + '_' + self.crop,
xlabel='True class', ylabel='Predicted class', normalized=True,
ticks=self.dict_cc.values(), out_path=self.path_out_dir)
| conditional_block |
|
ml_cms_heights.py | countries x crops
# train_ml_model
# create_train_df
# compute_ml_vars
# ; create ml model
# loop_forecasting
# create_forecast_df
# do_forecasting
class MLCms:
"""
"""
def __init__(self, config_file=''):
# Parse config file
self.parser = SafeConfigParser()
self.parser.read(config_file)
# machine learning specific variables
self.classify = constants.DO_CLASSIFICATION # Regress or classify?
self.vars_features = constants.fixed_vars
self.vars_target = constants.ML_TARGETS
if self.classify:
self.var_target = constants.ML_TARGETS
self.task = 'classification'
self.model = RandomForestClassifier(n_estimators=2500, n_jobs=constants.ncpu, random_state=0)
else:
self.var_target = constants.ML_TARGETS
self.task = 'regression'
self.model = RandomForestRegressor(n_estimators=2500, n_jobs=constants.ncpu, random_state=0) # SVR()
# Get path to input
self.path_inp = constants.base_dir + os.sep + constants.name_inp_fl
# Output directory is <dir>_<classification>_<2014>
self.path_out_dir = constants.out_dir
utils.make_dir_if_missing(self.path_out_dir)
# Model pickle
self.path_pickle_model = self.path_out_dir + os.sep + constants.model_pickle
self.path_pickle_features = self.path_out_dir + os.sep + 'pickled_features'
def output_model_importance(self, gs, name_gs, num_cols):
"""
:param gs:
:param name_gs:
:param num_cols:
:return:
"""
rows_list = []
name_vars = []
feature_importance = gs.best_estimator_.named_steps[name_gs].feature_importances_
importances = 100.0 * (feature_importance / feature_importance.max())
std = np.std([tree.feature_importances_ for tree in self.model.estimators_], axis=0)
indices = np.argsort(importances)[::-1]
# Store feature ranking in a dataframe
for f in range(num_cols):
dict_results = {'Variable': self.vars_features[indices[f]], 'Importance': importances[indices[f]]}
name_vars.append(self.vars_features[indices[f]])
rows_list.append(dict_results)
df_results = pd.DataFrame(rows_list)
num_cols = 10 if len(indices) > 10 else len(indices) # Plot upto a maximum of 10 features
plot.plot_model_importance(num_bars=num_cols, xvals=importances[indices][:num_cols],
std=std[indices][:num_cols], fname=self.task + '_importance_' + self.crop,
title='Importance of variable (' + self.country + ' ' + self.crop_lname + ')',
xlabel=name_vars[:num_cols], out_path=self.path_out_dir)
df_results.to_csv(self.path_out_dir + os.sep + self.task + '_importance_' + self.crop + '.csv')
def get_data(self):
"""
:return:
"""
df = pd.read_csv(self.path_inp)
cols = [col for col in df.columns if col not in self.vars_features]
# cols.extend(['DI', 'PI'])
# Add information on PI and DI of soils
# iterate over each row, get lat and lon
# Find corresponding DI and PI
lat_lons = zip(df['Long_round'], df['Lat_round'])
vals_di = []
vals_pi = []
# for idx, (lon, lat) in enumerate(lat_lons):
# print idx, len(lat_lons)
# vals_pi.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\PI.tif',
# lon, lat, replace_ras=False))
# vals_di.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\DI.tif',
# lon, lat, replace_ras=False))
#
# df['DI'] = vals_di
# df['PI'] = vals_pi
df = df[cols]
data = df.as_matrix(columns=cols[1:])
target = df.as_matrix(columns=[self.var_target]).ravel()
# Get training and testing splits
splits = train_test_split(data, target, test_size=0.2)
return cols, splits
def train_ml_model(self):
| # clf.fit(data, target)
#
# predict_val = clf.predict(after.as_matrix(columns=cols[1:]))
# results = compute_stats.ols(predict_val.tolist(), after_target.tolist())
# print results.rsquared
# import matplotlib.pyplot as plt
# plt.scatter(after_target, predict_val)
# plt.show()
# pdb.set_trace()
if not os.path.isfile(self.path_pickle_model):
# For details in scikit workflow: See http://stackoverflow.com/questions/
# 35256876/ensuring-right-order-of-operations-in-random-forest-classification-in-scikit-lea
# TODO Separate out a dataset so that even the grid search cv can be tested
############################
# Select features from model
############################
logger.info('Selecting important features from model')
if self.classify:
rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu)
else:
rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu)
feat_selection = SelectFromModel(rf_feature_imp)
pipeline = Pipeline([
('fs', feat_selection),
('clf', self.model),
])
#################################
# Grid search for best parameters
#################################
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
logger.info('Tuning hyperparameters')
param_grid = {
'fs__threshold': ['mean', 'median'],
'fs__estimator__max_features': ['auto', 'log2'],
'clf__max_features': ['auto', 'log2'],
'clf__n_estimators': [1000, 2000]
#'clf__gamma': np.logspace(-9, 3, 13),
#'clf__C': np.logspace(-2, 10, 13)
}
gs = GridSearchCV(pipeline, param_grid=param_grid, verbose=2, n_jobs=constants.ncpu, error_score=np.nan)
# Fir the data before getting the best parameter combination. Different data sets will have
# different optimized parameter combinations, i.e. without data, there is no optimal parameter combination.
gs.fit(data_train, target_train)
logger.info(gs.best_params_)
data_test = pd.DataFrame(data_test, columns=cols[1:])
# Update features that should be used in model
selected_features = gs.best_estimator_.named_steps['fs'].transform([cols[1:]])
cols = selected_features[0]
data_test = data_test[cols]
# Update model with the best parameters learnt in the previous step
self.model = gs.best_estimator_.named_steps['clf']
predict_val = self.model.predict(data_test)
results = compute_stats.ols(predict_val.tolist(), target_test.tolist())
print results.rsquared
print cols
plt.scatter(target_test, predict_val)
plt.show()
pdb.set_trace()
###################################################################
# Output and plot importance of model features, and learning curves
###################################################################
self.output_model_importance(gs, 'clf', num_cols=len(cols[1:]))
if constants.plot_model_importance:
train_sizes, train_scores, test_scores = learning_curve(self.model, data, target, cv=k_fold,
n_jobs=constants.ncpu)
plot.plot_learning_curve(train_scores, test_scores, train_sizes=train_sizes, fname='learning_curve',
ylim=(0.0, 1.01), title='Learning curves', out_path=self.path_out_dir)
# Save the model to disk
logger.info('Saving model and features as pickle on disk')
with open(self.path_pickle_model, 'wb') as f:
cPickle.dump(self.model, f)
with open(self.path_pickle_features, 'wb') as f:
cPickle.dump(self.vars_features, f)
| """
:return:
"""
logger.info('#########################################################################')
logger.info('train_ml_model')
logger.info('#########################################################################')
######################################################
# Load dataset
######################################################
cols, splits = self.get_data()
data_train, data_test, target_train, target_test = splits
# clf = ExtraTreesRegressor(500, n_jobs=constants.ncpu)
# #clf = SVR(kernel='rbf', C=1e3, gamma=0.1)
# #clf = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
# data = df_train.as_matrix(columns=cols[1:]) # convert dataframe column to matrix
# #data = preprocessing.scale(data)
# target = df_train.as_matrix(columns=[self.var_target]).ravel() # convert dataframe column to matrix | identifier_body |
ml_cms_heights.py | x crops
# train_ml_model
# create_train_df
# compute_ml_vars
# ; create ml model
# loop_forecasting
# create_forecast_df
# do_forecasting
class MLCms:
"""
"""
def __init__(self, config_file=''):
# Parse config file
self.parser = SafeConfigParser()
self.parser.read(config_file)
# machine learning specific variables
self.classify = constants.DO_CLASSIFICATION # Regress or classify?
self.vars_features = constants.fixed_vars
self.vars_target = constants.ML_TARGETS
if self.classify:
self.var_target = constants.ML_TARGETS
self.task = 'classification'
self.model = RandomForestClassifier(n_estimators=2500, n_jobs=constants.ncpu, random_state=0)
else:
self.var_target = constants.ML_TARGETS
self.task = 'regression'
self.model = RandomForestRegressor(n_estimators=2500, n_jobs=constants.ncpu, random_state=0) # SVR()
# Get path to input
self.path_inp = constants.base_dir + os.sep + constants.name_inp_fl
# Output directory is <dir>_<classification>_<2014>
self.path_out_dir = constants.out_dir
utils.make_dir_if_missing(self.path_out_dir)
# Model pickle
self.path_pickle_model = self.path_out_dir + os.sep + constants.model_pickle
self.path_pickle_features = self.path_out_dir + os.sep + 'pickled_features'
def output_model_importance(self, gs, name_gs, num_cols):
"""
:param gs:
:param name_gs:
:param num_cols:
:return:
"""
rows_list = []
name_vars = []
feature_importance = gs.best_estimator_.named_steps[name_gs].feature_importances_
importances = 100.0 * (feature_importance / feature_importance.max())
std = np.std([tree.feature_importances_ for tree in self.model.estimators_], axis=0)
indices = np.argsort(importances)[::-1]
# Store feature ranking in a dataframe
for f in range(num_cols):
dict_results = {'Variable': self.vars_features[indices[f]], 'Importance': importances[indices[f]]}
name_vars.append(self.vars_features[indices[f]])
rows_list.append(dict_results)
df_results = pd.DataFrame(rows_list)
num_cols = 10 if len(indices) > 10 else len(indices) # Plot upto a maximum of 10 features
plot.plot_model_importance(num_bars=num_cols, xvals=importances[indices][:num_cols],
std=std[indices][:num_cols], fname=self.task + '_importance_' + self.crop,
title='Importance of variable (' + self.country + ' ' + self.crop_lname + ')',
xlabel=name_vars[:num_cols], out_path=self.path_out_dir)
df_results.to_csv(self.path_out_dir + os.sep + self.task + '_importance_' + self.crop + '.csv')
def get_data(self):
"""
:return:
"""
df = pd.read_csv(self.path_inp)
cols = [col for col in df.columns if col not in self.vars_features]
# cols.extend(['DI', 'PI'])
# Add information on PI and DI of soils
# iterate over each row, get lat and lon
# Find corresponding DI and PI
lat_lons = zip(df['Long_round'], df['Lat_round'])
vals_di = []
vals_pi = []
# for idx, (lon, lat) in enumerate(lat_lons):
# print idx, len(lat_lons)
# vals_pi.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\PI.tif',
# lon, lat, replace_ras=False))
# vals_di.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\DI.tif',
# lon, lat, replace_ras=False))
#
# df['DI'] = vals_di
# df['PI'] = vals_pi
df = df[cols]
data = df.as_matrix(columns=cols[1:])
target = df.as_matrix(columns=[self.var_target]).ravel()
# Get training and testing splits
splits = train_test_split(data, target, test_size=0.2)
return cols, splits
def | (self):
"""
:return:
"""
logger.info('#########################################################################')
logger.info('train_ml_model')
logger.info('#########################################################################')
######################################################
# Load dataset
######################################################
cols, splits = self.get_data()
data_train, data_test, target_train, target_test = splits
# clf = ExtraTreesRegressor(500, n_jobs=constants.ncpu)
# #clf = SVR(kernel='rbf', C=1e3, gamma=0.1)
# #clf = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
# data = df_train.as_matrix(columns=cols[1:]) # convert dataframe column to matrix
# #data = preprocessing.scale(data)
# target = df_train.as_matrix(columns=[self.var_target]).ravel() # convert dataframe column to matrix
# clf.fit(data, target)
#
# predict_val = clf.predict(after.as_matrix(columns=cols[1:]))
# results = compute_stats.ols(predict_val.tolist(), after_target.tolist())
# print results.rsquared
# import matplotlib.pyplot as plt
# plt.scatter(after_target, predict_val)
# plt.show()
# pdb.set_trace()
if not os.path.isfile(self.path_pickle_model):
# For details in scikit workflow: See http://stackoverflow.com/questions/
# 35256876/ensuring-right-order-of-operations-in-random-forest-classification-in-scikit-lea
# TODO Separate out a dataset so that even the grid search cv can be tested
############################
# Select features from model
############################
logger.info('Selecting important features from model')
if self.classify:
rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu)
else:
rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu)
feat_selection = SelectFromModel(rf_feature_imp)
pipeline = Pipeline([
('fs', feat_selection),
('clf', self.model),
])
#################################
# Grid search for best parameters
#################################
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
logger.info('Tuning hyperparameters')
param_grid = {
'fs__threshold': ['mean', 'median'],
'fs__estimator__max_features': ['auto', 'log2'],
'clf__max_features': ['auto', 'log2'],
'clf__n_estimators': [1000, 2000]
#'clf__gamma': np.logspace(-9, 3, 13),
#'clf__C': np.logspace(-2, 10, 13)
}
gs = GridSearchCV(pipeline, param_grid=param_grid, verbose=2, n_jobs=constants.ncpu, error_score=np.nan)
# Fir the data before getting the best parameter combination. Different data sets will have
# different optimized parameter combinations, i.e. without data, there is no optimal parameter combination.
gs.fit(data_train, target_train)
logger.info(gs.best_params_)
data_test = pd.DataFrame(data_test, columns=cols[1:])
# Update features that should be used in model
selected_features = gs.best_estimator_.named_steps['fs'].transform([cols[1:]])
cols = selected_features[0]
data_test = data_test[cols]
# Update model with the best parameters learnt in the previous step
self.model = gs.best_estimator_.named_steps['clf']
predict_val = self.model.predict(data_test)
results = compute_stats.ols(predict_val.tolist(), target_test.tolist())
print results.rsquared
print cols
plt.scatter(target_test, predict_val)
plt.show()
pdb.set_trace()
###################################################################
# Output and plot importance of model features, and learning curves
###################################################################
self.output_model_importance(gs, 'clf', num_cols=len(cols[1:]))
if constants.plot_model_importance:
train_sizes, train_scores, test_scores = learning_curve(self.model, data, target, cv=k_fold,
n_jobs=constants.ncpu)
plot.plot_learning_curve(train_scores, test_scores, train_sizes=train_sizes, fname='learning_curve',
ylim=(0.0, 1.01), title='Learning curves', out_path=self.path_out_dir)
# Save the model to disk
logger.info('Saving model and features as pickle on disk')
with open(self.path_pickle_model, 'wb') as f:
cPickle.dump(self.model, f)
with open(self.path_pickle_features, 'wb') as f:
cPickle.dump(self.vars_features, f | train_ml_model | identifier_name |
ml_cms_heights.py | countries x crops
# train_ml_model
# create_train_df
# compute_ml_vars
# ; create ml model
# loop_forecasting |
class MLCms:
"""
"""
def __init__(self, config_file=''):
# Parse config file
self.parser = SafeConfigParser()
self.parser.read(config_file)
# machine learning specific variables
self.classify = constants.DO_CLASSIFICATION # Regress or classify?
self.vars_features = constants.fixed_vars
self.vars_target = constants.ML_TARGETS
if self.classify:
self.var_target = constants.ML_TARGETS
self.task = 'classification'
self.model = RandomForestClassifier(n_estimators=2500, n_jobs=constants.ncpu, random_state=0)
else:
self.var_target = constants.ML_TARGETS
self.task = 'regression'
self.model = RandomForestRegressor(n_estimators=2500, n_jobs=constants.ncpu, random_state=0) # SVR()
# Get path to input
self.path_inp = constants.base_dir + os.sep + constants.name_inp_fl
# Output directory is <dir>_<classification>_<2014>
self.path_out_dir = constants.out_dir
utils.make_dir_if_missing(self.path_out_dir)
# Model pickle
self.path_pickle_model = self.path_out_dir + os.sep + constants.model_pickle
self.path_pickle_features = self.path_out_dir + os.sep + 'pickled_features'
def output_model_importance(self, gs, name_gs, num_cols):
"""
:param gs:
:param name_gs:
:param num_cols:
:return:
"""
rows_list = []
name_vars = []
feature_importance = gs.best_estimator_.named_steps[name_gs].feature_importances_
importances = 100.0 * (feature_importance / feature_importance.max())
std = np.std([tree.feature_importances_ for tree in self.model.estimators_], axis=0)
indices = np.argsort(importances)[::-1]
# Store feature ranking in a dataframe
for f in range(num_cols):
dict_results = {'Variable': self.vars_features[indices[f]], 'Importance': importances[indices[f]]}
name_vars.append(self.vars_features[indices[f]])
rows_list.append(dict_results)
df_results = pd.DataFrame(rows_list)
num_cols = 10 if len(indices) > 10 else len(indices) # Plot upto a maximum of 10 features
plot.plot_model_importance(num_bars=num_cols, xvals=importances[indices][:num_cols],
std=std[indices][:num_cols], fname=self.task + '_importance_' + self.crop,
title='Importance of variable (' + self.country + ' ' + self.crop_lname + ')',
xlabel=name_vars[:num_cols], out_path=self.path_out_dir)
df_results.to_csv(self.path_out_dir + os.sep + self.task + '_importance_' + self.crop + '.csv')
def get_data(self):
"""
:return:
"""
df = pd.read_csv(self.path_inp)
cols = [col for col in df.columns if col not in self.vars_features]
# cols.extend(['DI', 'PI'])
# Add information on PI and DI of soils
# iterate over each row, get lat and lon
# Find corresponding DI and PI
lat_lons = zip(df['Long_round'], df['Lat_round'])
vals_di = []
vals_pi = []
# for idx, (lon, lat) in enumerate(lat_lons):
# print idx, len(lat_lons)
# vals_pi.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\PI.tif',
# lon, lat, replace_ras=False))
# vals_di.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\DI.tif',
# lon, lat, replace_ras=False))
#
# df['DI'] = vals_di
# df['PI'] = vals_pi
df = df[cols]
data = df.as_matrix(columns=cols[1:])
target = df.as_matrix(columns=[self.var_target]).ravel()
# Get training and testing splits
splits = train_test_split(data, target, test_size=0.2)
return cols, splits
def train_ml_model(self):
"""
:return:
"""
logger.info('#########################################################################')
logger.info('train_ml_model')
logger.info('#########################################################################')
######################################################
# Load dataset
######################################################
cols, splits = self.get_data()
data_train, data_test, target_train, target_test = splits
# clf = ExtraTreesRegressor(500, n_jobs=constants.ncpu)
# #clf = SVR(kernel='rbf', C=1e3, gamma=0.1)
# #clf = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
# data = df_train.as_matrix(columns=cols[1:]) # convert dataframe column to matrix
# #data = preprocessing.scale(data)
# target = df_train.as_matrix(columns=[self.var_target]).ravel() # convert dataframe column to matrix
# clf.fit(data, target)
#
# predict_val = clf.predict(after.as_matrix(columns=cols[1:]))
# results = compute_stats.ols(predict_val.tolist(), after_target.tolist())
# print results.rsquared
# import matplotlib.pyplot as plt
# plt.scatter(after_target, predict_val)
# plt.show()
# pdb.set_trace()
if not os.path.isfile(self.path_pickle_model):
# For details in scikit workflow: See http://stackoverflow.com/questions/
# 35256876/ensuring-right-order-of-operations-in-random-forest-classification-in-scikit-lea
# TODO Separate out a dataset so that even the grid search cv can be tested
############################
# Select features from model
############################
logger.info('Selecting important features from model')
if self.classify:
rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu)
else:
rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu)
feat_selection = SelectFromModel(rf_feature_imp)
pipeline = Pipeline([
('fs', feat_selection),
('clf', self.model),
])
#################################
# Grid search for best parameters
#################################
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
logger.info('Tuning hyperparameters')
param_grid = {
'fs__threshold': ['mean', 'median'],
'fs__estimator__max_features': ['auto', 'log2'],
'clf__max_features': ['auto', 'log2'],
'clf__n_estimators': [1000, 2000]
#'clf__gamma': np.logspace(-9, 3, 13),
#'clf__C': np.logspace(-2, 10, 13)
}
gs = GridSearchCV(pipeline, param_grid=param_grid, verbose=2, n_jobs=constants.ncpu, error_score=np.nan)
# Fir the data before getting the best parameter combination. Different data sets will have
# different optimized parameter combinations, i.e. without data, there is no optimal parameter combination.
gs.fit(data_train, target_train)
logger.info(gs.best_params_)
data_test = pd.DataFrame(data_test, columns=cols[1:])
# Update features that should be used in model
selected_features = gs.best_estimator_.named_steps['fs'].transform([cols[1:]])
cols = selected_features[0]
data_test = data_test[cols]
# Update model with the best parameters learnt in the previous step
self.model = gs.best_estimator_.named_steps['clf']
predict_val = self.model.predict(data_test)
results = compute_stats.ols(predict_val.tolist(), target_test.tolist())
print results.rsquared
print cols
plt.scatter(target_test, predict_val)
plt.show()
pdb.set_trace()
###################################################################
# Output and plot importance of model features, and learning curves
###################################################################
self.output_model_importance(gs, 'clf', num_cols=len(cols[1:]))
if constants.plot_model_importance:
train_sizes, train_scores, test_scores = learning_curve(self.model, data, target, cv=k_fold,
n_jobs=constants.ncpu)
plot.plot_learning_curve(train_scores, test_scores, train_sizes=train_sizes, fname='learning_curve',
ylim=(0.0, 1.01), title='Learning curves', out_path=self.path_out_dir)
# Save the model to disk
logger.info('Saving model and features as pickle on disk')
with open(self.path_pickle_model, 'wb') as f:
cPickle.dump(self.model, f)
with open(self.path_pickle_features, 'wb') as f:
cPickle.dump(self.vars_features, f)
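# Illustrative sketch (not from the original file): how the pickled model and feature list
# saved above could be reloaded later for forecasting (see create_forecast_df/do_forecasting
# below). The forecast dataframe name df_forecast is an assumption.
#
# with open(self.path_pickle_model, 'rb') as f:
#     model = cPickle.load(f)
# with open(self.path_pickle_features, 'rb') as f:
#     features = cPickle.load(f)
# predictions = model.predict(df_forecast[features])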
| # create_forecast_df
# do_forecasting | random_line_split |
aws.go | _vars", 0755)
exec.Command("cp", "-rfp", "./kubespray/inventory/sample/", "./kubespray/inventory/awscluster/").Run()
exec.Command("cp", "./kubespray/inventory/hosts", "./kubespray/inventory/awscluster/hosts").Run()
//Enable load balancer api access and copy the kubeconfig file locally
loadBalancerName, err := exec.Command("sh", "-c", "grep apiserver_loadbalancer_domain_name= ./kubespray/inventory/hosts | cut -d'=' -f2").CombinedOutput()
if err != nil {
fmt.Println("Problem getting the load balancer domain name", err)
} else {
//Make a copy of kubeconfig on Ansible host
f, err := os.OpenFile("./kubespray/inventory/awscluster/group_vars/k8s-cluster.yml", os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
panic(err)
}
defer f.Close()
fmt.Fprintf(f, "kubeconfig_localhost: true\n")
g, err := os.OpenFile("./kubespray/inventory/awscluster/group_vars/all.yml", os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
panic(err)
}
defer g.Close()
// Resolve Load Balancer Domain Name and pick the first IP
s, _ := exec.Command("sh", "-c", "grep apiserver_loadbalancer_domain_name= ./kubespray/inventory/hosts | cut -d'=' -f2 | sed 's/\"//g'").CombinedOutput()
// Convert the Domain name to string and strip all spaces so that Lookup does not return errors
r := string(s)
t := strings.TrimSpace(r)
fmt.Println(t)
node, err := net.LookupHost(t)
if err != nil { |
ec2IP := node[0]
fmt.Println(node)
DomainName := strings.TrimSpace(string(loadBalancerName))
loadBalancerDomainName := "apiserver_loadbalancer_domain_name: " + DomainName
fmt.Fprintf(g, "#Set cloud provider to AWS\n")
fmt.Fprintf(g, "cloud_provider: 'aws'\n")
fmt.Fprintf(g, "#Load Balancer Configuration\n")
fmt.Fprintf(g, "loadbalancer_apiserver_localhost: false\n")
fmt.Fprintf(g, "%s\n", loadBalancerDomainName)
fmt.Fprintf(g, "loadbalancer_apiserver:\n")
fmt.Fprintf(g, " address: %s\n", ec2IP)
fmt.Fprintf(g, " port: 6443\n")
}
}
kubeSet := exec.Command("ansible-playbook", "-i", "./inventory/awscluster/hosts", "./cluster.yml", "--timeout=60", "-e ansible_user=centos", "-e bootstrap_os=centos", "-b", "--become-user=root", "--flush-cache")
kubeSet.Dir = "./kubespray/"
stdout, _ := kubeSet.StdoutPipe()
kubeSet.Stderr = kubeSet.Stdout
kubeSet.Start()
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
fmt.Println(m)
//log.Printf(m)
}
kubeSet.Wait()
os.Exit(0)
}
if destroy {
// check if terraform is installed
terr, err := exec.LookPath("terraform")
if err != nil {
log.Fatal("Terraform command not found, kindly check")
}
fmt.Printf("Found terraform at %s\n", terr)
rr, err := exec.Command("terraform", "version").Output()
if err != nil {
log.Fatal(err)
}
fmt.Print(string(rr))
// Remove ssh bastion file
if _, err := os.Stat("./kubespray/ssh-bastion.conf"); err == nil {
os.Remove("./kubespray/ssh-bastion.conf")
}
// Remove the cluster inventory folder
err = os.RemoveAll("./kubespray/inventory/awscluster")
if err != nil {
fmt.Println(err)
}
// Check if credentials file exist, if it exists skip asking to input the AWS values
if _, err := os.Stat("./kubespray/contrib/terraform/aws/credentials.tfvars"); err == nil {
fmt.Println("Credentials file already exists, creation skipped")
} else {
fmt.Println("Please enter your AWS access key ID")
var awsAccessKeyID string
fmt.Scanln(&awsAccessKeyID)
fmt.Println("Please enter your AWS SECRET ACCESS KEY")
var awsSecretKey string
fmt.Scanln(&awsSecretKey)
fmt.Println("Please enter your AWS SSH Key Name")
var awsAccessSSHKey string
fmt.Scanln(&awsAccessSSHKey)
fmt.Println("Please enter your AWS Default Region")
var awsDefaultRegion string
fmt.Scanln(&awsDefaultRegion)
file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars")
if err != nil {
log.Fatal("Cannot create file", err)
}
defer file.Close()
fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", awsAccessKeyID)
fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", awsSecretKey)
fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", awsAccessSSHKey)
fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", awsDefaultRegion)
}
terrSet := exec.Command("terraform", "destroy", "-var-file=credentials.tfvars", "-force")
terrSet.Dir = "./kubespray/contrib/terraform/aws/"
stdout, _ := terrSet.StdoutPipe()
terrSet.Stderr = terrSet.Stdout
error := terrSet.Start()
if error != nil {
fmt.Println(error)
}
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
fmt.Println(m)
//log.Printf(m)
}
terrSet.Wait()
os.Exit(0)
}
if create {
// check if terraform is installed
terr, err := exec.LookPath("terraform")
if err != nil {
log.Fatal("Terraform command not found, kindly check")
}
fmt.Printf("Found terraform at %s\n", terr)
rr, err := exec.Command("terraform", "version").Output()
if err != nil {
log.Fatal(err)
}
fmt.Print(string(rr))
// Check if credentials file exist, if it exists skip asking to input the AWS values
if _, err := os.Stat("./kubespray/contrib/terraform/aws/credentials.tfvars"); err == nil {
fmt.Println("Credentials file already exists, creation skipped")
} else {
//Read Configuration File
viper.SetConfigName("config")
viper.AddConfigPath(".")
viper.AddConfigPath("/tk8")
verr := viper.ReadInConfig() // Find and read the config file
if verr != nil { // Handle errors reading the config file
panic(fmt.Errorf("fatal error config file: %s", verr))
}
awsAccessKeyID := viper.GetString("aws.aws_access_key_id")
awsSecretKey := viper.GetString("aws.aws_secret_access_key")
awsAccessSSHKey := viper.GetString("aws.aws_ssh_keypair")
awsDefaultRegion := viper.GetString("aws.aws_default_region")
file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars")
if err != nil {
log.Fatal("Cannot create file", err)
}
defer file.Close()
fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", strconv.Quote(awsAccessKeyID))
fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", strconv.Quote(awsSecretKey))
fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", strconv.Quote(awsAccessSSHKey))
fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", strconv.Quote(awsDefaultRegion))
}
// Remove tfvars file
err = os.Remove("./kubespray/contrib/terraform/aws/terraform.tfvars")
if err != nil {
fmt.Println(err)
}
//Read Configuration File
viper.SetConfigName("config")
viper.AddConfigPath(".")
verr := viper.ReadInConfig() // Find and read the config file
if verr != nil { // Handle errors reading the config file
panic(fmt.Errorf("fatal error config file: %s", verr))
}
awsClusterName := viper.GetString("aws.clustername")
awsVpcCidrBlock := viper.GetString("aws.aws_vpc_cidr_block")
awsCidrSubnetsPrivate := viper.GetString("aws.aws_cidr_subnets_private")
awsCidrSubnetsPublic := viper.GetString("aws.aws_cidr_subnets_public")
awsBastionSize := viper.GetString("aws.aws_bastion_size")
awsKubeMasterNum := viper.GetString("aws.aws_kube |
fmt.Println(err)
os.Exit(1)
}
| conditional_block |
aws.go | /group_vars", 0755)
exec.Command("cp", "-rfp", "./kubespray/inventory/sample/", "./kubespray/inventory/awscluster/").Run()
exec.Command("cp", "./kubespray/inventory/hosts", "./kubespray/inventory/awscluster/hosts").Run()
//Enable load balancer api access and copy the kubeconfig file locally
loadBalancerName, err := exec.Command("sh", "-c", "grep apiserver_loadbalancer_domain_name= ./kubespray/inventory/hosts | cut -d'=' -f2").CombinedOutput()
if err != nil {
fmt.Println("Problem getting the load balancer domain name", err)
} else {
//Make a copy of kubeconfig on Ansible host
f, err := os.OpenFile("./kubespray/inventory/awscluster/group_vars/k8s-cluster.yml", os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
panic(err)
}
defer f.Close()
fmt.Fprintf(f, "kubeconfig_localhost: true\n")
g, err := os.OpenFile("./kubespray/inventory/awscluster/group_vars/all.yml", os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
panic(err)
}
defer g.Close()
// Resolve Load Balancer Domain Name and pick the first IP
s, _ := exec.Command("sh", "-c", "grep apiserver_loadbalancer_domain_name= ./kubespray/inventory/hosts | cut -d'=' -f2 | sed 's/\"//g'").CombinedOutput()
// Convert the Domain name to string and strip all spaces so that Lookup does not return errors
r := string(s)
t := strings.TrimSpace(r)
fmt.Println(t)
node, err := net.LookupHost(t)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
ec2IP := node[0]
fmt.Println(node)
DomainName := strings.TrimSpace(string(loadBalancerName))
loadBalancerDomainName := "apiserver_loadbalancer_domain_name: " + DomainName
fmt.Fprintf(g, "#Set cloud provider to AWS\n")
fmt.Fprintf(g, "cloud_provider: 'aws'\n")
fmt.Fprintf(g, "#Load Balancer Configuration\n")
fmt.Fprintf(g, "loadbalancer_apiserver_localhost: false\n")
fmt.Fprintf(g, "%s\n", loadBalancerDomainName)
fmt.Fprintf(g, "loadbalancer_apiserver:\n")
fmt.Fprintf(g, " address: %s\n", ec2IP)
fmt.Fprintf(g, " port: 6443\n")
}
}
kubeSet := exec.Command("ansible-playbook", "-i", "./inventory/awscluster/hosts", "./cluster.yml", "--timeout=60", "-e ansible_user=centos", "-e bootstrap_os=centos", "-b", "--become-user=root", "--flush-cache")
kubeSet.Dir = "./kubespray/"
stdout, _ := kubeSet.StdoutPipe()
kubeSet.Stderr = kubeSet.Stdout
kubeSet.Start()
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
fmt.Println(m)
//log.Printf(m)
}
kubeSet.Wait()
os.Exit(0)
}
if destroy {
// check if terraform is installed
terr, err := exec.LookPath("terraform")
if err != nil {
log.Fatal("Terraform command not found, kindly check")
}
fmt.Printf("Found terraform at %s\n", terr)
rr, err := exec.Command("terraform", "version").Output()
if err != nil {
log.Fatal(err)
}
fmt.Print(string(rr))
// Remove ssh bastion file
if _, err := os.Stat("./kubespray/ssh-bastion.conf"); err == nil {
os.Remove("./kubespray/ssh-bastion.conf")
}
// Remove the cluster inventory folder
err = os.RemoveAll("./kubespray/inventory/awscluster")
if err != nil {
fmt.Println(err)
}
// Check if credentials file exist, if it exists skip asking to input the AWS values
if _, err := os.Stat("./kubespray/contrib/terraform/aws/credentials.tfvars"); err == nil {
fmt.Println("Credentials file already exists, creation skipped")
} else {
fmt.Println("Please enter your AWS access key ID")
var awsAccessKeyID string
fmt.Scanln(&awsAccessKeyID)
fmt.Println("Please enter your AWS SECRET ACCESS KEY")
var awsSecretKey string
fmt.Scanln(&awsSecretKey)
fmt.Println("Please enter your AWS SSH Key Name")
var awsAccessSSHKey string
fmt.Scanln(&awsAccessSSHKey)
fmt.Println("Please enter your AWS Default Region")
var awsDefaultRegion string
fmt.Scanln(&awsDefaultRegion)
file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars")
if err != nil {
log.Fatal("Cannot create file", err)
}
defer file.Close()
fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", awsAccessKeyID)
fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", awsSecretKey)
fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", awsAccessSSHKey)
fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", awsDefaultRegion)
}
terrSet := exec.Command("terraform", "destroy", "-var-file=credentials.tfvars", "-force")
terrSet.Dir = "./kubespray/contrib/terraform/aws/"
stdout, _ := terrSet.StdoutPipe()
terrSet.Stderr = terrSet.Stdout
error := terrSet.Start()
if error != nil {
fmt.Println(error)
}
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
fmt.Println(m)
//log.Printf(m)
}
terrSet.Wait()
os.Exit(0)
}
if create {
// check if terraform is installed
terr, err := exec.LookPath("terraform")
if err != nil {
log.Fatal("Terraform command not found, kindly check")
}
fmt.Printf("Found terraform at %s\n", terr)
rr, err := exec.Command("terraform", "version").Output()
if err != nil {
log.Fatal(err)
}
fmt.Print(string(rr))
// Check if credentials file exist, if it exists skip asking to input the AWS values
if _, err := os.Stat("./kubespray/contrib/terraform/aws/credentials.tfvars"); err == nil {
fmt.Println("Credentials file already exists, creation skipped")
} else {
//Read Configuration File
viper.SetConfigName("config")
viper.AddConfigPath(".")
viper.AddConfigPath("/tk8")
verr := viper.ReadInConfig() // Find and read the config file
if verr != nil { // Handle errors reading the config file
panic(fmt.Errorf("fatal error config file: %s", verr))
}
awsAccessKeyID := viper.GetString("aws.aws_access_key_id")
awsSecretKey := viper.GetString("aws.aws_secret_access_key")
awsAccessSSHKey := viper.GetString("aws.aws_ssh_keypair")
awsDefaultRegion := viper.GetString("aws.aws_default_region")
file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars")
if err != nil {
log.Fatal("Cannot create file", err)
}
defer file.Close()
fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", strconv.Quote(awsAccessKeyID))
fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", strconv.Quote(awsSecretKey))
fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", strconv.Quote(awsAccessSSHKey))
fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", strconv.Quote(awsDefaultRegion))
}
// Remove tfvars file
err = os.Remove("./kubespray/contrib/terraform/aws/terraform.tfvars")
if err != nil {
fmt.Println(err)
}
//Read Configuration File
viper.SetConfigName("config")
viper.AddConfigPath(".")
verr := viper.ReadInConfig() // Find and read the config file
if verr != nil { // Handle errors reading the config file
panic(fmt.Errorf("fatal error config file: %s", verr))
}
awsClusterName := viper.GetString("aws.clustername")
awsVpcCidrBlock := viper.GetString("aws.aws_vpc_cidr_block") | awsBastionSize := viper.GetString("aws.aws_bastion_size")
awsKubeMasterNum := viper.GetString("aws.aws_kube_master | awsCidrSubnetsPrivate := viper.GetString("aws.aws_cidr_subnets_private")
awsCidrSubnetsPublic := viper.GetString("aws.aws_cidr_subnets_public") | random_line_split |
aws.go | _ACCESS_KEY = %s\n", awsSecretKey)
fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", awsAccessSSHKey)
fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", awsDefaultRegion)
}
terrSet := exec.Command("terraform", "destroy", "-var-file=credentials.tfvars", "-force")
terrSet.Dir = "./kubespray/contrib/terraform/aws/"
stdout, _ := terrSet.StdoutPipe()
terrSet.Stderr = terrSet.Stdout
error := terrSet.Start()
if error != nil {
fmt.Println(error)
}
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
fmt.Println(m)
//log.Printf(m)
}
terrSet.Wait()
os.Exit(0)
}
if create {
// check if terraform is installed
terr, err := exec.LookPath("terraform")
if err != nil {
log.Fatal("Terraform command not found, kindly check")
}
fmt.Printf("Found terraform at %s\n", terr)
rr, err := exec.Command("terraform", "version").Output()
if err != nil {
log.Fatal(err)
}
fmt.Print(string(rr))
// Check if credentials file exist, if it exists skip asking to input the AWS values
if _, err := os.Stat("./kubespray/contrib/terraform/aws/credentials.tfvars"); err == nil {
fmt.Println("Credentials file already exists, creation skipped")
} else {
//Read Configuration File
viper.SetConfigName("config")
viper.AddConfigPath(".")
viper.AddConfigPath("/tk8")
verr := viper.ReadInConfig() // Find and read the config file
if verr != nil { // Handle errors reading the config file
panic(fmt.Errorf("fatal error config file: %s", verr))
}
awsAccessKeyID := viper.GetString("aws.aws_access_key_id")
awsSecretKey := viper.GetString("aws.aws_secret_access_key")
awsAccessSSHKey := viper.GetString("aws.aws_ssh_keypair")
awsDefaultRegion := viper.GetString("aws.aws_default_region")
file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars")
if err != nil {
log.Fatal("Cannot create file", err)
}
defer file.Close()
fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", strconv.Quote(awsAccessKeyID))
fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", strconv.Quote(awsSecretKey))
fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", strconv.Quote(awsAccessSSHKey))
fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", strconv.Quote(awsDefaultRegion))
}
// Remove tfvars file
err = os.Remove("./kubespray/contrib/terraform/aws/terraform.tfvars")
if err != nil {
fmt.Println(err)
}
//Read Configuration File
viper.SetConfigName("config")
viper.AddConfigPath(".")
verr := viper.ReadInConfig() // Find and read the config file
if verr != nil { // Handle errors reading the config file
panic(fmt.Errorf("fatal error config file: %s", verr))
}
awsClusterName := viper.GetString("aws.clustername")
awsVpcCidrBlock := viper.GetString("aws.aws_vpc_cidr_block")
awsCidrSubnetsPrivate := viper.GetString("aws.aws_cidr_subnets_private")
awsCidrSubnetsPublic := viper.GetString("aws.aws_cidr_subnets_public")
awsBastionSize := viper.GetString("aws.aws_bastion_size")
awsKubeMasterNum := viper.GetString("aws.aws_kube_master_num")
awsKubeMasterSize := viper.GetString("aws.aws_kube_master_size")
awsEtcdNum := viper.GetString("aws.aws_etcd_num")
awsEtcdSize := viper.GetString("aws.aws_etcd_size")
awsKubeWorkerNum := viper.GetString("aws.aws_kube_worker_num")
awsKubeWorkerSize := viper.GetString("aws.aws_kube_worker_size")
awsElbAPIPort := viper.GetString("aws.aws_elb_api_port")
k8sSecureAPIPort := viper.GetString("aws.k8s_secure_api_port")
kubeInsecureApiserverAddress := viper.GetString("aws.") // NOTE: this config key appears truncated in the source
tfile, err := os.Create("./kubespray/contrib/terraform/aws/terraform.tfvars")
if err != nil {
log.Fatal("Cannot create file", err)
}
defer tfile.Close()
fmt.Fprintf(tfile, "aws_cluster_name = %s\n", strconv.Quote(awsClusterName))
fmt.Fprintf(tfile, "aws_vpc_cidr_block = %s\n", strconv.Quote(awsVpcCidrBlock))
fmt.Fprintf(tfile, "aws_cidr_subnets_private = %s\n", awsCidrSubnetsPrivate)
fmt.Fprintf(tfile, "aws_cidr_subnets_public = %s\n", awsCidrSubnetsPublic)
fmt.Fprintf(tfile, "aws_bastion_size = %s\n", strconv.Quote(awsBastionSize))
fmt.Fprintf(tfile, "aws_kube_master_num = %s\n", awsKubeMasterNum)
fmt.Fprintf(tfile, "aws_kube_master_size = %s\n", strconv.Quote(awsKubeMasterSize))
fmt.Fprintf(tfile, "aws_etcd_num = %s\n", awsEtcdNum)
fmt.Fprintf(tfile, "aws_etcd_size = %s\n", strconv.Quote(awsEtcdSize))
fmt.Fprintf(tfile, "aws_kube_worker_num = %s\n", awsKubeWorkerNum)
fmt.Fprintf(tfile, "aws_kube_worker_size = %s\n", strconv.Quote(awsKubeWorkerSize))
fmt.Fprintf(tfile, "aws_elb_api_port = %s\n", awsElbAPIPort)
fmt.Fprintf(tfile, "k8s_secure_api_port = %s\n", k8sSecureAPIPort)
fmt.Fprintf(tfile, "kube_insecure_apiserver_address = %s\n", strconv.Quote(kubeInsecureApiserverAddress))
fmt.Fprintf(tfile, "default_tags = {\n")
fmt.Fprintf(tfile, "# Env = 'devtest'\n")
fmt.Fprintf(tfile, "# Product = 'kubernetes'\n")
fmt.Fprintf(tfile, "}")
//fmt.Println("Please enter your AWS access key ID")
//var awsAccessKeyID string
//fmt.Scanln(&awsAccessKeyID)
//fmt.Println("Please enter your AWS SECRET ACCESS KEY")
//var awsSecretKey string
//fmt.Scanln(&awsSecretKey)
//fmt.Println("Please enter your AWS SSH Key Name")
//var awsAccessSSHKey string
//fmt.Scanln(&awsAccessSSHKey)
//fmt.Println("Please enter your AWS Default Region")
//var awsDefaultRegion string
//fmt.Scanln(&awsDefaultRegion)
terrInit := exec.Command("terraform", "init")
terrInit.Dir = "./kubespray/contrib/terraform/aws/"
out, _ := terrInit.StdoutPipe()
terrInit.Start()
scanInit := bufio.NewScanner(out)
for scanInit.Scan() {
m := scanInit.Text()
fmt.Println(m)
//log.Printf(m)
}
terrInit.Wait()
terrSet := exec.Command("terraform", "apply", "-var-file=credentials.tfvars", "-auto-approve")
terrSet.Dir = "./kubespray/contrib/terraform/aws/"
stdout, err := terrSet.StdoutPipe()
terrSet.Stderr = terrSet.Stdout
terrSet.Start()
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
fmt.Println(m)
//log.Printf(m)
}
terrSet.Wait()
os.Exit(0)
}
if len(args) == 0 {
cmd.Help()
os.Exit(0)
}
},
}
func init() { |
clusterCmd.AddCommand(awsCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// awsCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// awsCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
awsCmd.Flags().BoolVarP(&install, "install", "i", false, "Install Kubernetes on the AWS infrastructure")
// Flags to initiate the terraform installation
awsCmd.Flags().BoolVarP(&create, "create", "c", false, "Deploy the AWS infrastructure using terraform")
// Flag to destroy the AWS infrastructure using terraform
awsCmd.Flags().BoolVarP(&destroy, "destroy", "d", false, "Destroy the AWS infrastructure")
}
| identifier_body |
|
aws.go | credentials.tfvars"); err == nil {
fmt.Println("Credentials file already exists, creation skipped")
} else {
fmt.Println("Please enter your AWS access key ID")
var awsAccessKeyID string
fmt.Scanln(&awsAccessKeyID)
fmt.Println("Please enter your AWS SECRET ACCESS KEY")
var awsSecretKey string
fmt.Scanln(&awsSecretKey)
fmt.Println("Please enter your AWS SSH Key Name")
var awsAccessSSHKey string
fmt.Scanln(&awsAccessSSHKey)
fmt.Println("Please enter your AWS Default Region")
var awsDefaultRegion string
fmt.Scanln(&awsDefaultRegion)
file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars")
if err != nil {
log.Fatal("Cannot create file", err)
}
defer file.Close()
fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", awsAccessKeyID)
fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", awsSecretKey)
fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", awsAccessSSHKey)
fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", awsDefaultRegion)
}
terrSet := exec.Command("terraform", "destroy", "-var-file=credentials.tfvars", "-force")
terrSet.Dir = "./kubespray/contrib/terraform/aws/"
stdout, _ := terrSet.StdoutPipe()
terrSet.Stderr = terrSet.Stdout
error := terrSet.Start()
if error != nil {
fmt.Println(error)
}
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
fmt.Println(m)
//log.Printf(m)
}
terrSet.Wait()
os.Exit(0)
}
if create {
// check if terraform is installed
terr, err := exec.LookPath("terraform")
if err != nil {
log.Fatal("Terraform command not found, kindly check")
}
fmt.Printf("Found terraform at %s\n", terr)
rr, err := exec.Command("terraform", "version").Output()
if err != nil {
log.Fatal(err)
}
fmt.Print(string(rr))
// Check if credentials file exist, if it exists skip asking to input the AWS values
if _, err := os.Stat("./kubespray/contrib/terraform/aws/credentials.tfvars"); err == nil {
fmt.Println("Credentials file already exists, creation skipped")
} else {
//Read Configuration File
viper.SetConfigName("config")
viper.AddConfigPath(".")
viper.AddConfigPath("/tk8")
verr := viper.ReadInConfig() // Find and read the config file
if verr != nil { // Handle errors reading the config file
panic(fmt.Errorf("fatal error config file: %s", verr))
}
awsAccessKeyID := viper.GetString("aws.aws_access_key_id")
awsSecretKey := viper.GetString("aws.aws_secret_access_key")
awsAccessSSHKey := viper.GetString("aws.aws_ssh_keypair")
awsDefaultRegion := viper.GetString("aws.aws_default_region")
file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars")
if err != nil {
log.Fatal("Cannot create file", err)
}
defer file.Close()
fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", strconv.Quote(awsAccessKeyID))
fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", strconv.Quote(awsSecretKey))
fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", strconv.Quote(awsAccessSSHKey))
fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", strconv.Quote(awsDefaultRegion))
}
// Remove tfvars file
err = os.Remove("./kubespray/contrib/terraform/aws/terraform.tfvars")
if err != nil {
fmt.Println(err)
}
//Read Configuration File
viper.SetConfigName("config")
viper.AddConfigPath(".")
verr := viper.ReadInConfig() // Find and read the config file
if verr != nil { // Handle errors reading the config file
panic(fmt.Errorf("fatal error config file: %s", verr))
}
awsClusterName := viper.GetString("aws.clustername")
awsVpcCidrBlock := viper.GetString("aws.aws_vpc_cidr_block")
awsCidrSubnetsPrivate := viper.GetString("aws.aws_cidr_subnets_private")
awsCidrSubnetsPublic := viper.GetString("aws.aws_cidr_subnets_public")
awsBastionSize := viper.GetString("aws.aws_bastion_size")
awsKubeMasterNum := viper.GetString("aws.aws_kube_master_num")
awsKubeMasterSize := viper.GetString("aws.aws_kube_master_size")
awsEtcdNum := viper.GetString("aws.aws_etcd_num")
awsEtcdSize := viper.GetString("aws.aws_etcd_size")
awsKubeWorkerNum := viper.GetString("aws.aws_kube_worker_num")
awsKubeWorkerSize := viper.GetString("aws.aws_kube_worker_size")
awsElbAPIPort := viper.GetString("aws.aws_elb_api_port")
k8sSecureAPIPort := viper.GetString("aws.k8s_secure_api_port")
kubeInsecureApiserverAddress := viper.GetString("aws.") // NOTE: this config key appears truncated in the source
tfile, err := os.Create("./kubespray/contrib/terraform/aws/terraform.tfvars")
if err != nil {
log.Fatal("Cannot create file", err)
}
defer tfile.Close()
fmt.Fprintf(tfile, "aws_cluster_name = %s\n", strconv.Quote(awsClusterName))
fmt.Fprintf(tfile, "aws_vpc_cidr_block = %s\n", strconv.Quote(awsVpcCidrBlock))
fmt.Fprintf(tfile, "aws_cidr_subnets_private = %s\n", awsCidrSubnetsPrivate)
fmt.Fprintf(tfile, "aws_cidr_subnets_public = %s\n", awsCidrSubnetsPublic)
fmt.Fprintf(tfile, "aws_bastion_size = %s\n", strconv.Quote(awsBastionSize))
fmt.Fprintf(tfile, "aws_kube_master_num = %s\n", awsKubeMasterNum)
fmt.Fprintf(tfile, "aws_kube_master_size = %s\n", strconv.Quote(awsKubeMasterSize))
fmt.Fprintf(tfile, "aws_etcd_num = %s\n", awsEtcdNum)
fmt.Fprintf(tfile, "aws_etcd_size = %s\n", strconv.Quote(awsEtcdSize))
fmt.Fprintf(tfile, "aws_kube_worker_num = %s\n", awsKubeWorkerNum)
fmt.Fprintf(tfile, "aws_kube_worker_size = %s\n", strconv.Quote(awsKubeWorkerSize))
fmt.Fprintf(tfile, "aws_elb_api_port = %s\n", awsElbAPIPort)
fmt.Fprintf(tfile, "k8s_secure_api_port = %s\n", k8sSecureAPIPort)
fmt.Fprintf(tfile, "kube_insecure_apiserver_address = %s\n", strconv.Quote(kubeInsecureApiserverAddress))
fmt.Fprintf(tfile, "default_tags = {\n")
fmt.Fprintf(tfile, "# Env = 'devtest'\n")
fmt.Fprintf(tfile, "# Product = 'kubernetes'\n")
fmt.Fprintf(tfile, "}")
//fmt.Println("Please enter your AWS access key ID")
//var awsAccessKeyID string
//fmt.Scanln(&awsAccessKeyID)
//fmt.Println("Please enter your AWS SECRET ACCESS KEY")
//var awsSecretKey string
//fmt.Scanln(&awsSecretKey)
//fmt.Println("Please enter your AWS SSH Key Name")
//var awsAccessSSHKey string
//fmt.Scanln(&awsAccessSSHKey)
//fmt.Println("Please enter your AWS Default Region")
//var awsDefaultRegion string
//fmt.Scanln(&awsDefaultRegion)
terrInit := exec.Command("terraform", "init")
terrInit.Dir = "./kubespray/contrib/terraform/aws/"
out, _ := terrInit.StdoutPipe()
terrInit.Start()
scanInit := bufio.NewScanner(out)
for scanInit.Scan() {
m := scanInit.Text()
fmt.Println(m)
//log.Printf(m)
}
terrInit.Wait()
terrSet := exec.Command("terraform", "apply", "-var-file=credentials.tfvars", "-auto-approve")
terrSet.Dir = "./kubespray/contrib/terraform/aws/"
stdout, err := terrSet.StdoutPipe()
terrSet.Stderr = terrSet.Stdout
terrSet.Start()
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
fmt.Println(m)
//log.Printf(m)
}
terrSet.Wait()
os.Exit(0)
}
if len(args) == 0 {
cmd.Help()
os.Exit(0)
}
},
}
func i | nit( | identifier_name |
|
raft.go | storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
//
func (rf *Raft) persist() {
// Your code here.
// Example:
// w := new(bytes.Buffer)
// e := gob.NewEncoder(w)
// e.Encode(rf.xxx)
// e.Encode(rf.yyy)
// data := w.Bytes()
// rf.persister.SaveRaftState(data)
w := new(bytes.Buffer)
e := gob.NewEncoder(w)
e.Encode(rf.currentTerm)
e.Encode(rf.voteFor)
e.Encode(rf.log)
data := w.Bytes()
rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
// Your code here.
// Example:
// r := bytes.NewBuffer(data)
// d := gob.NewDecoder(r)
// d.Decode(&rf.xxx)
// d.Decode(&rf.yyy)
r := bytes.NewBuffer(data)
d := gob.NewDecoder(r)
d.Decode(&rf.currentTerm)
d.Decode(&rf.voteFor)
d.Decode(&rf.log)
}
// example RequestVote RPC arguments structure.
type RequestVoteArgs struct {
// Your data here.
Term int
CandidateId int
LastLogIndex int
LastLogTerm int
}
// example RequestVote RPC reply structure.
type RequestVoteReply struct {
// Your data here.
Term int
VoteGranted bool
}
type RequestAppendEntriesArgs struct {
Term int
LeaderId int
PrevLogIndex int
PrevLogTerm int
Entries []logEntries
LeaderCommitIndex int
/// prevLogIndex, CommitIndex.
}
type RequestAppendEntriesReply struct {
Term int
Success bool
NextIndex int // tells the leader this follower's next free log index, so the leader can jump back in one step instead of decrementing one entry at a time.
}
//
// example RequestVote RPC handler.
//
// RPC request/response handlers should check the term and convert this peer to a follower when needed;
// they should also check the candidate's log before granting a vote.
func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {
//fmt.Printf("[::RequestVote]\n")
// Your code here.
rf.mtx.Lock()
defer rf.mtx.Unlock()
defer rf.persist()
reply.VoteGranted = false
// case 1: check term
if args.Term < rf.currentTerm {
reply.Term = rf.currentTerm
return
}
if args.Term > rf.currentTerm { // set term to max. and then maybe become leader.
rf.currentTerm = args.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
}
reply.Term = rf.currentTerm
// case 2: check log
isNewer := false
if args.LastLogTerm == rf.log[len(rf.log)-1].Term {
isNewer = args.LastLogIndex >= rf.log[len(rf.log)-1].LogIndex
} else {
isNewer = args.LastLogTerm > rf.log[len(rf.log)-1].Term
}
if (rf.voteFor == -1 || rf.voteFor == args.CandidateId) && isNewer {
rf.chanVoteOther <- 1
rf.state = STATE_FOLLOWER
reply.VoteGranted = true
rf.voteFor = args.CandidateId
}
}
func (rf *Raft) RequestAppendEntries(args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) {
// Your code here.
// Q: should candidate append entries?
//fmt.Println("[::RequestAppendEntries]", args)
rf.mtx.Lock()
defer rf.mtx.Unlock()
defer rf.persist()
// case 1: check term
reply.Success = false
if args.Term < rf.currentTerm {
reply.Term = rf.currentTerm
reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1
return
}
rf.chanHeartBeat <- 1
if args.Term > rf.currentTerm {
rf.currentTerm = args.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
}
reply.Term = args.Term
// case 2: check log number
if args.PrevLogIndex > rf.log[len(rf.log)-1].LogIndex {
reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1
return
}
// case 3: check log term. decrease one each time...
if args.PrevLogIndex > 0 {
term := rf.log[args.PrevLogIndex].Term
if args.PrevLogTerm != term {
for i := args.PrevLogIndex - 1; i >= 0; i-- {
if rf.log[i].Term != term {
reply.NextIndex = i + 1
break
}
}
return
}
}
// step4: success: copy the log.
if args.PrevLogIndex < 0 {
} else {
rf.log = rf.log[:args.PrevLogIndex+1]
rf.log = append(rf.log, args.Entries...)
reply.Success = true
reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1
}
if args.LeaderCommitIndex > rf.commitIndex {
last := rf.log[len(rf.log)-1].LogIndex
if args.LeaderCommitIndex > last {
rf.commitIndex = last
} else {
rf.commitIndex = args.LeaderCommitIndex
}
rf.chanCommit <- 1
}
return
}
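// Worked example of the NextIndex backoff above (illustrative, not from the original
// file): if the leader sends PrevLogIndex=10 but this follower's log ends at index 8,
// case 2 replies NextIndex=9; if the entry at PrevLogIndex exists but its term differs,
// case 3 replies with the first index of that conflicting term, letting the leader skip
// the whole term in one round trip instead of decrementing one index at a time.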
//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// returns true if labrpc says the RPC was delivered.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
func (rf *Raft) sendRequestVote(server int, args RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
rf.mtx.Lock()
defer rf.mtx.Unlock()
if ok {
if rf.state != STATE_CANDIDATE {
return ok
}
if args.Term != rf.currentTerm { // consider the current term's reply
return ok
}
if reply.Term > rf.currentTerm {
rf.currentTerm = reply.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
rf.persist()
}
if reply.VoteGranted {
rf.beenVotedCount++
if rf.state == STATE_CANDIDATE && rf.beenVotedCount > len(rf.peers)/2 {
rf.state = STATE_FOLLOWER // ...
rf.chanBecomeLeader <- 1
}
}
}
return ok
}
func (rf *Raft) sendAppendEntries(server int, args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) bool {
//fmt.Printf("[sendAppendEntries][who=%v][term=%v]\n", rf.me, args.Term)
ok := rf.peers[server].Call("Raft.RequestAppendEntries", args, reply)
rf.mtx.Lock()
defer rf.mtx.Unlock()
if ok {
if rf.state != STATE_LEADER {
return ok
}
if args.Term != rf.currentTerm {
return ok
}
if reply.Term > rf.currentTerm {
rf.currentTerm = reply.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
rf.persist()
return ok
}
if reply.Success {
if len(args.Entries) > 0 {
rf.nextIndex[server] = args.Entries[len(args.Entries)-1].LogIndex + 1
rf.matchIndex[server] = rf.nextIndex[server] - 1
}
} else {
rf.nextIndex[server] = reply.NextIndex
}
}
return ok
}
func (rf *Raft) broadcastRequestVote() {
var args RequestVoteArgs
rf.mtx.Lock()
args.Term = rf.currentTerm
args.CandidateId = rf.me
args.LastLogTerm = rf.log[len(rf.log)-1].Term
args.LastLogIndex = rf.log[len(rf.log)-1].LogIndex
rf.mtx.Unlock()
//fmt.Printf("[broadcastRequestVote][Candidate = %v]\n", rf.me)
for i := range rf.peers | {
if i != rf.me && rf.state == STATE_CANDIDATE {
go func(i int) {
var reply RequestVoteReply
rf.sendRequestVote(i, args, &reply)
}(i)
}
} | conditional_block |
|
raft.go | (args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) {
// Your code here.
// Q: should candidate append entries?
//fmt.Println("[::RequestAppendEntries]", args)
rf.mtx.Lock()
defer rf.mtx.Unlock()
defer rf.persist()
// case 1: check term
reply.Success = false
if args.Term < rf.currentTerm {
reply.Term = rf.currentTerm
reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1
return
}
rf.chanHeartBeat <- 1
if args.Term > rf.currentTerm {
rf.currentTerm = args.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
}
reply.Term = args.Term
// case 2: check log number
if args.PrevLogIndex > rf.log[len(rf.log)-1].LogIndex {
reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1
return
}
// case 3: check log term. decrease one each time...
if args.PrevLogIndex > 0 {
term := rf.log[args.PrevLogIndex].Term
if args.PrevLogTerm != term {
for i := args.PrevLogIndex - 1; i >= 0; i-- {
if rf.log[i].Term != term {
reply.NextIndex = i + 1
break
}
}
return
}
}
// step4: success: copy the log.
if args.PrevLogIndex < 0 {
} else {
rf.log = rf.log[:args.PrevLogIndex+1]
rf.log = append(rf.log, args.Entries...)
reply.Success = true
reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1
}
if args.LeaderCommitIndex > rf.commitIndex {
last := rf.log[len(rf.log)-1].LogIndex
if args.LeaderCommitIndex > last {
rf.commitIndex = last
} else {
rf.commitIndex = args.LeaderCommitIndex
}
rf.chanCommit <- 1
}
return
}
//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// returns true if labrpc says the RPC was delivered.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
func (rf *Raft) sendRequestVote(server int, args RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
rf.mtx.Lock()
defer rf.mtx.Unlock()
if ok {
if rf.state != STATE_CANDIDATE {
return ok
}
if args.Term != rf.currentTerm { // consider the current term's reply
return ok
}
if reply.Term > rf.currentTerm {
rf.currentTerm = reply.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
rf.persist()
}
if reply.VoteGranted {
rf.beenVotedCount++
if rf.state == STATE_CANDIDATE && rf.beenVotedCount > len(rf.peers)/2 {
rf.state = STATE_FOLLOWER // ...
rf.chanBecomeLeader <- 1
}
}
}
return ok
}
func (rf *Raft) sendAppendEntries(server int, args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) bool {
//fmt.Printf("[sendAppendEntries][who=%v][term=%v]\n", rf.me, args.Term)
ok := rf.peers[server].Call("Raft.RequestAppendEntries", args, reply)
rf.mtx.Lock()
defer rf.mtx.Unlock()
if ok {
if rf.state != STATE_LEADER {
return ok
}
if args.Term != rf.currentTerm {
return ok
}
if reply.Term > rf.currentTerm {
rf.currentTerm = reply.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
rf.persist()
return ok
}
if reply.Success {
if len(args.Entries) > 0 {
rf.nextIndex[server] = args.Entries[len(args.Entries)-1].LogIndex + 1
rf.matchIndex[server] = rf.nextIndex[server] - 1
}
} else {
rf.nextIndex[server] = reply.NextIndex
}
}
return ok
}
func (rf *Raft) broadcastRequestVote() {
var args RequestVoteArgs
rf.mtx.Lock()
args.Term = rf.currentTerm
args.CandidateId = rf.me
args.LastLogTerm = rf.log[len(rf.log)-1].Term
args.LastLogIndex = rf.log[len(rf.log)-1].LogIndex
rf.mtx.Unlock()
//fmt.Printf("[broadcastRequestVote][Candidate = %v]\n", rf.me)
for i := range rf.peers {
if i != rf.me && rf.state == STATE_CANDIDATE {
go func(i int) {
var reply RequestVoteReply
rf.sendRequestVote(i, args, &reply)
}(i)
}
}
}
func (rf *Raft) broadcastAppendEntries() {
//fmt.Printf("[::broadcastAppendEntries][Candidate = %v]", rf.me)
rf.mtx.Lock()
defer rf.mtx.Unlock()
N := rf.commitIndex
last := rf.log[len(rf.log)-1].LogIndex
// step1: walk the log entries after commitIndex
for i := rf.commitIndex + 1; i <= last; i++ {
num := 1
for j := range rf.peers {
if j != rf.me && rf.matchIndex[j] >= i && rf.log[i].Term == rf.currentTerm {
num++
}
}
// replicated in majority of node.
if 2*num > len(rf.peers) {
N = i
}
}
// step2: we can apply these logs.
if N != rf.commitIndex {
rf.commitIndex = N
rf.chanCommit <- 1 // a majority of nodes have committed, so the apply index can advance
}
for i := range rf.peers {
if i != rf.me && rf.state == STATE_LEADER {
if rf.nextIndex[i] > 0 {
// step3: send entries from nextIndex[i] through the end of the log.
var args RequestAppendEntriesArgs
args.Term = rf.currentTerm
args.LeaderId = rf.me
args.PrevLogIndex = rf.nextIndex[i] - 1
args.PrevLogTerm = rf.log[args.PrevLogIndex].Term
args.Entries = make([]logEntries, len(rf.log[args.PrevLogIndex+1:]))
copy(args.Entries, rf.log[args.PrevLogIndex+1:])
args.LeaderCommitIndex = rf.commitIndex
go func(i int, args RequestAppendEntriesArgs) {
var reply RequestAppendEntriesReply
rf.sendAppendEntries(i, args, &reply)
}(i, args)
}
}
}
}
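// Worked example of the commit rule above (illustrative, not from the original file):
// with 5 peers, commitIndex=3 and last=6, index 4 counts the leader plus every follower
// whose matchIndex >= 4; if that count reaches 3 (a majority) and log[4].Term equals
// currentTerm, N advances to 4, and the same check is repeated for 5 and 6 before
// signalling chanCommit.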
//
// the service using Raft (e.g. a k/v server) wants to start
// agreement on the next command to be appended to Raft's log. if this
// server isn't the leader, returns false. otherwise start the
// agreement and return immediately. there is no guarantee that this
// command will ever be committed to the Raft log, since the leader
// may fail or lose an election.
//
// the first return value is the index that the command will appear at
// if it's ever committed. the second return value is the current
// term. the third return value is true if this server believes it is
// the leader.
//
func (rf *Raft) Start(command interface{}) (int, int, bool) {
rf.mtx.Lock()
defer rf.mtx.Unlock()
index := -1
term := rf.currentTerm
isLeader := rf.state == STATE_LEADER
if isLeader {
index = rf.log[len(rf.log)-1].LogIndex + 1
rf.log = append(rf.log, logEntries{Term: term, Log: command, LogIndex: index}) // append new entry from client
rf.persist()
}
return index, term, isLeader
}
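// Illustrative usage sketch (not part of the original file): a service built on top of
// this Raft, e.g. a k/v server, would call Start() and later learn the outcome through
// the apply channel handed to Make(); ApplyMsg and applyCh belong to the surrounding lab
// framework and are not defined in this snippet.
//
//   index, term, isLeader := rf.Start(op)
//   if !isLeader {
//       // forward the request to the current leader instead
//   }
//   // the command is known to be committed once an ApplyMsg carrying this index
//   // arrives on applyCh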
//
// the tester calls Kill() when a Raft instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
func (rf *Raft) Kill() | {
// Your code here, if desired.
} | identifier_body |
|
raft.go | , rf.state == STATE_LEADER
}
//
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
//
func (rf *Raft) persist() {
// Your code here.
// Example:
// w := new(bytes.Buffer)
// e := gob.NewEncoder(w)
// e.Encode(rf.xxx)
// e.Encode(rf.yyy)
// data := w.Bytes()
// rf.persister.SaveRaftState(data)
w := new(bytes.Buffer)
e := gob.NewEncoder(w)
e.Encode(rf.currentTerm)
e.Encode(rf.voteFor)
e.Encode(rf.log)
data := w.Bytes()
rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
// Your code here.
// Example:
// r := bytes.NewBuffer(data)
// d := gob.NewDecoder(r)
// d.Decode(&rf.xxx)
// d.Decode(&rf.yyy)
r := bytes.NewBuffer(data)
d := gob.NewDecoder(r)
d.Decode(&rf.currentTerm)
d.Decode(&rf.voteFor)
d.Decode(&rf.log)
}
// example RequestVote RPC arguments structure.
type RequestVoteArgs struct {
// Your data here.
Term int
CandidateId int
LastLogIndex int
LastLogTerm int
}
// example RequestVote RPC reply structure.
type RequestVoteReply struct {
// Your data here.
Term int
VoteGranted bool
}
type RequestAppendEntriesArgs struct {
Term int
LeaderId int
PrevLogIndex int
PrevLogTerm int
Entries []logEntries
LeaderCommitIndex int
/// prevLogIndex, CommitIndex.
}
type RequestAppendEntriesReply struct {
Term int
Success bool
NextIndex int // tells the leader this follower's next free log index, so the leader can jump back in one step instead of decrementing one entry at a time.
}
//
// example RequestVote RPC handler.
//
// RPC request/response handlers should check the term and convert this peer to a follower when needed;
// they should also check the candidate's log before granting a vote.
func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {
//fmt.Printf("[::RequestVote]\n")
// Your code here.
rf.mtx.Lock()
defer rf.mtx.Unlock()
defer rf.persist()
reply.VoteGranted = false
// case 1: check term
if args.Term < rf.currentTerm {
reply.Term = rf.currentTerm
return
}
if args.Term > rf.currentTerm { // set term to max. and then maybe become leader.
rf.currentTerm = args.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
}
reply.Term = rf.currentTerm
// case 2: check log
isNewer := false
if args.LastLogTerm == rf.log[len(rf.log)-1].Term {
isNewer = args.LastLogIndex >= rf.log[len(rf.log)-1].LogIndex
} else {
isNewer = args.LastLogTerm > rf.log[len(rf.log)-1].Term
}
| if (rf.voteFor == -1 || rf.voteFor == args.CandidateId) && isNewer {
rf.chanVoteOther <- 1
rf.state = STATE_FOLLOWER
reply.VoteGranted = true
rf.voteFor = args.CandidateId
}
}
func (rf *Raft) RequestAppendEntries(args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) {
// Your code here.
// Q: should candidate append entries?
//fmt.Println("[::RequestAppendEntries]", args)
rf.mtx.Lock()
defer rf.mtx.Unlock()
defer rf.persist()
// case 1: check term
reply.Success = false
if args.Term < rf.currentTerm {
reply.Term = rf.currentTerm
reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1
return
}
rf.chanHeartBeat <- 1
if args.Term > rf.currentTerm {
rf.currentTerm = args.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
}
reply.Term = args.Term
// case 2: check log number
if args.PrevLogIndex > rf.log[len(rf.log)-1].LogIndex {
reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1
return
}
// case 3: check log term. decrease one each time...
if args.PrevLogIndex > 0 {
term := rf.log[args.PrevLogIndex].Term
if args.PrevLogTerm != term {
for i := args.PrevLogIndex - 1; i >= 0; i-- {
if rf.log[i].Term != term {
reply.NextIndex = i + 1
break
}
}
return
}
}
// step4: success: copy the log.
if args.PrevLogIndex < 0 {
} else {
rf.log = rf.log[:args.PrevLogIndex+1]
rf.log = append(rf.log, args.Entries...)
reply.Success = true
reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1
}
if args.LeaderCommitIndex > rf.commitIndex {
last := rf.log[len(rf.log)-1].LogIndex
if args.LeaderCommitIndex > last {
rf.commitIndex = last
} else {
rf.commitIndex = args.LeaderCommitIndex
}
rf.chanCommit <- 1
}
return
}
//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// returns true if labrpc says the RPC was delivered.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
func (rf *Raft) sendRequestVote(server int, args RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
rf.mtx.Lock()
defer rf.mtx.Unlock()
if ok {
if rf.state != STATE_CANDIDATE {
return ok
}
if args.Term != rf.currentTerm { // consider the current term's reply
return ok
}
if reply.Term > rf.currentTerm {
rf.currentTerm = reply.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
rf.persist()
}
if reply.VoteGranted {
rf.beenVotedCount++
if rf.state == STATE_CANDIDATE && rf.beenVotedCount > len(rf.peers)/2 {
rf.state = STATE_FOLLOWER // ...
rf.chanBecomeLeader <- 1
}
}
}
return ok
}
func (rf *Raft) sendAppendEntries(server int, args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) bool {
//fmt.Printf("[sendAppendEntries][who=%v][term=%v]\n", rf.me, args.Term)
ok := rf.peers[server].Call("Raft.RequestAppendEntries", args, reply)
rf.mtx.Lock()
defer rf.mtx.Unlock()
if ok {
if rf.state != STATE_LEADER {
return ok
}
if args.Term != rf.currentTerm {
return ok
}
if reply.Term > rf.currentTerm {
rf.currentTerm = reply.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
rf.persist()
return ok
}
if reply.Success {
if len(args.Entries) > 0 {
rf.nextIndex[server] = args.Entries[len(args.Entries)-1].LogIndex + 1
rf.matchIndex[server] = rf.nextIndex[server] - 1
}
} else {
rf.nextIndex[server] = reply.NextIndex
}
}
return ok
}
func (rf *Raft) broadcastRequestVote() {
var args RequestVoteArgs
rf.mtx.Lock()
args.Term = rf.currentTerm
args.CandidateId = rf.me
args.LastLogTerm = rf.log[len(rf.log)-1].Term
args.LastLogIndex = rf.log[len(rf.log)-1].LogIndex
rf.mtx.Unlock()
//fmt.Printf("[broadcastRequestVote][Candidate = %v]\n", rf.me)
for i := range rf.peers {
if i != rf.me && rf.state == STATE_CANDIDATE {
go func(i int) {
var reply RequestVoteReply
rf.sendRequest | random_line_split |
|
raft.go | crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
//
func (rf *Raft) persist() {
// Your code here.
// Example:
// w := new(bytes.Buffer)
// e := gob.NewEncoder(w)
// e.Encode(rf.xxx)
// e.Encode(rf.yyy)
// data := w.Bytes()
// rf.persister.SaveRaftState(data)
w := new(bytes.Buffer)
e := gob.NewEncoder(w)
e.Encode(rf.currentTerm)
e.Encode(rf.voteFor)
e.Encode(rf.log)
data := w.Bytes()
rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
// Your code here.
// Example:
// r := bytes.NewBuffer(data)
// d := gob.NewDecoder(r)
// d.Decode(&rf.xxx)
// d.Decode(&rf.yyy)
r := bytes.NewBuffer(data)
d := gob.NewDecoder(r)
d.Decode(&rf.currentTerm)
d.Decode(&rf.voteFor)
d.Decode(&rf.log)
}
// example RequestVote RPC arguments structure.
type RequestVoteArgs struct {
// Your data here.
Term int
CandidateId int
LastLogIndex int
LastLogTerm int
}
// example RequestVote RPC reply structure.
type RequestVoteReply struct {
// Your data here.
Term int
VoteGranted bool
}
type RequestAppendEntriesArgs struct {
Term int
LeaderId int
PrevLogIndex int
PrevLogTerm int
Entries []logEntries
LeaderCommitIndex int
/// prevLogIndex, CommitIndex.
}
type RequestAppendEntriesReply struct {
Term int
Success bool
NextIndex int // tells the leader this follower's next free log index, so the leader can jump back in one step instead of decrementing one entry at a time.
}
//
// example RequestVote RPC handler.
//
// RPC request/response handlers should check the term and convert this peer to a follower when needed;
// they should also check the candidate's log before granting a vote.
func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) {
//fmt.Printf("[::RequestVote]\n")
// Your code here.
rf.mtx.Lock()
defer rf.mtx.Unlock()
defer rf.persist()
reply.VoteGranted = false
// case 1: check term
if args.Term < rf.currentTerm {
reply.Term = rf.currentTerm
return
}
if args.Term > rf.currentTerm { // set term to max. and then maybe become leader.
rf.currentTerm = args.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
}
reply.Term = rf.currentTerm
// case 2: check log
isNewer := false
if args.LastLogTerm == rf.log[len(rf.log)-1].Term {
isNewer = args.LastLogIndex >= rf.log[len(rf.log)-1].LogIndex
} else {
isNewer = args.LastLogTerm > rf.log[len(rf.log)-1].Term
}
if (rf.voteFor == -1 || rf.voteFor == args.CandidateId) && isNewer {
rf.chanVoteOther <- 1
rf.state = STATE_FOLLOWER
reply.VoteGranted = true
rf.voteFor = args.CandidateId
}
}
func (rf *Raft) RequestAppendEntries(args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) {
// Your code here.
// Q: should candidate append entries?
//fmt.Println("[::RequestAppendEntries]", args)
rf.mtx.Lock()
defer rf.mtx.Unlock()
defer rf.persist()
// case 1: check term
reply.Success = false
if args.Term < rf.currentTerm {
reply.Term = rf.currentTerm
reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1
return
}
rf.chanHeartBeat <- 1
if args.Term > rf.currentTerm {
rf.currentTerm = args.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
}
reply.Term = args.Term
// case 2: check log number
if args.PrevLogIndex > rf.log[len(rf.log)-1].LogIndex {
reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1
return
}
// case 3: check log term. decrease one each time...
if args.PrevLogIndex > 0 {
term := rf.log[args.PrevLogIndex].Term
if args.PrevLogTerm != term {
for i := args.PrevLogIndex - 1; i >= 0; i-- {
if rf.log[i].Term != term {
reply.NextIndex = i + 1
break
}
}
return
}
}
// step4: success: copy the log.
if args.PrevLogIndex < 0 {
} else {
rf.log = rf.log[:args.PrevLogIndex+1]
rf.log = append(rf.log, args.Entries...)
reply.Success = true
reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1
}
if args.LeaderCommitIndex > rf.commitIndex {
last := rf.log[len(rf.log)-1].LogIndex
if args.LeaderCommitIndex > last {
rf.commitIndex = last
} else {
rf.commitIndex = args.LeaderCommitIndex
}
rf.chanCommit <- 1
}
return
}
//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// returns true if labrpc says the RPC was delivered.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
func (rf *Raft) sendRequestVote(server int, args RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
rf.mtx.Lock()
defer rf.mtx.Unlock()
if ok {
if rf.state != STATE_CANDIDATE {
return ok
}
if args.Term != rf.currentTerm { // consider the current term's reply
return ok
}
if reply.Term > rf.currentTerm {
rf.currentTerm = reply.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
rf.persist()
}
if reply.VoteGranted {
rf.beenVotedCount++
if rf.state == STATE_CANDIDATE && rf.beenVotedCount > len(rf.peers)/2 {
rf.state = STATE_FOLLOWER // ...
rf.chanBecomeLeader <- 1
}
}
}
return ok
}
func (rf *Raft) sendAppendEntries(server int, args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) bool {
//fmt.Printf("[sendAppendEntries][who=%v][term=%v]\n", rf.me, args.Term)
ok := rf.peers[server].Call("Raft.RequestAppendEntries", args, reply)
rf.mtx.Lock()
defer rf.mtx.Unlock()
if ok {
if rf.state != STATE_LEADER {
return ok
}
if args.Term != rf.currentTerm {
return ok
}
if reply.Term > rf.currentTerm {
rf.currentTerm = reply.Term
rf.state = STATE_FOLLOWER
rf.voteFor = -1
rf.persist()
return ok
}
if reply.Success {
if len(args.Entries) > 0 {
rf.nextIndex[server] = args.Entries[len(args.Entries)-1].LogIndex + 1
rf.matchIndex[server] = rf.nextIndex[server] - 1
}
} else {
rf.nextIndex[server] = reply.NextIndex
}
}
return ok
}
func (rf *Raft) broadcastRequestVote() {
var args RequestVoteArgs
rf.mtx.Lock()
args.Term = rf.currentTerm
args.CandidateId = rf.me
args.LastLogTerm = rf.log[len(rf.log)-1].Term
args.LastLogIndex = rf.log[len(rf.log)-1].LogIndex
rf.mtx.Unlock()
//fmt.Printf("[broadcastRequestVote][Candidate = %v]\n", rf.me)
for i := range rf.peers {
if i != rf.me && rf.state == STATE_CANDIDATE {
go func(i int) {
var reply RequestVoteReply
rf.sendRequestVote(i, args, &reply)
}(i)
}
}
}
func (rf *Raft) | broadcastAppendEntries | identifier_name |
|
mod.rs | pub struct VirtualMethod<'ast> {
pub sig: FnSig<'ast>,
pub body: Option<&'ast Block>,
}
pub struct FnSig<'ast> {
pub name: Ident,
pub inputs: Vec<FnArg<'ast>>,
pub output: Ty<'ast>,
}
pub enum FnArg<'ast> {
SelfRef(Token!(&), Token!(self)),
Arg {
mutbl: Option<Token![mut]>,
name: Ident,
ty: Ty<'ast>,
}
}
pub struct Signal {
// FIXME: signal flags
}
pub enum Ty<'ast> {
Unit,
Char(Ident),
Bool(Ident),
Borrowed(Box<Ty<'ast>>),
Integer(Ident),
Owned(&'ast syn::Path),
}
impl<'ast> Program<'ast> {
pub fn from_ast_program(ast: &'ast ast::Program) -> Result<Program<'ast>> {
check_program(ast)?;
let mut classes = Classes::new();
for class in ast.classes() {
classes.add(class)?;
}
for impl_ in ast.impls() {
classes.add_impl(impl_)?;
}
Ok(Program {
classes: classes,
})
}
}
impl<'ast> Classes<'ast> {
fn new() -> Classes<'ast> {
Classes {
items: HashMap::new(),
}
}
pub fn len(&self) -> usize {
self.items.len()
}
pub fn get(&self, name: &str) -> &Class {
self.items.iter().find(|c| c.1.name == name).unwrap().1
}
fn add(&mut self, ast_class: &'ast ast::Class) -> Result<()>
{
let prev = self.items.insert(ast_class.name, Class {
name: ast_class.name,
gobject_parent: ast_class.extends.is_none(),
parent: tokens_ParentInstance(ast_class),
parent_ffi: tokens_ParentInstanceFfi(ast_class),
parent_class_ffi: tokens_ParentClassFfi(ast_class),
implements: Vec::new(),
instance_private: ast_class.items.iter().filter_map(|i| {
match *i {
ast::ClassItem::InstancePrivate(ref ip) => Some(&ip.path),
}
}).next(),
slots: Vec::new(),
overrides: HashMap::new(),
});
if prev.is_some() {
bail!("redefinition of class `{}`", ast_class.name);
}
Ok(())
}
fn add_impl(&mut self, impl_: &'ast ast::Impl) -> Result<()> {
let class = match self.items.get_mut(&impl_.self_path) {
Some(class) => class,
None => bail!("impl for class that doesn't exist: {}", impl_.self_path),
};
match impl_.trait_ {
Some(parent_class) => {
for item in impl_.items.iter() {
let item = match item.node {
ast::ImplItemKind::Method(ref m) => m,
ast::ImplItemKind::ReserveSlots(_) => {
bail!("can't reserve slots in a parent class impl");
}
};
if item.signal {
bail!("can't implement signals for parent classes")
}
if !item.virtual_ {
bail!("can only implement virtual functions for parent classes")
}
if item.public {
bail!("overrides are always public, no `pub` needed")
}
let method = match class.translate_method(item)? {
Slot::VirtualMethod(VirtualMethod { sig, body: Some(body) }) => {
Method { public: false, sig, body }
}
Slot::VirtualMethod(VirtualMethod { .. }) => {
bail!("overrides must provide a body for virtual \
methods");
}
_ => unreachable!(),
};
class.overrides
.entry(parent_class)
.or_insert(Vec::new())
.push(method);
}
}
None => {
for item in impl_.items.iter() {
let slot = class.translate_slot(item)?;
class.slots.push(slot);
}
}
}
Ok(())
}
pub fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Class> + 'a {
self.items.values()
}
}
impl<'ast> Class<'ast> {
fn translate_slot(&mut self, item: &'ast ast::ImplItem) -> Result<Slot<'ast>> {
assert_eq!(item.attrs.len(), 0); // attributes unimplemented
match item.node {
ast::ImplItemKind::Method(ref method) => self.translate_method(method),
ast::ImplItemKind::ReserveSlots(ref _slots) => {
panic!("reserve slots not implemented");
}
}
}
fn translate_method(&mut self, method: &'ast ast::ImplItemMethod)
-> Result<Slot<'ast>>
{
if method.signal {
panic!("signals not implemented");
}
if method.virtual_ {
if method.public {
bail!("function `{}` is virtual so it doesn't need to be public",
method.name)
}
let sig = self.extract_sig(method)?;
Ok(Slot::VirtualMethod(VirtualMethod {
sig,
body: method.body.as_ref(),
}))
} else {
let sig = self.extract_sig(method)?;
Ok(Slot::Method(Method {
sig,
public: method.public,
body: method.body.as_ref().ok_or_else(|| {
format!("function `{}` requires a body", method.name)
})?,
}))
}
}
fn extract_sig(&mut self, method: &'ast ast::ImplItemMethod) -> Result<FnSig<'ast>> {
Ok(FnSig {
output: self.extract_output(&method.output)?,
inputs: self.extract_inputs(&method.inputs)?,
name: method.name,
})
}
fn extract_output(&mut self, output: &'ast ReturnType) -> Result<Ty<'ast>> |
fn extract_inputs(&mut self, punc: &'ast Punctuated<syn::FnArg, Token!(,)>) -> Result<Vec<FnArg<'ast>>> {
punc.iter().map(|arg| {
match *arg {
syn::FnArg::Captured(syn::ArgCaptured { ref pat, ref ty, .. }) => {
let (name, mutbl) = match *pat {
syn::Pat::Ident(syn::PatIdent {
by_ref: None,
mutability: m,
ident,
subpat: None,
}) => {
(ident, m)
}
_ => bail!("only bare identifiers are allowed as \
argument patterns"),
};
Ok(FnArg::Arg {
mutbl,
name,
ty: self.extract_ty(ty)?,
})
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
and_token,
lifetime: None,
mutability: None,
self_token,
}) => {
Ok(FnArg::SelfRef(and_token, self_token))
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
mutability: Some(..),
..
}) => {
bail!("&mut self not implemented yet")
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
lifetime: Some(..),
..
}) => {
bail!("lifetime arguments on self not implemented yet")
}
syn::FnArg::SelfValue(_) => bail!("by-value self not implemented"),
syn::FnArg::Inferred(_) => bail!("cannot have inferred function arguments"),
syn::FnArg::Ignored(_) => bail!("cannot have ignored function arguments"),
}
}).collect()
}
fn extract_ty(&mut self, t: &'ast syn::Type) -> Result<Ty<'ast>> {
match *t {
syn::Type::Slice(_) => bail!("slice types not implemented yet"),
syn::Type::Array(_) => bail!("array types not implemented yet"),
syn::Type::Ptr(_) => bail!("ptr types not implemented yet"),
syn::Type::Reference(syn::TypeReference { lifetime: Some(_), .. }) => {
bail!("borrowed types with lifetimes not implemented yet")
}
syn::Type::Reference(syn::TypeReference { lifetime: None, ref elem, ref mutability, .. }) => {
if let Some(_) = *mutability {
bail!("mutable borrowed pointers not implemented");
}
let path = match **elem {
syn::Type::Path(syn::TypePath { qself: None, ref path }) => path,
_ => bail!("only borrowed pointers to paths supported"),
};
let ty = self.extract_ty_path(path)?;
Ok(Ty::Borrowed(Box::new(ty)))
}
syn::Type::BareFn(_) => bail!("function pointer types not implemented yet"),
syn::Type::Never(_) => bail!("never not implemented yet"),
syn::Type::Tuple(syn::TypeTuple { ref elems, .. }) => {
if elems.len() == 0 {
Ok(Ty::Unit)
} else {
bail!("tuple types not implemented yet")
}
}
syn::Type::Path(syn::TypePath { qself: Some(_), .. }) => {
bail!("path types with qualified self (`as` syntax) not | {
match *output {
ReturnType::Type(_, ref boxt) => self.extract_ty(boxt),
ReturnType::Default => Ok(Ty::Unit),
}
} | identifier_body |
mod.rs | pub struct VirtualMethod<'ast> {
pub sig: FnSig<'ast>,
pub body: Option<&'ast Block>,
}
pub struct FnSig<'ast> {
pub name: Ident,
pub inputs: Vec<FnArg<'ast>>,
pub output: Ty<'ast>,
}
pub enum FnArg<'ast> {
SelfRef(Token!(&), Token!(self)),
Arg {
mutbl: Option<Token![mut]>,
name: Ident,
ty: Ty<'ast>,
}
}
pub struct Signal {
// FIXME: signal flags
}
pub enum Ty<'ast> {
Unit,
Char(Ident),
Bool(Ident),
Borrowed(Box<Ty<'ast>>),
Integer(Ident),
Owned(&'ast syn::Path),
}
impl<'ast> Program<'ast> {
pub fn from_ast_program(ast: &'ast ast::Program) -> Result<Program<'ast>> {
check_program(ast)?;
let mut classes = Classes::new();
for class in ast.classes() {
classes.add(class)?;
}
for impl_ in ast.impls() {
classes.add_impl(impl_)?;
}
Ok(Program {
classes: classes,
})
}
}
impl<'ast> Classes<'ast> {
fn new() -> Classes<'ast> {
Classes {
items: HashMap::new(),
}
}
pub fn len(&self) -> usize {
self.items.len()
}
pub fn get(&self, name: &str) -> &Class {
self.items.iter().find(|c| c.1.name == name).unwrap().1
}
fn add(&mut self, ast_class: &'ast ast::Class) -> Result<()>
{
let prev = self.items.insert(ast_class.name, Class {
name: ast_class.name,
gobject_parent: ast_class.extends.is_none(),
parent: tokens_ParentInstance(ast_class),
parent_ffi: tokens_ParentInstanceFfi(ast_class),
parent_class_ffi: tokens_ParentClassFfi(ast_class),
implements: Vec::new(),
instance_private: ast_class.items.iter().filter_map(|i| {
match *i {
ast::ClassItem::InstancePrivate(ref ip) => Some(&ip.path),
}
}).next(),
slots: Vec::new(),
overrides: HashMap::new(),
});
if prev.is_some() {
bail!("redefinition of class `{}`", ast_class.name);
}
Ok(())
}
fn add_impl(&mut self, impl_: &'ast ast::Impl) -> Result<()> {
let class = match self.items.get_mut(&impl_.self_path) {
Some(class) => class,
None => bail!("impl for class that doesn't exist: {}", impl_.self_path),
};
match impl_.trait_ {
Some(parent_class) => {
for item in impl_.items.iter() {
let item = match item.node {
ast::ImplItemKind::Method(ref m) => m,
ast::ImplItemKind::ReserveSlots(_) => {
bail!("can't reserve slots in a parent class impl");
}
};
if item.signal {
bail!("can't implement signals for parent classes")
}
if !item.virtual_ {
bail!("can only implement virtual functions for parent classes")
}
if item.public {
bail!("overrides are always public, no `pub` needed")
}
let method = match class.translate_method(item)? {
Slot::VirtualMethod(VirtualMethod { sig, body: Some(body) }) => {
Method { public: false, sig, body }
}
Slot::VirtualMethod(VirtualMethod { .. }) => {
bail!("overrides must provide a body for virtual \
methods");
}
_ => unreachable!(),
};
class.overrides
.entry(parent_class)
.or_insert(Vec::new())
.push(method);
}
}
None => {
for item in impl_.items.iter() {
let slot = class.translate_slot(item)?;
class.slots.push(slot); | }
}
}
Ok(())
}
pub fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Class> + 'a {
self.items.values()
}
}
impl<'ast> Class<'ast> {
fn translate_slot(&mut self, item: &'ast ast::ImplItem) -> Result<Slot<'ast>> {
assert_eq!(item.attrs.len(), 0); // attributes unimplemented
match item.node {
ast::ImplItemKind::Method(ref method) => self.translate_method(method),
ast::ImplItemKind::ReserveSlots(ref _slots) => {
panic!("reserve slots not implemented");
}
}
}
fn translate_method(&mut self, method: &'ast ast::ImplItemMethod)
-> Result<Slot<'ast>>
{
if method.signal {
panic!("signals not implemented");
}
if method.virtual_ {
if method.public {
bail!("function `{}` is virtual so it doesn't need to be public",
method.name)
}
let sig = self.extract_sig(method)?;
Ok(Slot::VirtualMethod(VirtualMethod {
sig,
body: method.body.as_ref(),
}))
} else {
let sig = self.extract_sig(method)?;
Ok(Slot::Method(Method {
sig,
public: method.public,
body: method.body.as_ref().ok_or_else(|| {
format!("function `{}` requires a body", method.name)
})?,
}))
}
}
fn extract_sig(&mut self, method: &'ast ast::ImplItemMethod) -> Result<FnSig<'ast>> {
Ok(FnSig {
output: self.extract_output(&method.output)?,
inputs: self.extract_inputs(&method.inputs)?,
name: method.name,
})
}
fn extract_output(&mut self, output: &'ast ReturnType) -> Result<Ty<'ast>> {
match *output {
ReturnType::Type(_, ref boxt) => self.extract_ty(boxt),
ReturnType::Default => Ok(Ty::Unit),
}
}
fn extract_inputs(&mut self, punc: &'ast Punctuated<syn::FnArg, Token!(,)>) -> Result<Vec<FnArg<'ast>>> {
punc.iter().map(|arg| {
match *arg {
syn::FnArg::Captured(syn::ArgCaptured { ref pat, ref ty, .. }) => {
let (name, mutbl) = match *pat {
syn::Pat::Ident(syn::PatIdent {
by_ref: None,
mutability: m,
ident,
subpat: None,
}) => {
(ident, m)
}
_ => bail!("only bare identifiers are allowed as \
argument patterns"),
};
Ok(FnArg::Arg {
mutbl,
name,
ty: self.extract_ty(ty)?,
})
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
and_token,
lifetime: None,
mutability: None,
self_token,
}) => {
Ok(FnArg::SelfRef(and_token, self_token))
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
mutability: Some(..),
..
}) => {
bail!("&mut self not implemented yet")
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
lifetime: Some(..),
..
}) => {
bail!("lifetime arguments on self not implemented yet")
}
syn::FnArg::SelfValue(_) => bail!("by-value self not implemented"),
syn::FnArg::Inferred(_) => bail!("cannot have inferred function arguments"),
syn::FnArg::Ignored(_) => bail!("cannot have ignored function arguments"),
}
}).collect()
}
fn extract_ty(&mut self, t: &'ast syn::Type) -> Result<Ty<'ast>> {
match *t {
syn::Type::Slice(_) => bail!("slice types not implemented yet"),
syn::Type::Array(_) => bail!("array types not implemented yet"),
syn::Type::Ptr(_) => bail!("ptr types not implemented yet"),
syn::Type::Reference(syn::TypeReference { lifetime: Some(_), .. }) => {
bail!("borrowed types with lifetimes not implemented yet")
}
syn::Type::Reference(syn::TypeReference { lifetime: None, ref elem, ref mutability, .. }) => {
if let Some(_) = *mutability {
bail!("mutable borrowed pointers not implemented");
}
let path = match **elem {
syn::Type::Path(syn::TypePath { qself: None, ref path }) => path,
_ => bail!("only borrowed pointers to paths supported"),
};
let ty = self.extract_ty_path(path)?;
Ok(Ty::Borrowed(Box::new(ty)))
}
syn::Type::BareFn(_) => bail!("function pointer types not implemented yet"),
syn::Type::Never(_) => bail!("never not implemented yet"),
syn::Type::Tuple(syn::TypeTuple { ref elems, .. }) => {
if elems.len() == 0 {
Ok(Ty::Unit)
} else {
bail!("tuple types not implemented yet")
}
}
syn::Type::Path(syn::TypePath { qself: Some(_), .. }) => {
bail!("path types with qualified self (`as` syntax) not allowed")
| random_line_split |
|
mod.rs | pub struct VirtualMethod<'ast> {
pub sig: FnSig<'ast>,
pub body: Option<&'ast Block>,
}
pub struct FnSig<'ast> {
pub name: Ident,
pub inputs: Vec<FnArg<'ast>>,
pub output: Ty<'ast>,
}
pub enum FnArg<'ast> {
SelfRef(Token!(&), Token!(self)),
Arg {
mutbl: Option<Token![mut]>,
name: Ident,
ty: Ty<'ast>,
}
}
pub struct Signal {
// FIXME: signal flags
}
pub enum Ty<'ast> {
Unit,
Char(Ident),
Bool(Ident),
Borrowed(Box<Ty<'ast>>),
Integer(Ident),
Owned(&'ast syn::Path),
}
impl<'ast> Program<'ast> {
pub fn from_ast_program(ast: &'ast ast::Program) -> Result<Program<'ast>> {
check_program(ast)?;
let mut classes = Classes::new();
for class in ast.classes() {
classes.add(class)?;
}
for impl_ in ast.impls() {
classes.add_impl(impl_)?;
}
Ok(Program {
classes: classes,
})
}
}
impl<'ast> Classes<'ast> {
fn new() -> Classes<'ast> {
Classes {
items: HashMap::new(),
}
}
pub fn | (&self) -> usize {
self.items.len()
}
pub fn get(&self, name: &str) -> &Class {
self.items.iter().find(|c| c.1.name == name).unwrap().1
}
fn add(&mut self, ast_class: &'ast ast::Class) -> Result<()>
{
let prev = self.items.insert(ast_class.name, Class {
name: ast_class.name,
gobject_parent: ast_class.extends.is_none(),
parent: tokens_ParentInstance(ast_class),
parent_ffi: tokens_ParentInstanceFfi(ast_class),
parent_class_ffi: tokens_ParentClassFfi(ast_class),
implements: Vec::new(),
instance_private: ast_class.items.iter().filter_map(|i| {
match *i {
ast::ClassItem::InstancePrivate(ref ip) => Some(&ip.path),
}
}).next(),
slots: Vec::new(),
overrides: HashMap::new(),
});
if prev.is_some() {
bail!("redefinition of class `{}`", ast_class.name);
}
Ok(())
}
fn add_impl(&mut self, impl_: &'ast ast::Impl) -> Result<()> {
let class = match self.items.get_mut(&impl_.self_path) {
Some(class) => class,
None => bail!("impl for class that doesn't exist: {}", impl_.self_path),
};
match impl_.trait_ {
Some(parent_class) => {
for item in impl_.items.iter() {
let item = match item.node {
ast::ImplItemKind::Method(ref m) => m,
ast::ImplItemKind::ReserveSlots(_) => {
bail!("can't reserve slots in a parent class impl");
}
};
if item.signal {
bail!("can't implement signals for parent classes")
}
if !item.virtual_ {
bail!("can only implement virtual functions for parent classes")
}
if item.public {
bail!("overrides are always public, no `pub` needed")
}
let method = match class.translate_method(item)? {
Slot::VirtualMethod(VirtualMethod { sig, body: Some(body) }) => {
Method { public: false, sig, body }
}
Slot::VirtualMethod(VirtualMethod { .. }) => {
bail!("overrides must provide a body for virtual \
methods");
}
_ => unreachable!(),
};
class.overrides
.entry(parent_class)
.or_insert(Vec::new())
.push(method);
}
}
None => {
for item in impl_.items.iter() {
let slot = class.translate_slot(item)?;
class.slots.push(slot);
}
}
}
Ok(())
}
pub fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Class> + 'a {
self.items.values()
}
}
impl<'ast> Class<'ast> {
fn translate_slot(&mut self, item: &'ast ast::ImplItem) -> Result<Slot<'ast>> {
assert_eq!(item.attrs.len(), 0); // attributes unimplemented
match item.node {
ast::ImplItemKind::Method(ref method) => self.translate_method(method),
ast::ImplItemKind::ReserveSlots(ref _slots) => {
panic!("reserve slots not implemented");
}
}
}
fn translate_method(&mut self, method: &'ast ast::ImplItemMethod)
-> Result<Slot<'ast>>
{
if method.signal {
panic!("signals not implemented");
}
if method.virtual_ {
if method.public {
bail!("function `{}` is virtual so it doesn't need to be public",
method.name)
}
let sig = self.extract_sig(method)?;
Ok(Slot::VirtualMethod(VirtualMethod {
sig,
body: method.body.as_ref(),
}))
} else {
let sig = self.extract_sig(method)?;
Ok(Slot::Method(Method {
sig,
public: method.public,
body: method.body.as_ref().ok_or_else(|| {
format!("function `{}` requires a body", method.name)
})?,
}))
}
}
fn extract_sig(&mut self, method: &'ast ast::ImplItemMethod) -> Result<FnSig<'ast>> {
Ok(FnSig {
output: self.extract_output(&method.output)?,
inputs: self.extract_inputs(&method.inputs)?,
name: method.name,
})
}
fn extract_output(&mut self, output: &'ast ReturnType) -> Result<Ty<'ast>> {
match *output {
ReturnType::Type(_, ref boxt) => self.extract_ty(boxt),
ReturnType::Default => Ok(Ty::Unit),
}
}
fn extract_inputs(&mut self, punc: &'ast Punctuated<syn::FnArg, Token!(,)>) -> Result<Vec<FnArg<'ast>>> {
punc.iter().map(|arg| {
match *arg {
syn::FnArg::Captured(syn::ArgCaptured { ref pat, ref ty, .. }) => {
let (name, mutbl) = match *pat {
syn::Pat::Ident(syn::PatIdent {
by_ref: None,
mutability: m,
ident,
subpat: None,
}) => {
(ident, m)
}
_ => bail!("only bare identifiers are allowed as \
argument patterns"),
};
Ok(FnArg::Arg {
mutbl,
name,
ty: self.extract_ty(ty)?,
})
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
and_token,
lifetime: None,
mutability: None,
self_token,
}) => {
Ok(FnArg::SelfRef(and_token, self_token))
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
mutability: Some(..),
..
}) => {
bail!("&mut self not implemented yet")
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
lifetime: Some(..),
..
}) => {
bail!("lifetime arguments on self not implemented yet")
}
syn::FnArg::SelfValue(_) => bail!("by-value self not implemented"),
syn::FnArg::Inferred(_) => bail!("cannot have inferred function arguments"),
syn::FnArg::Ignored(_) => bail!("cannot have ignored function arguments"),
}
}).collect()
}
fn extract_ty(&mut self, t: &'ast syn::Type) -> Result<Ty<'ast>> {
match *t {
syn::Type::Slice(_) => bail!("slice types not implemented yet"),
syn::Type::Array(_) => bail!("array types not implemented yet"),
syn::Type::Ptr(_) => bail!("ptr types not implemented yet"),
syn::Type::Reference(syn::TypeReference { lifetime: Some(_), .. }) => {
bail!("borrowed types with lifetimes not implemented yet")
}
syn::Type::Reference(syn::TypeReference { lifetime: None, ref elem, ref mutability, .. }) => {
if let Some(_) = *mutability {
bail!("mutable borrowed pointers not implemented");
}
let path = match **elem {
syn::Type::Path(syn::TypePath { qself: None, ref path }) => path,
_ => bail!("only borrowed pointers to paths supported"),
};
let ty = self.extract_ty_path(path)?;
Ok(Ty::Borrowed(Box::new(ty)))
}
syn::Type::BareFn(_) => bail!("function pointer types not implemented yet"),
syn::Type::Never(_) => bail!("never not implemented yet"),
syn::Type::Tuple(syn::TypeTuple { ref elems, .. }) => {
if elems.len() == 0 {
Ok(Ty::Unit)
} else {
bail!("tuple types not implemented yet")
}
}
syn::Type::Path(syn::TypePath { qself: Some(_), .. }) => {
bail!("path types with qualified self (`as` syntax) not allowed | len | identifier_name |
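The fim_type label that closes each record takes one of four values; the records in this section show identifier_name (the masked middle of the record above is the single identifier len), identifier_body, random_line_split and conditional_block. The reading of what each class masks below is inferred from these records rather than from any stated specification, and the enum and parser are illustrative only.

// The four masking strategies observed in the fim_type column.
// Descriptions are inferred from the surrounding records, not from a spec.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum FimType {
    IdentifierName,   // a single identifier is removed, e.g. `len` in `pub fn len(&self)`
    IdentifierBody,   // a function body is removed, leaving only the signature
    RandomLineSplit,  // a run of consecutive lines is removed at a random split point
    ConditionalBlock, // a conditional block such as an if/else arm is removed
}

fn parse_fim_type(label: &str) -> Option<FimType> {
    match label {
        "identifier_name" => Some(FimType::IdentifierName),
        "identifier_body" => Some(FimType::IdentifierBody),
        "random_line_split" => Some(FimType::RandomLineSplit),
        "conditional_block" => Some(FimType::ConditionalBlock),
        _ => None,
    }
}

fn main() {
    assert_eq!(parse_fim_type("identifier_name"), Some(FimType::IdentifierName));
    assert_eq!(parse_fim_type("unknown"), None);
}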
events.py | model_event
# Try creating an event just to trigger validation
_ = self.get_api_event()
self.upload_exception = None
@abc.abstractmethod
def get_api_event(self):
""" Get an API event instance """
pass
def get_file_entry(self):
""" Get information for a file that should be uploaded before this event is sent """
pass
def get_iteration(self):
return self._iter
def update(self, task=None, iter_offset=None, **kwargs):
""" Update event properties """
if task:
self._task = task
if iter_offset is not None and self._iter is not None:
self._iter += iter_offset
def _get_base_dict(self):
""" Get a dict with the base attributes """
res = dict(
task=self._task,
timestamp=self._timestamp,
metric=self._metric,
variant=self._variant
)
if self._iter is not None:
res.update(iter=self._iter)
if self._model_event is not None:
res.update(model_event=self._model_event)
return res
@classmethod
def _convert_np_nan_inf(cls, val):
if np.isnan(val):
cls._report_nan_warning_iteration += 1
if cls._report_nan_warning_iteration >= cls.report_nan_warning_period:
LoggerRoot.get_base_logger().info(
"NaN value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_nan_value to assign another value".format(
cls.default_nan_value
)
)
cls._report_nan_warning_iteration = 0
return cls.default_nan_value
if np.isinf(val):
cls._report_inf_warning_iteration += 1
if cls._report_inf_warning_iteration >= cls.report_inf_warning_period: | LoggerRoot.get_base_logger().info(
"inf value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_inf_value to assign another value".format(
cls.default_inf_value
)
)
cls._report_inf_warning_iteration = 0
return cls.default_inf_value
return val
class ScalarEvent(MetricsEventAdapter):
""" Scalar event adapter """
def __init__(self, metric, variant, value, iter, **kwargs):
self._value = self._convert_np_nan_inf(value)
super(ScalarEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsScalarEvent(
value=self._value,
**self._get_base_dict())
class ConsoleEvent(MetricsEventAdapter):
""" Console log event adapter """
def __init__(self, message, level, worker, **kwargs):
self._value = str(message)
self._level = getLevelName(level) if isinstance(level, int) else str(level)
self._worker = worker
super(ConsoleEvent, self).__init__(metric=None, variant=None, iter=0, **kwargs)
def get_api_event(self):
return events.TaskLogEvent(
task=self._task,
timestamp=self._timestamp,
level=self._level,
worker=self._worker,
msg=self._value)
class VectorEvent(MetricsEventAdapter):
""" Vector event adapter """
def __init__(self, metric, variant, values, iter, **kwargs):
self._values = [self._convert_np_nan_inf(v) for v in values]
super(VectorEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsVectorEvent(
values=self._values,
**self._get_base_dict())
class PlotEvent(MetricsEventAdapter):
""" Plot event adapter """
def __init__(self, metric, variant, plot_str, iter=None, **kwargs):
self._plot_str = plot_str
super(PlotEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsPlotEvent(
plot_str=self._plot_str,
**self._get_base_dict())
class ImageEventNoUpload(MetricsEventAdapter):
def __init__(self, metric, variant, src, iter=0, **kwargs):
self._url = src
parts = urlparse(src)
self._key = urlunparse(('', '', parts.path, parts.params, parts.query, parts.fragment))
super(ImageEventNoUpload, self).__init__(metric, variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsImageEvent(
url=self._url,
key=self._key,
**self._get_base_dict())
class UploadEvent(MetricsEventAdapter):
""" Image event adapter """
_format = deferred_config(
'metrics.images.format', 'JPEG',
transform=lambda x: '.' + str(x).upper().lstrip('.')
)
_quality = deferred_config('metrics.images.quality', 87, transform=int)
_subsampling = deferred_config('metrics.images.subsampling', 0, transform=int)
_file_history_size = deferred_config('metrics.file_history_size', 5, transform=int)
_upload_retries = 3
_metric_counters = {}
_metric_counters_lock = SingletonLock()
@staticmethod
def _replace_slash(part):
# replace the three quote symbols we cannot have,
# notice % will be converted to %25 when the link is quoted, so we should not use it
# Replace quote safe characters: ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | "," | "\n" | "\r"
return reduce(lambda a, b: a.replace(b, "0x{:02x}".format(ord(b))), "#\"\';?:@&=+$,%!\r\n",
part.replace('\\', '/').strip('/').replace('/', '.slash.'))
def __init__(self, metric, variant, image_data, local_image_path=None, iter=0, upload_uri=None,
file_history_size=None, delete_after_upload=False, **kwargs):
# param override_filename: override uploaded file name (notice extension will be added from local path
# param override_filename_ext: override uploaded file extension
if image_data is not None and (
not hasattr(image_data, 'shape') and not isinstance(image_data, (six.StringIO, six.BytesIO))):
raise ValueError('Image must have a shape attribute')
self._image_data = image_data
self._local_image_path = local_image_path
self._url = None
self._key = None
self._count = None
self._filename = None
self.file_history_size = file_history_size or int(self._file_history_size)
self._override_filename = kwargs.pop('override_filename', None)
self._upload_uri = upload_uri
self._delete_after_upload = delete_after_upload
# get upload uri upfront, either predefined image format or local file extension
# e.g.: image.png -> .png or image.raw.gz -> .raw.gz
self._override_filename_ext = kwargs.pop('override_filename_ext', None)
self._upload_filename = None
self._override_storage_key_prefix = kwargs.pop('override_storage_key_prefix', None)
self.retries = self._upload_retries
super(UploadEvent, self).__init__(metric, variant, iter=iter, **kwargs)
def _generate_file_name(self, force_pid_suffix=None):
if force_pid_suffix is None and self._filename is not None:
return
self._count = self._get_metric_count(self._metric, self._variant)
self._filename = self._override_filename
if not self._filename:
self._filename = '{}_{}'.format(self._metric, self._variant)
cnt = self._count if self.file_history_size < 1 else (self._count % self.file_history_size)
self._filename += '_{:05x}{:03d}'.format(force_pid_suffix, cnt) \
if force_pid_suffix else '_{:08d}'.format(cnt)
# make sure we have no '/' in the filename because it might access other folders,
# and we don't want that to occur
self._filename = self._replace_slash(self._filename)
# get upload uri upfront, either predefined image format or local file extension
# e.g.: image.png -> .png or image.raw.gz -> .raw.gz
filename_ext = self._override_filename_ext
if filename_ext is None:
filename_ext = str(self._format).lower() if self._image_data is not None else \
'.' + '.'.join(pathlib2.Path(self._local_image_path).parts[-1].split('.')[1:])
# always add file extension to the uploaded target file
if filename_ext and filename_ext[0] != '.':
filename_ext = '.' + filename_ext
self._upload_filename = pathlib2.Path(self._filename).as_posix()
if self._filename.rpartition(".")[2] != filename_ext.rpartition(".")[2]:
self._upload_filename += filename_ext
@classmethod
def _get_metric_count(cls, metric, variant, next=True):
""" Returns the next count number for the given metric/variant (rotates every few calls) """
counters = cls._metric_counters
key = '%s_%s' % (metric, variant)
try:
cls._metric_counters_lock.acquire()
value = counters.get(key, -1)
if next:
| random_line_split |
|
events.py | model_event
# Try creating an event just to trigger validation
_ = self.get_api_event()
self.upload_exception = None
@abc.abstractmethod
def get_api_event(self):
""" Get an API event instance """
pass
def get_file_entry(self):
""" Get information for a file that should be uploaded before this event is sent """
pass
def get_iteration(self):
return self._iter
def update(self, task=None, iter_offset=None, **kwargs):
""" Update event properties """
if task:
self._task = task
if iter_offset is not None and self._iter is not None:
self._iter += iter_offset
def _get_base_dict(self):
""" Get a dict with the base attributes """
res = dict(
task=self._task,
timestamp=self._timestamp,
metric=self._metric,
variant=self._variant
)
if self._iter is not None:
res.update(iter=self._iter)
if self._model_event is not None:
res.update(model_event=self._model_event)
return res
@classmethod
def _convert_np_nan_inf(cls, val):
if np.isnan(val):
cls._report_nan_warning_iteration += 1
if cls._report_nan_warning_iteration >= cls.report_nan_warning_period:
LoggerRoot.get_base_logger().info(
"NaN value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_nan_value to assign another value".format(
cls.default_nan_value
)
)
cls._report_nan_warning_iteration = 0
return cls.default_nan_value
if np.isinf(val):
cls._report_inf_warning_iteration += 1
if cls._report_inf_warning_iteration >= cls.report_inf_warning_period:
LoggerRoot.get_base_logger().info(
"inf value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_inf_value to assign another value".format(
cls.default_inf_value
)
)
cls._report_inf_warning_iteration = 0
return cls.default_inf_value
return val
class ScalarEvent(MetricsEventAdapter):
""" Scalar event adapter """
def __init__(self, metric, variant, value, iter, **kwargs):
self._value = self._convert_np_nan_inf(value)
super(ScalarEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsScalarEvent(
value=self._value,
**self._get_base_dict())
class ConsoleEvent(MetricsEventAdapter):
""" Console log event adapter """
def __init__(self, message, level, worker, **kwargs):
self._value = str(message)
self._level = getLevelName(level) if isinstance(level, int) else str(level)
self._worker = worker
super(ConsoleEvent, self).__init__(metric=None, variant=None, iter=0, **kwargs)
def get_api_event(self):
return events.TaskLogEvent(
task=self._task,
timestamp=self._timestamp,
level=self._level,
worker=self._worker,
msg=self._value)
class VectorEvent(MetricsEventAdapter):
""" Vector event adapter """
def __init__(self, metric, variant, values, iter, **kwargs):
self._values = [self._convert_np_nan_inf(v) for v in values]
super(VectorEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsVectorEvent(
values=self._values,
**self._get_base_dict())
class PlotEvent(MetricsEventAdapter):
""" Plot event adapter """
def __init__(self, metric, variant, plot_str, iter=None, **kwargs):
self._plot_str = plot_str
super(PlotEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsPlotEvent(
plot_str=self._plot_str,
**self._get_base_dict())
class ImageEventNoUpload(MetricsEventAdapter):
def __init__(self, metric, variant, src, iter=0, **kwargs):
self._url = src
parts = urlparse(src)
self._key = urlunparse(('', '', parts.path, parts.params, parts.query, parts.fragment))
super(ImageEventNoUpload, self).__init__(metric, variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsImageEvent(
url=self._url,
key=self._key,
**self._get_base_dict())
class UploadEvent(MetricsEventAdapter):
""" Image event adapter """
_format = deferred_config(
'metrics.images.format', 'JPEG',
transform=lambda x: '.' + str(x).upper().lstrip('.')
)
_quality = deferred_config('metrics.images.quality', 87, transform=int)
_subsampling = deferred_config('metrics.images.subsampling', 0, transform=int)
_file_history_size = deferred_config('metrics.file_history_size', 5, transform=int)
_upload_retries = 3
_metric_counters = {}
_metric_counters_lock = SingletonLock()
@staticmethod
def _replace_slash(part):
# replace the three quote symbols we cannot have,
# notice % will be converted to %25 when the link is quoted, so we should not use it
# Replace quote safe characters: ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | "," | "\n" | "\r"
return reduce(lambda a, b: a.replace(b, "0x{:02x}".format(ord(b))), "#\"\';?:@&=+$,%!\r\n",
part.replace('\\', '/').strip('/').replace('/', '.slash.'))
def | (self, metric, variant, image_data, local_image_path=None, iter=0, upload_uri=None,
file_history_size=None, delete_after_upload=False, **kwargs):
# param override_filename: override uploaded file name (notice extension will be added from local path
# param override_filename_ext: override uploaded file extension
if image_data is not None and (
not hasattr(image_data, 'shape') and not isinstance(image_data, (six.StringIO, six.BytesIO))):
raise ValueError('Image must have a shape attribute')
self._image_data = image_data
self._local_image_path = local_image_path
self._url = None
self._key = None
self._count = None
self._filename = None
self.file_history_size = file_history_size or int(self._file_history_size)
self._override_filename = kwargs.pop('override_filename', None)
self._upload_uri = upload_uri
self._delete_after_upload = delete_after_upload
# get upload uri upfront, either predefined image format or local file extension
# e.g.: image.png -> .png or image.raw.gz -> .raw.gz
self._override_filename_ext = kwargs.pop('override_filename_ext', None)
self._upload_filename = None
self._override_storage_key_prefix = kwargs.pop('override_storage_key_prefix', None)
self.retries = self._upload_retries
super(UploadEvent, self).__init__(metric, variant, iter=iter, **kwargs)
def _generate_file_name(self, force_pid_suffix=None):
if force_pid_suffix is None and self._filename is not None:
return
self._count = self._get_metric_count(self._metric, self._variant)
self._filename = self._override_filename
if not self._filename:
self._filename = '{}_{}'.format(self._metric, self._variant)
cnt = self._count if self.file_history_size < 1 else (self._count % self.file_history_size)
self._filename += '_{:05x}{:03d}'.format(force_pid_suffix, cnt) \
if force_pid_suffix else '_{:08d}'.format(cnt)
# make sure we have no '/' in the filename because it might access other folders,
# and we don't want that to occur
self._filename = self._replace_slash(self._filename)
# get upload uri upfront, either predefined image format or local file extension
# e.g.: image.png -> .png or image.raw.gz -> .raw.gz
filename_ext = self._override_filename_ext
if filename_ext is None:
filename_ext = str(self._format).lower() if self._image_data is not None else \
'.' + '.'.join(pathlib2.Path(self._local_image_path).parts[-1].split('.')[1:])
# always add file extension to the uploaded target file
if filename_ext and filename_ext[0] != '.':
filename_ext = '.' + filename_ext
self._upload_filename = pathlib2.Path(self._filename).as_posix()
if self._filename.rpartition(".")[2] != filename_ext.rpartition(".")[2]:
self._upload_filename += filename_ext
@classmethod
def _get_metric_count(cls, metric, variant, next=True):
""" Returns the next count number for the given metric/variant (rotates every few calls) """
counters = cls._metric_counters
key = '%s_%s' % (metric, variant)
try:
cls._metric_counters_lock.acquire()
value = counters.get(key, -1)
if next:
| __init__ | identifier_name |
events.py | model_event
# Try creating an event just to trigger validation
_ = self.get_api_event()
self.upload_exception = None
@abc.abstractmethod
def get_api_event(self):
""" Get an API event instance """
pass
def get_file_entry(self):
""" Get information for a file that should be uploaded before this event is sent """
pass
def get_iteration(self):
return self._iter
def update(self, task=None, iter_offset=None, **kwargs):
|
def _get_base_dict(self):
""" Get a dict with the base attributes """
res = dict(
task=self._task,
timestamp=self._timestamp,
metric=self._metric,
variant=self._variant
)
if self._iter is not None:
res.update(iter=self._iter)
if self._model_event is not None:
res.update(model_event=self._model_event)
return res
@classmethod
def _convert_np_nan_inf(cls, val):
if np.isnan(val):
cls._report_nan_warning_iteration += 1
if cls._report_nan_warning_iteration >= cls.report_nan_warning_period:
LoggerRoot.get_base_logger().info(
"NaN value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_nan_value to assign another value".format(
cls.default_nan_value
)
)
cls._report_nan_warning_iteration = 0
return cls.default_nan_value
if np.isinf(val):
cls._report_inf_warning_iteration += 1
if cls._report_inf_warning_iteration >= cls.report_inf_warning_period:
LoggerRoot.get_base_logger().info(
"inf value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_inf_value to assign another value".format(
cls.default_inf_value
)
)
cls._report_inf_warning_iteration = 0
return cls.default_inf_value
return val
class ScalarEvent(MetricsEventAdapter):
""" Scalar event adapter """
def __init__(self, metric, variant, value, iter, **kwargs):
self._value = self._convert_np_nan_inf(value)
super(ScalarEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsScalarEvent(
value=self._value,
**self._get_base_dict())
class ConsoleEvent(MetricsEventAdapter):
""" Console log event adapter """
def __init__(self, message, level, worker, **kwargs):
self._value = str(message)
self._level = getLevelName(level) if isinstance(level, int) else str(level)
self._worker = worker
super(ConsoleEvent, self).__init__(metric=None, variant=None, iter=0, **kwargs)
def get_api_event(self):
return events.TaskLogEvent(
task=self._task,
timestamp=self._timestamp,
level=self._level,
worker=self._worker,
msg=self._value)
class VectorEvent(MetricsEventAdapter):
""" Vector event adapter """
def __init__(self, metric, variant, values, iter, **kwargs):
self._values = [self._convert_np_nan_inf(v) for v in values]
super(VectorEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsVectorEvent(
values=self._values,
**self._get_base_dict())
class PlotEvent(MetricsEventAdapter):
""" Plot event adapter """
def __init__(self, metric, variant, plot_str, iter=None, **kwargs):
self._plot_str = plot_str
super(PlotEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsPlotEvent(
plot_str=self._plot_str,
**self._get_base_dict())
class ImageEventNoUpload(MetricsEventAdapter):
def __init__(self, metric, variant, src, iter=0, **kwargs):
self._url = src
parts = urlparse(src)
self._key = urlunparse(('', '', parts.path, parts.params, parts.query, parts.fragment))
super(ImageEventNoUpload, self).__init__(metric, variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsImageEvent(
url=self._url,
key=self._key,
**self._get_base_dict())
class UploadEvent(MetricsEventAdapter):
""" Image event adapter """
_format = deferred_config(
'metrics.images.format', 'JPEG',
transform=lambda x: '.' + str(x).upper().lstrip('.')
)
_quality = deferred_config('metrics.images.quality', 87, transform=int)
_subsampling = deferred_config('metrics.images.subsampling', 0, transform=int)
_file_history_size = deferred_config('metrics.file_history_size', 5, transform=int)
_upload_retries = 3
_metric_counters = {}
_metric_counters_lock = SingletonLock()
@staticmethod
def _replace_slash(part):
# replace the three quote symbols we cannot have,
# notice % will be converted to %25 when the link is quoted, so we should not use it
# Replace quote safe characters: ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | "," | "\n" | "\r"
return reduce(lambda a, b: a.replace(b, "0x{:02x}".format(ord(b))), "#\"\';?:@&=+$,%!\r\n",
part.replace('\\', '/').strip('/').replace('/', '.slash.'))
def __init__(self, metric, variant, image_data, local_image_path=None, iter=0, upload_uri=None,
file_history_size=None, delete_after_upload=False, **kwargs):
# param override_filename: override uploaded file name (notice extension will be added from local path
# param override_filename_ext: override uploaded file extension
if image_data is not None and (
not hasattr(image_data, 'shape') and not isinstance(image_data, (six.StringIO, six.BytesIO))):
raise ValueError('Image must have a shape attribute')
self._image_data = image_data
self._local_image_path = local_image_path
self._url = None
self._key = None
self._count = None
self._filename = None
self.file_history_size = file_history_size or int(self._file_history_size)
self._override_filename = kwargs.pop('override_filename', None)
self._upload_uri = upload_uri
self._delete_after_upload = delete_after_upload
# get upload uri upfront, either predefined image format or local file extension
# e.g.: image.png -> .png or image.raw.gz -> .raw.gz
self._override_filename_ext = kwargs.pop('override_filename_ext', None)
self._upload_filename = None
self._override_storage_key_prefix = kwargs.pop('override_storage_key_prefix', None)
self.retries = self._upload_retries
super(UploadEvent, self).__init__(metric, variant, iter=iter, **kwargs)
def _generate_file_name(self, force_pid_suffix=None):
if force_pid_suffix is None and self._filename is not None:
return
self._count = self._get_metric_count(self._metric, self._variant)
self._filename = self._override_filename
if not self._filename:
self._filename = '{}_{}'.format(self._metric, self._variant)
cnt = self._count if self.file_history_size < 1 else (self._count % self.file_history_size)
self._filename += '_{:05x}{:03d}'.format(force_pid_suffix, cnt) \
if force_pid_suffix else '_{:08d}'.format(cnt)
# make sure we have to '/' in the filename because it might access other folders,
# and we don't want that to occur
self._filename = self._replace_slash(self._filename)
# get upload uri upfront, either predefined image format or local file extension
# e.g.: image.png -> .png or image.raw.gz -> .raw.gz
filename_ext = self._override_filename_ext
if filename_ext is None:
filename_ext = str(self._format).lower() if self._image_data is not None else \
'.' + '.'.join(pathlib2.Path(self._local_image_path).parts[-1].split('.')[1:])
# always add file extension to the uploaded target file
if filename_ext and filename_ext[0] != '.':
filename_ext = '.' + filename_ext
self._upload_filename = pathlib2.Path(self._filename).as_posix()
if self._filename.rpartition(".")[2] != filename_ext.rpartition(".")[2]:
self._upload_filename += filename_ext
@classmethod
def _get_metric_count(cls, metric, variant, next=True):
""" Returns the next count number for the given metric/variant (rotates every few calls) """
counters = cls._metric_counters
key = '%s_%s' % (metric, variant)
try:
cls._metric_counters_lock.acquire()
value = counters.get(key, -1)
if next:
| """ Update event properties """
if task:
self._task = task
if iter_offset is not None and self._iter is not None:
self._iter += iter_offset | identifier_body |
events.py | model_event
# Try creating an event just to trigger validation
_ = self.get_api_event()
self.upload_exception = None
@abc.abstractmethod
def get_api_event(self):
""" Get an API event instance """
pass
def get_file_entry(self):
""" Get information for a file that should be uploaded before this event is sent """
pass
def get_iteration(self):
return self._iter
def update(self, task=None, iter_offset=None, **kwargs):
""" Update event properties """
if task:
self._task = task
if iter_offset is not None and self._iter is not None:
self._iter += iter_offset
def _get_base_dict(self):
""" Get a dict with the base attributes """
res = dict(
task=self._task,
timestamp=self._timestamp,
metric=self._metric,
variant=self._variant
)
if self._iter is not None:
res.update(iter=self._iter)
if self._model_event is not None:
res.update(model_event=self._model_event)
return res
@classmethod
def _convert_np_nan_inf(cls, val):
if np.isnan(val):
cls._report_nan_warning_iteration += 1
if cls._report_nan_warning_iteration >= cls.report_nan_warning_period:
LoggerRoot.get_base_logger().info(
"NaN value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_nan_value to assign another value".format(
cls.default_nan_value
)
)
cls._report_nan_warning_iteration = 0
return cls.default_nan_value
if np.isinf(val):
|
return val
class ScalarEvent(MetricsEventAdapter):
""" Scalar event adapter """
def __init__(self, metric, variant, value, iter, **kwargs):
self._value = self._convert_np_nan_inf(value)
super(ScalarEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsScalarEvent(
value=self._value,
**self._get_base_dict())
class ConsoleEvent(MetricsEventAdapter):
""" Console log event adapter """
def __init__(self, message, level, worker, **kwargs):
self._value = str(message)
self._level = getLevelName(level) if isinstance(level, int) else str(level)
self._worker = worker
super(ConsoleEvent, self).__init__(metric=None, variant=None, iter=0, **kwargs)
def get_api_event(self):
return events.TaskLogEvent(
task=self._task,
timestamp=self._timestamp,
level=self._level,
worker=self._worker,
msg=self._value)
class VectorEvent(MetricsEventAdapter):
""" Vector event adapter """
def __init__(self, metric, variant, values, iter, **kwargs):
self._values = [self._convert_np_nan_inf(v) for v in values]
super(VectorEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsVectorEvent(
values=self._values,
**self._get_base_dict())
class PlotEvent(MetricsEventAdapter):
""" Plot event adapter """
def __init__(self, metric, variant, plot_str, iter=None, **kwargs):
self._plot_str = plot_str
super(PlotEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsPlotEvent(
plot_str=self._plot_str,
**self._get_base_dict())
class ImageEventNoUpload(MetricsEventAdapter):
def __init__(self, metric, variant, src, iter=0, **kwargs):
self._url = src
parts = urlparse(src)
self._key = urlunparse(('', '', parts.path, parts.params, parts.query, parts.fragment))
super(ImageEventNoUpload, self).__init__(metric, variant, iter=iter, **kwargs)
def get_api_event(self):
return events.MetricsImageEvent(
url=self._url,
key=self._key,
**self._get_base_dict())
class UploadEvent(MetricsEventAdapter):
""" Image event adapter """
_format = deferred_config(
'metrics.images.format', 'JPEG',
transform=lambda x: '.' + str(x).upper().lstrip('.')
)
_quality = deferred_config('metrics.images.quality', 87, transform=int)
_subsampling = deferred_config('metrics.images.subsampling', 0, transform=int)
_file_history_size = deferred_config('metrics.file_history_size', 5, transform=int)
_upload_retries = 3
_metric_counters = {}
_metric_counters_lock = SingletonLock()
@staticmethod
def _replace_slash(part):
# replace the three quote symbols we cannot have,
# notice % will be converted to %25 when the link is quoted, so we should not use it
# Replace quote safe characters: ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | "," | "\n" | "\r"
return reduce(lambda a, b: a.replace(b, "0x{:02x}".format(ord(b))), "#\"\';?:@&=+$,%!\r\n",
part.replace('\\', '/').strip('/').replace('/', '.slash.'))
def __init__(self, metric, variant, image_data, local_image_path=None, iter=0, upload_uri=None,
file_history_size=None, delete_after_upload=False, **kwargs):
# param override_filename: override uploaded file name (notice extension will be added from local path
# param override_filename_ext: override uploaded file extension
if image_data is not None and (
not hasattr(image_data, 'shape') and not isinstance(image_data, (six.StringIO, six.BytesIO))):
raise ValueError('Image must have a shape attribute')
self._image_data = image_data
self._local_image_path = local_image_path
self._url = None
self._key = None
self._count = None
self._filename = None
self.file_history_size = file_history_size or int(self._file_history_size)
self._override_filename = kwargs.pop('override_filename', None)
self._upload_uri = upload_uri
self._delete_after_upload = delete_after_upload
# get upload uri upfront, either predefined image format or local file extension
# e.g.: image.png -> .png or image.raw.gz -> .raw.gz
self._override_filename_ext = kwargs.pop('override_filename_ext', None)
self._upload_filename = None
self._override_storage_key_prefix = kwargs.pop('override_storage_key_prefix', None)
self.retries = self._upload_retries
super(UploadEvent, self).__init__(metric, variant, iter=iter, **kwargs)
def _generate_file_name(self, force_pid_suffix=None):
if force_pid_suffix is None and self._filename is not None:
return
self._count = self._get_metric_count(self._metric, self._variant)
self._filename = self._override_filename
if not self._filename:
self._filename = '{}_{}'.format(self._metric, self._variant)
cnt = self._count if self.file_history_size < 1 else (self._count % self.file_history_size)
self._filename += '_{:05x}{:03d}'.format(force_pid_suffix, cnt) \
if force_pid_suffix else '_{:08d}'.format(cnt)
# make sure we have no '/' in the filename because it might access other folders,
# and we don't want that to occur
self._filename = self._replace_slash(self._filename)
# get upload uri upfront, either predefined image format or local file extension
# e.g.: image.png -> .png or image.raw.gz -> .raw.gz
filename_ext = self._override_filename_ext
if filename_ext is None:
filename_ext = str(self._format).lower() if self._image_data is not None else \
'.' + '.'.join(pathlib2.Path(self._local_image_path).parts[-1].split('.')[1:])
# always add file extension to the uploaded target file
if filename_ext and filename_ext[0] != '.':
filename_ext = '.' + filename_ext
self._upload_filename = pathlib2.Path(self._filename).as_posix()
if self._filename.rpartition(".")[2] != filename_ext.rpartition(".")[2]:
self._upload_filename += filename_ext
@classmethod
def _get_metric_count(cls, metric, variant, next=True):
""" Returns the next count number for the given metric/variant (rotates every few calls) """
counters = cls._metric_counters
key = '%s_%s' % (metric, variant)
try:
cls._metric_counters_lock.acquire()
value = counters.get(key, -1)
if next:
| cls._report_inf_warning_iteration += 1
if cls._report_inf_warning_iteration >= cls.report_inf_warning_period:
LoggerRoot.get_base_logger().info(
"inf value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_inf_value to assign another value".format(
cls.default_inf_value
)
)
cls._report_inf_warning_iteration = 0
return cls.default_inf_value | conditional_block |
merkle.rs | I: IntoIterator<Item = MerkleNode>,
<I as IntoIterator>::IntoIter: ExactSizeIterator<Item = MerkleNode>,
{
let mut tag_engine = sha256::Hash::engine();
tag_engine.input(prefix.as_bytes());
tag_engine.input(":merkle:".as_bytes());
let iter = data.into_iter();
let width = iter.len();
// Tagging merkle tree root
let (root, height) = merklize_inner(&tag_engine, iter, 0, false, None);
tag_engine.input("root:height=".as_bytes());
tag_engine.input(&height.to_string().into_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(&width.to_string().into_bytes());
let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
let mut engine = MerkleNode::engine();
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
root.commit_encode(&mut engine);
let tagged_root = MerkleNode::from_engine(engine);
(tagged_root, height)
}
// TODO: Optimize to avoid allocations
// In current Rust, generic iterators do not work with recursion :(
fn merklize_inner(
engine_proto: &sha256::HashEngine,
mut iter: impl ExactSizeIterator<Item = MerkleNode>,
depth: u8,
extend: bool,
empty_node: Option<MerkleNode>,
) -> (MerkleNode, u8) {
let len = iter.len() + extend as usize;
let empty_node = empty_node.unwrap_or_else(|| MerkleNode::hash(&[0xFF]));
// Computing tagged hash as per BIP-340
let mut tag_engine = engine_proto.clone();
tag_engine.input("depth=".as_bytes());
tag_engine.input(depth.to_string().as_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(len.to_string().as_bytes());
tag_engine.input(":height=".as_bytes());
let mut engine = MerkleNode::engine();
if len <= 2 {
tag_engine.input("0:".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
let mut leaf_tag_engine = engine_proto.clone();
leaf_tag_engine.input("leaf".as_bytes());
let leaf_tag =
sha256::Hash::hash(&sha256::Hash::from_engine(leaf_tag_engine));
let mut leaf_engine = MerkleNode::engine();
leaf_engine.input(&leaf_tag[..]);
leaf_engine.input(&leaf_tag[..]);
let mut leaf1 = leaf_engine.clone();
leaf1.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf1).commit_encode(&mut engine);
leaf_engine.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf_engine).commit_encode(&mut engine);
(MerkleNode::from_engine(engine), 1)
} else | engine_proto,
iter,
depth + 1,
(div % 2 + len % 2) / 2 == 1,
Some(empty_node),
);
assert_eq!(
height1,
height2,
"merklization algorithm failure: height of subtrees is not equal \
(width = {}, depth = {}, prev_extend = {}, next_extend = {})",
len,
depth,
extend,
div % 2 == 1 && len % 2 == 1
);
tag_engine.input(height1.to_string().as_bytes());
tag_engine.input(":".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
node1.commit_encode(&mut engine);
node2.commit_encode(&mut engine);
(MerkleNode::from_engine(engine), height1 + 1)
}
}
/// The source data for the [LNPBP-81] merklization process.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default)]
pub struct MerkleSource<T>(
/// Array of the data which will be merklized
pub Vec<T>,
);
impl<L, I> From<I> for MerkleSource<L>
where
I: IntoIterator<Item = L>,
L: CommitEncode,
{
fn from(collection: I) -> Self { Self(collection.into_iter().collect()) }
}
impl<L> FromIterator<L> for MerkleSource<L>
where
L: CommitEncode,
{
fn from_iter<T: IntoIterator<Item = L>>(iter: T) -> Self {
iter.into_iter().collect::<Vec<_>>().into()
}
}
impl<L> CommitEncode for MerkleSource<L>
where
L: ConsensusMerkleCommit,
{
fn commit_encode<E: io::Write>(&self, e: E) -> usize {
let leafs = self.0.iter().map(L::consensus_commit);
merklize(L::MERKLE_NODE_PREFIX, leafs).0.commit_encode(e)
}
}
impl<L> ConsensusCommit for MerkleSource<L>
where
L: ConsensusMerkleCommit + CommitEncode,
{
type Commitment = MerkleNode;
#[inline]
fn consensus_commit(&self) -> Self::Commitment {
MerkleNode::from_slice(&self.commit_serialize())
.expect("MerkleSource::commit_serialize must produce MerkleNode")
}
#[inline]
fn consensus_verify(&self, commitment: &Self::Commitment) -> bool {
self.consensus_commit() == *commitment
}
}
/// Converts given piece of client-side-validated data into a structure which
/// can be used in merklization process.
///
/// This dedicated structure is required since with
/// `impl From<_> for MerkleSource` we would not be able to specify a concrete
/// tagged hash, which we require in [LNPBP-81] merklization and which we
/// provide here via [`ToMerkleSource::Leaf`]` associated type holding
/// [`ConsensusMerkleCommit::MERKLE_NODE_PREFIX`] prefix value.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
pub trait ToMerkleSource {
/// Defining type of the commitment produced during merlization process
type Leaf: ConsensusMerkleCommit;
/// Performs transformation of the data type into a merkilzable data
fn to_merkle_source(&self) -> MerkleSource<Self::Leaf>;
}
#[cfg(test)]
mod test {
use std::collections::BTreeMap;
use amplify::{bmap, s};
use bitcoin_hashes::hex::ToHex;
use bitcoin_hashes::{sha256d, Hash};
use strict_encoding::StrictEncode;
use super::*;
use crate::commit_encode::{strategies, Strategy};
use crate::CommitConceal;
#[test]
fn collections() {
// First, we define a data type
#[derive(
Clone,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Debug,
StrictEncode,
StrictDecode
)]
struct Item(pub String);
// Next, we say that it should be concealed using some function
// (double SHA256 hash in this case)
impl CommitConceal for Item {
type ConcealedCommitment = sha256d::Hash;
fn commit_conceal(&self) -> Self::ConcealedCommitment {
sha256d::Hash::hash(self.0.as_bytes())
}
}
// Next, we need to specify how the concealed data should be
// commit-encoded: this time we strict-serialize the hash
impl Strategy for sha256d::Hash {
type Strategy = strategies::UsingStrict;
}
// Now, we define commitment encoding for our concealable type: it
// should conceal the data
impl Strategy for Item {
type Strategy = strategies::UsingConce | {
let div = len / 2 + len % 2;
let (node1, height1) = merklize_inner(
engine_proto,
// Normally we should use `iter.by_ref().take(div)`, but currently
// the rust compiler is unable to parse recursion with generic types
iter.by_ref().take(div).collect::<Vec<_>>().into_iter(),
depth + 1,
false,
Some(empty_node),
);
let iter = if extend {
iter.chain(vec![empty_node]).collect::<Vec<_>>().into_iter()
} else {
iter.collect::<Vec<_>>().into_iter()
};
let (node2, height2) = merklize_inner( | conditional_block |
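The merkle.rs records in this section carry the LNPBP-81 merklization routine itself: BIP-340 style tagged hashing, extension of odd subtrees with an empty node, and a final root tag over the tree width and height. A minimal usage sketch follows. It assumes it would sit inside the same module as that code, so that MerkleNode and merklize are in scope, that merklize's first argument is the string tag prefix (as its use of prefix.as_bytes() suggests), and that the "example:prefix" tag and single-byte leaves are arbitrary placeholders.

// A minimal usage sketch for the merklization routine above; placed in the
// same module so that MerkleNode and merklize are in scope.
#[test]
fn merklize_usage_sketch() {
    // Leaves would normally be commitments to client-side-validated data;
    // here they are just hashes of single bytes.
    let leaves: Vec<MerkleNode> = (0u8..5).map(|i| MerkleNode::hash(&[i])).collect();

    // `merklize` tags the tree with the given prefix and returns the tagged
    // root together with the tree height.
    let (root, height) = merklize("example:prefix", leaves);

    // Five leaves are padded up to eight with empty nodes, giving height 3.
    assert_eq!(height, 3);
    assert_ne!(root, MerkleNode::hash(&[0xFF]));
}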
merkle.rs | I: IntoIterator<Item = MerkleNode>,
<I as IntoIterator>::IntoIter: ExactSizeIterator<Item = MerkleNode>,
{
let mut tag_engine = sha256::Hash::engine();
tag_engine.input(prefix.as_bytes());
tag_engine.input(":merkle:".as_bytes());
let iter = data.into_iter();
let width = iter.len();
// Tagging merkle tree root
let (root, height) = merklize_inner(&tag_engine, iter, 0, false, None);
tag_engine.input("root:height=".as_bytes());
tag_engine.input(&height.to_string().into_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(&width.to_string().into_bytes());
let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
let mut engine = MerkleNode::engine();
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
root.commit_encode(&mut engine);
let tagged_root = MerkleNode::from_engine(engine);
(tagged_root, height)
}
// TODO: Optimize to avoid allocations
// In current Rust, generic iterators do not work with recursion :(
fn merklize_inner(
engine_proto: &sha256::HashEngine,
mut iter: impl ExactSizeIterator<Item = MerkleNode>,
depth: u8,
extend: bool,
empty_node: Option<MerkleNode>,
) -> (MerkleNode, u8) {
let len = iter.len() + extend as usize;
let empty_node = empty_node.unwrap_or_else(|| MerkleNode::hash(&[0xFF]));
// Computing tagged hash as per BIP-340
let mut tag_engine = engine_proto.clone();
tag_engine.input("depth=".as_bytes());
tag_engine.input(depth.to_string().as_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(len.to_string().as_bytes());
tag_engine.input(":height=".as_bytes());
let mut engine = MerkleNode::engine();
if len <= 2 {
tag_engine.input("0:".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
let mut leaf_tag_engine = engine_proto.clone();
leaf_tag_engine.input("leaf".as_bytes());
let leaf_tag =
sha256::Hash::hash(&sha256::Hash::from_engine(leaf_tag_engine));
let mut leaf_engine = MerkleNode::engine();
leaf_engine.input(&leaf_tag[..]);
leaf_engine.input(&leaf_tag[..]);
let mut leaf1 = leaf_engine.clone();
leaf1.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf1).commit_encode(&mut engine);
leaf_engine.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf_engine).commit_encode(&mut engine);
(MerkleNode::from_engine(engine), 1)
} else {
let div = len / 2 + len % 2;
let (node1, height1) = merklize_inner(
engine_proto,
// Normally we should use `iter.by_ref().take(div)`, but currently
// the rust compiler is unable to parse recursion with generic types
iter.by_ref().take(div).collect::<Vec<_>>().into_iter(),
depth + 1,
false,
Some(empty_node),
);
let iter = if extend {
iter.chain(vec![empty_node]).collect::<Vec<_>>().into_iter()
} else {
iter.collect::<Vec<_>>().into_iter()
};
let (node2, height2) = merklize_inner(
engine_proto,
iter,
depth + 1,
(div % 2 + len % 2) / 2 == 1,
Some(empty_node),
);
assert_eq!(
height1,
height2,
"merklization algorithm failure: height of subtrees is not equal \
(width = {}, depth = {}, prev_extend = {}, next_extend = {})",
len,
depth,
extend,
div % 2 == 1 && len % 2 == 1
);
tag_engine.input(height1.to_string().as_bytes());
tag_engine.input(":".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
node1.commit_encode(&mut engine);
node2.commit_encode(&mut engine);
(MerkleNode::from_engine(engine), height1 + 1)
}
}
/// The source data for the [LNPBP-81] merklization process.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default)]
pub struct MerkleSource<T>(
/// Array of the data which will be merklized
pub Vec<T>,
);
impl<L, I> From<I> for MerkleSource<L>
where
I: IntoIterator<Item = L>,
L: CommitEncode,
{
fn from(collection: I) -> Self |
}
impl<L> FromIterator<L> for MerkleSource<L>
where
L: CommitEncode,
{
fn from_iter<T: IntoIterator<Item = L>>(iter: T) -> Self {
iter.into_iter().collect::<Vec<_>>().into()
}
}
impl<L> CommitEncode for MerkleSource<L>
where
L: ConsensusMerkleCommit,
{
fn commit_encode<E: io::Write>(&self, e: E) -> usize {
let leafs = self.0.iter().map(L::consensus_commit);
merklize(L::MERKLE_NODE_PREFIX, leafs).0.commit_encode(e)
}
}
impl<L> ConsensusCommit for MerkleSource<L>
where
L: ConsensusMerkleCommit + CommitEncode,
{
type Commitment = MerkleNode;
#[inline]
fn consensus_commit(&self) -> Self::Commitment {
MerkleNode::from_slice(&self.commit_serialize())
.expect("MerkleSource::commit_serialize must produce MerkleNode")
}
#[inline]
fn consensus_verify(&self, commitment: &Self::Commitment) -> bool {
self.consensus_commit() == *commitment
}
}
/// Converts given piece of client-side-validated data into a structure which
/// can be used in merklization process.
///
/// This dedicated structure is required since with
/// `impl From<_> for MerkleSource` we would not be able to specify a concrete
/// tagged hash, which we require in [LNPBP-81] merklization and which we
/// provide here via the [`ToMerkleSource::Leaf`] associated type holding
/// [`ConsensusMerkleCommit::MERKLE_NODE_PREFIX`] prefix value.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
pub trait ToMerkleSource {
/// Defining type of the commitment produced during the merklization process
type Leaf: ConsensusMerkleCommit;
/// Performs transformation of the data type into merklizable data
fn to_merkle_source(&self) -> MerkleSource<Self::Leaf>;
}
#[cfg(test)]
mod test {
use std::collections::BTreeMap;
use amplify::{bmap, s};
use bitcoin_hashes::hex::ToHex;
use bitcoin_hashes::{sha256d, Hash};
use strict_encoding::StrictEncode;
use super::*;
use crate::commit_encode::{strategies, Strategy};
use crate::CommitConceal;
#[test]
fn collections() {
// First, we define a data type
#[derive(
Clone,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Debug,
StrictEncode,
StrictDecode
)]
struct Item(pub String);
// Next, we say that it should be concealed using some function
// (double SHA256 hash in this case)
impl CommitConceal for Item {
type ConcealedCommitment = sha256d::Hash;
fn commit_conceal(&self) -> Self::ConcealedCommitment {
sha256d::Hash::hash(self.0.as_bytes())
}
}
// Next, we need to specify how the concealed data should be
// commit-encoded: this time we strict-serialize the hash
impl Strategy for sha256d::Hash {
type Strategy = strategies::UsingStrict;
}
// Now, we define commitment encoding for our concealable type: it
// should conceal the data
impl Strategy for Item {
type Strategy = strategies::UsingCon | { Self(collection.into_iter().collect()) } | identifier_body |
merkle.rs | I: IntoIterator<Item = MerkleNode>,
<I as IntoIterator>::IntoIter: ExactSizeIterator<Item = MerkleNode>,
{
let mut tag_engine = sha256::Hash::engine();
tag_engine.input(prefix.as_bytes());
tag_engine.input(":merkle:".as_bytes());
let iter = data.into_iter();
let width = iter.len();
// Tagging merkle tree root
let (root, height) = merklize_inner(&tag_engine, iter, 0, false, None);
tag_engine.input("root:height=".as_bytes());
tag_engine.input(&height.to_string().into_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(&width.to_string().into_bytes());
let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
let mut engine = MerkleNode::engine();
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
root.commit_encode(&mut engine);
let tagged_root = MerkleNode::from_engine(engine);
(tagged_root, height)
}
// TODO: Optimize to avoid allocations
// In current Rust, generic iterators do not work with recursion :(
fn merklize_inner(
engine_proto: &sha256::HashEngine,
mut iter: impl ExactSizeIterator<Item = MerkleNode>,
depth: u8,
extend: bool,
empty_node: Option<MerkleNode>,
) -> (MerkleNode, u8) {
let len = iter.len() + extend as usize;
let empty_node = empty_node.unwrap_or_else(|| MerkleNode::hash(&[0xFF]));
// Computing tagged hash as per BIP-340
let mut tag_engine = engine_proto.clone();
tag_engine.input("depth=".as_bytes());
tag_engine.input(depth.to_string().as_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(len.to_string().as_bytes());
tag_engine.input(":height=".as_bytes());
let mut engine = MerkleNode::engine();
if len <= 2 {
tag_engine.input("0:".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
let mut leaf_tag_engine = engine_proto.clone();
leaf_tag_engine.input("leaf".as_bytes());
let leaf_tag =
sha256::Hash::hash(&sha256::Hash::from_engine(leaf_tag_engine));
let mut leaf_engine = MerkleNode::engine();
leaf_engine.input(&leaf_tag[..]);
leaf_engine.input(&leaf_tag[..]);
let mut leaf1 = leaf_engine.clone();
leaf1.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf1).commit_encode(&mut engine);
leaf_engine.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf_engine).commit_encode(&mut engine);
(MerkleNode::from_engine(engine), 1)
} else {
let div = len / 2 + len % 2;
let (node1, height1) = merklize_inner(
engine_proto,
// Normally we should use `iter.by_ref().take(div)`, but currently
// the Rust compiler is unable to parse recursion with generic types
iter.by_ref().take(div).collect::<Vec<_>>().into_iter(), | let iter = if extend {
iter.chain(vec![empty_node]).collect::<Vec<_>>().into_iter()
} else {
iter.collect::<Vec<_>>().into_iter()
};
let (node2, height2) = merklize_inner(
engine_proto,
iter,
depth + 1,
(div % 2 + len % 2) / 2 == 1,
Some(empty_node),
);
assert_eq!(
height1,
height2,
"merklization algorithm failure: height of subtrees is not equal \
(width = {}, depth = {}, prev_extend = {}, next_extend = {})",
len,
depth,
extend,
div % 2 == 1 && len % 2 == 1
);
tag_engine.input(height1.to_string().as_bytes());
tag_engine.input(":".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
node1.commit_encode(&mut engine);
node2.commit_encode(&mut engine);
(MerkleNode::from_engine(engine), height1 + 1)
}
}
/// The source data for the [LNPBP-81] merklization process.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default)]
pub struct MerkleSource<T>(
/// Array of the data which will be merklized
pub Vec<T>,
);
impl<L, I> From<I> for MerkleSource<L>
where
I: IntoIterator<Item = L>,
L: CommitEncode,
{
fn from(collection: I) -> Self { Self(collection.into_iter().collect()) }
}
impl<L> FromIterator<L> for MerkleSource<L>
where
L: CommitEncode,
{
fn from_iter<T: IntoIterator<Item = L>>(iter: T) -> Self {
iter.into_iter().collect::<Vec<_>>().into()
}
}
impl<L> CommitEncode for MerkleSource<L>
where
L: ConsensusMerkleCommit,
{
fn commit_encode<E: io::Write>(&self, e: E) -> usize {
let leafs = self.0.iter().map(L::consensus_commit);
merklize(L::MERKLE_NODE_PREFIX, leafs).0.commit_encode(e)
}
}
impl<L> ConsensusCommit for MerkleSource<L>
where
L: ConsensusMerkleCommit + CommitEncode,
{
type Commitment = MerkleNode;
#[inline]
fn consensus_commit(&self) -> Self::Commitment {
MerkleNode::from_slice(&self.commit_serialize())
.expect("MerkleSource::commit_serialize must produce MerkleNode")
}
#[inline]
fn consensus_verify(&self, commitment: &Self::Commitment) -> bool {
self.consensus_commit() == *commitment
}
}
/// Converts given piece of client-side-validated data into a structure which
/// can be used in merklization process.
///
/// This dedicated structure is required since with
/// `impl From<_> for MerkleSource` we would not be able to specify a concrete
/// tagged hash, which we require in [LNPBP-81] merklization and which we
/// provide here via the [`ToMerkleSource::Leaf`] associated type holding
/// [`ConsensusMerkleCommit::MERKLE_NODE_PREFIX`] prefix value.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
pub trait ToMerkleSource {
/// Defining type of the commitment produced during the merklization process
type Leaf: ConsensusMerkleCommit;
/// Performs transformation of the data type into merklizable data
fn to_merkle_source(&self) -> MerkleSource<Self::Leaf>;
}
#[cfg(test)]
mod test {
use std::collections::BTreeMap;
use amplify::{bmap, s};
use bitcoin_hashes::hex::ToHex;
use bitcoin_hashes::{sha256d, Hash};
use strict_encoding::StrictEncode;
use super::*;
use crate::commit_encode::{strategies, Strategy};
use crate::CommitConceal;
#[test]
fn collections() {
// First, we define a data type
#[derive(
Clone,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Debug,
StrictEncode,
StrictDecode
)]
struct Item(pub String);
// Next, we say that it should be concealed using some function
// (double SHA256 hash in this case)
impl CommitConceal for Item {
type ConcealedCommitment = sha256d::Hash;
fn commit_conceal(&self) -> Self::ConcealedCommitment {
sha256d::Hash::hash(self.0.as_bytes())
}
}
// Next, we need to specify how the concealed data should be
// commit-encoded: this time we strict-serialize the hash
impl Strategy for sha256d::Hash {
type Strategy = strategies::UsingStrict;
}
// Now, we define commitment encoding for our concealable type: it
// should conceal the data
impl Strategy for Item {
type Strategy = strategies::UsingConceal | depth + 1,
false,
Some(empty_node),
);
| random_line_split |
merkle.rs | I: IntoIterator<Item = MerkleNode>,
<I as IntoIterator>::IntoIter: ExactSizeIterator<Item = MerkleNode>,
{
let mut tag_engine = sha256::Hash::engine();
tag_engine.input(prefix.as_bytes());
tag_engine.input(":merkle:".as_bytes());
let iter = data.into_iter();
let width = iter.len();
// Tagging merkle tree root
let (root, height) = merklize_inner(&tag_engine, iter, 0, false, None);
tag_engine.input("root:height=".as_bytes());
tag_engine.input(&height.to_string().into_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(&width.to_string().into_bytes());
let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
let mut engine = MerkleNode::engine();
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
root.commit_encode(&mut engine);
let tagged_root = MerkleNode::from_engine(engine);
(tagged_root, height)
}
// TODO: Optimize to avoid allocations
// In current Rust, generic iterators do not work with recursion :(
fn merklize_inner(
engine_proto: &sha256::HashEngine,
mut iter: impl ExactSizeIterator<Item = MerkleNode>,
depth: u8,
extend: bool,
empty_node: Option<MerkleNode>,
) -> (MerkleNode, u8) {
let len = iter.len() + extend as usize;
let empty_node = empty_node.unwrap_or_else(|| MerkleNode::hash(&[0xFF]));
// Computing tagged hash as per BIP-340
let mut tag_engine = engine_proto.clone();
tag_engine.input("depth=".as_bytes());
tag_engine.input(depth.to_string().as_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(len.to_string().as_bytes());
tag_engine.input(":height=".as_bytes());
let mut engine = MerkleNode::engine();
if len <= 2 {
tag_engine.input("0:".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
let mut leaf_tag_engine = engine_proto.clone();
leaf_tag_engine.input("leaf".as_bytes());
let leaf_tag =
sha256::Hash::hash(&sha256::Hash::from_engine(leaf_tag_engine));
let mut leaf_engine = MerkleNode::engine();
leaf_engine.input(&leaf_tag[..]);
leaf_engine.input(&leaf_tag[..]);
let mut leaf1 = leaf_engine.clone();
leaf1.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf1).commit_encode(&mut engine);
leaf_engine.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf_engine).commit_encode(&mut engine);
(MerkleNode::from_engine(engine), 1)
} else {
let div = len / 2 + len % 2;
let (node1, height1) = merklize_inner(
engine_proto,
// Normally we should use `iter.by_ref().take(div)`, but currently
// the Rust compiler is unable to parse recursion with generic types
iter.by_ref().take(div).collect::<Vec<_>>().into_iter(),
depth + 1,
false,
Some(empty_node),
);
let iter = if extend {
iter.chain(vec![empty_node]).collect::<Vec<_>>().into_iter()
} else {
iter.collect::<Vec<_>>().into_iter()
};
let (node2, height2) = merklize_inner(
engine_proto,
iter,
depth + 1,
(div % 2 + len % 2) / 2 == 1,
Some(empty_node),
);
assert_eq!(
height1,
height2,
"merklization algorithm failure: height of subtrees is not equal \
(width = {}, depth = {}, prev_extend = {}, next_extend = {})",
len,
depth,
extend,
div % 2 == 1 && len % 2 == 1
);
tag_engine.input(height1.to_string().as_bytes());
tag_engine.input(":".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
node1.commit_encode(&mut engine);
node2.commit_encode(&mut engine);
(MerkleNode::from_engine(engine), height1 + 1)
}
}
/// The source data for the [LNPBP-81] merklization process.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default)]
pub struct MerkleSource<T>(
/// Array of the data which will be merklized
pub Vec<T>,
);
impl<L, I> From<I> for MerkleSource<L>
where
I: IntoIterator<Item = L>,
L: CommitEncode,
{
fn from(collection: I) -> Self { Self(collection.into_iter().collect()) }
}
impl<L> FromIterator<L> for MerkleSource<L>
where
L: CommitEncode,
{
fn from_iter<T: IntoIterator<Item = L>>(iter: T) -> Self {
iter.into_iter().collect::<Vec<_>>().into()
}
}
impl<L> CommitEncode for MerkleSource<L>
where
L: ConsensusMerkleCommit,
{
fn | <E: io::Write>(&self, e: E) -> usize {
let leafs = self.0.iter().map(L::consensus_commit);
merklize(L::MERKLE_NODE_PREFIX, leafs).0.commit_encode(e)
}
}
impl<L> ConsensusCommit for MerkleSource<L>
where
L: ConsensusMerkleCommit + CommitEncode,
{
type Commitment = MerkleNode;
#[inline]
fn consensus_commit(&self) -> Self::Commitment {
MerkleNode::from_slice(&self.commit_serialize())
.expect("MerkleSource::commit_serialize must produce MerkleNode")
}
#[inline]
fn consensus_verify(&self, commitment: &Self::Commitment) -> bool {
self.consensus_commit() == *commitment
}
}
/// Converts given piece of client-side-validated data into a structure which
/// can be used in merklization process.
///
/// This dedicated structure is required since with
/// `impl From<_> for MerkleSource` we would not be able to specify a concrete
/// tagged hash, which we require in [LNPBP-81] merklization and which we
/// provide here via the [`ToMerkleSource::Leaf`] associated type holding
/// [`ConsensusMerkleCommit::MERKLE_NODE_PREFIX`] prefix value.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
pub trait ToMerkleSource {
/// Defining type of the commitment produced during the merklization process
type Leaf: ConsensusMerkleCommit;
/// Performs transformation of the data type into merklizable data
fn to_merkle_source(&self) -> MerkleSource<Self::Leaf>;
}
#[cfg(test)]
mod test {
use std::collections::BTreeMap;
use amplify::{bmap, s};
use bitcoin_hashes::hex::ToHex;
use bitcoin_hashes::{sha256d, Hash};
use strict_encoding::StrictEncode;
use super::*;
use crate::commit_encode::{strategies, Strategy};
use crate::CommitConceal;
#[test]
fn collections() {
// First, we define a data type
#[derive(
Clone,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Debug,
StrictEncode,
StrictDecode
)]
struct Item(pub String);
// Next, we say that it should be concealed using some function
// (double SHA256 hash in this case)
impl CommitConceal for Item {
type ConcealedCommitment = sha256d::Hash;
fn commit_conceal(&self) -> Self::ConcealedCommitment {
sha256d::Hash::hash(self.0.as_bytes())
}
}
// Next, we need to specify how the concealed data should be
// commit-encoded: this time we strict-serialize the hash
impl Strategy for sha256d::Hash {
type Strategy = strategies::UsingStrict;
}
// Now, we define commitment encoding for our concealable type: it
// should conceal the data
impl Strategy for Item {
type Strategy = strategies::UsingConce | commit_encode | identifier_name |
mysql_table_scanner.go | tableName string, maxFullDumpRowsCount int) (string, error) {
pks, err := utils.GetPrimaryKeys(sourceDB, dbName, tableName)
if err != nil {
return "", errors.Trace(err)
}
if len(pks) == 1 {
return pks[0], nil
}
uniqueIndexes, err := utils.GetUniqueIndexesWithoutPks(sourceDB, dbName, tableName)
if err != nil {
return "", errors.Trace(err)
}
if len(uniqueIndexes) > 0 {
return uniqueIndexes[0], nil
}
rowsCount, err := utils.EstimateRowsCount(sourceDB, dbName, tableName)
if err != nil {
return "", errors.Trace(err)
}
if rowsCount < maxFullDumpRowsCount {
return "*", nil
}
return "", errors.Errorf("no scan column can be found automatically for %s.%s", dbName, tableName)
}
func FindMaxMinValueFromDB(db *sql.DB, dbName string, tableName string, scanColumn string) (interface{}, interface{}) {
var max interface{}
var min interface{}
maxStatement := fmt.Sprintf("SELECT MAX(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName)
log.Infof("[FindMaxMinValueFromDB] statement: %s", maxStatement)
maxRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, maxStatement)
if err != nil {
log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err))
}
max = reflect.ValueOf(maxRowPtrs[0][0]).Elem().Interface()
minStatement := fmt.Sprintf("SELECT MIN(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName)
log.Infof("[FindMaxMinValueFromDB] statement: %s", minStatement)
minRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, minStatement)
if err != nil {
log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err))
}
min = reflect.ValueOf(minRowPtrs[0][0]).Elem().Interface()
return max, min
}
// LoopInBatch will iterate the table by sql like this:
// SELECT * FROM a WHERE some_key > some_value LIMIT 10000
// It will get the min, max value of the column and iterate batch by batch
func (tableScanner *TableScanner) LoopInBatch(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig, scanColumn string, max position_store.MySQLTablePosition, min position_store.MySQLTablePosition, batch int) {
pipelineName := tableScanner.pipelineName
if batch <= 0 {
log.Fatalf("[LoopInBatch] batch size is 0")
}
maxMapString, err := max.MapString()
if err != nil {
log.Fatalf("[LoopInBatch] failed to get maxString, max: %v, err: %v", max, errors.ErrorStack(err))
}
batchIdx := 0
firstLoop := true
maxReached := false
var statement string
currentMinPos, ok := tableScanner.positionStore.GetCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name))
if !ok {
tableScanner.positionStore.PutCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name), min)
currentMinPos = min
}
log.Infof("[LoopInBatch] prepare current: %v", currentMinPos)
currentMinValue := currentMinPos.Value
resultCount := 0
columnTypes, err := GetTableColumnTypes(db, tableDef.Schema, tableDef.Name)
if err != nil {
log.Fatalf("[LoopInBatch] failed to get columnType, err: %v", errors.ErrorStack(err))
}
scanIdx, err := GetScanIdx(columnTypes, scanColumn)
if err != nil {
log.Fatalf("[LoopInBatch] failed to get scanIdx, err: %v", errors.ErrorStack(err))
}
rowsBatchDataPtrs := newBatchDataPtrs(columnTypes, batch)
for {
if firstLoop {
statement = fmt.Sprintf("SELECT * FROM `%s`.`%s` WHERE %s >= ? ORDER BY %s LIMIT ?", tableDef.Schema, tableDef.Name, scanColumn, scanColumn)
firstLoop = false
} else {
statement = fmt.Sprintf("SELECT * FROM `%s`.`%s` WHERE %s > ? ORDER BY %s LIMIT ?", tableDef.Schema, tableDef.Name, scanColumn, scanColumn)
}
<-tableScanner.throttle.C
queryStartTime := time.Now()
rows, err := db.Query(statement, currentMinValue, batch)
if err != nil {
log.Fatalf("[LoopInBatch] table %s.%s, err: %v", tableDef.Schema, tableDef.Name, err)
}
rowIdx := 0
for rows.Next() {
metrics.ScannerJobFetchedCount.WithLabelValues(pipelineName).Add(1)
resultCount++
rowsBatchDataPtrs[rowIdx], err = utils.ScanGeneralRowsWithDataPtrs(rows, columnTypes, rowsBatchDataPtrs[rowIdx])
if err != nil {
log.Fatalf("[LoopInBatch] table %s.%s, scan error: %v", tableDef.Schema, tableDef.Name, errors.ErrorStack(err))
}
currentMinValue = reflect.ValueOf(rowsBatchDataPtrs[rowIdx][scanIdx]).Elem().Interface()
rowIdx++
if mysql.MySQLDataEquals(max.Value, currentMinValue) {
maxReached = true
break
}
}
err = rows.Err()
if err != nil {
log.Fatalf("[LoopInBatch] table %s.%s, rows err: %v", tableDef.Schema, tableDef.Name, err)
}
rows.Close()
// no result found for this query
if rowIdx == 0 {
log.Infof("[TableScanner] query result is 0, return")
return
}
metrics.ScannerBatchQueryDuration.WithLabelValues(pipelineName).Observe(time.Now().Sub(queryStartTime).Seconds())
batchIdx++
var lastMsg *core.Msg
// process this batch's data
for i := 0; i < rowIdx; i++ {
rowPtrs := rowsBatchDataPtrs[i]
posV := mysql.NormalizeSQLType(reflect.ValueOf(rowPtrs[scanIdx]).Elem().Interface())
position := position_store.MySQLTablePosition{Value: posV, Column: scanColumn}
msg := NewMsg(rowPtrs, columnTypes, tableDef, tableScanner.AfterMsgCommit, position)
if err := tableScanner.emitter.Emit(msg); err != nil {
log.Fatalf("[LoopInBatch] failed to emit job: %v", errors.ErrorStack(err))
}
lastMsg = msg
}
log.Infof("[LoopInBatch] sourceDB: %s, table: %s, currentMinPos: %v, maxMapString.column: %v, maxMapString.value: %v, maxMapString.type: %v, resultCount: %v",
tableDef.Schema, tableDef.Name, currentMinValue, maxMapString["column"], maxMapString["value"], maxMapString["type"], resultCount)
// we break the loop here in case currentMinPos becomes larger than the max we had at the beginning.
if maxReached {
log.Infof("[LoopInBatch] max reached")
if lastMsg != nil {
<-lastMsg.Done
// close the stream
msg := NewCloseInputStreamMsg(tableDef)
if err := tableScanner.emitter.Emit(msg); err != nil {
log.Fatalf("[LoopInBatch] failed to emit close stream msg: %v", errors.ErrorStack(err))
}
log.Infof("[LoopInBatch] sent close input stream msg")
}
return
}
select {
case <-tableScanner.ctx.Done():
log.Infof("[table_worker] canceled by context")
return
default:
continue
}
}
}
func (tableScanner *TableScanner) FindAll(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig) {
columnTypes, err := GetTableColumnTypes(db, tableDef.Schema, tableDef.Name)
if err != nil {
log.Fatalf("[FindAll] failed to get columnType: %v", errors.ErrorStack(err))
}
statement := fmt.Sprintf("SELECT * FROM `%s`.`%s`", tableDef.Schema, tableDef.Name)
allData, err := utils.QueryGeneralRowsDataWithSQL(db, statement)
if err != nil {
log.Fatalf("[FindAll] failed to find all, err: %v", errors.ErrorStack(err))
}
for i := range allData {
rowPtrs := allData[i]
msg := NewMsg(rowPtrs, columnTypes, tableDef, nil, position_store.MySQLTablePosition{})
if err := tableScanner.emitter.Emit(msg); err != nil {
log.Fatalf("[tableScanner] failed to emit: %v", errors.ErrorStack(err))
}
}
}
func (tableScanner *TableScanner) AfterMsgCommit(msg *core.Msg) error {
p, ok := msg.InputContext.(position_store.MySQLTablePosition)
if !ok {
return errors.Errorf("type invalid")
}
tableScanner.positionStore.PutCurrent(*msg.InputStreamKey, p)
return nil
}
func (tableScanner *TableScanner) | initTableDDL | identifier_name |
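The LoopInBatch comment above describes a keyset-pagination scan (SELECT * FROM t WHERE key > last ORDER BY key LIMIT n). The following is a minimal illustrative sketch of that pattern, not code from this project; the table, column, and function names are assumptions.
// Sketch (assumed context): iterate a table in batches by a monotonically
// increasing key, resuming each query after the last key seen.
// Assumes `import "database/sql"`; table/column names are hypothetical.
func scanInBatches(db *sql.DB, lastID int64, batch int) error {
	for {
		rows, err := db.Query(
			"SELECT id, payload FROM demo_table WHERE id > ? ORDER BY id LIMIT ?",
			lastID, batch)
		if err != nil {
			return err
		}
		count := 0
		for rows.Next() {
			var id int64
			var payload string
			if err := rows.Scan(&id, &payload); err != nil {
				rows.Close()
				return err
			}
			lastID = id // remember where to resume the next query
			count++
		}
		if err := rows.Err(); err != nil {
			rows.Close()
			return err
		}
		rows.Close()
		if count == 0 {
			return nil // no rows left
		}
	}
}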
|
mysql_table_scanner.go | tableName)
}
func FindMaxMinValueFromDB(db *sql.DB, dbName string, tableName string, scanColumn string) (interface{}, interface{}) {
var max interface{}
var min interface{}
maxStatement := fmt.Sprintf("SELECT MAX(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName)
log.Infof("[FindMaxMinValueFromDB] statement: %s", maxStatement)
maxRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, maxStatement)
if err != nil {
log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err))
}
max = reflect.ValueOf(maxRowPtrs[0][0]).Elem().Interface()
minStatement := fmt.Sprintf("SELECT MIN(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName)
log.Infof("[FindMaxMinValueFromDB] statement: %s", minStatement)
minRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, minStatement)
if err != nil {
log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err))
}
min = reflect.ValueOf(minRowPtrs[0][0]).Elem().Interface()
return max, min
}
// LoopInBatch will iterate the table by sql like this:
// SELECT * FROM a WHERE some_key > some_value LIMIT 10000
// It will get the min, max value of the column and iterate batch by batch
func (tableScanner *TableScanner) LoopInBatch(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig, scanColumn string, max position_store.MySQLTablePosition, min position_store.MySQLTablePosition, batch int) {
pipelineName := tableScanner.pipelineName
if batch <= 0 {
log.Fatalf("[LoopInBatch] batch size is 0")
}
maxMapString, err := max.MapString()
if err != nil {
log.Fatalf("[LoopInBatch] failed to get maxString, max: %v, err: %v", max, errors.ErrorStack(err))
}
batchIdx := 0
firstLoop := true
maxReached := false
var statement string
currentMinPos, ok := tableScanner.positionStore.GetCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name))
if !ok {
tableScanner.positionStore.PutCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name), min)
currentMinPos = min
}
log.Infof("[LoopInBatch] prepare current: %v", currentMinPos)
currentMinValue := currentMinPos.Value
resultCount := 0
columnTypes, err := GetTableColumnTypes(db, tableDef.Schema, tableDef.Name)
if err != nil {
log.Fatalf("[LoopInBatch] failed to get columnType, err: %v", errors.ErrorStack(err))
}
scanIdx, err := GetScanIdx(columnTypes, scanColumn)
if err != nil {
log.Fatalf("[LoopInBatch] failed to get scanIdx, err: %v", errors.ErrorStack(err))
}
rowsBatchDataPtrs := newBatchDataPtrs(columnTypes, batch)
for {
if firstLoop {
statement = fmt.Sprintf("SELECT * FROM `%s`.`%s` WHERE %s >= ? ORDER BY %s LIMIT ?", tableDef.Schema, tableDef.Name, scanColumn, scanColumn)
firstLoop = false
} else {
statement = fmt.Sprintf("SELECT * FROM `%s`.`%s` WHERE %s > ? ORDER BY %s LIMIT ?", tableDef.Schema, tableDef.Name, scanColumn, scanColumn)
}
<-tableScanner.throttle.C
queryStartTime := time.Now()
rows, err := db.Query(statement, currentMinValue, batch)
if err != nil {
log.Fatalf("[LoopInBatch] table %s.%s, err: %v", tableDef.Schema, tableDef.Name, err)
}
rowIdx := 0
for rows.Next() {
metrics.ScannerJobFetchedCount.WithLabelValues(pipelineName).Add(1)
resultCount++
rowsBatchDataPtrs[rowIdx], err = utils.ScanGeneralRowsWithDataPtrs(rows, columnTypes, rowsBatchDataPtrs[rowIdx])
if err != nil {
log.Fatalf("[LoopInBatch] table %s.%s, scan error: %v", tableDef.Schema, tableDef.Name, errors.ErrorStack(err))
}
currentMinValue = reflect.ValueOf(rowsBatchDataPtrs[rowIdx][scanIdx]).Elem().Interface()
rowIdx++
if mysql.MySQLDataEquals(max.Value, currentMinValue) {
maxReached = true
break
}
}
err = rows.Err()
if err != nil {
log.Fatalf("[LoopInBatch] table %s.%s, rows err: %v", tableDef.Schema, tableDef.Name, err)
}
rows.Close()
// no result found for this query
if rowIdx == 0 {
log.Infof("[TableScanner] query result is 0, return")
return
}
metrics.ScannerBatchQueryDuration.WithLabelValues(pipelineName).Observe(time.Now().Sub(queryStartTime).Seconds())
batchIdx++
var lastMsg *core.Msg
// process this batch's data
for i := 0; i < rowIdx; i++ {
rowPtrs := rowsBatchDataPtrs[i]
posV := mysql.NormalizeSQLType(reflect.ValueOf(rowPtrs[scanIdx]).Elem().Interface())
position := position_store.MySQLTablePosition{Value: posV, Column: scanColumn}
msg := NewMsg(rowPtrs, columnTypes, tableDef, tableScanner.AfterMsgCommit, position)
if err := tableScanner.emitter.Emit(msg); err != nil {
log.Fatalf("[LoopInBatch] failed to emit job: %v", errors.ErrorStack(err))
}
lastMsg = msg
}
log.Infof("[LoopInBatch] sourceDB: %s, table: %s, currentMinPos: %v, maxMapString.column: %v, maxMapString.value: %v, maxMapString.type: %v, resultCount: %v",
tableDef.Schema, tableDef.Name, currentMinValue, maxMapString["column"], maxMapString["value"], maxMapString["type"], resultCount)
// we break the loop here in case currentMinPos becomes larger than the max we had at the beginning.
if maxReached {
log.Infof("[LoopInBatch] max reached")
if lastMsg != nil {
<-lastMsg.Done
// close the stream
msg := NewCloseInputStreamMsg(tableDef)
if err := tableScanner.emitter.Emit(msg); err != nil {
log.Fatalf("[LoopInBatch] failed to emit close stream msg: %v", errors.ErrorStack(err))
}
log.Infof("[LoopInBatch] sent close input stream msg")
}
return
}
select {
case <-tableScanner.ctx.Done():
log.Infof("[table_worker] canceled by context")
return
default:
continue
}
}
}
func (tableScanner *TableScanner) FindAll(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig) {
columnTypes, err := GetTableColumnTypes(db, tableDef.Schema, tableDef.Name)
if err != nil {
log.Fatalf("[FindAll] failed to get columnType: %v", errors.ErrorStack(err))
}
statement := fmt.Sprintf("SELECT * FROM `%s`.`%s`", tableDef.Schema, tableDef.Name)
allData, err := utils.QueryGeneralRowsDataWithSQL(db, statement)
if err != nil {
log.Fatalf("[FindAll] failed to find all, err: %v", errors.ErrorStack(err))
}
for i := range allData {
rowPtrs := allData[i]
msg := NewMsg(rowPtrs, columnTypes, tableDef, nil, position_store.MySQLTablePosition{})
if err := tableScanner.emitter.Emit(msg); err != nil {
log.Fatalf("[tableScanner] failed to emit: %v", errors.ErrorStack(err))
}
}
}
func (tableScanner *TableScanner) AfterMsgCommit(msg *core.Msg) error {
p, ok := msg.InputContext.(position_store.MySQLTablePosition)
if !ok {
return errors.Errorf("type invalid")
}
tableScanner.positionStore.PutCurrent(*msg.InputStreamKey, p)
return nil
}
func (tableScanner *TableScanner) initTableDDL(table *schema_store.Table) error {
row := tableScanner.db.QueryRow(fmt.Sprintf("SHOW CREATE TABLE `%s`.`%s`", table.Schema, table.Name))
var t, create string
err := row.Scan(&t, &create)
if err != nil {
return errors.Trace(err)
}
msg := NewCreateTableMsg(tableScanner.parser, table, create)
if err := tableScanner.emitter.Emit(msg); err != nil {
return errors.Trace(err)
}
<-msg.Done
return nil
}
func GetTableColumnTypes(db *sql.DB, schema string, table string) ([]*sql.ColumnType, error) | {
statement := fmt.Sprintf("SELECT * FROM `%s`.`%s` LIMIT 1", schema, table)
rows, err := db.Query(statement)
if err != nil {
return nil, errors.Trace(err)
}
defer rows.Close()
return rows.ColumnTypes()
} | identifier_body |
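FindMaxMinValueFromDB above issues two aggregate queries to find the scan bounds. Below is a condensed sketch of the same idea with simplified error handling; it is an illustration, not the project's API.
// Sketch (assumed context): probe the MIN/MAX of the scan column so the batch
// loop knows where to start and stop. Assumes `import ("database/sql"; "fmt")`.
func columnBounds(db *sql.DB, dbName, table, column string) (min, max interface{}, err error) {
	maxStmt := fmt.Sprintf("SELECT MAX(`%s`) FROM `%s`.`%s`", column, dbName, table)
	if err = db.QueryRow(maxStmt).Scan(&max); err != nil {
		return nil, nil, err
	}
	minStmt := fmt.Sprintf("SELECT MIN(`%s`) FROM `%s`.`%s`", column, dbName, table)
	if err = db.QueryRow(minStmt).Scan(&min); err != nil {
		return nil, nil, err
	}
	return min, max, nil
}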
|
mysql_table_scanner.go | log "github.com/sirupsen/logrus"
"github.com/moiot/gravity/pkg/core"
"github.com/moiot/gravity/pkg/metrics"
"github.com/moiot/gravity/pkg/mysql"
"github.com/moiot/gravity/pkg/position_store"
"github.com/moiot/gravity/pkg/schema_store"
"github.com/moiot/gravity/pkg/utils"
)
var ErrTableEmpty = errors.New("table_scanner: this table is empty")
type TableScanner struct {
pipelineName string
tableWorkC chan *TableWork
cfg *PluginConfig
positionStore position_store.MySQLTablePositionStore
db *sql.DB
emitter core.Emitter
throttle *time.Ticker
ctx context.Context
schemaStore schema_store.SchemaStore
wg sync.WaitGroup
parser *parser.Parser
}
func (tableScanner *TableScanner) Start() error {
tableScanner.wg.Add(1)
go func() {
defer tableScanner.wg.Done()
for {
select {
case work, ok := <-tableScanner.tableWorkC:
if !ok {
log.Infof("[TableScanner] queue closed, exit")
return
}
err := tableScanner.initTableDDL(work.TableDef)
if err != nil {
log.Fatalf("[TableScanner] initTableDDL for %s.%s, err: %s", work.TableDef.Schema, work.TableDef.Name, err)
}
err = tableScanner.InitTablePosition(work.TableDef, work.TableConfig)
if err == ErrTableEmpty {
log.Infof("[TableScanner] Target table is empty. schema: %v, table: %v",
work.TableDef.Schema, work.TableDef.Name)
continue
} else if err != nil {
log.Fatalf("[TableScanner] InitTablePosition failed: %v", errors.ErrorStack(err))
}
max, min, ok := tableScanner.positionStore.GetMaxMin(utils.TableIdentity(work.TableDef.Schema, work.TableDef.Name))
log.Infof("positionStore.GetMaxMin: max value type: %v, max %v; min value type: %v, min %v", reflect.TypeOf(max.Value), max, reflect.TypeOf(min.Value), min)
scanColumn := max.Column
if !ok {
log.Fatalf("[table_scanner] failed to find max min")
}
// If the scan column is *, then we do a full dump of the table
if scanColumn == "*" {
tableScanner.FindAll(tableScanner.db, work.TableDef, work.TableConfig)
} else {
tableScanner.LoopInBatch(
tableScanner.db, work.TableDef,
work.TableConfig, scanColumn,
max,
min,
tableScanner.cfg.TableScanBatch)
if tableScanner.ctx.Err() == nil {
log.Infof("[table_worker] LoopInBatch done with table %s", work.TableDef.Name)
} else if tableScanner.ctx.Err() == context.Canceled {
log.Infof("[TableScanner] LoopInBatch canceled")
return
} else {
log.Fatalf("[TableScanner] LoopInBatch unknow case,err: %v", tableScanner.ctx.Err())
}
}
case <-tableScanner.ctx.Done():
log.Infof("[TableScanner] canceled by context")
return
}
}
}()
return nil
}
func (tableScanner *TableScanner) InitTablePosition(tableDef *schema_store.Table, tableConfig *TableConfig) error {
_, _, ok := tableScanner.positionStore.GetMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name))
if !ok {
log.Infof("[InitTablePosition] init table position")
// detect scan column first
var scanColumn string
var scanType string
column, err := DetectScanColumn(tableScanner.db, tableDef.Schema, tableDef.Name, tableScanner.cfg.MaxFullDumpCount)
if err != nil {
return errors.Trace(err)
}
scanColumn = column
if scanColumn == "*" {
maxPos := position_store.MySQLTablePosition{Column: scanColumn}
minPos := position_store.MySQLTablePosition{Column: scanColumn}
tableScanner.positionStore.PutMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name), maxPos, minPos)
} else {
max, min := FindMaxMinValueFromDB(tableScanner.db, tableDef.Schema, tableDef.Name, scanColumn)
maxPos := position_store.MySQLTablePosition{Value: max, Type: scanType, Column: scanColumn}
empty, err := tableScanner.validateTableEmpty(maxPos)
if err != nil {
return errors.Trace(err)
}
if empty {
return ErrTableEmpty
}
minPos := position_store.MySQLTablePosition{Value: min, Type: scanType, Column: scanColumn}
tableScanner.positionStore.PutMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name), maxPos, minPos)
log.Infof("[InitTablePosition] PutMaxMin: max value type: %v, max: %v; min value type: %v, min: %v", reflect.TypeOf(maxPos.Value), maxPos, reflect.TypeOf(minPos.Value), minPos)
}
log.Infof("[InitTablePosition] schema: %v, table: %v, scanColumn: %v", tableDef.Schema, tableDef.Name, scanColumn)
}
return nil
}
func (tableScanner *TableScanner) validateTableEmpty(pos position_store.MySQLTablePosition) (bool, error) {
mapStr, err := pos.MapString()
if err != nil {
return false, errors.Trace(err)
}
return mapStr["value"] == "", nil
}
func (tableScanner *TableScanner) Wait() {
tableScanner.wg.Wait()
}
// DetectScanColumn finds a column that we use to scan the table
// SHOW INDEX FROM ..
// Pick primary key, if there is only one primary key
// If pk not found try using unique index
// fail
func DetectScanColumn(sourceDB *sql.DB, dbName string, tableName string, maxFullDumpRowsCount int) (string, error) {
pks, err := utils.GetPrimaryKeys(sourceDB, dbName, tableName)
if err != nil {
return "", errors.Trace(err)
}
if len(pks) == 1 {
return pks[0], nil
}
uniqueIndexes, err := utils.GetUniqueIndexesWithoutPks(sourceDB, dbName, tableName)
if err != nil {
return "", errors.Trace(err)
}
if len(uniqueIndexes) > 0 {
return uniqueIndexes[0], nil
}
rowsCount, err := utils.EstimateRowsCount(sourceDB, dbName, tableName)
if err != nil {
return "", errors.Trace(err)
}
if rowsCount < maxFullDumpRowsCount {
return "*", nil
}
return "", errors.Errorf("no scan column can be found automatically for %s.%s", dbName, tableName)
}
func FindMaxMinValueFromDB(db *sql.DB, dbName string, tableName string, scanColumn string) (interface{}, interface{}) {
var max interface{}
var min interface{}
maxStatement := fmt.Sprintf("SELECT MAX(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName)
log.Infof("[FindMaxMinValueFromDB] statement: %s", maxStatement)
maxRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, maxStatement)
if err != nil {
log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err))
}
max = reflect.ValueOf(maxRowPtrs[0][0]).Elem().Interface()
minStatement := fmt.Sprintf("SELECT MIN(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName)
log.Infof("[FindMaxMinValueFromDB] statement: %s", minStatement)
minRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, minStatement)
if err != nil {
log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err))
}
min = reflect.ValueOf(minRowPtrs[0][0]).Elem().Interface()
return max, min
}
// LoopInBatch will iterate the table by sql like this:
// SELECT * FROM a WHERE some_key > some_value LIMIT 10000
// It will get the min, max value of the column and iterate batch by batch
func (tableScanner *TableScanner) LoopInBatch(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig, scanColumn string, max position_store.MySQLTablePosition, min position_store.MySQLTablePosition, batch int) {
pipelineName := tableScanner.pipelineName
if batch <= 0 {
log.Fatalf("[LoopInBatch] batch size is 0")
}
maxMapString, err := max.MapString()
if err != nil {
log.Fatalf("[LoopInBatch] failed to get maxString, max: %v, err: %v", max, errors.ErrorStack(err))
}
batchIdx := 0
firstLoop := true
maxReached := false
var statement string
currentMinPos, ok := tableScanner.positionStore.GetCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name))
if !ok {
tableScanner.positionStore.PutCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name), min)
currentMinPos = min
}
|
"github.com/juju/errors"
"github.com/pingcap/parser" | random_line_split |
|
mysql_table_scanner.go | }
func (tableScanner *TableScanner) Wait() {
tableScanner.wg.Wait()
}
// DetectScanColumn finds a column that we use to scan the table
// SHOW INDEX FROM ..
// Pick primary key, if there is only one primary key
// If pk not found try using unique index
// fail
func DetectScanColumn(sourceDB *sql.DB, dbName string, tableName string, maxFullDumpRowsCount int) (string, error) {
pks, err := utils.GetPrimaryKeys(sourceDB, dbName, tableName)
if err != nil {
return "", errors.Trace(err)
}
if len(pks) == 1 {
return pks[0], nil
}
uniqueIndexes, err := utils.GetUniqueIndexesWithoutPks(sourceDB, dbName, tableName)
if err != nil {
return "", errors.Trace(err)
}
if len(uniqueIndexes) > 0 {
return uniqueIndexes[0], nil
}
rowsCount, err := utils.EstimateRowsCount(sourceDB, dbName, tableName)
if err != nil {
return "", errors.Trace(err)
}
if rowsCount < maxFullDumpRowsCount {
return "*", nil
}
return "", errors.Errorf("no scan column can be found automatically for %s.%s", dbName, tableName)
}
func FindMaxMinValueFromDB(db *sql.DB, dbName string, tableName string, scanColumn string) (interface{}, interface{}) {
var max interface{}
var min interface{}
maxStatement := fmt.Sprintf("SELECT MAX(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName)
log.Infof("[FindMaxMinValueFromDB] statement: %s", maxStatement)
maxRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, maxStatement)
if err != nil {
log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err))
}
max = reflect.ValueOf(maxRowPtrs[0][0]).Elem().Interface()
minStatement := fmt.Sprintf("SELECT MIN(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName)
log.Infof("[FindMaxMinValueFromDB] statement: %s", minStatement)
minRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, minStatement)
if err != nil {
log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err))
}
min = reflect.ValueOf(minRowPtrs[0][0]).Elem().Interface()
return max, min
}
// LoopInBatch will iterate the table by sql like this:
// SELECT * FROM a WHERE some_key > some_value LIMIT 10000
// It will get the min, max value of the column and iterate batch by batch
func (tableScanner *TableScanner) LoopInBatch(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig, scanColumn string, max position_store.MySQLTablePosition, min position_store.MySQLTablePosition, batch int) {
pipelineName := tableScanner.pipelineName
if batch <= 0 {
log.Fatalf("[LoopInBatch] batch size is 0")
}
maxMapString, err := max.MapString()
if err != nil {
log.Fatalf("[LoopInBatch] failed to get maxString, max: %v, err: %v", max, errors.ErrorStack(err))
}
batchIdx := 0
firstLoop := true
maxReached := false
var statement string
currentMinPos, ok := tableScanner.positionStore.GetCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name))
if !ok {
tableScanner.positionStore.PutCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name), min)
currentMinPos = min
}
log.Infof("[LoopInBatch] prepare current: %v", currentMinPos)
currentMinValue := currentMinPos.Value
resultCount := 0
columnTypes, err := GetTableColumnTypes(db, tableDef.Schema, tableDef.Name)
if err != nil {
log.Fatalf("[LoopInBatch] failed to get columnType, err: %v", errors.ErrorStack(err))
}
scanIdx, err := GetScanIdx(columnTypes, scanColumn)
if err != nil {
log.Fatalf("[LoopInBatch] failed to get scanIdx, err: %v", errors.ErrorStack(err))
}
rowsBatchDataPtrs := newBatchDataPtrs(columnTypes, batch)
for {
if firstLoop {
statement = fmt.Sprintf("SELECT * FROM `%s`.`%s` WHERE %s >= ? ORDER BY %s LIMIT ?", tableDef.Schema, tableDef.Name, scanColumn, scanColumn)
firstLoop = false
} else {
statement = fmt.Sprintf("SELECT * FROM `%s`.`%s` WHERE %s > ? ORDER BY %s LIMIT ?", tableDef.Schema, tableDef.Name, scanColumn, scanColumn)
}
<-tableScanner.throttle.C
queryStartTime := time.Now()
rows, err := db.Query(statement, currentMinValue, batch)
if err != nil {
log.Fatalf("[LoopInBatch] table %s.%s, err: %v", tableDef.Schema, tableDef.Name, err)
}
rowIdx := 0
for rows.Next() {
metrics.ScannerJobFetchedCount.WithLabelValues(pipelineName).Add(1)
resultCount++
rowsBatchDataPtrs[rowIdx], err = utils.ScanGeneralRowsWithDataPtrs(rows, columnTypes, rowsBatchDataPtrs[rowIdx])
if err != nil {
log.Fatalf("[LoopInBatch] table %s.%s, scan error: %v", tableDef.Schema, tableDef.Name, errors.ErrorStack(err))
}
currentMinValue = reflect.ValueOf(rowsBatchDataPtrs[rowIdx][scanIdx]).Elem().Interface()
rowIdx++
if mysql.MySQLDataEquals(max.Value, currentMinValue) {
maxReached = true
break
}
}
err = rows.Err()
if err != nil {
log.Fatalf("[LoopInBatch] table %s.%s, rows err: %v", tableDef.Schema, tableDef.Name, err)
}
rows.Close()
// no result found for this query
if rowIdx == 0 {
log.Infof("[TableScanner] query result is 0, return")
return
}
metrics.ScannerBatchQueryDuration.WithLabelValues(pipelineName).Observe(time.Now().Sub(queryStartTime).Seconds())
batchIdx++
var lastMsg *core.Msg
// process this batch's data
for i := 0; i < rowIdx; i++ {
rowPtrs := rowsBatchDataPtrs[i]
posV := mysql.NormalizeSQLType(reflect.ValueOf(rowPtrs[scanIdx]).Elem().Interface())
position := position_store.MySQLTablePosition{Value: posV, Column: scanColumn}
msg := NewMsg(rowPtrs, columnTypes, tableDef, tableScanner.AfterMsgCommit, position)
if err := tableScanner.emitter.Emit(msg); err != nil {
log.Fatalf("[LoopInBatch] failed to emit job: %v", errors.ErrorStack(err))
}
lastMsg = msg
}
log.Infof("[LoopInBatch] sourceDB: %s, table: %s, currentMinPos: %v, maxMapString.column: %v, maxMapString.value: %v, maxMapString.type: %v, resultCount: %v",
tableDef.Schema, tableDef.Name, currentMinValue, maxMapString["column"], maxMapString["value"], maxMapString["type"], resultCount)
// we break the loop here in case currentMinPos becomes larger than the max we had at the beginning.
if maxReached {
log.Infof("[LoopInBatch] max reached")
if lastMsg != nil {
<-lastMsg.Done
// close the stream
msg := NewCloseInputStreamMsg(tableDef)
if err := tableScanner.emitter.Emit(msg); err != nil {
log.Fatalf("[LoopInBatch] failed to emit close stream msg: %v", errors.ErrorStack(err))
}
log.Infof("[LoopInBatch] sent close input stream msg")
}
return
}
select {
case <-tableScanner.ctx.Done():
log.Infof("[table_worker] canceled by context")
return
default:
continue
}
}
}
func (tableScanner *TableScanner) FindAll(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig) {
columnTypes, err := GetTableColumnTypes(db, tableDef.Schema, tableDef.Name)
if err != nil {
log.Fatalf("[FindAll] failed to get columnType: %v", errors.ErrorStack(err))
}
statement := fmt.Sprintf("SELECT * FROM `%s`.`%s`", tableDef.Schema, tableDef.Name)
allData, err := utils.QueryGeneralRowsDataWithSQL(db, statement)
if err != nil {
log.Fatalf("[FindAll] failed to find all, err: %v", errors.ErrorStack(err))
}
for i := range allData {
rowPtrs := allData[i]
msg := NewMsg(rowPtrs, columnTypes, tableDef, nil, position_store.MySQLTablePosition{})
if err := tableScanner.emitter.Emit(msg); err != nil | {
log.Fatalf("[tableScanner] failed to emit: %v", errors.ErrorStack(err))
} | conditional_block |
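DetectScanColumn above picks the scan key in a fixed order: a single-column primary key, then a unique index, then a full dump when the estimated row count is small enough. Below is a compact sketch of that decision with stand-in inputs instead of the real utils helpers.
// Sketch (assumed context): the PK -> unique index -> full dump fallback.
// Assumes `import "fmt"`; the slices and counts come from hypothetical helpers.
func chooseScanColumn(pks, uniqueIndexes []string, estimatedRows, maxFullDump int) (string, error) {
	if len(pks) == 1 {
		return pks[0], nil // a single-column primary key is preferred
	}
	if len(uniqueIndexes) > 0 {
		return uniqueIndexes[0], nil // otherwise any unique index works
	}
	if estimatedRows < maxFullDump {
		return "*", nil // small table: scan everything in one pass
	}
	return "", fmt.Errorf("no scan column can be chosen")
}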
|
index.ts | this._className = className[0];
// 3. switch from this `toString` to a much simpler one
this.toString = toStringCachedAtom;
return className[0];
};
};
const createServerToString = (
sheets: { [screen: string]: ISheet },
screens: IScreens = {},
cssClassnameProvider: (atom: IAtom, seq: number | null) => [string, string?] // [className, pseudo]
) => {
return function toString(this: IAtom) {
const className = cssClassnameProvider(this, null);
const value = this.value;
let cssRule = "";
if (className.length === 2) {
cssRule = `.${className[0]}${className[1]}{${this.cssHyphenProp}:${value};}`;
} else {
cssRule = `.${className[0]}{${this.cssHyphenProp}:${value};}`;
}
sheets[this.screen].insertRule(
this.screen ? screens[this.screen](cssRule) : cssRule
);
// We do not clean out the atom here, because it will be reused
// to inject multiple times for each request
// 1. put on a _className
this._className = className[0];
// 2. switch from this `toString` to a much simpler one
this.toString = toStringCachedAtom;
return className[0];
};
};
const createThemeToString = (classPrefix: string, variablesSheet: ISheet) =>
function toString(this: IThemeAtom) {
const themeClassName = `${classPrefix ? `${classPrefix}-` : ""}theme-${
this.name
}`;
// @ts-ignore
variablesSheet.insertRule(
`.${themeClassName}{${Object.keys(this.definition).reduce(
(aggr, tokenType) => {
// @ts-ignore
return `${aggr}${Object.keys(this.definition[tokenType]).reduce(
(subAggr, tokenKey) => {
// @ts-ignore
return `${subAggr}--${tokenType}-${tokenKey}:${this.definition[tokenType][tokenKey]};`;
},
aggr
)}`;
},
""
)}}`
);
this.toString = () => themeClassName;
return themeClassName;
};
const composeIntoMap = (
map: Map<string, IAtom>,
atoms: (IAtom | IComposedAtom)[]
) => {
let i = atoms.length - 1;
for (; i >= 0; i--) {
const item = atoms[i];
// atoms can be undefined, null, false or '' using ternary-like
// expressions with the properties
if (item && item[ATOM] && "atoms" in item) {
composeIntoMap(map, item.atoms);
} else if (item && item[ATOM]) {
if (!map.has((item as IAtom).id)) {
map.set((item as IAtom).id, item as IAtom);
}
} else if (item) {
map.set((item as unknown) as string, item as IAtom);
}
}
};
export const createTokens = <T extends ITokensDefinition>(tokens: T) => {
return tokens;
};
export const createCss = <T extends IConfig>(
config: T,
env: Window | null = typeof window === "undefined" ? null : window
): TCss<T> => {
const showFriendlyClassnames =
typeof config.showFriendlyClassnames === "boolean"
? config.showFriendlyClassnames
: process.env.NODE_ENV === "development";
const prefix = config.prefix || "";
const { vendorPrefix, vendorProps } = env
? getVendorPrefixAndProps(env)
: { vendorPrefix: "-node-", vendorProps: [] };
if (env && hotReloadingCache.has(prefix)) {
const instance = hotReloadingCache.get(prefix);
instance.dispose();
}
// pre-compute class prefix
const classPrefix = prefix
? showFriendlyClassnames
? `${prefix}_`
: prefix
: "";
const cssClassnameProvider = (
atom: IAtom,
seq: number | null
): [string, string?] => {
const hash =
seq === null
? hashString(
`${atom.screen || ""}${atom.cssHyphenProp.replace(
/-(moz|webkit|ms)-/,
""
)}${atom.pseudo || ""}${atom.value}`
)
: seq;
const name = showFriendlyClassnames
? `${atom.screen ? `${atom.screen}_` : ""}${atom.cssHyphenProp
.replace(/-(moz|webkit|ms)-/, "")
.split("-")
.map((part) => part[0])
.join("")}_${hash}`
: `_${hash}`;
const className = `${classPrefix}${name}`;
if (atom.pseudo) {
return [className, atom.pseudo];
}
return [className];
};
const { tags, sheets } = createSheets(env, config.screens);
const preInjectedRules = new Set<string>();
// tslint:disable-next-line
for (const sheet in sheets) {
for (let x = 0; x < sheets[sheet].cssRules.length; x++) {
preInjectedRules.add(sheets[sheet].cssRules[x].selectorText);
}
}
let toString = env
? createToString(
sheets,
config.screens,
cssClassnameProvider,
preInjectedRules
)
: createServerToString(sheets, config.screens, cssClassnameProvider);
let themeToString = createThemeToString(classPrefix, sheets.__variables__);
const compose = (...atoms: IAtom[]): IComposedAtom => {
const map = new Map<string, IAtom>();
composeIntoMap(map, atoms);
return {
atoms: Array.from(map.values()),
toString: toStringCompose,
[ATOM]: true,
};
};
const createAtom = (
cssProp: string,
value: any,
screen = "",
pseudo?: string
) => {
const token: any = cssPropToToken[cssProp as keyof ICssPropToToken<any>];
let tokenValue: any;
if (token) {
if (Array.isArray(token) && Array.isArray(value)) {
tokenValue = token.map((tokenName, index) =>
token &&
(tokens as any)[tokenName] &&
(tokens as any)[tokenName][value[index]]
? (tokens as any)[tokenName][value[index]]
: value[index]
);
} else {
tokenValue =
token && (tokens as any)[token] && (tokens as any)[token][value]
? (tokens as any)[token][value]
: value;
}
} else {
tokenValue = value;
}
const isVendorPrefixed = cssProp[0] === cssProp[0].toUpperCase();
// generate id used for specificity check
// two atoms are considered equal in regard to their specificity if the id is equal
const id =
cssProp.toLowerCase() +
(pseudo ? pseudo.split(":").sort().join(":") : "") +
screen;
// make a uid accounting for different values
const uid = id + value;
// If this was created before return the cached atom
if (atomCache.has(uid)) {
return atomCache.get(uid)!;
}
// prepare the cssProp
let cssHyphenProp = cssProp
.split(/(?=[A-Z])/)
.map((g) => g.toLowerCase())
.join("-");
if (isVendorPrefixed) {
cssHyphenProp = `-${cssHyphenProp}`;
} else if (vendorProps.includes(`${vendorPrefix}${cssHyphenProp}`)) {
cssHyphenProp = `${vendorPrefix}${cssHyphenProp}`;
}
// Create a new atom
const atom: IAtom = {
id,
cssHyphenProp,
value: tokenValue,
pseudo,
screen,
toString,
[ATOM]: true,
};
// Cache it
atomCache.set(uid, atom);
return atom;
};
const createCssAtoms = (
props: {
[key: string]: any;
},
cb: (atom: IAtom) => void,
screen = "",
pseudo: string[] = [],
canCallUtils = true,
canCallSpecificityProps = true
) => {
// tslint:disable-next-line
for (const prop in props) {
if (config.screens && prop in config.screens) {
if (screen) {
throw new Error(
`@stitches/css - You are nesting the screen "${prop}" into "${screen}", that makes no sense? :-)`
);
}
createCssAtoms(props[prop], cb, prop, pseudo);
} else if (!prop[0].match(/[a-zA-Z]/)) {
createCssAtoms(props[prop], cb, screen, pseudo.concat(prop));
} else if (canCallUtils && prop in utils) | else if (canCallSpecificityProps && prop in specificityProps) {
createCssAtoms(
specificityProps[prop](config)(props[prop]) as any,
cb,
screen,
| {
createCssAtoms(
utils[prop](config)(props[prop]) as any,
cb,
screen,
pseudo,
false
);
} | conditional_block |
index.ts | this._className = className;
// @ts-ignore
this.toString = toStringCachedAtom;
return className;
};
const createToString = (
sheets: { [screen: string]: ISheet },
screens: IScreens = {},
cssClassnameProvider: (atom: IAtom, seq: number | null) => [string, string?], // [className, pseudo]
preInjectedRules: Set<string>
) => {
let seq = 0;
return function toString(this: IAtom) {
const className = cssClassnameProvider(
this,
preInjectedRules.size ? null : seq++
);
const shouldInject =
!preInjectedRules.size || !preInjectedRules.has(`.${className[0]}`);
const value = this.value;
if (shouldInject) {
let cssRule = "";
if (className.length === 2) {
cssRule = `.${className[0]}${className[1]}{${this.cssHyphenProp}:${value};}`;
} else {
cssRule = `.${className[0]}{${this.cssHyphenProp}:${value};}`;
}
sheets[this.screen].insertRule(
this.screen ? screens[this.screen](cssRule) : cssRule
);
}
// We are switching this atom from IAtom to a simpler representation
// 1. delete everything but `id` for specificity check
// @ts-ignore
this.cssHyphenProp = this.value = this.pseudo = this.screen = undefined;
// 2. put on a _className
this._className = className[0];
// 3. switch from this `toString` to a much simpler one
this.toString = toStringCachedAtom;
return className[0];
};
};
const createServerToString = (
sheets: { [screen: string]: ISheet },
screens: IScreens = {},
cssClassnameProvider: (atom: IAtom, seq: number | null) => [string, string?] // [className, pseudo]
) => {
return function toString(this: IAtom) {
const className = cssClassnameProvider(this, null);
const value = this.value;
let cssRule = "";
if (className.length === 2) {
cssRule = `.${className[0]}${className[1]}{${this.cssHyphenProp}:${value};}`;
} else {
cssRule = `.${className[0]}{${this.cssHyphenProp}:${value};}`;
}
sheets[this.screen].insertRule(
this.screen ? screens[this.screen](cssRule) : cssRule
);
// We do not clean out the atom here, because it will be reused
// to inject multiple times for each request
// 1. put on a _className
this._className = className[0];
// 2. switch from this `toString` to a much simpler one
this.toString = toStringCachedAtom;
return className[0];
};
};
const createThemeToString = (classPrefix: string, variablesSheet: ISheet) =>
function toString(this: IThemeAtom) {
const themeClassName = `${classPrefix ? `${classPrefix}-` : ""}theme-${
this.name
}`;
// @ts-ignore
variablesSheet.insertRule(
`.${themeClassName}{${Object.keys(this.definition).reduce(
(aggr, tokenType) => {
// @ts-ignore
return `${aggr}${Object.keys(this.definition[tokenType]).reduce(
(subAggr, tokenKey) => {
// @ts-ignore
return `${subAggr}--${tokenType}-${tokenKey}:${this.definition[tokenType][tokenKey]};`;
},
aggr
)}`;
},
""
)}}`
);
this.toString = () => themeClassName;
return themeClassName;
};
const composeIntoMap = (
map: Map<string, IAtom>,
atoms: (IAtom | IComposedAtom)[]
) => {
let i = atoms.length - 1;
for (; i >= 0; i--) {
const item = atoms[i];
// atoms can be undefined, null, false or '' using ternary-like
// expressions with the properties
if (item && item[ATOM] && "atoms" in item) {
composeIntoMap(map, item.atoms);
} else if (item && item[ATOM]) {
if (!map.has((item as IAtom).id)) {
map.set((item as IAtom).id, item as IAtom);
}
} else if (item) {
map.set((item as unknown) as string, item as IAtom);
}
}
};
export const createTokens = <T extends ITokensDefinition>(tokens: T) => {
return tokens;
};
export const createCss = <T extends IConfig>(
config: T,
env: Window | null = typeof window === "undefined" ? null : window
): TCss<T> => {
const showFriendlyClassnames =
typeof config.showFriendlyClassnames === "boolean"
? config.showFriendlyClassnames
: process.env.NODE_ENV === "development";
const prefix = config.prefix || "";
const { vendorPrefix, vendorProps } = env
? getVendorPrefixAndProps(env)
: { vendorPrefix: "-node-", vendorProps: [] };
if (env && hotReloadingCache.has(prefix)) {
const instance = hotReloadingCache.get(prefix);
instance.dispose();
}
// pre-compute class prefix
const classPrefix = prefix
? showFriendlyClassnames
? `${prefix}_`
: prefix
: "";
const cssClassnameProvider = (
atom: IAtom,
seq: number | null
): [string, string?] => {
const hash =
seq === null
? hashString(
`${atom.screen || ""}${atom.cssHyphenProp.replace(
/-(moz|webkit|ms)-/,
""
)}${atom.pseudo || ""}${atom.value}`
)
: seq;
const name = showFriendlyClassnames
? `${atom.screen ? `${atom.screen}_` : ""}${atom.cssHyphenProp
.replace(/-(moz|webkit|ms)-/, "")
.split("-")
.map((part) => part[0])
.join("")}_${hash}`
: `_${hash}`;
const className = `${classPrefix}${name}`;
if (atom.pseudo) {
return [className, atom.pseudo];
}
return [className];
};
const { tags, sheets } = createSheets(env, config.screens);
const preInjectedRules = new Set<string>();
// tslint:disable-next-line
for (const sheet in sheets) {
for (let x = 0; x < sheets[sheet].cssRules.length; x++) {
preInjectedRules.add(sheets[sheet].cssRules[x].selectorText);
}
}
let toString = env
? createToString(
sheets,
config.screens,
cssClassnameProvider,
preInjectedRules
)
: createServerToString(sheets, config.screens, cssClassnameProvider);
let themeToString = createThemeToString(classPrefix, sheets.__variables__);
const compose = (...atoms: IAtom[]): IComposedAtom => {
const map = new Map<string, IAtom>();
composeIntoMap(map, atoms);
return {
atoms: Array.from(map.values()),
toString: toStringCompose,
[ATOM]: true,
};
};
const createAtom = (
cssProp: string,
value: any,
screen = "",
pseudo?: string
) => {
const token: any = cssPropToToken[cssProp as keyof ICssPropToToken<any>];
let tokenValue: any;
if (token) {
if (Array.isArray(token) && Array.isArray(value)) {
tokenValue = token.map((tokenName, index) =>
token &&
(tokens as any)[tokenName] &&
(tokens as any)[tokenName][value[index]]
? (tokens as any)[tokenName][value[index]]
: value[index]
);
} else {
tokenValue =
token && (tokens as any)[token] && (tokens as any)[token][value]
? (tokens as any)[token][value]
: value;
}
} else {
tokenValue = value;
}
const isVendorPrefixed = cssProp[0] === cssProp[0].toUpperCase();
// generate id used for specificity check
// two atoms are considered equal in regared to there specificity if the id is equal
const id =
cssProp.toLowerCase() +
(pseudo ? pseudo.split(":").sort().join(":") : "") +
screen;
// make a uid accouting for different values
const uid = id + value;
// If this was created before return the cached atom
if (atomCache.has(uid)) {
return atomCache.get(uid)!;
}
// prepare the cssProp
let cssHyphenProp = cssProp
.split(/(?=[A-Z])/)
.map((g) => g.toLowerCase())
.join("-");
if (isVendorPrefixed) {
cssHyphenProp = `-${cssHyphenProp}`;
} else if (vendor | const className = this.atoms.map((atom) => atom.toString()).join(" ");
// cache the className on this instance
// @ts-ignore | random_line_split |
|
telegraf.go | 2s"
omit_hostname = true
[[outputs.socket_writer]]
address = "tcp://{{.IngestAddress}}"
data_format = "json"
json_timestamp_units = "1ms"
[[inputs.internal]]
collect_memstats = false
`))
var (
telegrafStartupDuration = 10 * time.Second
)
const (
telegrafMaxTestMonitorRetries = 3
telegrafTestMonitorRetryDelay = 500 * time.Millisecond
)
type telegrafMainConfigData struct {
IngestAddress string
DefaultMonitoringInterval time.Duration
MaxFlushInterval time.Duration
}
type TelegrafRunner struct {
ingestAddress string
basePath string
running *AgentRunningContext
commandHandler CommandHandler
configServerMux *http.ServeMux
configServerURL string
configServerToken string
configServerHandler http.HandlerFunc
tomlMainConfig []byte
// tomlConfigs key is the "bound monitor id", i.e. monitorId_resourceId
tomlConfigs map[string][]byte
}
func (tr *TelegrafRunner) PurgeConfig() error {
tr.tomlConfigs = make(map[string][]byte)
return nil
}
func init() {
registerSpecificAgentRunner(telemetry_edge.AgentType_TELEGRAF, &TelegrafRunner{})
}
func (tr *TelegrafRunner) Load(agentBasePath string) error {
tr.ingestAddress = config.GetListenerAddress(config.TelegrafJsonListener)
tr.basePath = agentBasePath
tr.configServerToken = uuid.NewV4().String()
tr.configServerHandler = func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("authorization") != "Token "+tr.configServerToken {
http.Error(w, "unauthorized", http.StatusUnauthorized)
return
}
_, err := w.Write(tr.concatConfigs())
if err != nil {
log.Errorf("Error writing config page %v", err)
}
}
serverId := uuid.NewV4().String()
tr.configServerMux = http.NewServeMux()
tr.configServerMux.Handle("/"+serverId, tr.configServerHandler)
// Get the next available port
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return errors.Wrap(err, "couldn't create http listener")
}
listenerPort := listener.Addr().(*net.TCPAddr).Port
tr.configServerURL = fmt.Sprintf("http://127.0.0.1:%d/%s", listenerPort, serverId)
tr.tomlConfigs = make(map[string][]byte)
mainConfig, err := tr.createMainConfig()
if err != nil {
return errors.Wrap(err, "couldn't create main config")
}
tr.tomlMainConfig = mainConfig
go tr.serve(listener)
return nil
}
func (tr *TelegrafRunner) serve(listener net.Listener) {
log.Info("started webServer")
err := http.Serve(listener, tr.configServerMux)
// Note this is probably not the best way to handle webserver failure
log.Fatalf("web server error %v", err)
}
func (tr *TelegrafRunner) SetCommandHandler(handler CommandHandler) {
tr.commandHandler = handler
}
func (tr *TelegrafRunner) ProcessConfig(configure *telemetry_edge.EnvoyInstructionConfigure) error {
applied := 0
for _, op := range configure.GetOperations() {
log.WithField("op", op).Debug("processing telegraf config operation")
if tr.handleTelegrafConfigurationOp(op) {
applied++
}
}
if applied == 0 {
return &noAppliedConfigsError{}
}
return nil
}
func (tr *TelegrafRunner) concatConfigs() []byte |
func (tr *TelegrafRunner) handleTelegrafConfigurationOp(op *telemetry_edge.ConfigurationOp) bool {
switch op.GetType() {
case telemetry_edge.ConfigurationOp_CREATE, telemetry_edge.ConfigurationOp_MODIFY:
var finalConfig []byte
var err error
finalConfig, err = ConvertJsonToTelegrafToml(op.GetContent(), op.ExtraLabels, op.Interval)
if err != nil {
log.WithError(err).WithField("op", op).Warn("failed to convert config blob to TOML")
return false
}
tr.tomlConfigs[op.GetId()] = finalConfig
return true
case telemetry_edge.ConfigurationOp_REMOVE:
if _, ok := tr.tomlConfigs[op.GetId()]; ok {
delete(tr.tomlConfigs, op.GetId())
return true
}
return false
}
return false
}
func (tr *TelegrafRunner) PostInstall(agentVersionPath string) error {
resolvedExePath := path.Join(agentVersionPath, binSubpath, "telegraf")
err := addNetRawCapabilities(resolvedExePath)
if err != nil {
log.WithError(err).
WithField("agentExe", resolvedExePath).
Warn("failed to set net_raw capabilities on telegraf, native ping will not work")
}
return nil
}
func (tr *TelegrafRunner) EnsureRunningState(ctx context.Context, applyConfigs bool) {
log.Debug("ensuring telegraf is in correct running state")
if !tr.hasRequiredPaths() {
log.Debug("telegraf not runnable due to some missing paths and files, stopping if needed")
tr.commandHandler.Stop(tr.running)
return
}
if tr.running.IsRunning() {
log.
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Debug("already running")
if applyConfigs {
log.
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Info("signaling config reload")
tr.handleConfigReload()
}
return
}
runningContext := tr.commandHandler.CreateContext(ctx,
telemetry_edge.AgentType_TELEGRAF,
tr.exePath(), tr.basePath,
"--config", tr.configServerURL)
// telegraf returns the INFLUX_TOKEN in the http config request header
runningContext.AppendEnv("INFLUX_TOKEN=" + tr.configServerToken)
err := tr.commandHandler.StartAgentCommand(runningContext,
telemetry_edge.AgentType_TELEGRAF,
"Loaded inputs:", telegrafStartupDuration)
if err != nil {
log.WithError(err).
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Warn("failed to start agent")
return
}
go tr.commandHandler.WaitOnAgentCommand(ctx, tr, runningContext)
tr.running = runningContext
log.WithField("pid", runningContext.Pid()).
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Info("started agent")
}
// exePath returns path to executable relative to baseDir
func (tr *TelegrafRunner) exePath() string {
return filepath.Join(currentVerLink, binSubpath, "telegraf")
}
func (tr *TelegrafRunner) Stop() {
tr.commandHandler.Stop(tr.running)
tr.running = nil
}
func (tr *TelegrafRunner) createMainConfig() ([]byte, error) {
data := &telegrafMainConfigData{
IngestAddress: tr.ingestAddress,
DefaultMonitoringInterval: viper.GetDuration(config.AgentsDefaultMonitoringInterval),
MaxFlushInterval: viper.GetDuration(config.AgentsMaxFlushInterval),
}
var b bytes.Buffer
err := telegrafMainConfigTmpl.Execute(&b, data)
if err != nil {
return nil, errors.Wrap(err, "failed to execute telegraf main config template")
}
return b.Bytes(), nil
}
func (tr *TelegrafRunner) handleConfigReload() {
if err := tr.commandHandler.Signal(tr.running, syscall.SIGHUP); err != nil {
log.WithError(err).WithField("pid", tr.running.Pid()).
Warn("failed to signal agent process")
}
}
func (tr *TelegrafRunner) hasRequiredPaths() bool {
fullExePath := path.Join(tr.basePath, tr.exePath())
if !fileExists(fullExePath) {
log.WithField("exe", fullExePath).Debug("missing exe")
return false
}
return true
}
func (tr *TelegrafRunner) ProcessTestMonitor(correlationId string, content string, timeout time.Duration) (*telemetry_edge.TestMonitorResults, error) {
// Convert content to TOML
configToml, err := ConvertJsonToTelegrafToml(content, nil, 0)
if err != nil {
return nil, errors.Wrapf(err, "failed to convert config content")
}
// Generate token/id used for authenticating and pulling telegraf config
testConfigServerToken := uuid.NewV4().String()
testConfigServerId := uuid.NewV4().String()
// Bind to the next available port by using :0
listener, err := net.Listen("tcp", "12 | {
var configs []byte
configs = append(configs, tr.tomlMainConfig...)
// telegraf can only handle one 'inputs' header per file so add exactly one here
configs = append(configs, []byte("[inputs]")...)
for _, v := range tr.tomlConfigs {
// remove the other redundant '[inputs]' headers here
if bytes.Equal([]byte("[inputs]"), v[0:8]) {
v = v[8:]
}
configs = append(configs, v...)
}
return configs
} | identifier_body |
telegraf.go | func (tr *TelegrafRunner) Load(agentBasePath string) error {
tr.ingestAddress = config.GetListenerAddress(config.TelegrafJsonListener)
tr.basePath = agentBasePath
tr.configServerToken = uuid.NewV4().String()
tr.configServerHandler = func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("authorization") != "Token "+tr.configServerToken {
http.Error(w, "unauthorized", http.StatusUnauthorized)
return
}
_, err := w.Write(tr.concatConfigs())
if err != nil {
log.Errorf("Error writing config page %v", err)
}
}
serverId := uuid.NewV4().String()
tr.configServerMux = http.NewServeMux()
tr.configServerMux.Handle("/"+serverId, tr.configServerHandler)
// Get the next available port
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return errors.Wrap(err, "couldn't create http listener")
}
listenerPort := listener.Addr().(*net.TCPAddr).Port
tr.configServerURL = fmt.Sprintf("http://127.0.0.1:%d/%s", listenerPort, serverId)
tr.tomlConfigs = make(map[string][]byte)
mainConfig, err := tr.createMainConfig()
if err != nil {
return errors.Wrap(err, "couldn't create main config")
}
tr.tomlMainConfig = mainConfig
go tr.serve(listener)
return nil
}
func (tr *TelegrafRunner) serve(listener net.Listener) {
log.Info("started webServer")
err := http.Serve(listener, tr.configServerMux)
// Note this is probably not the best way to handle webserver failure
log.Fatalf("web server error %v", err)
}
func (tr *TelegrafRunner) SetCommandHandler(handler CommandHandler) {
tr.commandHandler = handler
}
func (tr *TelegrafRunner) ProcessConfig(configure *telemetry_edge.EnvoyInstructionConfigure) error {
applied := 0
for _, op := range configure.GetOperations() {
log.WithField("op", op).Debug("processing telegraf config operation")
if tr.handleTelegrafConfigurationOp(op) {
applied++
}
}
if applied == 0 {
return &noAppliedConfigsError{}
}
return nil
}
func (tr *TelegrafRunner) concatConfigs() []byte {
var configs []byte
configs = append(configs, tr.tomlMainConfig...)
// telegraf can only handle one 'inputs' header per file so add exactly one here
configs = append(configs, []byte("[inputs]")...)
for _, v := range tr.tomlConfigs {
// remove the other redundant '[inputs]' headers here
if bytes.Equal([]byte("[inputs]"), v[0:8]) {
v = v[8:]
}
configs = append(configs, v...)
}
return configs
}
func (tr *TelegrafRunner) handleTelegrafConfigurationOp(op *telemetry_edge.ConfigurationOp) bool {
switch op.GetType() {
case telemetry_edge.ConfigurationOp_CREATE, telemetry_edge.ConfigurationOp_MODIFY:
var finalConfig []byte
var err error
finalConfig, err = ConvertJsonToTelegrafToml(op.GetContent(), op.ExtraLabels, op.Interval)
if err != nil {
log.WithError(err).WithField("op", op).Warn("failed to convert config blob to TOML")
return false
}
tr.tomlConfigs[op.GetId()] = finalConfig
return true
case telemetry_edge.ConfigurationOp_REMOVE:
if _, ok := tr.tomlConfigs[op.GetId()]; ok {
delete(tr.tomlConfigs, op.GetId())
return true
}
return false
}
return false
}
func (tr *TelegrafRunner) PostInstall(agentVersionPath string) error {
resolvedExePath := path.Join(agentVersionPath, binSubpath, "telegraf")
err := addNetRawCapabilities(resolvedExePath)
if err != nil {
log.WithError(err).
WithField("agentExe", resolvedExePath).
Warn("failed to set net_raw capabilities on telegraf, native ping will not work")
}
return nil
}
func (tr *TelegrafRunner) EnsureRunningState(ctx context.Context, applyConfigs bool) {
log.Debug("ensuring telegraf is in correct running state")
if !tr.hasRequiredPaths() {
log.Debug("telegraf not runnable due to some missing paths and files, stopping if needed")
tr.commandHandler.Stop(tr.running)
return
}
if tr.running.IsRunning() {
log.
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Debug("already running")
if applyConfigs {
log.
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Info("signaling config reload")
tr.handleConfigReload()
}
return
}
runningContext := tr.commandHandler.CreateContext(ctx,
telemetry_edge.AgentType_TELEGRAF,
tr.exePath(), tr.basePath,
"--config", tr.configServerURL)
// telegraf returns the INFLUX_TOKEN in the http config request header
runningContext.AppendEnv("INFLUX_TOKEN=" + tr.configServerToken)
err := tr.commandHandler.StartAgentCommand(runningContext,
telemetry_edge.AgentType_TELEGRAF,
"Loaded inputs:", telegrafStartupDuration)
if err != nil {
log.WithError(err).
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Warn("failed to start agent")
return
}
go tr.commandHandler.WaitOnAgentCommand(ctx, tr, runningContext)
tr.running = runningContext
log.WithField("pid", runningContext.Pid()).
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Info("started agent")
}
// exePath returns path to executable relative to baseDir
func (tr *TelegrafRunner) exePath() string {
return filepath.Join(currentVerLink, binSubpath, "telegraf")
}
func (tr *TelegrafRunner) Stop() {
tr.commandHandler.Stop(tr.running)
tr.running = nil
}
func (tr *TelegrafRunner) createMainConfig() ([]byte, error) {
data := &telegrafMainConfigData{
IngestAddress: tr.ingestAddress,
DefaultMonitoringInterval: viper.GetDuration(config.AgentsDefaultMonitoringInterval),
MaxFlushInterval: viper.GetDuration(config.AgentsMaxFlushInterval),
}
var b bytes.Buffer
err := telegrafMainConfigTmpl.Execute(&b, data)
if err != nil {
return nil, errors.Wrap(err, "failed to execute telegraf main config template")
}
return b.Bytes(), nil
}
func (tr *TelegrafRunner) handleConfigReload() {
if err := tr.commandHandler.Signal(tr.running, syscall.SIGHUP); err != nil {
log.WithError(err).WithField("pid", tr.running.Pid()).
Warn("failed to signal agent process")
}
}
func (tr *TelegrafRunner) hasRequiredPaths() bool {
fullExePath := path.Join(tr.basePath, tr.exePath())
if !fileExists(fullExePath) {
log.WithField("exe", fullExePath).Debug("missing exe")
return false
}
return true
}
func (tr *TelegrafRunner) ProcessTestMonitor(correlationId string, content string, timeout time.Duration) (*telemetry_edge.TestMonitorResults, error) {
// Convert content to TOML
configToml, err := ConvertJsonToTelegrafToml(content, nil, 0)
if err != nil {
return nil, errors.Wrapf(err, "failed to convert config content")
}
// Generate token/id used for authenticating and pulling telegraf config
testConfigServerToken := uuid.NewV4().String()
testConfigServerId := uuid.NewV4().String()
// Bind to the next available port by using :0
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return nil, errors.Wrap(err, "couldn't create http listener")
}
//noinspection GoUnhandledErrorResult
defer listener.Close()
listenerPort := listener.Addr().(*net.TCPAddr).Port
hostPort := fmt.Sprintf("127.0.0.1:%d", listenerPort)
configServerErrors := make(chan error, 2)
testConfigRunner := telegrafTestConfigRunnerBuilder(testConfigServerId, testConfigServerToken)
// Start the config server
configServer := testConfigRunner.StartTestConfigServer(configToml, configServerErrors, listener)
// Run the telegraf test command
results := &telemetry_edge.TestMonitorResults{
CorrelationId: correlationId,
Errors: []string{},
}
// Sometimes telegraf --test completes with empty output and no error indicated,
// so retry a few times. If that still fails, then a parse error will be produced as without retrying.
var cmdOut []byte
for attempt := 0; attempt < telegrafMaxTestMonitorRetries; attempt++ {
cmdOut, err = testConfigRunner.RunCommand(hostPort, tr.exePath(), tr.basePath, timeout)
if err != nil || len(cmdOut) != 0 | {
break
} | conditional_block |
|
telegraf.go | 2s"
omit_hostname = true
[[outputs.socket_writer]]
address = "tcp://{{.IngestAddress}}"
data_format = "json"
json_timestamp_units = "1ms"
[[inputs.internal]]
collect_memstats = false
`))
var (
telegrafStartupDuration = 10 * time.Second
)
const (
telegrafMaxTestMonitorRetries = 3
telegrafTestMonitorRetryDelay = 500 * time.Millisecond
)
type telegrafMainConfigData struct {
IngestAddress string
DefaultMonitoringInterval time.Duration
MaxFlushInterval time.Duration
}
type TelegrafRunner struct {
ingestAddress string
basePath string
running *AgentRunningContext
commandHandler CommandHandler
configServerMux *http.ServeMux
configServerURL string
configServerToken string
configServerHandler http.HandlerFunc
tomlMainConfig []byte
// tomlConfigs key is the "bound monitor id", i.e. monitorId_resourceId
tomlConfigs map[string][]byte
}
func (tr *TelegrafRunner) PurgeConfig() error {
tr.tomlConfigs = make(map[string][]byte)
return nil
}
func init() {
registerSpecificAgentRunner(telemetry_edge.AgentType_TELEGRAF, &TelegrafRunner{})
}
func (tr *TelegrafRunner) Load(agentBasePath string) error {
tr.ingestAddress = config.GetListenerAddress(config.TelegrafJsonListener)
tr.basePath = agentBasePath
tr.configServerToken = uuid.NewV4().String()
tr.configServerHandler = func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("authorization") != "Token "+tr.configServerToken {
http.Error(w, "unauthorized", http.StatusUnauthorized)
return
}
_, err := w.Write(tr.concatConfigs())
if err != nil {
log.Errorf("Error writing config page %v", err)
}
}
serverId := uuid.NewV4().String()
tr.configServerMux = http.NewServeMux()
tr.configServerMux.Handle("/"+serverId, tr.configServerHandler)
// Get the next available port
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return errors.Wrap(err, "couldn't create http listener")
}
listenerPort := listener.Addr().(*net.TCPAddr).Port
tr.configServerURL = fmt.Sprintf("http://127.0.0.1:%d/%s", listenerPort, serverId)
tr.tomlConfigs = make(map[string][]byte)
mainConfig, err := tr.createMainConfig()
if err != nil {
return errors.Wrap(err, "couldn't create main config")
}
tr.tomlMainConfig = mainConfig
go tr.serve(listener)
return nil
}
func (tr *TelegrafRunner) serve(listener net.Listener) {
log.Info("started webServer")
err := http.Serve(listener, tr.configServerMux)
// Note this is probably not the best way to handle webserver failure
log.Fatalf("web server error %v", err)
}
func (tr *TelegrafRunner) SetCommandHandler(handler CommandHandler) {
tr.commandHandler = handler
}
func (tr *TelegrafRunner) ProcessConfig(configure *telemetry_edge.EnvoyInstructionConfigure) error {
applied := 0
for _, op := range configure.GetOperations() {
log.WithField("op", op).Debug("processing telegraf config operation")
if tr.handleTelegrafConfigurationOp(op) {
applied++
}
}
if applied == 0 {
return &noAppliedConfigsError{}
}
return nil
}
func (tr *TelegrafRunner) concatConfigs() []byte {
var configs []byte
configs = append(configs, tr.tomlMainConfig...)
// telegraf can only handle one 'inputs' header per file so add exactly one here
configs = append(configs, []byte("[inputs]")...)
for _, v := range tr.tomlConfigs {
// remove the other redundant '[inputs]' headers here
if bytes.Equal([]byte("[inputs]"), v[0:8]) {
v = v[8:]
}
configs = append(configs, v...)
}
return configs
}
func (tr *TelegrafRunner) handleTelegrafConfigurationOp(op *telemetry_edge.ConfigurationOp) bool {
switch op.GetType() {
case telemetry_edge.ConfigurationOp_CREATE, telemetry_edge.ConfigurationOp_MODIFY:
var finalConfig []byte
var err error
finalConfig, err = ConvertJsonToTelegrafToml(op.GetContent(), op.ExtraLabels, op.Interval)
if err != nil {
log.WithError(err).WithField("op", op).Warn("failed to convert config blob to TOML")
return false
}
tr.tomlConfigs[op.GetId()] = finalConfig
return true
case telemetry_edge.ConfigurationOp_REMOVE:
if _, ok := tr.tomlConfigs[op.GetId()]; ok {
delete(tr.tomlConfigs, op.GetId())
return true
}
return false
}
return false
}
func (tr *TelegrafRunner) PostInstall(agentVersionPath string) error {
resolvedExePath := path.Join(agentVersionPath, binSubpath, "telegraf")
err := addNetRawCapabilities(resolvedExePath)
if err != nil {
log.WithError(err).
WithField("agentExe", resolvedExePath).
Warn("failed to set net_raw capabilities on telegraf, native ping will not work")
}
return nil
}
func (tr *TelegrafRunner) EnsureRunningState(ctx context.Context, applyConfigs bool) {
log.Debug("ensuring telegraf is in correct running state")
if !tr.hasRequiredPaths() {
log.Debug("telegraf not runnable due to some missing paths and files, stopping if needed")
tr.commandHandler.Stop(tr.running)
return
}
if tr.running.IsRunning() {
log.
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Debug("already running")
if applyConfigs {
log.
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Info("signaling config reload")
tr.handleConfigReload()
}
return
}
runningContext := tr.commandHandler.CreateContext(ctx,
telemetry_edge.AgentType_TELEGRAF,
tr.exePath(), tr.basePath,
"--config", tr.configServerURL)
// telegraf returns the INFLUX_TOKEN in the http config request header
runningContext.AppendEnv("INFLUX_TOKEN=" + tr.configServerToken)
err := tr.commandHandler.StartAgentCommand(runningContext,
telemetry_edge.AgentType_TELEGRAF,
"Loaded inputs:", telegrafStartupDuration)
if err != nil {
log.WithError(err).
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Warn("failed to start agent")
return
}
go tr.commandHandler.WaitOnAgentCommand(ctx, tr, runningContext)
tr.running = runningContext
log.WithField("pid", runningContext.Pid()).
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Info("started agent")
}
// exePath returns path to executable relative to baseDir
func (tr *TelegrafRunner) exePath() string {
return filepath.Join(currentVerLink, binSubpath, "telegraf")
}
func (tr *TelegrafRunner) | () {
tr.commandHandler.Stop(tr.running)
tr.running = nil
}
func (tr *TelegrafRunner) createMainConfig() ([]byte, error) {
data := &telegrafMainConfigData{
IngestAddress: tr.ingestAddress,
DefaultMonitoringInterval: viper.GetDuration(config.AgentsDefaultMonitoringInterval),
MaxFlushInterval: viper.GetDuration(config.AgentsMaxFlushInterval),
}
var b bytes.Buffer
err := telegrafMainConfigTmpl.Execute(&b, data)
if err != nil {
return nil, errors.Wrap(err, "failed to execute telegraf main config template")
}
return b.Bytes(), nil
}
func (tr *TelegrafRunner) handleConfigReload() {
if err := tr.commandHandler.Signal(tr.running, syscall.SIGHUP); err != nil {
log.WithError(err).WithField("pid", tr.running.Pid()).
Warn("failed to signal agent process")
}
}
func (tr *TelegrafRunner) hasRequiredPaths() bool {
fullExePath := path.Join(tr.basePath, tr.exePath())
if !fileExists(fullExePath) {
log.WithField("exe", fullExePath).Debug("missing exe")
return false
}
return true
}
func (tr *TelegrafRunner) ProcessTestMonitor(correlationId string, content string, timeout time.Duration) (*telemetry_edge.TestMonitorResults, error) {
// Convert content to TOML
configToml, err := ConvertJsonToTelegrafToml(content, nil, 0)
if err != nil {
return nil, errors.Wrapf(err, "failed to convert config content")
}
// Generate token/id used for authenticating and pulling telegraf config
testConfigServerToken := uuid.NewV4().String()
testConfigServerId := uuid.NewV4().String()
// Bind to the next available port by using :0
listener, err := net.Listen("tcp", "127 | Stop | identifier_name |
telegraf.go | 2s"
omit_hostname = true
[[outputs.socket_writer]]
address = "tcp://{{.IngestAddress}}"
data_format = "json"
json_timestamp_units = "1ms"
[[inputs.internal]]
collect_memstats = false
`))
var (
telegrafStartupDuration = 10 * time.Second
)
const (
telegrafMaxTestMonitorRetries = 3
telegrafTestMonitorRetryDelay = 500 * time.Millisecond
)
type telegrafMainConfigData struct {
IngestAddress string
DefaultMonitoringInterval time.Duration |
type TelegrafRunner struct {
ingestAddress string
basePath string
running *AgentRunningContext
commandHandler CommandHandler
configServerMux *http.ServeMux
configServerURL string
configServerToken string
configServerHandler http.HandlerFunc
tomlMainConfig []byte
// tomlConfigs key is the "bound monitor id", i.e. monitorId_resourceId
tomlConfigs map[string][]byte
}
func (tr *TelegrafRunner) PurgeConfig() error {
tr.tomlConfigs = make(map[string][]byte)
return nil
}
func init() {
registerSpecificAgentRunner(telemetry_edge.AgentType_TELEGRAF, &TelegrafRunner{})
}
func (tr *TelegrafRunner) Load(agentBasePath string) error {
tr.ingestAddress = config.GetListenerAddress(config.TelegrafJsonListener)
tr.basePath = agentBasePath
tr.configServerToken = uuid.NewV4().String()
tr.configServerHandler = func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("authorization") != "Token "+tr.configServerToken {
http.Error(w, "unauthorized", http.StatusUnauthorized)
return
}
_, err := w.Write(tr.concatConfigs())
if err != nil {
log.Errorf("Error writing config page %v", err)
}
}
serverId := uuid.NewV4().String()
tr.configServerMux = http.NewServeMux()
tr.configServerMux.Handle("/"+serverId, tr.configServerHandler)
// Get the next available port
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return errors.Wrap(err, "couldn't create http listener")
}
listenerPort := listener.Addr().(*net.TCPAddr).Port
tr.configServerURL = fmt.Sprintf("http://127.0.0.1:%d/%s", listenerPort, serverId)
tr.tomlConfigs = make(map[string][]byte)
mainConfig, err := tr.createMainConfig()
if err != nil {
return errors.Wrap(err, "couldn't create main config")
}
tr.tomlMainConfig = mainConfig
go tr.serve(listener)
return nil
}
func (tr *TelegrafRunner) serve(listener net.Listener) {
log.Info("started webServer")
err := http.Serve(listener, tr.configServerMux)
// Note this is probably not the best way to handle webserver failure
log.Fatalf("web server error %v", err)
}
func (tr *TelegrafRunner) SetCommandHandler(handler CommandHandler) {
tr.commandHandler = handler
}
func (tr *TelegrafRunner) ProcessConfig(configure *telemetry_edge.EnvoyInstructionConfigure) error {
applied := 0
for _, op := range configure.GetOperations() {
log.WithField("op", op).Debug("processing telegraf config operation")
if tr.handleTelegrafConfigurationOp(op) {
applied++
}
}
if applied == 0 {
return &noAppliedConfigsError{}
}
return nil
}
func (tr *TelegrafRunner) concatConfigs() []byte {
var configs []byte
configs = append(configs, tr.tomlMainConfig...)
// telegraf can only handle one 'inputs' header per file so add exactly one here
configs = append(configs, []byte("[inputs]")...)
for _, v := range tr.tomlConfigs {
// remove the other redundant '[inputs]' headers here
if bytes.Equal([]byte("[inputs]"), v[0:8]) {
v = v[8:]
}
configs = append(configs, v...)
}
return configs
}
func (tr *TelegrafRunner) handleTelegrafConfigurationOp(op *telemetry_edge.ConfigurationOp) bool {
switch op.GetType() {
case telemetry_edge.ConfigurationOp_CREATE, telemetry_edge.ConfigurationOp_MODIFY:
var finalConfig []byte
var err error
finalConfig, err = ConvertJsonToTelegrafToml(op.GetContent(), op.ExtraLabels, op.Interval)
if err != nil {
log.WithError(err).WithField("op", op).Warn("failed to convert config blob to TOML")
return false
}
tr.tomlConfigs[op.GetId()] = finalConfig
return true
case telemetry_edge.ConfigurationOp_REMOVE:
if _, ok := tr.tomlConfigs[op.GetId()]; ok {
delete(tr.tomlConfigs, op.GetId())
return true
}
return false
}
return false
}
func (tr *TelegrafRunner) PostInstall(agentVersionPath string) error {
resolvedExePath := path.Join(agentVersionPath, binSubpath, "telegraf")
err := addNetRawCapabilities(resolvedExePath)
if err != nil {
log.WithError(err).
WithField("agentExe", resolvedExePath).
Warn("failed to set net_raw capabilities on telegraf, native ping will not work")
}
return nil
}
func (tr *TelegrafRunner) EnsureRunningState(ctx context.Context, applyConfigs bool) {
log.Debug("ensuring telegraf is in correct running state")
if !tr.hasRequiredPaths() {
log.Debug("telegraf not runnable due to some missing paths and files, stopping if needed")
tr.commandHandler.Stop(tr.running)
return
}
if tr.running.IsRunning() {
log.
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Debug("already running")
if applyConfigs {
log.
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Info("signaling config reload")
tr.handleConfigReload()
}
return
}
runningContext := tr.commandHandler.CreateContext(ctx,
telemetry_edge.AgentType_TELEGRAF,
tr.exePath(), tr.basePath,
"--config", tr.configServerURL)
// telegraf returns the INFLUX_TOKEN in the http config request header
runningContext.AppendEnv("INFLUX_TOKEN=" + tr.configServerToken)
err := tr.commandHandler.StartAgentCommand(runningContext,
telemetry_edge.AgentType_TELEGRAF,
"Loaded inputs:", telegrafStartupDuration)
if err != nil {
log.WithError(err).
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Warn("failed to start agent")
return
}
go tr.commandHandler.WaitOnAgentCommand(ctx, tr, runningContext)
tr.running = runningContext
log.WithField("pid", runningContext.Pid()).
WithField("agentType", telemetry_edge.AgentType_TELEGRAF).
Info("started agent")
}
// exePath returns path to executable relative to baseDir
func (tr *TelegrafRunner) exePath() string {
return filepath.Join(currentVerLink, binSubpath, "telegraf")
}
func (tr *TelegrafRunner) Stop() {
tr.commandHandler.Stop(tr.running)
tr.running = nil
}
func (tr *TelegrafRunner) createMainConfig() ([]byte, error) {
data := &telegrafMainConfigData{
IngestAddress: tr.ingestAddress,
DefaultMonitoringInterval: viper.GetDuration(config.AgentsDefaultMonitoringInterval),
MaxFlushInterval: viper.GetDuration(config.AgentsMaxFlushInterval),
}
var b bytes.Buffer
err := telegrafMainConfigTmpl.Execute(&b, data)
if err != nil {
return nil, errors.Wrap(err, "failed to execute telegraf main config template")
}
return b.Bytes(), nil
}
func (tr *TelegrafRunner) handleConfigReload() {
if err := tr.commandHandler.Signal(tr.running, syscall.SIGHUP); err != nil {
log.WithError(err).WithField("pid", tr.running.Pid()).
Warn("failed to signal agent process")
}
}
func (tr *TelegrafRunner) hasRequiredPaths() bool {
fullExePath := path.Join(tr.basePath, tr.exePath())
if !fileExists(fullExePath) {
log.WithField("exe", fullExePath).Debug("missing exe")
return false
}
return true
}
func (tr *TelegrafRunner) ProcessTestMonitor(correlationId string, content string, timeout time.Duration) (*telemetry_edge.TestMonitorResults, error) {
// Convert content to TOML
configToml, err := ConvertJsonToTelegrafToml(content, nil, 0)
if err != nil {
return nil, errors.Wrapf(err, "failed to convert config content")
}
// Generate token/id used for authenticating and pulling telegraf config
testConfigServerToken := uuid.NewV4().String()
testConfigServerId := uuid.NewV4().String()
// Bind to the next available port by using :0
listener, err := net.Listen("tcp", "127. | MaxFlushInterval time.Duration
} | random_line_split |
main_test.go | 07"
beneficiaryIDs := []string{"10000", "11000"}
jobID := "1"
staging := fmt.Sprintf("%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID)
// clean out the data dir before beginning this test
os.RemoveAll(staging)
testUtils.CreateStaging(jobID)
for i := 0; i < len(beneficiaryIDs); i++ {
bbc.On("GetExplanationOfBenefitData", beneficiaryIDs[i]).Return(bbc.getData("ExplanationOfBenefit", beneficiaryIDs[i]))
}
_, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit")
if err != nil {
t.Fail()
}
files, err := ioutil.ReadDir(staging)
assert.Nil(t, err)
assert.Equal(t, 1, len(files))
for _, f := range files {
fmt.Println(f.Name())
filePath := fmt.Sprintf("%s/%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID, f.Name())
file, err := os.Open(filePath)
if err != nil {
log.Fatal(err)
}
scanner := bufio.NewScanner(file)
// 33 entries in test EOB data returned by bbc.getData, times two beneficiaries
for i := 0; i < 66; i++ {
assert.True(t, scanner.Scan())
var jsonOBJ map[string]interface{}
err := json.Unmarshal(scanner.Bytes(), &jsonOBJ)
assert.Nil(t, err)
assert.NotNil(t, jsonOBJ["fullUrl"], "JSON should contain a value for `fullUrl`.")
assert.NotNil(t, jsonOBJ["resource"], "JSON should contain a value for `resource`.")
}
assert.False(t, scanner.Scan(), "There should be only 66 entries in the file.")
bbc.AssertExpectations(t)
file.Close()
os.Remove(filePath)
}
}
func TestWriteEOBDataToFileNoClient(t *testing.T) {
_, err := writeBBDataToFile(nil, "9c05c1f8-349d-400f-9b69-7963f2262b08", []string{"20000", "21000"}, "1", "ExplanationOfBenefit")
assert.NotNil(t, err)
}
func TestWriteEOBDataToFileInvalidACO(t *testing.T) {
bbc := MockBlueButtonClient{}
acoID := "9c05c1f8-349d-400f-9b69-7963f2262zzz"
beneficiaryIDs := []string{"10000", "11000"}
_, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, "1", "ExplanationOfBenefit")
assert.NotNil(t, err)
}
func TestWriteEOBDataToFileWithErrorsBelowFailureThreshold(t *testing.T) {
os.Setenv("FHIR_STAGING_DIR", "data/test")
origFailPct := os.Getenv("EXPORT_FAIL_PCT")
defer os.Setenv("EXPORT_FAIL_PCT", origFailPct)
os.Setenv("EXPORT_FAIL_PCT", "70")
bbc := MockBlueButtonClient{}
// Set up the mock function to return the expected values
bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error"))
bbc.On("GetExplanationOfBenefitData", "11000").Return("", errors.New("error"))
bbc.On("GetExplanationOfBenefitData", "12000").Return(bbc.getData("ExplanationOfBenefit", "12000"))
acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9"
beneficiaryIDs := []string{"10000", "11000", "12000"}
jobID := "1"
testUtils.CreateStaging(jobID)
fileName, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit")
if err != nil {
t.Fail()
}
filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID)
fData, err := ioutil.ReadFile(filePath)
if err != nil {
t.Fail()
}
ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}
{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}`
assert.Equal(t, ooResp+"\n", string(fData))
bbc.AssertExpectations(t)
os.Remove(fmt.Sprintf("%s/%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID, fileName))
os.Remove(filePath)
}
func TestWriteEOBDataToFileWithErrorsAboveFailureThreshold(t *testing.T) {
os.Setenv("FHIR_STAGING_DIR", "data/test")
origFailPct := os.Getenv("EXPORT_FAIL_PCT")
defer os.Setenv("EXPORT_FAIL_PCT", origFailPct)
os.Setenv("EXPORT_FAIL_PCT", "60")
bbc := MockBlueButtonClient{}
// Set up the mock function to return the expected values
bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error"))
bbc.On("GetExplanationOfBenefitData", "11000").Return("", errors.New("error"))
acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9"
beneficiaryIDs := []string{"10000", "11000", "12000"}
jobID := "1"
testUtils.CreateStaging(jobID)
_, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit")
assert.Equal(t, "number of failed requests has exceeded threshold", err.Error())
filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID)
fData, err := ioutil.ReadFile(filePath)
if err != nil {
t.Fail()
}
ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}
{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}`
assert.Equal(t, ooResp+"\n", string(fData))
bbc.AssertExpectations(t)
// should not have requested third beneficiary EOB because failure threshold was reached after second
bbc.AssertNotCalled(t, "GetExplanationOfBenefitData", "12000")
os.Remove(fmt.Sprintf("%s/%s/%s.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID))
os.Remove(filePath)
}
func | (t *testing.T) {
origFailPct := os.Getenv("EXPORT_FAIL_PCT")
defer os.Setenv("EXPORT_FAIL_PCT", origFailPct)
os.Setenv("EXPORT_FAIL_PCT", "60")
assert.Equal(t, 60.0, getFailureThreshold())
os | TestGetFailureThreshold | identifier_name |
main_test.go | if err != nil {
log.Fatal(err)
}
scanner := bufio.NewScanner(file)
// 33 entries in test EOB data returned by bbc.getData, times two beneficiaries
for i := 0; i < 66; i++ {
assert.True(t, scanner.Scan())
var jsonOBJ map[string]interface{}
err := json.Unmarshal(scanner.Bytes(), &jsonOBJ)
assert.Nil(t, err)
assert.NotNil(t, jsonOBJ["fullUrl"], "JSON should contain a value for `fullUrl`.")
assert.NotNil(t, jsonOBJ["resource"], "JSON should contain a value for `resource`.")
}
assert.False(t, scanner.Scan(), "There should be only 66 entries in the file.")
bbc.AssertExpectations(t)
file.Close()
os.Remove(filePath)
}
}
func TestWriteEOBDataToFileNoClient(t *testing.T) {
_, err := writeBBDataToFile(nil, "9c05c1f8-349d-400f-9b69-7963f2262b08", []string{"20000", "21000"}, "1", "ExplanationOfBenefit")
assert.NotNil(t, err)
}
func TestWriteEOBDataToFileInvalidACO(t *testing.T) {
bbc := MockBlueButtonClient{}
acoID := "9c05c1f8-349d-400f-9b69-7963f2262zzz"
beneficiaryIDs := []string{"10000", "11000"}
_, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, "1", "ExplanationOfBenefit")
assert.NotNil(t, err)
}
func TestWriteEOBDataToFileWithErrorsBelowFailureThreshold(t *testing.T) {
os.Setenv("FHIR_STAGING_DIR", "data/test")
origFailPct := os.Getenv("EXPORT_FAIL_PCT")
defer os.Setenv("EXPORT_FAIL_PCT", origFailPct)
os.Setenv("EXPORT_FAIL_PCT", "70")
bbc := MockBlueButtonClient{}
// Set up the mock function to return the expected values
bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error"))
bbc.On("GetExplanationOfBenefitData", "11000").Return("", errors.New("error"))
bbc.On("GetExplanationOfBenefitData", "12000").Return(bbc.getData("ExplanationOfBenefit", "12000"))
acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9"
beneficiaryIDs := []string{"10000", "11000", "12000"}
jobID := "1"
testUtils.CreateStaging(jobID)
fileName, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit")
if err != nil {
t.Fail()
}
filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID)
fData, err := ioutil.ReadFile(filePath)
if err != nil {
t.Fail()
}
ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}
{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}`
assert.Equal(t, ooResp+"\n", string(fData))
bbc.AssertExpectations(t)
os.Remove(fmt.Sprintf("%s/%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID, fileName))
os.Remove(filePath)
}
func TestWriteEOBDataToFileWithErrorsAboveFailureThreshold(t *testing.T) {
os.Setenv("FHIR_STAGING_DIR", "data/test")
origFailPct := os.Getenv("EXPORT_FAIL_PCT")
defer os.Setenv("EXPORT_FAIL_PCT", origFailPct)
os.Setenv("EXPORT_FAIL_PCT", "60")
bbc := MockBlueButtonClient{}
// Set up the mock function to return the expected values
bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error"))
bbc.On("GetExplanationOfBenefitData", "11000").Return("", errors.New("error"))
acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9"
beneficiaryIDs := []string{"10000", "11000", "12000"}
jobID := "1"
testUtils.CreateStaging(jobID)
_, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit")
assert.Equal(t, "number of failed requests has exceeded threshold", err.Error())
filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID)
fData, err := ioutil.ReadFile(filePath)
if err != nil {
t.Fail()
}
ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}
{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}`
assert.Equal(t, ooResp+"\n", string(fData))
bbc.AssertExpectations(t)
// should not have requested third beneficiary EOB because failure threshold was reached after second
bbc.AssertNotCalled(t, "GetExplanationOfBenefitData", "12000")
os.Remove(fmt.Sprintf("%s/%s/%s.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID))
os.Remove(filePath)
}
func TestGetFailureThreshold(t *testing.T) {
origFailPct := os.Getenv("EXPORT_FAIL_PCT")
defer os.Setenv("EXPORT_FAIL_PCT", origFailPct)
os.Setenv("EXPORT_FAIL_PCT", "60")
assert.Equal(t, 60.0, getFailureThreshold())
os.Setenv("EXPORT_FAIL_PCT", "-1")
assert.Equal(t, 0.0, getFailureThreshold())
os.Setenv("EXPORT_FAIL_PCT", "500")
assert.Equal(t, 100.0, getFailureThreshold())
os.Setenv("EXPORT_FAIL_PCT", "zero")
assert.Equal(t, 50.0, getFailureThreshold())
}
func TestAppendErrorToFile(t *testing.T) {
os.Setenv("FHIR_STAGING_DIR", "data/test")
acoID := "328e83c3-bc46-4827-836c-0ba0c713dc7d"
jobID := "1"
testUtils.CreateStaging(jobID)
appendErrorToFile(acoID, "", "", "", jobID)
filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID)
fData, err := ioutil.ReadFile(filePath)
if err != nil {
t.Fail() | }
ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error"}]}`
| random_line_split |
|
main_test.go | 7"
beneficiaryIDs := []string{"10000", "11000"}
jobID := "1"
staging := fmt.Sprintf("%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID)
// clean out the data dir before beginning this test
os.RemoveAll(staging)
testUtils.CreateStaging(jobID)
for i := 0; i < len(beneficiaryIDs); i++ {
bbc.On("GetExplanationOfBenefitData", beneficiaryIDs[i]).Return(bbc.getData("ExplanationOfBenefit", beneficiaryIDs[i]))
}
_, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit")
if err != nil {
t.Fail()
}
files, err := ioutil.ReadDir(staging)
assert.Nil(t, err)
assert.Equal(t, 1, len(files))
for _, f := range files {
fmt.Println(f.Name())
filePath := fmt.Sprintf("%s/%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID, f.Name())
file, err := os.Open(filePath)
if err != nil |
scanner := bufio.NewScanner(file)
// 33 entries in test EOB data returned by bbc.getData, times two beneficiaries
for i := 0; i < 66; i++ {
assert.True(t, scanner.Scan())
var jsonOBJ map[string]interface{}
err := json.Unmarshal(scanner.Bytes(), &jsonOBJ)
assert.Nil(t, err)
assert.NotNil(t, jsonOBJ["fullUrl"], "JSON should contain a value for `fullUrl`.")
assert.NotNil(t, jsonOBJ["resource"], "JSON should contain a value for `resource`.")
}
assert.False(t, scanner.Scan(), "There should be only 66 entries in the file.")
bbc.AssertExpectations(t)
file.Close()
os.Remove(filePath)
}
}
func TestWriteEOBDataToFileNoClient(t *testing.T) {
_, err := writeBBDataToFile(nil, "9c05c1f8-349d-400f-9b69-7963f2262b08", []string{"20000", "21000"}, "1", "ExplanationOfBenefit")
assert.NotNil(t, err)
}
func TestWriteEOBDataToFileInvalidACO(t *testing.T) {
bbc := MockBlueButtonClient{}
acoID := "9c05c1f8-349d-400f-9b69-7963f2262zzz"
beneficiaryIDs := []string{"10000", "11000"}
_, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, "1", "ExplanationOfBenefit")
assert.NotNil(t, err)
}
func TestWriteEOBDataToFileWithErrorsBelowFailureThreshold(t *testing.T) {
os.Setenv("FHIR_STAGING_DIR", "data/test")
origFailPct := os.Getenv("EXPORT_FAIL_PCT")
defer os.Setenv("EXPORT_FAIL_PCT", origFailPct)
os.Setenv("EXPORT_FAIL_PCT", "70")
bbc := MockBlueButtonClient{}
// Set up the mock function to return the expected values
bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error"))
bbc.On("GetExplanationOfBenefitData", "11000").Return("", errors.New("error"))
bbc.On("GetExplanationOfBenefitData", "12000").Return(bbc.getData("ExplanationOfBenefit", "12000"))
acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9"
beneficiaryIDs := []string{"10000", "11000", "12000"}
jobID := "1"
testUtils.CreateStaging(jobID)
fileName, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit")
if err != nil {
t.Fail()
}
filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID)
fData, err := ioutil.ReadFile(filePath)
if err != nil {
t.Fail()
}
ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}
{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}`
assert.Equal(t, ooResp+"\n", string(fData))
bbc.AssertExpectations(t)
os.Remove(fmt.Sprintf("%s/%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID, fileName))
os.Remove(filePath)
}
func TestWriteEOBDataToFileWithErrorsAboveFailureThreshold(t *testing.T) {
os.Setenv("FHIR_STAGING_DIR", "data/test")
origFailPct := os.Getenv("EXPORT_FAIL_PCT")
defer os.Setenv("EXPORT_FAIL_PCT", origFailPct)
os.Setenv("EXPORT_FAIL_PCT", "60")
bbc := MockBlueButtonClient{}
// Set up the mock function to return the expected values
bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error"))
bbc.On("GetExplanationOfBenefitData", "11000").Return("", errors.New("error"))
acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9"
beneficiaryIDs := []string{"10000", "11000", "12000"}
jobID := "1"
testUtils.CreateStaging(jobID)
_, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit")
assert.Equal(t, "number of failed requests has exceeded threshold", err.Error())
filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID)
fData, err := ioutil.ReadFile(filePath)
if err != nil {
t.Fail()
}
ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}
{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}`
assert.Equal(t, ooResp+"\n", string(fData))
bbc.AssertExpectations(t)
// should not have requested third beneficiary EOB because failure threshold was reached after second
bbc.AssertNotCalled(t, "GetExplanationOfBenefitData", "12000")
os.Remove(fmt.Sprintf("%s/%s/%s.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID))
os.Remove(filePath)
}
func TestGetFailureThreshold(t *testing.T) {
origFailPct := os.Getenv("EXPORT_FAIL_PCT")
defer os.Setenv("EXPORT_FAIL_PCT", origFailPct)
os.Setenv("EXPORT_FAIL_PCT", "60")
assert.Equal(t, 60.0, getFailureThreshold())
os | {
log.Fatal(err)
} | conditional_block |
main_test.go | ToFile(nil, "9c05c1f8-349d-400f-9b69-7963f2262b08", []string{"20000", "21000"}, "1", "ExplanationOfBenefit")
assert.NotNil(t, err)
}
func TestWriteEOBDataToFileInvalidACO(t *testing.T) {
bbc := MockBlueButtonClient{}
acoID := "9c05c1f8-349d-400f-9b69-7963f2262zzz"
beneficiaryIDs := []string{"10000", "11000"}
_, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, "1", "ExplanationOfBenefit")
assert.NotNil(t, err)
}
func TestWriteEOBDataToFileWithErrorsBelowFailureThreshold(t *testing.T) {
os.Setenv("FHIR_STAGING_DIR", "data/test")
origFailPct := os.Getenv("EXPORT_FAIL_PCT")
defer os.Setenv("EXPORT_FAIL_PCT", origFailPct)
os.Setenv("EXPORT_FAIL_PCT", "70")
bbc := MockBlueButtonClient{}
// Set up the mock function to return the expected values
bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error"))
bbc.On("GetExplanationOfBenefitData", "11000").Return("", errors.New("error"))
bbc.On("GetExplanationOfBenefitData", "12000").Return(bbc.getData("ExplanationOfBenefit", "12000"))
acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9"
beneficiaryIDs := []string{"10000", "11000", "12000"}
jobID := "1"
testUtils.CreateStaging(jobID)
fileName, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit")
if err != nil {
t.Fail()
}
filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID)
fData, err := ioutil.ReadFile(filePath)
if err != nil {
t.Fail()
}
ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}
{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}`
assert.Equal(t, ooResp+"\n", string(fData))
bbc.AssertExpectations(t)
os.Remove(fmt.Sprintf("%s/%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID, fileName))
os.Remove(filePath)
}
func TestWriteEOBDataToFileWithErrorsAboveFailureThreshold(t *testing.T) {
os.Setenv("FHIR_STAGING_DIR", "data/test")
origFailPct := os.Getenv("EXPORT_FAIL_PCT")
defer os.Setenv("EXPORT_FAIL_PCT", origFailPct)
os.Setenv("EXPORT_FAIL_PCT", "60")
bbc := MockBlueButtonClient{}
// Set up the mock function to return the expected values
bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error"))
bbc.On("GetExplanationOfBenefitData", "11000").Return("", errors.New("error"))
acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9"
beneficiaryIDs := []string{"10000", "11000", "12000"}
jobID := "1"
testUtils.CreateStaging(jobID)
_, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit")
assert.Equal(t, "number of failed requests has exceeded threshold", err.Error())
filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID)
fData, err := ioutil.ReadFile(filePath)
if err != nil {
t.Fail()
}
ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}
{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}`
assert.Equal(t, ooResp+"\n", string(fData))
bbc.AssertExpectations(t)
// should not have requested third beneficiary EOB because failure threshold was reached after second
bbc.AssertNotCalled(t, "GetExplanationOfBenefitData", "12000")
os.Remove(fmt.Sprintf("%s/%s/%s.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID))
os.Remove(filePath)
}
func TestGetFailureThreshold(t *testing.T) {
origFailPct := os.Getenv("EXPORT_FAIL_PCT")
defer os.Setenv("EXPORT_FAIL_PCT", origFailPct)
os.Setenv("EXPORT_FAIL_PCT", "60")
assert.Equal(t, 60.0, getFailureThreshold())
os.Setenv("EXPORT_FAIL_PCT", "-1")
assert.Equal(t, 0.0, getFailureThreshold())
os.Setenv("EXPORT_FAIL_PCT", "500")
assert.Equal(t, 100.0, getFailureThreshold())
os.Setenv("EXPORT_FAIL_PCT", "zero")
assert.Equal(t, 50.0, getFailureThreshold())
}
func TestAppendErrorToFile(t *testing.T) {
os.Setenv("FHIR_STAGING_DIR", "data/test")
acoID := "328e83c3-bc46-4827-836c-0ba0c713dc7d"
jobID := "1"
testUtils.CreateStaging(jobID)
appendErrorToFile(acoID, "", "", "", jobID)
filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID)
fData, err := ioutil.ReadFile(filePath)
if err != nil {
t.Fail()
}
ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error"}]}`
assert.Equal(t, ooResp+"\n", string(fData))
os.Remove(filePath)
}
func (bbc *MockBlueButtonClient) GetExplanationOfBenefitData(patientID string, jobID string) (string, error) {
args := bbc.Called(patientID)
return args.String(0), args.Error(1)
}
// Returns copy of a static json file (From Blue Button Sandbox originally) after replacing the patient ID of 20000000000001 with the requested identifier
func (bbc *MockBlueButtonClient) getData(endpoint, patientID string) (string, error) | {
fData, err := ioutil.ReadFile("../shared_files/synthetic_beneficiary_data/" + endpoint)
if err != nil {
return "", err
}
cleanData := strings.Replace(string(fData), "20000000000001", patientID, -1)
return cleanData, err
}
|
api_op_ChangeCidrCollection.go
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Creates, changes, or deletes CIDR blocks within a collection. Contains
// authoritative IP information mapping blocks to one or multiple locations. A
// change request can update multiple locations in a collection at a time, which is
// helpful if you want to move one or more CIDR blocks from one location to another
// in one transaction, without downtime.
//
// Limits: the maximum number of CIDR blocks included in one request is 1000, so
// large updates require multiple API calls.
//
// PUT and DELETE_IF_EXISTS: use ChangeCidrCollection to perform the
// following actions:
// - PUT : Create a CIDR block within the specified collection.
// - DELETE_IF_EXISTS : Delete an existing CIDR block from the collection.
func (c *Client) ChangeCidrCollection(ctx context.Context, params *ChangeCidrCollectionInput, optFns ...func(*Options)) (*ChangeCidrCollectionOutput, error) {
if params == nil {
params = &ChangeCidrCollectionInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ChangeCidrCollection", params, optFns, c.addOperationChangeCidrCollectionMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ChangeCidrCollectionOutput)
out.ResultMetadata = metadata
return out, nil
}
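// Minimal, illustrative usage sketch (not part of the generated SDK source).
// It assumes an already-configured *Client and hypothetical collection and
// location names; the CidrCollectionChange fields mirror the PUT action
// described above, and CollectionVersion enables the optimistic-locking check
// documented on the input struct below.
//
//	out, err := client.ChangeCidrCollection(context.TODO(), &ChangeCidrCollectionInput{
//		Id:                aws.String("example-collection-id"), // hypothetical ID
//		CollectionVersion: aws.Int64(3),                        // from a prior list call
//		Changes: []types.CidrCollectionChange{{
//			Action:       types.CidrCollectionChangeActionPut,
//			LocationName: aws.String("example-location"),
//			CidrList:     []string{"203.0.113.0/24"},
//		}},
//	})
//	if err != nil {
//		// handle CidrCollectionVersionMismatch and other errors
//	}
//	_ = out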
type ChangeCidrCollectionInput struct {
// Information about changes to a CIDR collection.
//
// This member is required.
Changes []types.CidrCollectionChange
// The UUID of the CIDR collection to update.
//
// This member is required.
Id *string
// A sequential counter that Amazon Route 53 sets to 1 when you create a
// collection and increments it by 1 each time you update the collection. We
// recommend that you use ListCidrCollection to get the current value of
// CollectionVersion for the collection that you want to update, and then include
// that value with the change request. This prevents Route 53 from overwriting an
// intervening update:
// - If the value in the request matches the value of CollectionVersion in the
// collection, Route 53 updates the collection.
// - If the value of CollectionVersion in the collection is greater than the
// value in the request, the collection was changed after you got the version
// number. Route 53 does not update the collection, and it returns a
// CidrCollectionVersionMismatch error.
CollectionVersion *int64
noSmithyDocumentSerde
}
type ChangeCidrCollectionOutput struct {
// The ID that is returned by ChangeCidrCollection . You can use it as input to
// GetChange to see if a CIDR collection change has propagated or not.
//
// This member is required.
Id *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationChangeCidrCollectionMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestxml_serializeOpChangeCidrCollection{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestxml_deserializeOpChangeCidrCollection{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addChangeCidrCollectionResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpChangeCidrCollectionValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opChangeCidrCollection(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opChangeCidrCollection(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "route53",
OperationName: "ChangeCidrCollection",
}
}
type opChangeCidrCollectionResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opChangeCidrCollectionResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opChangeCidrCollectionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "route53"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "route53"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
api_op_ChangeCidrCollection.go
params = &ChangeCidrCollectionInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ChangeCidrCollection", params, optFns, c.addOperationChangeCidrCollectionMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ChangeCidrCollectionOutput)
out.ResultMetadata = metadata
return out, nil
}
type ChangeCidrCollectionInput struct {
// Information about changes to a CIDR collection.
//
// This member is required.
Changes []types.CidrCollectionChange
// The UUID of the CIDR collection to update.
//
// This member is required.
Id *string
// A sequential counter that Amazon Route 53 sets to 1 when you create a
// collection and increments it by 1 each time you update the collection. We
// recommend that you use ListCidrCollection to get the current value of
// CollectionVersion for the collection that you want to update, and then include
// that value with the change request. This prevents Route 53 from overwriting an
// intervening update:
// - If the value in the request matches the value of CollectionVersion in the
// collection, Route 53 updates the collection.
// - If the value of CollectionVersion in the collection is greater than the
// value in the request, the collection was changed after you got the version
// number. Route 53 does not update the collection, and it returns a
// CidrCollectionVersionMismatch error.
CollectionVersion *int64
noSmithyDocumentSerde
}
type ChangeCidrCollectionOutput struct {
// The ID that is returned by ChangeCidrCollection . You can use it as input to
// GetChange to see if a CIDR collection change has propagated or not.
//
// This member is required.
Id *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationChangeCidrCollectionMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestxml_serializeOpChangeCidrCollection{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestxml_deserializeOpChangeCidrCollection{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addChangeCidrCollectionResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpChangeCidrCollectionValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opChangeCidrCollection(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opChangeCidrCollection(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "route53",
OperationName: "ChangeCidrCollection",
}
}
type opChangeCidrCollectionResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opChangeCidrCollectionResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opChangeCidrCollectionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "route53"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "route53"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("route53")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addChangeCidrCollectionResolveEndpointMiddleware
|
api_op_ChangeCidrCollection.go
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Creates, changes, or deletes CIDR blocks within a collection. Contains
// authoritative IP information mapping blocks to one or multiple locations. A
// change request can update multiple locations in a collection at a time, which is
// helpful if you want to move one or more CIDR blocks from one location to another
// in one transaction, without downtime. Limits The max number of CIDR blocks
// included in the request is 1000. As a result, big updates require multiple API
// calls. PUT and DELETE_IF_EXISTS Use ChangeCidrCollection to perform the
// following actions:
// - PUT : Create a CIDR block within the specified collection.
// - DELETE_IF_EXISTS : Delete an existing CIDR block from the collection.
func (c *Client) ChangeCidrCollection(ctx context.Context, params *ChangeCidrCollectionInput, optFns ...func(*Options)) (*ChangeCidrCollectionOutput, error) {
if params == nil {
params = &ChangeCidrCollectionInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ChangeCidrCollection", params, optFns, c.addOperationChangeCidrCollectionMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ChangeCidrCollectionOutput)
out.ResultMetadata = metadata
return out, nil
}
type ChangeCidrCollectionInput struct {
// Information about changes to a CIDR collection.
//
// This member is required.
Changes []types.CidrCollectionChange
// The UUID of the CIDR collection to update.
//
// This member is required.
Id *string
// A sequential counter that Amazon Route 53 sets to 1 when you create a
// collection and increments it by 1 each time you update the collection. We
// recommend that you use ListCidrCollection to get the current value of
// CollectionVersion for the collection that you want to update, and then include
// that value with the change request. This prevents Route 53 from overwriting an
// intervening update:
// - If the value in the request matches the value of CollectionVersion in the
// collection, Route 53 updates the collection.
// - If the value of CollectionVersion in the collection is greater than the
// value in the request, the collection was changed after you got the version
// number. Route 53 does not update the collection, and it returns a
// CidrCollectionVersionMismatch error.
CollectionVersion *int64
noSmithyDocumentSerde
}
type ChangeCidrCollectionOutput struct {
// The ID that is returned by ChangeCidrCollection . You can use it as input to
// GetChange to see if a CIDR collection change has propagated or not.
//
// This member is required.
Id *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationChangeCidrCollectionMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestxml_serializeOpChangeCidrCollection{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestxml_deserializeOpChangeCidrCollection{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
} | }
if err = addOpChangeCidrCollectionValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opChangeCidrCollection(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opChangeCidrCollection(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "route53",
OperationName: "ChangeCidrCollection",
}
}
type opChangeCidrCollectionResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opChangeCidrCollectionResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opChangeCidrCollectionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "route53"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "route53"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addChangeCidrCollectionResolveEndpointMiddleware(stack, options); err != nil {
return err
api_op_ChangeCidrCollection.go
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Creates, changes, or deletes CIDR blocks within a collection. Contains
// authoritative IP information mapping blocks to one or multiple locations. A
// change request can update multiple locations in a collection at a time, which is
// helpful if you want to move one or more CIDR blocks from one location to another
// in one transaction, without downtime. Limits The max number of CIDR blocks
// included in the request is 1000. As a result, big updates require multiple API
// calls. PUT and DELETE_IF_EXISTS Use ChangeCidrCollection to perform the
// following actions:
// - PUT : Create a CIDR block within the specified collection.
// - DELETE_IF_EXISTS : Delete an existing CIDR block from the collection.
func (c *Client) ChangeCidrCollection(ctx context.Context, params *ChangeCidrCollectionInput, optFns ...func(*Options)) (*ChangeCidrCollectionOutput, error) {
if params == nil {
params = &ChangeCidrCollectionInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ChangeCidrCollection", params, optFns, c.addOperationChangeCidrCollectionMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ChangeCidrCollectionOutput)
out.ResultMetadata = metadata
return out, nil
}
type ChangeCidrCollectionInput struct {
// Information about changes to a CIDR collection.
//
// This member is required.
Changes []types.CidrCollectionChange
// The UUID of the CIDR collection to update.
//
// This member is required.
Id *string
// A sequential counter that Amazon Route 53 sets to 1 when you create a
// collection and increments it by 1 each time you update the collection. We
// recommend that you use ListCidrCollection to get the current value of
// CollectionVersion for the collection that you want to update, and then include
// that value with the change request. This prevents Route 53 from overwriting an
// intervening update:
// - If the value in the request matches the value of CollectionVersion in the
// collection, Route 53 updates the collection.
// - If the value of CollectionVersion in the collection is greater than the
// value in the request, the collection was changed after you got the version
// number. Route 53 does not update the collection, and it returns a
// CidrCollectionVersionMismatch error.
CollectionVersion *int64
noSmithyDocumentSerde
}
type ChangeCidrCollectionOutput struct {
// The ID that is returned by ChangeCidrCollection . You can use it as input to
// GetChange to see if a CIDR collection change has propagated or not.
//
// This member is required.
Id *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationChangeCidrCollectionMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestxml_serializeOpChangeCidrCollection{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestxml_deserializeOpChangeCidrCollection{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addChangeCidrCollectionResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpChangeCidrCollectionValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opChangeCidrCollection(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opChangeCidrCollection(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "route53",
OperationName: "ChangeCidrCollection",
}
}
type opChangeCidrCollectionResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opChangeCidrCollectionResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opChangeCidrCollectionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) | if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "route53"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "route53"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
{
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
mod.rs
/// to do for example
/// `spawn(format!("{:?}", some_public_key))`.
pub fn spawn(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
) {
self.spawn_inner(name, group, task, TaskType::Async)
}
/// Spawns the blocking task with the given name. See also `spawn`.
pub fn spawn_blocking(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
) {
self.spawn_inner(name, group, task, TaskType::Blocking)
}
/// Helper function that implements the spawning logic. See `spawn` and `spawn_blocking`.
fn spawn_inner(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
task_type: TaskType,
) {
let on_exit = self.on_exit.clone();
let metrics = self.metrics.clone();
let registry = self.task_registry.clone();
let group = match group.into() {
GroupName::Specific(var) => var,
// If no group is specified use default.
GroupName::Default => DEFAULT_GROUP_NAME,
};
let task_type_label = match task_type {
TaskType::Blocking => "blocking",
TaskType::Async => "async",
};
// Note that we increase the started counter here and not within the future. This way,
// we could properly visualize on Prometheus situations where the spawning doesn't work.
if let Some(metrics) = &self.metrics {
metrics.tasks_spawned.with_label_values(&[name, group, task_type_label]).inc();
// We do a dummy increase in order for the task to show up in metrics.
metrics
.tasks_ended
.with_label_values(&[name, "finished", group, task_type_label])
.inc_by(0);
}
let future = async move {
// Register the task and keep the "token" alive until the task is ended. Then this
// "token" will unregister this task.
let _registry_token = registry.register_task(name, group);
if let Some(metrics) = metrics {
// Add some wrappers around `task`.
let task = { | prometheus_future::with_poll_durations(poll_duration, poll_start, task);
// The logic of `AssertUnwindSafe` here is ok considering that we throw
// away the `Future` after it has panicked.
panic::AssertUnwindSafe(inner).catch_unwind()
};
futures::pin_mut!(task);
match select(on_exit, task).await {
Either::Right((Err(payload), _)) => {
metrics
.tasks_ended
.with_label_values(&[name, "panic", group, task_type_label])
.inc();
panic::resume_unwind(payload)
},
Either::Right((Ok(()), _)) => {
metrics
.tasks_ended
.with_label_values(&[name, "finished", group, task_type_label])
.inc();
},
Either::Left(((), _)) => {
// The `on_exit` has triggered.
metrics
.tasks_ended
.with_label_values(&[name, "interrupted", group, task_type_label])
.inc();
},
}
} else {
futures::pin_mut!(task);
let _ = select(on_exit, task).await;
}
}
.in_current_span();
match task_type {
TaskType::Async => {
self.tokio_handle.spawn(future);
},
TaskType::Blocking => {
let handle = self.tokio_handle.clone();
self.tokio_handle.spawn_blocking(move || {
handle.block_on(future);
});
},
}
}
}
impl sp_core::traits::SpawnNamed for SpawnTaskHandle {
fn spawn_blocking(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_inner(name, group, future, TaskType::Blocking)
}
fn spawn(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_inner(name, group, future, TaskType::Async)
}
}
/// A wrapper over `SpawnTaskHandle` that will notify a receiver whenever any
/// task spawned through it fails. The service should be on the receiver side
/// and will shut itself down whenever it receives any message, i.e. an
/// essential task has failed.
#[derive(Clone)]
pub struct SpawnEssentialTaskHandle {
essential_failed_tx: TracingUnboundedSender<()>,
inner: SpawnTaskHandle,
}
impl SpawnEssentialTaskHandle {
/// Creates a new `SpawnEssentialTaskHandle`.
pub fn new(
essential_failed_tx: TracingUnboundedSender<()>,
spawn_task_handle: SpawnTaskHandle,
) -> SpawnEssentialTaskHandle {
SpawnEssentialTaskHandle { essential_failed_tx, inner: spawn_task_handle }
}
/// Spawns the given task with the given name.
///
/// See also [`SpawnTaskHandle::spawn`].
pub fn spawn(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
) {
self.spawn_inner(name, group, task, TaskType::Async)
}
/// Spawns the blocking task with the given name.
///
/// See also [`SpawnTaskHandle::spawn_blocking`].
pub fn spawn_blocking(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
) {
self.spawn_inner(name, group, task, TaskType::Blocking)
}
fn spawn_inner(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
task_type: TaskType,
) {
let essential_failed = self.essential_failed_tx.clone();
let essential_task = std::panic::AssertUnwindSafe(task).catch_unwind().map(move |_| {
log::error!("Essential task `{}` failed. Shutting down service.", name);
let _ = essential_failed.close();
});
let _ = self.inner.spawn_inner(name, group, essential_task, task_type);
}
}
impl sp_core::traits::SpawnEssentialNamed for SpawnEssentialTaskHandle {
fn spawn_essential_blocking(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_blocking(name, group, future);
}
fn spawn_essential(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn(name, group, future);
}
}
/// Helper struct to manage background/async tasks in Service.
pub struct TaskManager {
/// A future that resolves when the service has exited, this is useful to
/// make sure any internally spawned futures stop when the service does.
on_exit: exit_future::Exit,
/// A signal that makes the exit future above resolve, fired on drop.
_signal: Signal,
/// Tokio runtime handle that is used to spawn futures.
tokio_handle: Handle,
/// Prometheus metric where to report the polling times.
metrics: Option<Metrics>,
/// Send a signal when a spawned essential task has concluded. The next time
/// the service future is polled it should complete with an error.
essential_failed_tx: TracingUnboundedSender<()>,
/// A receiver for spawned essential-tasks concluding.
essential_failed_rx: TracingUnboundedReceiver<()>,
/// Things to keep alive until the task manager is dropped.
keep_alive: Box<dyn std::any::Any + Send>,
/// A list of other `TaskManager`'s to terminate and gracefully shutdown when the parent
/// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential
/// task fails.
children: Vec<TaskManager>,
/// The registry of all running tasks.
task_registry: TaskRegistry,
}
impl TaskManager {
/// If a Prometheus registry is passed, it will be used to report statistics about the
/// service tasks.
pub fn new(
tokio_handle: Handle,
prometheus_registry: Option<&Registry>,
) -> Result<Self, PrometheusError> {
let (signal, on_exit) = exit_future::signal();
// A side-channel for essential tasks to communicate shutdown.
let (essential_failed_tx, essential_failed_rx) =
tracing_unbounded("mp | let poll_duration =
metrics.poll_duration.with_label_values(&[name, group, task_type_label]);
let poll_start =
metrics.poll_start.with_label_values(&[name, group, task_type_label]);
let inner = | random_line_split |
mod.rs
/// to do for example
/// `spawn(format!("{:?}", some_public_key))`.
pub fn spawn(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
) {
self.spawn_inner(name, group, task, TaskType::Async)
}
/// Spawns the blocking task with the given name. See also `spawn`.
pub fn spawn_blocking(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
) {
self.spawn_inner(name, group, task, TaskType::Blocking)
}
/// Helper function that implements the spawning logic. See `spawn` and `spawn_blocking`.
fn spawn_inner(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
task_type: TaskType,
) {
let on_exit = self.on_exit.clone();
let metrics = self.metrics.clone();
let registry = self.task_registry.clone();
let group = match group.into() {
GroupName::Specific(var) => var,
// If no group is specified use default.
GroupName::Default => DEFAULT_GROUP_NAME,
};
let task_type_label = match task_type {
TaskType::Blocking => "blocking",
TaskType::Async => "async",
};
// Note that we increase the started counter here and not within the future. This way,
// we could properly visualize on Prometheus situations where the spawning doesn't work.
if let Some(metrics) = &self.metrics {
metrics.tasks_spawned.with_label_values(&[name, group, task_type_label]).inc();
// We do a dummy increase in order for the task to show up in metrics.
metrics
.tasks_ended
.with_label_values(&[name, "finished", group, task_type_label])
.inc_by(0);
}
let future = async move {
// Register the task and keep the "token" alive until the task is ended. Then this
// "token" will unregister this task.
let _registry_token = registry.register_task(name, group);
if let Some(metrics) = metrics {
// Add some wrappers around `task`.
let task = {
let poll_duration =
metrics.poll_duration.with_label_values(&[name, group, task_type_label]);
let poll_start =
metrics.poll_start.with_label_values(&[name, group, task_type_label]);
let inner =
prometheus_future::with_poll_durations(poll_duration, poll_start, task);
// The logic of `AssertUnwindSafe` here is ok considering that we throw
// away the `Future` after it has panicked.
panic::AssertUnwindSafe(inner).catch_unwind()
};
futures::pin_mut!(task);
match select(on_exit, task).await {
Either::Right((Err(payload), _)) => {
metrics
.tasks_ended
.with_label_values(&[name, "panic", group, task_type_label])
.inc();
panic::resume_unwind(payload)
},
Either::Right((Ok(()), _)) => {
metrics
.tasks_ended
.with_label_values(&[name, "finished", group, task_type_label])
.inc();
},
Either::Left(((), _)) => {
// The `on_exit` has triggered.
metrics
.tasks_ended
.with_label_values(&[name, "interrupted", group, task_type_label])
.inc();
},
}
} else {
futures::pin_mut!(task);
let _ = select(on_exit, task).await;
}
}
.in_current_span();
match task_type {
TaskType::Async => {
self.tokio_handle.spawn(future);
},
TaskType::Blocking => {
let handle = self.tokio_handle.clone();
self.tokio_handle.spawn_blocking(move || {
handle.block_on(future);
});
},
}
}
}
impl sp_core::traits::SpawnNamed for SpawnTaskHandle {
fn spawn_blocking(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_inner(name, group, future, TaskType::Blocking)
}
fn spawn(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_inner(name, group, future, TaskType::Async)
}
}
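// Illustrative usage sketch only (not part of this module): assuming a
// `task_manager: TaskManager` has been built and `my_future` is some
// `Future<Output = ()> + Send + 'static`, tasks are usually spawned through
// the handle returned by `TaskManager::spawn_handle`.
//
//     let handle = task_manager.spawn_handle();
//     handle.spawn("example-task", "example-group", my_future);
//     // Long-running blocking work should use the blocking variant instead:
//     handle.spawn_blocking("example-blocking-task", "example-group", my_blocking_future);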
/// A wrapper over `SpawnTaskHandle` that will notify a receiver whenever any
/// task spawned through it fails. The service should be on the receiver side
/// and will shut itself down whenever it receives any message, i.e. an
/// essential task has failed.
#[derive(Clone)]
pub struct SpawnEssentialTaskHandle {
essential_failed_tx: TracingUnboundedSender<()>,
inner: SpawnTaskHandle,
}
impl SpawnEssentialTaskHandle {
/// Creates a new `SpawnEssentialTaskHandle`.
pub fn new(
essential_failed_tx: TracingUnboundedSender<()>,
spawn_task_handle: SpawnTaskHandle,
) -> SpawnEssentialTaskHandle {
SpawnEssentialTaskHandle { essential_failed_tx, inner: spawn_task_handle }
}
/// Spawns the given task with the given name.
///
/// See also [`SpawnTaskHandle::spawn`].
pub fn spawn(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
) {
self.spawn_inner(name, group, task, TaskType::Async)
}
/// Spawns the blocking task with the given name.
///
/// See also [`SpawnTaskHandle::spawn_blocking`].
pub fn spawn_blocking(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
) {
self.spawn_inner(name, group, task, TaskType::Blocking)
}
fn spawn_inner(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
task_type: TaskType,
) {
let essential_failed = self.essential_failed_tx.clone();
let essential_task = std::panic::AssertUnwindSafe(task).catch_unwind().map(move |_| {
log::error!("Essential task `{}` failed. Shutting down service.", name);
let _ = essential_failed.close();
});
let _ = self.inner.spawn_inner(name, group, essential_task, task_type);
}
}
impl sp_core::traits::SpawnEssentialNamed for SpawnEssentialTaskHandle {
fn spawn_essential_blocking(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_blocking(name, group, future);
}
fn spawn_essential(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn(name, group, future);
}
}
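// Sketch of the essential-task variant (assumed names, mirroring the sketch
// above): handles come from `TaskManager::spawn_essential_handle`, and the
// failure of any task spawned this way signals the service future to shut down.
//
//     let essential = task_manager.spawn_essential_handle();
//     essential.spawn("example-essential-task", "example-group", my_future);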
/// Helper struct to manage background/async tasks in Service.
pub struct TaskManager {
/// A future that resolves when the service has exited, this is useful to
/// make sure any internally spawned futures stop when the service does.
on_exit: exit_future::Exit,
/// A signal that makes the exit future above resolve, fired on drop.
_signal: Signal,
/// Tokio runtime handle that is used to spawn futures.
tokio_handle: Handle,
/// Prometheus metric where to report the polling times.
metrics: Option<Metrics>,
/// Send a signal when a spawned essential task has concluded. The next time
/// the service future is polled it should complete with an error.
essential_failed_tx: TracingUnboundedSender<()>,
/// A receiver for spawned essential-tasks concluding.
essential_failed_rx: TracingUnboundedReceiver<()>,
/// Things to keep alive until the task manager is dropped.
keep_alive: Box<dyn std::any::Any + Send>,
/// A list of other `TaskManager`'s to terminate and gracefully shutdown when the parent
/// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential
/// task fails.
children: Vec<TaskManager>,
/// The registry of all running tasks.
task_registry: TaskRegistry,
}
impl TaskManager {
/// If a Prometheus registry is passed, it will be used to report statistics about the
/// service tasks.
pub fn new(
tokio_handle: Handle,
prometheus_registry: Option<&Registry>,
) -> Result<Self, PrometheusError> {
let (signal, on_exit) = exit_future::signal();
// A side-channel for essential tasks to communicate shutdown.
let (essential_failed_tx, essential_failed_rx) =
tracing_unbounded("
mod.rs
/// to do for example
/// `spawn(format!("{:?}", some_public_key))`.
pub fn spawn(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
) {
self.spawn_inner(name, group, task, TaskType::Async)
}
/// Spawns the blocking task with the given name. See also `spawn`.
pub fn spawn_blocking(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
) {
self.spawn_inner(name, group, task, TaskType::Blocking)
}
/// Helper function that implements the spawning logic. See `spawn` and `spawn_blocking`.
fn spawn_inner(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
task_type: TaskType,
) {
let on_exit = self.on_exit.clone();
let metrics = self.metrics.clone();
let registry = self.task_registry.clone();
let group = match group.into() {
GroupName::Specific(var) => var,
// If no group is specified use default.
GroupName::Default => DEFAULT_GROUP_NAME,
};
let task_type_label = match task_type {
TaskType::Blocking => "blocking",
TaskType::Async => "async",
};
// Note that we increase the started counter here and not within the future. This way,
// we could properly visualize on Prometheus situations where the spawning doesn't work.
if let Some(metrics) = &self.metrics {
metrics.tasks_spawned.with_label_values(&[name, group, task_type_label]).inc();
// We do a dummy increase in order for the task to show up in metrics.
metrics
.tasks_ended
.with_label_values(&[name, "finished", group, task_type_label])
.inc_by(0);
}
let future = async move {
// Register the task and keep the "token" alive until the task is ended. Then this
// "token" will unregister this task.
let _registry_token = registry.register_task(name, group);
if let Some(metrics) = metrics {
// Add some wrappers around `task`.
let task = {
let poll_duration =
metrics.poll_duration.with_label_values(&[name, group, task_type_label]);
let poll_start =
metrics.poll_start.with_label_values(&[name, group, task_type_label]);
let inner =
prometheus_future::with_poll_durations(poll_duration, poll_start, task);
// The logic of `AssertUnwindSafe` here is ok considering that we throw
// away the `Future` after it has panicked.
panic::AssertUnwindSafe(inner).catch_unwind()
};
futures::pin_mut!(task);
match select(on_exit, task).await {
Either::Right((Err(payload), _)) => {
metrics
.tasks_ended
.with_label_values(&[name, "panic", group, task_type_label])
.inc();
panic::resume_unwind(payload)
},
Either::Right((Ok(()), _)) => {
metrics
.tasks_ended
.with_label_values(&[name, "finished", group, task_type_label])
.inc();
},
Either::Left(((), _)) => {
// The `on_exit` has triggered.
metrics
.tasks_ended
.with_label_values(&[name, "interrupted", group, task_type_label])
.inc();
},
}
} else {
futures::pin_mut!(task);
let _ = select(on_exit, task).await;
}
}
.in_current_span();
match task_type {
TaskType::Async => {
self.tokio_handle.spawn(future);
},
TaskType::Blocking => {
let handle = self.tokio_handle.clone();
self.tokio_handle.spawn_blocking(move || {
handle.block_on(future);
});
},
}
}
}
impl sp_core::traits::SpawnNamed for SpawnTaskHandle {
fn spawn_blocking(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_inner(name, group, future, TaskType::Blocking)
}
fn spawn(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_inner(name, group, future, TaskType::Async)
}
}
/// A wrapper over `SpawnTaskHandle` that will notify a receiver whenever any
/// task spawned through it fails. The service should be on the receiver side
/// and will shut itself down whenever it receives any message, i.e. an
/// essential task has failed.
#[derive(Clone)]
pub struct SpawnEssentialTaskHandle {
essential_failed_tx: TracingUnboundedSender<()>,
inner: SpawnTaskHandle,
}
impl SpawnEssentialTaskHandle {
/// Creates a new `SpawnEssentialTaskHandle`.
pub fn new(
essential_failed_tx: TracingUnboundedSender<()>,
spawn_task_handle: SpawnTaskHandle,
) -> SpawnEssentialTaskHandle {
SpawnEssentialTaskHandle { essential_failed_tx, inner: spawn_task_handle }
}
/// Spawns the given task with the given name.
///
/// See also [`SpawnTaskHandle::spawn`].
pub fn spawn(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
) {
self.spawn_inner(name, group, task, TaskType::Async)
}
/// Spawns the blocking task with the given name.
///
/// See also [`SpawnTaskHandle::spawn_blocking`].
pub fn spawn_blocking(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
) {
self.spawn_inner(name, group, task, TaskType::Blocking)
}
fn spawn_inner(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send + 'static,
task_type: TaskType,
) {
let essential_failed = self.essential_failed_tx.clone();
let essential_task = std::panic::AssertUnwindSafe(task).catch_unwind().map(move |_| {
log::error!("Essential task `{}` failed. Shutting down service.", name);
let _ = essential_failed.close();
});
let _ = self.inner.spawn_inner(name, group, essential_task, task_type);
}
}
impl sp_core::traits::SpawnEssentialNamed for SpawnEssentialTaskHandle {
fn spawn_essential_blocking(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_blocking(name, group, future);
}
fn spawn_essential(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn(name, group, future);
}
}
/// Helper struct to manage background/async tasks in Service.
pub struct TaskManager {
/// A future that resolves when the service has exited, this is useful to
/// make sure any internally spawned futures stop when the service does.
on_exit: exit_future::Exit,
/// A signal that makes the exit future above resolve, fired on drop.
_signal: Signal,
/// Tokio runtime handle that is used to spawn futures.
tokio_handle: Handle,
/// Prometheus metric where to report the polling times.
metrics: Option<Metrics>,
/// Send a signal when a spawned essential task has concluded. The next time
/// the service future is polled it should complete with an error.
essential_failed_tx: TracingUnboundedSender<()>,
/// A receiver for spawned essential-tasks concluding.
essential_failed_rx: TracingUnboundedReceiver<()>,
/// Things to keep alive until the task manager is dropped.
keep_alive: Box<dyn std::any::Any + Send>,
/// A list of other `TaskManager`'s to terminate and gracefully shutdown when the parent
/// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential
/// task fails.
children: Vec<TaskManager>,
/// The registry of all running tasks.
task_registry: TaskRegistry,
}
impl TaskManager {
/// If a Prometheus registry is passed, it will be used to report statistics about the
/// service tasks.
pub fn new(
tokio_handle: Handle,
prometheus_registry: Option<&Registry>,
) -> Result<Self, PrometheusError> {
let (signal, on_exit) = exit_future::signal();
// A side-channel for essential tasks to communicate shutdown.
let (essential_failed_tx, essential_failed_rx) =
tracing_unbounded("
adapters.py |
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import glob
import os
from distutils.version import StrictVersion
from urlparse import urljoin
import six
import yaml
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.objects.plugin import ClusterPlugins
from nailgun.objects.plugin import Plugin
from nailgun.settings import settings
@six.add_metaclass(abc.ABCMeta)
class PluginAdapterBase(object):
"""Implements wrapper for plugin db model configuration files logic
1. Uploading plugin provided cluster attributes
2. Uploading tasks
3. Enabling/Disabling of plugin based on cluster attributes
4. Providing repositories/deployment scripts related info to clients
"""
environment_config_name = 'environment_config.yaml'
plugin_metadata = 'metadata.yaml'
task_config_name = 'tasks.yaml'
def __init__(self, plugin):
self.plugin = plugin
self.plugin_path = os.path.join(
settings.PLUGINS_PATH,
self.path_name)
self.tasks = []
@abc.abstractmethod
def path_name(self):
"""A name which is used to create path to plugin scripts and repos"""
def sync_metadata_to_db(self):
"""Sync metadata from config yaml files into DB"""
metadata_file_path = os.path.join(
self.plugin_path, self.plugin_metadata)
metadata = self._load_config(metadata_file_path) or {}
Plugin.update(self.plugin, metadata)
def _load_config(self, config):
if os.access(config, os.R_OK):
with open(config, "r") as conf:
try:
return yaml.safe_load(conf.read())
except yaml.YAMLError as exc:
logger.warning(exc)
raise errors.ParseError(
'Problem with loading YAML file {0}'.format(config))
else:
logger.warning("Config {0} is not readable.".format(config))
def _load_tasks(self, config):
data = self._load_config(config)
for item in data:
# backward compatibility for plugins added in version 6.0,
# and it is expected that a task with role: [controller]
# will be executed on all controllers
if (StrictVersion(self.plugin.package_version)
== StrictVersion('1.0')
and isinstance(item['role'], list)
and 'controller' in item['role']):
item['role'].append('primary-controller')
return data
def set_cluster_tasks(self):
"""Load plugins provided tasks and set them to instance tasks variable
Provided tasks are loaded from tasks config file.
"""
task_yaml = os.path.join(
self.plugin_path, self.task_config_name)
if os.path.exists(task_yaml):
self.tasks = self._load_tasks(task_yaml)
def filter_tasks(self, tasks, stage):
filtered = []
for task in tasks:
if stage and stage == task.get('stage'):
filtered.append(task)
return filtered
@property
def plugin_release_versions(self):
if not self.plugin.releases:
return set()
return set([rel['version'] for rel in self.plugin.releases])
@property
def | (self):
return u'{0}-{1}'.format(self.plugin.name, self.plugin.version)
@property
def slaves_scripts_path(self):
return settings.PLUGINS_SLAVES_SCRIPTS_PATH.format(
plugin_name=self.path_name)
@property
def deployment_tasks(self):
deployment_tasks = []
for task in self.plugin.deployment_tasks:
if task.get('parameters'):
task['parameters'].setdefault('cwd', self.slaves_scripts_path)
deployment_tasks.append(task)
return deployment_tasks
@property
def volumes_metadata(self):
return self.plugin.volumes_metadata
@property
def components_metadata(self):
return self.plugin.components_metadata
@property
def releases(self):
return self.plugin.releases
@property
def normalized_roles_metadata(self):
"""Block plugin disabling if nodes with plugin-provided roles exist"""
result = {}
for role, meta in six.iteritems(self.plugin.roles_metadata):
condition = "settings:{0}.metadata.enabled == false".format(
self.plugin.name)
meta = copy.copy(meta)
meta['restrictions'] = [condition] + meta.get('restrictions', [])
result[role] = meta
return result
def get_release_info(self, release):
"""Get plugin release information which corresponds to given release"""
rel_os = release.operating_system.lower()
version = release.version
release_info = filter(
lambda r: (
r['os'] == rel_os and
ClusterPlugins.is_release_version_compatible(version,
r['version'])),
self.plugin.releases)
return release_info[0]
def repo_files(self, cluster):
release_info = self.get_release_info(cluster.release)
repo_path = os.path.join(
settings.PLUGINS_PATH,
self.path_name,
release_info['repository_path'],
'*')
return glob.glob(repo_path)
def repo_url(self, cluster):
release_info = self.get_release_info(cluster.release)
repo_base = settings.PLUGINS_REPO_URL.format(
master_ip=settings.MASTER_IP,
plugin_name=self.path_name)
return urljoin(repo_base, release_info['repository_path'])
def master_scripts_path(self, cluster):
release_info = self.get_release_info(cluster.release)
# NOTE(eli): we cannot user urljoin here, because it
# works wrong, if protocol is rsync
base_url = settings.PLUGINS_SLAVES_RSYNC.format(
master_ip=settings.MASTER_IP,
plugin_name=self.path_name)
return '{0}{1}'.format(
base_url,
release_info['deployment_scripts_path'])
class PluginAdapterV1(PluginAdapterBase):
"""Plugins attributes class for package version 1.0.0"""
@property
def path_name(self):
"""Returns a name and full version
e.g. if there is a plugin with name "plugin_name" and version
is "1.0.0", the method returns "plugin_name-1.0.0"
"""
return self.full_name
class PluginAdapterV2(PluginAdapterBase):
"""Plugins attributes class for package version 2.0.0"""
@property
def path_name(self):
"""Returns a name and major version of the plugin
e.g. if there is a plugin with name "plugin_name" and version
is "1.0.0", the method returns "plugin_name-1.0".
It's different from previous version because in previous
version we did not have plugin updates, in 2.0.0 version
we should expect different plugin path.
See blueprint: https://blueprints.launchpad.net/fuel/+spec
/plugins-security-fixes-delivery
"""
return u'{0}-{1}'.format(self.plugin.name, self._major_version)
@property
def _major_version(self):
"""Returns major version of plugin's version
e.g. if plugin has 1.2.3 version, the method returns 1.2
"""
version_tuple = StrictVersion(self.plugin.version).version
major = '.'.join(map(str, version_tuple[:2]))
return major
class PluginAdapterV3(PluginAdapterV2):
"""Plugin wrapper class for package version 3.0.0"""
node_roles_config_name = 'node_roles.yaml'
volumes_config_name = 'volumes.yaml'
deployment_tasks_config_name = 'deployment_tasks.yaml'
network_roles_config_name = 'network_roles.yaml'
def sync_metadata_to_db(self):
"""Sync metadata from all config yaml files to DB"""
super(PluginAdapterV3, self).sync_metadata_to_db()
db_config_metadata_mapping = {
'attributes_metadata': self.environment_config_name,
'roles_metadata': self.node_roles_config_name,
'volumes_metadata': self.volumes_config_name,
'network_roles_metadata': self.network_roles_config_name,
'deployment_tasks': self.deployment_tasks_config_name,
'tasks': self.task_config_name
}
self._update_plugin(db_config_metadata_mapping)
def _update_plugin(self, mapping):
data_to_update = {}
for attribute, config in six.iteritems(mapping):
config_file_path = os.path.join(self.plugin_path, config)
attribute_data = self._load_config(config_file_path)
# Plugin columns have constraints for nullable data, so
# we need to check it
if attribute_data:
if attribute == 'attributes_metadata':
attribute_data = attribute_data['attributes']
data_to_update[attribute] = attribute_data
Plugin.update(self.plugin, data_to_update)
class PluginAdapterV4(PluginAdapterV3):
"""Plugin wrapper class for package version 4.0.0"""
components = 'components.yaml'
def sync_metadata_to_db(self):
super(PluginAdapterV4, self).sync_metadata_to_db()
db_config_metadata_mapping = {
'components_metadata': self.components
}
self._update_plugin(db_config_metadata_mapping)
__version_mapping = {
'1.0.': PluginAdapterV1,
'2.0.': PluginAdapterV2,
'3.0.': Plugin | full_name | identifier_name |
adapters.py | 0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import glob
import os
from distutils.version import StrictVersion
from urlparse import urljoin
import six
import yaml
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.objects.plugin import ClusterPlugins
from nailgun.objects.plugin import Plugin
from nailgun.settings import settings
@six.add_metaclass(abc.ABCMeta)
class PluginAdapterBase(object):
"""Implements wrapper for plugin db model configuration files logic
1. Uploading plugin provided cluster attributes
2. Uploading tasks
3. Enabling/Disabling of plugin based on cluster attributes
4. Providing repositories/deployment scripts related info to clients
"""
environment_config_name = 'environment_config.yaml'
plugin_metadata = 'metadata.yaml'
task_config_name = 'tasks.yaml'
def __init__(self, plugin):
self.plugin = plugin
self.plugin_path = os.path.join(
settings.PLUGINS_PATH,
self.path_name)
self.tasks = []
@abc.abstractmethod
def path_name(self):
"""A name which is used to create path to plugin scripts and repos"""
def sync_metadata_to_db(self):
"""Sync metadata from config yaml files into DB"""
metadata_file_path = os.path.join(
self.plugin_path, self.plugin_metadata)
metadata = self._load_config(metadata_file_path) or {}
Plugin.update(self.plugin, metadata)
def _load_config(self, config):
if os.access(config, os.R_OK):
with open(config, "r") as conf:
try:
return yaml.safe_load(conf.read())
except yaml.YAMLError as exc:
logger.warning(exc)
raise errors.ParseError(
'Problem with loading YAML file {0}'.format(config))
else:
logger.warning("Config {0} is not readable.".format(config))
def _load_tasks(self, config):
data = self._load_config(config)
for item in data:
# backward compatibility for plugins added in version 6.0,
# and it is expected that task with role: [controller]
# will be executed on all controllers
if (StrictVersion(self.plugin.package_version)
== StrictVersion('1.0')
and isinstance(item['role'], list)
and 'controller' in item['role']):
item['role'].append('primary-controller')
return data
def set_cluster_tasks(self):
"""Load plugins provided tasks and set them to instance tasks variable
Provided tasks are loaded from tasks config file.
"""
task_yaml = os.path.join(
self.plugin_path, self.task_config_name)
if os.path.exists(task_yaml):
self.tasks = self._load_tasks(task_yaml)
def filter_tasks(self, tasks, stage):
filtered = []
for task in tasks:
if stage and stage == task.get('stage'):
filtered.append(task)
return filtered
@property
def plugin_release_versions(self):
if not self.plugin.releases:
return set()
return set([rel['version'] for rel in self.plugin.releases])
@property
def full_name(self):
return u'{0}-{1}'.format(self.plugin.name, self.plugin.version)
@property
def slaves_scripts_path(self):
return settings.PLUGINS_SLAVES_SCRIPTS_PATH.format(
plugin_name=self.path_name)
@property
def deployment_tasks(self):
deployment_tasks = []
for task in self.plugin.deployment_tasks:
if task.get('parameters'):
task['parameters'].setdefault('cwd', self.slaves_scripts_path)
deployment_tasks.append(task)
return deployment_tasks
@property
def volumes_metadata(self):
return self.plugin.volumes_metadata
@property
def components_metadata(self):
return self.plugin.components_metadata
@property
def releases(self):
return self.plugin.releases
@property
def normalized_roles_metadata(self):
"""Block plugin disabling if nodes with plugin-provided roles exist"""
result = {}
for role, meta in six.iteritems(self.plugin.roles_metadata):
condition = "settings:{0}.metadata.enabled == false".format(
self.plugin.name)
meta = copy.copy(meta)
meta['restrictions'] = [condition] + meta.get('restrictions', [])
result[role] = meta
return result
def get_release_info(self, release):
"""Get plugin release information which corresponds to given release"""
rel_os = release.operating_system.lower()
version = release.version
release_info = filter(
lambda r: (
r['os'] == rel_os and
ClusterPlugins.is_release_version_compatible(version,
r['version'])),
self.plugin.releases)
return release_info[0]
def repo_files(self, cluster):
release_info = self.get_release_info(cluster.release)
repo_path = os.path.join(
settings.PLUGINS_PATH,
self.path_name,
release_info['repository_path'],
'*')
return glob.glob(repo_path)
def repo_url(self, cluster):
release_info = self.get_release_info(cluster.release)
repo_base = settings.PLUGINS_REPO_URL.format(
master_ip=settings.MASTER_IP,
plugin_name=self.path_name)
return urljoin(repo_base, release_info['repository_path'])
def master_scripts_path(self, cluster):
release_info = self.get_release_info(cluster.release)
# NOTE(eli): we cannot user urljoin here, because it
# works wrong, if protocol is rsync
base_url = settings.PLUGINS_SLAVES_RSYNC.format(
master_ip=settings.MASTER_IP,
plugin_name=self.path_name)
return '{0}{1}'.format(
base_url,
release_info['deployment_scripts_path'])
class PluginAdapterV1(PluginAdapterBase):
"""Plugins attributes class for package version 1.0.0"""
@property
def path_name(self):
"""Returns a name and full version
e.g. if there is a plugin with name "plugin_name" and version
is "1.0.0", the method returns "plugin_name-1.0.0"
"""
return self.full_name
class PluginAdapterV2(PluginAdapterBase):
"""Plugins attributes class for package version 2.0.0"""
@property
def path_name(self):
"""Returns a name and major version of the plugin
e.g. if there is a plugin with name "plugin_name" and version
is "1.0.0", the method returns "plugin_name-1.0".
It's different from previous version because in previous
version we did not have plugin updates, in 2.0.0 version
we should expect different plugin path.
See blueprint: https://blueprints.launchpad.net/fuel/+spec
/plugins-security-fixes-delivery
"""
return u'{0}-{1}'.format(self.plugin.name, self._major_version)
@property
def _major_version(self):
"""Returns major version of plugin's version
e.g. if plugin has 1.2.3 version, the method returns 1.2
"""
version_tuple = StrictVersion(self.plugin.version).version
major = '.'.join(map(str, version_tuple[:2]))
return major
class PluginAdapterV3(PluginAdapterV2):
"""Plugin wrapper class for package version 3.0.0"""
node_roles_config_name = 'node_roles.yaml'
volumes_config_name = 'volumes.yaml'
deployment_tasks_config_name = 'deployment_tasks.yaml'
network_roles_config_name = 'network_roles.yaml'
def sync_metadata_to_db(self):
"""Sync metadata from all config yaml files to DB"""
super(PluginAdapterV3, self).sync_metadata_to_db()
db_config_metadata_mapping = {
'attributes_metadata': self.environment_config_name,
'roles_metadata': self.node_roles_config_name,
'volumes_metadata': self.volumes_config_name,
'network_roles_metadata': self.network_roles_config_name,
'deployment_tasks': self.deployment_tasks_config_name,
'tasks': self.task_config_name
}
self._update_plugin(db_config_metadata_mapping)
def _update_plugin(self, mapping):
data_to_update = {}
for attribute, config in six.iteritems(mapping):
config_file_path = os.path.join(self.plugin_path, config)
attribute_data = self._load_config(config_file_path)
# Plugin columns have constraints for nullable data, so
# we need to check it
if attribute_data:
if attribute == 'attributes_metadata':
attribute_data = attribute_data['attributes'] | class PluginAdapterV4(PluginAdapterV3):
"""Plugin wrapper class for package version 4.0.0"""
components = 'components.yaml'
def sync_metadata_to_db(self):
super(PluginAdapterV4, self).sync_metadata_to_db()
db_config_metadata_mapping = {
'components_metadata': self.components
}
self._update_plugin(db_config_metadata_mapping)
__version_mapping = {
'1.0.': PluginAdapterV1,
'2.0.': PluginAdapterV2,
'3.0.': Plugin | data_to_update[attribute] = attribute_data
Plugin.update(self.plugin, data_to_update)
| random_line_split |
adapters.py |
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import glob
import os
from distutils.version import StrictVersion
from urlparse import urljoin
import six
import yaml
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.objects.plugin import ClusterPlugins
from nailgun.objects.plugin import Plugin
from nailgun.settings import settings
@six.add_metaclass(abc.ABCMeta)
class PluginAdapterBase(object):
"""Implements wrapper for plugin db model configuration files logic
1. Uploading plugin provided cluster attributes
2. Uploading tasks
3. Enabling/Disabling of plugin based on cluster attributes
4. Providing repositories/deployment scripts related info to clients
"""
environment_config_name = 'environment_config.yaml'
plugin_metadata = 'metadata.yaml'
task_config_name = 'tasks.yaml'
def __init__(self, plugin):
self.plugin = plugin
self.plugin_path = os.path.join(
settings.PLUGINS_PATH,
self.path_name)
self.tasks = []
@abc.abstractmethod
def path_name(self):
"""A name which is used to create path to plugin scripts and repos"""
def sync_metadata_to_db(self):
"""Sync metadata from config yaml files into DB"""
metadata_file_path = os.path.join(
self.plugin_path, self.plugin_metadata)
metadata = self._load_config(metadata_file_path) or {}
Plugin.update(self.plugin, metadata)
def _load_config(self, config):
if os.access(config, os.R_OK):
|
else:
logger.warning("Config {0} is not readable.".format(config))
def _load_tasks(self, config):
data = self._load_config(config)
for item in data:
# backward compatibility for plugins added in version 6.0,
# and it is expected that task with role: [controller]
# will be executed on all controllers
if (StrictVersion(self.plugin.package_version)
== StrictVersion('1.0')
and isinstance(item['role'], list)
and 'controller' in item['role']):
item['role'].append('primary-controller')
return data
def set_cluster_tasks(self):
"""Load plugins provided tasks and set them to instance tasks variable
Provided tasks are loaded from tasks config file.
"""
task_yaml = os.path.join(
self.plugin_path, self.task_config_name)
if os.path.exists(task_yaml):
self.tasks = self._load_tasks(task_yaml)
def filter_tasks(self, tasks, stage):
filtered = []
for task in tasks:
if stage and stage == task.get('stage'):
filtered.append(task)
return filtered
@property
def plugin_release_versions(self):
if not self.plugin.releases:
return set()
return set([rel['version'] for rel in self.plugin.releases])
@property
def full_name(self):
return u'{0}-{1}'.format(self.plugin.name, self.plugin.version)
@property
def slaves_scripts_path(self):
return settings.PLUGINS_SLAVES_SCRIPTS_PATH.format(
plugin_name=self.path_name)
@property
def deployment_tasks(self):
deployment_tasks = []
for task in self.plugin.deployment_tasks:
if task.get('parameters'):
task['parameters'].setdefault('cwd', self.slaves_scripts_path)
deployment_tasks.append(task)
return deployment_tasks
@property
def volumes_metadata(self):
return self.plugin.volumes_metadata
@property
def components_metadata(self):
return self.plugin.components_metadata
@property
def releases(self):
return self.plugin.releases
@property
def normalized_roles_metadata(self):
"""Block plugin disabling if nodes with plugin-provided roles exist"""
result = {}
for role, meta in six.iteritems(self.plugin.roles_metadata):
condition = "settings:{0}.metadata.enabled == false".format(
self.plugin.name)
meta = copy.copy(meta)
meta['restrictions'] = [condition] + meta.get('restrictions', [])
result[role] = meta
return result
def get_release_info(self, release):
"""Get plugin release information which corresponds to given release"""
rel_os = release.operating_system.lower()
version = release.version
release_info = filter(
lambda r: (
r['os'] == rel_os and
ClusterPlugins.is_release_version_compatible(version,
r['version'])),
self.plugin.releases)
return release_info[0]
def repo_files(self, cluster):
release_info = self.get_release_info(cluster.release)
repo_path = os.path.join(
settings.PLUGINS_PATH,
self.path_name,
release_info['repository_path'],
'*')
return glob.glob(repo_path)
def repo_url(self, cluster):
release_info = self.get_release_info(cluster.release)
repo_base = settings.PLUGINS_REPO_URL.format(
master_ip=settings.MASTER_IP,
plugin_name=self.path_name)
return urljoin(repo_base, release_info['repository_path'])
def master_scripts_path(self, cluster):
release_info = self.get_release_info(cluster.release)
# NOTE(eli): we cannot user urljoin here, because it
# works wrong, if protocol is rsync
base_url = settings.PLUGINS_SLAVES_RSYNC.format(
master_ip=settings.MASTER_IP,
plugin_name=self.path_name)
return '{0}{1}'.format(
base_url,
release_info['deployment_scripts_path'])
class PluginAdapterV1(PluginAdapterBase):
"""Plugins attributes class for package version 1.0.0"""
@property
def path_name(self):
"""Returns a name and full version
e.g. if there is a plugin with name "plugin_name" and version
is "1.0.0", the method returns "plugin_name-1.0.0"
"""
return self.full_name
class PluginAdapterV2(PluginAdapterBase):
"""Plugins attributes class for package version 2.0.0"""
@property
def path_name(self):
"""Returns a name and major version of the plugin
e.g. if there is a plugin with name "plugin_name" and version
is "1.0.0", the method returns "plugin_name-1.0".
It's different from previous version because in previous
version we did not have plugin updates, in 2.0.0 version
we should expect different plugin path.
See blueprint: https://blueprints.launchpad.net/fuel/+spec
/plugins-security-fixes-delivery
"""
return u'{0}-{1}'.format(self.plugin.name, self._major_version)
@property
def _major_version(self):
"""Returns major version of plugin's version
e.g. if plugin has 1.2.3 version, the method returns 1.2
"""
version_tuple = StrictVersion(self.plugin.version).version
major = '.'.join(map(str, version_tuple[:2]))
return major
class PluginAdapterV3(PluginAdapterV2):
"""Plugin wrapper class for package version 3.0.0"""
node_roles_config_name = 'node_roles.yaml'
volumes_config_name = 'volumes.yaml'
deployment_tasks_config_name = 'deployment_tasks.yaml'
network_roles_config_name = 'network_roles.yaml'
def sync_metadata_to_db(self):
"""Sync metadata from all config yaml files to DB"""
super(PluginAdapterV3, self).sync_metadata_to_db()
db_config_metadata_mapping = {
'attributes_metadata': self.environment_config_name,
'roles_metadata': self.node_roles_config_name,
'volumes_metadata': self.volumes_config_name,
'network_roles_metadata': self.network_roles_config_name,
'deployment_tasks': self.deployment_tasks_config_name,
'tasks': self.task_config_name
}
self._update_plugin(db_config_metadata_mapping)
def _update_plugin(self, mapping):
data_to_update = {}
for attribute, config in six.iteritems(mapping):
config_file_path = os.path.join(self.plugin_path, config)
attribute_data = self._load_config(config_file_path)
# Plugin columns have constraints for nullable data, so
# we need to check it
if attribute_data:
if attribute == 'attributes_metadata':
attribute_data = attribute_data['attributes']
data_to_update[attribute] = attribute_data
Plugin.update(self.plugin, data_to_update)
class PluginAdapterV4(PluginAdapterV3):
"""Plugin wrapper class for package version 4.0.0"""
components = 'components.yaml'
def sync_metadata_to_db(self):
super(PluginAdapterV4, self).sync_metadata_to_db()
db_config_metadata_mapping = {
'components_metadata': self.components
}
self._update_plugin(db_config_metadata_mapping)
__version_mapping = {
'1.0.': PluginAdapterV1,
'2.0.': PluginAdapterV2,
'3.0.': Plugin | with open(config, "r") as conf:
try:
return yaml.safe_load(conf.read())
except yaml.YAMLError as exc:
logger.warning(exc)
raise errors.ParseError(
'Problem with loading YAML file {0}'.format(config)) | conditional_block |
adapters.py | 0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import glob
import os
from distutils.version import StrictVersion
from urlparse import urljoin
import six
import yaml
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.objects.plugin import ClusterPlugins
from nailgun.objects.plugin import Plugin
from nailgun.settings import settings
@six.add_metaclass(abc.ABCMeta)
class PluginAdapterBase(object):
"""Implements wrapper for plugin db model configuration files logic
1. Uploading plugin provided cluster attributes
2. Uploading tasks
3. Enabling/Disabling of plugin based on cluster attributes
4. Providing repositories/deployment scripts related info to clients
"""
environment_config_name = 'environment_config.yaml'
plugin_metadata = 'metadata.yaml'
task_config_name = 'tasks.yaml'
def __init__(self, plugin):
self.plugin = plugin
self.plugin_path = os.path.join(
settings.PLUGINS_PATH,
self.path_name)
self.tasks = []
@abc.abstractmethod
def path_name(self):
"""A name which is used to create path to plugin scripts and repos"""
def sync_metadata_to_db(self):
"""Sync metadata from config yaml files into DB"""
metadata_file_path = os.path.join(
self.plugin_path, self.plugin_metadata)
metadata = self._load_config(metadata_file_path) or {}
Plugin.update(self.plugin, metadata)
def _load_config(self, config):
if os.access(config, os.R_OK):
with open(config, "r") as conf:
try:
return yaml.safe_load(conf.read())
except yaml.YAMLError as exc:
logger.warning(exc)
raise errors.ParseError(
'Problem with loading YAML file {0}'.format(config))
else:
logger.warning("Config {0} is not readable.".format(config))
def _load_tasks(self, config):
data = self._load_config(config)
for item in data:
# backward compatibility for plugins added in version 6.0,
# and it is expected that task with role: [controller]
# will be executed on all controllers
if (StrictVersion(self.plugin.package_version)
== StrictVersion('1.0')
and isinstance(item['role'], list)
and 'controller' in item['role']):
item['role'].append('primary-controller')
return data
def set_cluster_tasks(self):
"""Load plugins provided tasks and set them to instance tasks variable
Provided tasks are loaded from tasks config file.
"""
task_yaml = os.path.join(
self.plugin_path, self.task_config_name)
if os.path.exists(task_yaml):
self.tasks = self._load_tasks(task_yaml)
def filter_tasks(self, tasks, stage):
filtered = []
for task in tasks:
if stage and stage == task.get('stage'):
filtered.append(task)
return filtered
@property
def plugin_release_versions(self):
if not self.plugin.releases:
return set()
return set([rel['version'] for rel in self.plugin.releases])
@property
def full_name(self):
return u'{0}-{1}'.format(self.plugin.name, self.plugin.version)
@property
def slaves_scripts_path(self):
return settings.PLUGINS_SLAVES_SCRIPTS_PATH.format(
plugin_name=self.path_name)
@property
def deployment_tasks(self):
deployment_tasks = []
for task in self.plugin.deployment_tasks:
if task.get('parameters'):
task['parameters'].setdefault('cwd', self.slaves_scripts_path)
deployment_tasks.append(task)
return deployment_tasks
@property
def volumes_metadata(self):
return self.plugin.volumes_metadata
@property
def components_metadata(self):
return self.plugin.components_metadata
@property
def releases(self):
return self.plugin.releases
@property
def normalized_roles_metadata(self):
"""Block plugin disabling if nodes with plugin-provided roles exist"""
result = {}
for role, meta in six.iteritems(self.plugin.roles_metadata):
condition = "settings:{0}.metadata.enabled == false".format(
self.plugin.name)
meta = copy.copy(meta)
meta['restrictions'] = [condition] + meta.get('restrictions', [])
result[role] = meta
return result
def get_release_info(self, release):
"""Get plugin release information which corresponds to given release"""
rel_os = release.operating_system.lower()
version = release.version
release_info = filter(
lambda r: (
r['os'] == rel_os and
ClusterPlugins.is_release_version_compatible(version,
r['version'])),
self.plugin.releases)
return release_info[0]
def repo_files(self, cluster):
release_info = self.get_release_info(cluster.release)
repo_path = os.path.join(
settings.PLUGINS_PATH,
self.path_name,
release_info['repository_path'],
'*')
return glob.glob(repo_path)
def repo_url(self, cluster):
release_info = self.get_release_info(cluster.release)
repo_base = settings.PLUGINS_REPO_URL.format(
master_ip=settings.MASTER_IP,
plugin_name=self.path_name)
return urljoin(repo_base, release_info['repository_path'])
def master_scripts_path(self, cluster):
release_info = self.get_release_info(cluster.release)
# NOTE(eli): we cannot user urljoin here, because it
# works wrong, if protocol is rsync
base_url = settings.PLUGINS_SLAVES_RSYNC.format(
master_ip=settings.MASTER_IP,
plugin_name=self.path_name)
return '{0}{1}'.format(
base_url,
release_info['deployment_scripts_path'])
class PluginAdapterV1(PluginAdapterBase):
"""Plugins attributes class for package version 1.0.0"""
@property
def path_name(self):
"""Returns a name and full version
e.g. if there is a plugin with name "plugin_name" and version
is "1.0.0", the method returns "plugin_name-1.0.0"
"""
return self.full_name
class PluginAdapterV2(PluginAdapterBase):
"""Plugins attributes class for package version 2.0.0"""
@property
def path_name(self):
"""Returns a name and major version of the plugin
e.g. if there is a plugin with name "plugin_name" and version
is "1.0.0", the method returns "plugin_name-1.0".
It's different from previous version because in previous
version we did not have plugin updates, in 2.0.0 version
we should expect different plugin path.
See blueprint: https://blueprints.launchpad.net/fuel/+spec
/plugins-security-fixes-delivery
"""
return u'{0}-{1}'.format(self.plugin.name, self._major_version)
@property
def _major_version(self):
"""Returns major version of plugin's version
e.g. if plugin has 1.2.3 version, the method returns 1.2
"""
version_tuple = StrictVersion(self.plugin.version).version
major = '.'.join(map(str, version_tuple[:2]))
return major
class PluginAdapterV3(PluginAdapterV2):
| self._update_plugin(db_config_metadata_mapping)
def _update_plugin(self, mapping):
data_to_update = {}
for attribute, config in six.iteritems(mapping):
config_file_path = os.path.join(self.plugin_path, config)
attribute_data = self._load_config(config_file_path)
# Plugin columns have constraints for nullable data, so
# we need to check it
if attribute_data:
if attribute == 'attributes_metadata':
attribute_data = attribute_data['attributes']
data_to_update[attribute] = attribute_data
Plugin.update(self.plugin, data_to_update)
class PluginAdapterV4(PluginAdapterV3):
"""Plugin wrapper class for package version 4.0.0"""
components = 'components.yaml'
def sync_metadata_to_db(self):
super(PluginAdapterV4, self).sync_metadata_to_db()
db_config_metadata_mapping = {
'components_metadata': self.components
}
self._update_plugin(db_config_metadata_mapping)
__version_mapping = {
'1.0.': PluginAdapterV1,
'2.0.': PluginAdapterV2,
'3.0.': Plugin | """Plugin wrapper class for package version 3.0.0"""
node_roles_config_name = 'node_roles.yaml'
volumes_config_name = 'volumes.yaml'
deployment_tasks_config_name = 'deployment_tasks.yaml'
network_roles_config_name = 'network_roles.yaml'
def sync_metadata_to_db(self):
"""Sync metadata from all config yaml files to DB"""
super(PluginAdapterV3, self).sync_metadata_to_db()
db_config_metadata_mapping = {
'attributes_metadata': self.environment_config_name,
'roles_metadata': self.node_roles_config_name,
'volumes_metadata': self.volumes_config_name,
'network_roles_metadata': self.network_roles_config_name,
'deployment_tasks': self.deployment_tasks_config_name,
'tasks': self.task_config_name
}
| identifier_body |
food.py | , which will be used to replace the pronouns "He" and "She".
lastMaleName = ''
lastFemaleName = ''
index = 0
newText = ''
for i in range(wordCount):
word = classified_text[i][0]
partOfSpeech = classified_text[i][1]
if word == 'He' or word == 'he':
if lastMaleName != '':
newText = newText + lastMaleName
else:
newText = newText + word
elif word == 'She' or word == 'she':
if lastFemaleName != '':
newText = newText + lastFemaleName
else:
newText = newText + word
elif word == 'I':
newText = newText + username
else:
newText = newText + word
if partOfSpeech == 'PERSON':
if "female" in det.get_gender(word):
lastFemaleName = word
elif "male" in det.get_gender(word):
lastMaleName = word
index = index + len(word)
if index < len(text) and text[index] == ' ':
index = index + 1
newText += ' '
return newText
def | (text):
'''
This method splits the text into substrings, where each begins with a name
and continues until reaching the next name. It will return the list of substrings
and a list that contains the name in each substring.
'''
# Set your own path for the classification model and Stanford tagged jar file of StanfordNERTagger.
st = StanfordNERTagger(
'/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz',
'/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/stanford-ner.jar',
encoding='utf-8')
tokenized_text = word_tokenize(text)
classified_text = st.tag(tokenized_text)
wordCount = len(tokenized_text)
# charIndexes stores the starting indexes for each name from the text.
charIndexes = []
charCounter = 0
newCharCounter = 0
substrings = []
names = []
for i in range(wordCount):
word = classified_text[i][0]
partOfSpeech = classified_text[i][1]
if partOfSpeech == 'PERSON':
newCharCounter = text.find(word, charCounter)
charIndexes.append(newCharCounter)
charCounter = newCharCounter + 1
names.append(classified_text[i][0])
for i in range(len(charIndexes)):
currIndex = charIndexes[i]
if i == len(charIndexes) - 1:
substrings.append(text[currIndex: ])
else:
nextIndex = charIndexes[i + 1]
substrings.append(text[currIndex: nextIndex])
return substrings, names
def get_diet(substrings, names):
'''
This method uses the substrings to determine the foods, nutrients, and allergens consumed by each person.
It will return a dictionary containing the dietary information for each person.
'''
'''
"id" and "key" are used to make requests to the Edamam Food API,
and they are obtained by registering for an account from Edamam.
'''
id = '6bb24f34'
key = 'bcd38e86ec9f271288974f431e0c94e6'
diet = {}
for name in names:
if name not in diet:
diet[name] = {}
diet[name]['foods'] = []
diet[name]['quantities'] = []
diet[name]['allergens'] = []
diet[name]['nutrients'] = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [], "Calcium": [],
"Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [], "Vitamin K": []}
# For each substring, find the person's name and update the person's dietary information using the foods in the substring.
for i in range(len(substrings)):
substring = substrings[i]
name = names[i]
# Instantiate foodProcessor.
processor = foodProcessor.foodProcessor(key, id)
'''
Get list of foods, foodURIs, measureURIs, and quantities for each food.
foodURIs and measureURIs are used to get the nutrients for each food.
'''
foods, foodIds, measureURIs, quantities = processor.get_food_list(substring)
# Get allergens and nutrients from all foods.
details = processor.get_food_details(foodIds, measureURIs)
allergens = []
nutrients = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [],
"Calcium": [], "Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [],
"Vitamin K": []}
# Add the foods and quantities to the person's diet.
diet[name]['foods'].extend(foods)
diet[name]['quantities'].extend(quantities)
# For each food, add the allergens and nutrients to the person's diet.
for i in range(len(details)):
food = details[i]
diet[name]['allergens'].append(format_allergens(food['allergens']))
for nutrient in nutrients:
diet[name]['nutrients'][nutrient].append(food["nutrients"][nutrient])
return diet
def format_allergens(allergens):
'''
This method concatenates the list of allergens in each food to a string.
'''
if len(allergens) == 1:
return allergens[0]
algs = ''
for i in range(len(allergens)):
for j in range(len(allergens[i])):
if j == len(allergens[i]) - 1:
algs += allergens[i][j]
if i != len(allergens) - 1:
algs += ', '
else:
algs += allergens[i][j]
return algs
def log_diet(diet, rawText):
'''
This method uses the diet dictionary to log the dietary information for each person in the corresponding sheet.
It will also update everyone's summary log sheet.
'''
# Instantiate foodLog
flog = foodLog.foodLog()
cupertino = timezone('US/Pacific')
now = datetime.now(cupertino)
date = now.strftime("%B %d, %Y")
time = now.strftime("%I:%M %p")
credentials = flog.sheet_oauth()
for name in diet:
# ip contains the values that will be appended onto the next row of the Google Spreadsheet.
ip = []
ip.append(date)
ip.append(time)
ip.append(rawText)
'''
If the person consumed at least one food item, then construct a new row
containing dietary information to be logged in the person's sheet.
'''
if len(diet[name]['foods']) > 0:
ip.append(diet[name]['foods'][0])
ip.append(diet[name]['quantities'][0])
if len(diet[name]['allergens'][0]) == 0:
ip.append("NONE")
else:
ip.append(diet[name]['allergens'][0])
for nutrient in diet[name]['nutrients']:
ip.append(diet[name]['nutrients'][nutrient][0])
payload = {"values": [ip]}
flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload)
for i in range(1, len(diet[name]['foods'])):
ip = ["", "", "", diet[name]['foods'][i], diet[name]['quantities'][i]]
if len(diet[name]['allergens'][i]) == 0:
ip.append("NONE")
else:
ip.append(diet[name]['allergens'][i])
for nutrient in diet[name]['nutrients']:
ip.append(diet[name]['nutrients'][nutrient][i])
payload = {"values": [ip]}
flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload)
# Construct a new row containing nutrient totals to be logged in the person's sheet.
ip = ["", "", "", "", "", ""]
for nutrient in diet[name]['nutrients']:
total = 0
for quantity in diet[name]['nutrients'][nutrient]:
total += quantity
ip.append("Total: " + str(round(total, 1)))
payload = {"values": [ip]}
flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', | get_substrings | identifier_name |
food.py | , which will be used to replace the pronouns "He" and "She".
lastMaleName = ''
lastFemaleName = ''
index = 0
newText = ''
for i in range(wordCount):
word = classified_text[i][0]
partOfSpeech = classified_text[i][1]
if word == 'He' or word == 'he':
if lastMaleName != '':
newText = newText + lastMaleName
else:
newText = newText + word
elif word == 'She' or word == 'she':
if lastFemaleName != '':
newText = newText + lastFemaleName
else:
newText = newText + word
elif word == 'I':
newText = newText + username
else:
newText = newText + word
if partOfSpeech == 'PERSON':
|
index = index + len(word)
if index < len(text) and text[index] == ' ':
index = index + 1
newText += ' '
return newText
def get_substrings(text):
'''
This method splits the text into substrings, where each begins with a name
and continues until reaching the next name. It will return the list of substrings
and a list that contains the name in each substring.
'''
# Set your own path for the classification model and Stanford tagged jar file of StanfordNERTagger.
st = StanfordNERTagger(
'/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz',
'/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/stanford-ner.jar',
encoding='utf-8')
tokenized_text = word_tokenize(text)
classified_text = st.tag(tokenized_text)
wordCount = len(tokenized_text)
# charIndexes stores the starting indexes for each name from the text.
charIndexes = []
charCounter = 0
newCharCounter = 0
substrings = []
names = []
for i in range(wordCount):
word = classified_text[i][0]
partOfSpeech = classified_text[i][1]
if partOfSpeech == 'PERSON':
newCharCounter = text.find(word, charCounter)
charIndexes.append(newCharCounter)
charCounter = newCharCounter + 1
names.append(classified_text[i][0])
for i in range(len(charIndexes)):
currIndex = charIndexes[i]
if i == len(charIndexes) - 1:
substrings.append(text[currIndex: ])
else:
nextIndex = charIndexes[i + 1]
substrings.append(text[currIndex: nextIndex])
return substrings, names
def get_diet(substrings, names):
'''
This method uses the substrings to determine the foods, nutrients, and allergens consumed by each person.
It will return a dictionary containing the dietary information for each person.
'''
'''
"id" and "key" are used to make requests to the Edamam Food API,
and they are obtained by registering for an account from Edamam.
'''
id = '6bb24f34'
key = 'bcd38e86ec9f271288974f431e0c94e6'
diet = {}
for name in names:
if name not in diet:
diet[name] = {}
diet[name]['foods'] = []
diet[name]['quantities'] = []
diet[name]['allergens'] = []
diet[name]['nutrients'] = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [], "Calcium": [],
"Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [], "Vitamin K": []}
# For each substring, find the person's name and update the person's dietary information using the foods in the substring.
for i in range(len(substrings)):
substring = substrings[i]
name = names[i]
# Instantiate foodProcessor.
processor = foodProcessor.foodProcessor(key, id)
'''
Get list of foods, foodURIs, measureURIs, and quantities for each food.
foodURIs and measureURIs are used to get the nutrients for each food.
'''
foods, foodIds, measureURIs, quantities = processor.get_food_list(substring)
# Get allergens and nutrients from all foods.
details = processor.get_food_details(foodIds, measureURIs)
allergens = []
nutrients = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [],
"Calcium": [], "Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [],
"Vitamin K": []}
# Add the foods and quantities to the person's diet.
diet[name]['foods'].extend(foods)
diet[name]['quantities'].extend(quantities)
# For each food, add the allergens and nutrients to the person's diet.
for i in range(len(details)):
food = details[i]
diet[name]['allergens'].append(format_allergens(food['allergens']))
for nutrient in nutrients:
diet[name]['nutrients'][nutrient].append(food["nutrients"][nutrient])
return diet
def format_allergens(allergens):
'''
This method concatenates the list of allergens in each food to a string.
'''
if len(allergens) == 1:
return allergens[0]
algs = ''
for i in range(len(allergens)):
for j in range(len(allergens[i])):
if j == len(allergens[i]) - 1:
algs += allergens[i][j]
if i != len(allergens) - 1:
algs += ', '
else:
algs += allergens[i][j]
return algs
def log_diet(diet, rawText):
'''
This method uses the diet dictionary to log the dietary information for each person in the corresponding sheet.
It will also update everyone's summary log sheet.
'''
# Instantiate foodLog
flog = foodLog.foodLog()
cupertino = timezone('US/Pacific')
now = datetime.now(cupertino)
date = now.strftime("%B %d, %Y")
time = now.strftime("%I:%M %p")
credentials = flog.sheet_oauth()
for name in diet:
# ip contains the values that will be appended onto the next row of the Google Spreadsheet.
ip = []
ip.append(date)
ip.append(time)
ip.append(rawText)
'''
If the person consumed at least one food item, then construct a new row
containing dietary information to be logged in the person's sheet.
'''
if len(diet[name]['foods']) > 0:
ip.append(diet[name]['foods'][0])
ip.append(diet[name]['quantities'][0])
if len(diet[name]['allergens'][0]) == 0:
ip.append("NONE")
else:
ip.append(diet[name]['allergens'][0])
for nutrient in diet[name]['nutrients']:
ip.append(diet[name]['nutrients'][nutrient][0])
payload = {"values": [ip]}
flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload)
for i in range(1, len(diet[name]['foods'])):
ip = ["", "", "", diet[name]['foods'][i], diet[name]['quantities'][i]]
if len(diet[name]['allergens'][i]) == 0:
ip.append("NONE")
else:
ip.append(diet[name]['allergens'][i])
for nutrient in diet[name]['nutrients']:
ip.append(diet[name]['nutrients'][nutrient][i])
payload = {"values": [ip]}
flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload)
# Construct a new row containing nutrient totals to be logged in the person's sheet.
ip = ["", "", "", "", "", ""]
for nutrient in diet[name]['nutrients']:
total = 0
for quantity in diet[name]['nutrients'][nutrient]:
total += quantity
ip.append("Total: " + str(round(total, 1)))
payload = {"values": [ip]}
flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name | if "female" in det.get_gender(word):
lastFemaleName = word
elif "male" in det.get_gender(word):
lastMaleName = word | conditional_block |
food.py | , which will be used to replace the pronouns "He" and "She".
lastMaleName = ''
lastFemaleName = ''
index = 0
newText = ''
for i in range(wordCount):
word = classified_text[i][0]
partOfSpeech = classified_text[i][1]
if word == 'He' or word == 'he':
if lastMaleName != '':
newText = newText + lastMaleName
else:
newText = newText + word
elif word == 'She' or word == 'she':
if lastFemaleName != '':
newText = newText + lastFemaleName
else:
newText = newText + word
elif word == 'I':
newText = newText + username
else:
newText = newText + word
if partOfSpeech == 'PERSON':
if "female" in det.get_gender(word):
lastFemaleName = word
elif "male" in det.get_gender(word):
lastMaleName = word
index = index + len(word)
if index < len(text) and text[index] == ' ':
index = index + 1
newText += ' '
return newText
def get_substrings(text):
'''
This method splits the text into substrings, where each begins with a name
and continues until reaching the next name. It will return the list of substrings
and a list that contains the name in each substring.
'''
# Set your own path for the classification model and Stanford tagged jar file of StanfordNERTagger.
st = StanfordNERTagger(
'/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz',
'/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/stanford-ner.jar',
encoding='utf-8')
tokenized_text = word_tokenize(text)
classified_text = st.tag(tokenized_text)
wordCount = len(tokenized_text)
# charIndexes stores the starting indexes for each name from the text.
charIndexes = []
charCounter = 0
newCharCounter = 0
substrings = []
names = []
for i in range(wordCount):
word = classified_text[i][0]
partOfSpeech = classified_text[i][1]
if partOfSpeech == 'PERSON':
newCharCounter = text.find(word, charCounter)
charIndexes.append(newCharCounter)
charCounter = newCharCounter + 1
names.append(classified_text[i][0])
for i in range(len(charIndexes)):
currIndex = charIndexes[i]
if i == len(charIndexes) - 1:
substrings.append(text[currIndex: ])
else:
nextIndex = charIndexes[i + 1]
substrings.append(text[currIndex: nextIndex])
return substrings, names
def get_diet(substrings, names):
'''
This method uses the substrings to determine the foods, nutrients, and allergens consumed by each person.
It will return a dictionary containing the dietary information for each person.
'''
'''
"id" and "key" are used to make requests to the Edamam Food API,
and they are obtained by registering for an account from Edamam.
'''
id = '6bb24f34'
key = 'bcd38e86ec9f271288974f431e0c94e6'
diet = {}
for name in names:
if name not in diet:
diet[name] = {}
diet[name]['foods'] = []
diet[name]['quantities'] = []
diet[name]['allergens'] = []
diet[name]['nutrients'] = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [], "Calcium": [],
"Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [], "Vitamin K": []}
# For each substring, find the person's name and update the person's dietary information using the foods in the substring.
for i in range(len(substrings)):
substring = substrings[i]
name = names[i]
# Instantiate foodProcessor.
processor = foodProcessor.foodProcessor(key, id)
'''
Get list of foods, foodURIs, measureURIs, and quantities for each food.
foodURIs and measureURIs are used to get the nutrients for each food.
'''
foods, foodIds, measureURIs, quantities = processor.get_food_list(substring)
# Get allergens and nutrients from all foods.
details = processor.get_food_details(foodIds, measureURIs)
allergens = []
nutrients = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [],
"Calcium": [], "Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [],
"Vitamin K": []}
# Add the foods and quantities to the person's diet.
diet[name]['foods'].extend(foods)
diet[name]['quantities'].extend(quantities)
# For each food, add the allergens and nutrients to the person's diet.
for i in range(len(details)):
food = details[i]
diet[name]['allergens'].append(format_allergens(food['allergens']))
for nutrient in nutrients:
diet[name]['nutrients'][nutrient].append(food["nutrients"][nutrient])
return diet
def format_allergens(allergens):
'''
This method concatenates the list of allergens in each food to a string.
'''
if len(allergens) == 1:
return allergens[0]
algs = ''
for i in range(len(allergens)):
for j in range(len(allergens[i])):
if j == len(allergens[i]) - 1:
algs += allergens[i][j]
if i != len(allergens) - 1:
algs += ', '
else:
algs += allergens[i][j]
return algs
def log_diet(diet, rawText):
'''
This method uses the diet dictionary to log the dietary information for each person in the corresponding sheet.
It will also update everyone's summary log sheet.
'''
# Instantiate foodLog
flog = foodLog.foodLog()
cupertino = timezone('US/Pacific')
now = datetime.now(cupertino)
date = now.strftime("%B %d, %Y")
time = now.strftime("%I:%M %p")
credentials = flog.sheet_oauth()
for name in diet:
# ip contains the values that will be appended onto the next row of the Google Spreadsheet.
ip = []
ip.append(date)
ip.append(time)
ip.append(rawText)
'''
If the person consumed at least one food item, then construct a new row
containing dietary information to be logged in the person's sheet.
'''
if len(diet[name]['foods']) > 0:
ip.append(diet[name]['foods'][0])
ip.append(diet[name]['quantities'][0])
if len(diet[name]['allergens'][0]) == 0:
ip.append("NONE")
else:
ip.append(diet[name]['allergens'][0])
for nutrient in diet[name]['nutrients']:
ip.append(diet[name]['nutrients'][nutrient][0])
payload = {"values": [ip]}
flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload)
for i in range(1, len(diet[name]['foods'])):
ip = ["", "", "", diet[name]['foods'][i], diet[name]['quantities'][i]]
if len(diet[name]['allergens'][i]) == 0:
ip.append("NONE") | for nutrient in diet[name]['nutrients']:
ip.append(diet[name]['nutrients'][nutrient][i])
payload = {"values": [ip]}
flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload)
# Construct a new row containing nutrient totals to be logged in the person's sheet.
ip = ["", "", "", "", "", ""]
for nutrient in diet[name]['nutrients']:
total = 0
for quantity in diet[name]['nutrients'][nutrient]:
total += quantity
ip.append("Total: " + str(round(total, 1)))
payload = {"values": [ip]}
flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name | else:
ip.append(diet[name]['allergens'][i])
| random_line_split |
food.py | , which will be used to replace the pronouns "He" and "She".
lastMaleName = ''
lastFemaleName = ''
index = 0
newText = ''
for i in range(wordCount):
word = classified_text[i][0]
partOfSpeech = classified_text[i][1]
if word == 'He' or word == 'he':
if lastMaleName != '':
newText = newText + lastMaleName
else:
newText = newText + word
elif word == 'She' or word == 'she':
if lastFemaleName != '':
newText = newText + lastFemaleName
else:
newText = newText + word
elif word == 'I':
newText = newText + username
else:
newText = newText + word
if partOfSpeech == 'PERSON':
if "female" in det.get_gender(word):
lastFemaleName = word
elif "male" in det.get_gender(word):
lastMaleName = word
index = index + len(word)
if index < len(text) and text[index] == ' ':
index = index + 1
newText += ' '
return newText
def get_substrings(text):
'''
This method splits the text into substrings, where each begins with a name
and continues until reaching the next name. It will return the list of substrings
and a list that contains the name in each substring.
'''
# Set your own path for the classification model and Stanford tagged jar file of StanfordNERTagger.
st = StanfordNERTagger(
'/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz',
'/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/stanford-ner.jar',
encoding='utf-8')
tokenized_text = word_tokenize(text)
classified_text = st.tag(tokenized_text)
wordCount = len(tokenized_text)
# charIndexes stores the starting indexes for each name from the text.
charIndexes = []
charCounter = 0
newCharCounter = 0
substrings = []
names = []
for i in range(wordCount):
word = classified_text[i][0]
partOfSpeech = classified_text[i][1]
if partOfSpeech == 'PERSON':
newCharCounter = text.find(word, charCounter)
charIndexes.append(newCharCounter)
charCounter = newCharCounter + 1
names.append(classified_text[i][0])
for i in range(len(charIndexes)):
currIndex = charIndexes[i]
if i == len(charIndexes) - 1:
substrings.append(text[currIndex: ])
else:
nextIndex = charIndexes[i + 1]
substrings.append(text[currIndex: nextIndex])
return substrings, names
def get_diet(substrings, names):
'''
This method uses the substrings to determine the foods, nutrients, and allergens consumed by each person.
It will return a dictionary containing the dietary information for each person.
'''
'''
"id" and "key" are used to make requests to the Edamam Food API,
and they are obtained by registering for an account from Edamam.
'''
id = '6bb24f34'
key = 'bcd38e86ec9f271288974f431e0c94e6'
diet = {}
for name in names:
if name not in diet:
diet[name] = {}
diet[name]['foods'] = []
diet[name]['quantities'] = []
diet[name]['allergens'] = []
diet[name]['nutrients'] = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [], "Calcium": [],
"Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [], "Vitamin K": []}
# For each substring, find the person's name and update the person's dietary information using the foods in the substring.
for i in range(len(substrings)):
substring = substrings[i]
name = names[i]
# Instantiate foodProcessor.
processor = foodProcessor.foodProcessor(key, id)
'''
Get list of foods, foodURIs, measureURIs, and quantities for each food.
foodURIs and measureURIs are used to get the nutrients for each food.
'''
foods, foodIds, measureURIs, quantities = processor.get_food_list(substring)
# Get allergens and nutrients from all foods.
details = processor.get_food_details(foodIds, measureURIs)
allergens = []
nutrients = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [],
"Calcium": [], "Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [],
"Vitamin K": []}
# Add the foods and quantities to the person's diet.
diet[name]['foods'].extend(foods)
diet[name]['quantities'].extend(quantities)
# For each food, add the allergens and nutrients to the person's diet.
for i in range(len(details)):
food = details[i]
diet[name]['allergens'].append(format_allergens(food['allergens']))
for nutrient in nutrients:
diet[name]['nutrients'][nutrient].append(food["nutrients"][nutrient])
return diet
def format_allergens(allergens):
|
def log_diet(diet, rawText):
'''
This method uses the diet dictionary to log the dietary information for each person in the corresponding sheet.
It will also update everyone's summary log sheet.
'''
# Instantiate foodLog
flog = foodLog.foodLog()
cupertino = timezone('US/Pacific')
now = datetime.now(cupertino)
date = now.strftime("%B %d, %Y")
time = now.strftime("%I:%M %p")
credentials = flog.sheet_oauth()
for name in diet:
# ip contains the values that will be appended onto the next row of the Google Spreadsheet.
ip = []
ip.append(date)
ip.append(time)
ip.append(rawText)
'''
If the person consumed at least one food item, then construct a new row
containing dietary information to be logged in the person's sheet.
'''
if len(diet[name]['foods']) > 0:
ip.append(diet[name]['foods'][0])
ip.append(diet[name]['quantities'][0])
if len(diet[name]['allergens'][0]) == 0:
ip.append("NONE")
else:
ip.append(diet[name]['allergens'][0])
for nutrient in diet[name]['nutrients']:
ip.append(diet[name]['nutrients'][nutrient][0])
payload = {"values": [ip]}
flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload)
for i in range(1, len(diet[name]['foods'])):
ip = ["", "", "", diet[name]['foods'][i], diet[name]['quantities'][i]]
if len(diet[name]['allergens'][i]) == 0:
ip.append("NONE")
else:
ip.append(diet[name]['allergens'][i])
for nutrient in diet[name]['nutrients']:
ip.append(diet[name]['nutrients'][nutrient][i])
payload = {"values": [ip]}
flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload)
# Construct a new row containing nutrient totals to be logged in the person's sheet.
ip = ["", "", "", "", "", ""]
for nutrient in diet[name]['nutrients']:
total = 0
for quantity in diet[name]['nutrients'][nutrient]:
total += quantity
ip.append("Total: " + str(round(total, 1)))
payload = {"values": [ip]}
flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name | '''
This method concatenates the list of allergens in each food to a string.
'''
if len(allergens) == 1:
return allergens[0]
algs = ''
for i in range(len(allergens)):
for j in range(len(allergens[i])):
if j == len(allergens[i]) - 1:
algs += allergens[i][j]
if i != len(allergens) - 1:
algs += ', '
else:
algs += allergens[i][j]
return algs | identifier_body |
parser.go | break
}
}
n, err := p.objectItem()
		if err == errEofToken {
			break // we are finished
		}
		// we don't return a nil node, because we might want to use the
		// already collected items.
if err != nil {
return node, err
}
node.Add(n)
// object lists can be optionally comma-delimited e.g. when a list of maps
// is being expressed, so a comma is allowed here - it's simply consumed
tok := p.scan()
if tok.Type != token.COMMA {
p.unscan()
}
}
return node, nil
}
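// consumeComment consumes the current comment token and returns it together
// with the line on which the comment ends. It advances p.tok to the token
// that follows the comment.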
func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
endline = p.tok.Pos.Line
	// count the endline if it's a multiline comment, i.e. starting with /*
if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
// don't use range here - no need to decode Unicode code points
for i := 0; i < len(p.tok.Text); i++ {
if p.tok.Text[i] == '\n' {
endline++
}
}
}
comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
p.tok = p.sc.Scan()
return
}
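// consumeCommentGroup consumes consecutive comments that are separated by at
// most n lines and bundles them into a single ast.CommentGroup. The group is
// also recorded in p.comments.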
func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
var list []*ast.Comment
endline = p.tok.Pos.Line
for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
var comment *ast.Comment
comment, endline = p.consumeComment()
list = append(list, comment)
}
// add comment group to the comments list
comments = &ast.CommentGroup{List: list}
p.comments = append(p.comments, comments)
return
}
// objectItem parses a single object item
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
defer un(trace(p, "ParseObjectItem"))
keys, err := p.objectKey()
if len(keys) > 0 && err == errEofToken {
// We ignore eof token here since it is an error if we didn't
// receive a value (but we did receive a key) for the item.
err = nil
}
if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
// This is a strange boolean statement, but what it means is:
// We have keys with no value, and we're likely in an object
// (since RBrace ends an object). For this, we set err to nil so
// we continue and get the error below of having the wrong value
// type.
err = nil
// Reset the token type so we don't think it completed fine. See
// objectType which uses p.tok.Type to check if we're done with
// the object.
p.tok.Type = token.EOF
}
if err != nil {
return nil, err
}
o := &ast.ObjectItem{
Keys: keys,
}
if p.leadComment != nil {
o.LeadComment = p.leadComment
p.leadComment = nil
}
switch p.tok.Type {
case token.ASSIGN:
o.Assign = p.tok.Pos
o.Val, err = p.object()
if err != nil {
return nil, err
}
case token.LBRACE:
o.Val, err = p.objectType()
if err != nil {
return nil, err
}
default:
keyStr := make([]string, 0, len(keys))
for _, k := range keys {
keyStr = append(keyStr, k.Token.Text)
}
return nil, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf(
"key '%s' expected start of object ('{') or assignment ('=')",
strings.Join(keyStr, " ")),
}
}
// key=#comment
// val
if p.lineComment != nil {
o.LineComment, p.lineComment = p.lineComment, nil
}
// do a look-ahead for line comment
p.scan()
if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
o.LineComment = p.lineComment
p.lineComment = nil
}
p.unscan()
return o, nil
}
// objectKey parses an object key and returns a ObjectKey AST
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
keyCount := 0
keys := make([]*ast.ObjectKey, 0)
for {
tok := p.scan()
switch tok.Type {
case token.EOF:
// It is very important to also return the keys here as well as
// the error. This is because we need to be able to tell if we
// did parse keys prior to finding the EOF, or if we just found
// a bare EOF.
return keys, errEofToken
case token.ASSIGN:
// assignment or object only, but not nested objects. this is not
// allowed: `foo bar = {}`
if keyCount > 1 {
return nil, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
}
}
if keyCount == 0 {
return nil, &PosError{
Pos: p.tok.Pos,
Err: errors.New("no object keys found!"),
}
}
return keys, nil
case token.LBRACE:
var err error
// If we have no keys, then it is a syntax error. i.e. {{}} is not
// allowed.
if len(keys) == 0 {
err = &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
}
}
// object
return keys, err
case token.IDENT, token.STRING:
keyCount++
keys = append(keys, &ast.ObjectKey{Token: p.tok})
case token.ILLEGAL:
return keys, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("illegal character"),
}
default:
return keys, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
}
}
}
}
// object parses any type of object, such as number, bool, string, object or
// list.
func (p *Parser) object() (ast.Node, error) {
defer un(trace(p, "ParseType"))
tok := p.scan()
switch tok.Type {
case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
return p.literalType()
case token.LBRACE:
return p.objectType()
case token.LBRACK:
return p.listType()
case token.COMMENT:
// implement comment
case token.EOF:
return nil, errEofToken
}
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("Unknown token: %+v", tok),
}
}
// objectType parses an object type and returns a ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
defer un(trace(p, "ParseObjectType"))
// we assume that the currently scanned token is a LBRACE
o := &ast.ObjectType{
Lbrace: p.tok.Pos,
}
l, err := p.objectList(true)
// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
// not a RBRACE, it's an syntax error and we just return it.
if err != nil && p.tok.Type != token.RBRACE {
return nil, err
}
// No error, scan and expect the ending to be a brace
if tok := p.scan(); tok.Type != token.RBRACE {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
}
}
o.List = l
o.Rbrace = p.tok.Pos // advanced via parseObjectList
return o, nil
}
// listType parses a list type and returns a ListType AST
func (p *Parser) listType() (*ast.ListType, error) {
defer un(trace(p, "ParseListType"))
// we assume that the currently scanned token is a LBRACK
l := &ast.ListType{
Lbrack: p.tok.Pos,
}
needComma := false
for {
tok := p.scan()
if needComma {
switch tok.Type {
case token.COMMA, token.R | {
break // we are finished
} | conditional_block |
parser.go | .Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
}
}
if keyCount == 0 {
return nil, &PosError{
Pos: p.tok.Pos,
Err: errors.New("no object keys found!"),
}
}
return keys, nil
case token.LBRACE:
var err error
// If we have no keys, then it is a syntax error. i.e. {{}} is not
// allowed.
if len(keys) == 0 {
err = &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
}
}
// object
return keys, err
case token.IDENT, token.STRING:
keyCount++
keys = append(keys, &ast.ObjectKey{Token: p.tok})
case token.ILLEGAL:
return keys, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("illegal character"),
}
default:
return keys, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
}
}
}
}
// object parses any type of object, such as number, bool, string, object or
// list.
func (p *Parser) object() (ast.Node, error) {
defer un(trace(p, "ParseType"))
tok := p.scan()
switch tok.Type {
case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
return p.literalType()
case token.LBRACE:
return p.objectType()
case token.LBRACK:
return p.listType()
case token.COMMENT:
// implement comment
case token.EOF:
return nil, errEofToken
}
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("Unknown token: %+v", tok),
}
}
// objectType parses an object type and returns a ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
defer un(trace(p, "ParseObjectType"))
// we assume that the currently scanned token is a LBRACE
o := &ast.ObjectType{
Lbrace: p.tok.Pos,
}
l, err := p.objectList(true)
// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
// not a RBRACE, it's an syntax error and we just return it.
if err != nil && p.tok.Type != token.RBRACE {
return nil, err
}
// No error, scan and expect the ending to be a brace
if tok := p.scan(); tok.Type != token.RBRACE {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
}
}
o.List = l
o.Rbrace = p.tok.Pos // advanced via parseObjectList
return o, nil
}
// listType parses a list type and returns a ListType AST
func (p *Parser) listType() (*ast.ListType, error) {
defer un(trace(p, "ParseListType"))
// we assume that the currently scanned token is a LBRACK
l := &ast.ListType{
Lbrack: p.tok.Pos,
}
needComma := false
for {
tok := p.scan()
if needComma {
switch tok.Type {
case token.COMMA, token.RBRACK:
default:
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf(
"error parsing list, expected comma or list end, got: %s",
tok.Type),
}
}
}
switch tok.Type {
case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
node, err := p.literalType()
if err != nil {
return nil, err
}
// If there is a lead comment, apply it
if p.leadComment != nil {
node.LeadComment = p.leadComment
p.leadComment = nil
}
l.Add(node)
needComma = true
case token.COMMA:
// get next list item or we are at the end
// do a look-ahead for line comment
p.scan()
if p.lineComment != nil && len(l.List) > 0 {
lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
if ok {
lit.LineComment = p.lineComment
l.List[len(l.List)-1] = lit
p.lineComment = nil
}
}
p.unscan()
needComma = false
continue
case token.LBRACE:
// Looks like a nested object, so parse it out
node, err := p.objectType()
if err != nil {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf(
"error while trying to parse object within list: %s", err),
}
}
l.Add(node)
needComma = true
case token.LBRACK:
node, err := p.listType()
if err != nil {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf(
"error while trying to parse list within list: %s", err),
}
}
l.Add(node)
case token.RBRACK:
// finished
l.Rbrack = p.tok.Pos
return l, nil
default:
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
}
}
}
}
// literalType parses a literal type and returns a LiteralType AST
func (p *Parser) literalType() (*ast.LiteralType, error) {
defer un(trace(p, "ParseLiteral"))
return &ast.LiteralType{
Token: p.tok,
}, nil
}
// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead. In the process, it collects any
// comment groups encountered, and remembers the last lead and line comments.
func (p *Parser) scan() token.Token {
// If we have a token on the buffer, then return it.
if p.n != 0 {
p.n = 0
return p.tok
}
// Otherwise read the next token from the scanner and Save it to the buffer
// in case we unscan later.
prev := p.tok
p.tok = p.sc.Scan()
if p.tok.Type == token.COMMENT {
var comment *ast.CommentGroup
var endline int
// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
// p.tok.Pos.Line, prev.Pos.Line, endline)
if p.tok.Pos.Line == prev.Pos.Line {
// The comment is on same line as the previous token; it
// cannot be a lead comment but may be a line comment.
comment, endline = p.consumeCommentGroup(0)
if p.tok.Pos.Line != endline {
// The next token is on a different line, thus
// the last comment group is a line comment.
p.lineComment = comment
}
}
// consume successor comments, if any
endline = -1
for p.tok.Type == token.COMMENT {
comment, endline = p.consumeCommentGroup(1)
}
if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
switch p.tok.Type {
case token.RBRACE, token.RBRACK:
// Do not count for these cases
default:
// The next token is following on the line immediately after the
// comment group, thus the last comment group is a lead comment.
p.leadComment = comment
}
}
}
return p.tok
}
// unscan pushes the previously read token back onto the buffer.
func (p *Parser) unscan() {
p.n = 1
}
// ----------------------------------------------------------------------------
// Parsing support
func (p *Parser) printTrace(a ...interface{}) {
if !p.enableTrace {
return
}
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
const n = len(dots)
fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
i := 2 * p.indent
for i > n {
fmt.Print(dots)
i -= n
}
// i <= n
fmt.Print(dots[0:i])
fmt.Println(a...)
}
func | trace | identifier_name |
|
parser.go | node := &ast.ObjectList{}
for {
if obj {
tok := p.scan()
p.unscan()
if tok.Type == token.RBRACE {
break
}
}
n, err := p.objectItem()
if err == errEofToken {
break // we are finished
}
// we don't return a nil node, because might want to use already
// collected items.
if err != nil {
return node, err
}
node.Add(n)
// object lists can be optionally comma-delimited e.g. when a list of maps
// is being expressed, so a comma is allowed here - it's simply consumed
tok := p.scan()
if tok.Type != token.COMMA {
p.unscan()
}
}
return node, nil
}
func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
endline = p.tok.Pos.Line
// count the endline if it's multiline comment, ie starting with /*
if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
// don't use range here - no need to decode Unicode code points
for i := 0; i < len(p.tok.Text); i++ {
if p.tok.Text[i] == '\n' {
endline++
}
}
}
comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
p.tok = p.sc.Scan()
return
}
func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
var list []*ast.Comment
endline = p.tok.Pos.Line
for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
var comment *ast.Comment
comment, endline = p.consumeComment()
list = append(list, comment)
}
// add comment group to the comments list
comments = &ast.CommentGroup{List: list}
p.comments = append(p.comments, comments)
return
}
// objectItem parses a single object item
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
defer un(trace(p, "ParseObjectItem"))
keys, err := p.objectKey()
if len(keys) > 0 && err == errEofToken {
// We ignore eof token here since it is an error if we didn't
// receive a value (but we did receive a key) for the item.
err = nil
}
if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
// This is a strange boolean statement, but what it means is:
// We have keys with no value, and we're likely in an object
// (since RBrace ends an object). For this, we set err to nil so
// we continue and get the error below of having the wrong value
// type.
err = nil
// Reset the token type so we don't think it completed fine. See
// objectType which uses p.tok.Type to check if we're done with
// the object.
p.tok.Type = token.EOF
}
if err != nil {
return nil, err
}
o := &ast.ObjectItem{
Keys: keys,
}
if p.leadComment != nil {
o.LeadComment = p.leadComment
p.leadComment = nil
}
switch p.tok.Type {
case token.ASSIGN:
o.Assign = p.tok.Pos
o.Val, err = p.object()
if err != nil {
return nil, err
}
case token.LBRACE:
o.Val, err = p.objectType()
if err != nil {
return nil, err
}
default:
keyStr := make([]string, 0, len(keys))
for _, k := range keys {
keyStr = append(keyStr, k.Token.Text)
}
return nil, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf(
"key '%s' expected start of object ('{') or assignment ('=')",
strings.Join(keyStr, " ")),
}
}
// key=#comment
// val
if p.lineComment != nil {
o.LineComment, p.lineComment = p.lineComment, nil
}
// do a look-ahead for line comment
p.scan()
if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
o.LineComment = p.lineComment
p.lineComment = nil
}
p.unscan()
return o, nil
}
// objectKey parses an object key and returns a ObjectKey AST
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
keyCount := 0
keys := make([]*ast.ObjectKey, 0)
for {
tok := p.scan()
switch tok.Type {
case token.EOF:
// It is very important to also return the keys here as well as
// the error. This is because we need to be able to tell if we
// did parse keys prior to finding the EOF, or if we just found
// a bare EOF.
return keys, errEofToken
case token.ASSIGN:
// assignment or object only, but not nested objects. this is not
// allowed: `foo bar = {}`
if keyCount > 1 {
return nil, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
}
}
if keyCount == 0 {
return nil, &PosError{
Pos: p.tok.Pos,
Err: errors.New("no object keys found!"),
}
}
return keys, nil
case token.LBRACE:
var err error
// If we have no keys, then it is a syntax error. i.e. {{}} is not
// allowed.
if len(keys) == 0 {
err = &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
}
}
// object
return keys, err
case token.IDENT, token.STRING:
keyCount++
keys = append(keys, &ast.ObjectKey{Token: p.tok})
case token.ILLEGAL:
return keys, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("illegal character"),
}
default:
return keys, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
}
}
}
}
// object parses any type of object, such as number, bool, string, object or
// list.
func (p *Parser) object() (ast.Node, error) {
defer un(trace(p, "ParseType"))
tok := p.scan()
switch tok.Type {
case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
return p.literalType()
case token.LBRACE:
return p.objectType()
case token.LBRACK:
return p.listType()
case token.COMMENT:
// implement comment
case token.EOF:
return nil, errEofToken
}
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("Unknown token: %+v", tok),
}
}
// objectType parses an object type and returns a ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
defer un(trace(p, "ParseObjectType"))
// we assume that the currently scanned token is a LBRACE
o := &ast.ObjectType{
Lbrace: p.tok.Pos,
}
l, err := p.objectList(true)
// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
// not a RBRACE, it's an syntax error and we just return it.
if err != nil && p.tok.Type != token.RBRACE {
return nil, err
}
// No error, scan and expect the ending to be a brace
if tok := p.scan(); tok.Type != token.RBRACE {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
}
}
o.List = l
o.Rbrace = p.tok.Pos // advanced via parseObjectList
return o, nil
}
| // The parameter" obj" tells this whether to we are within an object (braces:
// '{', '}') or just at the top level. If we're within an object, we end
// at an RBRACE.
func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
defer un(trace(p, "ParseObjectList")) | random_line_split |
|
parser.go | : fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
}
}
if keyCount == 0 {
return nil, &PosError{
Pos: p.tok.Pos,
Err: errors.New("no object keys found!"),
}
}
return keys, nil
case token.LBRACE:
var err error
// If we have no keys, then it is a syntax error. i.e. {{}} is not
// allowed.
if len(keys) == 0 {
err = &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
}
}
// object
return keys, err
case token.IDENT, token.STRING:
keyCount++
keys = append(keys, &ast.ObjectKey{Token: p.tok})
case token.ILLEGAL:
return keys, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("illegal character"),
}
default:
return keys, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
}
}
}
}
// object parses any type of object, such as number, bool, string, object or
// list.
func (p *Parser) object() (ast.Node, error) {
defer un(trace(p, "ParseType"))
tok := p.scan()
switch tok.Type {
case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
return p.literalType()
case token.LBRACE:
return p.objectType()
case token.LBRACK:
return p.listType()
case token.COMMENT:
// implement comment
case token.EOF:
return nil, errEofToken
}
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("Unknown token: %+v", tok),
}
}
// objectType parses an object type and returns a ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
defer un(trace(p, "ParseObjectType"))
// we assume that the currently scanned token is a LBRACE
o := &ast.ObjectType{
Lbrace: p.tok.Pos,
}
l, err := p.objectList(true)
// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
// not a RBRACE, it's an syntax error and we just return it.
if err != nil && p.tok.Type != token.RBRACE {
return nil, err
}
// No error, scan and expect the ending to be a brace
if tok := p.scan(); tok.Type != token.RBRACE {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
}
}
o.List = l
o.Rbrace = p.tok.Pos // advanced via parseObjectList
return o, nil
}
// listType parses a list type and returns a ListType AST
func (p *Parser) listType() (*ast.ListType, error) {
defer un(trace(p, "ParseListType"))
// we assume that the currently scanned token is a LBRACK
l := &ast.ListType{
Lbrack: p.tok.Pos,
}
needComma := false
for {
tok := p.scan()
if needComma {
switch tok.Type {
case token.COMMA, token.RBRACK:
default:
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf(
"error parsing list, expected comma or list end, got: %s",
tok.Type),
}
}
}
switch tok.Type {
case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
node, err := p.literalType()
if err != nil {
return nil, err
}
// If there is a lead comment, apply it
if p.leadComment != nil {
node.LeadComment = p.leadComment
p.leadComment = nil
}
l.Add(node)
needComma = true
case token.COMMA:
// get next list item or we are at the end
// do a look-ahead for line comment
p.scan()
if p.lineComment != nil && len(l.List) > 0 {
lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
if ok {
lit.LineComment = p.lineComment
l.List[len(l.List)-1] = lit
p.lineComment = nil
}
}
p.unscan()
needComma = false
continue
case token.LBRACE:
// Looks like a nested object, so parse it out
node, err := p.objectType()
if err != nil {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf(
"error while trying to parse object within list: %s", err),
}
}
l.Add(node)
needComma = true
case token.LBRACK:
node, err := p.listType()
if err != nil {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf(
"error while trying to parse list within list: %s", err),
}
}
l.Add(node)
case token.RBRACK:
// finished
l.Rbrack = p.tok.Pos
return l, nil
default:
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
}
}
}
}
// literalType parses a literal type and returns a LiteralType AST
func (p *Parser) literalType() (*ast.LiteralType, error) {
defer un(trace(p, "ParseLiteral"))
return &ast.LiteralType{
Token: p.tok,
}, nil
}
// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead. In the process, it collects any
// comment groups encountered, and remembers the last lead and line comments.
func (p *Parser) scan() token.Token {
// If we have a token on the buffer, then return it.
if p.n != 0 {
p.n = 0
return p.tok
}
// Otherwise read the next token from the scanner and Save it to the buffer
// in case we unscan later.
prev := p.tok
p.tok = p.sc.Scan()
if p.tok.Type == token.COMMENT {
var comment *ast.CommentGroup
var endline int
// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
// p.tok.Pos.Line, prev.Pos.Line, endline)
if p.tok.Pos.Line == prev.Pos.Line {
// The comment is on same line as the previous token; it
// cannot be a lead comment but may be a line comment.
comment, endline = p.consumeCommentGroup(0)
if p.tok.Pos.Line != endline {
// The next token is on a different line, thus
// the last comment group is a line comment.
p.lineComment = comment
}
}
// consume successor comments, if any
endline = -1
for p.tok.Type == token.COMMENT {
comment, endline = p.consumeCommentGroup(1)
}
if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
switch p.tok.Type {
case token.RBRACE, token.RBRACK:
// Do not count for these cases
default:
// The next token is following on the line immediately after the
// comment group, thus the last comment group is a lead comment.
p.leadComment = comment
}
}
}
return p.tok
}
// unscan pushes the previously read token back onto the buffer.
func (p *Parser) unscan() {
p.n = 1
}
// ----------------------------------------------------------------------------
// Parsing support
func (p *Parser) printTrace(a ...interface{}) | {
if !p.enableTrace {
return
}
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
const n = len(dots)
fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
i := 2 * p.indent
for i > n {
fmt.Print(dots)
i -= n
}
// i <= n
fmt.Print(dots[0:i])
fmt.Println(a...)
} | identifier_body |
|
dom-movement.ts | * @throws {CannotEscapeIrrelevantNode} If the container is irrelevant.
*
* @throw {ReversedRangeError} If ``max`` is less than ``min``.
*/
constructor(readonly min: DOMLoc,
readonly max: DOMLoc,
readonly relevanceTest: NodeTest = () => true) {
if (!(this.isRelevant(min.node) && this.isRelevant(max.node))) {
throw new CannotEscapeIrrelevantNode();
}
// Man could be equal to min but it cannot be less than min.
if (max.compare(min) < 0) {
throw new ReversedRangeError();
}
}
static makeSpanningNode(node: Node, relevanceTest?: NodeTest): DOMSpace {
return new DOMSpace(new DOMLoc(node, 0),
new DOMLoc(node, node.childNodes.length),
relevanceTest);
}
/**
* Test whether a node is contextually relevant. This method runs some stock
* tests and if necessary calls [[Space.relevanceTest]].
*
* @param node The node to test.
*
* @returns ``true`` if the node is contextually relevant, ``false`` if not.
*/
isRelevant(node: Node): boolean {
const { nodeType } = node;
return (nodeType === Node.ELEMENT_NODE || nodeType === Node.TEXT_NODE ||
nodeType === Node.DOCUMENT_NODE ||
nodeType === Node.DOCUMENT_FRAGMENT_NODE) &&
this.relevanceTest(node);
}
/**
* Determine whether this space contains a location.
*
* @param loc The location to test.
*
* @returns Whether the location is inside the space.
*/
contains(loc: DOMLoc): boolean {
try {
return this.min.compare(loc) <= 0 && this.max.compare(loc) >= 0;
}
catch (ex) {
if (ex instanceof ComparingDisconnectedNodes) {
return false;
}
/* istanbul ignore next: there's currently no way to get here */
throw ex;
}
}
/**
* Determine whether this space contains a node.
*
* @param node The node to test.
*
* @returns Whether the node is inside the space.
*/
containsNode(node: Node): boolean {
return node.parentNode !== null &&
this.contains(DOMLoc.makePointingTo(node));
}
/**
* If the current location is irrelevant node, then produce a new relevant
* location pointing to the contextually irrelevant node. This is "escaping"
* the node in the sense that the location provided by this method is pointing
* at the irrelevant node *from outside*.
*
* This method also normalizes the location.
*
* @param location The location to escape.
*
* @returns If ``location`` was already relevant, and already normalized, then
* return ``location``. Otherwise, the new relevant location.
*
* @throws {DOMSpaceScopeError} If ``location`` is not within the space.
*/
escapeIrrelevantNode(location: DOMLoc): DOMLoc {
if (!this.contains(location)) {
throw new DOMSpaceScopeError();
}
const normalized = location.normalizeOffset();
let node: Node | null = normalized.node;
const ancestorsAndSelf: Node[] = [];
while (node !== null && this.containsNode(node)) {
ancestorsAndSelf.push(node);
node = node.parentNode;
}
// We reverse the nodes to scan them form topmost node down to the original
// location.
const reversed = ancestorsAndSelf.reverse();
const first = reversed[0];
for (const candidate of reversed) {
if (!this.isRelevant(candidate)) {
// parentNode cannot be null, unless we are the first in the array.
// tslint:disable-next-line:no-non-null-assertion
const parentNode = candidate.parentNode!;
// If this is the first candidate, then the parent is outside the
// container, and we cannot use it. We don't have a good location to
// return. This should never happen because the container is required to
// be relevant.
if (candidate === first) {
/* istanbul ignore next: there's no means to generate this error */
throw new Error("internal error: we should always be able to escape \
a location which is inside the space");
}
return new DOMLoc(parentNode,
indexOf(parentNode.childNodes, candidate));
}
}
// None of the ancestors or the node itself were irrelevant, so the original
// location was fine.
return normalized;
}
/**
* Compute the next relevant location from a starting point.
*
* @param start The location from which to start.
*
* @returns The next relevant location. Or ``null`` if there is no next
* relevant location within the space. Remember: the *location* is relevant,
* but can point to an irrelevant node.
*
* @throws {DOMSpaceScopeError} If ``start`` is not within the space.
*
* @throws {CannotEscapeIrrelevantNode} If ``start`` is irrelevant and cannot
* be escaped.
*/
next(start: DOMLoc): DOMLoc | null {
// tslint:disable-next-line:prefer-const
let { node, offset } = this.escapeIrrelevantNode(start);
let loc: DOMLoc | undefined;
switch (node.nodeType) {
case Node.DOCUMENT_FRAGMENT_NODE:
case Node.DOCUMENT_NODE:
case Node.ELEMENT_NODE:
const pointedNode = node.childNodes[offset++];
if (pointedNode !== undefined) {
loc = this.isRelevant(pointedNode) ?
new DOMLoc(pointedNode, 0) :
new DOMLoc(node, offset);
}
break;
case Node.TEXT_NODE:
if (++offset <= (node as Text).length) {
loc = new DOMLoc(node, offset);
}
break;
/* istanbul ignore next: we cannot get there */
default:
// Due to escapeIrrelevantNode, we should never get here.
throw new Error(`internal error: unexpected type ${node.nodeType}`);
}
if (loc === undefined) {
// If we get here, we have to move to the sibling after our starting node.
// Note that because of the escapeIrrelevantNode at the beginning of this
// function, the parent we encounter is necessarily relevant.
const { parentNode } = node;
if (parentNode === null) {
return null;
}
loc = new DOMLoc(parentNode, indexOf(parentNode.childNodes, node) + 1);
}
return this.contains(loc) ? loc : null;
}
/**
* Compute the previous relevant location from a starting point.
*
* @param start The location from which to start.
*
* @returns The previous relevant location. Or ``null`` if there is no
* previous relevant location inside the space. Remember: the *location* is
* relevant, but can point to an irrelevant node.
*
* @throws {DOMSpaceScopeError} If ``start`` is not within the space.
*
* @throws {CannotEscapeIrrelevantNode} If ``start`` is irrelevant and cannot
* be escaped.
*/
previous(start: DOMLoc): DOMLoc | null {
// tslint:disable-next-line:prefer-const
let { node, offset } = this.escapeIrrelevantNode(start);
let loc: DOMLoc | undefined;
switch (node.nodeType) {
case Node.DOCUMENT_FRAGMENT_NODE:
case Node.DOCUMENT_NODE:
case Node.ELEMENT_NODE:
const pointedNode = node.childNodes[--offset];
if (pointedNode !== undefined) {
loc = this.isRelevant(pointedNode) ?
new DOMLoc(pointedNode,
pointedNode.nodeType === Node.TEXT_NODE ?
(pointedNode as Text).length :
pointedNode.childNodes.length) :
new DOMLoc(node, offset);
}
break;
case Node.TEXT_NODE:
if (--offset >= 0) {
loc = new DOMLoc(node, offset);
}
break;
/* istanbul ignore next: we cannot get there */
default:
// Due to escapeIrrelevantNode, we should never get here.
throw new Error(`internal error: unexpected type ${node.nodeType}`);
}
if (loc === undefined) {
// If we get here, we have to move to the sibling before our starting
// node.
// Note that because of the escapeIrrelevantNode at the beginning of this
// function, the parents we encounter are necessarily relevant.
const { parentNode } = node;
if (parentNode === null) {
return null;
}
loc = new DOMLoc(parentNode, indexOf(parentNode.childNodes, node));
}
return this.contains(loc) ? loc : null;
}
/**
* Produce an iterable iterator that iterates in document order.
*/
*[Symbol.iterator](): IterableIterator<DOMLoc> {
let current: DOMLoc | null = this.min;
do {
yield current;
current = this.next(current);
} while (current !== null);
}
/**
* Produce an iterable iterator that iterates in reverse document order.
*/
*reversed(): IterableIterator<DOMLoc> {
let current: DOMLoc | null = this.max;
do | {
yield current;
current = this.previous(current);
} | conditional_block |
|
dom-movement.ts | (node: Node, offset: number, child: Node): 1 | 0 | -1 {
const pointed = node.childNodes[offset];
if (pointed === undefined) {
// Undefined means we are after all other elements. (A negative offset,
// before all nodes, is not possible here.)
return 1;
}
// We return -1 when pointed === child because the actual position we care
// about is *inside* child. Since it is inside child, ``[node, offset]``
// necessarily precedes that location.
return pointed === child ||
// tslint:disable-next-line:no-bitwise
(pointed.compareDocumentPosition(child) &
Node.DOCUMENT_POSITION_FOLLOWING) !== 0 ?
-1 : // child follows pointed
1; // child is before pointed
}
/**
* Models a DOM location. A DOM location is a pair of node and offset.
*
* In theory it would be possible to support nodes of any type, but this library
* currently only supports only ``Element``, ``Document``, ``DocumentFragment``,
* and ``Text`` for the node.
*
* Consider the following example:
*
* <p>I am a <b>little</b> teapot.</p>
*
* A location of ``(p, 0)`` points to the first text node of the top
* level ``p`` element.
*
* A location of ``(p.childNodes[0], 0)`` points to the letter "I" in first text
* node inside ``p``.
*
* A location of ``(p.childNodes[0], 7)`` points to the end of the first text
* node inside ``p``. This is a location after all the text in the node.
*
* A location of ``(p, 1)`` points to the ``b`` element inside ``p``.
*/
export class DOMLoc {
constructor(readonly node: Node, readonly offset: number) {
if (offset < 0) {
throw new Error("offset cannot be negative");
}
}
static makePointingTo(node: Node): DOMLoc {
const parent = node.parentNode;
if (parent === null) {
throw new Error("cannot point a node without a parent");
}
return new DOMLoc(parent, indexOf(parent.childNodes, node));
}
/**
* @returns A new [[Location]], if the ``node``, ``offset`` pair are not equal
* to those of this location. Otherwise, return ``this``.
*/
newIfDifferent(node: Node, offset: number): DOMLoc {
return (this.node === node && this.offset === offset) ? this :
new DOMLoc(node, offset);
}
/**
* This is the node to which this location points. When the location points to
* a text node, the pointed node is the text node. When the location points to
* anything else, the pointed node is the child node at the offset of the
* location. This may be undefined when the location points beyond the last
* child.
*/
get pointedNode(): Node | null {
const { node } = this;
if (node.nodeType === Node.TEXT_NODE) {
return node;
}
const pointed = node.childNodes[this.offset];
return pointed === undefined ? null : pointed;
}
/**
* The offset contained by this location, but normalized. An offset pointing
* beyond the end of the node's data will be normalized to point at the end of
* the node.
*/
get normalizedOffset(): number {
const { offset, node } = this;
switch (node.nodeType) {
case Node.DOCUMENT_NODE:
case Node.DOCUMENT_FRAGMENT_NODE:
case Node.ELEMENT_NODE: {
const { childNodes: { length } } = node;
return offset > length ? length : offset;
}
case Node.TEXT_NODE: {
const { length } = node as Text;
return offset > length ? length : offset;
}
default:
throw new Error(`cannot normalize offset in a node of type: \
${node.nodeType}`);
}
}
/**
* ``true`` if the location is already normalized. ``false`` if not.
*/
get isNormalized(): boolean {
return this.offset === this.normalizedOffset;
}
/**
* Convert a location with an offset which is out of bounds, to a location
* with an offset within bounds.
*
* An offset less than 0 will be normalized to 0. An offset pointing beyond
* the end of the node's data will be normalized to point at the end of the
* node.
*
* @returns A new [[Location]], if the offset was adjusted. Otherwise, it
* returns ``this``.
*/
normalizeOffset(): DOMLoc {
const normalized = this.normalizedOffset;
const { offset, node } = this;
return normalized === offset ? this : new DOMLoc(node, normalized);
}
/**
* Determine whether this location and another location are equal.
*
* @returns Whether ``this`` and ``other`` are equal.
*/
equals(other: DOMLoc | undefined | null): boolean {
return other != null &&
(this === other || (this.node === other.node &&
this.offset === other.offset));
}
/**
* Compare this location with another in document order.
*
* @param other The other location to compare.
*
* @returns -1 if ``this`` is earlier than ``other``, ``0`` if the two
* locations are equal, 1 if ``this`` is later than ``other``.
*
* @throws {ComparingDisconnectedNodes} If the two nodes are "disconnected"
* (i.e. do not belong to the same document).
*/
compare(other: DOMLoc): -1 | 0 | 1 {
if (this.equals(other)) {
return 0;
}
const { node, offset } = this;
const { node: otherNode, offset: otherOffset } = other;
if (node === otherNode) {
// The case where offset === otherOffset cannot happen here because it is
// covered above.
return offset - otherOffset < 0 ? -1 : 1;
}
const result = node.compareDocumentPosition(otherNode);
// tslint:disable:no-bitwise
if ((result & Node.DOCUMENT_POSITION_DISCONNECTED) !== 0) {
throw new ComparingDisconnectedNodes();
}
if ((result & Node.DOCUMENT_POSITION_FOLLOWING) !== 0) {
// otherNode follows node.
return (result & Node.DOCUMENT_POSITION_CONTAINED_BY) !== 0 ?
// otherNode is contained by node but we still need to figure out the
// relative positions of the node pointed by [node, offset] and
// otherNode.
pointedCompare(node, offset, otherNode) :
// otherNode just follows node, no parent child relation
-1;
}
if ((result & Node.DOCUMENT_POSITION_PRECEDING) === 0) {
/* istanbul ignore next: there's no means to generate this error */
throw new Error("neither preceding nor following: this should not \
happen");
}
// otherNode precedes node.
return ((result & Node.DOCUMENT_POSITION_CONTAINS) !== 0 &&
// otherNode contains node but we still need to figure out the
// relative positions of the node pointed by [otherNode,
// otherOffset] and node.
pointedCompare(otherNode, otherOffset, node) > 0) ? -1 : 1;
// tslint:enable:no-bitwise
}
}
type NodeTest = (node: Node) => boolean;
// tslint:disable-next-line:no-any
function indexOf(arrayLike: any, el: any): number {
return Array.prototype.indexOf.call(arrayLike, el);
}
/**
* A space delimits a part of a DOM tree in which one can obtain locations.
*/
export class DOMSpace implements Iterable<DOMLoc> {
/**
* @param min The minimum location included in this space.
*
* @param max The maximum location included in this space.
*
* @param relevanceTest A test to determine whether a node is relevant. This
* space does not produce locations into irrelevant nodes.
*
* @throws {CannotEscapeIrrelevantNode} If the container is irrelevant.
*
* @throw {ReversedRangeError} If ``max`` is less than ``min``.
*/
constructor(readonly min: DOMLoc,
readonly max: DOMLoc,
readonly relevanceTest: NodeTest = () => true) {
if (!(this.isRelevant(min.node) && this.isRelevant(max.node))) {
throw new CannotEscapeIrrelevantNode();
}
// Man could be equal to min but it cannot be less than min.
if (max.compare(min) < 0) {
throw new ReversedRangeError();
}
}
static makeSpanningNode(node: Node, relevanceTest?: NodeTest): DOMSpace {
return new DOMSpace(new DOMLoc(node, 0),
new DOMLoc(node, node.childNodes.length),
relevanceTest);
}
/**
* Test whether a node is contextually relevant. This method runs some stock
* tests and if necessary calls [[Space.relevanceTest]].
*
* @param node The node to test.
*
* @returns ``true`` if the | pointedCompare | identifier_name |
|
dom-movement.ts | _POSITION_FOLLOWING) !== 0 ?
-1 : // child follows pointed
1; // child is before pointed
}
/**
* Models a DOM location. A DOM location is a pair of node and offset.
*
* In theory it would be possible to support nodes of any type, but this library
* currently only supports only ``Element``, ``Document``, ``DocumentFragment``,
* and ``Text`` for the node.
*
* Consider the following example:
*
* <p>I am a <b>little</b> teapot.</p>
*
* A location of ``(p, 0)`` points to the first text node of the top
* level ``p`` element.
*
* A location of ``(p.childNodes[0], 0)`` points to the letter "I" in first text
* node inside ``p``.
*
* A location of ``(p.childNodes[0], 7)`` points to the end of the first text
* node inside ``p``. This is a location after all the text in the node.
*
* A location of ``(p, 1)`` points to the ``b`` element inside ``p``.
*/
export class DOMLoc {
constructor(readonly node: Node, readonly offset: number) {
if (offset < 0) {
throw new Error("offset cannot be negative");
}
}
static makePointingTo(node: Node): DOMLoc {
const parent = node.parentNode;
if (parent === null) {
throw new Error("cannot point a node without a parent");
}
return new DOMLoc(parent, indexOf(parent.childNodes, node));
}
/**
* @returns A new [[Location]], if the ``node``, ``offset`` pair are not equal
* to those of this location. Otherwise, return ``this``.
*/
newIfDifferent(node: Node, offset: number): DOMLoc {
return (this.node === node && this.offset === offset) ? this :
new DOMLoc(node, offset);
}
/**
* This is the node to which this location points. When the location points to
* a text node, the pointed node is the text node. When the location points to
* anything else, the pointed node is the child node at the offset of the
* location. This may be undefined when the location points beyond the last
* child.
*/
get pointedNode(): Node | null {
const { node } = this;
if (node.nodeType === Node.TEXT_NODE) {
return node;
}
const pointed = node.childNodes[this.offset];
return pointed === undefined ? null : pointed;
}
/**
* The offset contained by this location, but normalized. An offset pointing
* beyond the end of the node's data will be normalized to point at the end of
* the node.
*/
get normalizedOffset(): number {
const { offset, node } = this;
switch (node.nodeType) {
case Node.DOCUMENT_NODE:
case Node.DOCUMENT_FRAGMENT_NODE:
case Node.ELEMENT_NODE: {
const { childNodes: { length } } = node;
return offset > length ? length : offset;
}
case Node.TEXT_NODE: {
const { length } = node as Text;
return offset > length ? length : offset;
}
default:
throw new Error(`cannot normalize offset in a node of type: \
${node.nodeType}`);
}
}
/**
* ``true`` if the location is already normalized. ``false`` if not.
*/
get isNormalized(): boolean {
return this.offset === this.normalizedOffset;
}
/**
* Convert a location with an offset which is out of bounds, to a location
* with an offset within bounds.
*
* An offset less than 0 will be normalized to 0. An offset pointing beyond
* the end of the node's data will be normalized to point at the end of the
* node.
*
* @returns A new [[Location]], if the offset was adjusted. Otherwise, it
* returns ``this``.
*/
normalizeOffset(): DOMLoc {
const normalized = this.normalizedOffset;
const { offset, node } = this;
return normalized === offset ? this : new DOMLoc(node, normalized);
}
/**
* Determine whether this location and another location are equal.
*
* @returns Whether ``this`` and ``other`` are equal.
*/
equals(other: DOMLoc | undefined | null): boolean {
return other != null &&
(this === other || (this.node === other.node &&
this.offset === other.offset));
}
/**
* Compare this location with another in document order.
*
* @param other The other location to compare.
*
* @returns -1 if ``this`` is earlier than ``other``, ``0`` if the two
* locations are equal, 1 if ``this`` is later than ``other``.
*
* @throws {ComparingDisconnectedNodes} If the two nodes are "disconnected"
* (i.e. do not belong to the same document).
*/
compare(other: DOMLoc): -1 | 0 | 1 | if ((result & Node.DOCUMENT_POSITION_FOLLOWING) !== 0) {
// otherNode follows node.
return (result & Node.DOCUMENT_POSITION_CONTAINED_BY) !== 0 ?
// otherNode is contained by node but we still need to figure out the
// relative positions of the node pointed by [node, offset] and
// otherNode.
pointedCompare(node, offset, otherNode) :
// otherNode just follows node, no parent child relation
-1;
}
if ((result & Node.DOCUMENT_POSITION_PRECEDING) === 0) {
/* istanbul ignore next: there's no means to generate this error */
throw new Error("neither preceding nor following: this should not \
happen");
}
// otherNode precedes node.
return ((result & Node.DOCUMENT_POSITION_CONTAINS) !== 0 &&
// otherNode contains node but we still need to figure out the
// relative positions of the node pointed by [otherNode,
// otherOffset] and node.
pointedCompare(otherNode, otherOffset, node) > 0) ? -1 : 1;
// tslint:enable:no-bitwise
}
}
type NodeTest = (node: Node) => boolean;
// tslint:disable-next-line:no-any
function indexOf(arrayLike: any, el: any): number {
return Array.prototype.indexOf.call(arrayLike, el);
}
/**
* A space delimits a part of a DOM tree in which one can obtain locations.
*/
export class DOMSpace implements Iterable<DOMLoc> {
/**
* @param min The minimum location included in this space.
*
* @param max The maximum location included in this space.
*
* @param relevanceTest A test to determine whether a node is relevant. This
* space does not produce locations into irrelevant nodes.
*
* @throws {CannotEscapeIrrelevantNode} If the container is irrelevant.
*
* @throw {ReversedRangeError} If ``max`` is less than ``min``.
*/
constructor(readonly min: DOMLoc,
readonly max: DOMLoc,
readonly relevanceTest: NodeTest = () => true) {
if (!(this.isRelevant(min.node) && this.isRelevant(max.node))) {
throw new CannotEscapeIrrelevantNode();
}
// Man could be equal to min but it cannot be less than min.
if (max.compare(min) < 0) {
throw new ReversedRangeError();
}
}
static makeSpanningNode(node: Node, relevanceTest?: NodeTest): DOMSpace {
return new DOMSpace(new DOMLoc(node, 0),
new DOMLoc(node, node.childNodes.length),
relevanceTest);
}
/**
* Test whether a node is contextually relevant. This method runs some stock
* tests and if necessary calls [[Space.relevanceTest]].
*
* @param node The node to test.
*
* @returns ``true`` if the node is contextually relevant, ``false`` if not.
*/
isRelevant(node: Node): boolean {
const { nodeType } = node;
return (nodeType === Node.ELEMENT_NODE || nodeType === Node.TEXT_NODE ||
nodeType === Node.DOCUMENT_NODE ||
nodeType === Node.DOCUMENT_FRAGMENT_NODE) &&
this.relevanceTest(node);
}
/**
* Determine whether this space contains a location.
*
* @param loc The location to test.
*
* @returns Whether the location is inside the space.
*/
contains(loc: DOMLoc): boolean {
try {
return this.min.compare(loc) <= 0 && this.max.compare(loc) >= 0 | {
if (this.equals(other)) {
return 0;
}
const { node, offset } = this;
const { node: otherNode, offset: otherOffset } = other;
if (node === otherNode) {
// The case where offset === otherOffset cannot happen here because it is
// covered above.
return offset - otherOffset < 0 ? -1 : 1;
}
const result = node.compareDocumentPosition(otherNode);
// tslint:disable:no-bitwise
if ((result & Node.DOCUMENT_POSITION_DISCONNECTED) !== 0) {
throw new ComparingDisconnectedNodes();
}
| identifier_body |
dom-movement.ts | _POSITION_FOLLOWING) !== 0 ?
-1 : // child follows pointed
1; // child is before pointed
}
/**
* Models a DOM location. A DOM location is a pair of node and offset.
*
* In theory it would be possible to support nodes of any type, but this library
* currently only supports only ``Element``, ``Document``, ``DocumentFragment``,
* and ``Text`` for the node.
*
* Consider the following example:
*
* <p>I am a <b>little</b> teapot.</p>
*
* A location of ``(p, 0)`` points to the first text node of the top
* level ``p`` element.
*
* A location of ``(p.childNodes[0], 0)`` points to the letter "I" in first text
* node inside ``p``.
*
* A location of ``(p.childNodes[0], 7)`` points to the end of the first text
* node inside ``p``. This is a location after all the text in the node.
*
* A location of ``(p, 1)`` points to the ``b`` element inside ``p``.
*/
export class DOMLoc {
constructor(readonly node: Node, readonly offset: number) {
if (offset < 0) {
throw new Error("offset cannot be negative");
}
}
static makePointingTo(node: Node): DOMLoc {
const parent = node.parentNode;
if (parent === null) {
throw new Error("cannot point a node without a parent");
}
return new DOMLoc(parent, indexOf(parent.childNodes, node));
}
/**
* @returns A new [[Location]], if the ``node``, ``offset`` pair are not equal
* to those of this location. Otherwise, return ``this``.
*/
newIfDifferent(node: Node, offset: number): DOMLoc {
return (this.node === node && this.offset === offset) ? this :
new DOMLoc(node, offset);
}
/**
* This is the node to which this location points. When the location points to
* a text node, the pointed node is the text node. When the location points to
* anything else, the pointed node is the child node at the offset of the
* location. This may be undefined when the location points beyond the last
* child.
*/
get pointedNode(): Node | null {
const { node } = this;
if (node.nodeType === Node.TEXT_NODE) {
return node;
}
const pointed = node.childNodes[this.offset];
return pointed === undefined ? null : pointed;
}
/**
* The offset contained by this location, but normalized. An offset pointing
* beyond the end of the node's data will be normalized to point at the end of
* the node.
*/
get normalizedOffset(): number {
const { offset, node } = this;
switch (node.nodeType) {
case Node.DOCUMENT_NODE:
case Node.DOCUMENT_FRAGMENT_NODE:
case Node.ELEMENT_NODE: {
const { childNodes: { length } } = node;
return offset > length ? length : offset;
}
case Node.TEXT_NODE: {
const { length } = node as Text;
return offset > length ? length : offset;
}
default:
throw new Error(`cannot normalize offset in a node of type: \
${node.nodeType}`);
}
} | return this.offset === this.normalizedOffset;
}
/**
* Convert a location with an offset which is out of bounds, to a location
* with an offset within bounds.
*
* An offset less than 0 will be normalized to 0. An offset pointing beyond
* the end of the node's data will be normalized to point at the end of the
* node.
*
* @returns A new [[Location]], if the offset was adjusted. Otherwise, it
* returns ``this``.
*/
normalizeOffset(): DOMLoc {
const normalized = this.normalizedOffset;
const { offset, node } = this;
return normalized === offset ? this : new DOMLoc(node, normalized);
}
/**
* Determine whether this location and another location are equal.
*
* @returns Whether ``this`` and ``other`` are equal.
*/
equals(other: DOMLoc | undefined | null): boolean {
return other != null &&
(this === other || (this.node === other.node &&
this.offset === other.offset));
}
/**
* Compare this location with another in document order.
*
* @param other The other location to compare.
*
* @returns -1 if ``this`` is earlier than ``other``, ``0`` if the two
* locations are equal, 1 if ``this`` is later than ``other``.
*
* @throws {ComparingDisconnectedNodes} If the two nodes are "disconnected"
* (i.e. do not belong to the same document).
*/
compare(other: DOMLoc): -1 | 0 | 1 {
if (this.equals(other)) {
return 0;
}
const { node, offset } = this;
const { node: otherNode, offset: otherOffset } = other;
if (node === otherNode) {
// The case where offset === otherOffset cannot happen here because it is
// covered above.
return offset - otherOffset < 0 ? -1 : 1;
}
const result = node.compareDocumentPosition(otherNode);
// tslint:disable:no-bitwise
if ((result & Node.DOCUMENT_POSITION_DISCONNECTED) !== 0) {
throw new ComparingDisconnectedNodes();
}
if ((result & Node.DOCUMENT_POSITION_FOLLOWING) !== 0) {
// otherNode follows node.
return (result & Node.DOCUMENT_POSITION_CONTAINED_BY) !== 0 ?
// otherNode is contained by node but we still need to figure out the
// relative positions of the node pointed by [node, offset] and
// otherNode.
pointedCompare(node, offset, otherNode) :
// otherNode just follows node, no parent child relation
-1;
}
if ((result & Node.DOCUMENT_POSITION_PRECEDING) === 0) {
/* istanbul ignore next: there's no means to generate this error */
throw new Error("neither preceding nor following: this should not \
happen");
}
// otherNode precedes node.
return ((result & Node.DOCUMENT_POSITION_CONTAINS) !== 0 &&
// otherNode contains node but we still need to figure out the
// relative positions of the node pointed by [otherNode,
// otherOffset] and node.
pointedCompare(otherNode, otherOffset, node) > 0) ? -1 : 1;
// tslint:enable:no-bitwise
}
}
type NodeTest = (node: Node) => boolean;
// tslint:disable-next-line:no-any
function indexOf(arrayLike: any, el: any): number {
return Array.prototype.indexOf.call(arrayLike, el);
}
/**
* A space delimits a part of a DOM tree in which one can obtain locations.
*/
export class DOMSpace implements Iterable<DOMLoc> {
/**
* @param min The minimum location included in this space.
*
* @param max The maximum location included in this space.
*
* @param relevanceTest A test to determine whether a node is relevant. This
* space does not produce locations into irrelevant nodes.
*
* @throws {CannotEscapeIrrelevantNode} If the container is irrelevant.
*
* @throw {ReversedRangeError} If ``max`` is less than ``min``.
*/
constructor(readonly min: DOMLoc,
readonly max: DOMLoc,
readonly relevanceTest: NodeTest = () => true) {
if (!(this.isRelevant(min.node) && this.isRelevant(max.node))) {
throw new CannotEscapeIrrelevantNode();
}
// Man could be equal to min but it cannot be less than min.
if (max.compare(min) < 0) {
throw new ReversedRangeError();
}
}
static makeSpanningNode(node: Node, relevanceTest?: NodeTest): DOMSpace {
return new DOMSpace(new DOMLoc(node, 0),
new DOMLoc(node, node.childNodes.length),
relevanceTest);
}
/**
* Test whether a node is contextually relevant. This method runs some stock
* tests and if necessary calls [[Space.relevanceTest]].
*
* @param node The node to test.
*
* @returns ``true`` if the node is contextually relevant, ``false`` if not.
*/
isRelevant(node: Node): boolean {
const { nodeType } = node;
return (nodeType === Node.ELEMENT_NODE || nodeType === Node.TEXT_NODE ||
nodeType === Node.DOCUMENT_NODE ||
nodeType === Node.DOCUMENT_FRAGMENT_NODE) &&
this.relevanceTest(node);
}
/**
* Determine whether this space contains a location.
*
* @param loc The location to test.
*
* @returns Whether the location is inside the space.
*/
contains(loc: DOMLoc): boolean {
try {
return this.min.compare(loc) <= 0 && this.max.compare(loc) >= 0 |
/**
* ``true`` if the location is already normalized. ``false`` if not.
*/
get isNormalized(): boolean { | random_line_split |
routing.py | /profile')
# Regular user control panel
r('controls.index', '/account/controls')
r('controls.auth', '/account/controls/authentication')
r('controls.persona', '/account/controls/persona')
r('controls.persona.add', '/account/controls/persona/add')
r('controls.persona.remove', '/account/controls/persona/remove')
r('controls.openid', '/account/controls/openid')
r('controls.openid.add', '/account/controls/openid/add')
r('controls.openid.add_finish', '/account/controls/openid/add_finish')
r('controls.openid.remove', '/account/controls/openid/remove')
r('controls.rels', '/account/controls/relationships')
r('controls.rels.watch', '/account/controls/relationships/watch')
r('controls.rels.unwatch', '/account/controls/relationships/unwatch')
r('controls.info', '/account/controls/user_info')
r('controls.certs', '/account/controls/certificates')
r('controls.certs.add', '/account/controls/certificates/add')
r('controls.certs.generate_server',
'/account/controls/certificates/gen/cert-{name}.p12')
r('controls.certs.details',
'/account/controls/certificates/details/{serial:[0-9a-f]+}')
r('controls.certs.download',
'/account/controls/certificates/download/cert-{name}-{serial:[0-9a-f]+}.pem')
r('controls.certs.revoke',
'/account/controls/certificates/revoke/{serial:[0-9a-f]+}')
# User pages
kw = sqla_route_options('user', 'name', model.User.name)
r('users.view', '/users/{name}', **kw)
r('users.art', '/users/{name}/art', **kw)
r('users.art_by_album', '/users/{name}/art/{album}', **kw)
r('users.profile', '/users/{name}/profile', **kw)
r('users.watchstream', '/users/{name}/watchstream', **kw)
r('albums.user_index', '/users/{name}/albums', **kw)
r('api:users.list', '/users.json')
# Artwork
kw = sqla_route_options('artwork', 'id', model.Artwork.id)
kw['pregenerator'] = artwork_pregenerator
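# Pyramid calls a route's pregenerator from request.route_url() with
# (request, elements, kw) and lets it rewrite both before the URL is built;
# artwork_pregenerator (defined elsewhere in this module) presumably fills in
# {id} and the optional {title} slug from an Artwork object so callers don't
# have to spell them out by hand.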
r('art.browse', '/art')
r('art.upload', '/art/upload')
r('art.view', r'/art/{id:\d+}{title:(-.+)?}', **kw)
r('art.add_tags', r'/art/{id:\d+}/add_tags', **kw)
r('art.remove_tags', r'/art/{id:\d+}/remove_tags', **kw)
r('art.rate', r'/art/{id:\d+}/rate', **kw)
# Tags
# XXX what should the tag name regex be, if anything?
# XXX should the regex be checked in the 'factory' instead? way easier that way...
kw = sqla_route_options('tag', 'name', model.Tag.name)
r('tags.list', '/tags')
r('tags.view', '/tags/{name}', **kw)
r('tags.artwork', '/tags/{name}/artwork', **kw)
# Albums
# XXX well this is getting complicated! needs to check user, needs to check id, needs to generate correctly, needs a title like art has
user_router = SugarRouter(config, '/users/{user}', model.User.name)
album_router = user_router.chain('/albums/{album}', model.Album.id, rel=model.Album.user)
album_router.add_route('albums.artwork', '')
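# With the chained router, 'albums.artwork' should generate and match
# /users/{user}/albums/{album}, resolving both the User and the Album rows
# from the URL.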
# Administration
r('admin.dashboard', '/admin')
r('admin.log', '/admin/log')
# Debugging
r('debug.blank', '/debug/blank')
r('debug.crash', '/debug/crash')
r('debug.mako-crash', '/debug/mako-crash')
r('debug.status.303', '/debug/303')
r('debug.status.400', '/debug/400')
r('debug.status.403', '/debug/403')
r('debug.status.404', '/debug/404')
# Comments; made complex because they can attach to different parent URLs.
# Rather than hack around how Pyramid's routing works, we can just use our
# own class that does what we want!
# XXX 1: make this work for users as well
# XXX 2: make the other routes work
# XXX 3: possibly find a way to verify that the same logic is used here and for the main routes
parent_route_names = ('art.view', 'user.view')
mapper = config.get_routes_mapper()
parent_routes = [mapper.get_route(name) for name in parent_route_names]
commentables = dict(
users=model.User.name,
art=model.Artwork.id,
)
def comments_factory(request):
# XXX prefetching on these?
type = request.matchdict['type']
identifier = request.matchdict['identifier']
try:
sqla_column = commentables[type]
entity = model.session.query(sqla_column.parententity).filter(sqla_column == identifier).one()
except (NoResultFound, KeyError):
# 404!
raise NotFound()
if 'comment_id' not in request.matchdict:
return contextualize(entity.discussion)
# URLs to specific comments should have those comments as the context
try:
return contextualize(
model.session .query(model.Comment)
.with_parent(entity.discussion)
.filter(model.Comment.id == request.matchdict['comment_id'])
.one())
except NoResultFound:
raise NotFound()
def comments_pregenerator(request, elements, kw):
resource = None
comment = kw.get('comment', None)
if comment:
kw['comment_id'] = comment.id
if 'resource' not in kw:
resource = comment.discussion.resource
if not resource:
resource = kw['resource']
# XXX users...
entity = resource.member
kw['type'] = 'art'
kw['identifier'] = entity.id
return elements, kw
r('comments.list', '/{type}/{identifier}/comments', factory=comments_factory)
r('comments.write', '/{type}/{identifier}/comments/write', factory=comments_factory, pregenerator=comments_pregenerator)
r('comments.view', '/{type}/{identifier}/comments/{comment_id}', factory=comments_factory, pregenerator=comments_pregenerator)
r('comments.edit', '/{type}/{identifier}/comments/{comment_id}/edit', factory=comments_factory, pregenerator=comments_pregenerator)
r('comments.reply', '/{type}/{identifier}/comments/{comment_id}/write', factory=comments_factory, pregenerator=comments_pregenerator)
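# A quick sketch of how these comment routes get used (editor's example, not in
# the original file; the artwork id and comment id below are made up): because
# comments_pregenerator pulls `type`, `identifier`, and `comment_id` off the
# comment row, a view can simply write
#
#     request.route_url('comments.view', comment=some_comment)
#     # -> /art/123/comments/456, assuming the comment hangs off artwork 123
#
# without ever passing the matchdict keys explicitly.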
class SugarRouter(object):
"""Glues routing to the ORM.
Use me like this:
foo_router = SugarRouter(config, '/foos/{foo}', model.Foo.identifier)
foo_router.add_route('foo_edit', '/edit')
This will route `/foos/{foo}/edit` to `foo_edit`, with the bonus that the
context will be set to the corresponding `Foo` object.
The reverse works as well:
request.route_url('foo_edit', foo=some_foo_row)
"""
# TODO: support URLs like /art/123-title-that-doesnt-matter
# ...but only do it for the root url, i think
def __init__(self, config, url_prefix, sqla_column, parent_router=None, rel=None):
self.config = config
self.url_prefix = url_prefix
self.sqla_column = sqla_column
self.sqla_table = sqla_column.parententity
self.parent_router = parent_router
self.sqla_rel = rel
assert (self.parent_router is None) == (self.sqla_rel is None)
# This is the {key} that appears in the matchdict and generated route,
# as well as the kwarg passed to route_url
match = re.search(r'[{](\w+)[}]', url_prefix)
if not match:
raise ValueError("Can't find a route kwarg in {0!r}".format(url_prefix))
self.key = match.group(1)
### Dealing with chaining
def chain(self, url_prefix, sqla_column, rel):
"""Create a new sugar router with this one as the parent."""
return self.__class__(
self.config, url_prefix, sqla_column,
parent_router=self, rel=rel)
@property
def full_url_prefix(self):
"""Constructs a chain of url prefixes going up to the root."""
if self.parent_router:
ret = self.parent_router.full_url_prefix
else:
ret = ''
ret += self.url_prefix
return ret
def filter_sqlalchemy_query(self, query, request):
"""Takes a query, filters it as demanded by the matchdict, and returns
a new one.
"""
query = query.filter(self.sqla_column == request.matchdict[self.key])
if self.parent_router:
query = query.join(self.sqla_rel)
query = self.parent_router.filter_sqlalchemy_query(
query, request)
return query
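# Editor's note (sketch, not in the original file): for the chained album_router
# registered above, a request for /users/{user}/albums/{album} therefore ends up
# running roughly
#
#     model.session.query(model.Album) \
#         .filter(model.Album.id == matchdict['album']) \
#         .join(model.Album.user) \
#         .filter(model.User.name == matchdict['user']) \
#         .one()
#
# i.e. each router in the chain contributes one filter and joins through its
# sqla_rel to reach the parent table.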
### Actual routing stuff
def add_route(self, route_name, suffix, **kwargs):
"""Analog to `config.add_route()`, with magic baked in. Extra kwargs
are passed along.
"""
kwargs['pregenerator'] = self.pregenerator
kwargs['factory'] = self.factory
self.config.add_route(route_name, self.full_url_prefix + suffix, **kwargs)
def pregenerator(self, request, elements, kw):
"""Passed to Pyramid as a bound method when creating a route.
Converts the arguments to route_url (which should be row objects) into
URL-friendly strings.
"""
# Get the row object, and get the property from it
row = kw.pop(self.key)
kw[self.key] = self.sqla_column.__get__(row, type(row))
if self.parent_router:
# Parent needs its own treatment here, too. Fill in the parent
# object automatically
kw[self.parent_router.key] = self.sqla_rel.__get__(row, type(row))
elements, kw = self.parent_router.pregenerator(request, elements, kw)
return elements, kw
def factory(self, request):
"""Passed to Pyramid as a bound method when creating a route.
Translates a matched URL to an ORM row, which becomes the context.
"""
# This yields the "context", which should be the row object
try:
q = model.session.query(self.sqla_table)
q = self.filter_sqlalchemy_query(q, request)
return q.one()
except NoResultFound:
# 404!
raise NotFound()
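# Editor's sketch (names reused from above, URL values made up): with the
# album_router chain defined earlier,
#
#     album_router.add_route('albums.artwork', '')
#
# matches /users/{user}/albums/{album}, factory() hands the view the matching
# Album row as its context, and
#
#     request.route_url('albums.artwork', album=some_album)
#
# runs pregenerator() in reverse, filling both {album} and {user} from the row.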
def sqla_route_options(url_key, match_key, sqla_column):
"""Returns a dict of route options that are helpful for routes representing SQLA objects.
``url_key``:
The key to use for a SQLA object when calling ``route_url()``.
``match_key``:
The key in the matchdict that contains the row identifier.
``sqla_column``:
The SQLA ORM column that appears in the URL.
"""
def pregenerator(request, elements, kw):
# Get the row object, and get the property from it
row = kw.pop(url_key)
kw[match_key] = sqla_column.__get__(row, type(row))
return elements, kw
def factory(request):
# This yields the "context", which should be the row object
try:
return contextualize(
model.session.query(sqla_column.parententity)
.filter(sqla_column == request.matchdict[match_key])
.one())
except NoResultFound:
# 404!
raise NotFound()
return dict(pregenerator=pregenerator, factory=factory)
def artwork_pregenerator(request, elements, kw):
"""Special pregenerator for artwork URLs, which also include a title
sometimes.
"""
artwork = kw.pop('artwork')
kw['id'] = artwork.id
# n.b.: this won't hurt anything if the route doesn't have {title}, so it's
# calculated and thrown away. bad?
if artwork.title:
kw['title'] = '-' + _make_url_friendly(artwork.title)
else:
kw['title'] = ''
return elements, kw
def _make_url_friendly
mod.rs
/// line-of-sight blockage should be
/// penalized.
pub los_penalty: f64,
/// Distance from point of emission at which COA signal reaches half
/// its maximum value.
pub halfmax_dist: Length,
/// Magnitude of COA. It will be divided by `NVERTS` so that it scales based
/// on the number of vertices.
pub mag: f64,
/// If two vertices are within this distance, then COA cannot occur between them.
pub too_close_dist: Length,
}
impl RawCoaParams {
pub fn refine(&self, bq: &CharacteristicQuantities) -> CoaParams {
let halfmax_dist = bq.normalize(&self.halfmax_dist);
CoaParams {
los_penalty: self.los_penalty,
halfmax_dist,
vertex_mag: self.mag / NVERTS as f64,
// self.mag * exp(distrib_exp * x), where x is distance
// between points.
distrib_exp: 0.5f64.ln() / halfmax_dist,
too_close_dist_sq: bq.normalize(&self.too_close_dist).pow(2),
}
}
}
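// Editor's sketch (not part of the original mod.rs): a tiny check of the COA
// falloff implied by `distrib_exp` above. With distrib_exp = ln(0.5) / halfmax_dist,
// the factor exp(distrib_exp * d) is 1.0 at d = 0 and 0.5 at d = halfmax_dist.
#[cfg(test)]
mod coa_falloff_sketch {
    #[test]
    fn halves_at_halfmax_dist() {
        let halfmax_dist = 110.0_f64; // hypothetical, already non-dimensionalized
        let distrib_exp = 0.5_f64.ln() / halfmax_dist;
        assert!(((distrib_exp * 0.0_f64).exp() - 1.0).abs() < 1e-12);
        assert!(((distrib_exp * halfmax_dist).exp() - 0.5).abs() < 1e-12);
    }
}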
#[derive(Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug)]
pub struct RawChemAttrParams {
pub center: [Length; 2],
pub mag: f64,
pub drop_per_char_l: f64,
pub char_l: Length,
}
impl RawChemAttrParams {
pub fn refine(&self, bq: &CharacteristicQuantities) -> ChemAttrParams {
ChemAttrParams {
center: V2d {
x: bq.normalize(&self.center[0]),
y: bq.normalize(&self.center[1]),
},
center_mag: self.mag,
slope: self.drop_per_char_l / bq.normalize(&self.char_l),
}
}
}
#[derive(Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug)]
pub struct RawBdryParams {
shape: [[Length; 2]; 4],
skip_bb_check: bool,
mag: f64,
}
impl RawBdryParams {
pub fn refine(&self, bq: &CharacteristicQuantities) -> BdryParams {
let shape = self
.shape
.iter()
.map(|p| V2d {
x: bq.normalize(&p[0]),
y: bq.normalize(&p[1]),
})
.collect::<Vec<V2d>>();
let bbox = BBox::from_points(&shape);
BdryParams {
shape,
bbox,
skip_bb_check: self.skip_bb_check,
mag: self.mag,
}
}
}
#[derive(
Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug, Modify,
)]
pub struct RawInteractionParams {
pub coa: Option<RawCoaParams>,
pub chem_attr: Option<RawChemAttrParams>,
pub bdry: Option<RawBdryParams>,
pub phys_contact: RawPhysicalContactParams,
}
impl RawInteractionParams {
pub fn refine(&self, bq: &CharacteristicQuantities) -> InteractionParams {
InteractionParams {
coa: self.coa.as_ref().map(|coa| coa.refine(bq)),
chem_attr: self
.chem_attr
.as_ref()
.map(|chem_attr| chem_attr.refine(bq)),
bdry: self.bdry.as_ref().map(|bdry| bdry.refine(bq)),
phys_contact: self.phys_contact.refine(bq),
}
}
}
#[derive(
Deserialize, Serialize, Copy, Clone, PartialEq, Default, Debug, Modify,
)]
pub struct RawWorldParameters {
pub vertex_eta: Viscosity,
pub interactions: RawInteractionParams,
}
#[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Default, Debug)]
pub struct PhysicalContactParams {
/// If two points are within this range, then they are considered
/// to be in contact for the purposes of CRL and adhesion.
pub zero_at: f64,
/// The square of `zero_at`.
pub zero_at_sq: f64,
/// If two points are within this range, then they are considered
/// to be in maximal contact, so that there is no smoothing factor
/// applied to CRL (i.e. the smoothing factor is `1.0`).
pub crl_one_at: f64,
/// The resting length of an adhesion. Same as `range.one_at * 0.8`.
pub adh_rest: f64,
/// This is distance at which the adhesion bond starts breaking/stops developing.
pub adh_break: f64,
/// Optional adhesion magnitude. If it is `None`, no adhesion
/// will be calculated.
pub adh_mag: Option<f64>,
/// Optional CAL magnitude. If it is `None`, simulation will
/// always execute CIL upon contact.
pub cal_mag: Option<f64>,
/// Magnitude of CIL that acts on Rho GTPase activation/
/// inactivation rates.
pub cil_mag: f64,
}
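// Editor's sketch (assumption, not from the original file): how `zero_at` and
// `crl_one_at` above are meant to be read. The real smoothing function lives
// elsewhere in the crate; a plain linear ramp is used here purely to illustrate
// "0 at zero_at, 1 at crl_one_at, smooth in between".
#[cfg(test)]
mod crl_smoothing_sketch {
    fn crl_factor(dist: f64, zero_at: f64, crl_one_at: f64) -> f64 {
        if dist >= zero_at {
            0.0
        } else if dist <= crl_one_at {
            1.0
        } else {
            (zero_at - dist) / (zero_at - crl_one_at)
        }
    }

    #[test]
    fn ramps_between_crl_one_at_and_zero_at() {
        assert_eq!(crl_factor(2.0, 1.0, 0.5), 0.0);
        assert_eq!(crl_factor(0.25, 1.0, 0.5), 1.0);
        assert!((crl_factor(0.75, 1.0, 0.5) - 0.5).abs() < 1e-12);
    }
}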
#[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Debug)]
pub struct CoaParams {
//TODO: Expand upon LOS system.
/// Factor controlling to what extent line-of-sight blockage
/// should be penalized. See SI for further information.
pub los_penalty: f64,
/// The distance at which COA signal reaches half-maximum value.
pub halfmax_dist: f64,
/// Magnitude of COA that acts on Rac1 activation rates.
pub vertex_mag: f64,
//TODO: look up exactly what is being done for this (see where
// parameter is being generated for hint).
/// Factor controlling the shape of the exponential modelling
/// COA interaction (a function shaping parameter). It determines
/// the distance at which two points would sense COA at half-max
/// magnitude.
pub distrib_exp: f64,
/// If two vertices are within the square root of this distance , then COA cannot occur between
/// them.
pub too_close_dist_sq: f64,
}
#[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Debug)]
pub struct ChemAttrParams {
/// Location of the chemoattractant center.
pub center: V2d,
/// Magnitude of chemoattractant a cell would sense if it were
/// right on top of the chemoattractant source.
pub center_mag: f64,
/// Assuming shallow chemoattractant gradient, which can be
/// modelled using a linear function with slope `slope`.
pub slope: f64,
}
#[derive(Clone, Deserialize, Serialize, PartialEq, Debug)]
pub struct BdryParams {
/// Shape of the boundary.
pub shape: Vec<V2d>,
/// Bounding box of the boundary.
pub bbox: BBox,
/// Should boundary bounding box be checked to see if cell is
/// within the boundary?
pub skip_bb_check: bool,
/// Magnitude of CIL-type interaction.
pub mag: f64,
}
#[derive(Clone, Deserialize, Serialize, PartialEq, Default, Debug)]
pub struct InteractionParams {
pub phys_contact: PhysicalContactParams,
pub coa: Option<CoaParams>,
pub chem_attr: Option<ChemAttrParams>,
pub bdry: Option<BdryParams>,
}
#[derive(Clone, Deserialize, Serialize, PartialEq, Default, Debug)]
pub struct WorldParameters {
/// Viscosity value used to calculate change in position of a
/// vertex due to calculated forces on it.
pub vertex_eta: f64,
pub interactions: InteractionParams,
}
impl RawWorldParameters {
pub fn refine(&self, bq: &CharacteristicQuantities) -> WorldParameters {
WorldParameters {
vertex_eta: bq.normalize(&self.vertex_eta),
interactions: self.interactions.refine(bq),
}
}
}
/// The "raw", unprocessed, parameters that are supplied by the user.
#[derive(Clone, Copy, Modify)]
pub struct RawParameters {
/// Cell diameter.
pub cell_diam: Length,
/// Fraction of max force achieved at `rgtp_act_at_max_f`.
pub halfmax_rgtp_max_f_frac: f64,
/// Stiffness of the membrane-cortex complex.
pub stiffness_cortex: Stress,
/// Typical lamellipod height: typical height of lamellipod (on the order of 100 nm).
pub lm_h: Length,
/// Halfmax Rho GTPase activity.
pub halfmax_rgtp_frac: f64,
/// Lamellipod stall stress: how much stress can lamellipod exert at most.
pub lm_ss: Stress,
/// Friction force opposing RhoA pulling.
pub rho_friction: f64,
/// Stiffness of cytoplasm.
pub stiffness_cyto: Force,
/// Diffusion rate of Rho GTPase on membrane.
pub diffusion_rgtp: Diffusion,
/// Initial distribution of Rac1.
pub init_rac: RgtpDistribution,
/// Initial distribution of RhoA.
pub init_rho: RgtpDistribution,
/// Baseline Rac1 activation rate.
pub kgtp_rac: Tinv,
/// Rac1 auto-activation rate.
pub kgtp_rac_auto: Tinv,
/// Baseline Rac1 inactivation rate.
pub kdgtp_rac: Tinv,
/// RhoA mediated inhibition of Rac1.
pub kdgtp_rho_on_rac: Tinv,
/// Strain at which Rac1 tension-mediated inhibition is half-strength.
pub halfmax_tension_inhib: f64,
/// Maximum tension-mediated Rac1 inhibition as a multiple of baseline Rac1 inactivation rate.
pub tension_inhib: f64,
/// Rate at which inactive membrane bound Rho GTPase dissociates from the
/// membrane.
pub k_mem_off: Tinv,
/// Rate at which cytosolic Rho GTPase associates with the membrane.
pub k_mem_on: Tinv,
/// Baseline RhoA activation rate.
pub kgtp_rho: Tinv,
/// RhoA auto-activation rate.
pub kgtp_auto_rho: Tinv,
/// Baseline RhoA inactivation rate.
pub kdgtp_rho: Tinv,
/// Rac1 mediated inhibition of RhoA.
pub kdgtp_rac_on_rho: Tinv,
/// Enable randomization of bursts in Rac1 activity?
pub randomization: bool,
/// Average period between randomization events.
pub rand_avg_t: Time,
/// Standard deviation of period between randomization events.
pub rand_std_t: Time,
/// Magnitude of randomly applied factor affecting Rac1 activation rate: how big a burst?
pub rand_mag: f64,
/// Fraction of vertices to be selected for increased Rac1 activation due to random events.
pub rand_vs: f64,
}
#[derive(Copy, Clone, Deserialize, Serialize, Default, Debug, PartialEq)]
pub struct Parameters {
/// Resting cell radius.
pub cell_r: f64,
/// Resting edge length.
pub rest_edge_len: f64,
/// Resting area.
pub rest_area: f64,
/// Stiffness of edge.
pub stiffness_edge: f64,
/// Rac1 mediated protrusive force constant.
pub const_protrusive: f64,
/// RhoA mediated protrusive force constant.
plugins.go
str += " " + name + "\n"
}
if len(pl["event_hooks"]) > 0 {
str += "\nEvent hook plugins:\n"
for _, name := range pl["event_hooks"] {
str += " hook." + name + "\n"
}
}
if len(pl["clustering"]) > 0 {
str += "\nClustering plugins:\n"
for _, name := range pl["clustering"] {
str += " " + name + "\n"
}
}
str += "\nOther plugins:\n"
for _, name := range pl["others"] {
str += " " + name + "\n"
}
return str
}
// ListPlugins makes a list of the registered plugins,
// keyed by plugin type.
func ListPlugins() map[string][]string {
p := make(map[string][]string)
// server type plugins
for name := range serverTypes {
p["server_types"] = append(p["server_types"], name)
}
// caddyfile loaders in registration order
for _, loader := range caddyfileLoaders {
p["caddyfile_loaders"] = append(p["caddyfile_loaders"], loader.name)
}
if defaultCaddyfileLoader.name != "" {
p["caddyfile_loaders"] = append(p["caddyfile_loaders"], defaultCaddyfileLoader.name)
}
// List the event hook plugins
eventHooks.Range(func(k, _ interface{}) bool {
p["event_hooks"] = append(p["event_hooks"], k.(string))
return true
})
// alphabetize the rest of the plugins
var others []string
for stype, stypePlugins := range plugins {
for name := range stypePlugins {
var s string
if stype != "" {
s = stype + "."
}
s += name
others = append(others, s)
}
}
sort.Strings(others)
for _, name := range others {
p["others"] = append(p["others"], name)
}
return p
}
// ValidDirectives returns the list of all directives that are
// recognized for the server type serverType. However, not all
// directives may be installed. This makes it possible to give
// more helpful error messages, like "did you mean ..." or
// "maybe you need to plug in ...".
func ValidDirectives(serverType string) []string {
stype, err := getServerType(serverType)
if err != nil {
return nil
}
return stype.Directives()
}
// ServerListener pairs a server to its listener and/or packetconn.
type ServerListener struct {
server Server
listener net.Listener
packet net.PacketConn
}
// LocalAddr returns the local network address of the packetconn. It returns
// nil when it is not set.
func (s ServerListener) LocalAddr() net.Addr {
if s.packet == nil {
return nil
}
return s.packet.LocalAddr()
}
// Addr returns the listener's network address. It returns nil when it is
// not set.
func (s ServerListener) Addr() net.Addr {
if s.listener == nil {
return nil
}
return s.listener.Addr()
}
// Context is a type which carries a server type through
// the load and setup phase; it maintains the state
// between loading the Caddyfile, then executing its
// directives, then making the servers for Caddy to
// manage. Typically, such state involves configuration
// structs, etc.
type Context interface {
// Called after the Caddyfile is parsed into server
// blocks but before the directives are executed,
// this method gives you an opportunity to inspect
// the server blocks and prepare for the execution
// of directives. Return the server blocks (which
// you may modify, if desired) and an error, if any.
// The first argument is the name or path to the
// configuration file (Caddyfile).
//
// This function can be a no-op and simply return its
// input if there is nothing to do here.
InspectServerBlocks(string, []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error)
// This is what Caddy calls to make server instances.
// By this time, all directives have been executed and,
// presumably, the context has enough state to produce
// server instances for Caddy to start.
MakeServers() ([]Server, error)
}
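// Editor's sketch (not part of the original plugins.go): a minimal no-op Context
// implementation, roughly the skeleton a new server type would start from. The
// type name is made up; only identifiers already used in this file are assumed.
type sketchContext struct {
	inst *Instance
}

var _ Context = (*sketchContext)(nil)

func (c *sketchContext) InspectServerBlocks(path string, blocks []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error) {
	// Nothing to inspect; hand the blocks back unchanged.
	return blocks, nil
}

func (c *sketchContext) MakeServers() ([]Server, error) {
	// A real context would build one Server per listening address here.
	return nil, nil
}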
// RegisterServerType registers a server type srv by its
// name, typeName.
func RegisterServerType(typeName string, srv ServerType) {
if _, ok := serverTypes[typeName]; ok {
panic("server type already registered")
}
serverTypes[typeName] = srv
}
// ServerType contains information about a server type.
type ServerType struct {
// Function that returns the list of directives, in
// execution order, that are valid for this server
// type. Directives should be one word if possible
// and lower-cased.
Directives func() []string
// DefaultInput returns a default config input if none
// is otherwise loaded. This is optional, but highly
// recommended, otherwise a blank Caddyfile will be
// used.
DefaultInput func() Input
// The function that produces a new server type context.
// This will be called when a new Caddyfile is being
// loaded, parsed, and executed independently of any
// startup phases before this one. It's a way to keep
// each set of server instances separate and to reduce
// the amount of global state you need.
NewContext func(inst *Instance) Context
}
// Plugin is a type which holds information about a plugin.
type Plugin struct {
// ServerType is the type of server this plugin is for.
// Can be empty if not applicable, or if the plugin
// can associate with any server type.
ServerType string
// Action is the plugin's setup function, if associated
// with a directive in the Caddyfile.
Action SetupFunc
}
// RegisterPlugin plugs in plugin. All plugins should register
// themselves, even if they do not perform an action associated
// with a directive. It is important for the process to know
// which plugins are available.
//
// The plugin MUST have a name: lower case and one word.
// If this plugin has an action, it must be the name of
// the directive that invokes it. A name is always required
// and must be unique for the server type.
func RegisterPlugin(name string, plugin Plugin) {
if name == "" {
panic("plugin must have a name")
}
if _, ok := plugins[plugin.ServerType]; !ok {
plugins[plugin.ServerType] = make(map[string]Plugin)
}
if _, dup := plugins[plugin.ServerType][name]; dup {
panic("plugin named " + name + " already registered for server type " + plugin.ServerType)
}
plugins[plugin.ServerType][name] = plugin
}
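// Editor's sketch (not part of the original plugins.go): what a plugin's own
// package typically does with RegisterPlugin. "gizmo", "http", and setupGizmo
// are all hypothetical names.
func init() {
	RegisterPlugin("gizmo", Plugin{
		ServerType: "http",
		Action:     setupGizmo,
	})
}

// setupGizmo stands in for the plugin's real SetupFunc, which would parse the
// "gizmo" directive's tokens when the Caddyfile is loaded.
var setupGizmo SetupFunc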
// EventName represents the name of an event used with event hooks.
type EventName string
// Define names for the various events
const (
StartupEvent EventName = "startup"
ShutdownEvent = "shutdown"
CertRenewEvent = "certrenew"
InstanceStartupEvent = "instancestartup"
InstanceRestartEvent = "instancerestart"
)
// EventHook is a type which holds information about a startup hook plugin.
type EventHook func(eventType EventName, eventInfo interface{}) error
// RegisterEventHook plugs in hook. All the hooks should register themselves
// and they must have a name.
func RegisterEventHook(name string, hook EventHook) {
if name == "" {
panic("event hook must have a name")
}
_, dup := eventHooks.LoadOrStore(name, hook)
if dup {
panic("hook named " + name + " already registered")
}
}
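// Editor's sketch (hypothetical hook name, not in the original file): registering
// an event hook. EmitEvent below calls hooks synchronously, so anything slow
// should spin off its own goroutine, as the EmitEvent comment notes.
func init() {
	RegisterEventHook("logging", func(event EventName, info interface{}) error {
		log.Printf("caddy event fired: %s", event)
		return nil
	})
}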
// EmitEvent executes the different hooks passing the EventType as an
// argument. This is a blocking function. Hook developers should
// use 'go' keyword if they don't want to block Caddy.
func EmitEvent(event EventName, info interface{}) {
eventHooks.Range(func(k, v interface{}) bool {
err := v.(EventHook)(event, info)
if err != nil {
log.Printf("error on '%s' hook: %v", k.(string), err)
}
return true
})
}
// cloneEventHooks return a clone of the event hooks *sync.Map
func cloneEventHooks() *sync.Map {
c := &sync.Map{}
eventHooks.Range(func(k, v interface{}) bool {
c.Store(k, v)
return true
})
return c
}
// purgeEventHooks purges all event hooks from the map
func purgeEventHooks() {
eventHooks.Range(func(k, _ interface{}) bool {
eventHooks.Delete(k)
return true
})
}
// restoreEventHooks restores eventHooks with a provided *sync.Map
func restoreEventHooks(m *sync.Map) {
// Purge old event hooks
purgeEventHooks()
// Restore event hooks
m.Range(func(k, v interface{}) bool {
eventHooks.Store(k, v)
return true
})
}
// ParsingCallback is a function that is called after
// a directive's setup functions have been executed
// for all the server blocks.
type ParsingCallback func(Context) error
// RegisterParsingCallback registers callback to be called after
// executing the directive afterDir for server type serverType.
func RegisterParsingCallback
ServerType string
// Action is the plugin's setup function, if associated
// with a directive in the Caddyfile.
Action SetupFunc
}
// RegisterPlugin plugs in plugin. All plugins should register
// themselves, even if they do not perform an action associated
// with a directive. It is important for the process to know
// which plugins are available.
//
// The plugin MUST have a name: lower case and one word.
// If this plugin has an action, it must be the name of
// the directive that invokes it. A name is always required
// and must be unique for the server type.
func RegisterPlugin(name string, plugin Plugin) {
if name == "" {
panic("plugin must have a name")
}
if _, ok := plugins[plugin.ServerType]; !ok {
plugins[plugin.ServerType] = make(map[string]Plugin)
}
if _, dup := plugins[plugin.ServerType][name]; dup {
panic("plugin named " + name + " already registered for server type " + plugin.ServerType)
}
plugins[plugin.ServerType][name] = plugin
}
// EventName represents the name of an event used with event hooks.
type EventName string
// Define names for the various events
const (
StartupEvent EventName = "startup"
ShutdownEvent = "shutdown"
CertRenewEvent = "certrenew"
InstanceStartupEvent = "instancestartup"
InstanceRestartEvent = "instancerestart"
)
// EventHook is a type which holds information about a startup hook plugin.
type EventHook func(eventType EventName, eventInfo interface{}) error
// RegisterEventHook plugs in hook. All the hooks should register themselves
// and they must have a name.
func RegisterEventHook(name string, hook EventHook) {
if name == "" {
panic("event hook must have a name")
}
_, dup := eventHooks.LoadOrStore(name, hook)
if dup {
panic("hook named " + name + " already registered")
}
}
// EmitEvent executes the different hooks passing the EventType as an
// argument. This is a blocking function. Hook developers should
// use 'go' keyword if they don't want to block Caddy.
func EmitEvent(event EventName, info interface{}) {
eventHooks.Range(func(k, v interface{}) bool {
err := v.(EventHook)(event, info)
if err != nil {
log.Printf("error on '%s' hook: %v", k.(string), err)
}
return true
})
}
// cloneEventHooks returns a clone of the event hooks *sync.Map
func cloneEventHooks() *sync.Map {
c := &sync.Map{}
eventHooks.Range(func(k, v interface{}) bool {
c.Store(k, v)
return true
})
return c
}
// purgeEventHooks purges all event hooks from the map
func purgeEventHooks() |
// restoreEventHooks restores eventHooks with a provided *sync.Map
func restoreEventHooks(m *sync.Map) {
// Purge old event hooks
purgeEventHooks()
// Restore event hooks
m.Range(func(k, v interface{}) bool {
eventHooks.Store(k, v)
return true
})
}
// ParsingCallback is a function that is called after
// a directive's setup functions have been executed
// for all the server blocks.
type ParsingCallback func(Context) error
// RegisterParsingCallback registers callback to be called after
// executing the directive | {
eventHooks.Range(func(k, _ interface{}) bool {
eventHooks.Delete(k)
return true
})
} | identifier_body |
plugins.go | // limitations under the License.
package caddy
import (
"fmt"
"log"
"net"
"sort"
"sync"
"github.com/coredns/caddy/caddyfile"
)
// These are all the registered plugins.
var (
// serverTypes is a map of registered server types.
serverTypes = make(map[string]ServerType)
// plugins is a map of server type to map of plugin name to
// Plugin. These are the "general" plugins that may or may
// not be associated with a specific server type. If it's
// applicable to multiple server types or the server type is
// irrelevant, the key is empty string (""). But all plugins
// must have a name.
plugins = make(map[string]map[string]Plugin)
// eventHooks is a map of hook name to Hook. All hook plugins
// must have a name.
eventHooks = &sync.Map{}
// parsingCallbacks maps server type to map of directive
// to list of callback functions. These aren't really
// plugins on their own, but are often registered from
// plugins.
parsingCallbacks = make(map[string]map[string][]ParsingCallback)
// caddyfileLoaders is the list of all Caddyfile loaders
// in registration order.
caddyfileLoaders []caddyfileLoader
)
// DescribePlugins returns a string describing the registered plugins.
func DescribePlugins() string {
pl := ListPlugins()
str := "Server types:\n"
for _, name := range pl["server_types"] {
str += " " + name + "\n"
}
str += "\nCaddyfile loaders:\n"
for _, name := range pl["caddyfile_loaders"] {
str += " " + name + "\n"
}
if len(pl["event_hooks"]) > 0 {
str += "\nEvent hook plugins:\n"
for _, name := range pl["event_hooks"] {
str += " hook." + name + "\n"
}
}
if len(pl["clustering"]) > 0 {
str += "\nClustering plugins:\n"
for _, name := range pl["clustering"] {
str += " " + name + "\n"
}
}
str += "\nOther plugins:\n"
for _, name := range pl["others"] {
str += " " + name + "\n"
}
return str
}
// ListPlugins makes a list of the registered plugins,
// keyed by plugin type.
func ListPlugins() map[string][]string {
p := make(map[string][]string)
// server type plugins
for name := range serverTypes {
p["server_types"] = append(p["server_types"], name)
}
// caddyfile loaders in registration order
for _, loader := range caddyfileLoaders {
p["caddyfile_loaders"] = append(p["caddyfile_loaders"], loader.name)
}
if defaultCaddyfileLoader.name != "" {
p["caddyfile_loaders"] = append(p["caddyfile_loaders"], defaultCaddyfileLoader.name)
}
// List the event hook plugins
eventHooks.Range(func(k, _ interface{}) bool {
p["event_hooks"] = append(p["event_hooks"], k.(string))
return true
})
// alphabetize the rest of the plugins
var others []string
for stype, stypePlugins := range plugins {
for name := range stypePlugins {
var s string
if stype != "" {
s = stype + "."
}
s += name
others = append(others, s)
}
}
sort.Strings(others)
for _, name := range others {
p["others"] = append(p["others"], name)
}
return p
}
// ValidDirectives returns the list of all directives that are
// recognized for the server type serverType. However, not all
// directives may be installed. This makes it possible to give
// more helpful error messages, like "did you mean ..." or
// "maybe you need to plug in ...".
func ValidDirectives(serverType string) []string {
stype, err := getServerType(serverType)
if err != nil {
return nil
}
return stype.Directives()
}
// ServerListener pairs a server to its listener and/or packetconn.
type ServerListener struct {
server Server
listener net.Listener
packet net.PacketConn
}
// LocalAddr returns the local network address of the packetconn. It returns
// nil when it is not set.
func (s ServerListener) LocalAddr() net.Addr {
if s.packet == nil {
return nil
}
return s.packet.LocalAddr()
}
// Addr returns the listener's network address. It returns nil when it is
// not set.
func (s ServerListener) Addr() net.Addr {
if s.listener == nil {
return nil
}
return s.listener.Addr()
}
// Context is a type which carries a server type through
// the load and setup phase; it maintains the state
// between loading the Caddyfile, then executing its
// directives, then making the servers for Caddy to
// manage. Typically, such state involves configuration
// structs, etc.
type Context interface {
// Called after the Caddyfile is parsed into server
// blocks but before the directives are executed,
// this method gives you an opportunity to inspect
// the server blocks and prepare for the execution
// of directives. Return the server blocks (which
// you may modify, if desired) and an error, if any.
// The first argument is the name or path to the
// configuration file (Caddyfile).
//
// This function can be a no-op and simply return its
// input if there is nothing to do here.
InspectServerBlocks(string, []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error)
// This is what Caddy calls to make server instances.
// By this time, all directives have been executed and,
// presumably, the context has enough state to produce
// server instances for Caddy to start.
MakeServers() ([]Server, error)
}
// RegisterServerType registers a server type srv by its
// name, typeName.
func RegisterServerType(typeName string, srv ServerType) {
if _, ok := serverTypes[typeName]; ok {
panic("server type already registered")
}
serverTypes[typeName] = srv
}
// ServerType contains information about a server type.
type ServerType struct {
// Function that returns the list of directives, in
// execution order, that are valid for this server
// type. Directives should be one word if possible
// and lower-cased.
Directives func() []string
// DefaultInput returns a default config input if none
// is otherwise loaded. This is optional, but highly
// recommended, otherwise a blank Caddyfile will be
// used.
DefaultInput func() Input
// The function that produces a new server type context.
// This will be called when a new Caddyfile is being
// loaded, parsed, and executed independently of any
// startup phases before this one. It's a way to keep
// each set of server instances separate and to reduce
// the amount of global state you need.
NewContext func(inst *Instance) Context
}
// Plugin is a type which holds information about a plugin.
type Plugin struct {
// ServerType is the type of server this plugin is for.
// Can be empty if not applicable, or if the plugin
// can associate with any server type.
ServerType string
// Action is the plugin's setup function, if associated
// with a directive in the Caddyfile.
Action SetupFunc
}
// RegisterPlugin plugs in plugin. All plugins should register
// themselves, even if they do not perform an action associated
// with a directive. It is important for the process to know
// which plugins are available.
//
// The plugin MUST have a name: lower case and one word.
// If this plugin has an action, it must be the name of
// the directive that invokes it. A name is always required
// and must be unique for the server type.
func RegisterPlugin(name string, plugin Plugin) {
if name == "" {
panic("plugin must have a name")
}
if _, ok := plugins[plugin.ServerType]; !ok {
plugins[plugin.ServerType] = make(map[string]Plugin)
}
if _, dup := plugins[plugin.ServerType][name]; dup {
panic("plugin named " + name + " already registered for server type " + plugin.ServerType)
}
plugins[plugin.ServerType][name] = plugin
}
// EventName represents the name of an event used with event hooks.
type EventName string
// Define names for the various events
const (
StartupEvent EventName = "startup"
ShutdownEvent = "shutdown"
CertRenewEvent = "certrenew"
InstanceStartupEvent = "instancestartup"
InstanceRestartEvent = "instancerestart"
)
// EventHook is a type which holds information about a startup hook plugin.
type EventHook func(eventType EventName, eventInfo interface{}) error
// RegisterEventHook plugs in hook. All the hooks should register themselves
// and they must have a name.
func RegisterEventHook(name string, hook EventHook) {
if name == "" {
| // See the License for the specific language governing permissions and | random_line_split |
|
2#allAnglesLengths.py |
def indexFind(index_of_2,i1,j1,k1):
if index_of_2==i1:
indexOf0=j1
indexOf1=k1
elif index_of_2==j1:
indexOf0=i1
indexOf1=k1
elif index_of_2==k1:
indexOf0=i1
indexOf1=j1
return indexOf0, indexOf1
def processFiles(fileName):
"""Calculates all angles, all lengths, representative angle and maxDist after performing rule-based labelling.
Arguments:
fileName: The protein file in PDB/ENT format.
Returns:
all_angleList: A Counter having all angles formed by their medians on opposite edges of the
non-collinear triangle formed by the three amino acids at i, j and k
and their frequencies of occurrences in this protein file, rounded to the next significant digit.
rep_angleList: A Counter having representative angle and its frequency
all_lengthsList: A counter having lengths of all edges of the non-collinear triangle formed.
maxDist: Maximum length among all lengths calculated above.
"""
print fileName
count_t1 = 0
inFile=open(fileName,'r')
all_angleList = Counter()
rep_angleList = Counter()
all_lengthsList = Counter()
maxDist_List = Counter()
global xCord, yCord, zCord
aminoAcidName={}
xCord={}
yCord={}
zCord={}
seq_number={}
counter=0
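# Scan the PDB records: stop at the end of the first model, and keep one
# alpha-carbon (CA) atom per residue, recording its label and 3D coordinates.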
for i in inFile:
if (i[0:6].rstrip()=="NUMMDL"):
numOfModels=i[10:14].rstrip()
if ((i[0:6].rstrip()=="ENDMDL")or (i[0:6].rstrip()=='TER')):
break
if (i[0:6].rstrip()=="MODEL" and int(i[10:14].rstrip())>1):
break
if(i[0:4].rstrip())=="ATOM" and(i[13:15].rstrip())=="CA" and(i[16]=='A'or i[16]==' ')and i[17:20]!= "UNK" :
aminoAcidName[counter]=int(aminoAcidLabel[i[17:20]])
xCord[counter]=(float(i[30:38]))
yCord[counter]=(float(i[38:46]))
zCord[counter]=(float(i[46:54]))
seq_number[counter]=str(i[22:27])
counter+=1
protLen=len(yCord)
initialLabel=[]
sortedLabel=[]
sortedIndex=[]
outDist={}
for m in range(0,3):
initialLabel.append(0)
sortedLabel.append(0)
sortedIndex.append(0)
for i in range(0,protLen-2):
for j in range(i+1,protLen-1):
for k in range(j+1, protLen):
global i1,j1,k1
i1=i
j1=j
k1=k
keepLabelIndex={}
keepLabelIndex[aminoAcidName[i]]=i
keepLabelIndex[aminoAcidName[j]]=j
keepLabelIndex[aminoAcidName[k]]=k
initialLabel[0]=aminoAcidName[i]
initialLabel[1]=aminoAcidName[j]
initialLabel[2]=aminoAcidName[k]
sortedLabel=list(initialLabel)
sortedLabel.sort(reverse=True)
# Perform rule-based labelling
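# Four cases follow, depending on how many of the three residue labels
# coincide; ties are broken by pairwise distance so that the vertex
# ordering (indexOf0, indexOf1, indexOf2) is deterministic.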
if (sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]):
dist1_2Temp=calcDist(i,j)
dist1_3Temp=calcDist(i,k)
dist2_3Temp=calcDist(j,k)
if dist1_2Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)):
indexOf0=i
indexOf1=j
indexOf2=k
elif dist1_3Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)):
indexOf0=i
indexOf1=k
indexOf2=j
else:
indexOf0=j
indexOf1=k
indexOf2=i
elif(aminoAcidName[i]!=aminoAcidName[j])and(aminoAcidName[i]!=aminoAcidName[k]) and(aminoAcidName[j]!=aminoAcidName[k]):
for index_ in range(0,3):
sortedIndex[index_]=keepLabelIndex[sortedLabel[index_]]
indexOf0=sortedIndex[0]
indexOf1=sortedIndex[1]
indexOf2=sortedIndex[2]
elif(sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]!=sortedLabel[2]):
indexOf2=keepLabelIndex[sortedLabel[2]]
indices=indexFind(indexOf2,i,j,k)
a=indexOf2
b=indices[0]
c=indices[1]
dist1_3Temp=calcDist(b,a)
dist2_3Temp=calcDist(c,a)
if dist1_3Temp>=dist2_3Temp:
indexOf0=indices[0]
indexOf1=indices[1]
else:
indexOf0=indices[1]
indexOf1=indices[0]
elif(sortedLabel[0]!=sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]):
indexOf0=keepLabelIndex[sortedLabel[0]]
indices=indexFind(indexOf0,i,j,k)
if calcDist(indexOf0,indices[0])>= calcDist(indexOf0,indices[1]):
indexOf1=indices[0]
indexOf2=indices[1]
else:
indexOf2=indices[0]
indexOf1=indices[1]
dist01=calcDist(indexOf0,indexOf1)
s2=dist01/2
dist02=calcDist(indexOf0,indexOf2)
s1=dist02
dist12=dist01
dist03=calcDist(indexOf1,indexOf2)
# All lengths calculation
all_lengthsList[round(dist01,round_off_to)] += 1
all_lengthsList[round(dist02,round_off_to)] += 1
all_lengthsList[round(dist03,round_off_to)] += 1
maxDist_List[round(max(dist01,dist02,dist03),round_off_to)] +=1
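# s3 is the length of the median from vertex 2 to the midpoint of edge (0,1);
# Theta1 below is the angle that median makes with the edge, computed via the
# law of cosines and folded into [0, 90] degrees.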
s3=(((xCord[indexOf0]+xCord[indexOf1])/2-xCord[indexOf2])**2
+((yCord[indexOf0]+yCord[indexOf1])/2-yCord[indexOf2])**2
+((zCord[indexOf0]+zCord[indexOf1])/2-zCord[indexOf2])**2)**0.5
Theta1=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14
if Theta1<=90:
all_angleList[round(Theta1,round_off_to)] +=1
rep_angleList[round(Theta1,round_off_to)] +=1
else:
all_angleList[round(abs(180-Theta1),round_off_to)] +=1
rep_angleList[round(abs(180-Theta1),round_off_to)] +=1
#if Theta1>90:
# Theta1=abs(180-Theta1)
#print 'Second Theta1, ',Theta1
#Theta 2
dist02=calcDist(indexOf1,indexOf0)
s1=dist02
dist01=calcDist(indexOf1,indexOf2)
s2=dist01/2
s3=(((xCord[indexOf1]+xCord[indexOf2])/2-xCord[indexOf0])**2
+((yCord[indexOf1]+yCord[indexOf2])/2-yCord[indexOf0])**2
+((zCord[indexOf1]+zCord[indexOf2])/2-zCord[indexOf0])**2)**0.5
Theta2=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14
#if Theta2 > 90:
# Theta2 = abs(180-Theta2)
if Theta2<=90:
all_angleList[round(Theta2,round_off_to)] +=1
else:
all_angleList[ | """Calculate Distance between two points in 3D space."""
x1=xCord[indexLabel1]
x2=xCord[indexLabel2]
y1=yCord[indexLabel1]
y2=yCord[indexLabel2]
z1=zCord[indexLabel1]
z2=zCord[indexLabel2]
distance=(((x1-x2)**2+(y2-y1)**2+(z2-z1)**2)**0.5)
return distance | identifier_body |
|
2#allAnglesLengths.py | =i1
indexOf1=j1
return indexOf0, indexOf1
def processFiles(fileName):
"""Calculates all angles, all lengths, representative angle and maxDist after performing rule-based labelling.
Arguments:
fileName: The protein file in PDB/ENT format.
Returns:
all_angleList: A Counter having all angles formed by their medians on opposite edges of the
non-collinear triangle formed by the three amino acids at i, j and k
and their frequencies of occurrences in this protein file, rounded to the next significant digit.
rep_angleList: A Counter having representative angle and its frequency
all_lengthsList: A counter having lengths of all edges of the non-collinear triangle formed.
maxDist: Maximum length among all lengths calculated above.
"""
print fileName
count_t1 = 0
inFile=open(fileName,'r')
all_angleList = Counter()
rep_angleList = Counter()
all_lengthsList = Counter()
maxDist_List = Counter()
global xCord, yCord, zCord
aminoAcidName={}
xCord={}
yCord={}
zCord={}
seq_number={}
counter=0
for i in inFile:
if (i[0:6].rstrip()=="NUMMDL"):
numOfModels=i[10:14].rstrip()
if ((i[0:6].rstrip()=="ENDMDL")or (i[0:6].rstrip()=='TER')):
break
if (i[0:6].rstrip()=="MODEL" and int(i[10:14].rstrip())>1):
break
if(i[0:4].rstrip())=="ATOM" and(i[13:15].rstrip())=="CA" and(i[16]=='A'or i[16]==' ')and i[17:20]!= "UNK" :
aminoAcidName[counter]=int(aminoAcidLabel[i[17:20]])
xCord[counter]=(float(i[30:38]))
yCord[counter]=(float(i[38:46]))
zCord[counter]=(float(i[46:54]))
seq_number[counter]=str(i[22:27])
counter+=1
protLen=len(yCord)
initialLabel=[]
sortedLabel=[]
sortedIndex=[]
outDist={}
for m in range(0,3):
initialLabel.append(0)
sortedLabel.append(0)
sortedIndex.append(0)
for i in range(0,protLen-2):
for j in range(i+1,protLen-1):
for k in range(j+1, protLen):
global i1,j1,k1
i1=i
j1=j
k1=k
keepLabelIndex={}
keepLabelIndex[aminoAcidName[i]]=i
keepLabelIndex[aminoAcidName[j]]=j
keepLabelIndex[aminoAcidName[k]]=k
initialLabel[0]=aminoAcidName[i]
initialLabel[1]=aminoAcidName[j]
initialLabel[2]=aminoAcidName[k]
sortedLabel=list(initialLabel)
sortedLabel.sort(reverse=True)
# Perform rule-based labelling
if (sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]):
dist1_2Temp=calcDist(i,j)
dist1_3Temp=calcDist(i,k)
dist2_3Temp=calcDist(j,k)
if dist1_2Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)):
indexOf0=i
indexOf1=j
indexOf2=k
elif dist1_3Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)):
indexOf0=i
indexOf1=k
indexOf2=j
else:
indexOf0=j
indexOf1=k
indexOf2=i
elif(aminoAcidName[i]!=aminoAcidName[j])and(aminoAcidName[i]!=aminoAcidName[k]) and(aminoAcidName[j]!=aminoAcidName[k]):
for index_ in range(0,3):
sortedIndex[index_]=keepLabelIndex[sortedLabel[index_]]
indexOf0=sortedIndex[0]
indexOf1=sortedIndex[1]
indexOf2=sortedIndex[2]
elif(sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]!=sortedLabel[2]):
indexOf2=keepLabelIndex[sortedLabel[2]]
indices=indexFind(indexOf2,i,j,k)
a=indexOf2
b=indices[0]
c=indices[1]
dist1_3Temp=calcDist(b,a)
dist2_3Temp=calcDist(c,a)
if dist1_3Temp>=dist2_3Temp:
indexOf0=indices[0]
indexOf1=indices[1]
else:
indexOf0=indices[1]
indexOf1=indices[0]
elif(sortedLabel[0]!=sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]):
indexOf0=keepLabelIndex[sortedLabel[0]]
indices=indexFind(indexOf0,i,j,k)
if calcDist(indexOf0,indices[0])>= calcDist(indexOf0,indices[1]):
indexOf1=indices[0]
indexOf2=indices[1]
else:
indexOf2=indices[0]
indexOf1=indices[1]
dist01=calcDist(indexOf0,indexOf1)
s2=dist01/2
dist02=calcDist(indexOf0,indexOf2)
s1=dist02
dist12=dist01
dist03=calcDist(indexOf1,indexOf2)
# All lengths calculation
all_lengthsList[round(dist01,round_off_to)] += 1
all_lengthsList[round(dist02,round_off_to)] += 1
all_lengthsList[round(dist03,round_off_to)] += 1
maxDist_List[round(max(dist01,dist02,dist03),round_off_to)] +=1
s3=(((xCord[indexOf0]+xCord[indexOf1])/2-xCord[indexOf2])**2
+((yCord[indexOf0]+yCord[indexOf1])/2-yCord[indexOf2])**2
+((zCord[indexOf0]+zCord[indexOf1])/2-zCord[indexOf2])**2)**0.5
Theta1=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14
if Theta1<=90:
all_angleList[round(Theta1,round_off_to)] +=1
rep_angleList[round(Theta1,round_off_to)] +=1
else:
all_angleList[round(abs(180-Theta1),round_off_to)] +=1
rep_angleList[round(abs(180-Theta1),round_off_to)] +=1
#if Theta1>90:
# Theta1=abs(180-Theta1)
#print 'Second Theta1, ',Theta1
#Theta 2
dist02=calcDist(indexOf1,indexOf0)
s1=dist02
dist01=calcDist(indexOf1,indexOf2)
s2=dist01/2
s3=(((xCord[indexOf1]+xCord[indexOf2])/2-xCord[indexOf0])**2
+((yCord[indexOf1]+yCord[indexOf2])/2-yCord[indexOf0])**2
+((zCord[indexOf1]+zCord[indexOf2])/2-zCord[indexOf0])**2)**0.5 | Theta2=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14
#if Theta2 > 90:
# Theta2 = abs(180-Theta2)
if Theta2<=90:
all_angleList[round(Theta2,round_off_to)] +=1
else:
all_angleList[round(abs(180-Theta2),round_off_to)] +=1
#Theta 3
dist02=calcDist(indexOf2,indexOf1)
s1=dist02
dist01=calcDist(indexOf2,indexOf0)
s2=dist01/2
s3=(((xCord[indexOf2]+xCord[indexOf0])/2-xCord[indexOf1])**2+
((yCord[indexOf2]+yCord[indexOf0])/2-yCord[indexOf1])**2+
((zCord[indexOf2]+zCord[indexOf0])/2-zCord[indexOf1])**2)**0.5
Theta3=180*(math.acos((s1**2-s2**2-s3**2)/( | random_line_split |
|
2#allAnglesLengths.py | (indexLabel1,indexLabel2):
"""Calculate Distance between two points in 3D space."""
x1=xCord[indexLabel1]
x2=xCord[indexLabel2]
y1=yCord[indexLabel1]
y2=yCord[indexLabel2]
z1=zCord[indexLabel1]
z2=zCord[indexLabel2]
distance=(((x1-x2)**2+(y2-y1)**2+(z2-z1)**2)**0.5)
return distance
def indexFind(index_of_2,i1,j1,k1):
if index_of_2==i1:
indexOf0=j1
indexOf1=k1
elif index_of_2==j1:
indexOf0=i1
indexOf1=k1
elif index_of_2==k1:
indexOf0=i1
indexOf1=j1
return indexOf0, indexOf1
def processFiles(fileName):
"""Calculates all angles, all lengths, representative angle and maxDist after performing rule-based labelling.
Arguments:
fileName: The protein file in PDB/ENT format.
Returns:
all_angleList: A Counter having all angles formed by their medians on opposite edges of the
non-collinear triangle formed by the three amino acids at i, j and k
and their frequencies of occurrences in this protein file, rounded to the next significant digit.
rep_angleList: A Counter having representative angle and its frequency
all_lengthsList: A counter having lengths of all edges of the non-collinear triangle formed.
maxDist: Maximum length among all lengths calculated above.
"""
print fileName
count_t1 = 0
inFile=open(fileName,'r')
all_angleList = Counter()
rep_angleList = Counter()
all_lengthsList = Counter()
maxDist_List = Counter()
global xCord, yCord, zCord
aminoAcidName={}
xCord={}
yCord={}
zCord={}
seq_number={}
counter=0
for i in inFile:
if (i[0:6].rstrip()=="NUMMDL"):
numOfModels=i[10:14].rstrip()
if ((i[0:6].rstrip()=="ENDMDL")or (i[0:6].rstrip()=='TER')):
break
if (i[0:6].rstrip()=="MODEL" and int(i[10:14].rstrip())>1):
break
if(i[0:4].rstrip())=="ATOM" and(i[13:15].rstrip())=="CA" and(i[16]=='A'or i[16]==' ')and i[17:20]!= "UNK" :
aminoAcidName[counter]=int(aminoAcidLabel[i[17:20]])
xCord[counter]=(float(i[30:38]))
yCord[counter]=(float(i[38:46]))
zCord[counter]=(float(i[46:54]))
seq_number[counter]=str(i[22:27])
counter+=1
protLen=len(yCord)
initialLabel=[]
sortedLabel=[]
sortedIndex=[]
outDist={}
for m in range(0,3):
initialLabel.append(0)
sortedLabel.append(0)
sortedIndex.append(0)
for i in range(0,protLen-2):
for j in range(i+1,protLen-1):
for k in range(j+1, protLen):
global i1,j1,k1
i1=i
j1=j
k1=k
keepLabelIndex={}
keepLabelIndex[aminoAcidName[i]]=i
keepLabelIndex[aminoAcidName[j]]=j
keepLabelIndex[aminoAcidName[k]]=k
initialLabel[0]=aminoAcidName[i]
initialLabel[1]=aminoAcidName[j]
initialLabel[2]=aminoAcidName[k]
sortedLabel=list(initialLabel)
sortedLabel.sort(reverse=True)
# Perform rule-based labelling
if (sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]):
dist1_2Temp=calcDist(i,j)
dist1_3Temp=calcDist(i,k)
dist2_3Temp=calcDist(j,k)
if dist1_2Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)):
indexOf0=i
indexOf1=j
indexOf2=k
elif dist1_3Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)):
indexOf0=i
indexOf1=k
indexOf2=j
else:
indexOf0=j
indexOf1=k
indexOf2=i
elif(aminoAcidName[i]!=aminoAcidName[j])and(aminoAcidName[i]!=aminoAcidName[k]) and(aminoAcidName[j]!=aminoAcidName[k]):
for index_ in range(0,3):
sortedIndex[index_]=keepLabelIndex[sortedLabel[index_]]
indexOf0=sortedIndex[0]
indexOf1=sortedIndex[1]
indexOf2=sortedIndex[2]
elif(sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]!=sortedLabel[2]):
indexOf2=keepLabelIndex[sortedLabel[2]]
indices=indexFind(indexOf2,i,j,k)
a=indexOf2
b=indices[0]
c=indices[1]
dist1_3Temp=calcDist(b,a)
dist2_3Temp=calcDist(c,a)
if dist1_3Temp>=dist2_3Temp:
indexOf0=indices[0]
indexOf1=indices[1]
else:
indexOf0=indices[1]
indexOf1=indices[0]
elif(sortedLabel[0]!=sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]):
indexOf0=keepLabelIndex[sortedLabel[0]]
indices=indexFind(indexOf0,i,j,k)
if calcDist(indexOf0,indices[0])>= calcDist(indexOf0,indices[1]):
indexOf1=indices[0]
indexOf2=indices[1]
else:
indexOf2=indices[0]
indexOf1=indices[1]
dist01=calcDist(indexOf0,indexOf1)
s2=dist01/2
dist02=calcDist(indexOf0,indexOf2)
s1=dist02
dist12=dist01
dist03=calcDist(indexOf1,indexOf2)
# All lengths calculation
all_lengthsList[round(dist01,round_off_to)] += 1
all_lengthsList[round(dist02,round_off_to)] += 1
all_lengthsList[round(dist03,round_off_to)] += 1
maxDist_List[round(max(dist01,dist02,dist03),round_off_to)] +=1
s3=(((xCord[indexOf0]+xCord[indexOf1])/2-xCord[indexOf2])**2
+((yCord[indexOf0]+yCord[indexOf1])/2-yCord[indexOf2])**2
+((zCord[indexOf0]+zCord[indexOf1])/2-zCord[indexOf2])**2)**0.5
Theta1=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14
if Theta1<=90:
all_angleList[round(Theta1,round_off_to)] +=1
rep_angleList[round(Theta1,round_off_to)] +=1
else:
all_angleList[round(abs(180-Theta1),round_off_to)] +=1
rep_angleList[round(abs(180-Theta1),round_off_to)] +=1
#if Theta1>90:
# Theta1=abs(180-Theta1)
#print 'Second Theta1, ',Theta1
#Theta 2
dist02=calcDist(indexOf1,indexOf0)
s1=dist02
dist01=calcDist(indexOf1,indexOf2)
s2=dist01/2
s3=(((xCord[indexOf1]+xCord[indexOf2])/2-xCord[indexOf0])**2
+((yCord[indexOf1]+yCord[indexOf2])/2-yCord[indexOf0])**2
+((zCord[indexOf1]+zCord[indexOf2])/2-zCord[indexOf0])**2)**0.5
Theta2=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14
#if Theta2 > 90:
# Theta2 = abs(180-Theta2)
if Theta2<=90:
all_angleList[round(Theta2,round_off_to)] += | calcDist | identifier_name |
|
2#allAnglesLengths.py | 1
indexOf1=j1
return indexOf0, indexOf1
def processFiles(fileName):
"""Calculates all angles, all lengths, representative angle and maxDist after performing rule-based labelling.
Arguments:
fileName: The protein file in PDB/ENT format.
Returns:
all_angleList: A Counter having all angles formed by their medians on opposite edges of the
non-collinear triangle formed by the three amino acids at i, j and k
and their frequencies of occurrences in this protein file, rounded to the next significant digit.
rep_angleList: A Counter having representative angle and its frequency
all_lengthsList: A counter having lengths of all edges of the non-collinear triangle formed.
maxDist: Maximum length among all lengths calculated above.
"""
print fileName
count_t1 = 0
inFile=open(fileName,'r')
all_angleList = Counter()
rep_angleList = Counter()
all_lengthsList = Counter()
maxDist_List = Counter()
global xCord, yCord, zCord
aminoAcidName={}
xCord={}
yCord={}
zCord={}
seq_number={}
counter=0
for i in inFile:
if (i[0:6].rstrip()=="NUMMDL"):
numOfModels=i[10:14].rstrip()
if ((i[0:6].rstrip()=="ENDMDL")or (i[0:6].rstrip()=='TER')):
break
if (i[0:6].rstrip()=="MODEL" and int(i[10:14].rstrip())>1):
break
if(i[0:4].rstrip())=="ATOM" and(i[13:15].rstrip())=="CA" and(i[16]=='A'or i[16]==' ')and i[17:20]!= "UNK" :
aminoAcidName[counter]=int(aminoAcidLabel[i[17:20]])
xCord[counter]=(float(i[30:38]))
yCord[counter]=(float(i[38:46]))
zCord[counter]=(float(i[46:54]))
seq_number[counter]=str(i[22:27])
counter+=1
protLen=len(yCord)
initialLabel=[]
sortedLabel=[]
sortedIndex=[]
outDist={}
for m in range(0,3):
initialLabel.append(0)
sortedLabel.append(0)
sortedIndex.append(0)
for i in range(0,protLen-2):
for j in range(i+1,protLen-1):
for k in range(j+1, protLen):
global i1,j1,k1
i1=i
j1=j
k1=k
keepLabelIndex={}
keepLabelIndex[aminoAcidName[i]]=i
keepLabelIndex[aminoAcidName[j]]=j
keepLabelIndex[aminoAcidName[k]]=k
initialLabel[0]=aminoAcidName[i]
initialLabel[1]=aminoAcidName[j]
initialLabel[2]=aminoAcidName[k]
sortedLabel=list(initialLabel)
sortedLabel.sort(reverse=True)
# Perform rule-based labelling
if (sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]):
dist1_2Temp=calcDist(i,j)
dist1_3Temp=calcDist(i,k)
dist2_3Temp=calcDist(j,k)
if dist1_2Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)):
indexOf0=i
indexOf1=j
indexOf2=k
elif dist1_3Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)):
indexOf0=i
indexOf1=k
indexOf2=j
else:
indexOf0=j
indexOf1=k
indexOf2=i
elif(aminoAcidName[i]!=aminoAcidName[j])and(aminoAcidName[i]!=aminoAcidName[k]) and(aminoAcidName[j]!=aminoAcidName[k]):
|
elif(sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]!=sortedLabel[2]):
indexOf2=keepLabelIndex[sortedLabel[2]]
indices=indexFind(indexOf2,i,j,k)
a=indexOf2
b=indices[0]
c=indices[1]
dist1_3Temp=calcDist(b,a)
dist2_3Temp=calcDist(c,a)
if dist1_3Temp>=dist2_3Temp:
indexOf0=indices[0]
indexOf1=indices[1]
else:
indexOf0=indices[1]
indexOf1=indices[0]
elif(sortedLabel[0]!=sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]):
indexOf0=keepLabelIndex[sortedLabel[0]]
indices=indexFind(indexOf0,i,j,k)
if calcDist(indexOf0,indices[0])>= calcDist(indexOf0,indices[1]):
indexOf1=indices[0]
indexOf2=indices[1]
else:
indexOf2=indices[0]
indexOf1=indices[1]
dist01=calcDist(indexOf0,indexOf1)
s2=dist01/2
dist02=calcDist(indexOf0,indexOf2)
s1=dist02
dist12=dist01
dist03=calcDist(indexOf1,indexOf2)
# All lengths calculation
all_lengthsList[round(dist01,round_off_to)] += 1
all_lengthsList[round(dist02,round_off_to)] += 1
all_lengthsList[round(dist03,round_off_to)] += 1
maxDist_List[round(max(dist01,dist02,dist03),round_off_to)] +=1
s3=(((xCord[indexOf0]+xCord[indexOf1])/2-xCord[indexOf2])**2
+((yCord[indexOf0]+yCord[indexOf1])/2-yCord[indexOf2])**2
+((zCord[indexOf0]+zCord[indexOf1])/2-zCord[indexOf2])**2)**0.5
Theta1=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14
if Theta1<=90:
all_angleList[round(Theta1,round_off_to)] +=1
rep_angleList[round(Theta1,round_off_to)] +=1
else:
all_angleList[round(abs(180-Theta1),round_off_to)] +=1
rep_angleList[round(abs(180-Theta1),round_off_to)] +=1
#if Theta1>90:
# Theta1=abs(180-Theta1)
#print 'Second Theta1, ',Theta1
#Theta 2
dist02=calcDist(indexOf1,indexOf0)
s1=dist02
dist01=calcDist(indexOf1,indexOf2)
s2=dist01/2
s3=(((xCord[indexOf1]+xCord[indexOf2])/2-xCord[indexOf0])**2
+((yCord[indexOf1]+yCord[indexOf2])/2-yCord[indexOf0])**2
+((zCord[indexOf1]+zCord[indexOf2])/2-zCord[indexOf0])**2)**0.5
Theta2=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14
#if Theta2 > 90:
# Theta2 = abs(180-Theta2)
if Theta2<=90:
all_angleList[round(Theta2,round_off_to)] +=1
else:
all_angleList[round(abs(180-Theta2),round_off_to)] +=1
#Theta 3
dist02=calcDist(indexOf2,indexOf1)
s1=dist02
dist01=calcDist(indexOf2,indexOf0)
s2=dist01/2
s3=(((xCord[indexOf2]+xCord[indexOf0])/2-xCord[indexOf1])**2+
((yCord[indexOf2]+yCord[indexOf0])/2-yCord[indexOf1])**2+
((zCord[indexOf2]+zCord[indexOf0])/2-zCord[indexOf1])**2)**0.5
Theta3=180*(math.acos((s1**2-s2**2-s3**2 | for index_ in range(0,3):
sortedIndex[index_]=keepLabelIndex[sortedLabel[index_]]
indexOf0=sortedIndex[0]
indexOf1=sortedIndex[1]
indexOf2=sortedIndex[2] | conditional_block |
kmz.go | the given file name. The Float slice is in order: northLat,
// southLat, eastLong, westLong in decimal degrees
func getBox(image string) (base string, box []float64, err error) {
c := strings.Split(image, "_")
if len(c) != 5 {
err = fmt.Errorf("File name must include bounding box name_N_S_E_W.jpg in decimal degrees, e.g. Grouse-Mountain_49.336694_49.470628_-123.132056_-122.9811.jpg")
return
}
base = filepath.Base(c[0])
for i := 1; i < 5; i++ {
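// The last underscore-separated field still carries the image extension
// (e.g. "-122.9811.jpg"); keep only the numeric part so ParseFloat succeeds.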
if i == 4 {
s := strings.SplitN(c[i], ".", 3)
if len(s) == 3 {
c[i] = s[0] + "." + s[1]
}
}
f, err := strconv.ParseFloat(c[i], 64)
if err != nil {
err = fmt.Errorf("Error parsing lat/long degrees in file name: %v", err)
return "", nil, err
}
box = append(box, f)
}
if box[north] <= box[south] || box[north] > 90 || box[south] < -90 {
return base, box, fmt.Errorf("North boundary must be greater than south boundary and in [-90,90]")
}
return
}
// imageWxH returns the width and height of image file in pixels
func imageWxH(imageFilename string) (width int, height int, err error) {
if _, err := os.Stat(imageFilename); os.IsNotExist(err) {
return 0, 0, err
}
cmd := exec.Command(identifyProg, "-format", "%w %h", imageFilename)
glog.Infof("About to run: %#v\n", cmd.Args)
var b []byte
b, err = cmd.Output()
if err != nil {
return 0, 0, err
}
wh := bytes.Split(b, []byte(" "))
if len(wh) != 2 {
return 0, 0, fmt.Errorf("Expected two ints separated by space, but got: %v", b)
}
width, err = strconv.Atoi(string(wh[0]))
if err != nil {
return
}
height, err = strconv.Atoi(string(wh[1]))
if err != nil {
return
}
return
}
// process converts the name-geo-anchored image file args into KMZs. Uses
// "max_tiles" and "drawing_order" from viper if present.
func process(v *viper.Viper, args []string) error {
maxTiles := v.GetInt("max_tiles")
drawingOrder := v.GetInt("drawing_order")
keepTmp := v.GetBool("keep_tmp")
fmt.Printf("maxTiles %v, drawingOrder: %v, keepTmp: %v\n", maxTiles, drawingOrder, keepTmp)
if len(args) == 0 {
return fmt.Errorf("Image file required: must provide one or more imaage file path")
}
for _, image := range args {
if _, err := os.Stat(image); os.IsNotExist(err) {
return err
}
absImage, err := filepath.Abs(image)
if err != nil {
return fmt.Errorf("Issue with an image file path: %v", err)
}
base, box, err := getBox(absImage)
if err != nil {
return fmt.Errorf("Error with image file name: %v", err)
}
origMap, err := newMapTileFromFile(absImage, box[north], box[south], box[east], box[west])
if err != nil {
return fmt.Errorf("Error extracting image dimensions: %v", err)
}
maxPixels := maxTiles * 1024 * 1024
tmpDir, err := ioutil.TempDir("", "cutkmz-")
if err != nil {
return fmt.Errorf("Error creating a temporary directory: %v", err)
}
tilesDir := filepath.Join(tmpDir, base, "tiles")
err = os.MkdirAll(tilesDir, 0755)
if err != nil {
return fmt.Errorf("Error making tiles dir in tmp dir: %v", err)
}
fixedJpg := filepath.Join(tmpDir, "fixed.jpg")
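// Re-encode the source as a JPEG the device can use, downsampling first
// when the image exceeds the tile budget (maxTiles x 1 megapixel).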
if maxPixels < (origMap.height * origMap.width) {
resizeFixToJpg(fixedJpg, absImage, maxPixels)
} else {
fixToJpg(fixedJpg, absImage)
}
// Need to know pixel width of map from which we
// chopped the tiles so we know which row a tile is
// in. Knowing the tile's row allows us to set its
// bounding box correctly.
fixedMap, err := newMapTileFromFile(fixedJpg, box[north], box[south], box[east], box[west])
if err != nil {
return err
}
// chop chop chop. bork. bork bork.
chopToJpgs(fixedJpg, tilesDir, base)
var kdocWtr *os.File
if kdocWtr, err = os.Create(filepath.Join(tmpDir, base, "doc.kml")); err != nil {
return err
}
if err = startKML(kdocWtr, base); err != nil {
return err
}
// For each jpg tile create an entry in the kml file
// with its bounding box. Imagemagick crop+adjoin
// chopped & numbered the tile image files
// lexicographically ascending starting from top left
// (000) (NW) eastwards & then down to bottom right
// (SE). ReadDir gives sorted result.
var tileFiles []os.FileInfo
if tileFiles, err = ioutil.ReadDir(tilesDir); err != nil {
return err
}
var widthSum int
currNorth := fixedMap.box[north]
currWest := fixedMap.box[west]
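// currNorth/currWest track the NW corner of the tile being placed; widthSum
// accumulates tile widths so we know when a row of tiles is complete and the
// cursor must wrap back to the west edge and drop south.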
for _, tf := range tileFiles {
tile, err := newMapTileFromFile(filepath.Join(tilesDir, tf.Name()), currNorth, 0, 0, currWest)
if err != nil {
return err
}
// rightmost tiles might be narrower, bottom
// ones shorter so must re-compute S & E edge
// for each tile; cannot assume all same
// size. Also double checks assumption that
// chopping preserves number of pixels
finishTileBox(tile, fixedMap)
var relTPath string // file ref inside KML must be relative to kmz root
if relTPath, err = filepath.Rel(filepath.Join(tmpDir, base), tile.fpath); err != nil {
return err
}
if err = kmlAddOverlay(kdocWtr, tf.Name(), tile.box, drawingOrder, relTPath); err != nil {
return err
}
widthSum += tile.width
if widthSum >= fixedMap.width {
// drop down a row
currNorth = tile.box[south]
currWest = fixedMap.box[west]
widthSum = 0
} else {
currWest = tile.box[east]
}
}
endKML(kdocWtr)
kdocWtr.Close()
var zf *os.File
if zf, err = os.Create(base + ".kmz"); err != nil {
return err
}
zipd(filepath.Join(tmpDir, base), zf)
zf.Close()
if !keepTmp {
err = os.RemoveAll(tmpDir)
if err != nil {
return fmt.Errorf("Error removing tmp dir & contents: %v", err)
}
}
}
return nil
}
func startKML(w io.Writer, name string) error {
t, err := template.New("kmlhdr").Parse(kmlHdrTmpl)
if err != nil {
return err
}
root := struct{ Name string }{name}
return t.Execute(w, &root)
}
func kmlAddOverlay(w io.Writer, tileName string, tbox [4]float64, drawingOrder int, relTileFile string) error {
t, err := template.New("kmloverlay").Parse(kmlOverlayTmpl)
if err != nil {
return err
}
root := struct {
Name string
TileFileName string
DrawingOrder int
North float64
South float64
East float64
West float64
}{tileName, relTileFile, drawingOrder, tbox[north], tbox[south], tbox[east], tbox[west]}
return t.Execute(w, &root)
}
func endKML(w io.Writer) error | {
t, err := template.New("kmlftr").Parse(kmlFtr)
if err != nil {
return err
}
return t.Execute(w, nil)
} | identifier_body |
|
kmz.go | two ints separated by space, but got: %v", b)
}
width, err = strconv.Atoi(string(wh[0]))
if err != nil {
return
}
height, err = strconv.Atoi(string(wh[1]))
if err != nil {
return
}
return
}
// process converts the name-geo-anchored image file args into KMZs. Uses
// "max_tiles" and "drawing_order" from viper if present.
func process(v *viper.Viper, args []string) error {
maxTiles := v.GetInt("max_tiles")
drawingOrder := v.GetInt("drawing_order")
keepTmp := v.GetBool("keep_tmp")
fmt.Printf("maxTiles %v, drawingOrder: %v, keepTmp: %v\n", maxTiles, drawingOrder, keepTmp)
if len(args) == 0 {
return fmt.Errorf("Image file required: must provide one or more imaage file path")
}
for _, image := range args {
if _, err := os.Stat(image); os.IsNotExist(err) {
return err
}
absImage, err := filepath.Abs(image)
if err != nil {
return fmt.Errorf("Issue with an image file path: %v", err)
}
base, box, err := getBox(absImage)
if err != nil {
return fmt.Errorf("Error with image file name: %v", err)
}
origMap, err := newMapTileFromFile(absImage, box[north], box[south], box[east], box[west])
if err != nil {
return fmt.Errorf("Error extracting image dimensions: %v", err)
}
maxPixels := maxTiles * 1024 * 1024
tmpDir, err := ioutil.TempDir("", "cutkmz-")
if err != nil {
return fmt.Errorf("Error creating a temporary directory: %v", err)
}
tilesDir := filepath.Join(tmpDir, base, "tiles")
err = os.MkdirAll(tilesDir, 0755)
if err != nil {
return fmt.Errorf("Error making tiles dir in tmp dir: %v", err)
}
fixedJpg := filepath.Join(tmpDir, "fixed.jpg")
if maxPixels < (origMap.height * origMap.width) {
resizeFixToJpg(fixedJpg, absImage, maxPixels)
} else {
fixToJpg(fixedJpg, absImage)
}
// Need to know pixel width of map from which we
// chopped the tiles so we know which row a tile is
// in. Knowing the tile's row allows us to set its
// bounding box correctly.
fixedMap, err := newMapTileFromFile(fixedJpg, box[north], box[south], box[east], box[west])
if err != nil {
return err
}
// chop chop chop. bork. bork bork.
chopToJpgs(fixedJpg, tilesDir, base)
var kdocWtr *os.File
if kdocWtr, err = os.Create(filepath.Join(tmpDir, base, "doc.kml")); err != nil {
return err
}
if err = startKML(kdocWtr, base); err != nil {
return err
}
// For each jpg tile create an entry in the kml file
// with its bounding box. Imagemagick crop+adjoin
// chopped & numbered the tile image files
// lexicographically ascending starting from top left
// (000) (NW) eastwards & then down to bottom right
// (SE). ReadDir gives sorted result.
var tileFiles []os.FileInfo
if tileFiles, err = ioutil.ReadDir(tilesDir); err != nil {
return err
}
var widthSum int
currNorth := fixedMap.box[north]
currWest := fixedMap.box[west]
for _, tf := range tileFiles {
tile, err := newMapTileFromFile(filepath.Join(tilesDir, tf.Name()), currNorth, 0, 0, currWest)
if err != nil {
return err
}
// rightmost tiles might be narrower, bottom
// ones shorter so must re-compute S & E edge
// for each tile; cannot assume all same
// size. Also double checks assumption that
// chopping preserves number of pixels
finishTileBox(tile, fixedMap)
var relTPath string // file ref inside KML must be relative to kmz root
if relTPath, err = filepath.Rel(filepath.Join(tmpDir, base), tile.fpath); err != nil {
return err
}
if err = kmlAddOverlay(kdocWtr, tf.Name(), tile.box, drawingOrder, relTPath); err != nil {
return err
}
widthSum += tile.width
if widthSum >= fixedMap.width {
// drop down a row
currNorth = tile.box[south]
currWest = fixedMap.box[west]
widthSum = 0
} else {
currWest = tile.box[east]
}
}
endKML(kdocWtr)
kdocWtr.Close()
var zf *os.File
if zf, err = os.Create(base + ".kmz"); err != nil {
return err
}
zipd(filepath.Join(tmpDir, base), zf)
zf.Close()
if !keepTmp {
err = os.RemoveAll(tmpDir)
if err != nil {
return fmt.Errorf("Error removing tmp dir & contents: %v", err)
}
}
}
return nil
}
func startKML(w io.Writer, name string) error {
t, err := template.New("kmlhdr").Parse(kmlHdrTmpl)
if err != nil {
return err
}
root := struct{ Name string }{name}
return t.Execute(w, &root)
}
func kmlAddOverlay(w io.Writer, tileName string, tbox [4]float64, drawingOrder int, relTileFile string) error {
t, err := template.New("kmloverlay").Parse(kmlOverlayTmpl)
if err != nil {
return err
}
root := struct {
Name string
TileFileName string
DrawingOrder int
North float64
South float64
East float64
West float64
}{tileName, relTileFile, drawingOrder, tbox[north], tbox[south], tbox[east], tbox[west]}
return t.Execute(w, &root)
}
func endKML(w io.Writer) error {
t, err := template.New("kmlftr").Parse(kmlFtr)
if err != nil {
return err
}
return t.Execute(w, nil)
}
// finishTileBox completes the tile.box by setting its east and south
// boundaries relative to its current north and west values using the
// tile pixel size relative to the full map size.
func finishTileBox(tile, fullMap *mapTile) {
nsDeltaDeg, ewDeltaDeg := delta(tile.width, tile.height, fullMap.box, fullMap.width, fullMap.height)
tile.box[south] = tile.box[north] - nsDeltaDeg
tile.box[east] = tile.box[west] + ewDeltaDeg
}
// delta returns how many degrees further South the bottom of the
// tile is than the top, and how many degrees further east the east
// edge of the tile is than the west, given the tile width & height in
// pixels, the map's bounding box in decimal degrees, and the map's
// total width and height in pixels
func delta(tileWidth, tileHeight int, box [4]float64, totWidth, totHeight int) (nsDeltaDeg float64, ewDeltaDeg float64) {
nsDeltaDeg = (float64(tileHeight) / float64(totHeight)) * (box[north] - box[south])
ewDeg := eastDelta(box[east], box[west])
ewDeltaDeg = (float64(tileWidth) / float64(totWidth)) * ewDeg
return
}
// eastDelta returns the positive decimal degrees difference between the
// given east and west longitudes
func eastDelta(e, w float64) float64 {
e = normEasting(e)
w = normEasting(w)
if e < w {
return 360 + e - w
}
return e - w
}
// normEasting returns the given longitude in decimal degrees, normalized to be within [-180,180]
func normEasting(deg float64) float64 {
// go's Mod fcn preserves sign on first param
if deg < -180 {
return math.Mod(deg+180, 360) + 180
}
if deg > 180 {
return math.Mod(deg-180, 360) - 180
}
return deg
}
func | resizeFixToJpg | identifier_name |
|
kmz.go | Short: "Creates .kmz from a JPG with map tiles small enough for a Garmin GPS",
Long: `Creates .kmz map tiles for a Garmin from a larger geo-positioned map image. Tested on a 62s & 64s
Crunches and converts a raster image (.jpg,.gif,.tiff etc) to match what Garmin devices can handle wrt resolution and max tile-size.
Rather than expect metadata files with geo-positioning information for
the jpg, cutkmz expects it to be encoded into the file's name,
"name-geo-anchored". Harder to lose. For example:
Grouse-Mountain_49.336694_49.470628_-123.132056_-122.9811.jpg
Underscores are required: <map-name>_<North-lat>_<South-lat>_<East-long>_<West-long>.<fmt>
Garmin limits the max tiles per model (100 on the 62s; 500 on the Montana,
Oregon 600 series and GPSMAP 64 series). Tiles of more than 1 megapixel
(w*h) add no additional clarity. If you have a large image, it will be
reduced in quality until it can be chopped into max-tiles or fewer
1024x1024 chunks.
Connect your GPS via USB and copy the generated kmz files into /Garmin/CustomMap (SD card or main memory).
Garmin limitations on .kmz files and the images in them:
* image must be jpeg, not 'progressive'
* only considers the /doc.kml in the .kmz
* tiles over 1MP, e.g. > 1024x1024 or 512x2048 etc pixels do not add increased resolution
* each tile jpeg should be less than 3MB.
* Max images/tiles per device: typically 100. 500 on some.
* smaller image files are rendered faster
Requires ImageMagick to be installed on your system, and uses its
"convert" and "identify" programs | if err := process(viper.GetViper(), args); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
fmt.Fprintf(os.Stderr, "see 'cutkmz kmz -h' for help\n")
os.Exit(1)
}
},
}
func init() {
RootCmd.AddCommand(kmzCmd)
kmzCmd.Flags().StringP("image", "i", "", "image file named with its bounding box in decimal degrees.")
viper.BindPFlag("image", kmzCmd.Flags().Lookup("image"))
kmzCmd.Flags().IntP("max_tiles", "t", 100, "max # pieces to cut jpg into. Beware of device limits.")
viper.BindPFlag("max_tiles", kmzCmd.Flags().Lookup("max_tiles"))
kmzCmd.Flags().IntP("drawing_order", "d", 51, "Garmins make values > 50 visible. Tune if have overlapping overlays.")
viper.BindPFlag("drawing_order", kmzCmd.Flags().Lookup("drawing_order"))
kmzCmd.Flags().BoolP("keep_tmp", "k", false, "Don't delete intermediate files from $TMPDIR.")
viper.BindPFlag("keep_tmp", kmzCmd.Flags().Lookup("keep_tmp"))
kmzCmd.Flags().AddGoFlagSet(flag.CommandLine)
flag.CommandLine.VisitAll(func(f *flag.Flag) {
viper.BindPFlag(f.Name, kmzCmd.Flags().Lookup(f.Name))
})
flag.CommandLine.Parse(nil) // shut up 'not parsed' complaints
}
// getBox returns map name & lat/long bounding box by extracting it
// from the given file name. The Float slice is in order: northLat,
// southLat, eastLong, westLong in decimal degrees
func getBox(image string) (base string, box []float64, err error) {
c := strings.Split(image, "_")
if len(c) != 5 {
err = fmt.Errorf("File name must include bounding box name_N_S_E_W.jpg in decimal degrees, e.g. Grouse-Mountain_49.336694_49.470628_-123.132056_-122.9811.jpg")
return
}
base = filepath.Base(c[0])
for i := 1; i < 5; i++ {
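// The last underscore-separated field still carries the image extension
// (e.g. "-122.9811.jpg"); keep only the numeric part so ParseFloat succeeds.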
if i == 4 {
s := strings.SplitN(c[i], ".", 3)
if len(s) == 3 {
c[i] = s[0] + "." + s[1]
}
}
f, err := strconv.ParseFloat(c[i], 64)
if err != nil {
err = fmt.Errorf("Error parsing lat/long degrees in file name: %v", err)
return "", nil, err
}
box = append(box, f)
}
if box[north] <= box[south] || box[north] > 90 || box[south] < -90 {
return base, box, fmt.Errorf("North boundary must be greater than south boundary and in [-90,90]")
}
return
}
// imageWxH returns the width and height of image file in pixels
func imageWxH(imageFilename string) (width int, height int, err error) {
if _, err := os.Stat(imageFilename); os.IsNotExist(err) {
return 0, 0, err
}
cmd := exec.Command(identifyProg, "-format", "%w %h", imageFilename)
glog.Infof("About to run: %#v\n", cmd.Args)
var b []byte
b, err = cmd.Output()
if err != nil {
return 0, 0, err
}
wh := bytes.Split(b, []byte(" "))
if len(wh) != 2 {
return 0, 0, fmt.Errorf("Expected two ints separated by space, but got: %v", b)
}
width, err = strconv.Atoi(string(wh[0]))
if err != nil {
return
}
height, err = strconv.Atoi(string(wh[1]))
if err != nil {
return
}
return
}
// process converts the name-geo-anchored image file args into KMZs. Uses
// "max_tiles" and "drawing_order" from viper if present.
func process(v *viper.Viper, args []string) error {
maxTiles := v.GetInt("max_tiles")
drawingOrder := v.GetInt("drawing_order")
keepTmp := v.GetBool("keep_tmp")
fmt.Printf("maxTiles %v, drawingOrder: %v, keepTmp: %v\n", maxTiles, drawingOrder, keepTmp)
if len(args) == 0 {
return fmt.Errorf("Image file required: must provide one or more imaage file path")
}
for _, image := range args {
if _, err := os.Stat(image); os.IsNotExist(err) {
return err
}
absImage, err := filepath.Abs(image)
if err != nil {
return fmt.Errorf("Issue with an image file path: %v", err)
}
base, box, err := getBox(absImage)
if err != nil {
return fmt.Errorf("Error with image file name: %v", err)
}
origMap, err := newMapTileFromFile(absImage, box[north], box[south], box[east], box[west])
if err != nil {
return fmt.Errorf("Error extracting image dimensions: %v", err)
}
maxPixels := maxTiles * 1024 * 1024
tmpDir, err := ioutil.TempDir("", "cutkmz-")
if err != nil {
return fmt.Errorf("Error creating a temporary directory: %v", err)
}
tilesDir := filepath.Join(tmpDir, base, "tiles")
err = os.MkdirAll(tilesDir, 0755)
if err != nil {
return fmt.Errorf("Error making tiles dir in tmp dir: %v", err)
}
fixedJpg := filepath.Join(tmpDir, "fixed.jpg")
if maxPixels < (origMap.height * origMap.width) {
resizeFixToJpg(fixedJpg, absImage, maxPixels)
} else {
fixToJpg(fixedJpg, absImage)
}
// Need to know pixel width of map from which we
// chopped the tiles so we know which row a tile is
// in. Knowing the tile's row allows us to set its
// bounding box correctly.
fixedMap, err := newMapTileFromFile(fixedJpg, box[north], box[south], box[east], box[west])
if err != nil {
return err
}
// chop chop chop. bork. bork bork.
chopToJpgs(fixedJpg, tilesDir, base)
var kdocWtr *os.File
if kdocWtr, err = os.Create |
`,
Run: func(cmd *cobra.Command, args []string) { | random_line_split |
kmz.go | , height int, err error) {
if _, err := os.Stat(imageFilename); os.IsNotExist(err) {
return 0, 0, err
}
cmd := exec.Command(identifyProg, "-format", "%w %h", imageFilename)
glog.Infof("About to run: %#v\n", cmd.Args)
var b []byte
b, err = cmd.Output()
if err != nil {
return 0, 0, err
}
wh := bytes.Split(b, []byte(" "))
if len(wh) != 2 {
return 0, 0, fmt.Errorf("Expected two ints separated by space, but got: %v", b)
}
width, err = strconv.Atoi(string(wh[0]))
if err != nil {
return
}
height, err = strconv.Atoi(string(wh[1]))
if err != nil {
return
}
return
}
// process converts the name-geo-anchored image file args into KMZs. Uses
// "max_tiles" and "drawing_order" from viper if present.
func process(v *viper.Viper, args []string) error {
maxTiles := v.GetInt("max_tiles")
drawingOrder := v.GetInt("drawing_order")
keepTmp := v.GetBool("keep_tmp")
fmt.Printf("maxTiles %v, drawingOrder: %v, keepTmp: %v\n", maxTiles, drawingOrder, keepTmp)
if len(args) == 0 {
return fmt.Errorf("Image file required: must provide one or more imaage file path")
}
for _, image := range args {
if _, err := os.Stat(image); os.IsNotExist(err) {
return err
}
absImage, err := filepath.Abs(image)
if err != nil {
return fmt.Errorf("Issue with an image file path: %v", err)
}
base, box, err := getBox(absImage)
if err != nil {
return fmt.Errorf("Error with image file name: %v", err)
}
origMap, err := newMapTileFromFile(absImage, box[north], box[south], box[east], box[west])
if err != nil {
return fmt.Errorf("Error extracting image dimensions: %v", err)
}
maxPixels := maxTiles * 1024 * 1024
tmpDir, err := ioutil.TempDir("", "cutkmz-")
if err != nil {
return fmt.Errorf("Error creating a temporary directory: %v", err)
}
tilesDir := filepath.Join(tmpDir, base, "tiles")
err = os.MkdirAll(tilesDir, 0755)
if err != nil {
return fmt.Errorf("Error making tiles dir in tmp dir: %v", err)
}
fixedJpg := filepath.Join(tmpDir, "fixed.jpg")
if maxPixels < (origMap.height * origMap.width) {
resizeFixToJpg(fixedJpg, absImage, maxPixels)
} else {
fixToJpg(fixedJpg, absImage)
}
// Need to know pixel width of map from which we
// chopped the tiles so we know which row a tile is
// in. Knowing the tile's row allows us to set its
// bounding box correctly.
fixedMap, err := newMapTileFromFile(fixedJpg, box[north], box[south], box[east], box[west])
if err != nil {
return err
}
// chop chop chop. bork. bork bork.
chopToJpgs(fixedJpg, tilesDir, base)
var kdocWtr *os.File
if kdocWtr, err = os.Create(filepath.Join(tmpDir, base, "doc.kml")); err != nil {
return err
}
if err = startKML(kdocWtr, base); err != nil {
return err
}
// For each jpg tile create an entry in the kml file
// with its bounding box. Imagemagick crop+adjoin
// chopped & numbered the tile image files
// lexicographically ascending starting from top left
// (000) (NW) eastwards & then down to bottom right
// (SE). ReadDir gives sorted result.
var tileFiles []os.FileInfo
if tileFiles, err = ioutil.ReadDir(tilesDir); err != nil {
return err
}
var widthSum int
currNorth := fixedMap.box[north]
currWest := fixedMap.box[west]
for _, tf := range tileFiles {
tile, err := newMapTileFromFile(filepath.Join(tilesDir, tf.Name()), currNorth, 0, 0, currWest)
if err != nil {
return err
}
// rightmost tiles might be narrower, bottom
// ones shorter so must re-compute S & E edge
// for each tile; cannot assume all same
// size. Also double checks assumption that
// chopping preserves number of pixels
finishTileBox(tile, fixedMap)
var relTPath string // file ref inside KML must be relative to kmz root
if relTPath, err = filepath.Rel(filepath.Join(tmpDir, base), tile.fpath); err != nil {
return err
}
if err = kmlAddOverlay(kdocWtr, tf.Name(), tile.box, drawingOrder, relTPath); err != nil {
return err
}
widthSum += tile.width
if widthSum >= fixedMap.width {
// drop down a row
currNorth = tile.box[south]
currWest = fixedMap.box[west]
widthSum = 0
} else {
currWest = tile.box[east]
}
}
endKML(kdocWtr)
kdocWtr.Close()
var zf *os.File
if zf, err = os.Create(base + ".kmz"); err != nil {
return err
}
zipd(filepath.Join(tmpDir, base), zf)
zf.Close()
if !keepTmp {
err = os.RemoveAll(tmpDir)
if err != nil {
return fmt.Errorf("Error removing tmp dir & contents: %v", err)
}
}
}
return nil
}
func startKML(w io.Writer, name string) error {
t, err := template.New("kmlhdr").Parse(kmlHdrTmpl)
if err != nil {
return err
}
root := struct{ Name string }{name}
return t.Execute(w, &root)
}
func kmlAddOverlay(w io.Writer, tileName string, tbox [4]float64, drawingOrder int, relTileFile string) error {
t, err := template.New("kmloverlay").Parse(kmlOverlayTmpl)
if err != nil {
return err
}
root := struct {
Name string
TileFileName string
DrawingOrder int
North float64
South float64
East float64
West float64
}{tileName, relTileFile, drawingOrder, tbox[north], tbox[south], tbox[east], tbox[west]}
return t.Execute(w, &root)
}
func endKML(w io.Writer) error {
t, err := template.New("kmlftr").Parse(kmlFtr)
if err != nil {
return err
}
return t.Execute(w, nil)
}
// finishTileBox completes the tile.box by setting its east and south
// boundaries relative to its current north and west values using the
// tile pixel size relative to the full map size.
func finishTileBox(tile, fullMap *mapTile) {
nsDeltaDeg, ewDeltaDeg := delta(tile.width, tile.height, fullMap.box, fullMap.width, fullMap.height)
tile.box[south] = tile.box[north] - nsDeltaDeg
tile.box[east] = tile.box[west] + ewDeltaDeg
}
// delta returns the how many degrees further South the bottom of the
// tile is than the top, and how many degrees further east the east
// edge of the tile is than the west, given the tile width & height in
// pixels, the map's bounding box in decimal degrees, and the map's
// total width and height in pixels
func delta(tileWidth, tileHeight int, box [4]float64, totWidth, totHeight int) (nsDeltaDeg float64, ewDeltaDeg float64) {
nsDeltaDeg = (float64(tileHeight) / float64(totHeight)) * (box[north] - box[south])
ewDeg := eastDelta(box[east], box[west])
ewDeltaDeg = (float64(tileWidth) / float64(totWidth)) * ewDeg
return
}
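// Worked example (illustrative numbers only, not taken from any real map):
// for a source map 4096 px wide and 2048 px tall whose box spans 1.0 degree
// north-south and 1.0 degree east-west, a 1024x512 px tile gives
//	nsDeltaDeg = (512.0/2048.0) * 1.0 = 0.25
//	ewDeltaDeg = (1024.0/4096.0) * 1.0 = 0.25
// so finishTileBox places that tile's south edge 0.25 degrees below its north
// edge and its east edge 0.25 degrees east of its west edge.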
// eastDelta returns the positive decimal degrees difference between the
// given east and west longitudes
func eastDelta(e, w float64) float64 {
e = normEasting(e)
w = normEasting(w)
if e < w | {
return 360 + e - w
} | conditional_block |
|
redis.go | , errors.Errorf("invalid option value %q", kv[1]))
}
handlers := map[string]optionHandler{
"max_retries": {int: &options.MaxRetries},
"min_retry_backoff": {duration: &options.MinRetryBackoff},
"max_retry_backoff": {duration: &options.MaxRetryBackoff},
"dial_timeout": {duration: &options.DialTimeout},
"read_timeout": {duration: &options.ReadTimeout},
"write_timeout": {duration: &options.WriteTimeout},
"pool_fifo": {bool: &options.PoolFIFO},
"pool_size": {int: &options.PoolSize},
"pool_timeout": {duration: &options.PoolTimeout},
"min_idle_conns": {int: &options.MinIdleConns},
"max_idle_conns": {int: &options.MaxIdleConns},
"conn_max_idle_time": {duration: &options.ConnMaxIdleTime},
"conn_max_lifetime": {duration: &options.ConnMaxLifetime},
}
lowerKey := strings.ToLower(key)
if handler, ok := handlers[lowerKey]; ok {
if handler.int != nil {
*handler.int, err = strconv.Atoi(value)
if err != nil {
return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value))
}
} else if handler.duration != nil {
*handler.duration, err = time.ParseDuration(value)
if err != nil {
return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value))
}
} else if handler.bool != nil {
*handler.bool, err = strconv.ParseBool(value)
if err != nil {
return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value))
}
} else {
return fmt.Errorf("redis: unexpected option: %s", key)
}
} else {
return fmt.Errorf("redis: unexpected option: %s", key)
}
return nil
}
// Redis cache storage.
type Redis struct {
storage.TablePrefix
client redis.UniversalClient
}
// Close redis connection.
func (r *Redis) Close() error {
return r.client.Close()
}
func (r *Redis) Ping() error {
return r.client.Ping(context.Background()).Err()
}
// Init nothing.
func (r *Redis) Init() error {
return nil
}
func (r *Redis) Scan(work func(string) error) error {
ctx := context.Background()
if clusterClient, isCluster := r.client.(*redis.ClusterClient); isCluster {
return clusterClient.ForEachMaster(ctx, func(ctx context.Context, client *redis.Client) error {
return r.scan(ctx, client, work)
})
} else {
return r.scan(ctx, r.client, work)
}
}
func (r *Redis) scan(ctx context.Context, client redis.UniversalClient, work func(string) error) error {
var (
result []string
cursor uint64
err error
)
for {
result, cursor, err = client.Scan(ctx, cursor, string(r.TablePrefix)+"*", 0).Result()
if err != nil {
return errors.Trace(err)
}
for _, key := range result {
if err = work(key[len(r.TablePrefix):]); err != nil {
return errors.Trace(err)
}
}
if cursor == 0 {
return nil
}
}
}
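// Illustrative sketch, not part of the original file: Scan visits every key
// carrying this storage's table prefix, passing it to the callback with the
// prefix already stripped. The helper name and callback are made up.
func exampleCountKeys(r *Redis) (int, error) {
	count := 0
	err := r.Scan(func(key string) error {
		count++
		return nil
	})
	return count, err
}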
func (r *Redis) Purge() error {
ctx := context.Background()
if clusterClient, isCluster := r.client.(*redis.ClusterClient); isCluster {
return clusterClient.ForEachMaster(ctx, func(ctx context.Context, client *redis.Client) error {
return r.purge(ctx, client, isCluster)
})
} else {
return r.purge(ctx, r.client, isCluster)
}
}
func (r *Redis) purge(ctx context.Context, client redis.UniversalClient, isCluster bool) error {
var (
result []string
cursor uint64
err error
)
for {
result, cursor, err = client.Scan(ctx, cursor, string(r.TablePrefix)+"*", 0).Result()
if err != nil {
return errors.Trace(err)
}
if len(result) > 0 {
if isCluster {
p := client.Pipeline()
for _, key := range result {
if err = p.Del(ctx, key).Err(); err != nil {
return errors.Trace(err)
}
}
if _, err = p.Exec(ctx); err != nil {
return errors.Trace(err)
}
} else {
if err = client.Del(ctx, result...).Err(); err != nil {
return errors.Trace(err)
}
}
}
if cursor == 0 {
return nil
}
}
}
func (r *Redis) Set(ctx context.Context, values ...Value) error {
p := r.client.Pipeline()
for _, v := range values {
if err := p.Set(ctx, r.Key(v.name), v.value, 0).Err(); err != nil {
return errors.Trace(err)
}
}
_, err := p.Exec(ctx)
return errors.Trace(err)
}
// Get returns a value from Redis.
func (r *Redis) Get(ctx context.Context, key string) *ReturnValue {
val, err := r.client.Get(ctx, r.Key(key)).Result()
if err != nil {
if err == redis.Nil {
return &ReturnValue{err: errors.Annotate(ErrObjectNotExist, key)}
}
return &ReturnValue{err: err}
}
return &ReturnValue{value: val}
}
// Delete object from Redis.
func (r *Redis) Delete(ctx context.Context, key string) error {
return r.client.Del(ctx, r.Key(key)).Err()
}
// GetSet returns members of a set from Redis.
func (r *Redis) GetSet(ctx context.Context, key string) ([]string, error) {
return r.client.SMembers(ctx, r.Key(key)).Result()
}
// SetSet overrides a set with members in Redis.
func (r *Redis) SetSet(ctx context.Context, key string, members ...string) error {
if len(members) == 0 {
return nil
}
// convert strings to interfaces
values := make([]interface{}, 0, len(members))
for _, member := range members {
values = append(values, member)
}
// push set
pipeline := r.client.Pipeline()
pipeline.Del(ctx, r.Key(key))
pipeline.SAdd(ctx, r.Key(key), values...)
_, err := pipeline.Exec(ctx)
return err
}
// AddSet adds members to a set in Redis.
func (r *Redis) AddSet(ctx context.Context, key string, members ...string) error {
if len(members) == 0 {
return nil
}
// convert strings to interfaces
values := make([]interface{}, 0, len(members))
for _, member := range members {
values = append(values, member)
}
// push set
return r.client.SAdd(ctx, r.Key(key), values...).Err()
}
// RemSet removes members from a set in Redis.
func (r *Redis) RemSet(ctx context.Context, key string, members ...string) error {
if len(members) == 0 {
return nil
}
return r.client.SRem(ctx, r.Key(key), members).Err()
}
// GetSorted gets scores from a sorted set.
func (r *Redis) GetSorted(ctx context.Context, key string, begin, end int) ([]Scored, error) {
members, err := r.client.ZRevRangeWithScores(ctx, r.Key(key), int64(begin), int64(end)).Result()
if err != nil {
return nil, err
}
results := make([]Scored, 0, len(members))
for _, member := range members {
results = append(results, Scored{Id: member.Member.(string), Score: member.Score})
}
return results, nil
}
func (r *Redis) GetSortedByScore(ctx context.Context, key string, begin, end float64) ([]Scored, error) {
members, err := r.client.ZRangeByScoreWithScores(ctx, r.Key(key), &redis.ZRangeBy{
Min: strconv.FormatFloat(begin, 'g', -1, 64),
Max: strconv.FormatFloat(end, 'g', -1, 64),
Offset: 0,
Count: -1,
}).Result()
if err != nil {
return nil, err
}
results := make([]Scored, 0, len(members))
for _, member := range members {
results = append(results, Scored{Id: member.Member.(string), Score: member.Score})
}
return results, nil
}
func (r *Redis) RemSortedByScore(ctx context.Context, key string, begin, end float64) error {
return r.client.ZRemRangeByScore(ctx, r.Key(key),
strconv.FormatFloat(begin, 'g', -1, 64),
strconv.FormatFloat(end, 'g', -1, 64)).
Err()
}
// AddSorted adds scores to a sorted set.
func (r *Redis) | AddSorted | identifier_name |
|
redis.go | url.QueryUnescape(kv[1])
if err != nil {
return errors.Wrap(err, errors.Errorf("invalid option value %q", kv[1]))
}
handlers := map[string]optionHandler{
"max_retries": {int: &options.MaxRetries},
"min_retry_backoff": {duration: &options.MinRetryBackoff},
"max_retry_backoff": {duration: &options.MaxRetryBackoff},
"dial_timeout": {duration: &options.DialTimeout},
"read_timeout": {duration: &options.ReadTimeout},
"write_timeout": {duration: &options.WriteTimeout},
"pool_fifo": {bool: &options.PoolFIFO},
"pool_size": {int: &options.PoolSize},
"pool_timeout": {duration: &options.PoolTimeout},
"min_idle_conns": {int: &options.MinIdleConns},
"max_idle_conns": {int: &options.MaxIdleConns},
"conn_max_idle_time": {duration: &options.ConnMaxIdleTime},
"conn_max_lifetime": {duration: &options.ConnMaxLifetime},
}
lowerKey := strings.ToLower(key)
if handler, ok := handlers[lowerKey]; ok {
if handler.int != nil {
*handler.int, err = strconv.Atoi(value)
if err != nil {
return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value))
}
} else if handler.duration != nil {
*handler.duration, err = time.ParseDuration(value)
if err != nil {
return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value))
}
} else if handler.bool != nil {
*handler.bool, err = strconv.ParseBool(value)
if err != nil {
return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value))
}
} else {
return fmt.Errorf("redis: unexpected option: %s", key)
}
} else {
return fmt.Errorf("redis: unexpected option: %s", key)
}
return nil
}
// Redis cache storage.
type Redis struct {
storage.TablePrefix
client redis.UniversalClient
}
// Close redis connection.
func (r *Redis) Close() error {
return r.client.Close()
}
func (r *Redis) Ping() error {
return r.client.Ping(context.Background()).Err()
}
// Init nothing.
func (r *Redis) Init() error {
return nil
}
func (r *Redis) Scan(work func(string) error) error {
ctx := context.Background()
if clusterClient, isCluster := r.client.(*redis.ClusterClient); isCluster {
return clusterClient.ForEachMaster(ctx, func(ctx context.Context, client *redis.Client) error {
return r.scan(ctx, client, work)
})
} else {
return r.scan(ctx, r.client, work)
}
}
func (r *Redis) scan(ctx context.Context, client redis.UniversalClient, work func(string) error) error {
var (
result []string
cursor uint64
err error
)
for {
result, cursor, err = client.Scan(ctx, cursor, string(r.TablePrefix)+"*", 0).Result()
if err != nil {
return errors.Trace(err)
}
for _, key := range result {
if err = work(key[len(r.TablePrefix):]); err != nil {
return errors.Trace(err)
}
}
if cursor == 0 {
return nil
}
}
}
func (r *Redis) Purge() error {
ctx := context.Background()
if clusterClient, isCluster := r.client.(*redis.ClusterClient); isCluster {
return clusterClient.ForEachMaster(ctx, func(ctx context.Context, client *redis.Client) error {
return r.purge(ctx, client, isCluster)
})
} else {
return r.purge(ctx, r.client, isCluster)
}
}
func (r *Redis) purge(ctx context.Context, client redis.UniversalClient, isCluster bool) error {
var (
result []string
cursor uint64
err error
)
for {
result, cursor, err = client.Scan(ctx, cursor, string(r.TablePrefix)+"*", 0).Result()
if err != nil {
return errors.Trace(err)
}
if len(result) > 0 {
if isCluster {
p := client.Pipeline()
for _, key := range result {
if err = p.Del(ctx, key).Err(); err != nil {
return errors.Trace(err)
}
}
if _, err = p.Exec(ctx); err != nil {
return errors.Trace(err)
}
} else {
if err = client.Del(ctx, result...).Err(); err != nil {
return errors.Trace(err)
}
}
}
if cursor == 0 {
return nil
}
}
}
func (r *Redis) Set(ctx context.Context, values ...Value) error {
p := r.client.Pipeline()
for _, v := range values {
if err := p.Set(ctx, r.Key(v.name), v.value, 0).Err(); err != nil {
return errors.Trace(err)
}
}
_, err := p.Exec(ctx)
return errors.Trace(err)
}
// Get returns a value from Redis.
func (r *Redis) Get(ctx context.Context, key string) *ReturnValue {
val, err := r.client.Get(ctx, r.Key(key)).Result()
if err != nil {
if err == redis.Nil {
return &ReturnValue{err: errors.Annotate(ErrObjectNotExist, key)}
}
return &ReturnValue{err: err}
}
return &ReturnValue{value: val}
}
// Delete object from Redis.
func (r *Redis) Delete(ctx context.Context, key string) error {
return r.client.Del(ctx, r.Key(key)).Err()
}
// GetSet returns members of a set from Redis.
func (r *Redis) GetSet(ctx context.Context, key string) ([]string, error) {
return r.client.SMembers(ctx, r.Key(key)).Result()
}
// SetSet overrides a set with members in Redis.
func (r *Redis) SetSet(ctx context.Context, key string, members ...string) error {
if len(members) == 0 {
return nil
}
// convert strings to interfaces
values := make([]interface{}, 0, len(members))
for _, member := range members {
values = append(values, member)
}
// push set
pipeline := r.client.Pipeline()
pipeline.Del(ctx, r.Key(key))
pipeline.SAdd(ctx, r.Key(key), values...)
_, err := pipeline.Exec(ctx)
return err
}
// AddSet adds members to a set in Redis.
func (r *Redis) AddSet(ctx context.Context, key string, members ...string) error {
if len(members) == 0 {
return nil
}
// convert strings to interfaces
values := make([]interface{}, 0, len(members))
for _, member := range members {
values = append(values, member)
}
// push set
return r.client.SAdd(ctx, r.Key(key), values...).Err()
}
// RemSet removes members from a set in Redis.
func (r *Redis) RemSet(ctx context.Context, key string, members ...string) error {
if len(members) == 0 {
return nil
}
return r.client.SRem(ctx, r.Key(key), members).Err()
}
// GetSorted gets scores from a sorted set.
func (r *Redis) GetSorted(ctx context.Context, key string, begin, end int) ([]Scored, error) {
members, err := r.client.ZRevRangeWithScores(ctx, r.Key(key), int64(begin), int64(end)).Result()
if err != nil {
return nil, err
}
results := make([]Scored, 0, len(members))
for _, member := range members {
results = append(results, Scored{Id: member.Member.(string), Score: member.Score})
}
return results, nil
}
func (r *Redis) GetSortedByScore(ctx context.Context, key string, begin, end float64) ([]Scored, error) {
members, err := r.client.ZRangeByScoreWithScores(ctx, r.Key(key), &redis.ZRangeBy{
Min: strconv.FormatFloat(begin, 'g', -1, 64),
Max: strconv.FormatFloat(end, 'g', -1, 64),
Offset: 0,
Count: -1,
}).Result()
if err != nil {
return nil, err
}
results := make([]Scored, 0, len(members))
for _, member := range members {
results = append(results, Scored{Id: member.Member.(string), Score: member.Score})
}
return results, nil
}
func (r *Redis) RemSortedByScore(ctx context.Context, key string, begin, end float64) error {
return r.client.ZRemRangeByScore(ctx, r.Key(key),
strconv.FormatFloat(begin, 'g', -1, 64), | strconv.FormatFloat(end, 'g', -1, 64)).
Err()
} | random_line_split |
|
redis.go | Unescape(username)
if err != nil {
return nil, errors.Wrap(err, fmt.Errorf("invalid username"))
}
// Validate and process the password.
if strings.Contains(password, ":") {
return nil, fmt.Errorf("unescaped colon in password")
}
if strings.Contains(password, "/") {
return nil, fmt.Errorf("unescaped slash in password")
}
options.Password, err = url.PathUnescape(password)
if err != nil {
return nil, errors.Wrap(err, fmt.Errorf("invalid password"))
}
}
// fetch the hosts field
hosts := uri
if idx := strings.IndexAny(uri, "/?@"); idx != -1 {
if uri[idx] == '@' {
return nil, fmt.Errorf("unescaped @ sign in user info")
}
hosts = uri[:idx]
}
options.Addrs = strings.Split(hosts, ",")
uri = uri[len(hosts):]
if len(uri) > 0 && uri[0] == '/' {
uri = uri[1:]
}
// grab connection arguments from URI
connectionArgsFromQueryString, err := extractQueryArgsFromURI(uri)
if err != nil {
return nil, err
}
for _, pair := range connectionArgsFromQueryString {
err = addOption(options, pair)
if err != nil {
return nil, err
}
}
return options, nil
}
func extractQueryArgsFromURI(uri string) ([]string, error) {
if len(uri) == 0 {
return nil, nil
}
if uri[0] != '?' {
return nil, errors.New("must have a ? separator between path and query")
}
uri = uri[1:]
if len(uri) == 0 {
return nil, nil
}
return strings.FieldsFunc(uri, func(r rune) bool { return r == ';' || r == '&' }), nil
}
type optionHandler struct {
int *int
bool *bool
duration *time.Duration
}
func addOption(options *redis.ClusterOptions, pair string) error {
kv := strings.SplitN(pair, "=", 2)
if len(kv) != 2 || kv[0] == "" {
return fmt.Errorf("invalid option")
}
key, err := url.QueryUnescape(kv[0])
if err != nil {
return errors.Wrap(err, errors.Errorf("invalid option key %q", kv[0]))
}
value, err := url.QueryUnescape(kv[1])
if err != nil {
return errors.Wrap(err, errors.Errorf("invalid option value %q", kv[1]))
}
handlers := map[string]optionHandler{
"max_retries": {int: &options.MaxRetries},
"min_retry_backoff": {duration: &options.MinRetryBackoff},
"max_retry_backoff": {duration: &options.MaxRetryBackoff},
"dial_timeout": {duration: &options.DialTimeout},
"read_timeout": {duration: &options.ReadTimeout},
"write_timeout": {duration: &options.WriteTimeout},
"pool_fifo": {bool: &options.PoolFIFO},
"pool_size": {int: &options.PoolSize},
"pool_timeout": {duration: &options.PoolTimeout},
"min_idle_conns": {int: &options.MinIdleConns},
"max_idle_conns": {int: &options.MaxIdleConns},
"conn_max_idle_time": {duration: &options.ConnMaxIdleTime},
"conn_max_lifetime": {duration: &options.ConnMaxLifetime},
}
lowerKey := strings.ToLower(key)
if handler, ok := handlers[lowerKey]; ok {
if handler.int != nil {
*handler.int, err = strconv.Atoi(value)
if err != nil {
return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value))
}
} else if handler.duration != nil {
*handler.duration, err = time.ParseDuration(value)
if err != nil {
return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value))
}
} else if handler.bool != nil {
*handler.bool, err = strconv.ParseBool(value)
if err != nil {
return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value))
}
} else {
return fmt.Errorf("redis: unexpected option: %s", key)
}
} else {
return fmt.Errorf("redis: unexpected option: %s", key)
}
return nil
}
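// Illustrative sketch, not part of the original file: how a hypothetical option
// fragment flows through extractQueryArgsFromURI and addOption above. The
// function name and the option string are made up; the keys come straight from
// the handlers map in addOption.
func exampleParseOptions() (*redis.ClusterOptions, error) {
	options := &redis.ClusterOptions{}
	pairs, err := extractQueryArgsFromURI("?dial_timeout=3s&pool_size=10")
	if err != nil {
		return nil, err
	}
	for _, pair := range pairs {
		if err := addOption(options, pair); err != nil {
			return nil, err
		}
	}
	// At this point options.DialTimeout == 3*time.Second and options.PoolSize == 10.
	return options, nil
}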
// Redis cache storage.
type Redis struct {
storage.TablePrefix
client redis.UniversalClient
}
// Close redis connection.
func (r *Redis) Close() error {
return r.client.Close()
}
func (r *Redis) Ping() error {
return r.client.Ping(context.Background()).Err()
}
// Init nothing.
func (r *Redis) Init() error {
return nil
}
func (r *Redis) Scan(work func(string) error) error {
ctx := context.Background()
if clusterClient, isCluster := r.client.(*redis.ClusterClient); isCluster {
return clusterClient.ForEachMaster(ctx, func(ctx context.Context, client *redis.Client) error {
return r.scan(ctx, client, work)
})
} else {
return r.scan(ctx, r.client, work)
}
}
func (r *Redis) scan(ctx context.Context, client redis.UniversalClient, work func(string) error) error {
var (
result []string
cursor uint64
err error
)
for {
result, cursor, err = client.Scan(ctx, cursor, string(r.TablePrefix)+"*", 0).Result()
if err != nil {
return errors.Trace(err)
}
for _, key := range result {
if err = work(key[len(r.TablePrefix):]); err != nil {
return errors.Trace(err)
}
}
if cursor == 0 {
return nil
}
}
}
func (r *Redis) Purge() error {
ctx := context.Background()
if clusterClient, isCluster := r.client.(*redis.ClusterClient); isCluster {
return clusterClient.ForEachMaster(ctx, func(ctx context.Context, client *redis.Client) error {
return r.purge(ctx, client, isCluster)
})
} else {
return r.purge(ctx, r.client, isCluster)
}
}
func (r *Redis) purge(ctx context.Context, client redis.UniversalClient, isCluster bool) error {
var (
result []string
cursor uint64
err error
)
for {
result, cursor, err = client.Scan(ctx, cursor, string(r.TablePrefix)+"*", 0).Result()
if err != nil {
return errors.Trace(err)
}
if len(result) > 0 {
if isCluster {
p := client.Pipeline()
for _, key := range result {
if err = p.Del(ctx, key).Err(); err != nil {
return errors.Trace(err)
}
}
if _, err = p.Exec(ctx); err != nil {
return errors.Trace(err)
}
} else {
if err = client.Del(ctx, result...).Err(); err != nil {
return errors.Trace(err)
}
}
}
if cursor == 0 {
return nil
}
}
}
func (r *Redis) Set(ctx context.Context, values ...Value) error {
p := r.client.Pipeline()
for _, v := range values {
if err := p.Set(ctx, r.Key(v.name), v.value, 0).Err(); err != nil {
return errors.Trace(err)
}
}
_, err := p.Exec(ctx)
return errors.Trace(err)
}
// Get returns a value from Redis.
func (r *Redis) Get(ctx context.Context, key string) *ReturnValue {
val, err := r.client.Get(ctx, r.Key(key)).Result()
if err != nil {
if err == redis.Nil |
return &ReturnValue{err: err}
}
return &ReturnValue{value: val}
}
// Delete object from Redis.
func (r *Redis) Delete(ctx context.Context, key string) error {
return r.client.Del(ctx, r.Key(key)).Err()
}
// GetSet returns members of a set from Redis.
func (r *Redis) GetSet(ctx context.Context, key string) ([]string, error) {
return r.client.SMembers(ctx, r.Key(key)).Result()
}
// SetSet overrides a set with members in Redis.
func (r *Redis) SetSet(ctx context.Context, key string, members ...string) error {
if len(members) == 0 {
return nil
}
// convert strings to interfaces
values := make([]interface{}, 0, len(members))
for _, member := range members {
values = append(values, member)
}
// push set
pipeline := r.client.Pipeline()
pipeline.Del(ctx, r.Key(key))
pipeline.SAdd(ctx, r.Key(key), values...)
_, err := pipeline.Exec(ctx)
return err
}
// AddSet adds members to a set in Redis.
func (r *Redis) AddSet(ctx context.Context, | {
return &ReturnValue{err: errors.Annotate(ErrObjectNotExist, key)}
} | conditional_block |
redis.go | Unescape(username)
if err != nil {
return nil, errors.Wrap(err, fmt.Errorf("invalid username"))
}
// Validate and process the password.
if strings.Contains(password, ":") {
return nil, fmt.Errorf("unescaped colon in password")
}
if strings.Contains(password, "/") {
return nil, fmt.Errorf("unescaped slash in password")
}
options.Password, err = url.PathUnescape(password)
if err != nil {
return nil, errors.Wrap(err, fmt.Errorf("invalid password"))
}
}
// fetch the hosts field
hosts := uri
if idx := strings.IndexAny(uri, "/?@"); idx != -1 {
if uri[idx] == '@' {
return nil, fmt.Errorf("unescaped @ sign in user info")
}
hosts = uri[:idx]
}
options.Addrs = strings.Split(hosts, ",")
uri = uri[len(hosts):]
if len(uri) > 0 && uri[0] == '/' {
uri = uri[1:]
}
// grab connection arguments from URI
connectionArgsFromQueryString, err := extractQueryArgsFromURI(uri)
if err != nil {
return nil, err
}
for _, pair := range connectionArgsFromQueryString {
err = addOption(options, pair)
if err != nil {
return nil, err
}
}
return options, nil
}
func extractQueryArgsFromURI(uri string) ([]string, error) {
if len(uri) == 0 {
return nil, nil
}
if uri[0] != '?' {
return nil, errors.New("must have a ? separator between path and query")
}
uri = uri[1:]
if len(uri) == 0 {
return nil, nil
}
return strings.FieldsFunc(uri, func(r rune) bool { return r == ';' || r == '&' }), nil
}
type optionHandler struct {
int *int
bool *bool
duration *time.Duration
}
func addOption(options *redis.ClusterOptions, pair string) error {
kv := strings.SplitN(pair, "=", 2)
if len(kv) != 2 || kv[0] == "" {
return fmt.Errorf("invalid option")
}
key, err := url.QueryUnescape(kv[0])
if err != nil {
return errors.Wrap(err, errors.Errorf("invalid option key %q", kv[0]))
}
value, err := url.QueryUnescape(kv[1])
if err != nil {
return errors.Wrap(err, errors.Errorf("invalid option value %q", kv[1]))
}
handlers := map[string]optionHandler{
"max_retries": {int: &options.MaxRetries},
"min_retry_backoff": {duration: &options.MinRetryBackoff},
"max_retry_backoff": {duration: &options.MaxRetryBackoff},
"dial_timeout": {duration: &options.DialTimeout},
"read_timeout": {duration: &options.ReadTimeout},
"write_timeout": {duration: &options.WriteTimeout},
"pool_fifo": {bool: &options.PoolFIFO},
"pool_size": {int: &options.PoolSize},
"pool_timeout": {duration: &options.PoolTimeout},
"min_idle_conns": {int: &options.MinIdleConns},
"max_idle_conns": {int: &options.MaxIdleConns},
"conn_max_idle_time": {duration: &options.ConnMaxIdleTime},
"conn_max_lifetime": {duration: &options.ConnMaxLifetime},
}
lowerKey := strings.ToLower(key)
if handler, ok := handlers[lowerKey]; ok {
if handler.int != nil {
*handler.int, err = strconv.Atoi(value)
if err != nil {
return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value))
}
} else if handler.duration != nil {
*handler.duration, err = time.ParseDuration(value)
if err != nil {
return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value))
}
} else if handler.bool != nil {
*handler.bool, err = strconv.ParseBool(value)
if err != nil {
return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value))
}
} else {
return fmt.Errorf("redis: unexpected option: %s", key)
}
} else {
return fmt.Errorf("redis: unexpected option: %s", key)
}
return nil
}
// Redis cache storage.
type Redis struct {
storage.TablePrefix
client redis.UniversalClient
}
// Close redis connection.
func (r *Redis) Close() error {
return r.client.Close()
}
func (r *Redis) Ping() error |
// Init nothing.
func (r *Redis) Init() error {
return nil
}
func (r *Redis) Scan(work func(string) error) error {
ctx := context.Background()
if clusterClient, isCluster := r.client.(*redis.ClusterClient); isCluster {
return clusterClient.ForEachMaster(ctx, func(ctx context.Context, client *redis.Client) error {
return r.scan(ctx, client, work)
})
} else {
return r.scan(ctx, r.client, work)
}
}
func (r *Redis) scan(ctx context.Context, client redis.UniversalClient, work func(string) error) error {
var (
result []string
cursor uint64
err error
)
for {
result, cursor, err = client.Scan(ctx, cursor, string(r.TablePrefix)+"*", 0).Result()
if err != nil {
return errors.Trace(err)
}
for _, key := range result {
if err = work(key[len(r.TablePrefix):]); err != nil {
return errors.Trace(err)
}
}
if cursor == 0 {
return nil
}
}
}
func (r *Redis) Purge() error {
ctx := context.Background()
if clusterClient, isCluster := r.client.(*redis.ClusterClient); isCluster {
return clusterClient.ForEachMaster(ctx, func(ctx context.Context, client *redis.Client) error {
return r.purge(ctx, client, isCluster)
})
} else {
return r.purge(ctx, r.client, isCluster)
}
}
func (r *Redis) purge(ctx context.Context, client redis.UniversalClient, isCluster bool) error {
var (
result []string
cursor uint64
err error
)
for {
result, cursor, err = client.Scan(ctx, cursor, string(r.TablePrefix)+"*", 0).Result()
if err != nil {
return errors.Trace(err)
}
if len(result) > 0 {
if isCluster {
p := client.Pipeline()
for _, key := range result {
if err = p.Del(ctx, key).Err(); err != nil {
return errors.Trace(err)
}
}
if _, err = p.Exec(ctx); err != nil {
return errors.Trace(err)
}
} else {
if err = client.Del(ctx, result...).Err(); err != nil {
return errors.Trace(err)
}
}
}
if cursor == 0 {
return nil
}
}
}
func (r *Redis) Set(ctx context.Context, values ...Value) error {
p := r.client.Pipeline()
for _, v := range values {
if err := p.Set(ctx, r.Key(v.name), v.value, 0).Err(); err != nil {
return errors.Trace(err)
}
}
_, err := p.Exec(ctx)
return errors.Trace(err)
}
// Get returns a value from Redis.
func (r *Redis) Get(ctx context.Context, key string) *ReturnValue {
val, err := r.client.Get(ctx, r.Key(key)).Result()
if err != nil {
if err == redis.Nil {
return &ReturnValue{err: errors.Annotate(ErrObjectNotExist, key)}
}
return &ReturnValue{err: err}
}
return &ReturnValue{value: val}
}
// Delete object from Redis.
func (r *Redis) Delete(ctx context.Context, key string) error {
return r.client.Del(ctx, r.Key(key)).Err()
}
// GetSet returns members of a set from Redis.
func (r *Redis) GetSet(ctx context.Context, key string) ([]string, error) {
return r.client.SMembers(ctx, r.Key(key)).Result()
}
// SetSet overrides a set with members in Redis.
func (r *Redis) SetSet(ctx context.Context, key string, members ...string) error {
if len(members) == 0 {
return nil
}
// convert strings to interfaces
values := make([]interface{}, 0, len(members))
for _, member := range members {
values = append(values, member)
}
// push set
pipeline := r.client.Pipeline()
pipeline.Del(ctx, r.Key(key))
pipeline.SAdd(ctx, r.Key(key), values...)
_, err := pipeline.Exec(ctx)
return err
}
// AddSet adds members to a set in Redis.
func (r *Redis) AddSet(ctx context.Context, | {
return r.client.Ping(context.Background()).Err()
} | identifier_body |
input.go |
AltX
AltY
AltZ
AltOpenBracket
AltFwdSlash
AltCloseBracket
AltCaret
AltUnderscore
AltGrave
Alta
Altb
Altc
Altd
Alte
Altf
Altg
Alth
Alti
Altj
Altk
Altl
Altm
Altn
Alto
Altp
Altq
Altr
Alts
Altt
Altu
Altv
Altw
Altx
Alty
Altz
AltOpenCurly
AltPipe
AltCloseCurly
AltTilde
AltBS
//End printable
// actual number values below are arbitrary and not in order.
// Extended keyCodes
Del
AltDel
BTab
BSpace
PgUp
PgDn
Up
Down
Right
Left
Home
End
// /relative/ order of the next 8 matter
SUp // Shift
SDown
SRight
SLeft
CtrlUp
CtrlDown
CtrlRight
CtrlLeft
// don't actually need to be in order
F1
F2
F3
F4
F5
F6
F7
F8
F9
F10
F11
F12
//sequential calculated. Keep in relative order
CtrlAlta
CtrlAltb
CtrlAltc
CtrlAltd
CtrlAlte
CtrlAltf
CtrlAltg
CtrlAlth
CtrlAlti
CtrlAltj
CtrlAltk
CtrlAltl
CtrlAltm
CtrlAltn
CtrlAlto
CtrlAltp
CtrlAltq
CtrlAltr
CtrlAlts
CtrlAltt
CtrlAltu
CtrlAltv
CtrlAltw
CtrlAltx
CtrlAlty
CtrlAltz
)
type EvType uint8
const (
EventInvalid EvType = iota
KeySpecial
KeyPrint
Mouse
)
/*
How to determine mouse action:
Mousedown: Type=Mouse && Btn != 3 && !Motion
Mouseup: Type=Mouse && Btn == 3 && !Motion
Mousedrag: Type=Mouse && Btn != 3 && Motion
Mousemove: Type=Mouse && Btn == 3 && Motion
ScrollUp: Type=Mouse && Btn=4
ScrollDn: Type=Mouse && Btn=5
*/
type MouseEvent struct {
Y int
X int
Btn int // 0=Primary, 1=Middle, 2=Right, 3=Release, 4=ScrUp, 5=ScrDown
Shift bool
Meta bool
Ctrl bool
Motion bool
buf []byte
}
type Event struct {
Type EvType
Key rune
M *MouseEvent
}
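// Illustrative helper, not part of the original file: it simply applies the
// "How to determine mouse action" rules documented above MouseEvent; the
// function name is hypothetical.
func describeMouse(ev Event) string {
	if ev.Type != Mouse || ev.M == nil {
		return "not a mouse event"
	}
	m := ev.M
	switch {
	case m.Btn == 4:
		return "scroll up"
	case m.Btn == 5:
		return "scroll down"
	case m.Btn == 3 && m.Motion:
		return "mouse move"
	case m.Btn == 3:
		return "mouse up"
	case m.Motion:
		return "mouse drag"
	default:
		return "mouse down"
	}
}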
// returns true if a specific character int(rune) is a printable character (alphanumeric, punctuation)
func Printable(i int) bool { return i >= 32 && i <= 126 }
/*
Grabs Stdin(or whatever passed fd) to listen for keyboard input. Returns 3 things:
- a channel to listen for key events on
- a terminal restore function. Always safe to call, especially when error is set
- error condition
This is the primary use of the top-level tui package, if you intend to capture input, or mouse events
*/
func GetInput(ctx context.Context, fd int) (<-chan Event, func() error, error) {
ch := make(chan Event, 1000)
st, err := terminal.GetState(fd)
if err != nil {
return nil, func() error { return nil }, err
}
restore := func() error { return terminal.Restore(fd, st) }
_, err = terminal.MakeRaw(fd)
if err != nil {
return nil, restore, err
}
ib := inputBuf{b: make([]byte, 0, 9)}
go func() {
for {
select {
case <-ctx.Done():
return
case ev := <-ib.readEvent(fd):
ch <- ev
}
}
}()
return ch, restore, nil
}
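// Illustrative usage sketch, not part of the original file. It assumes a
// separate example file importing "context", "fmt" and "os"; the function
// name is made up.
func exampleReadLoop() error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	events, restore, err := GetInput(ctx, int(os.Stdin.Fd()))
	// restore is always safe to call, even when err is non-nil (see above).
	defer restore()
	if err != nil {
		return err
	}
	for ev := range events {
		switch {
		case ev.Type == KeySpecial && ev.Key == CtrlC:
			return nil // quit on Ctrl-C
		case ev.Type == KeyPrint:
			fmt.Printf("typed %q\r\n", ev.Key)
		case ev.Type == Mouse && ev.M != nil:
			fmt.Printf("mouse btn=%d at x=%d y=%d\r\n", ev.M.Btn, ev.M.X, ev.M.Y)
		}
	}
	return nil
}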
type inputBuf struct {
b []byte
mu sync.Mutex
}
func (ib *inputBuf) readEvent(fd int) <-chan Event {
ch := make(chan Event)
go func() {
ib.mu.Lock()
defer func() {
ib.mu.Unlock()
}()
for ; len(ib.b) == 0; ib.b = fillBuf(fd, ib.b) {
}
if len(ib.b) == 0 {
close(ch)
return
}
sz := 1
defer func() {
ib.b = ib.b[sz:]
}()
switch ib.b[0] {
case byte(CtrlC), byte(CtrlG), byte(CtrlQ):
ch <- Event{KeySpecial, rune(ib.b[0]), nil}
return
case 127:
ch <- Event{KeySpecial, BSpace, nil}
return
case 0:
ch <- Event{KeySpecial, Null, nil} // Ctrl-space?
return
case byte(ESC):
ch <- ib.escSequence(&sz)
return
}
if ib.b[0] < 32 { // Ctrl-A_Z
ch <- Event{KeySpecial, rune(ib.b[0]), nil}
return
}
char, rsz := utf8.DecodeRune(ib.b)
if char == utf8.RuneError {
ch <- Event{KeySpecial, ESC, nil}
return
}
sz = rsz
ch <- Event{KeyPrint, char, nil}
}()
return ch
}
/*
* Gets first byte, blocking to do so.
* Tries to get any extra bytes within a 100ms timespan
* like esc key sequences (arrows, etc)
*
*/
func fillBuf(fd int, buf []byte) []byte {
const pollInt = 5 //ms
const span = 100 //ms -- reflected via retries*pollInt
c, ok := getchar(fd, false)
if !ok {
return buf
}
buf = append(buf, byte(c))
retries := 0
if c == int(ESC) {
retries = span / pollInt // 20
}
pc := c
for {
c, ok := getchar(fd, true)
if !ok {
if retries > 0 {
retries--
time.Sleep(pollInt * time.Millisecond)
continue
}
break
} else if c == int(ESC) && pc != c {
retries = span / pollInt // got the next char, keep going
} else {
retries = 0
}
buf = append(buf, byte(c))
pc = c
}
return buf
}
func | (fd int, nonblock bool) (int, bool) {
b := make([]byte, 1)
err := setNonBlock(fd, nonblock)
if err != nil {
return 0, false
}
if n, err := sysRead(fd, b); err != nil || n < 1 {
return 0, false
}
return int(b[0]), true
}
//@todo: more shift/ctrl/alt of extended keys like Home, F#, PgUp
//http://www.manmrk.net/tutorials/ISPF/XE/xehelp/html/HID00000594.htm
//this is the ugliest code ever: it checks the seemingly most random
//assignment of codes to meaningful keys
func (ib *inputBuf) escSequence(sz *int) Event {
if len(ib.b) < 2 {
return Event{KeySpecial, ESC, nil}
}
*sz = 2
switch ib.b[1] {
case byte(ESC):
return Event{KeySpecial, ESC, nil}
case 127:
return Event{KeySpecial, AltBS, nil}
case 91, 79: // [, O
if len(ib.b) < 3 {
if ib.b[1] == '[' {
return Event{KeySpecial, AltOpenBracket, nil}
} else if ib.b[1] == 'O' {
return Event{KeySpecial, AltO, nil}
}
return debugEv(ib.b)
}
*sz = 3
switch ib.b[2] {
case 65:
return Event{KeySpecial, Up, nil}
case 66:
return Event{KeySpecial, Down, nil}
case 67:
return Event{KeySpecial, Right, nil}
case 68:
return Event{KeySpecial, Left, nil}
case 90:
return Event{KeySpecial, BTab, nil}
case 72:
return Event{KeySpecial, Home, nil}
case 70:
return Event{ | getchar | identifier_name |
input.go |
AltX
AltY
AltZ
AltOpenBracket
AltFwdSlash
AltCloseBracket
AltCaret
AltUnderscore
AltGrave
Alta
Altb
Altc
Altd
Alte
Altf
Altg
Alth
Alti
Altj
Altk
Altl
Altm
Altn
Alto
Altp
Altq
Altr
Alts
Altt
Altu
Altv
Altw
Altx
Alty
Altz
AltOpenCurly
AltPipe
AltCloseCurly
AltTilde
AltBS
//End printable
// actual number values below are arbitrary and not in order.
// Extended keyCodes
Del
AltDel
BTab
BSpace
PgUp
PgDn
Up
Down
Right
Left
Home
End
// /relative/ order of the next 8 matter
SUp // Shift
SDown
SRight
SLeft
CtrlUp
CtrlDown
CtrlRight
CtrlLeft
// don't actually need to be in order
F1
F2
F3
F4
F5
F6
F7
F8
F9
F10
F11
F12
//sequential calculated. Keep in relative order
CtrlAlta
CtrlAltb
CtrlAltc
CtrlAltd
CtrlAlte
CtrlAltf
CtrlAltg
CtrlAlth
CtrlAlti
CtrlAltj
CtrlAltk
CtrlAltl
CtrlAltm
CtrlAltn
CtrlAlto
CtrlAltp
CtrlAltq
CtrlAltr
CtrlAlts
CtrlAltt
CtrlAltu
CtrlAltv
CtrlAltw
CtrlAltx
CtrlAlty
CtrlAltz
)
type EvType uint8
const (
EventInvalid EvType = iota
KeySpecial
KeyPrint
Mouse
)
/*
How to determine mouse action:
Mousedown: Type=Mouse && Btn != 3 && !Motion
Mouseup: Type=Mouse && Btn == 3 && !Motion
Mousedrag: Type=Mouse && Btn != 3 && Motion
Mousemove: Type=Mouse && Btn == 3 && Motion
ScrollUp: Type=Mouse && Btn=4
ScrollDn: Type=Mouse && Btn=5
*/
type MouseEvent struct {
Y int
X int
Btn int // 0=Primary, 1=Middle, 2=Right, 3=Release, 4=ScrUp, 5=ScrDown
Shift bool
Meta bool
Ctrl bool
Motion bool
buf []byte
}
type Event struct {
Type EvType
Key rune
M *MouseEvent
}
// returns true if a specific character int(rune) is a printable character (alphanumeric, punctuation)
func Printable(i int) bool { return i >= 32 && i <= 126 }
/*
Grabs Stdin(or whatever passed fd) to listen for keyboard input. Returns 3 things:
- a channel to listen for key events on
- a terminal restore function. Always safe to call, especially when error is set
- error condition
This is the primary use of the top-level tui package, if you intend to capture input, or mouse events
*/
func GetInput(ctx context.Context, fd int) (<-chan Event, func() error, error) {
ch := make(chan Event, 1000)
st, err := terminal.GetState(fd)
if err != nil {
return nil, func() error { return nil }, err
}
restore := func() error { return terminal.Restore(fd, st) }
_, err = terminal.MakeRaw(fd)
if err != nil |
ib := inputBuf{b: make([]byte, 0, 9)}
go func() {
for {
select {
case <-ctx.Done():
return
case ev := <-ib.readEvent(fd):
ch <- ev
}
}
}()
return ch, restore, nil
}
type inputBuf struct {
b []byte
mu sync.Mutex
}
func (ib *inputBuf) readEvent(fd int) <-chan Event {
ch := make(chan Event)
go func() {
ib.mu.Lock()
defer func() {
ib.mu.Unlock()
}()
for ; len(ib.b) == 0; ib.b = fillBuf(fd, ib.b) {
}
if len(ib.b) == 0 {
close(ch)
return
}
sz := 1
defer func() {
ib.b = ib.b[sz:]
}()
switch ib.b[0] {
case byte(CtrlC), byte(CtrlG), byte(CtrlQ):
ch <- Event{KeySpecial, rune(ib.b[0]), nil}
return
case 127:
ch <- Event{KeySpecial, BSpace, nil}
return
case 0:
ch <- Event{KeySpecial, Null, nil} // Ctrl-space?
return
case byte(ESC):
ch <- ib.escSequence(&sz)
return
}
if ib.b[0] < 32 { // Ctrl-A_Z
ch <- Event{KeySpecial, rune(ib.b[0]), nil}
return
}
char, rsz := utf8.DecodeRune(ib.b)
if char == utf8.RuneError {
ch <- Event{KeySpecial, ESC, nil}
return
}
sz = rsz
ch <- Event{KeyPrint, char, nil}
}()
return ch
}
/*
* Gets first byte, blocking to do so.
* Tries to get any extra bytes within a 100ms timespan
* like esc key sequences (arrows, etc)
*
*/
func fillBuf(fd int, buf []byte) []byte {
const pollInt = 5 //ms
const span = 100 //ms -- reflected via retries*pollInt
c, ok := getchar(fd, false)
if !ok {
return buf
}
buf = append(buf, byte(c))
retries := 0
if c == int(ESC) {
retries = span / pollInt // 20
}
pc := c
for {
c, ok := getchar(fd, true)
if !ok {
if retries > 0 {
retries--
time.Sleep(pollInt * time.Millisecond)
continue
}
break
} else if c == int(ESC) && pc != c {
retries = span / pollInt // got the next char, keep going
} else {
retries = 0
}
buf = append(buf, byte(c))
pc = c
}
return buf
}
func getchar(fd int, nonblock bool) (int, bool) {
b := make([]byte, 1)
err := setNonBlock(fd, nonblock)
if err != nil {
return 0, false
}
if n, err := sysRead(fd, b); err != nil || n < 1 {
return 0, false
}
return int(b[0]), true
}
//@todo: more shift/ctrl/alt of extended keys like Home, F#, PgUp
//http://www.manmrk.net/tutorials/ISPF/XE/xehelp/html/HID00000594.htm
//this is the ugliest code ever: it checks the seemingly most random
//assignment of codes to meaningful keys
func (ib *inputBuf) escSequence(sz *int) Event {
if len(ib.b) < 2 {
return Event{KeySpecial, ESC, nil}
}
*sz = 2
switch ib.b[1] {
case byte(ESC):
return Event{KeySpecial, ESC, nil}
case 127:
return Event{KeySpecial, AltBS, nil}
case 91, 79: // [, O
if len(ib.b) < 3 {
if ib.b[1] == '[' {
return Event{KeySpecial, AltOpenBracket, nil}
} else if ib.b[1] == 'O' {
return Event{KeySpecial, AltO, nil}
}
return debugEv(ib.b)
}
*sz = 3
switch ib.b[2] {
case 65:
return Event{KeySpecial, Up, nil}
case 66:
return Event{KeySpecial, Down, nil}
case 67:
return Event{KeySpecial, Right, nil}
case 68:
return Event{KeySpecial, Left, nil}
case 90:
return Event{KeySpecial, BTab, nil}
case 72:
return Event{KeySpecial, Home, nil}
case 70:
return Event{ | {
return nil, restore, err
} | conditional_block |
input.go | W
AltX
AltY
AltZ
AltOpenBracket
AltFwdSlash
AltCloseBracket
AltCaret
AltUnderscore
AltGrave
Alta
Altb
Altc
Altd
Alte
Altf
Altg
Alth
Alti
Altj
Altk
Altl
Altm
Altn
Alto
Altp
Altq
Altr
Alts
Altt
Altu
Altv
Altw
Altx
Alty
Altz
AltOpenCurly
AltPipe
AltCloseCurly
AltTilde
AltBS
//End printable
// actual number values below are arbitrary and not in order.
// Extended keyCodes
Del
AltDel
BTab
BSpace
PgUp
PgDn
Up
Down
Right
Left
Home
End
// /relative/ order of the next 8 matter
SUp // Shift
SDown
SRight
SLeft
CtrlUp
CtrlDown
CtrlRight
CtrlLeft
// don't actually need to be in order
F1
F2
F3
F4
F5
F6
F7
F8
F9
F10
F11
F12
//sequential calculated. Keep in relative order
CtrlAlta
CtrlAltb
CtrlAltc
CtrlAltd
CtrlAlte
CtrlAltf
CtrlAltg
CtrlAlth
CtrlAlti
CtrlAltj
CtrlAltk
CtrlAltl
CtrlAltm
CtrlAltn
CtrlAlto
CtrlAltp
CtrlAltq
CtrlAltr
CtrlAlts
CtrlAltt
CtrlAltu
CtrlAltv
CtrlAltw
CtrlAltx
CtrlAlty
CtrlAltz
)
type EvType uint8
const (
EventInvalid EvType = iota
KeySpecial
KeyPrint
Mouse
)
/*
How to determine mouse action:
Mousedown: Type=Mouse && Btn != 3 && !Motion
Mouseup: Type=Mouse && Btn == 3 && !Motion
Mousedrag: Type=Mouse && Btn != 3 && Motion
Mousemove: Type=Mouse && Btn == 3 && Motion
ScrollUp: Type=Mouse && Btn=4
ScrollDn: Type=Mouse && Btn=5
*/
type MouseEvent struct {
Y int
X int
Btn int // 0=Primary, 1=Middle, 2=Right, 3=Release, 4=ScrUp, 5=ScrDown
Shift bool
Meta bool
Ctrl bool
Motion bool
buf []byte
}
type Event struct {
Type EvType
Key rune
M *MouseEvent
}
// returns true if a specific character int(rune) is a printable character (alphanumeric, punctuation)
func Printable(i int) bool { return i >= 32 && i <= 126 }
/*
Grabs Stdin(or whatever passed fd) to listen for keyboard input. Returns 3 things:
- a channel to listen for key events on
- a terminal restore function. Always safe to call, especially when error is set
- error condition
This is the primary use of the top-level tui package, if you intend to capture input, or mouse events
*/
func GetInput(ctx context.Context, fd int) (<-chan Event, func() error, error) {
ch := make(chan Event, 1000)
st, err := terminal.GetState(fd)
if err != nil {
return nil, func() error { return nil }, err
}
restore := func() error { return terminal.Restore(fd, st) }
_, err = terminal.MakeRaw(fd)
if err != nil {
return nil, restore, err
}
ib := inputBuf{b: make([]byte, 0, 9)}
go func() {
for {
select {
case <-ctx.Done():
return
case ev := <-ib.readEvent(fd):
ch <- ev
}
}
}()
return ch, restore, nil
}
type inputBuf struct {
b []byte
mu sync.Mutex
}
func (ib *inputBuf) readEvent(fd int) <-chan Event | }()
switch ib.b[0] {
case byte(CtrlC), byte(CtrlG), byte(CtrlQ):
ch <- Event{KeySpecial, rune(ib.b[0]), nil}
return
case 127:
ch <- Event{KeySpecial, BSpace, nil}
return
case 0:
ch <- Event{KeySpecial, Null, nil} // Ctrl-space?
return
case byte(ESC):
ch <- ib.escSequence(&sz)
return
}
if ib.b[0] < 32 { // Ctrl-A_Z
ch <- Event{KeySpecial, rune(ib.b[0]), nil}
return
}
char, rsz := utf8.DecodeRune(ib.b)
if char == utf8.RuneError {
ch <- Event{KeySpecial, ESC, nil}
return
}
sz = rsz
ch <- Event{KeyPrint, char, nil}
}()
return ch
}
/*
* Gets first byte, blocking to do so.
* Tries to get any extra bytes within a 100ms timespan
* like esc key sequences (arrows, etc)
*
*/
func fillBuf(fd int, buf []byte) []byte {
const pollInt = 5 //ms
const span = 100 //ms -- reflected via retries*pollInt
c, ok := getchar(fd, false)
if !ok {
return buf
}
buf = append(buf, byte(c))
retries := 0
if c == int(ESC) {
retries = span / pollInt // 20
}
pc := c
for {
c, ok := getchar(fd, true)
if !ok {
if retries > 0 {
retries--
time.Sleep(pollInt * time.Millisecond)
continue
}
break
} else if c == int(ESC) && pc != c {
retries = span / pollInt // got the next char, keep going
} else {
retries = 0
}
buf = append(buf, byte(c))
pc = c
}
return buf
}
func getchar(fd int, nonblock bool) (int, bool) {
b := make([]byte, 1)
err := setNonBlock(fd, nonblock)
if err != nil {
return 0, false
}
if n, err := sysRead(fd, b); err != nil || n < 1 {
return 0, false
}
return int(b[0]), true
}
//@todo: more shift/ctrl/alt of extended keys like Home, F#, PgUp
//http://www.manmrk.net/tutorials/ISPF/XE/xehelp/html/HID00000594.htm
//this is the ugliest code ever: it checks the seemingly most random
//assignment of codes to meaningful keys
func (ib *inputBuf) escSequence(sz *int) Event {
if len(ib.b) < 2 {
return Event{KeySpecial, ESC, nil}
}
*sz = 2
switch ib.b[1] {
case byte(ESC):
return Event{KeySpecial, ESC, nil}
case 127:
return Event{KeySpecial, AltBS, nil}
case 91, 79: // [, O
if len(ib.b) < 3 {
if ib.b[1] == '[' {
return Event{KeySpecial, AltOpenBracket, nil}
} else if ib.b[1] == 'O' {
return Event{KeySpecial, AltO, nil}
}
return debugEv(ib.b)
}
*sz = 3
switch ib.b[2] {
case 65:
return Event{KeySpecial, Up, nil}
case 66:
return Event{KeySpecial, Down, nil}
case 67:
return Event{KeySpecial, Right, nil}
case 68:
return Event{KeySpecial, Left, nil}
case 90:
return Event{KeySpecial, BTab, nil}
case 72:
return Event{KeySpecial, Home, nil}
case 70:
return Event{ | {
ch := make(chan Event)
go func() {
ib.mu.Lock()
defer func() {
ib.mu.Unlock()
}()
for ; len(ib.b) == 0; ib.b = fillBuf(fd, ib.b) {
}
if len(ib.b) == 0 {
close(ch)
return
}
sz := 1
defer func() {
ib.b = ib.b[sz:] | identifier_body |
input.go | W
AltX
AltY
AltZ
AltOpenBracket
AltFwdSlash
AltCloseBracket
AltCaret
AltUnderscore
AltGrave
Alta
Altb
Altc
Altd
Alte
Altf
Altg
Alth
Alti
Altj
Altk
Altl
Altm
Altn
Alto
Altp
Altq
Altr
Alts
Altt
Altu
Altv
Altw
Altx
Alty
Altz
AltOpenCurly
AltPipe
AltCloseCurly
AltTilde
AltBS
//End printable
// actual number values below are arbitrary and not in order.
// Extended keyCodes
Del
AltDel
BTab
BSpace
PgUp
PgDn
Up
Down
Right
Left
Home
End
// /relative/ order of the next 8 matter
SUp // Shift
SDown
SRight
SLeft
CtrlUp
CtrlDown
CtrlRight
CtrlLeft
// don't actually need to be in order
F1
F2
F3
F4
F5
F6
F7
F8
F9
F10
F11
F12
//sequential calculated. Keep in relative order
CtrlAlta
CtrlAltb
CtrlAltc
CtrlAltd
CtrlAlte
CtrlAltf
CtrlAltg
CtrlAlth
CtrlAlti
CtrlAltj
CtrlAltk
CtrlAltl
CtrlAltm
CtrlAltn
CtrlAlto
CtrlAltp
CtrlAltq
CtrlAltr
CtrlAlts
CtrlAltt
CtrlAltu
CtrlAltv
CtrlAltw
CtrlAltx
CtrlAlty |
type EvType uint8
const (
EventInvalid EvType = iota
KeySpecial
KeyPrint
Mouse
)
/*
How to determine mouse action:
Mousedown: Type=Mouse && Btn != 3 && !Motion
Mouseup: Type=Mouse && Btn == 3 && !Motion
Mousedrag: Type=Mouse && Btn != 3 && Motion
Mousemove: Type=Mouse && Btn == 3 && Motion
ScrollUp: Type=Mouse && Btn=4
ScrollDn: Type=Mouse && Btn=5
*/
type MouseEvent struct {
Y int
X int
Btn int // 0=Primary, 1=Middle, 2=Right, 3=Release, 4=ScrUp, 5=ScrDown
Shift bool
Meta bool
Ctrl bool
Motion bool
buf []byte
}
type Event struct {
Type EvType
Key rune
M *MouseEvent
}
// returns true if a specific character int(rune) is a printable character (alphanumeric, punctuation)
func Printable(i int) bool { return i >= 32 && i <= 126 }
/*
Grabs Stdin(or whatever passed fd) to listen for keyboard input. Returns 3 things:
- a channel to listen for key events on
- a terminal restore function. Always safe to call, especially when error is set
- error condition
This is the primary use of the top-level tui package, if you intend to capture input, or mouse events
*/
func GetInput(ctx context.Context, fd int) (<-chan Event, func() error, error) {
ch := make(chan Event, 1000)
st, err := terminal.GetState(fd)
if err != nil {
return nil, func() error { return nil }, err
}
restore := func() error { return terminal.Restore(fd, st) }
_, err = terminal.MakeRaw(fd)
if err != nil {
return nil, restore, err
}
ib := inputBuf{b: make([]byte, 0, 9)}
go func() {
for {
select {
case <-ctx.Done():
return
case ev := <-ib.readEvent(fd):
ch <- ev
}
}
}()
return ch, restore, nil
}
type inputBuf struct {
b []byte
mu sync.Mutex
}
func (ib *inputBuf) readEvent(fd int) <-chan Event {
ch := make(chan Event)
go func() {
ib.mu.Lock()
defer func() {
ib.mu.Unlock()
}()
for ; len(ib.b) == 0; ib.b = fillBuf(fd, ib.b) {
}
if len(ib.b) == 0 {
close(ch)
return
}
sz := 1
defer func() {
ib.b = ib.b[sz:]
}()
switch ib.b[0] {
case byte(CtrlC), byte(CtrlG), byte(CtrlQ):
ch <- Event{KeySpecial, rune(ib.b[0]), nil}
return
case 127:
ch <- Event{KeySpecial, BSpace, nil}
return
case 0:
ch <- Event{KeySpecial, Null, nil} // Ctrl-space?
return
case byte(ESC):
ch <- ib.escSequence(&sz)
return
}
if ib.b[0] < 32 { // Ctrl-A_Z
ch <- Event{KeySpecial, rune(ib.b[0]), nil}
return
}
char, rsz := utf8.DecodeRune(ib.b)
if char == utf8.RuneError {
ch <- Event{KeySpecial, ESC, nil}
return
}
sz = rsz
ch <- Event{KeyPrint, char, nil}
}()
return ch
}
/*
* Gets first byte, blocking to do so.
* Tries to get any extra bytes within a 100ms timespan
* like esc key sequences (arrows, etc)
*
*/
func fillBuf(fd int, buf []byte) []byte {
const pollInt = 5 //ms
const span = 100 //ms -- reflected via retries*pollInt
c, ok := getchar(fd, false)
if !ok {
return buf
}
buf = append(buf, byte(c))
retries := 0
if c == int(ESC) {
retries = span / pollInt // 20
}
pc := c
for {
c, ok := getchar(fd, true)
if !ok {
if retries > 0 {
retries--
time.Sleep(pollInt * time.Millisecond)
continue
}
break
} else if c == int(ESC) && pc != c {
retries = span / pollInt // got the next char, keep going
} else {
retries = 0
}
buf = append(buf, byte(c))
pc = c
}
return buf
}
func getchar(fd int, nonblock bool) (int, bool) {
b := make([]byte, 1)
err := setNonBlock(fd, nonblock)
if err != nil {
return 0, false
}
if n, err := sysRead(fd, b); err != nil || n < 1 {
return 0, false
}
return int(b[0]), true
}
//@todo: more shift/ctrl/alt of extended keys like Home, F#, PgUp
//http://www.manmrk.net/tutorials/ISPF/XE/xehelp/html/HID00000594.htm
//this is the ugliest code ever: it checks the seemingly most random
//assignment of codes to meaningful keys
func (ib *inputBuf) escSequence(sz *int) Event {
if len(ib.b) < 2 {
return Event{KeySpecial, ESC, nil}
}
*sz = 2
switch ib.b[1] {
case byte(ESC):
return Event{KeySpecial, ESC, nil}
case 127:
return Event{KeySpecial, AltBS, nil}
case 91, 79: // [, O
if len(ib.b) < 3 {
if ib.b[1] == '[' {
return Event{KeySpecial, AltOpenBracket, nil}
} else if ib.b[1] == 'O' {
return Event{KeySpecial, AltO, nil}
}
return debugEv(ib.b)
}
*sz = 3
switch ib.b[2] {
case 65:
return Event{KeySpecial, Up, nil}
case 66:
return Event{KeySpecial, Down, nil}
case 67:
return Event{KeySpecial, Right, nil}
case 68:
return Event{KeySpecial, Left, nil}
case 90:
return Event{KeySpecial, BTab, nil}
case 72:
return Event{KeySpecial, Home, nil}
case 70:
return Event{Key | CtrlAltz
) | random_line_split |