file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
mod.rs | enum FluentValue<'source> {
String(Cow<'source, str>),
Number(FluentNumber),
Custom(Box<dyn FluentType + Send>),
None,
Error,
}
impl<'s> PartialEq for FluentValue<'s> {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(FluentValue::String(s), FluentValue::String(s2)) => s == s2,
(FluentValue::Number(s), FluentValue::Number(s2)) => s == s2,
(FluentValue::Custom(s), FluentValue::Custom(s2)) => s == s2,
_ => false,
}
}
}
impl<'s> Clone for FluentValue<'s> {
fn clone(&self) -> Self {
match self {
FluentValue::String(s) => FluentValue::String(s.clone()),
FluentValue::Number(s) => FluentValue::Number(s.clone()),
FluentValue::Custom(s) => {
let new_value: Box<dyn FluentType + Send> = s.duplicate();
FluentValue::Custom(new_value)
}
FluentValue::Error => FluentValue::Error,
FluentValue::None => FluentValue::None,
}
}
}
impl<'source> FluentValue<'source> {
/// Attempts to parse the string representation of a `value` that supports
/// [`ToString`] into a [`FluentValue::Number`]. If it fails, it will instead
/// convert it to a [`FluentValue::String`].
///
/// ```
/// use fluent_bundle::types::{FluentNumber, FluentNumberOptions, FluentValue};
///
/// // "2" parses into a `FluentNumber`
/// assert_eq!(
/// FluentValue::try_number("2"),
/// FluentValue::Number(FluentNumber::new(2.0, FluentNumberOptions::default()))
/// );
///
/// // Floats can be parsed as well.
/// assert_eq!(
/// FluentValue::try_number("3.141569"),
/// FluentValue::Number(FluentNumber::new(
/// 3.141569,
/// FluentNumberOptions {
/// minimum_fraction_digits: Some(6),
/// ..Default::default()
/// }
/// ))
/// );
///
/// // When a value is not a valid number, it falls back to a `FluentValue::String`
/// assert_eq!(
/// FluentValue::try_number("A string"),
/// FluentValue::String("A string".into())
/// );
/// ```
pub fn try_number(value: &'source str) -> Self {
if let Ok(number) = FluentNumber::from_str(value) {
number.into()
} else {
value.into()
}
}
/// Checks to see if two [`FluentValues`](FluentValue) match each other by having the
/// same type and contents. The special exception is in the case of a string being
/// compared to a number. Here attempt to check that the plural rule category matches.
///
/// ```
/// use fluent_bundle::resolver::Scope;
/// use fluent_bundle::{types::FluentValue, FluentBundle, FluentResource};
/// use unic_langid::langid;
///
/// let langid_ars = langid!("en");
/// let bundle: FluentBundle<FluentResource> = FluentBundle::new(vec![langid_ars]);
/// let scope = Scope::new(&bundle, None, None);
///
/// // Matching examples:
/// assert!(FluentValue::try_number("2").matches(&FluentValue::try_number("2"), &scope));
/// assert!(FluentValue::from("fluent").matches(&FluentValue::from("fluent"), &scope));
/// assert!(
/// FluentValue::from("one").matches(&FluentValue::try_number("1"), &scope),
/// "Plural rules are matched."
/// );
///
/// // Non-matching examples:
/// assert!(!FluentValue::try_number("2").matches(&FluentValue::try_number("3"), &scope));
/// assert!(!FluentValue::from("fluent").matches(&FluentValue::from("not fluent"), &scope));
/// assert!(!FluentValue::from("two").matches(&FluentValue::try_number("100"), &scope),);
/// ```
pub fn matches<R: Borrow<FluentResource>, M>(
&self,
other: &FluentValue,
scope: &Scope<R, M>,
) -> bool
where
M: MemoizerKind,
{
match (self, other) {
(&FluentValue::String(ref a), &FluentValue::String(ref b)) => a == b,
(&FluentValue::Number(ref a), &FluentValue::Number(ref b)) => a == b,
(&FluentValue::String(ref a), &FluentValue::Number(ref b)) => {
let cat = match a.as_ref() {
"zero" => PluralCategory::ZERO,
"one" => PluralCategory::ONE,
"two" => PluralCategory::TWO,
"few" => PluralCategory::FEW,
"many" => PluralCategory::MANY,
"other" => PluralCategory::OTHER,
_ => return false,
};
// This string matches a plural rule keyword. Check if the number
// matches the plural rule category.
scope
.bundle
.intls
.with_try_get_threadsafe::<PluralRules, _, _>(
(PluralRuleType::CARDINAL,),
|pr| pr.0.select(b) == Ok(cat),
)
.unwrap()
}
_ => false,
}
}
/// Write out a string version of the [`FluentValue`] to `W`.
pub fn write<W, R, M>(&self, w: &mut W, scope: &Scope<R, M>) -> fmt::Result
where
W: fmt::Write,
R: Borrow<FluentResource>,
M: MemoizerKind,
{
if let Some(formatter) = &scope.bundle.formatter {
if let Some(val) = formatter(self, &scope.bundle.intls) {
return w.write_str(&val);
}
}
match self {
FluentValue::String(s) => w.write_str(s),
FluentValue::Number(n) => w.write_str(&n.as_string()),
FluentValue::Custom(s) => w.write_str(&scope.bundle.intls.stringify_value(&**s)),
FluentValue::Error => Ok(()),
FluentValue::None => Ok(()),
}
}
/// Converts the [`FluentValue`] to a string.
///
/// Clones inner values when owned, borrowed data is not cloned.
/// Prefer using [`FluentValue::into_string()`] when possible.
pub fn as_string<R: Borrow<FluentResource>, M>(&self, scope: &Scope<R, M>) -> Cow<'source, str>
where
M: MemoizerKind,
{
if let Some(formatter) = &scope.bundle.formatter {
if let Some(val) = formatter(self, &scope.bundle.intls) {
return val.into();
}
}
match self {
FluentValue::String(s) => s.clone(),
FluentValue::Number(n) => n.as_string(),
FluentValue::Custom(s) => scope.bundle.intls.stringify_value(&**s),
FluentValue::Error => "".into(),
FluentValue::None => "".into(),
}
}
/// Converts the [`FluentValue`] to a string.
///
/// Takes self by-value to be able to skip expensive clones.
/// Prefer this method over [`FluentValue::as_string()`] when possible.
pub fn into_string<R: Borrow<FluentResource>, M>(self, scope: &Scope<R, M>) -> Cow<'source, str>
where
M: MemoizerKind,
{
if let Some(formatter) = &scope.bundle.formatter {
if let Some(val) = formatter(&self, &scope.bundle.intls) {
return val.into();
}
}
match self {
FluentValue::String(s) => s,
FluentValue::Number(n) => n.as_string(),
FluentValue::Custom(s) => scope.bundle.intls.stringify_value(s.as_ref()),
FluentValue::Error => "".into(),
FluentValue::None => "".into(),
}
}
pub fn into_owned<'a>(&self) -> FluentValue<'a> {
match self {
FluentValue::String(str) => FluentValue::String(Cow::from(str.to_string())),
FluentValue::Number(s) => FluentValue::Number(s.clone()),
FluentValue::Custom(s) => FluentValue::Custom(s.duplicate()),
FluentValue::Error => FluentValue::Error,
FluentValue::None => FluentValue::None,
}
}
}
impl<'source> From<String> for FluentValue<'source> {
fn from(s: String) -> Self {
FluentValue::String(s.into())
}
}
impl<'source> From<&'source String> for FluentValue<'source> {
fn from(s: &'source String) -> Self | {
FluentValue::String(s.into())
} | identifier_body |
|
mod.rs | uentResource;
/// Custom types can implement the [`FluentType`] trait in order to generate a string
/// value for use in the message generation process.
pub trait FluentType: fmt::Debug + AnyEq + 'static {
/// Create a clone of the underlying type.
fn duplicate(&self) -> Box<dyn FluentType + Send>;
/// Convert the custom type into a string value, for instance a custom DateTime
/// type could return "Oct. 27, 2022".
fn as_string(&self, intls: &intl_memoizer::IntlLangMemoizer) -> Cow<'static, str>;
/// Convert the custom type into a string value, for instance a custom DateTime
/// type could return "Oct. 27, 2022". This operation is provided the threadsafe
/// [IntlLangMemoizer](intl_memoizer::concurrent::IntlLangMemoizer).
fn as_string_threadsafe(
&self,
intls: &intl_memoizer::concurrent::IntlLangMemoizer,
) -> Cow<'static, str>;
}
impl PartialEq for dyn FluentType + Send {
fn eq(&self, other: &Self) -> bool {
self.equals(other.as_any())
}
}
pub trait AnyEq: Any + 'static {
fn equals(&self, other: &dyn Any) -> bool;
fn as_any(&self) -> &dyn Any;
}
impl<T: Any + PartialEq> AnyEq for T {
fn equals(&self, other: &dyn Any) -> bool {
other
.downcast_ref::<Self>()
.map_or(false, |that| self == that)
}
fn as_any(&self) -> &dyn Any {
self
}
}
/// The `FluentValue` enum represents values which can be formatted to a String.
///
/// Those values are either passed as arguments to [`FluentBundle::format_pattern`] or
/// produced by functions, or generated in the process of pattern resolution.
///
/// [`FluentBundle::format_pattern`]: crate::bundle::FluentBundle::format_pattern
#[derive(Debug)]
pub enum FluentValue<'source> {
String(Cow<'source, str>),
Number(FluentNumber),
Custom(Box<dyn FluentType + Send>),
None,
Error,
}
impl<'s> PartialEq for FluentValue<'s> {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(FluentValue::String(s), FluentValue::String(s2)) => s == s2,
(FluentValue::Number(s), FluentValue::Number(s2)) => s == s2,
(FluentValue::Custom(s), FluentValue::Custom(s2)) => s == s2,
_ => false,
}
}
}
impl<'s> Clone for FluentValue<'s> {
fn clone(&self) -> Self {
match self {
FluentValue::String(s) => FluentValue::String(s.clone()),
FluentValue::Number(s) => FluentValue::Number(s.clone()),
FluentValue::Custom(s) => {
let new_value: Box<dyn FluentType + Send> = s.duplicate();
FluentValue::Custom(new_value)
}
FluentValue::Error => FluentValue::Error,
FluentValue::None => FluentValue::None,
}
}
}
impl<'source> FluentValue<'source> {
/// Attempts to parse the string representation of a `value` that supports
/// [`ToString`] into a [`FluentValue::Number`]. If it fails, it will instead
/// convert it to a [`FluentValue::String`].
///
/// ```
/// use fluent_bundle::types::{FluentNumber, FluentNumberOptions, FluentValue};
///
/// // "2" parses into a `FluentNumber`
/// assert_eq!(
/// FluentValue::try_number("2"),
/// FluentValue::Number(FluentNumber::new(2.0, FluentNumberOptions::default()))
/// );
///
/// // Floats can be parsed as well.
/// assert_eq!(
/// FluentValue::try_number("3.141569"),
/// FluentValue::Number(FluentNumber::new(
/// 3.141569,
/// FluentNumberOptions {
/// minimum_fraction_digits: Some(6),
/// ..Default::default()
/// }
/// ))
/// );
///
/// // When a value is not a valid number, it falls back to a `FluentValue::String`
/// assert_eq!(
/// FluentValue::try_number("A string"),
/// FluentValue::String("A string".into())
/// );
/// ```
pub fn try_number(value: &'source str) -> Self {
if let Ok(number) = FluentNumber::from_str(value) {
number.into()
} else {
value.into()
}
}
/// Checks to see if two [`FluentValues`](FluentValue) match each other by having the
/// same type and contents. The special exception is in the case of a string being
/// compared to a number. Here attempt to check that the plural rule category matches.
///
/// ```
/// use fluent_bundle::resolver::Scope;
/// use fluent_bundle::{types::FluentValue, FluentBundle, FluentResource};
/// use unic_langid::langid;
///
/// let langid_ars = langid!("en");
/// let bundle: FluentBundle<FluentResource> = FluentBundle::new(vec![langid_ars]);
/// let scope = Scope::new(&bundle, None, None);
///
/// // Matching examples:
/// assert!(FluentValue::try_number("2").matches(&FluentValue::try_number("2"), &scope));
/// assert!(FluentValue::from("fluent").matches(&FluentValue::from("fluent"), &scope));
/// assert!(
/// FluentValue::from("one").matches(&FluentValue::try_number("1"), &scope),
/// "Plural rules are matched."
/// );
///
/// // Non-matching examples:
/// assert!(!FluentValue::try_number("2").matches(&FluentValue::try_number("3"), &scope));
/// assert!(!FluentValue::from("fluent").matches(&FluentValue::from("not fluent"), &scope));
/// assert!(!FluentValue::from("two").matches(&FluentValue::try_number("100"), &scope),);
/// ```
pub fn matches<R: Borrow<FluentResource>, M>(
&self,
other: &FluentValue,
scope: &Scope<R, M>,
) -> bool
where
M: MemoizerKind,
{
match (self, other) {
(&FluentValue::String(ref a), &FluentValue::String(ref b)) => a == b,
(&FluentValue::Number(ref a), &FluentValue::Number(ref b)) => a == b,
(&FluentValue::String(ref a), &FluentValue::Number(ref b)) => {
let cat = match a.as_ref() {
"zero" => PluralCategory::ZERO,
"one" => PluralCategory::ONE,
"two" => PluralCategory::TWO,
"few" => PluralCategory::FEW,
"many" => PluralCategory::MANY,
"other" => PluralCategory::OTHER,
_ => return false,
};
// This string matches a plural rule keyword. Check if the number
// matches the plural rule category.
scope
.bundle
.intls
.with_try_get_threadsafe::<PluralRules, _, _>(
(PluralRuleType::CARDINAL,),
|pr| pr.0.select(b) == Ok(cat),
)
.unwrap()
}
_ => false,
}
}
/// Write out a string version of the [`FluentValue`] to `W`.
pub fn write<W, R, M>(&self, w: &mut W, scope: &Scope<R, M>) -> fmt::Result
where
W: fmt::Write,
R: Borrow<FluentResource>,
M: MemoizerKind,
{
if let Some(formatter) = &scope.bundle.formatter {
if let Some(val) = formatter(self, &scope.bundle.intls) {
return w.write_str(&val); | match self {
FluentValue::String(s) => w.write_str(s),
FluentValue::Number(n) => w.write_str(&n.as_string()),
FluentValue::Custom(s) => w.write_str(&scope.bundle.intls.stringify_value(&**s)),
FluentValue::Error => Ok(()),
FluentValue::None => Ok(()),
}
}
/// Converts the [`FluentValue`] to a string.
///
/// Clones inner values when owned, borrowed data is not cloned.
/// Prefer using [`FluentValue::into_string()`] when possible.
pub fn as_string<R: Borrow<FluentResource>, M>(&self, scope: &Scope<R, M>) -> Cow<'source, str>
where
M: MemoizerKind,
{
if let Some(formatter) = &scope.bundle.formatter {
if let Some(val) | }
} | random_line_split |
simplesnake.js | NOTE: the code is styled in a very specific but not very popular
* style in part to illustrate different approaches to code
* organization and structuring.
*
*
*
**********************************************************************/
var VERSION = '2.0'
/*********************************************************************/
function makeEvent(handler_attr){
return function(func){
if(func === null){
delete this[handler_attr]
} else if(func instanceof Function){
var handlers = this[handler_attr] = this[handler_attr] || []
handlers.push(func)
} else {
var that = this
var args = [].slice.call(arguments)
this[handler_attr]
&& this[handler_attr]
.forEach(function(handler){
handler.apply(that, args) }) }
return this } }
var Snake = {
config: {
field_size: 32,
interval: 150,
},
_field: null,
_cells: null,
players: null,
field_size: null,
get random_point(){
var cells = this._cells
var l = cells.length
var w = this.field_size.width
do {
var i = Math.floor(Math.random() * l)
} while(cells[i].classList.length > 0)
return {
x: i%w,
y: Math.floor(i/w),
} },
get random_direction(){
return ('nesw')[Math.floor(Math.random() * 4)] },
// utils...
call: function(func){
return func.apply(this, [].slice.call(arguments, 1)) },
apply: function(func, args){
return func.apply(this, args) },
normalize_point: function(point){
point = point || {}
var w = this.field_size.width
var x = point.x % w
x = x < 0 ? (x + w) : x
var h = this.field_size.height
var y = point.y % h
y = y < 0 ? (y + h) : y
return { x, y } },
// system...
setup: function(field, size, interval){
this.config.field_size = size || this.config.field_size
this.config.interval = interval || this.config.interval
field = field || this._field
field = this._field = typeof(field) == typeof('str') ?
document.querySelector(field)
: field
this._make_field()
this._cells = [].slice.call(field.querySelectorAll('td'))
this.field_size = {
width: field.querySelector('tr').querySelectorAll('td').length,
height: field.querySelectorAll('tr').length,
}
this.players = {}
return this
.appleEaten(null)
.snakeKilled(null) },
_make_field: function(w){
var l = []
l.length = w || this.config.field_size
l.fill('<td/>')
this._field.innerHTML =
`<table class="field" cellspacing="0">\n${
l.map(function(){
return ` <tr> ${ l.join('') } </tr>`
}).join('\n')
}\n</table>` },
_tick: function(){
var that = this
var l = this._cells.length
var w = this.field_size.width
var h = this.field_size.height
var tick = this.__tick = (this.__tick + 1 || 0)
var directions = 'neswn'
this._cells.forEach(function(cell, i){
var color = cell.style.backgroundColor
// skip cells we touched on this tick...
if(cell.tick == tick){
return }
// snake...
if(cell.age != null){
// handle cell age...
if(cell.age == 0){
delete cell.age
cell.classList.remove('snake')
cell.style.backgroundColor = ''
} else {
cell.age -= 1 }
// snake head -> move...
var direction = cell.direction
if(directions.indexOf(direction) >= 0){
// turn...
if(that.players[color] != ''){
var turn = that.players[color] || ''
var j = turn == 'left' ?
directions.indexOf(direction) - 1
: directions.indexOf(direction) + 1
j = j < 0 ? 3 : j
direction = directions[j]
that.players[color] = '' }
// get next cell index...
var next =
direction == 'n' ?
(i < w ?
l - w + i
: i - w)
: direction == 's' ?
(i > (l-w-1) ?
i - (l-w)
: i + w)
: direction == 'e' ?
((i+1)%w == 0 ?
i - (w-1)
: i + 1)
: (i%w == 0 ?
i + (w-1)
: i - 1)
next = that._cells[next]
var age = cell.age
var move = false
// special case: other snake's head -> kill both...
if(next.direction){
var other = next.style.backgroundColor
next.classList.remove('snake')
next.style.backgroundColor = ''
// NOTE: we are not deleteing .direction here as
// we can have upto 4 snakes colliding...
next.direction = ''
that.snakeKilled(other, next.age+1)
that.snakeKilled(color, age+2)
delete next.age
// apple -> increment age...
} else if(next.classList.contains('apple')){
age += 1
move = true
next.classList.remove('apple')
that.appleEaten(color, age+2)
// empty -> just move...
} else if(next.classList.length == 0){
move = true
// other -> kill...
} else {
that.snakeKilled(color, age+2) }
// do the move...
if(move){
next.tick = tick
next.style.backgroundColor = color
next.classList.add('snake')
next.age = age + 1
next.direction = direction }
delete cell.direction } }
cell.tick = tick })
this.tick(tick) },
// constructors...
snake: function(color, age, point, direction){
point = this.normalize_point(point || this.random_point)
var head = this._cells[point.x + point.y * this.field_size.width]
head.style.backgroundColor = color
head.classList.add('snake')
head.direction = direction
|| this.random_direction
head.age = (age || 5) - 1
this.players[color] = ''
return this
.snakeBorn(color) },
apple: function(point){
point = this.normalize_point(point || this.random_point)
var c = this._cells[point.x + point.y * this.field_size.width]
c.classList.add('apple')
c.style.backgroundColor = ''
return this },
wall: function(point, direction, length){
direction = direction
|| this.random_direction
point = this.normalize_point(point || this.random_point)
var x = point.x
var y = point.y
|
while(length > 0){
var c = this._cells[x + y * this.field_size.width]
c.classList.add('wall')
c.style.backgroundColor = ''
x += direction == 'e' ?
1
: direction == 'w' ?
-1
: 0
x = x < 0 ?
this.field_size.width + x
: x % this.field_size.width
y += direction == 'n' ?
-1
: direction == 's' ?
1
: 0
y = y < 0 ?
this.field_size.height + y
: y % this.field_size.height
length -= 1 }
return this },
level: function(level){
var that = this
level.forEach(function(wall){
that.wall.apply(that, wall) })
return this },
// events...
snakeKilled: makeEvent('__killHandlers'),
snakeBorn: makeEvent('__birthHandlers'),
appleEaten: makeEvent('__appleEatenHandlers'),
tick: makeEvent('__tickHandlers'),
gameStarted: makeEvent('__startHandlers'),
gameStopped: makeEvent('__stopHandlers'),
// actions...
start: function(t){
this.__timer = this.__timer
|| setInterval(
this._tick.bind(this),
t
|| this.config.interval
|| 200)
// reset player control actions...
var that = this
Object.keys(this.players)
.forEach(function(k){
that.players[k] = '' })
return this
.tick()
.gameStarted() },
stop: function(){
clearInterval(this.__timer)
delete this.__timer
delete this.__tick
return this
.gameStopped() },
pause: function(){
return this.__timer ?
this.stop()
: this.start() },
left: function(color){ | length = length
|| Math.random() * this.field_size.width
| random_line_split |
simplesnake.js | ''
var j = turn == 'left' ?
directions.indexOf(direction) - 1
: directions.indexOf(direction) + 1
j = j < 0 ? 3 : j
direction = directions[j]
that.players[color] = '' }
// get next cell index...
var next =
direction == 'n' ?
(i < w ?
l - w + i
: i - w)
: direction == 's' ?
(i > (l-w-1) ?
i - (l-w)
: i + w)
: direction == 'e' ?
((i+1)%w == 0 ?
i - (w-1)
: i + 1)
: (i%w == 0 ?
i + (w-1)
: i - 1)
next = that._cells[next]
var age = cell.age
var move = false
// special case: other snake's head -> kill both...
if(next.direction){
var other = next.style.backgroundColor
next.classList.remove('snake')
next.style.backgroundColor = ''
// NOTE: we are not deleteing .direction here as
// we can have upto 4 snakes colliding...
next.direction = ''
that.snakeKilled(other, next.age+1)
that.snakeKilled(color, age+2)
delete next.age
// apple -> increment age...
} else if(next.classList.contains('apple')){
age += 1
move = true
next.classList.remove('apple')
that.appleEaten(color, age+2)
// empty -> just move...
} else if(next.classList.length == 0){
move = true
// other -> kill...
} else {
that.snakeKilled(color, age+2) }
// do the move...
if(move){
next.tick = tick
next.style.backgroundColor = color
next.classList.add('snake')
next.age = age + 1
next.direction = direction }
delete cell.direction } }
cell.tick = tick })
this.tick(tick) },
// constructors...
snake: function(color, age, point, direction){
point = this.normalize_point(point || this.random_point)
var head = this._cells[point.x + point.y * this.field_size.width]
head.style.backgroundColor = color
head.classList.add('snake')
head.direction = direction
|| this.random_direction
head.age = (age || 5) - 1
this.players[color] = ''
return this
.snakeBorn(color) },
apple: function(point){
point = this.normalize_point(point || this.random_point)
var c = this._cells[point.x + point.y * this.field_size.width]
c.classList.add('apple')
c.style.backgroundColor = ''
return this },
wall: function(point, direction, length){
direction = direction
|| this.random_direction
point = this.normalize_point(point || this.random_point)
var x = point.x
var y = point.y
length = length
|| Math.random() * this.field_size.width
while(length > 0){
var c = this._cells[x + y * this.field_size.width]
c.classList.add('wall')
c.style.backgroundColor = ''
x += direction == 'e' ?
1
: direction == 'w' ?
-1
: 0
x = x < 0 ?
this.field_size.width + x
: x % this.field_size.width
y += direction == 'n' ?
-1
: direction == 's' ?
1
: 0
y = y < 0 ?
this.field_size.height + y
: y % this.field_size.height
length -= 1 }
return this },
level: function(level){
var that = this
level.forEach(function(wall){
that.wall.apply(that, wall) })
return this },
// events...
snakeKilled: makeEvent('__killHandlers'),
snakeBorn: makeEvent('__birthHandlers'),
appleEaten: makeEvent('__appleEatenHandlers'),
tick: makeEvent('__tickHandlers'),
gameStarted: makeEvent('__startHandlers'),
gameStopped: makeEvent('__stopHandlers'),
// actions...
start: function(t){
this.__timer = this.__timer
|| setInterval(
this._tick.bind(this),
t
|| this.config.interval
|| 200)
// reset player control actions...
var that = this
Object.keys(this.players)
.forEach(function(k){
that.players[k] = '' })
return this
.tick()
.gameStarted() },
stop: function(){
clearInterval(this.__timer)
delete this.__timer
delete this.__tick
return this
.gameStopped() },
pause: function(){
return this.__timer ?
this.stop()
: this.start() },
left: function(color){
this.players[color || Object.keys(this.players)[0]] = 'left'
return this },
right: function(color){
this.players[color || Object.keys(this.players)[0]] = 'right'
return this },
}
/*********************************************************************/
// control event handlers...
var KEY_CONFIG = {
' ': ['pause'],
n: setup,
ArrowLeft: ['left'],
ArrowRight: ['right'],
// IE compatibility...
Left: ['left'],
Right: ['right'],
'?': function(){
this
.stop()
.call(showHints) },
}
function makeKeyboardHandler(snake){
return function(event){
clearHints()
var action = KEY_CONFIG[event.key]
action
&& (action instanceof Function ?
action.call(snake)
: action[0] in snake ?
snake[action[0]].apply(snake, action.slice(1))
: null) }}
var __DEBOUNCE = false
var __DEBOUNCE_TIMEOUT = 100
function makeTapHandler(snake){
return function(event){
// prevent clicks and touches from triggering the same action
// twice -- only handle the first one within timeout...
// NOTE: this should not affect events of the same type...
if(__DEBOUNCE && event.type != __DEBOUNCE){
return }
__DEBOUNCE = event.type
setTimeout(function(){ __DEBOUNCE = false }, __DEBOUNCE_TIMEOUT)
clearHints()
// top of screen (1/8)...
;(event.clientY
|| event.changedTouches[0].pageY) <= (window.innerHeight / 8) ?
setup()
// bottom of screen 1/8...
: (event.clientY
|| event.changedTouches[0].pageY) >= (window.innerHeight / 8)*7 ?
Snake.pause()
// left/right of screen...
: (event.clientX
|| event.changedTouches[0].pageX) <= (window.innerWidth / 2) ?
Snake.left()
: Snake.right() }}
//---------------------------------------------------------------------
// misc stuff...
function showHints(){
document.body.classList.add('hints') }
function clearHints(){
document.body.classList.remove('hints') }
function digitizeBackground(snake, walls){
snake._cells.forEach(function(c){
var v = Math.floor(Math.random() * 6)
// bg cell...
c.classList.length == 0 ?
(c.style.backgroundColor =
`rgb(${255 - v}, ${255 - v}, ${255 - v})`)
// wall...
: walls && c.classList.contains('wall') ?
(c.style.backgroundColor =
`rgb(${220 - v*2}, ${220 - v*2}, ${220 - v*2})`)
// skip the rest...
: null })
return snake }
//---------------------------------------------------------------------
var __CACHE_UPDATE_CHECK = 5*60*1000
var __HANDLER_SET = false
function setup(snake, timer, size){
snake = snake || Snake
// levels...
var A = Math.round((size || snake.config.field_size)/8)
var Level = {
W3: [
[null, null, A*6],
[null, null, A*6],
[null, null, A*6],
],
Halves: [
[null, null, A*8],
],
Quarters: [
[null, 's', A*8],
[null, 'e', A*8],
],
Random3: [[], [], []],
get random(){
var l = Object.keys(this)
.filter(function(e){
return e != 'random' })
do {
var level = this[l[ Math.round(Math.random()*l.length) ]]
} while(!(level instanceof Array))
return level },
}
function | showScore | identifier_name |
|
simplesnake.js | .querySelector(field)
: field
this._make_field()
this._cells = [].slice.call(field.querySelectorAll('td'))
this.field_size = {
width: field.querySelector('tr').querySelectorAll('td').length,
height: field.querySelectorAll('tr').length,
}
this.players = {}
return this
.appleEaten(null)
.snakeKilled(null) },
_make_field: function(w){
var l = []
l.length = w || this.config.field_size
l.fill('<td/>')
this._field.innerHTML =
`<table class="field" cellspacing="0">\n${
l.map(function(){
return ` <tr> ${ l.join('') } </tr>`
}).join('\n')
}\n</table>` },
_tick: function(){
var that = this
var l = this._cells.length
var w = this.field_size.width
var h = this.field_size.height
var tick = this.__tick = (this.__tick + 1 || 0)
var directions = 'neswn'
this._cells.forEach(function(cell, i){
var color = cell.style.backgroundColor
// skip cells we touched on this tick...
if(cell.tick == tick){
return }
// snake...
if(cell.age != null){
// handle cell age...
if(cell.age == 0){
delete cell.age
cell.classList.remove('snake')
cell.style.backgroundColor = ''
} else {
cell.age -= 1 }
// snake head -> move...
var direction = cell.direction
if(directions.indexOf(direction) >= 0){
// turn...
if(that.players[color] != ''){
var turn = that.players[color] || ''
var j = turn == 'left' ?
directions.indexOf(direction) - 1
: directions.indexOf(direction) + 1
j = j < 0 ? 3 : j
direction = directions[j]
that.players[color] = '' }
// get next cell index...
var next =
direction == 'n' ?
(i < w ?
l - w + i
: i - w)
: direction == 's' ?
(i > (l-w-1) ?
i - (l-w)
: i + w)
: direction == 'e' ?
((i+1)%w == 0 ?
i - (w-1)
: i + 1)
: (i%w == 0 ?
i + (w-1)
: i - 1)
next = that._cells[next]
var age = cell.age
var move = false
// special case: other snake's head -> kill both...
if(next.direction){
var other = next.style.backgroundColor
next.classList.remove('snake')
next.style.backgroundColor = ''
// NOTE: we are not deleteing .direction here as
// we can have upto 4 snakes colliding...
next.direction = ''
that.snakeKilled(other, next.age+1)
that.snakeKilled(color, age+2)
delete next.age
// apple -> increment age...
} else if(next.classList.contains('apple')){
age += 1
move = true
next.classList.remove('apple')
that.appleEaten(color, age+2)
// empty -> just move...
} else if(next.classList.length == 0){
move = true
// other -> kill...
} else {
that.snakeKilled(color, age+2) }
// do the move...
if(move){
next.tick = tick
next.style.backgroundColor = color
next.classList.add('snake')
next.age = age + 1
next.direction = direction }
delete cell.direction } }
cell.tick = tick })
this.tick(tick) },
// constructors...
snake: function(color, age, point, direction){
point = this.normalize_point(point || this.random_point)
var head = this._cells[point.x + point.y * this.field_size.width]
head.style.backgroundColor = color
head.classList.add('snake')
head.direction = direction
|| this.random_direction
head.age = (age || 5) - 1
this.players[color] = ''
return this
.snakeBorn(color) },
apple: function(point){
point = this.normalize_point(point || this.random_point)
var c = this._cells[point.x + point.y * this.field_size.width]
c.classList.add('apple')
c.style.backgroundColor = ''
return this },
wall: function(point, direction, length){
direction = direction
|| this.random_direction
point = this.normalize_point(point || this.random_point)
var x = point.x
var y = point.y
length = length
|| Math.random() * this.field_size.width
while(length > 0){
var c = this._cells[x + y * this.field_size.width]
c.classList.add('wall')
c.style.backgroundColor = ''
x += direction == 'e' ?
1
: direction == 'w' ?
-1
: 0
x = x < 0 ?
this.field_size.width + x
: x % this.field_size.width
y += direction == 'n' ?
-1
: direction == 's' ?
1
: 0
y = y < 0 ?
this.field_size.height + y
: y % this.field_size.height
length -= 1 }
return this },
level: function(level){
var that = this
level.forEach(function(wall){
that.wall.apply(that, wall) })
return this },
// events...
snakeKilled: makeEvent('__killHandlers'),
snakeBorn: makeEvent('__birthHandlers'),
appleEaten: makeEvent('__appleEatenHandlers'),
tick: makeEvent('__tickHandlers'),
gameStarted: makeEvent('__startHandlers'),
gameStopped: makeEvent('__stopHandlers'),
// actions...
start: function(t){
this.__timer = this.__timer
|| setInterval(
this._tick.bind(this),
t
|| this.config.interval
|| 200)
// reset player control actions...
var that = this
Object.keys(this.players)
.forEach(function(k){
that.players[k] = '' })
return this
.tick()
.gameStarted() },
stop: function(){
clearInterval(this.__timer)
delete this.__timer
delete this.__tick
return this
.gameStopped() },
pause: function(){
return this.__timer ?
this.stop()
: this.start() },
left: function(color){
this.players[color || Object.keys(this.players)[0]] = 'left'
return this },
right: function(color){
this.players[color || Object.keys(this.players)[0]] = 'right'
return this },
}
/*********************************************************************/
// control event handlers...
var KEY_CONFIG = {
' ': ['pause'],
n: setup,
ArrowLeft: ['left'],
ArrowRight: ['right'],
// IE compatibility...
Left: ['left'],
Right: ['right'],
'?': function(){
this
.stop()
.call(showHints) },
}
function makeKeyboardHandler(snake){
return function(event){
clearHints()
var action = KEY_CONFIG[event.key]
action
&& (action instanceof Function ?
action.call(snake)
: action[0] in snake ?
snake[action[0]].apply(snake, action.slice(1))
: null) }}
var __DEBOUNCE = false
var __DEBOUNCE_TIMEOUT = 100
function makeTapHandler(snake){
return function(event){
// prevent clicks and touches from triggering the same action
// twice -- only handle the first one within timeout...
// NOTE: this should not affect events of the same type...
if(__DEBOUNCE && event.type != __DEBOUNCE){
return }
__DEBOUNCE = event.type
setTimeout(function(){ __DEBOUNCE = false }, __DEBOUNCE_TIMEOUT)
clearHints()
// top of screen (1/8)...
;(event.clientY
|| event.changedTouches[0].pageY) <= (window.innerHeight / 8) ?
setup()
// bottom of screen 1/8...
: (event.clientY
|| event.changedTouches[0].pageY) >= (window.innerHeight / 8)*7 ?
Snake.pause()
// left/right of screen...
: (event.clientX
|| event.changedTouches[0].pageX) <= (window.innerWidth / 2) ?
Snake.left()
: Snake.right() }}
//---------------------------------------------------------------------
// misc stuff...
function showHints(){
document.body.classList.add('hints') }
function clearHints() | {
document.body.classList.remove('hints') } | identifier_body |
|
simplesnake.js | NOTE: the code is styled in a very specific but not very popular
* style in part to illustrate different approaches to code
* organization and structuring.
*
*
*
**********************************************************************/
var VERSION = '2.0'
/*********************************************************************/
function makeEvent(handler_attr){
return function(func){
if(func === null){
delete this[handler_attr]
} else if(func instanceof Function){
var handlers = this[handler_attr] = this[handler_attr] || []
handlers.push(func)
} else {
var that = this
var args = [].slice.call(arguments)
this[handler_attr]
&& this[handler_attr]
.forEach(function(handler){
handler.apply(that, args) }) }
return this } }
var Snake = {
config: {
field_size: 32,
interval: 150,
},
_field: null,
_cells: null,
players: null,
field_size: null,
get random_point(){
var cells = this._cells
var l = cells.length
var w = this.field_size.width
do {
var i = Math.floor(Math.random() * l)
} while(cells[i].classList.length > 0)
return {
x: i%w,
y: Math.floor(i/w),
} },
get random_direction(){
return ('nesw')[Math.floor(Math.random() * 4)] },
// utils...
call: function(func){
return func.apply(this, [].slice.call(arguments, 1)) },
apply: function(func, args){
return func.apply(this, args) },
normalize_point: function(point){
point = point || {}
var w = this.field_size.width
var x = point.x % w
x = x < 0 ? (x + w) : x
var h = this.field_size.height
var y = point.y % h
y = y < 0 ? (y + h) : y
return { x, y } },
// system...
setup: function(field, size, interval){
this.config.field_size = size || this.config.field_size
this.config.interval = interval || this.config.interval
field = field || this._field
field = this._field = typeof(field) == typeof('str') ?
document.querySelector(field)
: field
this._make_field()
this._cells = [].slice.call(field.querySelectorAll('td'))
this.field_size = {
width: field.querySelector('tr').querySelectorAll('td').length,
height: field.querySelectorAll('tr').length,
}
this.players = {}
return this
.appleEaten(null)
.snakeKilled(null) },
_make_field: function(w){
var l = []
l.length = w || this.config.field_size
l.fill('<td/>')
this._field.innerHTML =
`<table class="field" cellspacing="0">\n${
l.map(function(){
return ` <tr> ${ l.join('') } </tr>`
}).join('\n')
}\n</table>` },
_tick: function(){
var that = this
var l = this._cells.length
var w = this.field_size.width
var h = this.field_size.height
var tick = this.__tick = (this.__tick + 1 || 0)
var directions = 'neswn'
this._cells.forEach(function(cell, i){
var color = cell.style.backgroundColor
// skip cells we touched on this tick...
if(cell.tick == tick){
return }
// snake...
if(cell.age != null){
// handle cell age...
if(cell.age == 0){
delete cell.age
cell.classList.remove('snake')
cell.style.backgroundColor = ''
} else {
cell.age -= 1 }
// snake head -> move...
var direction = cell.direction
if(directions.indexOf(direction) >= 0){
// turn...
if(that.players[color] != ''){
var turn = that.players[color] || ''
var j = turn == 'left' ?
directions.indexOf(direction) - 1
: directions.indexOf(direction) + 1
j = j < 0 ? 3 : j
direction = directions[j]
that.players[color] = '' }
// get next cell index...
var next =
direction == 'n' ?
(i < w ?
l - w + i
: i - w)
: direction == 's' ?
(i > (l-w-1) ?
i - (l-w)
: i + w)
: direction == 'e' ?
((i+1)%w == 0 ?
i - (w-1)
: i + 1)
: (i%w == 0 ?
i + (w-1)
: i - 1)
next = that._cells[next]
var age = cell.age
var move = false
// special case: other snake's head -> kill both...
if(next.direction){
var other = next.style.backgroundColor
next.classList.remove('snake')
next.style.backgroundColor = ''
// NOTE: we are not deleteing .direction here as
// we can have upto 4 snakes colliding...
next.direction = ''
that.snakeKilled(other, next.age+1)
that.snakeKilled(color, age+2)
delete next.age
// apple -> increment age...
} else if(next.classList.contains('apple')){
age += 1
move = true
next.classList.remove('apple')
that.appleEaten(color, age+2)
// empty -> just move...
} else if(next.classList.length == 0) | else {
that.snakeKilled(color, age+2) }
// do the move...
if(move){
next.tick = tick
next.style.backgroundColor = color
next.classList.add('snake')
next.age = age + 1
next.direction = direction }
delete cell.direction } }
cell.tick = tick })
this.tick(tick) },
// constructors...
snake: function(color, age, point, direction){
point = this.normalize_point(point || this.random_point)
var head = this._cells[point.x + point.y * this.field_size.width]
head.style.backgroundColor = color
head.classList.add('snake')
head.direction = direction
|| this.random_direction
head.age = (age || 5) - 1
this.players[color] = ''
return this
.snakeBorn(color) },
apple: function(point){
point = this.normalize_point(point || this.random_point)
var c = this._cells[point.x + point.y * this.field_size.width]
c.classList.add('apple')
c.style.backgroundColor = ''
return this },
wall: function(point, direction, length){
direction = direction
|| this.random_direction
point = this.normalize_point(point || this.random_point)
var x = point.x
var y = point.y
length = length
|| Math.random() * this.field_size.width
while(length > 0){
var c = this._cells[x + y * this.field_size.width]
c.classList.add('wall')
c.style.backgroundColor = ''
x += direction == 'e' ?
1
: direction == 'w' ?
-1
: 0
x = x < 0 ?
this.field_size.width + x
: x % this.field_size.width
y += direction == 'n' ?
-1
: direction == 's' ?
1
: 0
y = y < 0 ?
this.field_size.height + y
: y % this.field_size.height
length -= 1 }
return this },
level: function(level){
var that = this
level.forEach(function(wall){
that.wall.apply(that, wall) })
return this },
// events...
snakeKilled: makeEvent('__killHandlers'),
snakeBorn: makeEvent('__birthHandlers'),
appleEaten: makeEvent('__appleEatenHandlers'),
tick: makeEvent('__tickHandlers'),
gameStarted: makeEvent('__startHandlers'),
gameStopped: makeEvent('__stopHandlers'),
// actions...
start: function(t){
this.__timer = this.__timer
|| setInterval(
this._tick.bind(this),
t
|| this.config.interval
|| 200)
// reset player control actions...
var that = this
Object.keys(this.players)
.forEach(function(k){
that.players[k] = '' })
return this
.tick()
.gameStarted() },
stop: function(){
clearInterval(this.__timer)
delete this.__timer
delete this.__tick
return this
.gameStopped() },
pause: function(){
return this.__timer ?
this.stop()
: this.start() },
left: function(color){ | {
move = true
// other -> kill...
} | conditional_block |
realtimeLogger.py | .strptime(date_str, '%Y-%m-%d %H:%M:%S.%f')
return date.strftime('%H:%M:%S')
def my_makedirs(path):
if not os.path.isdir(path):
os.makedirs(path)
def df_maker(f,C_mode):
df_list = {'Time':[],'1ch':[],'2ch':[],'3ch':[],'4ch':[]}
index = 0
for row in f:#row is list
if index == 0:
dataday = row[0]
index = 1
else:
if row[0] != 0:
df_list['Time'].append(dataday + row[0])
df_list['1ch'].append(float(row[1]))
df_list['2ch'].append(float(row[2]))
df_list['3ch'].append(float(row[3]))
df_list['4ch'].append(float(row[4]))
return df_list['Time'],df_list['1ch'],df_list['2ch'],df_list['3ch'],df_list['4ch']
def get_Srate(time_dat):
now_time = eliminate_f(time_dat[0])
i = 0
try:
#### load head ####
while(1):
i += 1
if now_time != eliminate_f(time_dat[i]):
now_time = eliminate_f(time_dat[i])
break
#### count ####
count = 0
while(1):
i += 1
count += 1
if now_time != eliminate_f(time_dat[i]):
return count
except IndexError:
return 0
def eliminate_errerdata(Cutoff,time,data):
data_median = median(data)
arg = []
#### search ####
for j in range(len(data)):
if abs(data[j] - data_median) >= Cutoff:
arg.insert(0, j)
#### eliminate ####
for j in arg:
time.pop(j)
data.pop(j)
return time, data
def medianFilter(time_dat,dat,Cutoff):
out = {'time':[],'data':[]}
now_time = eliminate_f(time_dat[0])
buf_t = []
buf_d = []
i = -1
try:
while(1):
i += 1
buf_t.append(time_dat[i])
buf_d.append(dat[i])
if i+1 == len(dat):
clean_data = eliminate_errerdata(Cutoff,buf_t,buf_d)
out['time'].extend(clean_data[0])
out['data'].extend(clean_data[1])
return out['time'], out['data']
if now_time != eliminate_f(time_dat[i+1]):
clean_data = eliminate_errerdata(Cutoff,buf_t,buf_d)
out['time'].extend(clean_data[0])
out['data'].extend(clean_data[1])
buf_t.clear
buf_d.clear
except IndexError:
return time_dat,dat
def _redraw(_, ax, data, C_mode, C_range):
"""グラフを再描画するための関数"""
# 現在のグラフを消去する
plt.cla() |
# print(data[990])
df = df_maker(data,C_mode)
# 折れ線グラフを再描画する
ax_1ch.yaxis.grid(True)
ax_2ch.yaxis.grid(True)
ax_3ch.yaxis.grid(True)
ax_4ch.yaxis.grid(True)
t_1ch = df[0]
d_1ch = df[1]
d_2ch = df[2]
d_3ch = df[3]
d_4ch = df[4]
ax_1ch.set_ylabel('X [nT]', fontsize=18)
ax_2ch.set_ylabel('Y [nT]', fontsize=18)
ax_3ch.set_ylabel('Z [nT]', fontsize=18)
ax_4ch.set_ylabel('Temperature [C]', fontsize=18)
ax_1ch.set_ylim([mean(d_1ch) - (C_range/2),mean(d_1ch) + (C_range/2)])
ax_2ch.set_ylim([mean(d_2ch) - (C_range/2),mean(d_2ch) + (C_range/2)])
ax_3ch.set_ylim([mean(d_3ch) - (C_range/2),mean(d_3ch) + (C_range/2)])
ax_1ch.plot(pd.to_datetime(t_1ch, utc=True), d_1ch, color='r')
ax_2ch.plot(pd.to_datetime(t_1ch, utc=True), d_2ch, color='g')
ax_3ch.plot(pd.to_datetime(t_1ch, utc=True), d_3ch, color='b')
ax_4ch.plot(pd.to_datetime(t_1ch, utc=True), d_4ch, color='k')
ax_1ch.set_title('(JST) ' + 'magnetic force(nT)' + C_mode +"range="+ str(C_range) +"rate="+ str(df[5]))
def ploting(plotData,C_save,C_range,C_mode,C_Drate):
fig = plt.figure(figsize=(12, 12))
ax_1ch = fig.add_subplot(411) #Z axis
ax_2ch = fig.add_subplot(412) #Y axis
ax_3ch = fig.add_subplot(413) #X axis
ax_4ch = fig.add_subplot(414)
while True:
df = df_maker(plotData ,C_mode)
if len(df[0]) > 200:
# 折れ線グラフを再描画する
plt.cla()
ax_1ch.yaxis.grid(True)
ax_2ch.yaxis.grid(True)
ax_3ch.yaxis.grid(True)
ax_4ch.yaxis.grid(True)
t_1ch = df[0]
d_1ch = df[1]
d_2ch = df[2]
d_3ch = df[3]
d_4ch = df[4]
ax_1ch.set_ylabel('X [nT]', fontsize=18)
ax_2ch.set_ylabel('Y [nT]', fontsize=18)
ax_3ch.set_ylabel('Z [nT]', fontsize=18)
ax_4ch.set_ylabel('Temperature [C]', fontsize=18)
ax_1ch.set_ylim([mean(d_1ch) - (C_range/2),mean(d_1ch) + (C_range/2)])
ax_2ch.set_ylim([mean(d_2ch) - (C_range/2),mean(d_2ch) + (C_range/2)])
ax_3ch.set_ylim([mean(d_3ch) - (C_range/2),mean(d_3ch) + (C_range/2)])
ax_1ch.plot(pd.to_datetime(t_1ch, utc=True), d_1ch, color='r')
ax_2ch.plot(pd.to_datetime(t_1ch, utc=True), d_2ch, color='g')
ax_3ch.plot(pd.to_datetime(t_1ch, utc=True), d_3ch, color='b')
ax_4ch.plot(pd.to_datetime(t_1ch, utc=True), d_4ch, color='k')
ax_1ch.set_title('(JST) ' + 'magnetic force(nT)' + C_mode +"range="+ str(C_range) +"rate="+ str(get_Srate(df[0])))
plt.pause(0.01)
def measurement(plotData,C_save,C_Drate):
#1007B:1029A:1024A:LM60
slope = [6.041297912, 6.032822234, 6.024782582, 1.004970735]
intercept = [-15.21584595, -15.20405742, -15.17129194, -0.000415594]
transform = [0.16*0.001, 0.16*0.001, 0.16*0.001, 6.25*0.001]
off_set = [0,0,0,424*0.001]
# 描画するデータ
now = datetime.datetime.now(timezone.utc)
plotData.append(['{0:%Y-%m-%d }'.format(now),
'Magnetic force(nT)_1ch','Magnetic force(nT)_2ch',
'Magnetic force(nT)_3ch','Magnetic force(nT)_4ch'])
ads = ADS1256()
ads.drate = (DRATE_100 if C_Drate == 100 else
DRATE_500 if C_Drate == 500 else
DRATE_100 | print("redraw") | random_line_split |
realtimeLogger.py | .strptime(date_str, '%Y-%m-%d %H:%M:%S.%f')
return date.strftime('%H:%M:%S')
def my_makedirs(path):
if not os.path.isdir(path):
os.makedirs(path)
def df_maker(f,C_mode):
df_list = {'Time':[],'1ch':[],'2ch':[],'3ch':[],'4ch':[]}
index = 0
for row in f:#row is list
if index == 0:
dataday = row[0]
index = 1
else:
if row[0] != 0:
df_list['Time'].append(dataday + row[0])
df_list['1ch'].append(float(row[1]))
df_list['2ch'].append(float(row[2]))
df_list['3ch'].append(float(row[3]))
df_list['4ch'].append(float(row[4]))
return df_list['Time'],df_list['1ch'],df_list['2ch'],df_list['3ch'],df_list['4ch']
def get_Srate(time_dat):
now_time = eliminate_f(time_dat[0])
i = 0
try:
#### load head ####
while(1):
i += 1
if now_time != eliminate_f(time_dat[i]):
now_time = eliminate_f(time_dat[i])
break
#### count ####
count = 0
while(1):
i += 1
count += 1
if now_time != eliminate_f(time_dat[i]):
return count
except IndexError:
return 0
def eliminate_errerdata(Cutoff,time,data):
data_median = median(data)
arg = []
#### search ####
for j in range(len(data)):
if abs(data[j] - data_median) >= Cutoff:
arg.insert(0, j)
#### eliminate ####
for j in arg:
time.pop(j)
data.pop(j)
return time, data
def | (time_dat,dat,Cutoff):
out = {'time':[],'data':[]}
now_time = eliminate_f(time_dat[0])
buf_t = []
buf_d = []
i = -1
try:
while(1):
i += 1
buf_t.append(time_dat[i])
buf_d.append(dat[i])
if i+1 == len(dat):
clean_data = eliminate_errerdata(Cutoff,buf_t,buf_d)
out['time'].extend(clean_data[0])
out['data'].extend(clean_data[1])
return out['time'], out['data']
if now_time != eliminate_f(time_dat[i+1]):
clean_data = eliminate_errerdata(Cutoff,buf_t,buf_d)
out['time'].extend(clean_data[0])
out['data'].extend(clean_data[1])
buf_t.clear
buf_d.clear
except IndexError:
return time_dat,dat
def _redraw(_, ax, data, C_mode, C_range):
"""グラフを再描画するための関数"""
# 現在のグラフを消去する
plt.cla()
print("redraw")
# print(data[990])
df = df_maker(data,C_mode)
# 折れ線グラフを再描画する
ax_1ch.yaxis.grid(True)
ax_2ch.yaxis.grid(True)
ax_3ch.yaxis.grid(True)
ax_4ch.yaxis.grid(True)
t_1ch = df[0]
d_1ch = df[1]
d_2ch = df[2]
d_3ch = df[3]
d_4ch = df[4]
ax_1ch.set_ylabel('X [nT]', fontsize=18)
ax_2ch.set_ylabel('Y [nT]', fontsize=18)
ax_3ch.set_ylabel('Z [nT]', fontsize=18)
ax_4ch.set_ylabel('Temperature [C]', fontsize=18)
ax_1ch.set_ylim([mean(d_1ch) - (C_range/2),mean(d_1ch) + (C_range/2)])
ax_2ch.set_ylim([mean(d_2ch) - (C_range/2),mean(d_2ch) + (C_range/2)])
ax_3ch.set_ylim([mean(d_3ch) - (C_range/2),mean(d_3ch) + (C_range/2)])
ax_1ch.plot(pd.to_datetime(t_1ch, utc=True), d_1ch, color='r')
ax_2ch.plot(pd.to_datetime(t_1ch, utc=True), d_2ch, color='g')
ax_3ch.plot(pd.to_datetime(t_1ch, utc=True), d_3ch, color='b')
ax_4ch.plot(pd.to_datetime(t_1ch, utc=True), d_4ch, color='k')
ax_1ch.set_title('(JST) ' + 'magnetic force(nT)' + C_mode +"range="+ str(C_range) +"rate="+ str(df[5]))
def ploting(plotData,C_save,C_range,C_mode,C_Drate):
fig = plt.figure(figsize=(12, 12))
ax_1ch = fig.add_subplot(411) #Z axis
ax_2ch = fig.add_subplot(412) #Y axis
ax_3ch = fig.add_subplot(413) #X axis
ax_4ch = fig.add_subplot(414)
while True:
df = df_maker(plotData ,C_mode)
if len(df[0]) > 200:
# 折れ線グラフを再描画する
plt.cla()
ax_1ch.yaxis.grid(True)
ax_2ch.yaxis.grid(True)
ax_3ch.yaxis.grid(True)
ax_4ch.yaxis.grid(True)
t_1ch = df[0]
d_1ch = df[1]
d_2ch = df[2]
d_3ch = df[3]
d_4ch = df[4]
ax_1ch.set_ylabel('X [nT]', fontsize=18)
ax_2ch.set_ylabel('Y [nT]', fontsize=18)
ax_3ch.set_ylabel('Z [nT]', fontsize=18)
ax_4ch.set_ylabel('Temperature [C]', fontsize=18)
ax_1ch.set_ylim([mean(d_1ch) - (C_range/2),mean(d_1ch) + (C_range/2)])
ax_2ch.set_ylim([mean(d_2ch) - (C_range/2),mean(d_2ch) + (C_range/2)])
ax_3ch.set_ylim([mean(d_3ch) - (C_range/2),mean(d_3ch) + (C_range/2)])
ax_1ch.plot(pd.to_datetime(t_1ch, utc=True), d_1ch, color='r')
ax_2ch.plot(pd.to_datetime(t_1ch, utc=True), d_2ch, color='g')
ax_3ch.plot(pd.to_datetime(t_1ch, utc=True), d_3ch, color='b')
ax_4ch.plot(pd.to_datetime(t_1ch, utc=True), d_4ch, color='k')
ax_1ch.set_title('(JST) ' + 'magnetic force(nT)' + C_mode +"range="+ str(C_range) +"rate="+ str(get_Srate(df[0])))
plt.pause(0.01)
def measurement(plotData,C_save,C_Drate):
#1007B:1029A:1024A:LM60
slope = [6.041297912, 6.032822234, 6.024782582, 1.004970735]
intercept = [-15.21584595, -15.20405742, -15.17129194, -0.000415594]
transform = [0.16*0.001, 0.16*0.001, 0.16*0.001, 6.25*0.001]
off_set = [0,0,0,424*0.001]
# 描画するデータ
now = datetime.datetime.now(timezone.utc)
plotData.append(['{0:%Y-%m-%d }'.format(now),
'Magnetic force(nT)_1ch','Magnetic force(nT)_2ch',
'Magnetic force(nT)_3ch','Magnetic force(nT)_4ch'])
ads = ADS1256()
ads.drate = (DRATE_100 if C_Drate == 100 else
DRATE_500 if C_Drate == 500 else
DRATE_10 | medianFilter | identifier_name |
realtimeLogger.py | .strptime(date_str, '%Y-%m-%d %H:%M:%S.%f')
return date.strftime('%H:%M:%S')
def my_makedirs(path):
if not os.path.isdir(path):
os.makedirs(path)
def df_maker(f,C_mode):
df_list = {'Time':[],'1ch':[],'2ch':[],'3ch':[],'4ch':[]}
index = 0
for row in f:#row is list
if index == 0:
dataday = row[0]
index = 1
else:
if row[0] != 0:
|
return df_list['Time'],df_list['1ch'],df_list['2ch'],df_list['3ch'],df_list['4ch']
def get_Srate(time_dat):
now_time = eliminate_f(time_dat[0])
i = 0
try:
#### load head ####
while(1):
i += 1
if now_time != eliminate_f(time_dat[i]):
now_time = eliminate_f(time_dat[i])
break
#### count ####
count = 0
while(1):
i += 1
count += 1
if now_time != eliminate_f(time_dat[i]):
return count
except IndexError:
return 0
def eliminate_errerdata(Cutoff,time,data):
data_median = median(data)
arg = []
#### search ####
for j in range(len(data)):
if abs(data[j] - data_median) >= Cutoff:
arg.insert(0, j)
#### eliminate ####
for j in arg:
time.pop(j)
data.pop(j)
return time, data
def medianFilter(time_dat,dat,Cutoff):
out = {'time':[],'data':[]}
now_time = eliminate_f(time_dat[0])
buf_t = []
buf_d = []
i = -1
try:
while(1):
i += 1
buf_t.append(time_dat[i])
buf_d.append(dat[i])
if i+1 == len(dat):
clean_data = eliminate_errerdata(Cutoff,buf_t,buf_d)
out['time'].extend(clean_data[0])
out['data'].extend(clean_data[1])
return out['time'], out['data']
if now_time != eliminate_f(time_dat[i+1]):
clean_data = eliminate_errerdata(Cutoff,buf_t,buf_d)
out['time'].extend(clean_data[0])
out['data'].extend(clean_data[1])
buf_t.clear
buf_d.clear
except IndexError:
return time_dat,dat
def _redraw(_, ax, data, C_mode, C_range):
"""グラフを再描画するための関数"""
# 現在のグラフを消去する
plt.cla()
print("redraw")
# print(data[990])
df = df_maker(data,C_mode)
# 折れ線グラフを再描画する
ax_1ch.yaxis.grid(True)
ax_2ch.yaxis.grid(True)
ax_3ch.yaxis.grid(True)
ax_4ch.yaxis.grid(True)
t_1ch = df[0]
d_1ch = df[1]
d_2ch = df[2]
d_3ch = df[3]
d_4ch = df[4]
ax_1ch.set_ylabel('X [nT]', fontsize=18)
ax_2ch.set_ylabel('Y [nT]', fontsize=18)
ax_3ch.set_ylabel('Z [nT]', fontsize=18)
ax_4ch.set_ylabel('Temperature [C]', fontsize=18)
ax_1ch.set_ylim([mean(d_1ch) - (C_range/2),mean(d_1ch) + (C_range/2)])
ax_2ch.set_ylim([mean(d_2ch) - (C_range/2),mean(d_2ch) + (C_range/2)])
ax_3ch.set_ylim([mean(d_3ch) - (C_range/2),mean(d_3ch) + (C_range/2)])
ax_1ch.plot(pd.to_datetime(t_1ch, utc=True), d_1ch, color='r')
ax_2ch.plot(pd.to_datetime(t_1ch, utc=True), d_2ch, color='g')
ax_3ch.plot(pd.to_datetime(t_1ch, utc=True), d_3ch, color='b')
ax_4ch.plot(pd.to_datetime(t_1ch, utc=True), d_4ch, color='k')
ax_1ch.set_title('(JST) ' + 'magnetic force(nT)' + C_mode +"range="+ str(C_range) +"rate="+ str(df[5]))
def ploting(plotData,C_save,C_range,C_mode,C_Drate):
fig = plt.figure(figsize=(12, 12))
ax_1ch = fig.add_subplot(411) #Z axis
ax_2ch = fig.add_subplot(412) #Y axis
ax_3ch = fig.add_subplot(413) #X axis
ax_4ch = fig.add_subplot(414)
while True:
df = df_maker(plotData ,C_mode)
if len(df[0]) > 200:
# 折れ線グラフを再描画する
plt.cla()
ax_1ch.yaxis.grid(True)
ax_2ch.yaxis.grid(True)
ax_3ch.yaxis.grid(True)
ax_4ch.yaxis.grid(True)
t_1ch = df[0]
d_1ch = df[1]
d_2ch = df[2]
d_3ch = df[3]
d_4ch = df[4]
ax_1ch.set_ylabel('X [nT]', fontsize=18)
ax_2ch.set_ylabel('Y [nT]', fontsize=18)
ax_3ch.set_ylabel('Z [nT]', fontsize=18)
ax_4ch.set_ylabel('Temperature [C]', fontsize=18)
ax_1ch.set_ylim([mean(d_1ch) - (C_range/2),mean(d_1ch) + (C_range/2)])
ax_2ch.set_ylim([mean(d_2ch) - (C_range/2),mean(d_2ch) + (C_range/2)])
ax_3ch.set_ylim([mean(d_3ch) - (C_range/2),mean(d_3ch) + (C_range/2)])
ax_1ch.plot(pd.to_datetime(t_1ch, utc=True), d_1ch, color='r')
ax_2ch.plot(pd.to_datetime(t_1ch, utc=True), d_2ch, color='g')
ax_3ch.plot(pd.to_datetime(t_1ch, utc=True), d_3ch, color='b')
ax_4ch.plot(pd.to_datetime(t_1ch, utc=True), d_4ch, color='k')
ax_1ch.set_title('(JST) ' + 'magnetic force(nT)' + C_mode +"range="+ str(C_range) +"rate="+ str(get_Srate(df[0])))
plt.pause(0.01)
def measurement(plotData,C_save,C_Drate):
#1007B:1029A:1024A:LM60
slope = [6.041297912, 6.032822234, 6.024782582, 1.004970735]
intercept = [-15.21584595, -15.20405742, -15.17129194, -0.000415594]
transform = [0.16*0.001, 0.16*0.001, 0.16*0.001, 6.25*0.001]
off_set = [0,0,0,424*0.001]
# 描画するデータ
now = datetime.datetime.now(timezone.utc)
plotData.append(['{0:%Y-%m-%d }'.format(now),
'Magnetic force(nT)_1ch','Magnetic force(nT)_2ch',
'Magnetic force(nT)_3ch','Magnetic force(nT)_4ch'])
ads = ADS1256()
ads.drate = (DRATE_100 if C_Drate == 100 else
DRATE_500 if C_Drate == 500 else
DRATE_10 | df_list['Time'].append(dataday + row[0])
df_list['1ch'].append(float(row[1]))
df_list['2ch'].append(float(row[2]))
df_list['3ch'].append(float(row[3]))
df_list['4ch'].append(float(row[4])) | conditional_block |
realtimeLogger.py | .strptime(date_str, '%Y-%m-%d %H:%M:%S.%f')
return date.strftime('%H:%M:%S')
def my_makedirs(path):
|
def df_maker(f,C_mode):
df_list = {'Time':[],'1ch':[],'2ch':[],'3ch':[],'4ch':[]}
index = 0
for row in f:#row is list
if index == 0:
dataday = row[0]
index = 1
else:
if row[0] != 0:
df_list['Time'].append(dataday + row[0])
df_list['1ch'].append(float(row[1]))
df_list['2ch'].append(float(row[2]))
df_list['3ch'].append(float(row[3]))
df_list['4ch'].append(float(row[4]))
return df_list['Time'],df_list['1ch'],df_list['2ch'],df_list['3ch'],df_list['4ch']
def get_Srate(time_dat):
now_time = eliminate_f(time_dat[0])
i = 0
try:
#### load head ####
while(1):
i += 1
if now_time != eliminate_f(time_dat[i]):
now_time = eliminate_f(time_dat[i])
break
#### count ####
count = 0
while(1):
i += 1
count += 1
if now_time != eliminate_f(time_dat[i]):
return count
except IndexError:
return 0
def eliminate_errerdata(Cutoff,time,data):
data_median = median(data)
arg = []
#### search ####
for j in range(len(data)):
if abs(data[j] - data_median) >= Cutoff:
arg.insert(0, j)
#### eliminate ####
for j in arg:
time.pop(j)
data.pop(j)
return time, data
def medianFilter(time_dat,dat,Cutoff):
out = {'time':[],'data':[]}
now_time = eliminate_f(time_dat[0])
buf_t = []
buf_d = []
i = -1
try:
while(1):
i += 1
buf_t.append(time_dat[i])
buf_d.append(dat[i])
if i+1 == len(dat):
clean_data = eliminate_errerdata(Cutoff,buf_t,buf_d)
out['time'].extend(clean_data[0])
out['data'].extend(clean_data[1])
return out['time'], out['data']
if now_time != eliminate_f(time_dat[i+1]):
clean_data = eliminate_errerdata(Cutoff,buf_t,buf_d)
out['time'].extend(clean_data[0])
out['data'].extend(clean_data[1])
buf_t.clear
buf_d.clear
except IndexError:
return time_dat,dat
def _redraw(_, ax, data, C_mode, C_range):
"""グラフを再描画するための関数"""
# 現在のグラフを消去する
plt.cla()
print("redraw")
# print(data[990])
df = df_maker(data,C_mode)
# 折れ線グラフを再描画する
ax_1ch.yaxis.grid(True)
ax_2ch.yaxis.grid(True)
ax_3ch.yaxis.grid(True)
ax_4ch.yaxis.grid(True)
t_1ch = df[0]
d_1ch = df[1]
d_2ch = df[2]
d_3ch = df[3]
d_4ch = df[4]
ax_1ch.set_ylabel('X [nT]', fontsize=18)
ax_2ch.set_ylabel('Y [nT]', fontsize=18)
ax_3ch.set_ylabel('Z [nT]', fontsize=18)
ax_4ch.set_ylabel('Temperature [C]', fontsize=18)
ax_1ch.set_ylim([mean(d_1ch) - (C_range/2),mean(d_1ch) + (C_range/2)])
ax_2ch.set_ylim([mean(d_2ch) - (C_range/2),mean(d_2ch) + (C_range/2)])
ax_3ch.set_ylim([mean(d_3ch) - (C_range/2),mean(d_3ch) + (C_range/2)])
ax_1ch.plot(pd.to_datetime(t_1ch, utc=True), d_1ch, color='r')
ax_2ch.plot(pd.to_datetime(t_1ch, utc=True), d_2ch, color='g')
ax_3ch.plot(pd.to_datetime(t_1ch, utc=True), d_3ch, color='b')
ax_4ch.plot(pd.to_datetime(t_1ch, utc=True), d_4ch, color='k')
ax_1ch.set_title('(JST) ' + 'magnetic force(nT)' + C_mode +"range="+ str(C_range) +"rate="+ str(df[5]))
def ploting(plotData,C_save,C_range,C_mode,C_Drate):
fig = plt.figure(figsize=(12, 12))
ax_1ch = fig.add_subplot(411) #Z axis
ax_2ch = fig.add_subplot(412) #Y axis
ax_3ch = fig.add_subplot(413) #X axis
ax_4ch = fig.add_subplot(414)
while True:
df = df_maker(plotData ,C_mode)
if len(df[0]) > 200:
# 折れ線グラフを再描画する
plt.cla()
ax_1ch.yaxis.grid(True)
ax_2ch.yaxis.grid(True)
ax_3ch.yaxis.grid(True)
ax_4ch.yaxis.grid(True)
t_1ch = df[0]
d_1ch = df[1]
d_2ch = df[2]
d_3ch = df[3]
d_4ch = df[4]
ax_1ch.set_ylabel('X [nT]', fontsize=18)
ax_2ch.set_ylabel('Y [nT]', fontsize=18)
ax_3ch.set_ylabel('Z [nT]', fontsize=18)
ax_4ch.set_ylabel('Temperature [C]', fontsize=18)
ax_1ch.set_ylim([mean(d_1ch) - (C_range/2),mean(d_1ch) + (C_range/2)])
ax_2ch.set_ylim([mean(d_2ch) - (C_range/2),mean(d_2ch) + (C_range/2)])
ax_3ch.set_ylim([mean(d_3ch) - (C_range/2),mean(d_3ch) + (C_range/2)])
ax_1ch.plot(pd.to_datetime(t_1ch, utc=True), d_1ch, color='r')
ax_2ch.plot(pd.to_datetime(t_1ch, utc=True), d_2ch, color='g')
ax_3ch.plot(pd.to_datetime(t_1ch, utc=True), d_3ch, color='b')
ax_4ch.plot(pd.to_datetime(t_1ch, utc=True), d_4ch, color='k')
ax_1ch.set_title('(JST) ' + 'magnetic force(nT)' + C_mode +"range="+ str(C_range) +"rate="+ str(get_Srate(df[0])))
plt.pause(0.01)
def measurement(plotData,C_save,C_Drate):
#1007B:1029A:1024A:LM60
slope = [6.041297912, 6.032822234, 6.024782582, 1.004970735]
intercept = [-15.21584595, -15.20405742, -15.17129194, -0.000415594]
transform = [0.16*0.001, 0.16*0.001, 0.16*0.001, 6.25*0.001]
off_set = [0,0,0,424*0.001]
    # data to plot
now = datetime.datetime.now(timezone.utc)
plotData.append(['{0:%Y-%m-%d }'.format(now),
'Magnetic force(nT)_1ch','Magnetic force(nT)_2ch',
'Magnetic force(nT)_3ch','Magnetic force(nT)_4ch'])
ads = ADS1256()
ads.drate = (DRATE_100 if C_Drate == 100 else
DRATE_500 if C_Drate == 500 else
DRATE_10 | if not os.path.isdir(path):
os.makedirs(path) | identifier_body |
validation.py | _OPERATION_RESPONSES,
)
from .events import (
ReferenceNotFoundValidationError,
ParameterDefinitionValidationError,
SecurityDefinitionNotFoundValidationError,
OAuth2ScopeNotFoundInSecurityDefinitionValidationError,
DuplicateOperationIdValidationError,
JsonSchemaValidationError,
ValidationError,
ReferenceInvalidSyntax,
ReferenceInvalidSection,
)
def check_security(swagger: Dict):
"""
    Check that each use of security, with its scopes, matches a securityDefinition
:param swagger:
:return:
"""
events = set()
secdefs = swagger.get("securityDefinitions", {})
security_jspath = JSPATH_SECURITY
for sec_key, scopes, path in get_elements(swagger, security_jspath):
# retrieve security definition name from security declaration
secdef = secdefs.get(sec_key)
if secdef is None:
events.add(
SecurityDefinitionNotFoundValidationError(
path=path, reason=f"securityDefinitions '{sec_key}' does not exist"
)
)
else:
# retrieve scopes declared in the secdef
declared_scopes = secdef.get("scopes", [])
if not isinstance(scopes, list):
continue
# verify scopes can be resolved
for scope in scopes:
if scope not in declared_scopes:
events.add(
OAuth2ScopeNotFoundInSecurityDefinitionValidationError(
path=path + (scope,),
reason=f"scope {scope} is not declared in the scopes of the securityDefinitions '{sec_key}'",
)
)
return events
def _check_parameter(param: Dict, path_param):
"""Check a parameter structure
    For a parameter, the consistency checks cover:
- required and default
- type/format and default
- enum
"""
events = set()
name = param.get("name", "unnamed-parameter")
required = param.get("required", False)
default = param.get("default")
_type = param.get("type")
format = param.get("format")
enum = param.get("enum")
# check if required=True and default are both given
if required and default is not None:
events.add(
ParameterDefinitionValidationError(
path=path_param,
reason=f"The parameter is required yet it has a default value",
parameter_name=name,
)
)
# check if type==array that there is an items
if _type == "array" and "items" not in param:
events.add(
ParameterDefinitionValidationError(
path=path_param,
reason=f"The parameter is of type 'array' but is missing an 'items' field",
parameter_name=name,
)
)
# check enum does not contain duplicates
if enum:
if len(set(enum)) != len(enum):
events.add(
ParameterDefinitionValidationError(
path=path_param + ("enum",),
reason=f"The enum values {enum} contains duplicate values",
parameter_name=name,
)
)
if default is not None and default not in enum:
events.add(
ParameterDefinitionValidationError(
path=path_param + ("default",),
reason=f"The default value {repr(default)} is not one of the enum values {enum}",
parameter_name=name,
)
)
# check type/format & default value in accordance with type/format
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types
map_type2subtypes_pythontype = {
("string", None): str,
("string", "byte"): re.compile(
r"^(?:[A-Za-z0-9+/\s]{4})*(?:[A-Za-z0-9+/\s]{2}==|[A-Za-z0-9+/\s]{3}=)?$"
),
("string", "binary"): str,
("string", "date"): re.compile(r"^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$"),
("string", "dateTime"): re.compile(
r"^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])" # date
r"[Tt]"
r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?" # time
r"(([Zz])|([+|\-]([01][0-9]|2[0-3]):[0-5][0-9]))$" # offset
),
("string", "password"): str,
("integer", None): numbers.Integral,
("integer", "int32"): numbers.Integral,
("integer", "int64"): numbers.Integral,
("number", None): numbers.Real,
("number", "float"): numbers.Real,
("number", "double"): numbers.Real,
("boolean", None): bool,
("array", None): list,
}
if default is not None and _type:
regexp_or_type = map_type2subtypes_pythontype.get((_type, format))
# if no match with both _type, format, check if match only on _type (format being freeform)
if not regexp_or_type:
regexp_or_type = map_type2subtypes_pythontype.get((_type, None))
if regexp_or_type:
# the type & format matches one of the Swagger Specifications documented type & format combinations
# we can check the default format
# decompose regexp_or_type into type and RE expression
if isinstance(regexp_or_type, type):
# regexp_or_type is a standard python type
re_pattern = None
py_type = regexp_or_type
else:
# regexp_or_type is a regexp expression
re_pattern = regexp_or_type
py_type = str
if not isinstance(default, py_type):
events.add(
ParameterDefinitionValidationError(
path=path_param + ("default",),
reason=f"The default value {repr(default)} is not of the expected type '{_type}'",
parameter_name=name,
)
)
# if a regexp matching string is expected
if re_pattern is not None:
if not (isinstance(default, str) and re_pattern.match(default)):
events.add(
ParameterDefinitionValidationError(
path=path_param + ("default",),
reason=f"The default value '{default}' does not conform to the string format '{format}'",
parameter_name=name,
)
)
return events
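# Illustrative example (not part of the module): a parameter that is both
# required and carries a default value is reported by _check_parameter, e.g.
#
#     errors = _check_parameter(
#         {"name": "limit", "in": "query", "type": "integer",
#          "required": True, "default": 10},
#         ("paths", "/items", "get", "parameters", "0"),
#     )
#     # -> contains a ParameterDefinitionValidationError
#     #    ("The parameter is required yet it has a default value")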
def check_parameters(swagger: Dict):
"""
Check parameters for:
- duplicate items in enum
- default parameter is in line with type when type=string
:param swagger:
:return:
"""
events = set()
parameters_jspath = JSPATH_PARAMETERS
for _, param, path in get_elements(swagger, parameters_jspath):
while True:
events |= _check_parameter(param, path)
if param.get("type") == "array":
# recurse in array items type
path += ("items",)
param = param.get("items", {})
else:
break
return events
def check_references(swagger: Dict):
"""
    Find references in paths to /definitions/, /responses/ and /securityDefinitions/.
    Follow references from these to further references, until no more are added.
:param swagger:
:return:
"""
events = set()
ref_jspath = JSPATH_REFERENCES | try:
rt, obj = reference[2:].split("/")
except ValueError:
events.add(
ReferenceInvalidSyntax(
path=path, reason=f"reference {reference} not of the form '#/section/item'"
)
)
continue
if rt not in REFERENCE_SECTIONS:
events.add(
ReferenceInvalidSection(
path=path,
reason=f"Reference {reference} not referring to one of the sections {REFERENCE_SECTIONS}",
)
)
# resolve reference (error if not possible)
try:
swagger[rt][obj]
except KeyError:
events.add(
ReferenceNotFoundValidationError(
path=path, reason=f"reference '#/{rt}/{obj}' does not exist"
)
)
return events
def detect_duplicate_operationId(swagger: Dict):
"""Return list of Action with duplicate operationIds"""
events = set()
# retrieve all operationIds
operationId_jspath = JSPATH_OPERATIONID
def get_operationId_name(name_value_path):
return name_value_path[1]
operationIds = sorted(get_elements(swagger, operationId_jspath), key=get_operationId_name)
for opId, key_pths in groupby(operationIds, key=get_operationId_name):
pths = tuple(subpth for _, _, subpth in key_pths)
if len(pths) > 1:
pth_first, *pths = pths
for pth in pths:
events.add(
DuplicateOperationIdValidationError(
path=pth,
path_already_used=pth_first,
reason=f"the operationId '{opId}' is already used in an endpoint.",
operationId=opId,
)
)
return events
def check_schema(swagger: |
for _, reference, path in get_elements(swagger, ref_jspath):
# handle only local references
if reference.startswith("#/"):
# decompose reference (error if not possible) | random_line_split |
validation.py | _OPERATION_RESPONSES,
)
from .events import (
ReferenceNotFoundValidationError,
ParameterDefinitionValidationError,
SecurityDefinitionNotFoundValidationError,
OAuth2ScopeNotFoundInSecurityDefinitionValidationError,
DuplicateOperationIdValidationError,
JsonSchemaValidationError,
ValidationError,
ReferenceInvalidSyntax,
ReferenceInvalidSection,
)
def check_security(swagger: Dict):
"""
    Check that each use of security, with its scopes, matches a securityDefinition
:param swagger:
:return:
"""
events = set()
secdefs = swagger.get("securityDefinitions", {})
security_jspath = JSPATH_SECURITY
for sec_key, scopes, path in get_elements(swagger, security_jspath):
# retrieve security definition name from security declaration
secdef = secdefs.get(sec_key)
if secdef is None:
events.add(
SecurityDefinitionNotFoundValidationError(
path=path, reason=f"securityDefinitions '{sec_key}' does not exist"
)
)
else:
# retrieve scopes declared in the secdef
declared_scopes = secdef.get("scopes", [])
if not isinstance(scopes, list):
continue
# verify scopes can be resolved
for scope in scopes:
if scope not in declared_scopes:
events.add(
OAuth2ScopeNotFoundInSecurityDefinitionValidationError(
path=path + (scope,),
reason=f"scope {scope} is not declared in the scopes of the securityDefinitions '{sec_key}'",
)
)
return events
def _check_parameter(param: Dict, path_param):
"""Check a parameter structure
    For a parameter, the consistency checks cover:
- required and default
- type/format and default
- enum
"""
events = set()
name = param.get("name", "unnamed-parameter")
required = param.get("required", False)
default = param.get("default")
_type = param.get("type")
format = param.get("format")
enum = param.get("enum")
# check if required=True and default are both given
if required and default is not None:
events.add(
ParameterDefinitionValidationError(
path=path_param,
reason=f"The parameter is required yet it has a default value",
parameter_name=name,
)
)
# check if type==array that there is an items
if _type == "array" and "items" not in param:
events.add(
ParameterDefinitionValidationError(
path=path_param,
reason=f"The parameter is of type 'array' but is missing an 'items' field",
parameter_name=name,
)
)
# check enum does not contain duplicates
if enum:
if len(set(enum)) != len(enum):
events.add(
ParameterDefinitionValidationError(
path=path_param + ("enum",),
reason=f"The enum values {enum} contains duplicate values",
parameter_name=name,
)
)
if default is not None and default not in enum:
events.add(
ParameterDefinitionValidationError(
path=path_param + ("default",),
reason=f"The default value {repr(default)} is not one of the enum values {enum}",
parameter_name=name,
)
)
# check type/format & default value in accordance with type/format
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types
map_type2subtypes_pythontype = {
("string", None): str,
("string", "byte"): re.compile(
r"^(?:[A-Za-z0-9+/\s]{4})*(?:[A-Za-z0-9+/\s]{2}==|[A-Za-z0-9+/\s]{3}=)?$"
),
("string", "binary"): str,
("string", "date"): re.compile(r"^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$"),
("string", "dateTime"): re.compile(
r"^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])" # date
r"[Tt]"
r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?" # time
r"(([Zz])|([+|\-]([01][0-9]|2[0-3]):[0-5][0-9]))$" # offset
),
("string", "password"): str,
("integer", None): numbers.Integral,
("integer", "int32"): numbers.Integral,
("integer", "int64"): numbers.Integral,
("number", None): numbers.Real,
("number", "float"): numbers.Real,
("number", "double"): numbers.Real,
("boolean", None): bool,
("array", None): list,
}
if default is not None and _type:
regexp_or_type = map_type2subtypes_pythontype.get((_type, format))
# if no match with both _type, format, check if match only on _type (format being freeform)
if not regexp_or_type:
regexp_or_type = map_type2subtypes_pythontype.get((_type, None))
if regexp_or_type:
# the type & format matches one of the Swagger Specifications documented type & format combinations
# we can check the default format
# decompose regexp_or_type into type and RE expression
if isinstance(regexp_or_type, type):
# regexp_or_type is a standard python type
re_pattern = None
py_type = regexp_or_type
else:
# regexp_or_type is a regexp expression
re_pattern = regexp_or_type
py_type = str
if not isinstance(default, py_type):
events.add(
ParameterDefinitionValidationError(
path=path_param + ("default",),
reason=f"The default value {repr(default)} is not of the expected type '{_type}'",
parameter_name=name,
)
)
# if a regexp matching string is expected
if re_pattern is not None:
if not (isinstance(default, str) and re_pattern.match(default)):
events.add(
ParameterDefinitionValidationError(
path=path_param + ("default",),
reason=f"The default value '{default}' does not conform to the string format '{format}'",
parameter_name=name,
)
)
return events
def check_parameters(swagger: Dict):
"""
Check parameters for:
- duplicate items in enum
- default parameter is in line with type when type=string
:param swagger:
:return:
"""
events = set()
parameters_jspath = JSPATH_PARAMETERS
for _, param, path in get_elements(swagger, parameters_jspath):
while True:
events |= _check_parameter(param, path)
if param.get("type") == "array":
# recurse in array items type
path += ("items",)
param = param.get("items", {})
else:
break
return events
def check_references(swagger: Dict):
"""
    Find references in paths to /definitions/, /responses/ and /securityDefinitions/.
    Follow references from these to further references, until no more are added.
:param swagger:
:return:
"""
events = set()
ref_jspath = JSPATH_REFERENCES
for _, reference, path in get_elements(swagger, ref_jspath):
# handle only local references
| # resolve reference (error if not possible)
try:
swagger[rt][obj]
except KeyError:
events.add(
ReferenceNotFoundValidationError(
path=path, reason=f"reference '#/{rt}/{obj}' does not exist"
)
)
return events
def detect_duplicate_operationId(swagger: Dict):
"""Return list of Action with duplicate operationIds"""
events = set()
# retrieve all operationIds
operationId_jspath = JSPATH_OPERATIONID
def get_operationId_name(name_value_path):
return name_value_path[1]
operationIds = sorted(get_elements(swagger, operationId_jspath), key=get_operationId_name)
for opId, key_pths in groupby(operationIds, key=get_operationId_name):
pths = tuple(subpth for _, _, subpth in key_pths)
if len(pths) > 1:
pth_first, *pths = pths
for pth in pths:
events.add(
DuplicateOperationIdValidationError(
path=pth,
path_already_used=pth_first,
reason=f"the operationId '{opId}' is already used in an endpoint.",
operationId=opId,
)
)
return events
def check_schema(swagger: | if reference.startswith("#/"):
# decompose reference (error if not possible)
try:
rt, obj = reference[2:].split("/")
except ValueError:
events.add(
ReferenceInvalidSyntax(
path=path, reason=f"reference {reference} not of the form '#/section/item'"
)
)
continue
if rt not in REFERENCE_SECTIONS:
events.add(
ReferenceInvalidSection(
path=path,
reason=f"Reference {reference} not referring to one of the sections {REFERENCE_SECTIONS}",
)
)
| conditional_block |
validation.py | _OPERATION_RESPONSES,
)
from .events import (
ReferenceNotFoundValidationError,
ParameterDefinitionValidationError,
SecurityDefinitionNotFoundValidationError,
OAuth2ScopeNotFoundInSecurityDefinitionValidationError,
DuplicateOperationIdValidationError,
JsonSchemaValidationError,
ValidationError,
ReferenceInvalidSyntax,
ReferenceInvalidSection,
)
def check_security(swagger: Dict):
"""
    Check that each use of security, with its scopes, matches a securityDefinition
:param swagger:
:return:
"""
events = set()
secdefs = swagger.get("securityDefinitions", {})
security_jspath = JSPATH_SECURITY
for sec_key, scopes, path in get_elements(swagger, security_jspath):
# retrieve security definition name from security declaration
secdef = secdefs.get(sec_key)
if secdef is None:
events.add(
SecurityDefinitionNotFoundValidationError(
path=path, reason=f"securityDefinitions '{sec_key}' does not exist"
)
)
else:
# retrieve scopes declared in the secdef
declared_scopes = secdef.get("scopes", [])
if not isinstance(scopes, list):
continue
# verify scopes can be resolved
for scope in scopes:
if scope not in declared_scopes:
events.add(
OAuth2ScopeNotFoundInSecurityDefinitionValidationError(
path=path + (scope,),
reason=f"scope {scope} is not declared in the scopes of the securityDefinitions '{sec_key}'",
)
)
return events
def _check_parameter(param: Dict, path_param):
| path=path_param,
reason=f"The parameter is required yet it has a default value",
parameter_name=name,
)
)
# check if type==array that there is an items
if _type == "array" and "items" not in param:
events.add(
ParameterDefinitionValidationError(
path=path_param,
reason=f"The parameter is of type 'array' but is missing an 'items' field",
parameter_name=name,
)
)
# check enum does not contain duplicates
if enum:
if len(set(enum)) != len(enum):
events.add(
ParameterDefinitionValidationError(
path=path_param + ("enum",),
reason=f"The enum values {enum} contains duplicate values",
parameter_name=name,
)
)
if default is not None and default not in enum:
events.add(
ParameterDefinitionValidationError(
path=path_param + ("default",),
reason=f"The default value {repr(default)} is not one of the enum values {enum}",
parameter_name=name,
)
)
# check type/format & default value in accordance with type/format
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types
map_type2subtypes_pythontype = {
("string", None): str,
("string", "byte"): re.compile(
r"^(?:[A-Za-z0-9+/\s]{4})*(?:[A-Za-z0-9+/\s]{2}==|[A-Za-z0-9+/\s]{3}=)?$"
),
("string", "binary"): str,
("string", "date"): re.compile(r"^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$"),
("string", "dateTime"): re.compile(
r"^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])" # date
r"[Tt]"
r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?" # time
r"(([Zz])|([+|\-]([01][0-9]|2[0-3]):[0-5][0-9]))$" # offset
),
("string", "password"): str,
("integer", None): numbers.Integral,
("integer", "int32"): numbers.Integral,
("integer", "int64"): numbers.Integral,
("number", None): numbers.Real,
("number", "float"): numbers.Real,
("number", "double"): numbers.Real,
("boolean", None): bool,
("array", None): list,
}
if default is not None and _type:
regexp_or_type = map_type2subtypes_pythontype.get((_type, format))
# if no match with both _type, format, check if match only on _type (format being freeform)
if not regexp_or_type:
regexp_or_type = map_type2subtypes_pythontype.get((_type, None))
if regexp_or_type:
# the type & format matches one of the Swagger Specifications documented type & format combinations
# we can check the default format
# decompose regexp_or_type into type and RE expression
if isinstance(regexp_or_type, type):
# regexp_or_type is a standard python type
re_pattern = None
py_type = regexp_or_type
else:
# regexp_or_type is a regexp expression
re_pattern = regexp_or_type
py_type = str
if not isinstance(default, py_type):
events.add(
ParameterDefinitionValidationError(
path=path_param + ("default",),
reason=f"The default value {repr(default)} is not of the expected type '{_type}'",
parameter_name=name,
)
)
# if a regexp matching string is expected
if re_pattern is not None:
if not (isinstance(default, str) and re_pattern.match(default)):
events.add(
ParameterDefinitionValidationError(
path=path_param + ("default",),
reason=f"The default value '{default}' does not conform to the string format '{format}'",
parameter_name=name,
)
)
return events
def check_parameters(swagger: Dict):
"""
Check parameters for:
- duplicate items in enum
- default parameter is in line with type when type=string
:param swagger:
:return:
"""
events = set()
parameters_jspath = JSPATH_PARAMETERS
for _, param, path in get_elements(swagger, parameters_jspath):
while True:
events |= _check_parameter(param, path)
if param.get("type") == "array":
# recurse in array items type
path += ("items",)
param = param.get("items", {})
else:
break
return events
def check_references(swagger: Dict):
"""
    Find references in paths to /definitions/, /responses/ and /securityDefinitions/.
    Follow references from these to further references, until no more are added.
:param swagger:
:return:
"""
events = set()
ref_jspath = JSPATH_REFERENCES
for _, reference, path in get_elements(swagger, ref_jspath):
# handle only local references
if reference.startswith("#/"):
# decompose reference (error if not possible)
try:
rt, obj = reference[2:].split("/")
except ValueError:
events.add(
ReferenceInvalidSyntax(
path=path, reason=f"reference {reference} not of the form '#/section/item'"
)
)
continue
if rt not in REFERENCE_SECTIONS:
events.add(
ReferenceInvalidSection(
path=path,
reason=f"Reference {reference} not referring to one of the sections {REFERENCE_SECTIONS}",
)
)
# resolve reference (error if not possible)
try:
swagger[rt][obj]
except KeyError:
events.add(
ReferenceNotFoundValidationError(
path=path, reason=f"reference '#/{rt}/{obj}' does not exist"
)
)
return events
def detect_duplicate_operationId(swagger: Dict):
"""Return list of Action with duplicate operationIds"""
events = set()
# retrieve all operationIds
operationId_jspath = JSPATH_OPERATIONID
def get_operationId_name(name_value_path):
return name_value_path[1]
operationIds = sorted(get_elements(swagger, operationId_jspath), key=get_operationId_name)
for opId, key_pths in groupby(operationIds, key=get_operationId_name):
pths = tuple(subpth for _, _, subpth in key_pths)
if len(pths) > 1:
pth_first, *pths = pths
for pth in pths:
events.add(
DuplicateOperationIdValidationError(
path=pth,
path_already_used=pth_first,
reason=f"the operationId '{opId}' is already used in an endpoint.",
operationId=opId,
)
)
return events
def check_schema(swagger: | """Check a parameter structure
    For a parameter, the consistency checks cover:
- required and default
- type/format and default
- enum
"""
events = set()
name = param.get("name", "unnamed-parameter")
required = param.get("required", False)
default = param.get("default")
_type = param.get("type")
format = param.get("format")
enum = param.get("enum")
# check if required=True and default are both given
if required and default is not None:
events.add(
ParameterDefinitionValidationError( | identifier_body |
validation.py | _RESPONSES,
)
from .events import (
ReferenceNotFoundValidationError,
ParameterDefinitionValidationError,
SecurityDefinitionNotFoundValidationError,
OAuth2ScopeNotFoundInSecurityDefinitionValidationError,
DuplicateOperationIdValidationError,
JsonSchemaValidationError,
ValidationError,
ReferenceInvalidSyntax,
ReferenceInvalidSection,
)
def check_security(swagger: Dict):
"""
    Check that each use of security, with its scopes, matches a securityDefinition
:param swagger:
:return:
"""
events = set()
secdefs = swagger.get("securityDefinitions", {})
security_jspath = JSPATH_SECURITY
for sec_key, scopes, path in get_elements(swagger, security_jspath):
# retrieve security definition name from security declaration
secdef = secdefs.get(sec_key)
if secdef is None:
events.add(
SecurityDefinitionNotFoundValidationError(
path=path, reason=f"securityDefinitions '{sec_key}' does not exist"
)
)
else:
# retrieve scopes declared in the secdef
declared_scopes = secdef.get("scopes", [])
if not isinstance(scopes, list):
continue
# verify scopes can be resolved
for scope in scopes:
if scope not in declared_scopes:
events.add(
OAuth2ScopeNotFoundInSecurityDefinitionValidationError(
path=path + (scope,),
reason=f"scope {scope} is not declared in the scopes of the securityDefinitions '{sec_key}'",
)
)
return events
def _check_parameter(param: Dict, path_param):
"""Check a parameter structure
    For a parameter, the consistency checks cover:
- required and default
- type/format and default
- enum
"""
events = set()
name = param.get("name", "unnamed-parameter")
required = param.get("required", False)
default = param.get("default")
_type = param.get("type")
format = param.get("format")
enum = param.get("enum")
# check if required=True and default are both given
if required and default is not None:
events.add(
ParameterDefinitionValidationError(
path=path_param,
reason=f"The parameter is required yet it has a default value",
parameter_name=name,
)
)
# check if type==array that there is an items
if _type == "array" and "items" not in param:
events.add(
ParameterDefinitionValidationError(
path=path_param,
reason=f"The parameter is of type 'array' but is missing an 'items' field",
parameter_name=name,
)
)
# check enum does not contain duplicates
if enum:
if len(set(enum)) != len(enum):
events.add(
ParameterDefinitionValidationError(
path=path_param + ("enum",),
reason=f"The enum values {enum} contains duplicate values",
parameter_name=name,
)
)
if default is not None and default not in enum:
events.add(
ParameterDefinitionValidationError(
path=path_param + ("default",),
reason=f"The default value {repr(default)} is not one of the enum values {enum}",
parameter_name=name,
)
)
# check type/format & default value in accordance with type/format
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types
map_type2subtypes_pythontype = {
("string", None): str,
("string", "byte"): re.compile(
r"^(?:[A-Za-z0-9+/\s]{4})*(?:[A-Za-z0-9+/\s]{2}==|[A-Za-z0-9+/\s]{3}=)?$"
),
("string", "binary"): str,
("string", "date"): re.compile(r"^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$"),
("string", "dateTime"): re.compile(
r"^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])" # date
r"[Tt]"
r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?" # time
r"(([Zz])|([+|\-]([01][0-9]|2[0-3]):[0-5][0-9]))$" # offset
),
("string", "password"): str,
("integer", None): numbers.Integral,
("integer", "int32"): numbers.Integral,
("integer", "int64"): numbers.Integral,
("number", None): numbers.Real,
("number", "float"): numbers.Real,
("number", "double"): numbers.Real,
("boolean", None): bool,
("array", None): list,
}
if default is not None and _type:
regexp_or_type = map_type2subtypes_pythontype.get((_type, format))
# if no match with both _type, format, check if match only on _type (format being freeform)
if not regexp_or_type:
regexp_or_type = map_type2subtypes_pythontype.get((_type, None))
if regexp_or_type:
# the type & format matches one of the Swagger Specifications documented type & format combinations
# we can check the default format
# decompose regexp_or_type into type and RE expression
if isinstance(regexp_or_type, type):
# regexp_or_type is a standard python type
re_pattern = None
py_type = regexp_or_type
else:
# regexp_or_type is a regexp expression
re_pattern = regexp_or_type
py_type = str
if not isinstance(default, py_type):
events.add(
ParameterDefinitionValidationError(
path=path_param + ("default",),
reason=f"The default value {repr(default)} is not of the expected type '{_type}'",
parameter_name=name,
)
)
# if a regexp matching string is expected
if re_pattern is not None:
if not (isinstance(default, str) and re_pattern.match(default)):
events.add(
ParameterDefinitionValidationError(
path=path_param + ("default",),
reason=f"The default value '{default}' does not conform to the string format '{format}'",
parameter_name=name,
)
)
return events
def check_parameters(swagger: Dict):
"""
Check parameters for:
- duplicate items in enum
- default parameter is in line with type when type=string
:param swagger:
:return:
"""
events = set()
parameters_jspath = JSPATH_PARAMETERS
for _, param, path in get_elements(swagger, parameters_jspath):
while True:
events |= _check_parameter(param, path)
if param.get("type") == "array":
# recurse in array items type
path += ("items",)
param = param.get("items", {})
else:
break
return events
def | (swagger: Dict):
"""
Find reference in paths, for /definitions/ and /responses/ /securityDefinitions/.
Follow from these, references to other references, till no more added.
:param swagger:
:return:
"""
events = set()
ref_jspath = JSPATH_REFERENCES
for _, reference, path in get_elements(swagger, ref_jspath):
# handle only local references
if reference.startswith("#/"):
# decompose reference (error if not possible)
try:
rt, obj = reference[2:].split("/")
except ValueError:
events.add(
ReferenceInvalidSyntax(
path=path, reason=f"reference {reference} not of the form '#/section/item'"
)
)
continue
if rt not in REFERENCE_SECTIONS:
events.add(
ReferenceInvalidSection(
path=path,
reason=f"Reference {reference} not referring to one of the sections {REFERENCE_SECTIONS}",
)
)
# resolve reference (error if not possible)
try:
swagger[rt][obj]
except KeyError:
events.add(
ReferenceNotFoundValidationError(
path=path, reason=f"reference '#/{rt}/{obj}' does not exist"
)
)
return events
def detect_duplicate_operationId(swagger: Dict):
"""Return list of Action with duplicate operationIds"""
events = set()
# retrieve all operationIds
operationId_jspath = JSPATH_OPERATIONID
def get_operationId_name(name_value_path):
return name_value_path[1]
operationIds = sorted(get_elements(swagger, operationId_jspath), key=get_operationId_name)
for opId, key_pths in groupby(operationIds, key=get_operationId_name):
pths = tuple(subpth for _, _, subpth in key_pths)
if len(pths) > 1:
pth_first, *pths = pths
for pth in pths:
events.add(
DuplicateOperationIdValidationError(
path=pth,
path_already_used=pth_first,
reason=f"the operationId '{opId}' is already used in an endpoint.",
operationId=opId,
)
)
return events
def check_schema(swagger: | check_references | identifier_name |
staged_file.rs | file::WriteError),
/// Failed to rename the staged file.
#[error("Failed to rename temp file to target: {0}")]
RenameError(#[from] fuchsia_fs::node::RenameError),
/// Failed to flush data.
#[error("Failed to flush to disk: {0}")]
FlushError(#[source] zx::Status),
/// Failed to close the staged file.
#[error("Failed to close backing storage: {0}")]
CloseError(#[source] zx::Status),
/// Failed to readdir.
#[error("Failed to readdir: {0}")]
ReaddirError(#[from] fuchsia_fs::directory::Error),
/// Failed to unlink file.
#[error("Failed to unlink file: {0}")]
UnlinkError(#[source] zx::Status),
}
/// StagedFile is a wrapper around a |&DirectoryProxy| and a |FileProxy|
/// for a staged file within that directory.
/// The primary purpose of StagedFile is to implement the atomic write workflow
/// summarized as open -> write -> sync -> close -> rename. This workflow is
/// simplified to simply write -> commit.
/// One caveat to the use of StagedFile is that in the event of power loss or
/// a crash, there may be orphaned temporary files in the directory.
/// This means that clients _should_ clean up their directories of temporary
/// files prior to operating in that directory. As such, it is important to
/// choose a |filename_prefix| that is guaranteed not to collide with
/// |target_filename|s given when calling StagedFile::commit.
/// It would have been preferable to use the tempfile crate here, but it lacks
/// the ability to open things without making use of paths and namespaces, and
/// as such, StagedFile should only be used in cases where we must supply our
/// own |DirectoryProxy|.
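/// Typical usage (illustrative sketch; assumes an already-open `dir_proxy`
/// and an async context):
/// ```ignore
/// let mut staged = StagedFile::new(&dir_proxy, "temp-").await?;
/// staged.write(b"new file contents").await?;
/// staged.commit("settings.json").await?;
/// ```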
pub struct StagedFile<'a> {
dir_proxy: &'a fio::DirectoryProxy,
temp_filename: String,
file_proxy: fio::FileProxy,
}
impl<'a> StagedFile<'a> {
/// Creates a new instance of StagedFile bound to the lifetime of
/// |dir_proxy| that respects |filename_prefix|.
/// |filename_prefix| must have a length > 0.
pub async fn new(
dir_proxy: &'a fio::DirectoryProxy,
tempfile_prefix: &str,
) -> Result<StagedFile<'a>, StagedFileError> {
if tempfile_prefix.is_empty() {
return Err(StagedFileError::InvalidArguments(String::from(
"filename_prefix must not be empty",
)));
} | &temp_filename,
fio::OpenFlags::RIGHT_READABLE
| fio::OpenFlags::RIGHT_WRITABLE
| fio::OpenFlags::CREATE,
)
.await?;
Ok(StagedFile { dir_proxy, temp_filename, file_proxy })
}
/// Writes data to the backing staged file proxy.
/// This file is not guaranteed to be persisted until commit is called,
/// at which point it will be renamed to |target_filename|.
pub async fn write(&mut self, data: &[u8]) -> Result<(), StagedFileError> {
let () = fuchsia_fs::file::write(&self.file_proxy, data).await?;
Ok(())
}
/// Commits the data in the staged file to |target_filename| via the
/// traditional sync -> close -> rename atomic write workflow.
/// Calling commit does not guarantee that |target_filename| will be
/// available, but it does guarantee atomicity of the file if it does
/// exist.
pub async fn commit(self, target_filename: &str) -> Result<(), StagedFileError> {
// Do the usual atomic commit via sync, close, and rename-to-target.
// Stale files left by a crash should be cleaned up by calling cleanup_stale_files on the
// next startup.
let () = self
.file_proxy
.sync()
.await?
.map_err(zx::Status::from_raw)
.map_err(StagedFileError::FlushError)?;
let () = self
.file_proxy
.close()
.await?
.map_err(zx::Status::from_raw)
.map_err(StagedFileError::CloseError)?;
fuchsia_fs::directory::rename(self.dir_proxy, &self.temp_filename, target_filename)
.await
.map_err(StagedFileError::RenameError)?;
Ok(())
}
    /// Helper function that removes stale temporary files from |dir_proxy|:
    /// any directory entry whose name starts with |tempfile_prefix| is unlinked.
pub async fn cleanup_stale_files(
dir_proxy: &fio::DirectoryProxy,
tempfile_prefix: &str,
) -> Result<(), Vec<StagedFileError>> {
let dirents_res = fuchsia_fs::directory::readdir(dir_proxy).await;
let dirents = dirents_res.map_err(|err| vec![StagedFileError::ReaddirError(err)])?;
let mut failures = Vec::new();
for d in dirents.iter() {
let name = &d.name;
// For filenames that are known to be temporary, try to remove them.
if name.starts_with(tempfile_prefix) {
warn!("Removing unexpected file '{}' from directory", &name);
let fidl_res = dir_proxy.unlink(name, fio::UnlinkOptions::EMPTY).await;
match fidl_res {
Err(x) => failures.push(StagedFileError::FidlError(x)),
Ok(unlink_res) => {
if let Err(unlink_err) = unlink_res {
failures.push(StagedFileError::UnlinkError(zx::Status::from_raw(
unlink_err,
)));
}
}
}
}
}
if failures.is_empty() {
Ok(())
} else {
Err(failures)
}
}
}
/// Generates a temporary filename using |thread_rng| to append random chars to
/// a given |prefix|.
fn generate_tempfile_name(prefix: &str) -> String {
// Generate a tempfile with name "{prefix}-{random}"
let mut buf = String::with_capacity(TEMPFILE_RANDOM_LENGTH + prefix.len() + 1);
buf.push_str(prefix);
buf.push('-');
let mut rng = thread_rng();
std::iter::repeat(())
.map(|()| rng.sample(rand::distributions::Alphanumeric))
.map(char::from)
.take(TEMPFILE_RANDOM_LENGTH)
.for_each(|c| buf.push(c));
buf
}
#[cfg(test)]
mod test {
use {super::*, tempfile::TempDir};
#[fuchsia::test]
async fn test_normal_flow() {
let tmp_dir = TempDir::new().unwrap();
let dir = fuchsia_fs::directory::open_in_namespace(
tmp_dir.path().to_str().unwrap(),
fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_WRITABLE,
)
.expect("could not open temp dir");
let mut staged_file = StagedFile::new(&dir, "prefix-").await.unwrap();
staged_file.write(b"this is some file content".as_ref()).await.unwrap();
staged_file.commit("target_file_01").await.unwrap();
// Check that target_file_01 has been created.
let open_res = fuchsia_fs::directory::open_file(
&dir,
"target_file_01",
fio::OpenFlags::RIGHT_READABLE,
)
.await;
assert!(open_res.is_ok());
let file_bytes = fuchsia_fs::file::read(&open_res.unwrap()).await.unwrap();
assert_eq!(file_bytes, b"this is some file content");
}
#[fuchsia::test]
async fn test_empty_tempfile_prefix() {
let tmp_dir = TempDir::new().unwrap();
let dir = fuchsia_fs::directory::open_in_namespace(
tmp_dir.path().to_str().unwrap(),
fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_WRITABLE,
)
.expect("could not open temp dir");
assert!(StagedFile::new(&dir, "").await.is_err());
}
async fn write_test_file_content(dir_proxy: &fio::DirectoryProxy, filename: &str, data: &[u8]) {
let file_proxy = fuchsia_fs::directory::open_file(
dir_proxy,
filename,
fio::OpenFlags::RIGHT_READABLE
| fio::OpenFlags::RIGHT_WRITABLE
| fio::OpenFlags::CREATE,
)
.await
.expect("could not open test file");
fuchsia_fs::file::write(&file_proxy, data).await.expect("could not write test file data")
}
async fn file_exists_with_data(
dir_proxy: &fio::DirectoryProxy,
filename: &str,
expected_data: &[u8],
) -> bool {
let file =
fuchsia_fs::directory::open_file(dir_proxy, filename, fio::OpenFlags::RIGHT_READABLE)
.await
.expect("could not open file");
let bytes = fuchsia_fs::file::read(&file).await.expect(" | let temp_filename = generate_tempfile_name(tempfile_prefix);
let file_proxy = fuchsia_fs::directory::open_file(
dir_proxy, | random_line_split |
staged_file.rs | ::WriteError),
/// Failed to rename the staged file.
#[error("Failed to rename temp file to target: {0}")]
RenameError(#[from] fuchsia_fs::node::RenameError),
/// Failed to flush data.
#[error("Failed to flush to disk: {0}")]
FlushError(#[source] zx::Status),
/// Failed to close the staged file.
#[error("Failed to close backing storage: {0}")]
CloseError(#[source] zx::Status),
/// Failed to readdir.
#[error("Failed to readdir: {0}")]
ReaddirError(#[from] fuchsia_fs::directory::Error),
/// Failed to unlink file.
#[error("Failed to unlink file: {0}")]
UnlinkError(#[source] zx::Status),
}
/// StagedFile is a wrapper around a |&DirectoryProxy| and a |FileProxy|
/// for a staged file within that directory.
/// The primary purpose of StagedFile is to implement the atomic write workflow
/// summarized as open -> write -> sync -> close -> rename. This workflow is
/// simplified to simply write -> commit.
/// One caveat to the use of StagedFile is that in the event of power loss or
/// a crash, there may be orphaned temporary files in the directory.
/// This means that clients _should_ clean up their directories of temporary
/// files prior to operating in that directory. As such, it is important to
/// choose a |filename_prefix| that is guaranteed not to collide with
/// |target_filename|s given when calling StagedFile::commit.
/// It would have been preferable to use the tempfile crate here, but it lacks
/// the ability to open things without making use of paths and namespaces, and
/// as such, StagedFile should only be used in cases where we must supply our
/// own |DirectoryProxy|.
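/// Typical usage (illustrative sketch; assumes an already-open `dir_proxy`
/// and an async context):
/// ```ignore
/// let mut staged = StagedFile::new(&dir_proxy, "temp-").await?;
/// staged.write(b"new file contents").await?;
/// staged.commit("settings.json").await?;
/// ```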
pub struct StagedFile<'a> {
dir_proxy: &'a fio::DirectoryProxy,
temp_filename: String,
file_proxy: fio::FileProxy,
}
impl<'a> StagedFile<'a> {
/// Creates a new instance of StagedFile bound to the lifetime of
/// |dir_proxy| that respects |filename_prefix|.
/// |filename_prefix| must have a length > 0.
pub async fn new(
dir_proxy: &'a fio::DirectoryProxy,
tempfile_prefix: &str,
) -> Result<StagedFile<'a>, StagedFileError> {
if tempfile_prefix.is_empty() {
return Err(StagedFileError::InvalidArguments(String::from(
"filename_prefix must not be empty",
)));
}
let temp_filename = generate_tempfile_name(tempfile_prefix);
let file_proxy = fuchsia_fs::directory::open_file(
dir_proxy,
&temp_filename,
fio::OpenFlags::RIGHT_READABLE
| fio::OpenFlags::RIGHT_WRITABLE
| fio::OpenFlags::CREATE,
)
.await?;
Ok(StagedFile { dir_proxy, temp_filename, file_proxy })
}
/// Writes data to the backing staged file proxy.
/// This file is not guaranteed to be persisted until commit is called,
/// at which point it will be renamed to |target_filename|.
pub async fn write(&mut self, data: &[u8]) -> Result<(), StagedFileError> {
let () = fuchsia_fs::file::write(&self.file_proxy, data).await?;
Ok(())
}
/// Commits the data in the staged file to |target_filename| via the
/// traditional sync -> close -> rename atomic write workflow.
/// Calling commit does not guarantee that |target_filename| will be
/// available, but it does guarantee atomicity of the file if it does
/// exist.
pub async fn commit(self, target_filename: &str) -> Result<(), StagedFileError> {
// Do the usual atomic commit via sync, close, and rename-to-target.
// Stale files left by a crash should be cleaned up by calling cleanup_stale_files on the
// next startup.
let () = self
.file_proxy
.sync()
.await?
.map_err(zx::Status::from_raw)
.map_err(StagedFileError::FlushError)?;
let () = self
.file_proxy
.close()
.await?
.map_err(zx::Status::from_raw)
.map_err(StagedFileError::CloseError)?;
fuchsia_fs::directory::rename(self.dir_proxy, &self.temp_filename, target_filename)
.await
.map_err(StagedFileError::RenameError)?;
Ok(())
}
    /// Helper function that removes stale temporary files from |dir_proxy|:
    /// any directory entry whose name starts with |tempfile_prefix| is unlinked.
pub async fn cleanup_stale_files(
dir_proxy: &fio::DirectoryProxy,
tempfile_prefix: &str,
) -> Result<(), Vec<StagedFileError>> {
let dirents_res = fuchsia_fs::directory::readdir(dir_proxy).await;
let dirents = dirents_res.map_err(|err| vec![StagedFileError::ReaddirError(err)])?;
let mut failures = Vec::new();
for d in dirents.iter() {
let name = &d.name;
// For filenames that are known to be temporary, try to remove them.
if name.starts_with(tempfile_prefix) {
warn!("Removing unexpected file '{}' from directory", &name);
let fidl_res = dir_proxy.unlink(name, fio::UnlinkOptions::EMPTY).await;
match fidl_res {
Err(x) => failures.push(StagedFileError::FidlError(x)),
Ok(unlink_res) => |
}
}
}
if failures.is_empty() {
Ok(())
} else {
Err(failures)
}
}
}
/// Generates a temporary filename using |thread_rng| to append random chars to
/// a given |prefix|.
fn generate_tempfile_name(prefix: &str) -> String {
// Generate a tempfile with name "{prefix}-{random}"
let mut buf = String::with_capacity(TEMPFILE_RANDOM_LENGTH + prefix.len() + 1);
buf.push_str(prefix);
buf.push('-');
let mut rng = thread_rng();
std::iter::repeat(())
.map(|()| rng.sample(rand::distributions::Alphanumeric))
.map(char::from)
.take(TEMPFILE_RANDOM_LENGTH)
.for_each(|c| buf.push(c));
buf
}
#[cfg(test)]
mod test {
use {super::*, tempfile::TempDir};
#[fuchsia::test]
async fn test_normal_flow() {
let tmp_dir = TempDir::new().unwrap();
let dir = fuchsia_fs::directory::open_in_namespace(
tmp_dir.path().to_str().unwrap(),
fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_WRITABLE,
)
.expect("could not open temp dir");
let mut staged_file = StagedFile::new(&dir, "prefix-").await.unwrap();
staged_file.write(b"this is some file content".as_ref()).await.unwrap();
staged_file.commit("target_file_01").await.unwrap();
// Check that target_file_01 has been created.
let open_res = fuchsia_fs::directory::open_file(
&dir,
"target_file_01",
fio::OpenFlags::RIGHT_READABLE,
)
.await;
assert!(open_res.is_ok());
let file_bytes = fuchsia_fs::file::read(&open_res.unwrap()).await.unwrap();
assert_eq!(file_bytes, b"this is some file content");
}
#[fuchsia::test]
async fn test_empty_tempfile_prefix() {
let tmp_dir = TempDir::new().unwrap();
let dir = fuchsia_fs::directory::open_in_namespace(
tmp_dir.path().to_str().unwrap(),
fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_WRITABLE,
)
.expect("could not open temp dir");
assert!(StagedFile::new(&dir, "").await.is_err());
}
async fn write_test_file_content(dir_proxy: &fio::DirectoryProxy, filename: &str, data: &[u8]) {
let file_proxy = fuchsia_fs::directory::open_file(
dir_proxy,
filename,
fio::OpenFlags::RIGHT_READABLE
| fio::OpenFlags::RIGHT_WRITABLE
| fio::OpenFlags::CREATE,
)
.await
.expect("could not open test file");
fuchsia_fs::file::write(&file_proxy, data).await.expect("could not write test file data")
}
async fn file_exists_with_data(
dir_proxy: &fio::DirectoryProxy,
filename: &str,
expected_data: &[u8],
) -> bool {
let file =
fuchsia_fs::directory::open_file(dir_proxy, filename, fio::OpenFlags::RIGHT_READABLE)
.await
.expect("could not open file");
let bytes = fuchsia_fs::file::read(&file).await.expect | {
if let Err(unlink_err) = unlink_res {
failures.push(StagedFileError::UnlinkError(zx::Status::from_raw(
unlink_err,
)));
}
} | conditional_block |
staged_file.rs | {
/// Invalid arguments.
#[error("Invalid arguments to create a staged file: {0}")]
InvalidArguments(String),
/// Failed to open a file or directory.
#[error("Failed to open: {0}")]
OpenError(#[from] fuchsia_fs::node::OpenError),
/// Failed during a FIDL call.
#[error("Failed during FIDL call: {0}")]
FidlError(#[from] fidl::Error),
/// Failed to write to the staged file.
#[error("Failed to write to backing storage: {0}")]
WriteError(#[from] fuchsia_fs::file::WriteError),
/// Failed to rename the staged file.
#[error("Failed to rename temp file to target: {0}")]
RenameError(#[from] fuchsia_fs::node::RenameError),
/// Failed to flush data.
#[error("Failed to flush to disk: {0}")]
FlushError(#[source] zx::Status),
/// Failed to close the staged file.
#[error("Failed to close backing storage: {0}")]
CloseError(#[source] zx::Status),
/// Failed to readdir.
#[error("Failed to readdir: {0}")]
ReaddirError(#[from] fuchsia_fs::directory::Error),
/// Failed to unlink file.
#[error("Failed to unlink file: {0}")]
UnlinkError(#[source] zx::Status),
}
/// StagedFile is a wrapper around a |&DirectoryProxy| and a |FileProxy|
/// for a staged file within that directory.
/// The primary purpose of StagedFile is to implement the atomic write workflow
/// summarized as open -> write -> sync -> close -> rename. This workflow is
/// simplified to simply write -> commit.
/// One caveat to the use of StagedFile is that in the event of power loss or
/// a crash, there may be orphaned temporary files in the directory.
/// This means that clients _should_ clean up their directories of temporary
/// files prior to operating in that directory. As such, it is important to
/// choose a |filename_prefix| that is guaranteed not to collide with
/// |target_filename|s given when calling StagedFile::commit.
/// It would have been preferable to use the tempfile crate here, but it lacks
/// the ability to open things without making use of paths and namespaces, and
/// as such, StagedFile should only be used in cases where we must supply our
/// own |DirectoryProxy|.
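/// Typical usage (illustrative sketch; assumes an already-open `dir_proxy`
/// and an async context):
/// ```ignore
/// let mut staged = StagedFile::new(&dir_proxy, "temp-").await?;
/// staged.write(b"new file contents").await?;
/// staged.commit("settings.json").await?;
/// ```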
pub struct StagedFile<'a> {
dir_proxy: &'a fio::DirectoryProxy,
temp_filename: String,
file_proxy: fio::FileProxy,
}
impl<'a> StagedFile<'a> {
/// Creates a new instance of StagedFile bound to the lifetime of
/// |dir_proxy| that respects |filename_prefix|.
/// |filename_prefix| must have a length > 0.
pub async fn new(
dir_proxy: &'a fio::DirectoryProxy,
tempfile_prefix: &str,
) -> Result<StagedFile<'a>, StagedFileError> {
if tempfile_prefix.is_empty() {
return Err(StagedFileError::InvalidArguments(String::from(
"filename_prefix must not be empty",
)));
}
let temp_filename = generate_tempfile_name(tempfile_prefix);
let file_proxy = fuchsia_fs::directory::open_file(
dir_proxy,
&temp_filename,
fio::OpenFlags::RIGHT_READABLE
| fio::OpenFlags::RIGHT_WRITABLE
| fio::OpenFlags::CREATE,
)
.await?;
Ok(StagedFile { dir_proxy, temp_filename, file_proxy })
}
/// Writes data to the backing staged file proxy.
/// This file is not guaranteed to be persisted until commit is called,
/// at which point it will be renamed to |target_filename|.
pub async fn write(&mut self, data: &[u8]) -> Result<(), StagedFileError> {
let () = fuchsia_fs::file::write(&self.file_proxy, data).await?;
Ok(())
}
/// Commits the data in the staged file to |target_filename| via the
/// traditional sync -> close -> rename atomic write workflow.
/// Calling commit does not guarantee that |target_filename| will be
/// available, but it does guarantee atomicity of the file if it does
/// exist.
pub async fn commit(self, target_filename: &str) -> Result<(), StagedFileError> {
// Do the usual atomic commit via sync, close, and rename-to-target.
// Stale files left by a crash should be cleaned up by calling cleanup_stale_files on the
// next startup.
let () = self
.file_proxy
.sync()
.await?
.map_err(zx::Status::from_raw)
.map_err(StagedFileError::FlushError)?;
let () = self
.file_proxy
.close()
.await?
.map_err(zx::Status::from_raw)
.map_err(StagedFileError::CloseError)?;
fuchsia_fs::directory::rename(self.dir_proxy, &self.temp_filename, target_filename)
.await
.map_err(StagedFileError::RenameError)?;
Ok(())
}
    /// Helper function that removes stale temporary files from |dir_proxy|:
    /// any directory entry whose name starts with |tempfile_prefix| is unlinked.
pub async fn cleanup_stale_files(
dir_proxy: &fio::DirectoryProxy,
tempfile_prefix: &str,
) -> Result<(), Vec<StagedFileError>> {
let dirents_res = fuchsia_fs::directory::readdir(dir_proxy).await;
let dirents = dirents_res.map_err(|err| vec![StagedFileError::ReaddirError(err)])?;
let mut failures = Vec::new();
for d in dirents.iter() {
let name = &d.name;
// For filenames that are known to be temporary, try to remove them.
if name.starts_with(tempfile_prefix) {
warn!("Removing unexpected file '{}' from directory", &name);
let fidl_res = dir_proxy.unlink(name, fio::UnlinkOptions::EMPTY).await;
match fidl_res {
Err(x) => failures.push(StagedFileError::FidlError(x)),
Ok(unlink_res) => {
if let Err(unlink_err) = unlink_res {
failures.push(StagedFileError::UnlinkError(zx::Status::from_raw(
unlink_err,
)));
}
}
}
}
}
if failures.is_empty() {
Ok(())
} else {
Err(failures)
}
}
}
/// Generates a temporary filename using |thread_rng| to append random chars to
/// a given |prefix|.
fn generate_tempfile_name(prefix: &str) -> String {
// Generate a tempfile with name "{prefix}-{random}"
let mut buf = String::with_capacity(TEMPFILE_RANDOM_LENGTH + prefix.len() + 1);
buf.push_str(prefix);
buf.push('-');
let mut rng = thread_rng();
std::iter::repeat(())
.map(|()| rng.sample(rand::distributions::Alphanumeric))
.map(char::from)
.take(TEMPFILE_RANDOM_LENGTH)
.for_each(|c| buf.push(c));
buf
}
#[cfg(test)]
mod test {
use {super::*, tempfile::TempDir};
#[fuchsia::test]
async fn test_normal_flow() {
let tmp_dir = TempDir::new().unwrap();
let dir = fuchsia_fs::directory::open_in_namespace(
tmp_dir.path().to_str().unwrap(),
fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_WRITABLE,
)
.expect("could not open temp dir");
let mut staged_file = StagedFile::new(&dir, "prefix-").await.unwrap();
staged_file.write(b"this is some file content".as_ref()).await.unwrap();
staged_file.commit("target_file_01").await.unwrap();
// Check that target_file_01 has been created.
let open_res = fuchsia_fs::directory::open_file(
&dir,
"target_file_01",
fio::OpenFlags::RIGHT_READABLE,
)
.await;
assert!(open_res.is_ok());
let file_bytes = fuchsia_fs::file::read(&open_res.unwrap()).await.unwrap();
assert_eq!(file_bytes, b"this is some file content");
}
#[fuchsia::test]
async fn test_empty_tempfile_prefix() {
let tmp_dir = TempDir::new().unwrap();
let dir = fuchsia_fs::directory::open_in_namespace(
tmp_dir.path().to_str().unwrap(),
fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_WRITABLE,
)
.expect("could not open temp dir");
assert!(StagedFile::new(&dir, "").await.is_err());
}
async fn write_test_file_content(dir_proxy: &fio::DirectoryProxy, filename: &str, data: &[u8]) {
let file_proxy = fuchsia_fs::directory::open_file(
dir_proxy,
filename,
fio::OpenFlags::RIGHT_READABLE
| fio::OpenFlags::RIGHT_WRITABLE
| fio::OpenFlags::CREATE,
| StagedFileError | identifier_name |
|
staged_file.rs | |target_filename|s given when calling StagedFile::commit.
/// It would have been preferable to use the tempfile crate here, but it lacks
/// the ability to open things without making use of paths and namespaces, and
/// as such, StagedFile should only be used in cases where we must supply our
/// own |DirectoryProxy|.
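/// Typical usage (illustrative sketch; assumes an already-open `dir_proxy`
/// and an async context):
/// ```ignore
/// let mut staged = StagedFile::new(&dir_proxy, "temp-").await?;
/// staged.write(b"new file contents").await?;
/// staged.commit("settings.json").await?;
/// ```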
pub struct StagedFile<'a> {
dir_proxy: &'a fio::DirectoryProxy,
temp_filename: String,
file_proxy: fio::FileProxy,
}
impl<'a> StagedFile<'a> {
/// Creates a new instance of StagedFile bound to the lifetime of
/// |dir_proxy| that respects |filename_prefix|.
/// |filename_prefix| must have a length > 0.
pub async fn new(
dir_proxy: &'a fio::DirectoryProxy,
tempfile_prefix: &str,
) -> Result<StagedFile<'a>, StagedFileError> {
if tempfile_prefix.is_empty() {
return Err(StagedFileError::InvalidArguments(String::from(
"filename_prefix must not be empty",
)));
}
let temp_filename = generate_tempfile_name(tempfile_prefix);
let file_proxy = fuchsia_fs::directory::open_file(
dir_proxy,
&temp_filename,
fio::OpenFlags::RIGHT_READABLE
| fio::OpenFlags::RIGHT_WRITABLE
| fio::OpenFlags::CREATE,
)
.await?;
Ok(StagedFile { dir_proxy, temp_filename, file_proxy })
}
/// Writes data to the backing staged file proxy.
/// This file is not guaranteed to be persisted until commit is called,
/// at which point it will be renamed to |target_filename|.
pub async fn write(&mut self, data: &[u8]) -> Result<(), StagedFileError> {
let () = fuchsia_fs::file::write(&self.file_proxy, data).await?;
Ok(())
}
/// Commits the data in the staged file to |target_filename| via the
/// traditional sync -> close -> rename atomic write workflow.
/// Calling commit does not guarantee that |target_filename| will be
/// available, but it does guarantee atomicity of the file if it does
/// exist.
pub async fn commit(self, target_filename: &str) -> Result<(), StagedFileError> {
// Do the usual atomic commit via sync, close, and rename-to-target.
// Stale files left by a crash should be cleaned up by calling cleanup_stale_files on the
// next startup.
let () = self
.file_proxy
.sync()
.await?
.map_err(zx::Status::from_raw)
.map_err(StagedFileError::FlushError)?;
let () = self
.file_proxy
.close()
.await?
.map_err(zx::Status::from_raw)
.map_err(StagedFileError::CloseError)?;
fuchsia_fs::directory::rename(self.dir_proxy, &self.temp_filename, target_filename)
.await
.map_err(StagedFileError::RenameError)?;
Ok(())
}
    /// Helper function that removes stale temporary files from |dir_proxy|:
    /// any directory entry whose name starts with |tempfile_prefix| is unlinked.
pub async fn cleanup_stale_files(
dir_proxy: &fio::DirectoryProxy,
tempfile_prefix: &str,
) -> Result<(), Vec<StagedFileError>> {
let dirents_res = fuchsia_fs::directory::readdir(dir_proxy).await;
let dirents = dirents_res.map_err(|err| vec![StagedFileError::ReaddirError(err)])?;
let mut failures = Vec::new();
for d in dirents.iter() {
let name = &d.name;
// For filenames that are known to be temporary, try to remove them.
if name.starts_with(tempfile_prefix) {
warn!("Removing unexpected file '{}' from directory", &name);
let fidl_res = dir_proxy.unlink(name, fio::UnlinkOptions::EMPTY).await;
match fidl_res {
Err(x) => failures.push(StagedFileError::FidlError(x)),
Ok(unlink_res) => {
if let Err(unlink_err) = unlink_res {
failures.push(StagedFileError::UnlinkError(zx::Status::from_raw(
unlink_err,
)));
}
}
}
}
}
if failures.is_empty() {
Ok(())
} else {
Err(failures)
}
}
}
/// Generates a temporary filename using |thread_rng| to append random chars to
/// a given |prefix|.
fn generate_tempfile_name(prefix: &str) -> String {
// Generate a tempfile with name "{prefix}-{random}"
let mut buf = String::with_capacity(TEMPFILE_RANDOM_LENGTH + prefix.len() + 1);
buf.push_str(prefix);
buf.push('-');
let mut rng = thread_rng();
std::iter::repeat(())
.map(|()| rng.sample(rand::distributions::Alphanumeric))
.map(char::from)
.take(TEMPFILE_RANDOM_LENGTH)
.for_each(|c| buf.push(c));
buf
}
#[cfg(test)]
mod test {
use {super::*, tempfile::TempDir};
#[fuchsia::test]
async fn test_normal_flow() {
let tmp_dir = TempDir::new().unwrap();
let dir = fuchsia_fs::directory::open_in_namespace(
tmp_dir.path().to_str().unwrap(),
fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_WRITABLE,
)
.expect("could not open temp dir");
let mut staged_file = StagedFile::new(&dir, "prefix-").await.unwrap();
staged_file.write(b"this is some file content".as_ref()).await.unwrap();
staged_file.commit("target_file_01").await.unwrap();
// Check that target_file_01 has been created.
let open_res = fuchsia_fs::directory::open_file(
&dir,
"target_file_01",
fio::OpenFlags::RIGHT_READABLE,
)
.await;
assert!(open_res.is_ok());
let file_bytes = fuchsia_fs::file::read(&open_res.unwrap()).await.unwrap();
assert_eq!(file_bytes, b"this is some file content");
}
#[fuchsia::test]
async fn test_empty_tempfile_prefix() {
let tmp_dir = TempDir::new().unwrap();
let dir = fuchsia_fs::directory::open_in_namespace(
tmp_dir.path().to_str().unwrap(),
fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_WRITABLE,
)
.expect("could not open temp dir");
assert!(StagedFile::new(&dir, "").await.is_err());
}
async fn write_test_file_content(dir_proxy: &fio::DirectoryProxy, filename: &str, data: &[u8]) {
let file_proxy = fuchsia_fs::directory::open_file(
dir_proxy,
filename,
fio::OpenFlags::RIGHT_READABLE
| fio::OpenFlags::RIGHT_WRITABLE
| fio::OpenFlags::CREATE,
)
.await
.expect("could not open test file");
fuchsia_fs::file::write(&file_proxy, data).await.expect("could not write test file data")
}
async fn file_exists_with_data(
dir_proxy: &fio::DirectoryProxy,
filename: &str,
expected_data: &[u8],
) -> bool {
let file =
fuchsia_fs::directory::open_file(dir_proxy, filename, fio::OpenFlags::RIGHT_READABLE)
.await
.expect("could not open file");
let bytes = fuchsia_fs::file::read(&file).await.expect("could not read file data");
expected_data == bytes
}
#[fuchsia::test]
    async fn test_cleanup_stale_files() {
let tmp_dir = TempDir::new().unwrap();
let dir = fuchsia_fs::directory::open_in_namespace(
tmp_dir.path().to_str().unwrap(),
fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_WRITABLE,
)
.expect("could not open temp dir");
// Write a variety of staged and non-staged files to the directory.
write_test_file_content(&dir, "staged-001", b"staged-001".as_ref()).await;
write_test_file_content(&dir, "real-001", b"real-001".as_ref()).await;
write_test_file_content(&dir, "staged-002", b"staged-002".as_ref()).await;
write_test_file_content(&dir, "real-002", b"real-002".as_ref()).await;
write_test_file_content(&dir, "staged-003", b"staged-003".as_ref()).await;
write_test_file_content(&dir, "004", b"004".as_ref()).await;
// Clean up stale files.
StagedFile::cleanup_stale_files(&dir, "staged-").await.unwrap();
        // Ensure that only the non-staged files remain.
python3 |
return U / S
def main():
options = _parse_args()
V = 1000
data = np.genfromtxt("a-leer.csv", delimiter="\t")
t = data[:,0]
U = data[:,1] / V / 1000
U_err = 0.7e-3 / V
offset = np.mean(U[-3:])
x = np.linspace(min(t), max(t))
y = np.ones(x.size) * offset
pl.plot(x, y * 10**6, label="Offset")
print "Offset: {:.3g} V".format(offset)
pl.errorbar(t, U * 10**6, yerr=U_err * 10**6, linestyle="none", marker="+",
label="Messdaten")
pl.grid(True)
pl.legend(loc="best")
pl.title(u"Bestimmung des Offsets")
pl.xlabel(ur"Zeit $t / \mathrm{s}$")
pl.ylabel(ur"Thermospannung $U / \mathrm{\mu V}$")
pl.savefig("Plot_a-leer.pdf")
pl.clf()
V = 100
data = np.genfromtxt("a-Lampe.csv", delimiter="\t")
t = data[:,0]
U = data[:,1] / V / 1000 - offset
U_err = 0.7e-3 / V
x = np.linspace(min(t), max(t))
y = np.ones(x.size) * max(U) * 0.9
pl.plot(x, y * 10**6, label=ur"$90\%$")
pl.errorbar(t, U * 10**6, yerr=U_err * 10**6, linestyle="none", marker="+",
label="Messdaten")
pl.grid(True)
pl.legend(loc="best")
pl.title(u"Bestimmung der Ansprechzeit")
pl.xlabel(ur"Zeit $t / \mathrm{s}$")
pl.ylabel(ur"Thermospannung $U / \mathrm{\mu V}$")
pl.savefig("Plot_a-Lampe.pdf")
pl.clf()
# Lesliewürfel
print """
Lesliewürfel
============
"""
glanz = np.genfromtxt("b-glanz.csv", delimiter="\t")
matt = np.genfromtxt("b-matt.csv", delimiter="\t")
schwarz = np.genfromtxt("b-schwarz.csv", delimiter="\t")
weiss = np.genfromtxt("b-weiss.csv", delimiter="\t")
T0 = 19.0 + 273.15
T0_err = 1.0
glanz[:,0] += 273.15
matt[:,0] += 273.15
schwarz[:,0] += 273.15
weiss[:,0] += 273.15
glanz[:,1] /= 1000 * V
matt[:,1] /= 1000 * V
schwarz[:,1] /= 1000 * V
weiss[:,1] /= 1000 * V
glanz[:,1] -= offset
matt[:,1] -= offset
schwarz[:,1] -= offset
weiss[:,1] -= offset
glanz_phi = phif(glanz[:,1])
matt_phi = phif(matt[:,1])
schwarz_phi = phif(schwarz[:,1])
weiss_phi = phif(weiss[:,1])
T_err = 0.3
sigma = 5.670373e-8
def boltzmann(T, epsilon, offset):
return epsilon * sigma * T**4 + offset
glanz_popt, glanz_pconv = op.curve_fit(boltzmann, glanz[:,0], glanz_phi)
matt_popt, matt_pconv = op.curve_fit(boltzmann, matt[:,0], matt_phi)
schwarz_popt, schwarz_pconv = op.curve_fit(boltzmann, schwarz[:,0], schwarz_phi)
weiss_popt, weiss_pconv = op.curve_fit(boltzmann, weiss[:,0], weiss_phi)
glanz_x = np.linspace(min(glanz[:,0]), max(glanz[:,0]))
glanz_y = boltzmann(glanz_x, *glanz_popt)
pl.plot(glanz_x, glanz_y, label="Fit glanz", color="gold")
matt_x = np.linspace(min(matt[:,0]), max(matt[:,0]))
matt_y = boltzmann(matt_x, *matt_popt)
pl.plot(matt_x, matt_y, label="Fit matt", color="yellow")
schwarz_x = np.linspace(min(schwarz[:,0]), max(schwarz[:,0]))
schwarz_y = boltzmann(schwarz_x, *schwarz_popt)
pl.plot(schwarz_x, schwarz_y, label="Fit schwarz", color="black")
weiss_x = np.linspace(min(weiss[:,0]), max(weiss[:,0]))
weiss_y = boltzmann(weiss_x, *weiss_popt)
pl.plot(weiss_x, weiss_y, label="Fit weiss", color="gray")
print "glanz ε = {:.3g} ± {:.3g}".format(glanz_popt[0], np.sqrt(glanz_pconv.diagonal()[0]))
print "glanz offset = {:.3g} ± {:.3g}".format(glanz_popt[1], np.sqrt(glanz_pconv.diagonal()[1]))
print "matt ε = {:.3g} ± {:.3g}".format(matt_popt[0], np.sqrt(matt_pconv.diagonal()[0]))
print "matt offset = {:.3g} ± {:.3g}".format(matt_popt[1], np.sqrt(matt_pconv.diagonal()[1]))
print "schwarz ε = {:.3g} ± {:.3g}".format(schwarz_popt[0], np.sqrt(schwarz_pconv.diagonal()[0]))
print "schwarz offset = {:.3g} ± {:.3g}".format(schwarz_popt[1], np.sqrt(schwarz_pconv.diagonal()[1]))
print "weiss ε = {:.3g} ± {:.3g}".format(weiss_popt[0], np.sqrt(weiss_pconv.diagonal()[0]))
print "weiss offset = {:.3g} ± {:.3g}".format(weiss_popt[1], np.sqrt(weiss_pconv.diagonal()[1]))
pl.errorbar(glanz[:,0], glanz_phi, xerr=T_err, yerr=U_err/S,
label="glanz", color="gold", linestyle="none")
pl.errorbar(matt[:,0], matt_phi, xerr=T_err, yerr=U_err/S,
label="matt", color="yellow", linestyle="none")
pl.errorbar(schwarz[:,0], schwarz_phi, xerr=T_err, yerr=U_err/S,
label="schwarz", color="black", linestyle="none")
pl.errorbar(weiss[:,0], weiss_phi, xerr=T_err, yerr=U_err/S,
label="weiss", color="gray", linestyle="none")
header = ["T / K", "Phi/F in W/m^2", "Fehler T", "Fehler Phi/F"]
print """
Tabellen für den Lesliewürfel-Plot
----------------------------------
"""
print "Glanz"
glanz_table = PrettyTable(header)
for row in zip(glanz[:,0], glanz_phi, np.ones(glanz[:,0].size)*T_err, np.ones(glanz_phi.size)*U_err/S):
glanz_table.add_row(row)
print glanz_table
print
print "Matt"
matt_table = PrettyTable(header)
for row in zip(matt[:,0], matt_phi, np.ones(matt[:,0].size)*T_err, np.ones(matt_phi.size)*U_err/S):
matt_table.add_row(row)
print matt_table
print
print "Schwarz"
schwarz_table = PrettyTable(header)
for row in zip(schwarz[:,0], schwarz_phi, np.ones(schwarz[:,0].size)*T_err, np.ones(schwarz_phi.size)*U_err/S):
schwarz_table.add_row(row)
print schwarz_table
print
print "Weiß"
weiss_table = PrettyTable(header)
for row in zip(weiss[:,0], weiss_phi, np.ones(weiss[:,0].size)*T_err, np.ones(weiss_phi.size)*U_err/S):
weiss_table.add_row(row)
print weiss_table
print
epsilon = 0.1
x = np.linspace(min([min(x) for x in [glanz[:,0], matt[:,0], schwarz[:,0],
weiss[:,0]]]),
                    max([max(x) for x in [glanz[:,0], matt[:,0], schwarz[:,0],
                                          weiss[:,0]]]),
                    100)
python3 |
pl.plot(x, y * 10**6, label=ur"$90\%$")
pl.errorbar(t, U * 10**6, yerr=U_err * 10**6, linestyle="none", marker="+",
label="Messdaten")
pl.grid(True)
pl.legend(loc="best")
pl.title(u"Bestimmung der Ansprechzeit")
pl.xlabel(ur"Zeit $t / \mathrm{s}$")
pl.ylabel(ur"Thermospannung $U / \mathrm{\mu V}$")
pl.savefig("Plot_a-Lampe.pdf")
pl.clf()
# Lesliewürfel
print """
Lesliewürfel
============
"""
glanz = np.genfromtxt("b-glanz.csv", delimiter="\t")
matt = np.genfromtxt("b-matt.csv", delimiter="\t")
schwarz = np.genfromtxt("b-schwarz.csv", delimiter="\t")
weiss = np.genfromtxt("b-weiss.csv", delimiter="\t")
T0 = 19.0 + 273.15
T0_err = 1.0
glanz[:,0] += 273.15
matt[:,0] += 273.15
schwarz[:,0] += 273.15
weiss[:,0] += 273.15
glanz[:,1] /= 1000 * V
matt[:,1] /= 1000 * V
schwarz[:,1] /= 1000 * V
weiss[:,1] /= 1000 * V
glanz[:,1] -= offset
matt[:,1] -= offset
schwarz[:,1] -= offset
weiss[:,1] -= offset
glanz_phi = phif(glanz[:,1])
matt_phi = phif(matt[:,1])
schwarz_phi = phif(schwarz[:,1])
weiss_phi = phif(weiss[:,1])
T_err = 0.3
sigma = 5.670373e-8
def boltzmann(T, epsilon, offset):
retur | glanz_popt, glanz_pconv = op.curve_fit(boltzmann, glanz[:,0], glanz_phi)
matt_popt, matt_pconv = op.curve_fit(boltzmann, matt[:,0], matt_phi)
schwarz_popt, schwarz_pconv = op.curve_fit(boltzmann, schwarz[:,0], schwarz_phi)
weiss_popt, weiss_pconv = op.curve_fit(boltzmann, weiss[:,0], weiss_phi)
glanz_x = np.linspace(min(glanz[:,0]), max(glanz[:,0]))
glanz_y = boltzmann(glanz_x, *glanz_popt)
pl.plot(glanz_x, glanz_y, label="Fit glanz", color="gold")
matt_x = np.linspace(min(matt[:,0]), max(matt[:,0]))
matt_y = boltzmann(matt_x, *matt_popt)
pl.plot(matt_x, matt_y, label="Fit matt", color="yellow")
schwarz_x = np.linspace(min(schwarz[:,0]), max(schwarz[:,0]))
schwarz_y = boltzmann(schwarz_x, *schwarz_popt)
pl.plot(schwarz_x, schwarz_y, label="Fit schwarz", color="black")
weiss_x = np.linspace(min(weiss[:,0]), max(weiss[:,0]))
weiss_y = boltzmann(weiss_x, *weiss_popt)
pl.plot(weiss_x, weiss_y, label="Fit weiss", color="gray")
print "glanz ε = {:.3g} ± {:.3g}".format(glanz_popt[0], np.sqrt(glanz_pconv.diagonal()[0]))
print "glanz offset = {:.3g} ± {:.3g}".format(glanz_popt[1], np.sqrt(glanz_pconv.diagonal()[1]))
print "matt ε = {:.3g} ± {:.3g}".format(matt_popt[0], np.sqrt(matt_pconv.diagonal()[0]))
print "matt offset = {:.3g} ± {:.3g}".format(matt_popt[1], np.sqrt(matt_pconv.diagonal()[1]))
print "schwarz ε = {:.3g} ± {:.3g}".format(schwarz_popt[0], np.sqrt(schwarz_pconv.diagonal()[0]))
print "schwarz offset = {:.3g} ± {:.3g}".format(schwarz_popt[1], np.sqrt(schwarz_pconv.diagonal()[1]))
print "weiss ε = {:.3g} ± {:.3g}".format(weiss_popt[0], np.sqrt(weiss_pconv.diagonal()[0]))
print "weiss offset = {:.3g} ± {:.3g}".format(weiss_popt[1], np.sqrt(weiss_pconv.diagonal()[1]))
pl.errorbar(glanz[:,0], glanz_phi, xerr=T_err, yerr=U_err/S,
label="glanz", color="gold", linestyle="none")
pl.errorbar(matt[:,0], matt_phi, xerr=T_err, yerr=U_err/S,
label="matt", color="yellow", linestyle="none")
pl.errorbar(schwarz[:,0], schwarz_phi, xerr=T_err, yerr=U_err/S,
label="schwarz", color="black", linestyle="none")
pl.errorbar(weiss[:,0], weiss_phi, xerr=T_err, yerr=U_err/S,
label="weiss", color="gray", linestyle="none")
header = ["T / K", "Phi/F in W/m^2", "Fehler T", "Fehler Phi/F"]
print """
Tabellen für den Lesliewürfel-Plot
----------------------------------
"""
print "Glanz"
glanz_table = PrettyTable(header)
for row in zip(glanz[:,0], glanz_phi, np.ones(glanz[:,0].size)*T_err, np.ones(glanz_phi.size)*U_err/S):
glanz_table.add_row(row)
print glanz_table
print
print "Matt"
matt_table = PrettyTable(header)
for row in zip(matt[:,0], matt_phi, np.ones(matt[:,0].size)*T_err, np.ones(matt_phi.size)*U_err/S):
matt_table.add_row(row)
print matt_table
print
print "Schwarz"
schwarz_table = PrettyTable(header)
for row in zip(schwarz[:,0], schwarz_phi, np.ones(schwarz[:,0].size)*T_err, np.ones(schwarz_phi.size)*U_err/S):
schwarz_table.add_row(row)
print schwarz_table
print
print "Weiß"
weiss_table = PrettyTable(header)
for row in zip(weiss[:,0], weiss_phi, np.ones(weiss[:,0].size)*T_err, np.ones(weiss_phi.size)*U_err/S):
weiss_table.add_row(row)
print weiss_table
print
epsilon = 0.1
x = np.linspace(min([min(x) for x in [glanz[:,0], matt[:,0], schwarz[:,0],
weiss[:,0]]]),
max([max(x) for x in [glanz[:,0], matt[:,0], schwarz[:,0],
weiss[:,0]]]),
100)
offset = - epsilon * sigma * T0**4
print "ideal offset = {:.3g}".format(offset)
y = boltzmann(x, epsilon, offset)
pl.plot(x, y, label=ur"$\epsilon = 0.1$")
pl.grid(True)
pl.title(u"Lesliewürfel")
pl.xlabel(ur"Temperatur $T / \mathrm{K}$")
pl.ylabel(ur"Strahlungsfluss $\frac{\Phi}{F} / \mathrm{\frac{W}{m^2}}$")
pl.legend(loc="best", prop={"size": 12})
pl.savefig("Plot_b.pdf")
pl.clf()
# Aufgabe c
print """
Aufgabe c
=========
"""
data = np.genfromtxt("c-erste.csv", delimiter="\t")
d = data[:,0] / 100
U = data[:,1] / V
phi = phif(U)
def c(x, a, b):
return a*x + b
dx = d**(-2)
dy = phi
dx_err = np.abs(-2 * d**(-3)) * 0.001
dy_err = 0.001 / S
popt, pconv = op.curve_fit(c, dx, dy)
x = np.linspace(min(dx), max(dx))
y = c(x, *popt)
pl.plot(x, y, label="Fit")
print "Fitparameter"
| n epsilon * sigma * T**4 + offset
| identifier_body |
python3 | warz_popt[0], np.sqrt(schwarz_pconv.diagonal()[0]))
print "schwarz offset = {:.3g} ± {:.3g}".format(schwarz_popt[1], np.sqrt(schwarz_pconv.diagonal()[1]))
print "weiss ε = {:.3g} ± {:.3g}".format(weiss_popt[0], np.sqrt(weiss_pconv.diagonal()[0]))
print "weiss offset = {:.3g} ± {:.3g}".format(weiss_popt[1], np.sqrt(weiss_pconv.diagonal()[1]))
pl.errorbar(glanz[:,0], glanz_phi, xerr=T_err, yerr=U_err/S,
label="glanz", color="gold", linestyle="none")
pl.errorbar(matt[:,0], matt_phi, xerr=T_err, yerr=U_err/S,
label="matt", color="yellow", linestyle="none")
pl.errorbar(schwarz[:,0], schwarz_phi, xerr=T_err, yerr=U_err/S,
label="schwarz", color="black", linestyle="none")
pl.errorbar(weiss[:,0], weiss_phi, xerr=T_err, yerr=U_err/S,
label="weiss", color="gray", linestyle="none")
header = ["T / K", "Phi/F in W/m^2", "Fehler T", "Fehler Phi/F"]
print """
Tabellen für den Lesliewürfel-Plot
----------------------------------
"""
print "Glanz"
glanz_table = PrettyTable(header)
for row in zip(glanz[:,0], glanz_phi, np.ones(glanz[:,0].size)*T_err, np.ones(glanz_phi.size)*U_err/S):
glanz_table.add_row(row)
print glanz_table
print
print "Matt"
matt_table = PrettyTable(header)
for row in zip(matt[:,0], matt_phi, np.ones(matt[:,0].size)*T_err, np.ones(matt_phi.size)*U_err/S):
matt_table.add_row(row)
print matt_table
print
print "Schwarz"
schwarz_table = PrettyTable(header)
for row in zip(schwarz[:,0], schwarz_phi, np.ones(schwarz[:,0].size)*T_err, np.ones(schwarz_phi.size)*U_err/S):
schwarz_table.add_row(row)
print schwarz_table
print
print "Weiß"
weiss_table = PrettyTable(header)
for row in zip(weiss[:,0], weiss_phi, np.ones(weiss[:,0].size)*T_err, np.ones(weiss_phi.size)*U_err/S):
weiss_table.add_row(row)
print weiss_table
print
epsilon = 0.1
x = np.linspace(min([min(x) for x in [glanz[:,0], matt[:,0], schwarz[:,0],
weiss[:,0]]]),
max([max(x) for x in [glanz[:,0], matt[:,0], schwarz[:,0],
weiss[:,0]]]),
100)
offset = - epsilon * sigma * T0**4
print "ideal offset = {:.3g}".format(offset)
y = boltzmann(x, epsilon, offset)
pl.plot(x, y, label=ur"$\epsilon = 0.1$")
pl.grid(True)
pl.title(u"Lesliewürfel")
pl.xlabel(ur"Temperatur $T / \mathrm{K}$")
pl.ylabel(ur"Strahlungsfluss $\frac{\Phi}{F} / \mathrm{\frac{W}{m^2}}$")
pl.legend(loc="best", prop={"size": 12})
pl.savefig("Plot_b.pdf")
pl.clf()
# Aufgabe c
print """
Aufgabe c
=========
"""
data = np.genfromtxt("c-erste.csv", delimiter="\t")
d = data[:,0] / 100
U = data[:,1] / V
phi = phif(U)
def c(x, a, b):
return a*x + b
dx = d**(-2)
dy = phi
dx_err = np.abs(-2 * d**(-3)) * 0.001
dy_err = 0.001 / S
popt, pconv = op.curve_fit(c, dx, dy)
x = np.linspace(min(dx), max(dx))
y = c(x, *popt)
pl.plot(x, y, label="Fit")
print "Fitparameter"
print "a", popt[0], "±", np.sqrt(pconv.diagonal()[0])
print "b", popt[1], "±", np.sqrt(pconv.diagonal()[1])
pl.errorbar(dx, dy, xerr=dx_err, yerr=dy_err, linestyle="none",
marker="+", label="Messdaten")
pl.grid(True)
pl.title(u"Halogenlampe bei verschiedenen Abständen")
pl.xlabel(ur"Abstand $d^{-2} / \mathrm{m^{-2}}$")
pl.ylabel(ur"Strahlungsfluss $\frac{\Phi}{F} / \mathrm{\frac{W}{m^2}}$")
pl.legend(loc="best")
pl.savefig("Plot_c-erste.pdf")
pl.clf()
print
print "Tabelle für Aufgabe c"
fields = ["d^-2 in m^-2", "Phi/F in W/m^2", "Fehler d^-2", "Fehler Phi/F"]
table = PrettyTable(fields)
table.align = "l"
for row in zip(dx, dy, dx_err, np.ones(dy.size)*dy_err):
table.add_row(row)
print table
print
data = np.genfromtxt("c-zweite.csv", delimiter="\t")
U1 = data[:,0]
I1 = data[:,1]
U2 = data[:,2] / V
U_err = 0.001
I_err = 0.01
p = U1 * I1
R = U1 / I1
R_err = np.sqrt(
(1/I1 * U_err)**2
+ (U1/I1**2 * I_err)**2
)
phi = phif(U2)
phi_err = U_err / S
alpha = 4.82e-3
beta = 6.76e-7
R0 = 0.35
R0_err = 0.05
T = (-alpha*R0 + np.sqrt(R0)*np.sqrt(4*beta*R + alpha**2*R0 - 4*beta*R0) +
2*beta*R0*T0)/(2*beta*R0)
popt, pconv = op.curve_fit(boltzmann, T, phi, sigma=phi_err)
x = np.linspace(min(T), max(T))
y = boltzmann(x, *popt)
pl.plot(x, y, label="Fit")
epsilon = popt[0]
epsilon_err = np.sqrt(pconv.diagonal()[0])
print "ε = {:.3g} ± {:.3g}".format(epsilon, epsilon_err)
f1 = (1/(np.sqrt(R0)*np.sqrt(4*beta*R + alpha**2*R0 - 4*beta*R0))) * R_err
f2 = T0_err
f3 = ((-alpha + ((alpha**2 - 4*beta)*np.sqrt(R0))/( 2*np.sqrt(4*beta*R + alpha**2*R0 - 4*beta*R0)) + np.sqrt( 4*beta*R + alpha**2*R0 - 4*beta*R0)/(2*np.sqrt(R0)) + 2*beta*T0)/( 2*beta*R0) - (-alpha*R0 + np.sqrt(R0)*np.sqrt(4*beta*R + alpha**2*R0 - 4*beta*R0) + 2*beta*R0*T0)/( 2*beta*R0**2)) * R0_err
T_err = np.sqrt(f1**2 + f2**2 + f3**2)
pl.errorbar(T, phi, xerr=T_err, yerr=phi_err, label="Messdaten",
linestyle="none", marker="+")
pl.grid(True)
pl.legend(loc="best")
pl.title(u"Halogenlampe bei verschiedenen Leistungen")
pl.xlabel(u"Temperatur $T / \mathrm{K}$")
pl.ylabel(ur"Strahlungsfluss $\frac{\Phi}{F} / \mathrm{\frac{W}{m^2}}$")
pl.savefig("Plot_c-zweite.pdf")
pl.clf()
def _parse_args():
"""
Parses the command line arguments.
:return: Namespace with arguments.
:rtype: Namespace
"""
parser = argparse.ArgumentParser(description="")
#parser.add_argument("args", metavar="N", type=str, nargs="*", help="Positional arguments.")
#parser.add_argument("", dest="", type="", default=, help=)
#parser.add_argument("--version", action="version", version="<the version>")
table1.py |
def __str__ (this):
return this.functor+"("+",".join(this.varList)+") = "+str(this.val)
def __repr__ (this):
return str(this)
""" No used, no more
def match (this, node, grounding):
if not node.isLiteralNode():
raise Exception ("Attempt to match nonprobability Node "+str(node)+" with probability value")
if this.functor != node.functor:
return (False,0)
else:
for (var,val) in zip(node.varList, this.varList):
if val != grounding.val(var):
return (False,(var,val))
return (True, this.val)
"""
def eq(this, node):
return this.functor == node.functor and this.varList == node.varList and this.val == node.val
def isLiteralNode(this):
return this.val != Node.QUERY and this.val != Node.IN_TREE
def isQueryNode(this):
return this.val == Node.QUERY
class Rule (object):
""" A rule specifying a conditional probability (the class is likely misnamed) """
def __init__ (this, child, parentList, prob):
this.child = child
this.parentList = parentList
this.prob = prob
def __str__(this):
return "P("+str(this.child)+" | "+",".join([str(n) for n in this.parentList])+") = "+str(this.prob)
class Grounding (object):
""" A specific assignment of constants to variables """
def __init__ (this, varList):
this.varList = varList
def val (this, var):
for (v, ground) in this.varList:
if v == var:
return ground
else:
raise Exception ("Var not ground: " + var)
def groundNode (this, node):
gndList = []
for var in node.varList:
gndList.append(this.val(var))
return Node (node.functor, gndList, node.val)
def __repr__(this):
return ", ".join(v[0]+"="+str(v[1]) for v in this.varList)
class Database (object):
""" The database, specifying the functor values for given arguments """
def __init__ (this, attrs):
this.attributes = attrs
def funcVal(this, node):
if node.isLiteralNode():
raise Exception ("Attempted to match nonquery Node "+str(node)+" probability")
for n in this.attributes:
if n.functor == node.functor and all ([p[0]==p[1] for p in zip(n.varList,node.varList)]):
return n.val
else:
raise Exception ("Functor not ground: " + node)
class NetNode (object):
"""
A node within a Bayes net. In addition to the functor description (Node), this has a list of parent Nodes.
"""
def __init__(this, node, parents):
this.node = node
this.parents = parents
def __str__(this):
return str(this.node)+" <- ("+", ".join([str(sn) for sn in this.parents])+")"
class BayesNet (object):
""" The Bayes net """
def __init__(this):
this.nodes = []
def append(this, netNode):
for child in netNode.parents:
if child not in this.nodes:
raise Exception ("BayesNet node " + str(netNode) + " added but child " + str (child) + " not already in net")
this.nodes.append(netNode)
def jointProbs(this, grounding, db, ruleSet):
probs = []
joint = 1.0
for node in this.nodes:
#print "searching",node
gn = fillNode(node.node, grounding, db)
#print "filled node", gn
gcn = [fillNode(n.node, grounding, db) for n in node.parents]
#print "filled parents", gcn
p = ruleMatch(ruleSet, gn, gcn)
if p == -1:
p = default(gn.functor)
probs.append((gn, p))
joint *= p
probs.append(joint)
probs.append(math.log(joint))
return probs
def variableList(this):
vars = set()
for n in this.nodes:
for v in n.node.varList:
vars.add(v)
return sorted(list(vars))
def query (node, grounding, db):
""" Ground a node and look it up in the db """
return db.funcVal(grounding.groundNode(node))
def fillNode(node, grounding, db):
""" Return a grounded node, with the value for its functor according to db """
gn = copy.deepcopy(node)
gn.val = query(gn, grounding, db)
return gn
def ruleMatch (ruleSet, node, parents):
"""
Locate the value for a grounded node and its parents in a rule set, return -1 if not found.
For functors with binary ranges, when all parents match but child's value does not, return 1-prob for other value.
"""
def getProb (node):
for rule in ruleSet:
#print rule
if (rule.child.eq(node) and
len(rule.parentList)==len(parents) and
all([n[0].eq(n[1]) for n in zip(rule.parentList,parents)])):
#print "winning eq", [n for n in zip(rule.parentList,parents)]
return rule.prob
else:
return -1
prob = getProb (node)
if prob == -1 and functorRangeSize(node.functor) == 2:
tn = copy.copy(node)
tn.val = functorOtherValue(tn.functor, tn.val)
prob = getProb (tn)
if prob != -1:
return 1 - prob
else:
return prob
return prob
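    # Worked example of the complement rule above (hypothetical functor and values):
    # if the rule set only stores P(f(a)=True | g(a,b)=True) = 0.8 and the same parents
    # are queried with child value f(a)=False, ruleMatch returns 1 - 0.8 = 0.2.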
def default(functor):
""" Return default uniform distribution for the range of a functor """
return 1.0/functorRangeSize(functor)
def functorRange(functor):
""" Look up the range for a functor """
for (name, range) in functorRangeList:
if functor == name:
return range
else:
raise Exception ("Functor " + functor + " not present in range list")
def functorRangeSize(functor):
""" Return cardinality of range for a functor """
return len(functorRange(functor))
def functorOtherValue(functor, val):
""" For functors with a binary range, return the other element """
range = functorRange(functor)
assert len(range) == 2
if val == range[0]:
return range[1]
else:
return range[0]
def atomList(joints):
""" Return the atoms, derived from the first entry in the joint probability table """
assert len(joints) > 0
first = joints[0]
functorList = first[1][:-2] # Second element of row, last two elements of that are joint prob and log prob
atomList = []
for (node,_) in functorList:
atomList.append(node.functor+"("+",".join(node.varList)+")")
return atomList
def jointProbabilities(constants, db, ruleList, bn):
""" Compute the joint probabilities for all combinations of values """
vars = bn.variableList()
combs = generateCombos(vars, constants)
joints = []
for grounding in combs:
joints.append((grounding, bn.jointProbs(grounding, db, ruleList)))
return (vars, atomList(joints), joints)
def generateCombos(vars,constants):
""" Generate all possible groundings (assignments of constants to variables) """
# SUPER NOT GENERALIZED---TOO LATE AT NIGHT FOR ME TO DO RECURSIVE ALGORITHMS
assert len(vars) == 2 and len(constants) == 2
combs = []
for c1 in constants:
for c2 in constants:
combs.append(Grounding([(vars[0], c1), (vars[1], c2)]))
return combs
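    # A more general sketch (not used above) that would drop the two-variable,
    # two-constant assumption by taking a cartesian product:
    # import itertools
    # def generateCombosGeneral(vars, constants):
    #     return [Grounding(list(zip(vars, combo)))
    #             for combo in itertools.product(constants, repeat=len(vars))]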
def formatJointTableForLaTeX(joints):
"""
    Given a joint probability table, format it for LaTeX.
This function will have to be tailored for every paper.
This function simply generates the {tabular} part of the table. The prologue and epilogue,
including the caption and label, must be specified in the including file.
"""
(varList, atoms, probs) = joints
cols = len(varList) + len (probs[0][1])
with open("table1.tex","w") as out:
out.write ("\\begin{tabular}{|" + "|".join(["c"]*(cols-2))+"||c|c|}\n")
out.write ("\\hline\n")
# Table header
out.write (" & ".join(varList) + " & " + " & ".join([a for a in atoms]) + " & Joint $p$ & ln~$p$ \\\\ \\hline\n")
# Table rows
logps = []
for (grounding, probs) in probs:
            out.write (" & ".join([val for (var, val) in grounding.varList]) + " & " +
                       " & ".join([str(n.val)+" ({:.1f})".format(p) for (n,p) in probs[:-2]]) +
                       " & {:.2f}".format(probs[-2]) + " & {:.2f}".format(probs[-1]) + "\\\\\n")
table1.py |
        if not node.isLiteralNode():
            raise Exception ("Attempt to match nonprobability Node "+str(node)+" with probability value")
if this.functor != node.functor:
return (False,0)
else:
for (var,val) in zip(node.varList, this.varList):
if val != grounding.val(var):
return (False,(var,val))
return (True, this.val)
"""
def eq(this, node):
return this.functor == node.functor and this.varList == node.varList and this.val == node.val
def isLiteralNode(this):
return this.val != Node.QUERY and this.val != Node.IN_TREE
def isQueryNode(this):
return this.val == Node.QUERY
class Rule (object):
""" A rule specifying a conditional probability (the class is likely misnamed) """
def __init__ (this, child, parentList, prob):
this.child = child
this.parentList = parentList
this.prob = prob
def __str__(this):
return "P("+str(this.child)+" | "+",".join([str(n) for n in this.parentList])+") = "+str(this.prob)
class Grounding (object):
""" A specific assignment of constants to variables """
def | (this, varList):
this.varList = varList
def val (this, var):
for (v, ground) in this.varList:
if v == var:
return ground
else:
raise Exception ("Var not ground: " + var)
def groundNode (this, node):
gndList = []
for var in node.varList:
gndList.append(this.val(var))
return Node (node.functor, gndList, node.val)
def __repr__(this):
return ", ".join(v[0]+"="+str(v[1]) for v in this.varList)
class Database (object):
""" The database, specifying the functor values for given arguments """
def __init__ (this, attrs):
this.attributes = attrs
def funcVal(this, node):
if node.isLiteralNode():
raise Exception ("Attempted to match nonquery Node "+str(node)+" probability")
for n in this.attributes:
if n.functor == node.functor and all ([p[0]==p[1] for p in zip(n.varList,node.varList)]):
return n.val
else:
raise Exception ("Functor not ground: " + node)
class NetNode (object):
"""
A node within a Bayes net. In addition to the functor description (Node), this has a list of parent Nodes.
"""
def __init__(this, node, parents):
this.node = node
this.parents = parents
def __str__(this):
return str(this.node)+" <- ("+", ".join([str(sn) for sn in this.parents])+")"
class BayesNet (object):
""" The Bayes net """
def __init__(this):
this.nodes = []
def append(this, netNode):
for child in netNode.parents:
if child not in this.nodes:
raise Exception ("BayesNet node " + str(netNode) + " added but child " + str (child) + " not already in net")
this.nodes.append(netNode)
def jointProbs(this, grounding, db, ruleSet):
probs = []
joint = 1.0
for node in this.nodes:
#print "searching",node
gn = fillNode(node.node, grounding, db)
#print "filled node", gn
gcn = [fillNode(n.node, grounding, db) for n in node.parents]
#print "filled parents", gcn
p = ruleMatch(ruleSet, gn, gcn)
if p == -1:
p = default(gn.functor)
probs.append((gn, p))
joint *= p
probs.append(joint)
probs.append(math.log(joint))
return probs
def variableList(this):
vars = set()
for n in this.nodes:
for v in n.node.varList:
vars.add(v)
return sorted(list(vars))
def query (node, grounding, db):
""" Ground a node and look it up in the db """
return db.funcVal(grounding.groundNode(node))
def fillNode(node, grounding, db):
""" Return a grounded node, with the value for its functor according to db """
gn = copy.deepcopy(node)
gn.val = query(gn, grounding, db)
return gn
def ruleMatch (ruleSet, node, parents):
"""
Locate the value for a grounded node and its parents in a rule set, return -1 if not found.
For functors with binary ranges, when all parents match but child's value does not, return 1-prob for other value.
"""
def getProb (node):
for rule in ruleSet:
#print rule
if (rule.child.eq(node) and
len(rule.parentList)==len(parents) and
all([n[0].eq(n[1]) for n in zip(rule.parentList,parents)])):
#print "winning eq", [n for n in zip(rule.parentList,parents)]
return rule.prob
else:
return -1
prob = getProb (node)
if prob == -1 and functorRangeSize(node.functor) == 2:
tn = copy.copy(node)
tn.val = functorOtherValue(tn.functor, tn.val)
prob = getProb (tn)
if prob != -1:
return 1 - prob
else:
return prob
return prob
def default(functor):
""" Return default uniform distribution for the range of a functor """
return 1.0/functorRangeSize(functor)
def functorRange(functor):
""" Look up the range for a functor """
for (name, range) in functorRangeList:
if functor == name:
return range
else:
raise Exception ("Functor " + functor + " not present in range list")
def functorRangeSize(functor):
""" Return cardinality of range for a functor """
return len(functorRange(functor))
def functorOtherValue(functor, val):
""" For functors with a binary range, return the other element """
range = functorRange(functor)
assert len(range) == 2
if val == range[0]:
return range[1]
else:
return range[0]
def atomList(joints):
""" Return the atoms, derived from the first entry in the joint probability table """
assert len(joints) > 0
first = joints[0]
functorList = first[1][:-2] # Second element of row, last two elements of that are joint prob and log prob
atomList = []
for (node,_) in functorList:
atomList.append(node.functor+"("+",".join(node.varList)+")")
return atomList
def jointProbabilities(constants, db, ruleList, bn):
""" Compute the joint probabilities for all combinations of values """
vars = bn.variableList()
combs = generateCombos(vars, constants)
joints = []
for grounding in combs:
joints.append((grounding, bn.jointProbs(grounding, db, ruleList)))
return (vars, atomList(joints), joints)
def generateCombos(vars,constants):
""" Generate all possible groundings (assignments of constants to variables) """
# SUPER NOT GENERALIZED---TOO LATE AT NIGHT FOR ME TO DO RECURSIVE ALGORITHMS
assert len(vars) == 2 and len(constants) == 2
combs = []
for c1 in constants:
for c2 in constants:
combs.append(Grounding([(vars[0], c1), (vars[1], c2)]))
return combs
def formatJointTableForLaTeX(joints):
"""
` Given a joint probability table, format it for LaTeX.
This function will have to be tailored for every paper.
This function simply generates the {tabular} part of the table. The prologue and epilogue,
including the caption and label, must be specified in the including file.
"""
(varList, atoms, probs) = joints
cols = len(varList) + len (probs[0][1])
with open("table1.tex","w") as out:
out.write ("\\begin{tabular}{|" + "|".join(["c"]*(cols-2))+"||c|c|}\n")
out.write ("\\hline\n")
# Table header
out.write (" & ".join(varList) + " & " + " & ".join([a for a in atoms]) + " & Joint $p$ & ln~$p$ \\\\ \\hline\n")
# Table rows
logps = []
for (grounding, probs) in probs:
out.write (" & ".join([val for (var, val) in grounding.varList]) + " & " +
" & ".join([str(n.val)+" ({:.1f})".format(p) for (n,p) in probs[:-2]]) +
" & {:.2f}".format(probs[-2]) + " & {:.2f}".format(probs[-1]) + "\\\\\n")
logps.append(probs[-1])
# A line to indicate there are further | __init__ | identifier_name |
result.rs |
impl From<LdapError> for io::Error {
    fn from(le: LdapError) -> io::Error {
        match le {
LdapError::Io { source, .. } => source,
_ => io::Error::new(io::ErrorKind::Other, format!("{}", le)),
}
}
}
/// Common components of an LDAP operation result.
///
/// This structure faithfully replicates the components dictated by the standard,
/// and is distinctly C-like with its reliance on numeric codes for the indication
/// of outcome. It would be tempting to hide it behind an automatic `Result`-like
/// interface, but there are scenarios where this would preclude intentional
/// incorporation of error conditions into query design. Instead, the struct
/// implements helper methods, [`success()`](#method.success) and
/// [`non_error()`](#method.non_error), which may be used for ergonomic error
/// handling when simple condition checking suffices.
#[derive(Clone, Debug)]
pub struct LdapResult {
/// Result code.
///
/// Generally, the value of zero indicates successful completion, but there's
/// a number of other non-error codes arising as a result of various operations.
/// See [Section A.1 of RFC 4511](https://tools.ietf.org/html/rfc4511#appendix-A.1).
pub rc: u32,
/// Matched component DN, where applicable.
pub matched: String,
/// Additional diagnostic text.
pub text: String,
/// Referrals.
///
/// Absence of referrals is represented by an empty vector.
pub refs: Vec<String>,
/// Response controls.
///
/// Missing and empty controls are both represented by an empty vector.
pub ctrls: Vec<Control>,
}
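// A hedged usage sketch (assumes an `Ldap` handle bound elsewhere; the call shown is
// illustrative, not part of this module):
//   let res: LdapResult = ldap.simple_bind("cn=admin,dc=example,dc=org", "secret").await?;
//   match res.success() {
//       Ok(r) => println!("bound, rc={}", r.rc),
//       Err(e) => eprintln!("bind failed: {}", e),
//   }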
#[doc(hidden)]
impl From<Tag> for LdapResult {
fn from(t: Tag) -> LdapResult {
<LdapResultExt as From<Tag>>::from(t).0
}
}
impl Error for LdapResult {}
impl fmt::Display for LdapResult {
fn fmt(&self, f: &mut fmt::Formatter) -> StdResult<(), fmt::Error> {
fn description(this: &LdapResult) -> &'static str {
match this.rc {
0 => "success",
1 => "operationsError",
2 => "protocolError",
3 => "timeLimitExceeded",
4 => "sizeLimitExceeded",
5 => "compareFalse",
6 => "compareTrue",
7 => "authMethodNotSupported",
8 => "strongerAuthRequired",
10 => "referral",
11 => "adminLimitExceeded",
12 => "unavailableCriticalExtension",
13 => "confidentialityRequired",
14 => "saslBindInProgress",
16 => "noSuchAttribute",
17 => "undefinedAttributeType",
18 => "inappropriateMatching",
19 => "constraintViolation",
20 => "attributeOrValueExists",
21 => "invalidAttributeSyntax",
32 => "noSuchObject",
33 => "aliasProblem",
34 => "invalidDNSyntax",
36 => "aliasDereferencingProblem",
48 => "inappropriateAuthentication",
49 => "invalidCredentials",
50 => "insufficientAccessRights",
51 => "busy",
52 => "unavailable",
53 => "unwillingToPerform",
54 => "loopDetect",
64 => "namingViolation",
65 => "objectClassViolation",
66 => "notAllowedOnNonLeaf",
67 => "notAllowedOnRDN",
68 => "entryAlreadyExists",
69 => "objectClassModsProhibited",
71 => "affectsMultipleDSAs",
80 => "other",
88 => "abandoned",
122 => "assertionFailed",
_ => "unknown",
}
}
write!(
f,
"rc={} ({}), dn: \"{}\", text: \"{}\"",
self.rc,
description(self),
self.matched,
self.text
)
}
}
impl LdapResult {
/// If the result code is zero, return the instance itself wrapped
/// in `Ok()`, otherwise wrap the instance in an `LdapError`.
pub fn success(self) -> Result<Self> {
if self.rc == 0 {
Ok(self)
} else {
Err(LdapError::from(self))
}
}
/// If the result code is 0 or 10 (referral), return the instance
/// itself wrapped in `Ok()`, otherwise wrap the instance in an
/// `LdapError`.
pub fn non_error(self) -> Result<Self> {
if self.rc == 0 || self.rc == 10 {
Ok(self)
} else {
Err(LdapError::from(self))
}
}
}
#[derive(Clone, Debug)]
pub(crate) struct LdapResultExt(pub LdapResult, pub Exop);
impl From<Tag> for LdapResultExt {
fn from(t: Tag) -> LdapResultExt {
let t = match t {
Tag::StructureTag(t) => t,
Tag::Null(_) => {
return LdapResultExt(
LdapResult {
rc: 0,
matched: String::from(""),
text: String::from(""),
refs: vec![],
ctrls: vec![],
},
Exop {
name: None,
val: None,
},
)
}
_ => unimplemented!(),
};
let mut tags = t.expect_constructed().expect("result sequence").into_iter();
let rc = match parse_uint(
tags.next()
.expect("element")
.match_class(TagClass::Universal)
.and_then(|t| t.match_id(Types::Enumerated as u64))
.and_then(|t| t.expect_primitive())
.expect("result code")
.as_slice(),
) {
IResult::Done(_, rc) => rc as u32,
_ => panic!("failed to parse result code"),
};
let matched = String::from_utf8(
tags.next()
.expect("element")
.expect_primitive()
.expect("octet string"),
)
.expect("matched dn");
let text = String::from_utf8(
tags.next()
.expect("element")
.expect_primitive()
.expect("octet string"),
)
.expect("diagnostic message");
let mut refs = Vec::new();
let mut exop_name = None;
let mut exop_val = None;
loop {
match tags.next() {
None => break,
Some(comp) => match comp.id {
3 => {
refs.extend(parse_refs(comp));
}
10 => {
exop_name = Some(
String::from_utf8(comp.expect_primitive().expect("octet string"))
.expect("exop name"),
);
}
11 => {
exop_val = Some(comp.expect_primitive().expect("octet string"));
}
_ => (),
},
}
}
LdapResultExt(
LdapResult {
rc,
matched,
text,
refs,
ctrls: vec![],
},
Exop {
name: exop_name,
val: exop_val,
},
)
}
}
/// Wrapper for results of a Search operation which returns all entries at once.
///
/// The wrapper exists so that methods [`success()`](#method.success) and
/// [`non_error()`](#method.non_error) can be called on an instance. Those methods
/// destructure the wrapper and return its components as elements of an anonymous
/// tuple.
#[derive(Clone, Debug)]
pub struct SearchResult(pub Vec<ResultEntry>, pub LdapResult);
impl SearchResult {
/// If the result code is zero, return an anonymous tuple of component structs
/// wrapped in `Ok()`, otherwise wrap the `LdapResult` part in an `LdapError`.
pub fn success(self) -> Result<(Vec<ResultEntry>, LdapResult)> {
if self.1.rc == 0 {
Ok((self.0, self.1))
} else {
Err(LdapError::from(self.1))
}
}
/// If the result code is 0 or 10 (referral), return an anonymous tuple of component
/// structs wrapped in `Ok()`, otherwise wrap the `LdapResult` part in an `LdapError`.
pub fn non_error(self) -> Result<(Vec<ResultEntry>, LdapResult)> {
if self.1.rc == 0 || self.1.rc == 10 {
Ok((self.0, self.1))
} else {
Err(LdapError::from(self.1))
}
}
}
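// A hedged sketch of consuming a SearchResult (search call and types assumed from the
// rest of the crate):
//   let (rs, _res) = ldap
//       .search("dc=example,dc=org", Scope::Subtree, "(objectClass=*)", vec!["cn"])
//       .await?
//       .success()?;
//   for entry in rs {
//       let entry = SearchEntry::construct(entry);
//       println!("{:?}", entry.attrs);
//   }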
/// Wrapper for the result of a Compare operation.
///
/// Compare uniquely has two non-zero return codes to indicate the outcome of a successful
/// comparison, while other return codes indicate errors, as usual (except 10 for referral).
/// The [`equal()`](#method.equal) method optimizes for the expected case of ignoring
/// referrals; [`non_error()`](#method.non_error) can be used when that's not possible.
#[derive(Clone, Debug)]
pub struct CompareResult
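// A hedged sketch of a Compare call (method name assumed from the crate's API):
//   let cmp: CompareResult = ldap.compare("uid=jdoe,dc=example,dc=org", "mail", "jdoe@example.org").await?;
//   if cmp.equal()? {
//       println!("attribute value matches");
//   }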
result.rs |
#[error("result recv error: {source}")]
ResultRecv {
#[from]
source: oneshot::error::RecvError,
},
/// Error while sending an internal ID scrubbing request to the connection handler.
#[error("id scrub send error: {source}")]
IdScrubSend {
#[from]
source: mpsc::error::SendError<RequestId>,
},
/// Operation or connection timeout.
#[error("timeout: {elapsed}")]
Timeout {
#[from]
elapsed: time::Elapsed,
},
/// Error parsing the string representation of a search filter.
#[error("filter parse error")]
FilterParsing,
/// Premature end of a search stream.
#[error("premature end of search stream")]
EndOfStream,
/// URL parsing error.
#[error("url parse error: {source}")]
UrlParsing {
#[from]
source: url::ParseError,
},
/// Unknown LDAP URL scheme.
#[error("unknown LDAP URL scheme: {0}")]
UnknownScheme(String),
#[cfg(feature = "tls-native")]
/// Native TLS library error.
#[error("native TLS error: {source}")]
NativeTLS {
#[from]
source: native_tls::Error,
},
#[cfg(feature = "tls-rustls")]
/// Rustls library error.
#[error("rustls error: {source}")]
Rustls {
#[from]
source: rustls::TLSError,
},
#[cfg(feature = "tls-rustls")]
/// Rustls DNS name error.
#[error("rustls DNS error: {source}")]
DNSName {
#[from]
source: tokio_rustls::webpki::InvalidDNSNameError,
},
/// LDAP operation result with an error return code.
#[error("LDAP operation result: {result}")]
LdapResult {
#[from]
result: LdapResult,
},
/// No values provided for the Add operation.
#[error("empty value set for Add")]
AddNoValues,
/// No values provided for the Add operation.
#[error("adapter init error: {0}")]
AdapterInit(String),
}
impl From<LdapError> for io::Error {
fn from(le: LdapError) -> io::Error {
match le {
LdapError::Io { source, .. } => source,
_ => io::Error::new(io::ErrorKind::Other, format!("{}", le)),
}
}
}
/// Common components of an LDAP operation result.
/// | /// interface, but there are scenarios where this would preclude intentional
/// incorporation of error conditions into query design. Instead, the struct
/// implements helper methods, [`success()`](#method.success) and
/// [`non_error()`](#method.non_error), which may be used for ergonomic error
/// handling when simple condition checking suffices.
#[derive(Clone, Debug)]
pub struct LdapResult {
/// Result code.
///
/// Generally, the value of zero indicates successful completion, but there's
/// a number of other non-error codes arising as a result of various operations.
/// See [Section A.1 of RFC 4511](https://tools.ietf.org/html/rfc4511#appendix-A.1).
pub rc: u32,
/// Matched component DN, where applicable.
pub matched: String,
/// Additional diagnostic text.
pub text: String,
/// Referrals.
///
/// Absence of referrals is represented by an empty vector.
pub refs: Vec<String>,
/// Response controls.
///
/// Missing and empty controls are both represented by an empty vector.
pub ctrls: Vec<Control>,
}
#[doc(hidden)]
impl From<Tag> for LdapResult {
fn from(t: Tag) -> LdapResult {
<LdapResultExt as From<Tag>>::from(t).0
}
}
impl Error for LdapResult {}
impl fmt::Display for LdapResult {
fn fmt(&self, f: &mut fmt::Formatter) -> StdResult<(), fmt::Error> {
fn description(this: &LdapResult) -> &'static str {
match this.rc {
0 => "success",
1 => "operationsError",
2 => "protocolError",
3 => "timeLimitExceeded",
4 => "sizeLimitExceeded",
5 => "compareFalse",
6 => "compareTrue",
7 => "authMethodNotSupported",
8 => "strongerAuthRequired",
10 => "referral",
11 => "adminLimitExceeded",
12 => "unavailableCriticalExtension",
13 => "confidentialityRequired",
14 => "saslBindInProgress",
16 => "noSuchAttribute",
17 => "undefinedAttributeType",
18 => "inappropriateMatching",
19 => "constraintViolation",
20 => "attributeOrValueExists",
21 => "invalidAttributeSyntax",
32 => "noSuchObject",
33 => "aliasProblem",
34 => "invalidDNSyntax",
36 => "aliasDereferencingProblem",
48 => "inappropriateAuthentication",
49 => "invalidCredentials",
50 => "insufficientAccessRights",
51 => "busy",
52 => "unavailable",
53 => "unwillingToPerform",
54 => "loopDetect",
64 => "namingViolation",
65 => "objectClassViolation",
66 => "notAllowedOnNonLeaf",
67 => "notAllowedOnRDN",
68 => "entryAlreadyExists",
69 => "objectClassModsProhibited",
71 => "affectsMultipleDSAs",
80 => "other",
88 => "abandoned",
122 => "assertionFailed",
_ => "unknown",
}
}
write!(
f,
"rc={} ({}), dn: \"{}\", text: \"{}\"",
self.rc,
description(self),
self.matched,
self.text
)
}
}
impl LdapResult {
/// If the result code is zero, return the instance itself wrapped
/// in `Ok()`, otherwise wrap the instance in an `LdapError`.
pub fn success(self) -> Result<Self> {
if self.rc == 0 {
Ok(self)
} else {
Err(LdapError::from(self))
}
}
/// If the result code is 0 or 10 (referral), return the instance
/// itself wrapped in `Ok()`, otherwise wrap the instance in an
/// `LdapError`.
pub fn non_error(self) -> Result<Self> {
if self.rc == 0 || self.rc == 10 {
Ok(self)
} else {
Err(LdapError::from(self))
}
}
}
#[derive(Clone, Debug)]
pub(crate) struct LdapResultExt(pub LdapResult, pub Exop);
impl From<Tag> for LdapResultExt {
fn from(t: Tag) -> LdapResultExt {
let t = match t {
Tag::StructureTag(t) => t,
Tag::Null(_) => {
return LdapResultExt(
LdapResult {
rc: 0,
matched: String::from(""),
text: String::from(""),
refs: vec![],
ctrls: vec![],
},
Exop {
name: None,
val: None,
},
)
}
_ => unimplemented!(),
};
let mut tags = t.expect_constructed().expect("result sequence").into_iter();
let rc = match parse_uint(
tags.next()
.expect("element")
.match_class(TagClass::Universal)
.and_then(|t| t.match_id(Types::Enumerated as u64))
.and_then(|t| t.expect_primitive())
.expect("result code")
.as_slice(),
) {
IResult::Done(_, rc) => rc as u32,
_ => panic!("failed to parse result code"),
};
let matched = String::from_utf8(
tags.next()
.expect("element")
.expect_primitive()
.expect("octet string"),
)
.expect("matched dn");
let text = String::from_utf8(
tags.next()
.expect("element")
.expect_primitive()
.expect("octet string"),
)
.expect("diagnostic message");
let mut refs = Vec::new();
let mut exop_name = None;
let mut exop_val = None;
loop {
match tags.next() {
None => break,
Some(comp) => match comp.id {
3 => {
refs.extend(parse_refs(comp));
}
10 => {
exop_name = Some(
String::from_utf8(comp.expect_primitive().expect("octet string"))
.expect("exop name"),
);
}
11 => | /// This structure faithfully replicates the components dictated by the standard,
/// and is distinctly C-like with its reliance on numeric codes for the indication
/// of outcome. It would be tempting to hide it behind an automatic `Result`-like | random_line_split |
graph_views.py | and test_data.count() == 0:
return redirect('study:two_input')
# 今日の日付を取得
base = datetime.datetime.today()
print('base= ', base)
# ■■■■■■ 勉強時間入力データの加工 ■■■■■■
# record_data = Record.objects.filter(author=request.user).all()
if record_data.count() >0:
record_df = read_frame(record_data, fieldnames=[
'author', 'created_at', 'category', 'time'])
record_df = record_df.replace(
{'国語': '1', '数学': '2', '英語': '3', '理科': '4', '社会': '5'})
record_df['date'] = pd.to_datetime(record_df['created_at'].dt.strftime("%Y-%m-%d"))
record_df['time'] = record_df['time'].astype(int) # 時間の加工
record_df = record_df.drop(['created_at', 'author'], axis=1) # 列の削除
print('record_df=',record_df)
# category time date
# 0 1 30 2021-11-01
# 1 4 60 2021-11-01
# 2 1 30 2021-11-01
# ■■■■■■ テスト結果入力データの加工 ■■■■■■
# test_df = Test.objects.filter(author=request.user).all()
test_df_nan = pd.DataFrame([
[0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, base]],
columns=['id', 't国', 't数', 't英', 't理', 't社', 't総',
'r国', 'r数', 'r英', 'r理', 'r社', 'r総','date'])
print('test_df_nan= ',test_df_nan)
if test_data.count() >0:
test_df = read_frame(test_data)
test_df = test_df.rename(
columns={'tscore_japanese': 't国', 'tscore_math': 't数', 'tscore_english': 't英',
'tscore_science': 't理', 'tscore_social_studies': 't社', 'tscore_overall': 't総',
'rank_japanese': 'r国', 'rank_math': 'r数', 'rank_english': 'r英',
'rank_science': 'r理', 'rank_social_studies': 'r社', 'rank_overall': 'r総'})
test_df['date'] = pd.to_datetime(test_df['date']).dt.tz_localize(None) #timezone:UTCを無くす
test_df = test_df.drop(['created_at', 'author'], axis=1)
# test_df = test_df.sort_values('date', ascending=False) # 日付で並び替え 古いのが下
print('test_df= ', test_df)
# id 国 数 英 理 社 date
# 3 9 75 65 55 45 35 2021-10-24
# 1 7 72 62 52 42 32 2021-10-17
# 0 6 70 60 50 40 30 2021-10-10
test_df = pd.concat([test_df, test_df_nan]).sort_values('date', ascending=False) # 日付で並び替え 古いのが下(降順)
else:
test_df = test_df_nan
print('test_df= ', test_df)
# ■■■■■■ グラフの表示日数計算&データ作成用 ■■■■■■
# 1. テスト結果入力の古い日付を取得
old_test_day = test_df.iloc[-1]['date']
print('テスト結果入力の古い日付を取得=', old_test_day)
# 2. 今日の日付を取得
# 50行目で取得
# 3. ログインユーザー作成日を取得
result = User.objects.get(id=request.user.id)
user_creat_day = result.date_joined
user_day = pd.to_datetime(user_creat_day).tz_localize(None)
# 4. グラフ作成の一番古い日(last_day)を選び、表示日数を決める
which_day = user_day - old_test_day
if which_day.days < 0:
last_day = (base - user_day).days
else:
last_day = (base - old_test_day).days
print('user_day=', user_day)
print('old_test_day=', old_test_day)
print('last_day=', last_day)
# 5. 今日からlast_dayまでのデータを作る
if last_day == 0:
dates = base - np.arange(last_day+2) * datetime.timedelta(days=1)
else:
dates = base - np.arange(last_day+1) * datetime.timedelta(days=1)
# dates_df = pd.DataFrame({'date': dates})
# dates_df['category'] = int(1) # 日付データにいったんカテゴリ列(国語)を作成
# dates_df.loc[last_day] = [base, 2] # 最後の行にデータ追加 カテゴリを用意
# dates_df.loc[last_day + 1] = [base, 3]
# dates_df.loc[last_day + 2] = [base, 4]
# dates_df.loc[last_day + 3] = [base, 5]
# dates_df['time_int'] = int(0) # 日付データにいったん時間をを作成
# 日付データに国語、ゼロ時間を入れる
dates_df = pd.DataFrame({'date': dates, 'category':'1', 'time':np.nan})
# 今日の日付で5教科をゼロ時間でデータを入れる(初期の5教科入力前の表示枠もれを防ぐ)
dates_df_5cate = pd.DataFrame([
[base, '1', np.nan],
[base, '2', np.nan],
[base, '3', np.nan],
[base, '4', np.nan],
[base, '5', np.nan]],
columns=['date', 'category', 'time'])
base_df = pd.concat([dates_df, dates_df_5cate])
print('base_df=',base_df)
# date category time_int
# 0 2021-10-25 1 0
# 1 2021-10-24 1 0
# 2 2021-10-23 1 0
# 3 2021-10-22 1 0
# 0 2021-10-25 2 0
# 1 2021-10-25 3 0
# 2 2021-10-25 4 0
# 3 2021-10-25 5 0
# ■■■■■■ クロス集計表の作成 ■■■■■■
# dfを縦に結合
if record_data.count() ==0:
record_df = dates_df_5cate
else:
record_df = pd.concat([record_df, base_df]).sort_values('date')
record_df['date'] = pd.to_datetime(record_df['date']).dt.strftime("%Y-%m-%d")
record_df = record_df.pivot_table(
index='category', columns='date', values='time', aggfunc='sum') # クロス集計表の作成
record_df = record_df.reindex(index=['5', '4', '3', '2', '1'])
record_df = record_df.fillna(0)
print('クロス集計後のrecord_df= ',record_df)
subject = ['国', '数', '英', '理', '社']
print('record_df.columns.values= ',record_df.columns.values)
# ■■■■■■■■■■■■■■■■■■■■■■■■
# ■ グラフ表示用 plotly作成 ■
# ■■■■■■■■■■■■■■■■■■■■■■■■
# 基本グラフの | nt() == 0 | identifier_name |
|
graph_views.py | 'author', 'created_at', 'category', 'time'])
record_df = record_df.replace(
{'国語': '1', '数学': '2', '英語': '3', '理科': '4', '社会': '5'})
record_df['date'] = pd.to_datetime(record_df['created_at'].dt.strftime("%Y-%m-%d"))
record_df['time'] = record_df['time'].astype(int) # 時間の加工
record_df = record_df.drop(['created_at', 'author'], axis=1) # 列の削除
print('record_df=',record_df)
# category time date
# 0 1 30 2021-11-01
# 1 4 60 2021-11-01
# 2 1 30 2021-11-01
# ■■■■■■ テスト結果入力データの加工 ■■■■■■
# test_df = Test.objects.filter(author=request.user).all()
test_df_nan = pd.DataFrame([
[0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, base]],
columns=['id', 't国', 't数', 't英', 't理', 't社', 't総',
'r国', 'r数', 'r英', 'r理', 'r社', 'r総','date'])
print('test_df_nan= ',test_df_nan)
if test_data.count() >0:
test_df = read_frame(test_data)
test_df = test_df.rename(
columns={'tscore_japanese': 't国', 'tscore_math': 't数', 'tscore_english': 't英',
'tscore_science': 't理', 'tscore_social_studies': 't社', 'tscore_overall': 't総',
'rank_japanese': 'r国', 'rank_math': 'r数', 'rank_english': 'r英',
'rank_science': 'r理', 'rank_social_studies': 'r社', 'rank_overall': 'r総'})
test_df['date'] = pd.to_datetime(test_df['date']).dt.tz_localize(None) #timezone:UTCを無くす
test_df = test_df.drop(['created_at', 'author'], axis=1)
# test_df = test_df.sort_values('date', ascending=False) # 日付で並び替え 古いのが下
print('test_df= ', test_df)
# id 国 数 英 理 社 date
# 3 9 75 65 55 45 35 2021-10-24
# 1 7 72 62 52 42 32 2021-10-17
# 0 6 70 60 50 40 30 2021-10-10
test_df = pd.concat([test_df, test_df_nan]).sort_values('date', ascending=False) # 日付で並び替え 古いのが下(降順)
else:
test_df = test_df_nan
print('test_df= ', test_df)
# ■■■■■■ グラフの表示日数計算&データ作成用 ■■■■■■
# 1. テスト結果入力の古い日付を取得
old_test_day = test_df.iloc[-1]['date']
print('テスト結果入力の古い日付を取得=', old_test_day)
# 2. 今日の日付を取得
# 50行目で取得
# 3. ログインユーザー作成日を取得
result = User.objects.get(id=request.user.id)
user_creat_day = result.date_joined
user_day = pd.to_datetime(user_creat_day).tz_localize(None)
# 4. グラフ作成の一番古い日(last_day)を選び、表示日数を決める
which_day = user_day - old_test_day
if which_day.days < 0:
last_day = (base - user_day).days
else:
last_day = (base - old_test_day).days
print('user_day=', user_day)
print('old_test_day=', old_test_day)
print('last_day=', last_day)
# 5. 今日からlast_dayまでのデータを作る
if last_day == 0:
dates = base - np.arange(last_day+2) * datetime.timedelta(days=1)
else:
dates = base - np.arange(last_day+1) * datetime.timedelta(days=1)
# dates_df = pd.DataFrame({'date': dates})
# dates_df['category'] = int(1) # 日付データにいったんカテゴリ列(国語)を作成
# dates_df.loc[last_day] = [base, 2] # 最後の行にデータ追加 カテゴリを用意
# dates_df.loc[last_day + 1] = [base, 3]
# dates_df.loc[last_day + 2] = [base, 4]
# dates_df.loc[last_day + 3] = [base, 5]
# dates_df['time_int'] = int(0) # 日付データにいったん時間をを作成
# 日付データに国語、ゼロ時間を入れる
dates_df = pd.DataFrame({'date': dates, 'category':'1', 'time':np.nan})
# 今日の日付で5教科をゼロ時間でデータを入れる(初期の5教科入力前の表示枠もれを防ぐ)
dates_df_5cate = pd.DataFrame([
[base, '1', np.nan],
[base, '2', np.nan],
[base, '3', np.nan],
[base, '4', np.nan],
[base, '5', np.nan]],
columns=['date', 'category', 'time'])
base_df = pd.concat([dates_df, dates_df_5cate])
print('base_df=',base_df)
# date category time_int
# 0 2021-10-25 1 0
# 1 2021-10-24 1 0
# 2 2021-10-23 1 0
# 3 2021-10-22 1 0
# 0 2021-10-25 2 0
# 1 2021-10-25 3 0
# 2 2021-10-25 4 0
# 3 2021-10-25 5 0
# ■■■■■■ クロス集計表の作成 ■■■■■■
# dfを縦に結合
if record_data.count() ==0:
record_df = dates_df_5cate
else:
record_df = pd.concat([record_df, base_df]).sort_values('date')
record_df['date'] = pd.to_datetime(record_df['date']).dt.strftime("%Y-%m-%d")
record_df = record_df.pivot_table(
index='category', columns='date', values='time', aggfunc='sum') # クロス集計表の作成
record_df = record_df.reindex(index=['5', '4', '3', '2', '1'])
record_df = record_df.fillna(0)
print('クロス集計後のrecord_df= ',record_df)
subject = ['国', '数', '英', '理', '社']
print('record_df.columns.values= ',record_df.columns.values)
# ■■■■■■■■■■■■■■■■■■■■■■■■
# ■ グラフ表示用 plotly作成 ■
# ■■■■■■■■■■■■■■■■■■■■■■■■
# 基本グラフの設定
# heatmapを基本グラフに追加
# 折線グラフ(偏差値)を基本グラフに追加
# 折線グラフ(学年順位)を基本グラフに追加
# グラフのレイアウト設定
# htmlに表示する設定
# ■■■■■■ 基本グラフの設定 ■■■■■■
# 1段目 heatmap | _data, fieldnames=[
| conditional_block |
|
graph_views.py | ■■■■ クロス集計表の作成 ■■■■■■
# dfを縦に結合
if record_data.count() ==0:
record_df = dates_df_5cate
else:
record_df = pd.concat([record_df, base_df]).sort_values('date')
record_df['date'] = pd.to_datetime(record_df['date']).dt.strftime("%Y-%m-%d")
record_df = record_df.pivot_table(
index='category', columns='date', values='time', aggfunc='sum') # クロス集計表の作成
record_df = record_df.reindex(index=['5', '4', '3', '2', '1'])
record_df = record_df.fillna(0)
print('クロス集計後のrecord_df= ',record_df)
subject = ['国', '数', '英', '理', '社']
print('record_df.columns.values= ',record_df.columns.values)
# ■■■■■■■■■■■■■■■■■■■■■■■■
# ■ グラフ表示用 plotly作成 ■
# ■■■■■■■■■■■■■■■■■■■■■■■■
# 基本グラフの設定
# heatmapを基本グラフに追加
# 折線グラフ(偏差値)を基本グラフに追加
# 折線グラフ(学年順位)を基本グラフに追加
# グラフのレイアウト設定
# htmlに表示する設定
# ■■■■■■ 基本グラフの設定 ■■■■■■
# 1段目 heatmap
# 2段目 折線グラフ(学年順位)
# 3段目 折線グラフ(偏差値)
fig_3 = make_subplots(
rows=3, cols=1,
shared_xaxes=True,
vertical_spacing=0.02,
# subplot_titles=dict(
# text=('学習記録', '学年順位', '偏差値'),
# ),
)
# *赤線が出ているが、問題なく動く
# ■■■■■■ heatmapの設定 ■■■■■■
fig_3.add_trace(
go.Heatmap(
x=record_df.columns.values,
y=subject[::-1],
z=record_df,
# labels=dict(x="日", y="教科", color="分"),
# # # x=(1, 7), # 縦軸ラベルに表示する値、10刻み
# # # opacity=0.5, # マップの透明度を0.5に
showlegend=True, # 凡例を強制的に表示
colorbar_tickangle=-90, #目盛り数字の角度
colorbar_y=0.85, #たて位置
colorbar=dict(
thickness=10,
thicknessmode='pixels',
len=0.33, # カラーバーの長さを0.8に(デフォルトは1)
lenmode='fraction',
outlinewidth=0,
# outlinecolor='gray', # カラーバーの枠線の色
# outlinewidth=1, # カラーバーの枠線の太さ
# bordercolor='gray', # カラーバーとラベルを含むカラーバー自体の枠線の色
# borderwidth=1, # カラーバーとラベルを含むカラーバー自体の枠線の太さ
title=dict(
text='分',
side='top') # カラーバーのタイトルをつける位置(デフォルトはtop)
),
colorscale=[
[0, '#202020'], # NaNに該当する値を区別する
[0.01, 'rgb(255,255,255)'], # NaNに該当する値を灰色にして区別する
[1, 'rgb(255,20,147)']
],
ygap=2, # y軸の隙間
xgap=2 # x軸の隙間
# yaxis=(
# range=(0, 120),
),
row=1,col=1
)
# ■■■■■■ 折線グラフ(学年順位) ■■■■■■
fig_3.add_trace(go.Scatter(
name='国語',
x=test_df['date'], y=test_df['r国'], mode="lines+markers",
marker=dict(color='#ffff00'),
showlegend=True,
),
row=2, col=1
)
fig_3.add_trace(go.Scatter(
name='数学',
x=test_df['date'], y=test_df['r数'], mode="lines+markers",
marker=dict(color='#7f00f0'),
),
row=2, col=1
)
fig_3.add_trace(go.Scatter(
name='英語',
x=test_df['date'], y=test_df['r英'], mode="lines+markers",
marker=dict(color='#00ffff'),
),
row=2, col=1
)
fig_3.add_trace(go.Scatter(
name='理科',
x=test_df['date'], y=test_df['r理'], mode="lines+markers",
marker=dict(color='#0000ff'),
),
row=2, col=1
)
fig_3.add_trace(go.Scatter(
name='社会',
x=test_df['date'], y=test_df['r社'], mode="lines+markers",
marker=dict(color='#00ff00'),
),
row=2, col=1
)
fig_3.add_trace(go.Scatter(
name='総合',
x=test_df['date'], y=test_df['r総'], mode="lines+markers",
marker=dict(color='#ff0000'),
),
row=2, col=1
)
# ■■■■■■ 折線グラフ(偏差値) ■■■■■■
fig_3.add_trace(go.Scatter(
name='国語',
x=test_df['date'], y=test_df['t国'], mode="lines+markers",
marker=dict(color='#ffff00'),
showlegend=True,
),
row=3, col=1
)
fig_3.add_trace(go.Scatter(
name='数学',
x=test_df['date'], y=test_df['t数'], mode="lines+markers",
marker=dict(color='#7f00f0'),
),
row=3, col=1
)
fig_3.add_trace(go.Scatter(
name='英語',
x=test_df['date'], y=test_df['t英'], mode="lines+markers",
marker=dict(color='#00ffff'),
),
row=3, col=1
)
fig_3.add_trace(go.Scatter(
name='理科',
x=test_df['date'], y=test_df['t理'], mode="lines+markers",
marker=dict(color='#0000ff'),
),
row=3, col=1
)
fig_3.add_trace(go.Scatter(
name='社会',
x=test_df['date'], y=test_df['t社'], mode="lines+markers",
marker=dict(color='#00ff00'),
),
row=3, col=1
)
fig_3.add_trace(go.Scatter(
name='社会',
x=test_df['date'], y=test_df['t総'], mode="lines+markers",
marker=dict(color='#ff0000'),
),
row=3, col=1
)
# ■■■■■■ グラフのレイアウト設定 ■■■■■■
fig_3.update_layout(
width=380,
height=500,
showlegend=False, # 凡例を強制的に表示(デフォルトでは複数系列あると表示)
template='plotly_dark',
plot_bgcolor = '#212529',
margin=dict( # グラフ領域の余白設定
l=15, r=30, t=30, b=10,
pad = 0, # グラフから軸のラベルまでのpadding
autoexpand=True, # LegendやSidebarが被ったときに自動で余白を増やすかどうか
),
yaxis2=dict(
autorange='reversed',
),
annotations=[{'font': {'size': 14},
'showarrow': False, | 'text': '学習記録',
'x': 0.05,
'xref': 'paper', | random_line_split |
|
graph_views.py | # test_df = Test.objects.filter(author=request.user).all()
test_df_nan = pd.DataFrame([
[0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, base]],
columns=['id', 't国', 't数', 't英', 't理', 't社', 't総',
'r国', 'r数', 'r英', 'r理', 'r社', 'r総','date'])
print('test_df_nan= ',test_df_nan)
if test_data.count() >0:
test_df = read_frame(test_data)
test_df = test_df.rename(
columns={'tscore_japanese': 't国', 'tscore_math': 't数', 'tscore_english': 't英',
'tscore_science': 't理', 'tscore_social_studies': 't社', 'tscore_overall': 't総',
'rank_japanese': 'r国', 'rank_math': 'r数', 'rank_english': 'r英',
'rank_science': 'r理', 'rank_social_studies': 'r社', 'rank_overall': 'r総'})
test_df['date'] = pd.to_datetime(test_df['date']).dt.tz_localize(None) #timezone:UTCを無くす
test_df = test_df.drop(['created_at', 'author'], axis=1)
# test_df = test_df.sort_values('date', ascending=False) # 日付で並び替え 古いのが下
print('test_df= ', test_df)
# id 国 数 英 理 社 date
# 3 9 75 65 55 45 35 2021-10-24
# 1 7 72 62 52 42 32 2021-10-17
# 0 6 70 60 50 40 30 2021-10-10
test_df = pd.concat([test_df, test_df_nan]).sort_values('date', ascending=False) # 日付で並び替え 古いのが下(降順)
else:
test_df = test_df_nan
print('test_df= ', test_df)
# ■■■■■■ グラフの表示日数計算&データ作成用 ■■■■■■
# 1. テスト結果入力の古い日付を取得
old_test_day = test_df.iloc[-1]['date']
print('テスト結果入力の古い日付を取得=', old_test_day)
# 2. 今日の日付を取得
# 50行目で取得
# 3. ログインユーザー作成日を取得
result = User.objects.get(id=request.user.id)
user_creat_day = result.date_joined
user_day = pd.to_datetime(user_creat_day).tz_localize(None)
# 4. グラフ作成の一番古い日(last_day)を選び、表示日数を決める
which_day = user_day - old_test_day
if which_day.days < 0:
last_day = (base - user_day).days
else:
last_day = (base - old_test_day).days
print('user_day=', user_day)
print('old_test_day=', old_test_day)
print('last_day=', last_day)
# 5. 今日からlast_dayまでのデータを作る
if last_day == 0:
dates = base - np.arange(last_day+2) * datetime.timedelta(days=1)
else:
dates = base - np.arange(last_day+1) * datetime.timedelta(days=1)
# dates_df = pd.DataFrame({'date': dates})
# dates_df['category'] = int(1) # 日付データにいったんカテゴリ列(国語)を作成
# dates_df.loc[last_day] = [base, 2] # 最後の行にデータ追加 カテゴリを用意
# dates_df.loc[last_day + 1] = [base, 3]
# dates_df.loc[last_day + 2] = [base, 4]
# dates_df.loc[last_day + 3] = [base, 5]
# dates_df['time_int'] = int(0) # 日付データにいったん時間をを作成
# 日付データに国語、ゼロ時間を入れる
dates_df = pd.DataFrame({'date': dates, 'category':'1', 'time':np.nan})
# 今日の日付で5教科をゼロ時間でデータを入れる(初期の5教科入力前の表示枠もれを防ぐ)
dates_df_5cate = pd.DataFrame([
[base, '1', np.nan],
[base, '2', np.nan],
[base, '3', np.nan],
[base, '4', np.nan],
[base, '5', np.nan]],
columns=['date', 'category', 'time'])
base_df = pd.concat([dates_df, dates_df_5cate])
print('base_df=',base_df)
# date category time_int
# 0 2021-10-25 1 0
# 1 2021-10-24 1 0
# 2 2021-10-23 1 0
# 3 2021-10-22 1 0
# 0 2021-10-25 2 0
# 1 2021-10-25 3 0
# 2 2021-10-25 4 0
# 3 2021-10-25 5 0
# ■■■■■■ クロス集計表の作成 ■■■■■■
# dfを縦に結合
if record_data.count() ==0:
record_df = dates_df_5cate
else:
record_df = pd.concat([record_df, base_df]).sort_values('date')
record_df['date'] = pd.to_datetime(record_df['date']).dt.strftime("%Y-%m-%d")
record_df = record_df.pivot_table(
index='category', columns='date', values='time', aggfunc='sum') # クロス集計表の作成
record_df = record_df.reindex(index=['5', '4', '3', '2', '1'])
record_df = record_df.fillna(0)
print('クロス集計後のrecord_df= ',record_df)
subject = ['国', '数', '英', '理', '社']
print('record_df.columns.values= ',record_df.columns.values)
# ■■■■■■■■■■■■■■■■■■■■■■■■
# ■ グラフ表示用 plotly作成 ■
# ■■■■■■■■■■■■■■■■■■■■■■■■
# 基本グラフの設定
# heatmapを基本グラフに追加
# 折線グラフ(偏差値)を基本グラフに追加
# 折線グラフ( |
print('base= ', base)
# ■■■■■■ 勉強時間入力データの加工 ■■■■■■
# record_data = Record.objects.filter(author=request.user).all()
if record_data.count() >0:
record_df = read_frame(record_data, fieldnames=[
'author', 'created_at', 'category', 'time'])
record_df = record_df.replace(
{'国語': '1', '数学': '2', '英語': '3', '理科': '4', '社会': '5'})
record_df['date'] = pd.to_datetime(record_df['created_at'].dt.strftime("%Y-%m-%d"))
record_df['time'] = record_df['time'].astype(int) # 時間の加工
record_df = record_df.drop(['created_at', 'author'], axis=1) # 列の削除
print('record_df=',record_df)
# category time date
# 0 1 30 2021-11-01
# 1 4 60 2021-11-01
# 2 1 30 2021-11-01
# ■■■■■■ テスト結果入力データの加工 ■■■■■■ | identifier_body |
|
bytesDemo.go | = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码")
c = []byte("人,文人墨客")
co := bytes.Contains(b, c)
fmt.Printf("c 是否在 b 中:%v", co) //true
}
//ContainsAny 报告字符中的任何 UTF-8 编码的 Unicode 代码点是否在 d 中
//3 看字节中utf8字符串是否包含 字符串 忽视空格
func containsAnyDemo() {
d = []byte("若能杯水如名淡,应信村茶比酒香") //字节
ca := bytes.ContainsAny(d, "茶比,香") //忽视空格,忽略顺序
fmt.Printf("d 是否是UTF-8编码:%v", ca) //true
}
//ContainsRune 报告 Unicode 代码点 r 是否在 b 之内。
//4 看字节数组是否包含 单个字符
func containsRuneDemo() {
a = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码") //字节
ca := bytes.ContainsRune(a, '俺')
fmt.Printf("单个字节是否在 a字节数组中: %v", ca) //true
}
//Count 计算s中不重叠实例的数量。如果 sep 为空片段,则 Count 返回1 + s中的 Unicode 代码点数。
//5 计算目的字符个数,当为空时返回长度+1
func countDemo() {
fmt.Println(bytes.Count([]byte("cheese"), []byte("se")))
fmt.Println(bytes.Count([]byte("five"), []byte(""))) // before & after each rune
}
//6 Equal 返回一个布尔值,报告 a 和 b 是否是相同的长度并且包含相同的字节。零参数相当于一个空片。
func equalDemo() {
a = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码")
b = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码")
eq := bytes.Equal(a, b)
fmt.Printf("a和b字节数组是否相等: %v", eq) //true
}
//EqualFold 报告无论 s 和 t,解释为 UTF-8 字符串,在 Unicode 大小写折叠下是否相等。
//7 EqualFold 比较相等忽视大小写,但是不忽视空格,会比较空格
func equalFoldDemo() {
fmt.Println(bytes.EqualFold([]byte("God is a girl 呀 666"), []byte("god IS A GiRl 呀 666")))
}
// func Fields(s []byte) [][]byte
//8 字段在一个或多个连续空白字符的每个实例周围分割切片,如果 s 仅包含空格,则返回 s 的子片段或空列表。
//依据空格分割切片
func fieldsDemo() {
fmt.Printf("Fields are: %q", bytes.Fields([]byte("foo bar 你好 baz")))
}
//9 func FieldsFunc(s []byte, f func(rune) bool) [][]byte
//FieldsFunc 将s解释为 UTF-8 编码的 Unicode 代码点序列。
// 它在每次满足 f(c) 的代码点 c 运行时分割片 s 并返回s的一个子片段。
// 如果s中的所有代码点满足 f(c) 或len(s) == 0,则返回空片。
// FieldsFunc 不保证它调用f(c)的顺序。如果f没有为给定的 c 返回一致的结果,那么 FieldsFunc 可能会崩溃。
//解释:按照指定字符分割字符串
func fieldsFuncDemo() {
f := func(c rune) bool {
//return !unicode.IsLetter(c) && !unicode.IsNumber(c)
if c == 'b' {
return true
}
return false
}
fmt.Printf("Fields are: %q", bytes.FieldsFunc([]byte(" foo1;bar2,baz3..."), f))
//Fields are: [" foo1;" "ar2," "az3..."]
}
//10 func HasPrefix(s, prefix []byte) bool
//HasPrefix测试字节片是否以前缀开头
func hasPrefixDemo() {
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("Go"))) //true
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("C"))) //false
//所有开头
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte(""))) //true
}
//11 HasSuffix 测试字节片段是否以后缀结尾。
func hasSuffixDemo() {
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("go")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("O")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("Ami")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("")))
}
//12 Index
//13 IndexAny 返回字符串中任何第一个出现的字符的下标
// IndexAny 将 s 解释为 UTF-8 编码的 Unicode 代码点序列。
// 它返回字符中任何 Unicode 代码点的 s 中第一次出现的字节索引。
// 如果字符为空或者没有共同的代码点,则返回-1。
//14 func IndexByte(s []byte, c byte) int
// IndexByte 返回 s 的第一个实例的索引,如果 c 不存在于 s 中,则返回 -1。
//15 func IndexFunc 指定规则(遇到指定情况返回下表)
//func IndexFunc(s []byte, f func(r rune) bool) int
//IndexFunc 将 s 解释为一系列UTF-8编码的Unicode代码点。
// 它返回满足 f(c) 的第一个 Unicode 代码点的 s 中的字节索引,否则返回 -1。
//
func indexFuncDemo() {
f2 := func(c rune) bool {
return unicode.Is(unicode.Han, c)
}
fmt.Println(bytes.IndexFunc([]byte("Hello, 世界"), f2))
fmt.Println(bytes.IndexFunc([]byte("Hello, world"), f2))
}
//16 func Join 指定分隔符拼接byte数组
//func Join(s [][]byte, sep []byte) []byte
//Join 连接 s的元素以创建一个新的字节片。分隔符 sep 放置在生成的切片中的元素之间。
func jsonDemo() {
s := [][]byte{[]byte("foo"), []byte("bar"), []byte("baz")}
fmt.Printf("%s", bytes.Join(s, []byte(", ")))
}
//17 func LastIndex
//func LastIndex(s, sep []byte) int
//LastIndex 返回 s 中最后一个 sep 实例的索引,如果 sep 中不存在 s,则返回-1。
func lastIndexDemo() {
fmt.Println(bytes.Index([]byte("go gopher"), []byte("go")))
fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("go")))
fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("rodent")))
}
//18 func LastIndexAny :它返回字符中任何 Unicode 代码点的最后一次出现的字节索引。
//func LastIndexAny(s []byte, chars string) int
//LastIndexAny 将 s 解释为UTF-8编码的 Unicode 代码点序列。
// 它返回字符中任何 Unicode 代码点的最后一次出现的字节索引。
// 如果字符为空或者没有共同的代码点,则返回-1。
//19 func LastIndexByte 查看 byte位置
//func LastIndexByte(s []byte, c byte) int
//LastIndexByte 返回 s 的最后一个实例的索引,如果 c 不存在于 s 中,则返回-1。
func lastIndexByteDemo() {
a = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码")
//a byte : [232 191 129 229 174 162 233 170 154 228 186 186 44 230 150 135 228 186 186 229 162 168 229 174 162 230 151 160 228 184 141 229 128 190 229 128 146 228 186 142 228 191 186 230 187 180 228 187 163 231 160 129]
fmt.Println(bytes.LastIndexByte(a, 191))
}
//20 func LastIndexFunc 指定最后一个,其实是为了对返回的数据进行操作
//func LastIndexFunc(s []byte, f func(r rune) bool) int
//LastIndexFunc 将 s 解释为UTF-8编码的 Unicode 代码点序列。
// 它返回满足 f(c) 的最后一个 Unicode 代码点的 s 中的字节索引,否则返回-1。
func lastIndexFuncDemo() {
f | 2 := func(c r | identifier_name |
|
bytesDemo.go | 70 154 228 186 186 44 230 150 135 228 186 186 229 162 168 229 174 162 230 151 160 228 184 141 229 128 190 229 128 146 228 186 142 228 191 186 230 187 180 228 187 163 231 160 129]
//c byte : [231 179 159 232 128 129 229 164 180 229 173 144 229 157 143 229 190 151 229 190 136]
}
}
//func Contains(b, subslice []byte) bool 包含报告 sublice 是否在 b 之内。
//2 看字节数组是否包含 字节数组
func containsDemo() {
b = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码")
c = []byte("人,文人墨客")
co := bytes.Contains(b, c)
fmt.Printf("c 是否在 b 中:%v", co) //true
}
//ContainsAny 报告字符中的任何 UTF-8 编码的 Unicode 代码点是否在 d 中
//3 看字节中utf8字符串是否包含 字符串 忽视空格
func containsAnyDemo() {
d = []byte("若能杯水如名淡,应信村茶比酒香") //字节
ca := bytes.ContainsAny(d, "茶比,香") //忽视空格,忽略顺序
fmt.Printf("d 是否是UTF-8编码:%v", ca) //true
}
//ContainsRune 报告 Unicode 代码点 r 是否在 b 之内。
//4 看字节数组是否包含 单个字符
func containsRuneDemo() {
a = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码") //字节
ca := bytes.ContainsRune(a, '俺')
fmt.Printf("单个字节是否在 a字节数组中: %v", ca) //true
}
//Count 计算s中不重叠实例的数量。如果 sep 为空片段,则 Count 返回1 + s中的 Unicode 代码点数。
//5 计算目的字符个数,当为空时返回长度+1
func countDemo() {
fmt.Println(bytes.Count([]byte("cheese"), []byte("se")))
fmt.Println(bytes.Count([]byte("five"), []byte(""))) // before & after each rune
}
//6 Equal 返回一个布尔值,报告 a 和 b 是否是相同的长度并且包含相同的字节。零参数相当于一个空片。
func equalDemo() {
a = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码")
b = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码")
eq := bytes.Equal(a, b)
fmt.Printf("a和b字节数组是否相等: %v", eq) //true
}
//EqualFold 报告无论 s 和 t,解释为 UTF-8 字符串,在 Unicode 大小写折叠下是否相等。
//7 EqualFold 比较相等忽视大小写,但是不忽视空格,会比较空格
func equalFoldDemo() {
fmt.Println(bytes.EqualFold([]byte("God is a girl 呀 666"), []byte("god IS A GiRl 呀 666")))
}
// func Fields(s []byte) [][]byte
//8 字段在一个或多个连续空白字符的每个实例周围分割切片,如果 s 仅包含空格,则返回 s 的子片段或空列表。
//依据空格分割切片
func fieldsDemo() {
fmt.Printf("Fields are: %q", bytes.Fields([]byte("foo bar 你好 baz")))
}
//9 func FieldsFunc(s []byte, f func(rune) bool) [][]byte
//FieldsFunc 将s解释为 UTF-8 编码的 Unicode 代码点序列。
// 它在每次满足 f(c) 的代码点 c 运行时分割片 s 并返回s的一个子片段。
// 如果s中的所有代码点满足 f(c) 或len(s) == 0,则返回空片。
// FieldsFunc 不保证它调用f(c)的顺序。如果f没有为给定的 c 返回一致的结果,那么 FieldsFunc 可能会崩溃。
//解释:按照指定字符分割字符串
func fieldsFuncDemo() {
f := func(c rune) bool {
//return !unicode.IsLetter(c) && !unicode.IsNumber(c)
if c == 'b' {
return true
}
return false
}
fmt.Printf("Fields are: %q", bytes.FieldsFunc([]byte(" foo1;bar2,baz3..."), f))
//Fields are: [" foo1;" "ar2," "az3..."]
}
//10 func HasPrefix(s, prefix []byte) bool
//HasPrefix测试字节片是否以前缀开头
func hasPrefixDemo() {
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("Go"))) //true
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("C"))) //false
//所有开头
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte(""))) //true
}
//11 HasSuffix 测试字节片段是否以后缀结尾。
func hasSuffixDemo() {
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("go")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("O")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("Ami")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("")))
}
//12 Index
//13 IndexAny 返回字符串中任何第一个出现的字符的下标
// IndexAny 将 s 解释为 UTF-8 编码的 Unicode 代码点序列。
// 它返回字符中任何 Unicode 代码点的 s 中第一次出现的字节索引。
// 如果字符为空或者没有共同的代码点,则返回-1。
//14 func IndexByte(s []byte, c byte) int
// IndexByte 返回 s 的第一个实例的索引,如果 c 不存在于 s 中,则返回 -1。
//15 func IndexFunc 指定规则(遇到指定情况返回下表)
//func IndexFunc(s []byte, f func(r rune) bool) int
//IndexFunc 将 s 解释为一系列UTF-8编码的Unicode代码点。
// 它返回满足 f(c) 的第一个 Unicode 代码点的 s 中的字节索引,否则返回 -1。
//
func indexFuncDemo() {
f2 := func(c rune) bool {
return unicode.Is(unicode.Han, c)
}
fmt.Println(bytes | ello, 世界"), f2))
fmt.Println(bytes.IndexFunc([]byte("Hello, world"), f2))
}
//16 func Join 指定分隔符拼接byte数组
//func Join(s [][]byte, sep []byte) []byte
//Join 连接 s的元素以创建一个新的字节片。分隔符 sep 放置在生成的切片中的元素之间。
func jsonDemo() {
s := [][]byte{[]byte("foo"), []byte("bar"), []byte("baz")}
fmt.Printf("%s", bytes.Join(s, []byte(", ")))
}
//17 func LastIndex
//func LastIndex(s, sep []byte) int
//LastIndex 返回 s 中最后一个 sep 实例的索引,如果 sep 中不存在 s,则返回-1。
func lastIndexDemo() {
fmt.Println(bytes.Index([]byte("go gopher"), []byte("go")))
fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("go")))
fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("rodent")))
}
//18 func LastIndexAny :它返回字符中任何 Unicode 代码点的最后一次出现的字节索引。
//func LastIndexAny(s []byte, chars string) int
//LastIndexAny 将 s 解释为UTF-8编码的 Unicode 代码点序列。
// 它返回字符中任何 Unicode 代码点的最后一次出现的字节索引。
// 如果字符为空或者没有共同的代码点,则返回-1。
//19 func LastIndexByte 查看 byte位置
//func LastIndexByte(s []byte, c byte) int
//LastIndexByte 返回 s 的最后一个实例的索引,如果 c 不存在于 s 中,则返回-1。
func lastIndexByteDemo() {
a = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码")
//a byte : [232 | .IndexFunc([]byte("H | conditional_block |
bytesDemo.go | "), []byte("go")))
fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("go")))
fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("rodent")))
}
//18 func LastIndexAny :它返回字符中任何 Unicode 代码点的最后一次出现的字节索引。
//func LastIndexAny(s []byte, chars string) int
//LastIndexAny 将 s 解释为UTF-8编码的 Unicode 代码点序列。
// 它返回字符中任何 Unicode 代码点的最后一次出现的字节索引。
// 如果字符为空或者没有共同的代码点,则返回-1。
//19 func LastIndexByte 查看 byte位置
//func LastIndexByte(s []byte, c byte) int
//LastIndexByte 返回 s 的最后一个实例的索引,如果 c 不存在于 s 中,则返回-1。
func lastIndexByteDemo() {
a = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码")
//a byte : [232 191 129 229 174 162 233 170 154 228 186 186 44 230 150 135 228 186 186 229 162 168 229 174 162 230 151 160 228 184 141 229 128 190 229 128 146 228 186 142 228 191 186 230 187 180 228 187 163 231 160 129]
fmt.Println(bytes.LastIndexByte(a, 191))
}
//20 func LastIndexFunc 指定最后一个,其实是为了对返回的数据进行操作
//func LastIndexFunc(s []byte, f func(r rune) bool) int
//LastIndexFunc 将 s 解释为UTF-8编码的 Unicode 代码点序列。
// 它返回满足 f(c) 的最后一个 Unicode 代码点的 s 中的字节索引,否则返回-1。
func lastIndexFuncDemo() {
f2 := func(c rune) bool {
if c == '世' {
return true
}
return false
}
fmt.Println(bytes.LastIndexFunc([]byte("Hello, 世界"), f2))
fmt.Println(bytes.LastIndexFunc([]byte("Hello, world"), f2))
}
//21 func Map(mapping func(r rune) rune, s []byte) []byte //做替换单个字符
//Map 根据映射函数返回字节切片s的所有字符修改后的副本。如果映射返回负值,
// 则字符将从字符串中删除而不会被替换。
// s 和输出中的字符被解释为 UTF-8 编码的 Unicode 代码点。
func mapDemo() {
rot13 := func(r rune) rune {
//switch {
//case r >= 'A' && r <= 'Z':
// return 'A' + (r-'A'+13)%26
//case r >= 'a' && r <= 'z':
// return 'a' + (r-'a'+13)%26
//}
if r == 'b' {
return 'c'
}
return r
}
fmt.Printf("%s", bytes.Map(rot13, []byte("'Twas brillig and the slithy gopher...")))
}
//22 func Repeat 重复一个字节多次,然后返回 比如 a ,我指定了3次 就返回 aaa
//func Repeat(b []byte, count int) []byte
//重复返回由 b 的计数副本组成的新字节片段。
//如果 count 为负数或者 (len(b) * count) 的结果溢出,它会发生混乱。
func repeatDemo() {
fmt.Printf("ba%s", bytes.Repeat([]byte("na"), 2))
}
//23 func Replace []byte数组的取代 可以指定去掉多
//func Replace(s, old, new []byte, n int) []byte
//Replace 将返回 slice 的一个副本,其中前 n 个非重叠的旧实例将被 new 替换。
// 如果 old 是空的,它会在切片的开头和每个 UTF-8 序列之后进行匹配,比如 abcd 我用x来取代,但是old没写,出来的结果就是:xaxbxcxd
// 最多可产生 k-1 切片的 k+1 替换。如果 n<0,则替换次数没有限制。
func replaceDemo() {
fmt.Printf("%s\n", bytes.Replace([]byte("oink oink oink"), []byte("k"), []byte("ky"), 2))
fmt.Printf("%s\n", bytes.Replace([]byte("oink oink oink"), []byte(""), []byte("ll"), -1))
}
//24 func Runes 将byte转换成unicode
//func Runes(s []byte) []rune
//符文返回相当于 s 的一段符文(Unicode 代码点)。
func runesDemo() {
a = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码") //迁
rune := bytes.Runes(a)
fmt.Printf("%v\n", rune)
//[36801 23458 39578 20154 44 25991 20154 22696 23458 26080 19981 20542 20498 20110 20474 28404 20195 30721]
//Unicode其实是: 迁 客 骚 人,
//ASCLL 码是 :迁 客 骚 人 ,
}
//25 func Split 指定字符串分割后,将指定字符串替换为"",其他正常分割,能看来看替换掉哪些位置
//func Split(s, sep []byte) [][]byte
//将切片分割成由 sep 分隔的所有子切片,并返回这些分隔符之间的一部分子切片。
// 如果 sep 为空,Split会在每个UTF-8序列之后分裂。它相当于 SplitN,计数为 -1 。
func splitDemo() {
fmt.Printf("%q\n", bytes.Split([]byte("a,b,c"), []byte(",")))
fmt.Printf("%q\n", bytes.Split([]byte("a man a plan a canal panama"), []byte("a ")))
fmt.Printf("%q\n", bytes.Split([]byte(" xyz "), []byte("")))
fmt.Printf("%q\n", bytes.Split([]byte(""), []byte("Bernardo O'Higgins")))
//["a" "b" "c"]
//["" "man " "plan " "canal panama"]
//[" " "x" "y" "z" " "]
//[""]
}
//26func SplitN 和Split一样,只不过能指定分割后返回的数量,若是数量不过分割,后面的都是一个字节数组,若是超过了,就按最大的算
//func SplitN(s, sep []byte, n int) [][]byte
//将 SplitN 切片成由 sep 分隔的子片段,并返回这些分隔片之间的一部分子片段。
// 如果 sep 为空, SplitN 会在每个UTF-8序列之后分裂。计数确定要返回的子备份数量:
func splitNDemo() {
fmt.Printf("%q\n", bytes.SplitN([]byte("a,b,c"), []byte(","), 30))
z := bytes.SplitN([]byte("a,b,c"), []byte(","), 0)
fmt.Printf("%q (nil = %v)\n", z, z == nil)
//["a" "b" "c"]
//[] (nil = true)
}
//27 func SplitAfter
//func SplitAfter(s, sep []byte) [][]byte
//SplitAfter 在 sep 的每个实例之后切片到所有 sublices 中,
// 并返回这些 sublices 的一部分。如果 sep 为空,则 SplitAfter 会在每个 UTF-8 序列后分割。
// 它相当于 SplitAfterN ,计数为 -1 。
func splitAfterDemo() {
fmt.Printf("%q\n", bytes.SplitAfter([]byte("a,b,c"), []byte(",")))
//["a," "b," "c"]
}
| //28 SplitAfterN 参考SplitN
//func SplitAfterN(s, sep []byte, n int) [][]byte
//在每个 sep 实例之后,SplitAfterN 将 s 分割成子项,并返回这些子项的一部分。
// 如果 sep 为空,则 SplitAfterN 在每个 UTF-8 序列之后分割。计数确定要返回的子备份数量: | random_line_split |
|
bytesDemo.go | 70 154 228 186 186 44 230 150 135 228 186 186 229 162 168 229 174 162 230 151 160 228 184 141 229 128 190 229 128 146 228 186 142 228 191 186 230 187 180 228 187 163 231 160 129]
//c byte : [231 179 159 232 128 129 229 164 180 229 173 144 229 157 143 229 190 151 229 190 136]
}
}
//func Contains(b, subslice []byte) bool 包含报告 sublice 是否在 b 之内。
//2 看字节数组是否包含 字节数组
func containsDemo() {
b = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码")
c = []byte("人,文人墨客")
co := bytes.Contains(b, c)
fmt.Printf("c 是否在 b 中:%v", co) //true
}
//ContainsAny 报告字符中的任何 UTF-8 编码的 Unicode 代码点是否在 d 中
//3 看字节中utf8字符串是否包含 字符串 忽视空格
func containsAnyDemo() {
d = []byte("若能杯水如名淡,应信村茶比酒香") //字节
ca := bytes.ContainsAny(d, "茶比,香") //忽视空格,忽略顺序
fmt.Printf("d 是否是UTF-8编码:%v", ca) //true
}
//ContainsRune 报告 Unicode 代码点 r 是否在 b 之内。
//4 看字节数组是否包含 单个字符
func containsRuneDemo() {
a = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码") //字节
ca := bytes.ContainsRune(a, '俺')
fmt.Printf("单个字节是否在 a字节数组中: %v", ca) //true
}
//Count 计算s中不重叠实例的数量。如果 sep 为空片段,则 Count 返回1 + s中的 Unicode 代码点数。
//5 计算目的字符个数,当为空时返回长度+1
func countDemo() {
fmt.Println(bytes.Count([]byte("cheese"), []byte("se")))
fmt.Println(bytes.Count([]byte("five"), []byte(""))) // before & after each rune
}
//6 Equal 返回一个布尔值,报告 a 和 b 是否是相同的长度并且包含相同的字节。零参数相当于一个空片。
func equalDemo() {
a = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码")
b = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码")
eq := bytes.Equal(a, b)
fmt.Printf("a和b字节数组是否相等: %v", eq) //true
}
//EqualFold 报告无论 s 和 t,解释为 UTF-8 字符串,在 Unicode 大小写折叠下是否相等。
//7 EqualFold 比较相等忽视大小写,但是不忽视空格,会比较空格
func equalFoldDemo() {
fmt.Println(bytes.EqualFold([]byte("God is a girl 呀 666"), []byte("god IS A GiRl 呀 666")))
}
// func Fields(s []byte) [][]byte
//8 字段在一个或多个连续空白字符的每个实例周围分割切片,如果 s 仅包含空格,则返回 s 的子片段或空列表。
//依据空格分割切片
func fieldsDemo() {
fmt.Printf("Fields are: %q", bytes.Fields([]byte("foo bar 你好 baz")))
}
//9 func FieldsFunc(s []byte, f func(rune) bool) [][]byte
//FieldsFunc 将s解释为 UTF-8 编码的 Unicode 代码点序列。
// 它在每次满足 f(c) 的代码点 c 运行时分割片 s 并返回s的一个子片段。
// 如果s中的所有代码点满足 f(c) 或len(s) == 0,则返回空片。
// FieldsFunc 不保证它调用f(c)的顺序。如果f没有为给定的 c 返回一致的结果,那么 FieldsFunc 可能会崩溃。
//解释:按照指定字符分割字符串
func fieldsFuncDemo() {
f := func(c rune) bool {
//return !unicode.IsLetter(c) && !unicode.IsNumber(c)
if c == 'b' {
return true
}
return false
}
fmt.Printf("Fields are: %q", bytes.FieldsFunc([]byte(" foo1;bar2,baz3..."), f))
//Fields are: [" foo1;" "ar2," "az3..."]
}
//10 func HasPrefix(s, prefix []byte) bool
//HasPrefix测试字节片是否以前缀开头
func hasPrefixDemo() {
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("Go"))) //true
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("C"))) //false
//所有开头
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte(""))) //true
}
//11 HasSuffix 测试字节片段是否以后缀结尾。
func hasSuffixDemo() {
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("go")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("O")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("Ami")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("")))
}
//12 Index
//13 IndexAny 返回字符串中任何第一个出现的字符的下标
// IndexAny 将 s 解释为 UTF-8 编码的 Unicode 代码点序列。
// 它返回字符中任何 Unicode 代码点的 s 中第一次出现的字节索引。
// 如果字符为空或者没有共同的代码点,则返回-1。
//14 func IndexByte(s []byte, c byte) int
// IndexByte 返回 s 的第一个实例的索引,如果 c 不存在于 s 中,则返回 -1。
//15 func IndexFunc 指定规则(遇到指定情况返回下表)
//func IndexFunc(s []byte, f func(r rune) bool) int
//IndexFunc 将 s 解释为一系列UTF-8编码的Unicode代码点。
// 它返回满足 f(c) 的第一个 Unicode 代码点的 s 中的字节索引,否则返回 -1。
//
func inde | byte
//Join 连接 s的元素以创建一个新的字节片。分隔符 sep 放置在生成的切片中的元素之间。
func jsonDemo() {
s := [][]byte{[]byte("foo"), []byte("bar"), []byte("baz")}
fmt.Printf("%s", bytes.Join(s, []byte(", ")))
}
//17 func LastIndex
//func LastIndex(s, sep []byte) int
//LastIndex 返回 s 中最后一个 sep 实例的索引,如果 sep 中不存在 s,则返回-1。
func lastIndexDemo() {
fmt.Println(bytes.Index([]byte("go gopher"), []byte("go")))
fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("go")))
fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("rodent")))
}
//18 func LastIndexAny :它返回字符中任何 Unicode 代码点的最后一次出现的字节索引。
//func LastIndexAny(s []byte, chars string) int
//LastIndexAny 将 s 解释为UTF-8编码的 Unicode 代码点序列。
// 它返回字符中任何 Unicode 代码点的最后一次出现的字节索引。
// 如果字符为空或者没有共同的代码点,则返回-1。
//19 func LastIndexByte 查看 byte位置
//func LastIndexByte(s []byte, c byte) int
//LastIndexByte 返回 s 的最后一个实例的索引,如果 c 不存在于 s 中,则返回-1。
func lastIndexByteDemo() {
a = []byte("迁客骚人,文人墨客无不倾倒于俺滴代码")
//a byte : [232 | xFuncDemo() {
f2 := func(c rune) bool {
return unicode.Is(unicode.Han, c)
}
fmt.Println(bytes.IndexFunc([]byte("Hello, 世界"), f2))
fmt.Println(bytes.IndexFunc([]byte("Hello, world"), f2))
}
//16 func Join 指定分隔符拼接byte数组
//func Join(s [][]byte, sep []byte) [] | identifier_body |
dom-utils.ts |
function nthChild(element: HTMLElement) {
let childNumber = 0
const childNodes = element.parentNode!.childNodes
for (const node of childNodes) {
if (node.nodeType === Node.ELEMENT_NODE) ++childNumber
if (node === element) return `:nth-child('${childNumber}')`
}
}
function attributes(element: HTMLElement) {
let attributes = ''
for (const attr of element.attributes) {
attributes += `[${attr.name}="${attr.value}"]`
}
return attributes
}
export function path(el: HTMLElement, rootNode = document.documentElement) : string {
const selector = el.tagName.toLowerCase() + id(el) + classes(el) + attributes(el) + nthChild(el)
const hasParent = el.parentNode && el.parentNode !== rootNode && (el.parentNode as any).tagName
return hasParent ? path(el.parentNode as HTMLElement, rootNode) + ' > ' + selector : selector
}
export function hash(el: HTMLElement) {
const cssPath = path(el)
const type = (el as HTMLInputElement).type
const checked = (el as HTMLInputElement).checked
const value = (el as HTMLInputElement).value
const textContent = (el as HTMLInputElement).textContent
}
export function traverseChildElements(
element: Element,
each: (element: Element, level: number) => boolean,
bind?: any,
level = 0
) {
level++
element = (element.shadowRoot || element) as Element
for (let child: Element | null = element.firstElementChild; child; child = child.nextElementSibling) {
if ((child as HTMLElement).assignedSlot) continue
const assignedElements = (child as HTMLSlotElement).assignedElements?.({flatten:true})
if (assignedElements) for (const assigned of assignedElements) {
if (each.call(bind, assigned, level)) {
traverseChildElements(child, each, bind, level)
}
}
if (each.call(bind, child, level)) {
traverseChildElements(child, each, bind, level)
}
}
}
export function addCSSRule(sheet:any, selector:string, rules:string, index:number) : void {
if ('insertRule' in sheet) {
sheet.insertRule(selector + '{' + rules + '}', index)
} else if ('addRule' in sheet) {
sheet.addRule(selector, rules, index)
}
}
export class Bounds {
left = 0
top = 0
width = 0
height = 0
copy(rect: Bounds) {
this.top = rect.top
this.left = rect.left
this.width = rect.width
this.height = rect.height
return this
}
}
export class Edges {
left = 0
top = 0
right = 0
bottom = 0
copy(rect: Edges) {
this.top = rect.top
this.left = rect.left
this.right = rect.right
this.bottom = rect.bottom
return this
}
}
export function getBounds(
element: Element,
bounds: Bounds = new Bounds(),
referenceElement?: Element
) {
const doc = element.ownerDocument!
const docEl = doc.documentElement
const body = doc.body
if (element === docEl) {
return getDocumentBounds(doc, bounds)
}
if (referenceElement === element) {
bounds.left = 0
bounds.top = 0
bounds.width = (element as HTMLElement).offsetWidth
bounds.height = (element as HTMLElement).offsetHeight
return
}
const defaultView = element.ownerDocument!.defaultView!
let el: HTMLElement | null = element as HTMLElement
let computedStyle
let offsetParent = el.offsetParent as HTMLElement
let prevComputedStyle = defaultView.getComputedStyle(el, null)
let top = el.offsetTop
let left = el.offsetLeft
if (
offsetParent &&
referenceElement &&
(
offsetParent.contains(referenceElement) ||
offsetParent.contains((referenceElement.getRootNode() as ShadowRoot).host)
) &&
offsetParent !== referenceElement
) {
getBounds(referenceElement, bounds, offsetParent)
left -= bounds.left
top -= bounds.top
}
while (
(el = el.parentElement) &&
el !== body &&
el !== docEl &&
el !== referenceElement
) {
if (prevComputedStyle.position === 'fixed') {
break
}
computedStyle = defaultView.getComputedStyle(el, null)
top -= el.scrollTop
left -= el.scrollLeft
if (el === offsetParent) {
top += el.offsetTop
left += el.offsetLeft
top += parseFloat(computedStyle.borderTopWidth) || 0
left += parseFloat(computedStyle.borderLeftWidth) || 0
offsetParent = el.offsetParent as HTMLElement
}
prevComputedStyle = computedStyle
}
// if (prevComputedStyle.position === 'relative' || prevComputedStyle.position === 'static') {
// getDocumentBounds(doc, bounds)
// top += bounds.top
// left += bounds.left
// }
if (prevComputedStyle.position === 'fixed') {
top += Math.max(docEl.scrollTop, body.scrollTop)
left += Math.max(docEl.scrollLeft, body.scrollLeft)
}
// let el = element
// let left = el.offsetLeft
// let top = el.offsetTop
// let offsetParent = el.offsetParent
// while (el && el.nodeType !== Node.DOCUMENT_NODE) {
// left -= el.scrollLeft
// top -= el.scrollTop
// if (el === offsetParent) {
// const style = window.getComputedStyle(el)
// left += el.offsetLeft + parseFloat(style.borderLeftWidth!) || 0
// top += el.offsetTop + parseFloat(style.borderTopWidth!) || 0
// offsetParent = el.offsetParent
// }
// el = el.offsetParent as any
// }
bounds.left = left
bounds.top = top
bounds.width = (element as HTMLElement).offsetWidth
bounds.height = (element as HTMLElement).offsetHeight
return bounds
}
export function getMargin(element: Element, margin: Edges) {
if ((element as HTMLElement).offsetParent === null) {
margin.left = margin.right = margin.top = margin.bottom = 0
return
}
let style = getComputedStyle(element)
margin.left = parseFloat(style.marginLeft) || 0
margin.right = parseFloat(style.marginRight) || 0
margin.top = parseFloat(style.marginTop) || 0
margin.bottom = parseFloat(style.marginBottom) || 0
}
export function getBorder(element: Element, border: Edges) {
let style = getComputedStyle(element)
border.left = parseFloat(style.borderLeftWidth) || 0
border.right = parseFloat(style.borderRightWidth) || 0
border.top = parseFloat(style.borderTopWidth) || 0
border.bottom = parseFloat(style.borderBottomWidth) || 0
}
export function getPadding(element: Element, padding: Edges) {
let style = getComputedStyle(element)
padding.left = parseFloat(style.paddingLeft) || 0
padding.right = parseFloat(style.paddingRight) || 0
padding.top = parseFloat(style.paddingTop) || 0
padding.bottom = parseFloat(style.paddingBottom) || 0
}
/*
* On some mobile browsers, the value reported by window.innerHeight
* is not the true viewport height. This method returns
* the actual viewport.
*/
export function getViewportBounds(bounds: Bounds) {
if (!viewportTester.parentNode) document.documentElement.append(viewportTester)
bounds.left = pageXOffset
bounds.top = pageYOffset
bounds.width = viewportTester.offsetWidth
bounds.height = viewportTester.offsetHeight
return bounds
}
const viewportTester = document.createElement('div')
viewportTester.id = 'VIEWPORT'
viewportTester.style.position = 'fixed'
viewportTester.style.width = '100vw'
viewportTester.style.height = '100vh'
viewportTester.style.visibility = 'hidden'
viewportTester.style.pointerEvents = 'none'
export function getDocumentBounds(document: Document, bounds: Bounds) {
const documentElement = document.documentElement
const body = document.body
const documentElementStyle = getComputedStyle(documentElement)
const bodyStyle = getComputedStyle(body)
bounds.top =
body.offsetTop + parseFloat(documentElementStyle.marginTop as '') ||
0 + parseFloat(bodyStyle.marginTop as '') ||
0
bounds.left =
body.offsetLeft + parseFloat(documentElementStyle.marginLeft as '') ||
0 + parseFloat(bodyStyle.marginLeft as '') ||
0
bounds.width = Math.max(
Math.max(body.scrollWidth, documentElement.scrollWidth),
Math.max(body.offsetWidth, documentElement.offsetWidth),
Math.max(body.clientWidth, documentElement.clientWidth)
)
bounds.height = Math.max(
Math.max(body.scrollHeight, documentElement.scrollHeight),
Math.max(body.offsetHeight, documentElement.offsetHeight),
Math.max(body.clientHeight, documentElement.clientHeight)
)
return bounds
}
export function toDOM(html:string) {
const wrapper = document.createElement('div')
wrapper.innerHTML = html
const el = wrapper.firstElementChild!
wrapper | {
let classSelector = ''
const classList = element.classList
for (const c of classList) {
classSelector += '.' + c
}
return classSelector
} | identifier_body |
|
dom-utils.ts | attr.name}="${attr.value}"]`
}
return attributes
}
export function path(el: HTMLElement, rootNode = document.documentElement) : string {
const selector = el.tagName.toLowerCase() + id(el) + classes(el) + attributes(el) + nthChild(el)
const hasParent = el.parentNode && el.parentNode !== rootNode && (el.parentNode as any).tagName
return hasParent ? path(el.parentNode as HTMLElement, rootNode) + ' > ' + selector : selector
}
export function hash(el: HTMLElement) {
const cssPath = path(el)
const type = (el as HTMLInputElement).type
const checked = (el as HTMLInputElement).checked
const value = (el as HTMLInputElement).value
const textContent = (el as HTMLInputElement).textContent
}
export function traverseChildElements(
element: Element,
each: (element: Element, level: number) => boolean,
bind?: any,
level = 0
) {
level++
element = (element.shadowRoot || element) as Element
for (let child: Element | null = element.firstElementChild; child; child = child.nextElementSibling) {
if ((child as HTMLElement).assignedSlot) continue
const assignedElements = (child as HTMLSlotElement).assignedElements?.({flatten:true})
if (assignedElements) for (const assigned of assignedElements) {
if (each.call(bind, assigned, level)) {
traverseChildElements(child, each, bind, level)
}
}
if (each.call(bind, child, level)) {
traverseChildElements(child, each, bind, level)
}
}
}
export function addCSSRule(sheet:any, selector:string, rules:string, index:number) : void {
if ('insertRule' in sheet) {
sheet.insertRule(selector + '{' + rules + '}', index)
} else if ('addRule' in sheet) |
}
export class Bounds {
left = 0
top = 0
width = 0
height = 0
copy(rect: Bounds) {
this.top = rect.top
this.left = rect.left
this.width = rect.width
this.height = rect.height
return this
}
}
export class Edges {
left = 0
top = 0
right = 0
bottom = 0
copy(rect: Edges) {
this.top = rect.top
this.left = rect.left
this.right = rect.right
this.bottom = rect.bottom
return this
}
}
export function getBounds(
element: Element,
bounds: Bounds = new Bounds(),
referenceElement?: Element
) {
const doc = element.ownerDocument!
const docEl = doc.documentElement
const body = doc.body
if (element === docEl) {
return getDocumentBounds(doc, bounds)
}
if (referenceElement === element) {
bounds.left = 0
bounds.top = 0
bounds.width = (element as HTMLElement).offsetWidth
bounds.height = (element as HTMLElement).offsetHeight
return
}
const defaultView = element.ownerDocument!.defaultView!
let el: HTMLElement | null = element as HTMLElement
let computedStyle
let offsetParent = el.offsetParent as HTMLElement
let prevComputedStyle = defaultView.getComputedStyle(el, null)
let top = el.offsetTop
let left = el.offsetLeft
if (
offsetParent &&
referenceElement &&
(
offsetParent.contains(referenceElement) ||
offsetParent.contains((referenceElement.getRootNode() as ShadowRoot).host)
) &&
offsetParent !== referenceElement
) {
getBounds(referenceElement, bounds, offsetParent)
left -= bounds.left
top -= bounds.top
}
while (
(el = el.parentElement) &&
el !== body &&
el !== docEl &&
el !== referenceElement
) {
if (prevComputedStyle.position === 'fixed') {
break
}
computedStyle = defaultView.getComputedStyle(el, null)
top -= el.scrollTop
left -= el.scrollLeft
if (el === offsetParent) {
top += el.offsetTop
left += el.offsetLeft
top += parseFloat(computedStyle.borderTopWidth) || 0
left += parseFloat(computedStyle.borderLeftWidth) || 0
offsetParent = el.offsetParent as HTMLElement
}
prevComputedStyle = computedStyle
}
// if (prevComputedStyle.position === 'relative' || prevComputedStyle.position === 'static') {
// getDocumentBounds(doc, bounds)
// top += bounds.top
// left += bounds.left
// }
if (prevComputedStyle.position === 'fixed') {
top += Math.max(docEl.scrollTop, body.scrollTop)
left += Math.max(docEl.scrollLeft, body.scrollLeft)
}
// let el = element
// let left = el.offsetLeft
// let top = el.offsetTop
// let offsetParent = el.offsetParent
// while (el && el.nodeType !== Node.DOCUMENT_NODE) {
// left -= el.scrollLeft
// top -= el.scrollTop
// if (el === offsetParent) {
// const style = window.getComputedStyle(el)
// left += el.offsetLeft + parseFloat(style.borderLeftWidth!) || 0
// top += el.offsetTop + parseFloat(style.borderTopWidth!) || 0
// offsetParent = el.offsetParent
// }
// el = el.offsetParent as any
// }
bounds.left = left
bounds.top = top
bounds.width = (element as HTMLElement).offsetWidth
bounds.height = (element as HTMLElement).offsetHeight
return bounds
}
export function getMargin(element: Element, margin: Edges) {
if ((element as HTMLElement).offsetParent === null) {
margin.left = margin.right = margin.top = margin.bottom = 0
return
}
let style = getComputedStyle(element)
margin.left = parseFloat(style.marginLeft) || 0
margin.right = parseFloat(style.marginRight) || 0
margin.top = parseFloat(style.marginTop) || 0
margin.bottom = parseFloat(style.marginBottom) || 0
}
export function getBorder(element: Element, border: Edges) {
let style = getComputedStyle(element)
border.left = parseFloat(style.borderLeftWidth) || 0
border.right = parseFloat(style.borderRightWidth) || 0
border.top = parseFloat(style.borderTopWidth) || 0
border.bottom = parseFloat(style.borderBottomWidth) || 0
}
export function getPadding(element: Element, padding: Edges) {
let style = getComputedStyle(element)
padding.left = parseFloat(style.paddingLeft) || 0
padding.right = parseFloat(style.paddingRight) || 0
padding.top = parseFloat(style.paddingTop) || 0
padding.bottom = parseFloat(style.paddingBottom) || 0
}
/*
* On some mobile browsers, the value reported by window.innerHeight
* is not the true viewport height. This method returns
* the actual viewport.
*/
export function getViewportBounds(bounds: Bounds) {
if (!viewportTester.parentNode) document.documentElement.append(viewportTester)
bounds.left = pageXOffset
bounds.top = pageYOffset
bounds.width = viewportTester.offsetWidth
bounds.height = viewportTester.offsetHeight
return bounds
}
const viewportTester = document.createElement('div')
viewportTester.id = 'VIEWPORT'
viewportTester.style.position = 'fixed'
viewportTester.style.width = '100vw'
viewportTester.style.height = '100vh'
viewportTester.style.visibility = 'hidden'
viewportTester.style.pointerEvents = 'none'
export function getDocumentBounds(document: Document, bounds: Bounds) {
const documentElement = document.documentElement
const body = document.body
const documentElementStyle = getComputedStyle(documentElement)
const bodyStyle = getComputedStyle(body)
bounds.top =
body.offsetTop + parseFloat(documentElementStyle.marginTop as '') ||
0 + parseFloat(bodyStyle.marginTop as '') ||
0
bounds.left =
body.offsetLeft + parseFloat(documentElementStyle.marginLeft as '') ||
0 + parseFloat(bodyStyle.marginLeft as '') ||
0
bounds.width = Math.max(
Math.max(body.scrollWidth, documentElement.scrollWidth),
Math.max(body.offsetWidth, documentElement.offsetWidth),
Math.max(body.clientWidth, documentElement.clientWidth)
)
bounds.height = Math.max(
Math.max(body.scrollHeight, documentElement.scrollHeight),
Math.max(body.offsetHeight, documentElement.offsetHeight),
Math.max(body.clientHeight, documentElement.clientHeight)
)
return bounds
}
export function toDOM(html:string) {
const wrapper = document.createElement('div')
wrapper.innerHTML = html
const el = wrapper.firstElementChild!
wrapper.removeChild(el)
return el
}
export const downloadBlob = function(blob:Blob, filename:string){
const a = document.createElement('a')
a.download = filename
a.href = window.URL.createObjectURL(blob)
a.dataset.downloadurl = ['application/octet-stream', a.download, a.href].join(':')
a.click()
}
const scratchMat1 = new Matrix4()
const scratchMat2 = new Matrix4()
export function parseCSSTransform(computedStyle:CSSStyleDeclaration, width:number, height:number, pixelSize:number, out = new Matrix4()) {
const transform = computedStyle.transform
const transformOrigin = computedStyle | {
sheet.addRule(selector, rules, index)
} | conditional_block |
dom-utils.ts | ${attr.name}="${attr.value}"]`
}
return attributes
}
export function path(el: HTMLElement, rootNode = document.documentElement) : string {
const selector = el.tagName.toLowerCase() + id(el) + classes(el) + attributes(el) + nthChild(el)
const hasParent = el.parentNode && el.parentNode !== rootNode && (el.parentNode as any).tagName
return hasParent ? path(el.parentNode as HTMLElement, rootNode) + ' > ' + selector : selector
}
export function hash(el: HTMLElement) {
const cssPath = path(el)
const type = (el as HTMLInputElement).type
const checked = (el as HTMLInputElement).checked
const value = (el as HTMLInputElement).value
const textContent = (el as HTMLInputElement).textContent
}
export function traverseChildElements(
element: Element,
each: (element: Element, level: number) => boolean,
bind?: any,
level = 0
) {
level++
element = (element.shadowRoot || element) as Element
for (let child: Element | null = element.firstElementChild; child; child = child.nextElementSibling) {
if ((child as HTMLElement).assignedSlot) continue
const assignedElements = (child as HTMLSlotElement).assignedElements?.({flatten:true})
if (assignedElements) for (const assigned of assignedElements) {
if (each.call(bind, assigned, level)) {
traverseChildElements(child, each, bind, level)
}
}
if (each.call(bind, child, level)) {
traverseChildElements(child, each, bind, level)
}
}
}
export function addCSSRule(sheet:any, selector:string, rules:string, index:number) : void {
if ('insertRule' in sheet) {
sheet.insertRule(selector + '{' + rules + '}', index)
} else if ('addRule' in sheet) {
sheet.addRule(selector, rules, index)
}
}
export class Bounds {
left = 0
top = 0
width = 0
height = 0
copy(rect: Bounds) {
this.top = rect.top
this.left = rect.left
this.width = rect.width
this.height = rect.height
return this
}
}
export class Edges {
left = 0
top = 0
right = 0
bottom = 0
copy(rect: Edges) {
this.top = rect.top
this.left = rect.left
this.right = rect.right
this.bottom = rect.bottom
return this
}
}
export function getBounds(
element: Element,
bounds: Bounds = new Bounds(),
referenceElement?: Element
) {
const doc = element.ownerDocument!
const docEl = doc.documentElement
const body = doc.body
if (element === docEl) {
return getDocumentBounds(doc, bounds)
}
if (referenceElement === element) {
bounds.left = 0
bounds.top = 0
bounds.width = (element as HTMLElement).offsetWidth
bounds.height = (element as HTMLElement).offsetHeight
return
}
const defaultView = element.ownerDocument!.defaultView!
let el: HTMLElement | null = element as HTMLElement
let computedStyle
let offsetParent = el.offsetParent as HTMLElement
let prevComputedStyle = defaultView.getComputedStyle(el, null)
let top = el.offsetTop
let left = el.offsetLeft
if (
offsetParent &&
referenceElement &&
(
offsetParent.contains(referenceElement) ||
offsetParent.contains((referenceElement.getRootNode() as ShadowRoot).host)
) &&
offsetParent !== referenceElement
) {
getBounds(referenceElement, bounds, offsetParent)
left -= bounds.left
top -= bounds.top
}
while (
(el = el.parentElement) &&
el !== body &&
el !== docEl &&
el !== referenceElement
) {
if (prevComputedStyle.position === 'fixed') {
break
}
computedStyle = defaultView.getComputedStyle(el, null)
top -= el.scrollTop
left -= el.scrollLeft
if (el === offsetParent) {
top += el.offsetTop | top += parseFloat(computedStyle.borderTopWidth) || 0
left += parseFloat(computedStyle.borderLeftWidth) || 0
offsetParent = el.offsetParent as HTMLElement
}
prevComputedStyle = computedStyle
}
// if (prevComputedStyle.position === 'relative' || prevComputedStyle.position === 'static') {
// getDocumentBounds(doc, bounds)
// top += bounds.top
// left += bounds.left
// }
if (prevComputedStyle.position === 'fixed') {
top += Math.max(docEl.scrollTop, body.scrollTop)
left += Math.max(docEl.scrollLeft, body.scrollLeft)
}
// let el = element
// let left = el.offsetLeft
// let top = el.offsetTop
// let offsetParent = el.offsetParent
// while (el && el.nodeType !== Node.DOCUMENT_NODE) {
// left -= el.scrollLeft
// top -= el.scrollTop
// if (el === offsetParent) {
// const style = window.getComputedStyle(el)
// left += el.offsetLeft + parseFloat(style.borderLeftWidth!) || 0
// top += el.offsetTop + parseFloat(style.borderTopWidth!) || 0
// offsetParent = el.offsetParent
// }
// el = el.offsetParent as any
// }
bounds.left = left
bounds.top = top
bounds.width = (element as HTMLElement).offsetWidth
bounds.height = (element as HTMLElement).offsetHeight
return bounds
}
export function getMargin(element: Element, margin: Edges) {
if ((element as HTMLElement).offsetParent === null) {
margin.left = margin.right = margin.top = margin.bottom = 0
return
}
let style = getComputedStyle(element)
margin.left = parseFloat(style.marginLeft) || 0
margin.right = parseFloat(style.marginRight) || 0
margin.top = parseFloat(style.marginTop) || 0
margin.bottom = parseFloat(style.marginBottom) || 0
}
export function getBorder(element: Element, border: Edges) {
let style = getComputedStyle(element)
border.left = parseFloat(style.borderLeftWidth) || 0
border.right = parseFloat(style.borderRightWidth) || 0
border.top = parseFloat(style.borderTopWidth) || 0
border.bottom = parseFloat(style.borderBottomWidth) || 0
}
export function getPadding(element: Element, padding: Edges) {
let style = getComputedStyle(element)
padding.left = parseFloat(style.paddingLeft) || 0
padding.right = parseFloat(style.paddingRight) || 0
padding.top = parseFloat(style.paddingTop) || 0
padding.bottom = parseFloat(style.paddingBottom) || 0
}
/*
* On some mobile browsers, the value reported by window.innerHeight
* is not the true viewport height. This method returns
* the actual viewport.
*/
export function getViewportBounds(bounds: Bounds) {
if (!viewportTester.parentNode) document.documentElement.append(viewportTester)
bounds.left = pageXOffset
bounds.top = pageYOffset
bounds.width = viewportTester.offsetWidth
bounds.height = viewportTester.offsetHeight
return bounds
}
const viewportTester = document.createElement('div')
viewportTester.id = 'VIEWPORT'
viewportTester.style.position = 'fixed'
viewportTester.style.width = '100vw'
viewportTester.style.height = '100vh'
viewportTester.style.visibility = 'hidden'
viewportTester.style.pointerEvents = 'none'
export function getDocumentBounds(document: Document, bounds: Bounds) {
const documentElement = document.documentElement
const body = document.body
const documentElementStyle = getComputedStyle(documentElement)
const bodyStyle = getComputedStyle(body)
bounds.top =
body.offsetTop +
(parseFloat(documentElementStyle.marginTop as '') || 0) +
(parseFloat(bodyStyle.marginTop as '') || 0)
bounds.left =
body.offsetLeft +
(parseFloat(documentElementStyle.marginLeft as '') || 0) +
(parseFloat(bodyStyle.marginLeft as '') || 0)
bounds.width = Math.max(
Math.max(body.scrollWidth, documentElement.scrollWidth),
Math.max(body.offsetWidth, documentElement.offsetWidth),
Math.max(body.clientWidth, documentElement.clientWidth)
)
bounds.height = Math.max(
Math.max(body.scrollHeight, documentElement.scrollHeight),
Math.max(body.offsetHeight, documentElement.offsetHeight),
Math.max(body.clientHeight, documentElement.clientHeight)
)
return bounds
}
export function toDOM(html:string) {
const wrapper = document.createElement('div')
wrapper.innerHTML = html
const el = wrapper.firstElementChild!
wrapper.removeChild(el)
return el
}
export const downloadBlob = function(blob:Blob, filename:string){
const a = document.createElement('a')
a.download = filename
a.href = window.URL.createObjectURL(blob)
a.dataset.downloadurl = ['application/octet-stream', a.download, a.href].join(':')
a.click()
}
const scratchMat1 = new Matrix4()
const scratchMat2 = new Matrix4()
export function parseCSSTransform(computedStyle:CSSStyleDeclaration, width:number, height:number, pixelSize:number, out = new Matrix4()) {
const transform = computedStyle.transform
const transformOrigin = computedStyle.transformOrigin
| left += el.offsetLeft | random_line_split |
dom-utils.ts | attr.name}="${attr.value}"]`
}
return attributes
}
export function path(el: HTMLElement, rootNode = document.documentElement) : string {
const selector = el.tagName.toLowerCase() + id(el) + classes(el) + attributes(el) + nthChild(el)
const hasParent = el.parentNode && el.parentNode !== rootNode && (el.parentNode as any).tagName
return hasParent ? path(el.parentNode as HTMLElement, rootNode) + ' > ' + selector : selector
}
export function hash(el: HTMLElement) {
const cssPath = path(el)
const type = (el as HTMLInputElement).type
const checked = (el as HTMLInputElement).checked
const value = (el as HTMLInputElement).value
const textContent = (el as HTMLInputElement).textContent
}
export function traverseChildElements(
element: Element,
each: (element: Element, level: number) => boolean,
bind?: any,
level = 0
) {
level++
element = (element.shadowRoot || element) as Element
for (let child: Element | null = element.firstElementChild; child; child = child.nextElementSibling) {
if ((child as HTMLElement).assignedSlot) continue
const assignedElements = (child as HTMLSlotElement).assignedElements?.({flatten:true})
if (assignedElements) for (const assigned of assignedElements) {
if (each.call(bind, assigned, level)) {
traverseChildElements(child, each, bind, level)
}
}
if (each.call(bind, child, level)) {
traverseChildElements(child, each, bind, level)
}
}
}
export function addCSSRule(sheet:any, selector:string, rules:string, index:number) : void {
if ('insertRule' in sheet) {
sheet.insertRule(selector + '{' + rules + '}', index)
} else if ('addRule' in sheet) {
sheet.addRule(selector, rules, index)
}
}
export class Bounds {
left = 0
top = 0
width = 0
height = 0
copy(rect: Bounds) {
this.top = rect.top
this.left = rect.left
this.width = rect.width
this.height = rect.height
return this
}
}
export class Edges {
left = 0
top = 0
right = 0
bottom = 0
copy(rect: Edges) {
this.top = rect.top
this.left = rect.left
this.right = rect.right
this.bottom = rect.bottom
return this
}
}
export function getBounds(
element: Element,
bounds: Bounds = new Bounds(),
referenceElement?: Element
) {
const doc = element.ownerDocument!
const docEl = doc.documentElement
const body = doc.body
if (element === docEl) {
return getDocumentBounds(doc, bounds)
}
if (referenceElement === element) {
bounds.left = 0
bounds.top = 0
bounds.width = (element as HTMLElement).offsetWidth
bounds.height = (element as HTMLElement).offsetHeight
return
}
const defaultView = element.ownerDocument!.defaultView!
let el: HTMLElement | null = element as HTMLElement
let computedStyle
let offsetParent = el.offsetParent as HTMLElement
let prevComputedStyle = defaultView.getComputedStyle(el, null)
let top = el.offsetTop
let left = el.offsetLeft
if (
offsetParent &&
referenceElement &&
(
offsetParent.contains(referenceElement) ||
offsetParent.contains((referenceElement.getRootNode() as ShadowRoot).host)
) &&
offsetParent !== referenceElement
) {
getBounds(referenceElement, bounds, offsetParent)
left -= bounds.left
top -= bounds.top
}
while (
(el = el.parentElement) &&
el !== body &&
el !== docEl &&
el !== referenceElement
) {
if (prevComputedStyle.position === 'fixed') {
break
}
computedStyle = defaultView.getComputedStyle(el, null)
top -= el.scrollTop
left -= el.scrollLeft
if (el === offsetParent) {
top += el.offsetTop
left += el.offsetLeft
top += parseFloat(computedStyle.borderTopWidth) || 0
left += parseFloat(computedStyle.borderLeftWidth) || 0
offsetParent = el.offsetParent as HTMLElement
}
prevComputedStyle = computedStyle
}
// if (prevComputedStyle.position === 'relative' || prevComputedStyle.position === 'static') {
// getDocumentBounds(doc, bounds)
// top += bounds.top
// left += bounds.left
// }
if (prevComputedStyle.position === 'fixed') {
top += Math.max(docEl.scrollTop, body.scrollTop)
left += Math.max(docEl.scrollLeft, body.scrollLeft)
}
// let el = element
// let left = el.offsetLeft
// let top = el.offsetTop
// let offsetParent = el.offsetParent
// while (el && el.nodeType !== Node.DOCUMENT_NODE) {
// left -= el.scrollLeft
// top -= el.scrollTop
// if (el === offsetParent) {
// const style = window.getComputedStyle(el)
// left += el.offsetLeft + parseFloat(style.borderLeftWidth!) || 0
// top += el.offsetTop + parseFloat(style.borderTopWidth!) || 0
// offsetParent = el.offsetParent
// }
// el = el.offsetParent as any
// }
bounds.left = left
bounds.top = top
bounds.width = (element as HTMLElement).offsetWidth
bounds.height = (element as HTMLElement).offsetHeight
return bounds
}
export function getMargin(element: Element, margin: Edges) {
if ((element as HTMLElement).offsetParent === null) {
margin.left = margin.right = margin.top = margin.bottom = 0
return
}
let style = getComputedStyle(element)
margin.left = parseFloat(style.marginLeft) || 0
margin.right = parseFloat(style.marginRight) || 0
margin.top = parseFloat(style.marginTop) || 0
margin.bottom = parseFloat(style.marginBottom) || 0
}
export function getBorder(element: Element, border: Edges) {
let style = getComputedStyle(element)
border.left = parseFloat(style.borderLeftWidth) || 0
border.right = parseFloat(style.borderRightWidth) || 0
border.top = parseFloat(style.borderTopWidth) || 0
border.bottom = parseFloat(style.borderBottomWidth) || 0
}
export function getPadding(element: Element, padding: Edges) {
let style = getComputedStyle(element)
padding.left = parseFloat(style.paddingLeft) || 0
padding.right = parseFloat(style.paddingRight) || 0
padding.top = parseFloat(style.paddingTop) || 0
padding.bottom = parseFloat(style.paddingBottom) || 0
}
/*
* On some mobile browsers, the value reported by window.innerHeight
* is not the true viewport height. This method returns
* the actual viewport.
*/
export function | (bounds: Bounds) {
if (!viewportTester.parentNode) document.documentElement.append(viewportTester)
bounds.left = pageXOffset
bounds.top = pageYOffset
bounds.width = viewportTester.offsetWidth
bounds.height = viewportTester.offsetHeight
return bounds
}
const viewportTester = document.createElement('div')
viewportTester.id = 'VIEWPORT'
viewportTester.style.position = 'fixed'
viewportTester.style.width = '100vw'
viewportTester.style.height = '100vh'
viewportTester.style.visibility = 'hidden'
viewportTester.style.pointerEvents = 'none'
export function getDocumentBounds(document: Document, bounds: Bounds) {
const documentElement = document.documentElement
const body = document.body
const documentElementStyle = getComputedStyle(documentElement)
const bodyStyle = getComputedStyle(body)
bounds.top =
body.offsetTop +
(parseFloat(documentElementStyle.marginTop as '') || 0) +
(parseFloat(bodyStyle.marginTop as '') || 0)
bounds.left =
body.offsetLeft +
(parseFloat(documentElementStyle.marginLeft as '') || 0) +
(parseFloat(bodyStyle.marginLeft as '') || 0)
bounds.width = Math.max(
Math.max(body.scrollWidth, documentElement.scrollWidth),
Math.max(body.offsetWidth, documentElement.offsetWidth),
Math.max(body.clientWidth, documentElement.clientWidth)
)
bounds.height = Math.max(
Math.max(body.scrollHeight, documentElement.scrollHeight),
Math.max(body.offsetHeight, documentElement.offsetHeight),
Math.max(body.clientHeight, documentElement.clientHeight)
)
return bounds
}
export function toDOM(html:string) {
const wrapper = document.createElement('div')
wrapper.innerHTML = html
const el = wrapper.firstElementChild!
wrapper.removeChild(el)
return el
}
export const downloadBlob = function(blob:Blob, filename:string){
const a = document.createElement('a')
a.download = filename
a.href = window.URL.createObjectURL(blob)
a.dataset.downloadurl = ['application/octet-stream', a.download, a.href].join(':')
a.click()
}
const scratchMat1 = new Matrix4()
const scratchMat2 = new Matrix4()
export function parseCSSTransform(computedStyle:CSSStyleDeclaration, width:number, height:number, pixelSize:number, out = new Matrix4()) {
const transform = computedStyle.transform
const transformOrigin = computedStyle.transform | getViewportBounds | identifier_name |
Legend.js | ) {
button.show();
button.setHandler(function () {
sheet && sheet.show();
});
}
// TODO: Investigate why we have to set parent to null.
view.setParent(null);
view.setScrollable(true);
sheet.add(view);
sheet.setConfig({
enter: "bottom",
anim: anim,
enterAnimation: anim,
exitAnimation: anim
});
Ext.Viewport.add(sheet);
}
} else {
button.hide();
view.setConfig({
parent: null,
scrollable: false
});
view.setRenderTo(chart && chart.element);
}
},
updateDock: function (dock) {
this.wire();
},
applyButton: function (button, oldButton) {
return Ext.factory(button, "Ext.Button", oldButton);
},
updateButton: function (button, oldButton) {
if (oldButton) {
oldButton.destroy();
}
this.wire();
},
applySheet: function (sheet, oldSheet) {
return Ext.factory(sheet, "Ext.Sheet", oldSheet);
},
updateSheet: function (sheet, oldSheet) {
if (oldSheet) {
oldSheet.destroy();
}
sheet.on({
delegate: 'button',
tap: function () {
sheet.hide();
}
});
this.wire();
},
updateChart: function (chart, oldChart) {
var me = this,
button = me.getButton(),
chartEl = chart && chart.element, view = me.getView(),
sheet = me.getSheet(), sheetAnim;
me.wire();
if (oldChart) {
oldChart.un('serieschange', me.orient, me);
}
if (chart) {
chart.on('serieschange', me.orient, me);
}
},
applyView: function (view, currentView) {
return Ext.factory(view, "Ext.chart.legend.View", currentView);
},
updateView: function (view, currentView) {
if (currentView) {
currentView.setLegend(false);
}
this.wire();
},
/**
* @private Determine whether the legend should be displayed. Looks at the legend's 'visible' config,
* and also the 'showInLegend' config for each of the series.
* @return {Boolean}
*/
isDisplayed: function () {
return this.getVisible() && this.getChart().getSeries().findIndex('showInLegend', true) !== -1;
},
/**
* Returns whether the legend is configured with orientation-specific positions.
* @return {Boolean}
*/
isOrientationSpecific: function () {
var position = this.getPosition();
return (Ext.isObject(position) && 'portrait' in position);
},
/**
* Get the target position of the legend, after resolving any orientation-specific configs.
* In most cases this method should be used rather than reading the `position` property directly.
* @return {String/Object} The position config value
*/
getDockedPosition: function () {
var me = this,
position = me.getPosition();
// Grab orientation-specific config if specified
if (me.isOrientationSpecific()) {
position = position[Ext.Viewport.getOrientation()];
}
// If legend is docked, default non-String values to 'bottom'
if (me.getPopup() && !Ext.isString(position)) {
position = 'bottom';
}
return position;
},
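/*
* Illustrative config sketch (assumed usage, not taken from this file): an
* orientation-specific position that isOrientationSpecific() detects and that
* this method resolves against Ext.Viewport.getOrientation(), e.g.
*
* legend: {
* position: {
* portrait: 'bottom',
* landscape: 'right'
* }
* }
*/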
/**
* Returns whether the orientation of the legend items is vertical.
* @return {Boolean} `true` if the legend items are to be arranged stacked vertically, `false` if they
* are to be arranged side-by-side.
*/
isVertical: function () {
var position = this.getDockedPosition();
return this.getPopup() || (Ext.isObject(position) ? position.vertical : "left|right|float".indexOf('' + position) !== -1);
},
/**
* Update the legend component to match the current viewport orientation.
*/
orient: function () {
var me = this,
sheet = me.getSheet(),
position = me.getDockedPosition(),
orientation = Ext.Viewport.getOrientation();
me.wire();
me.getView().orient();
if (me.getPopup() && me.lastOrientation !== orientation) {
if (sheet) {
sheet.hide();
sheet.setSize(null, null);
if (orientation == 'landscape') {
sheet.setConfig({
stretchX: true,
stretchY: false,
top: 0,
bottom: 0,
left: null,
right: 0,
width: 200,
height: null,
enter: 'right',
exit: 'right',
zIndex: 90
});
} else {
sheet.setConfig({
stretchX: true,
stretchY: false,
top: null,
bottom: 0,
left: 0,
right: 0,
width: null,
height: 200,
enter: 'bottom',
exit: 'bottom',
zIndex: 90
});
}
// sheet.orient();
}
me.lastOrientation = orientation;
}
me.getView().refreshStore();
},
/**
* @private Update the position of the legend if it is displayed and not docked.
*/
updateLocation: function () {
if (!this.getPopup()) {
var me = this,
chart = me.getChart(),
chartBBox = chart.chartBBox,
insets = chart.getInsetPadding(),
isObject = Ext.isObject(insets),
insetLeft = (isObject ? insets.left : insets) || 0,
insetRight = (isObject ? insets.right : insets) || 0,
insetBottom = (isObject ? insets.bottom : insets) || 0,
insetTop = (isObject ? insets.top : insets) || 0,
chartWidth = chart.element.getWidth(),
chartHeight = chart.element.getHeight(),
seriesWidth = chartBBox.width - (insetLeft + insetRight),
seriesHeight = chartBBox.height - (insetTop + insetBottom),
chartX = chartBBox.x + insetLeft,
chartY = chartBBox.y + insetTop,
isVertical = me.isVertical(),
view = me.getView(),
math = Math,
mfloor = math.floor,
mmin = math.min,
mmax = math.max,
x, y, legendWidth, legendHeight, maxWidth, maxHeight, position, undef;
me.orient();
if (me.isDisplayed()) {
// Calculate the natural size
view.show();
view.element.setSize(isVertical ? undef : null, isVertical ? null : undef); //clear fixed scroller length
legendWidth = view.element.getWidth();
legendHeight = view.element.getHeight();
position = me.getDockedPosition();
if (Ext.isObject(position)) {
// Object with x/y properties: use them directly
x = position.x;
y = position.y;
} else {
// Named positions - calculate x/y based on chart dimensions
switch (position) {
case "left":
x = insetLeft;
y = mfloor(chartY + seriesHeight / 2 - legendHeight / 2);
break;
case "right":
x = mfloor(chartWidth - legendWidth) - insetRight;
y = mfloor(chartY + seriesHeight / 2 - legendHeight / 2);
break;
case "top":
x = mfloor(chartX + seriesWidth / 2 - legendWidth / 2);
y = insetTop;
break;
default:
x = mfloor(chartX + seriesWidth / 2 - legendWidth / 2);
y = mfloor(chartHeight - legendHeight) - insetBottom;
}
x = mmax(x, insetLeft);
y = mmax(y, insetTop);
}
maxWidth = chartWidth - x - insetRight;
maxHeight = chartHeight - y - insetBottom;
view.setLeft(x);
view.setTop(y);
if (legendWidth > maxWidth || legendHeight > maxHeight) {
view.element.setSize(mmin(legendWidth, maxWidth), mmin(legendHeight, maxHeight));
}
} else {
view.hide();
}
}
},
/**
* Calculate and return the number of pixels that should be reserved for the legend along
* its edge. Only returns a non-zero value if the legend is positioned at one of the four
* named edges, and if it is not {@link #dock docked}.
*/
getInsetSize: function () {
var me = this,
pos = me.getDockedPosition(),
chartPadding = me.getChart().insets,
left = chartPadding.left,
bottom = chartPadding.bottom,
top = chartPadding.top,
right = chartPadding.right,
size = 0,
view;
if (!me.getPopup() && me.isDisplayed()) {
view = me.getView();
view.show();
if (pos === 'left' || pos === 'right') {
size = view.element.getWidth() + left;
}
else if (pos === 'top' || pos === 'bottom') {
size = view.element.getHeight() + top;
}
}
return size;
},
| /**
* Shows the legend if it is currently hidden.
*/
show: function () { | random_line_split |
|
Legend.js | : 'Legend',
items: [
{
xtype: 'spacer'
},
{
text: 'OK'
}
]
}
]
},
button: {
hide: true,
showAnimation: 'fade',
cls: Ext.baseCSSPrefix + 'legend-button',
iconCls: Ext.baseCSSPrefix + 'legend-button-icon',
iconMask: true
}
},
/**
* @event combine
* Fired when two legend items are combined together via drag-drop.
* @param {Ext.chart.Legend} legend
* @param {Ext.chart.series.Series} series The series owning the items being combined
* @param {Number} index1 The index of the first legend item
* @param {Number} index2 The index of the second legend item
*/
/**
* @event split
* Fired when a previously-combined legend item is split into its original constituent items.
* @param {Ext.chart.Legend} legend
* @param {Ext.chart.series.Series} series The series owning the item being split
* @param {Number} index The index of the legend item being split
*/
/**
* @constructor
* @param {Object} config
*/
constructor: function (config) {
var me = this;
me.initConfig(config);
me.callParent(arguments);
me.mixins.observable.constructor.apply(me, arguments);
},
wire: function () {
var me = this,
popup = me.getPopup(),
chart = me.getChart(),
toolbar = chart && chart.getToolbar(),
button = me.getButton(),
view = me.getView(),
orientation = Ext.Viewport.getOrientation(),
sheet,
anim = {
type: 'slide',
duration: 150,
direction: orientation === 'portrait' ? "down" : "left"
};
if (view) {
view.setLegend(this);
}
if (toolbar && button) {
toolbar.add(button);
}
if (popup) {
if ((sheet = me.getSheet())) {
if (button) {
button.show();
button.setHandler(function () {
sheet && sheet.show();
});
}
// TODO: Investigate why we have to set parent to null.
view.setParent(null);
view.setScrollable(true);
sheet.add(view);
sheet.setConfig({
enter: "bottom",
anim: anim,
enterAnimation: anim,
exitAnimation: anim
});
Ext.Viewport.add(sheet);
}
} else {
button.hide();
view.setConfig({
parent: null,
scrollable: false
});
view.setRenderTo(chart && chart.element);
}
},
updateDock: function (dock) {
this.wire();
},
applyButton: function (button, oldButton) {
return Ext.factory(button, "Ext.Button", oldButton);
},
updateButton: function (button, oldButton) {
if (oldButton) {
oldButton.destroy();
}
this.wire();
},
applySheet: function (sheet, oldSheet) {
return Ext.factory(sheet, "Ext.Sheet", oldSheet);
},
updateSheet: function (sheet, oldSheet) {
if (oldSheet) {
oldSheet.destroy();
}
sheet.on({
delegate: 'button',
tap: function () {
sheet.hide();
}
});
this.wire();
},
updateChart: function (chart, oldChart) {
var me = this,
button = me.getButton(),
chartEl = chart && chart.element, view = me.getView(),
sheet = me.getSheet(), sheetAnim;
me.wire();
if (oldChart) {
oldChart.un('serieschange', me.orient, me);
}
if (chart) {
chart.on('serieschange', me.orient, me);
}
},
applyView: function (view, currentView) {
return Ext.factory(view, "Ext.chart.legend.View", currentView);
},
updateView: function (view, currentView) {
if (currentView) {
currentView.setLegend(false);
}
this.wire();
},
/**
* @private Determine whether the legend should be displayed. Looks at the legend's 'visible' config,
* and also the 'showInLegend' config for each of the series.
* @return {Boolean}
*/
isDisplayed: function () {
return this.getVisible() && this.getChart().getSeries().findIndex('showInLegend', true) !== -1;
},
/**
* Returns whether the legend is configured with orientation-specific positions.
* @return {Boolean}
*/
isOrientationSpecific: function () {
var position = this.getPosition();
return (Ext.isObject(position) && 'portrait' in position);
},
/**
* Get the target position of the legend, after resolving any orientation-specific configs.
* In most cases this method should be used rather than reading the `position` property directly.
* @return {String/Object} The position config value
*/
getDockedPosition: function () {
var me = this,
position = me.getPosition();
// Grab orientation-specific config if specified
if (me.isOrientationSpecific()) {
position = position[Ext.Viewport.getOrientation()];
}
// If legend is docked, default non-String values to 'bottom'
if (me.getPopup() && !Ext.isString(position)) {
position = 'bottom';
}
return position;
},
/**
* Returns whether the orientation of the legend items is vertical.
* @return {Boolean} `true` if the legend items are to be arranged stacked vertically, `false` if they
* are to be arranged side-by-side.
*/
isVertical: function () {
var position = this.getDockedPosition();
return this.getPopup() || (Ext.isObject(position) ? position.vertical : "left|right|float".indexOf('' + position) !== -1);
},
/**
* Update the legend component to match the current viewport orientation.
*/
orient: function () {
var me = this,
sheet = me.getSheet(),
position = me.getDockedPosition(),
orientation = Ext.Viewport.getOrientation();
me.wire();
me.getView().orient();
if (me.getPopup() && me.lastOrientation !== orientation) {
if (sheet) {
sheet.hide();
sheet.setSize(null, null);
if (orientation == 'landscape') {
sheet.setConfig({
stretchX: true,
stretchY: false,
top: 0,
bottom: 0,
left: null,
right: 0,
width: 200,
height: null,
enter: 'right',
exit: 'right',
zIndex: 90
});
} else {
sheet.setConfig({
stretchX: true,
stretchY: false,
top: null,
bottom: 0,
left: 0,
right: 0,
width: null,
height: 200,
enter: 'bottom',
exit: 'bottom',
zIndex: 90
});
}
// sheet.orient();
}
me.lastOrientation = orientation;
}
me.getView().refreshStore();
},
/**
* @private Update the position of the legend if it is displayed and not docked.
*/
updateLocation: function () {
if (!this.getPopup()) {
var me = this,
chart = me.getChart(),
chartBBox = chart.chartBBox,
insets = chart.getInsetPadding(),
isObject = Ext.isObject(insets),
insetLeft = (isObject ? insets.left : insets) || 0,
insetRight = (isObject ? insets.right : insets) || 0,
insetBottom = (isObject ? insets.bottom : insets) || 0,
insetTop = (isObject ? insets.top : insets) || 0,
chartWidth = chart.element.getWidth(),
chartHeight = chart.element.getHeight(),
seriesWidth = chartBBox.width - (insetLeft + insetRight),
seriesHeight = chartBBox.height - (insetTop + insetBottom),
chartX = chartBBox.x + insetLeft,
chartY = chartBBox.y + insetTop,
isVertical = me.isVertical(),
view = me.getView(),
math = Math,
mfloor = math.floor,
mmin = math.min,
mmax = math.max,
x, y, legendWidth, legendHeight, maxWidth, maxHeight, position, undef;
me.orient();
if (me.isDisplayed()) | {
// Calculate the natural size
view.show();
view.element.setSize(isVertical ? undef : null, isVertical ? null : undef); //clear fixed scroller length
legendWidth = view.element.getWidth();
legendHeight = view.element.getHeight();
position = me.getDockedPosition();
if (Ext.isObject(position)) {
// Object with x/y properties: use them directly
x = position.x;
y = position.y;
} else {
// Named positions - calculate x/y based on chart dimensions
switch (position) {
case "left":
x = insetLeft;
y = mfloor(chartY + seriesHeight / 2 - legendHeight / 2);
break;
case "right": | conditional_block |
|
deflate.rs | / 2));
println!("{}", 2u32.pow(code as u32 / 2 - 1));
println!("{}", 2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1);
2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1
},
_ => panic!("Logic error handling base distance"),
};
let num_distance_extra_bits = match code {
0 ... 3 => 0,
4 ... 29 => (code / 2) - 1,
_ => panic!("Distance is undefined for: {}", code),
};
let distance_offset = input_stream.next_bits(num_distance_extra_bits) as u32;
println!("Code: {} Base Distance: {} Offset: {} Bits: {}", code, base_distance, distance_offset, num_distance_extra_bits);
let distance = base_distance + distance_offset;
distance as usize
}
fn read_repeat_length(value: usize, input_stream: &mut BitStream) -> usize {
let num_length_extra_bits = match value {
257 ... 264 => 0,
265 ... 284 => (value - 265) / 4 + 1,
285 => 0,
_ => panic!("Unsupported value for length: {}", value),
};
let length_offset = input_stream.next_bits(num_length_extra_bits) as usize;
let base_length = match value {
257 ... 264 => value - 254,
265 ... 268 => 11 + 2 * (value - 265),
269 ... 272 => 19 + 4 * (value - 269),
273 ... 276 => 35 + 8 * (value - 273),
277 ... 280 => 67 + 16 * (value - 277),
281 ... 284 => 131 + 32 * (value - 281),
285 => 258,
_ => panic!("Unsupported value for length: {}", value),
};
println!("Base Length: {} Offset: {} Bits: {}", base_length, length_offset, num_length_extra_bits);
let length = base_length + length_offset;
return length
}
fn read_code_length_huffman(length: usize, input_stream: &mut BitStream) -> Huffman<usize> {
// Read the hclen + 4 code lengths for the code-length alphabet (3 bits each)
let alphabet = vec![16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15];
let mut bit_lengths = vec![0; alphabet.len()];
assert_eq!(alphabet.len(), 19);
for index in 0..length {
let length = input_stream.next_bits(3) as usize;
bit_lengths[index] = length
}
println!("Alphabet : {:?}", alphabet);
println!("Bit Lengths : {:?}", bit_lengths);
let h = Huffman::new(&alphabet, &bit_lengths);
println!("Code Length Huffman = {:?}", h);
h
}
fn read_literal_length_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
// Includes 0 and 285, but not 286
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman);
let result = Huffman::new(&alphabet, &lengths);
println!("Literal/Length Huffman = {:?}", result);
result
}
fn read_distance_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman);
let result = Huffman::new(&alphabet, &lengths);
println!("Distance Huffman = {:?}", result);
result
}
fn read_code_lengths(count: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Vec<usize> {
let mut lengths = Vec::new();
while lengths.len() < count {
let length = DynamicBlock::get_next_huffman_encoded_value(code_length_huffman, input_stream);
println!("Found this length: {}", length);
// Literal value
if length <= 15 {
lengths.push(length);
continue
}
// Otherwise, it's a repeater of a previous value or zero
let (repeat_value, count) = match length {
16 => {
let value = (*lengths.last().expect("Cannot repeat at start of stream")).clone();
let count = input_stream.next_bits(2) + 3;
(value, count)
},
17 => (0, input_stream.next_bits(3) + 3),
18 => (0, input_stream.next_bits(7) + 11),
_ => panic!("Unsupported code length {}", length)
};
for _ in 0..count {
lengths.push(repeat_value)
}
}
// By the end, we should NOT have more or less than we want
// The encoding should generate exactly `count` entries into
// the list of code lengths
assert_eq!(lengths.len(), count);
println!("Lengths by alphabet: {:?}", lengths);
lengths
}
fn get_next_huffman_encoded_value<T : Copy + Eq>(huffman: &Huffman<T>, input_stream: &mut BitStream) -> T {
match huffman {
Huffman::Branch{zero, one} => {
if input_stream.next() {
DynamicBlock::get_next_huffman_encoded_value(one, input_stream)
} else {
DynamicBlock::get_next_huffman_encoded_value(zero, input_stream)
}
},
Huffman::Leaf(value) => *value,
Huffman::DeadEnd => panic!("Reached dead end!"),
}
}
fn next(&mut self, bit_stream: &mut BitStream) -> DEFLATEResult {
if self.repeats_remaining > 0 {
self.repeats_remaining -= 1;
return DEFLATEResult::Repeat(self.last_repeat_distance)
}
let value = DynamicBlock::get_next_huffman_encoded_value(&self.literal_length_huffman, bit_stream);
println!("Found value: {}", value);
if value < 256 {
// This is a literal byte to emit to the output stream
// We know it's a byte because of the check above and
// it's defined that way by the standard
DEFLATEResult::Literal(value as u8)
} else if value == 256 {
println!("End of block encountered");
DEFLATEResult::EndOfBlock
} else if value <= 285 {
// The value is between [257, 285] inclusive on both ends
// This means it's a back reference so we have to copy
// from the buffer of written bytes some distance away
// and for some amount of repetition
let repeat_length = DynamicBlock::read_repeat_length(value, bit_stream);
let distance = DynamicBlock::read_repeat_distance(&self.distance_huffman, bit_stream);
self.last_repeat_distance = distance;
self.repeats_remaining = repeat_length - 1;
DEFLATEResult::Repeat(distance)
} else {
panic!("Unsupported value: {}", value);
}
}
}
enum DEFLATEResult {
Literal(u8),
EndOfBlock,
Repeat(usize),
}
/// Keeps track of the state of the DEFLATE reader. The state is necessary because
/// although we emit one byte at a time, we can generate multiple bytes at a
/// time with the repeat function.
pub struct DEFLATEReader<'a> {
// The following two fields are used to manage input/output
buffered_writer: BufferedWriter,
bit_stream: BitStream<'a>,
// The following two fields control if we read another block
has_seen_final_block: bool,
current_block: Option<DynamicBlock>,
}
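// Illustrative driver sketch (an assumption about intended usage, not an API
// defined in this file): each DEFLATEResult is expected to be turned into
// output bytes roughly as
//
// match result {
// DEFLATEResult::Literal(byte) => { /* append byte to the output */ }
// DEFLATEResult::Repeat(distance) => { /* re-emit the byte written `distance` bytes back */ }
// DEFLATEResult::EndOfBlock => { /* read the next block header, if any */ }
// }
//
// where the copy-from-history step is what `buffered_writer` exists for.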
const BTYPE_DYNAMIC : u8 = 0b10;
impl <'a> DEFLATEReader<'a> {
pub fn new<T : Source<u8> + 'a>(input: &'a mut T) -> DEFLATEReader<'a> | {
DEFLATEReader{
buffered_writer: BufferedWriter::new(),
bit_stream: BitStream::new(input),
has_seen_final_block: false,
current_block: None,
}
} | identifier_body |
|
deflate.rs | // A code ends up mapping to some base distance plus some
// extra bits to read to add to that base distance
let code = DynamicBlock::get_next_huffman_encoded_value(&distance_huffman, input_stream);
println!("Modulo: {}", code % 2);
let base_distance = match code {
0 ... 3 => {
code as u32 + 1
},
_ if code % 2 == 0 => {
println!("Even code");
2u32.pow(code as u32 / 2) + 1
},
_ if code % 2 == 1 => {
println!("Odd code");
println!("{}", 2u32.pow(code as u32 / 2));
println!("{}", 2u32.pow(code as u32 / 2 - 1));
println!("{}", 2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1);
2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1
},
_ => panic!("Logic error handling base distance"),
};
let num_distance_extra_bits = match code {
0 ... 3 => 0,
4 ... 29 => (code / 2) - 1,
_ => panic!("Distance is undefined for: {}", code),
};
let distance_offset = input_stream.next_bits(num_distance_extra_bits) as u32;
println!("Code: {} Base Distance: {} Offset: {} Bits: {}", code, base_distance, distance_offset, num_distance_extra_bits);
let distance = base_distance + distance_offset;
distance as usize
}
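// Worked example (illustrative, matches the RFC 1951 distance table): code 6
// is even, so base_distance = 2u32.pow(6 / 2) + 1 = 9 with 6 / 2 - 1 = 2 extra
// bits, covering distances 9..=12; code 7 is odd, so base_distance =
// 2u32.pow(3) + 2u32.pow(2) + 1 = 13, also with 2 extra bits, covering 13..=16.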
fn read_repeat_length(value: usize, input_stream: &mut BitStream) -> usize {
let num_length_extra_bits = match value {
257 ... 264 => 0,
265 ... 284 => (value - 265) / 4 + 1,
285 => 0,
_ => panic!("Unsupported value for length: {}", value),
};
let length_offset = input_stream.next_bits(num_length_extra_bits) as usize;
let base_length = match value {
257 ... 264 => value - 254,
265 ... 268 => 11 + 2 * (value - 265),
269 ... 272 => 19 + 4 * (value - 269),
273 ... 276 => 35 + 8 * (value - 273),
277 ... 280 => 67 + 16 * (value - 277),
281 ... 284 => 131 + 32 * (value - 281),
285 => 258,
_ => panic!("Unsupported value for length: {}", value),
};
println!("Base Length: {} Offset: {} Bits: {}", base_length, length_offset, num_length_extra_bits);
let length = base_length + length_offset;
return length
}
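// Worked example (illustrative): literal/length value 266 falls in the
// 265 ... 268 arm, so base_length = 11 + 2 * (266 - 265) = 13 with
// (266 - 265) / 4 + 1 = 1 extra bit, i.e. repeat lengths 13 or 14, matching
// the length table in RFC 1951.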
fn read_code_length_huffman(length: usize, input_stream: &mut BitStream) -> Huffman<usize> {
// Read the hclen + 4 code lengths for the code-length alphabet (3 bits each)
let alphabet = vec![16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15];
let mut bit_lengths = vec![0; alphabet.len()];
assert_eq!(alphabet.len(), 19);
for index in 0..length {
let length = input_stream.next_bits(3) as usize;
bit_lengths[index] = length
}
println!("Alphabet : {:?}", alphabet);
println!("Bit Lengths : {:?}", bit_lengths);
let h = Huffman::new(&alphabet, &bit_lengths);
println!("Code Length Huffman = {:?}", h);
h
}
fn read_literal_length_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
// Includes 0 and 285, but not 286
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman);
let result = Huffman::new(&alphabet, &lengths);
println!("Literal/Length Huffman = {:?}", result);
result
}
fn read_distance_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman);
let result = Huffman::new(&alphabet, &lengths);
println!("Distance Huffman = {:?}", result);
result
}
fn read_code_lengths(count: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Vec<usize> {
let mut lengths = Vec::new();
while lengths.len() < count {
let length = DynamicBlock::get_next_huffman_encoded_value(code_length_huffman, input_stream);
println!("Found this length: {}", length);
// Literal value
if length <= 15 {
lengths.push(length);
continue
}
// Otherwise, it's a repeater of a previous value or zero
let (repeat_value, count) = match length {
16 => {
let value = (*lengths.last().expect("Cannot repeat at start of stream")).clone();
let count = input_stream.next_bits(2) + 3;
(value, count)
},
17 => (0, input_stream.next_bits(3) + 3),
18 => (0, input_stream.next_bits(7) + 11),
_ => panic!("Unsupported code length {}", length)
};
for _ in 0..count {
lengths.push(repeat_value)
}
}
// By the end, we should NOT have more or less than we want
// The encoding should generate exactly `count` entries into
// the list of code lengths
assert_eq!(lengths.len(), count);
println!("Lengths by alphabet: {:?}", lengths);
lengths
}
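// Worked example (illustrative): symbol 16 repeats the previous length 3-6
// times (2 extra bits + 3), 17 emits 3-10 zeros (3 extra bits + 3), and 18
// emits 11-138 zeros (7 extra bits + 11); so a decoded sequence of
// [8, 16 with extra bits 0b01] expands to the lengths [8, 8, 8, 8, 8].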
fn get_next_huffman_encoded_value<T : Copy + Eq>(huffman: &Huffman<T>, input_stream: &mut BitStream) -> T {
match huffman {
Huffman::Branch{zero, one} => {
if input_stream.next() {
DynamicBlock::get_next_huffman_encoded_value(one, input_stream)
} else {
DynamicBlock::get_next_huffman_encoded_value(zero, input_stream)
}
},
Huffman::Leaf(value) => *value,
Huffman::DeadEnd => panic!("Reached dead end!"),
}
}
fn next(&mut self, bit_stream: &mut BitStream) -> DEFLATEResult {
if self.repeats_remaining > 0 {
self.repeats_remaining -= 1;
return DEFLATEResult::Repeat(self.last_repeat_distance)
}
let value = DynamicBlock::get_next_huffman_encoded_value(&self.literal_length_huffman, bit_stream);
println!("Found value: {}", value);
if value < 256 {
// This is a literal byte to emit to the output stream
// We know it's a byte because of the check above and
// it's defined that way by the standard
DEFLATEResult::Literal(value as u8)
} else if value == 256 {
println!("End of block encountered");
DEFLATEResult::EndOfBlock
} else if value <= 285 {
// The value is between [257, 285] inclusive on both ends
// This means it's a back reference so we have to copy
// from the buffer of written bytes some distance away
// and for some amount of repetition
let repeat_length = DynamicBlock::read_repeat_length(value, bit_stream);
let distance = DynamicBlock::read_repeat_distance(&self.distance_huffman, bit_stream);
self.last_repeat_distance = distance;
self.repeats_remaining = repeat_length - 1;
DEFLATEResult::Repeat(distance)
} else {
panic!("Unsupported value: {}", value);
}
}
}
enum DEFLATEResult {
Literal(u8),
EndOfBlock,
Repeat(usize),
}
/// Keeps track of the state of the DEFLATE reader. The state is necessary because
/// although we emit one byte at a time, we can generate multiple bytes at a
/// time with the repeat function.
pub struct | DEFLATEReader | identifier_name |
|
deflate.rs | , &code_length_huffman);
let distance_huffman = DynamicBlock::read_distance_huffman(hdist, bit_stream, &code_length_huffman);
DynamicBlock{
repeats_remaining: 0,
last_repeat_distance: 0,
literal_length_huffman,
distance_huffman,
}
}
fn read_repeat_distance(distance_huffman: &Huffman<usize>, input_stream: &mut BitStream) -> usize {
// A code ends up mapping to some base distance plus some
// extra bits to read to add to that base distance
let code = DynamicBlock::get_next_huffman_encoded_value(&distance_huffman, input_stream);
println!("Modulo: {}", code % 2);
let base_distance = match code {
0 ... 3 => {
code as u32 + 1
},
_ if code % 2 == 0 => {
println!("Even code");
2u32.pow(code as u32 / 2) + 1
},
_ if code % 2 == 1 => {
println!("Odd code");
println!("{}", 2u32.pow(code as u32 / 2));
println!("{}", 2u32.pow(code as u32 / 2 - 1));
println!("{}", 2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1);
2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1
},
_ => panic!("Logic error handling base distance"),
};
let num_distance_extra_bits = match code {
0 ... 3 => 0,
4 ... 29 => (code / 2) - 1,
_ => panic!("Distance is undefined for: {}", code),
};
let distance_offset = input_stream.next_bits(num_distance_extra_bits) as u32;
println!("Code: {} Base Distance: {} Offset: {} Bits: {}", code, base_distance, distance_offset, num_distance_extra_bits);
let distance = base_distance + distance_offset;
distance as usize
}
fn read_repeat_length(value: usize, input_stream: &mut BitStream) -> usize {
let num_length_extra_bits = match value {
257 ... 264 => 0,
265 ... 284 => (value - 265) / 4 + 1,
285 => 0,
_ => panic!("Unsupported value for length: {}", value),
};
let length_offset = input_stream.next_bits(num_length_extra_bits) as usize;
let base_length = match value {
257 ... 264 => value - 254,
265 ... 268 => 11 + 2 * (value - 265),
269 ... 272 => 19 + 4 * (value - 269),
273 ... 276 => 35 + 8 * (value - 273),
277 ... 280 => 67 + 16 * (value - 277),
281 ... 284 => 131 + 32 * (value - 281),
285 => 258,
_ => panic!("Unsupported value for length: {}", value),
};
println!("Base Length: {} Offset: {} Bits: {}", base_length, length_offset, num_length_extra_bits);
let length = base_length + length_offset;
return length
}
fn read_code_length_huffman(length: usize, input_stream: &mut BitStream) -> Huffman<usize> {
// Read the hclen + 4 code lengths for the code-length alphabet (3 bits each)
let alphabet = vec![16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15];
let mut bit_lengths = vec![0; alphabet.len()];
assert_eq!(alphabet.len(), 19);
for index in 0..length {
let length = input_stream.next_bits(3) as usize;
bit_lengths[index] = length
}
println!("Alphabet : {:?}", alphabet);
println!("Bit Lengths : {:?}", bit_lengths);
let h = Huffman::new(&alphabet, &bit_lengths);
println!("Code Length Huffman = {:?}", h);
h
}
fn read_literal_length_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
// Includes 0 and 285, but not 286
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman); | println!("Literal/Length Huffman = {:?}", result);
result
}
fn read_distance_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman);
let result = Huffman::new(&alphabet, &lengths);
println!("Distance Huffman = {:?}", result);
result
}
fn read_code_lengths(count: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Vec<usize> {
let mut lengths = Vec::new();
while lengths.len() < count {
let length = DynamicBlock::get_next_huffman_encoded_value(code_length_huffman, input_stream);
println!("Found this length: {}", length);
// Literal value
if length <= 15 {
lengths.push(length);
continue
}
// Otherwise, it's a repeater of a previous value or zero
let (repeat_value, count) = match length {
16 => {
let value = (*lengths.last().expect("Cannot repeat at start of stream")).clone();
let count = input_stream.next_bits(2) + 3;
(value, count)
},
17 => (0, input_stream.next_bits(3) + 3),
18 => (0, input_stream.next_bits(7) + 11),
_ => panic!("Unsupported code length {}", length)
};
for _ in 0..count {
lengths.push(repeat_value)
}
}
// By the end, we should NOT have more or less than we want
// The encoding should generate exactly `count` entries into
// the list of code lengths
assert_eq!(lengths.len(), count);
println!("Lengths by alphabet: {:?}", lengths);
lengths
}
fn get_next_huffman_encoded_value<T : Copy + Eq>(huffman: &Huffman<T>, input_stream: &mut BitStream) -> T {
match huffman {
Huffman::Branch{zero, one} => {
if input_stream.next() {
DynamicBlock::get_next_huffman_encoded_value(one, input_stream)
} else {
DynamicBlock::get_next_huffman_encoded_value(zero, input_stream)
}
},
Huffman::Leaf(value) => *value,
Huffman::DeadEnd => panic!("Reached dead end!"),
}
}
fn next(&mut self, bit_stream: &mut BitStream) -> DEFLATEResult {
if self.repeats_remaining > 0 {
self.repeats_remaining -= 1;
return DEFLATEResult::Repeat(self.last_repeat_distance)
}
let value = DynamicBlock::get_next_huffman_encoded_value(&self.literal_length_huffman, bit_stream);
println!("Found value: {}", value);
if value < 256 {
// This is a literal byte to emit to the output stream
// We know it's a byte because of the check above and
// it's defined that way by the standard
DEFLATEResult::Literal(value as u8)
} else if value == 256 {
println!("End of block encountered");
DEFLATEResult::EndOfBlock
} else if value <= 285 {
// The value is between [257, 285] inclusive on both ends
// This means it's a back reference so we have to copy
// from the buffer of written bytes some distance away
// and for some amount of repetition
let repeat_length = DynamicBlock::read_repeat_length(value, bit_stream);
let distance = DynamicBlock::read_repeat_distance(&self.distance_huffman, bit_stream);
self.last_repeat_distance = distance;
self.repeats_remaining = repeat_length - 1;
DEFLATEResult::Repeat | let result = Huffman::new(&alphabet, &lengths); | random_line_split |
deflate.rs | &code_length_huffman);
let distance_huffman = DynamicBlock::read_distance_huffman(hdist, bit_stream, &code_length_huffman);
DynamicBlock{
repeats_remaining: 0,
last_repeat_distance: 0,
literal_length_huffman,
distance_huffman,
}
}
fn read_repeat_distance(distance_huffman: &Huffman<usize>, input_stream: &mut BitStream) -> usize {
// A code ends up mapping to some base distance plus some
// extra bits to read to add to that base distance
let code = DynamicBlock::get_next_huffman_encoded_value(&distance_huffman, input_stream);
println!("Modulo: {}", code % 2);
let base_distance = match code {
0 ... 3 => {
code as u32 + 1
},
_ if code % 2 == 0 => {
println!("Even code");
2u32.pow(code as u32 / 2) + 1
},
_ if code % 2 == 1 => {
println!("Odd code");
println!("{}", 2u32.pow(code as u32 / 2));
println!("{}", 2u32.pow(code as u32 / 2 - 1));
println!("{}", 2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1);
2u32.pow(code as u32 / 2) + 2u32.pow(code as u32 / 2 - 1) + 1
},
_ => panic!("Logic error handling base distance"),
};
let num_distance_extra_bits = match code {
0 ... 3 => 0,
4 ... 29 => (code / 2) - 1,
_ => panic!("Distance is undefined for: {}", code),
};
let distance_offset = input_stream.next_bits(num_distance_extra_bits) as u32;
println!("Code: {} Base Distance: {} Offset: {} Bits: {}", code, base_distance, distance_offset, num_distance_extra_bits);
let distance = base_distance + distance_offset;
distance as usize
}
fn read_repeat_length(value: usize, input_stream: &mut BitStream) -> usize {
let num_length_extra_bits = match value {
257 ... 264 => 0,
265 ... 284 => (value - 265) / 4 + 1,
285 => 0,
_ => panic!("Unsupported value for length: {}", value),
};
let length_offset = input_stream.next_bits(num_length_extra_bits) as usize;
let base_length = match value {
257 ... 264 => value - 254,
265 ... 268 => 11 + 2 * (value - 265),
269 ... 272 => 19 + 4 * (value - 269),
273 ... 276 => 35 + 8 * (value - 273),
277 ... 280 => 67 + 16 * (value - 277),
281 ... 284 => 131 + 32 * (value - 281),
285 => 258,
_ => panic!("Unsupported value for length: {}", value),
};
println!("Base Length: {} Offset: {} Bits: {}", base_length, length_offset, num_length_extra_bits);
let length = base_length + length_offset;
return length
}
fn read_code_length_huffman(length: usize, input_stream: &mut BitStream) -> Huffman<usize> {
// Read the hclen + 4 code lengths for the code-length alphabet (3 bits each)
let alphabet = vec![16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15];
let mut bit_lengths = vec![0; alphabet.len()];
assert_eq!(alphabet.len(), 19);
for index in 0..length {
let length = input_stream.next_bits(3) as usize;
bit_lengths[index] = length
}
println!("Alphabet : {:?}", alphabet);
println!("Bit Lengths : {:?}", bit_lengths);
let h = Huffman::new(&alphabet, &bit_lengths);
println!("Code Length Huffman = {:?}", h);
h
}
fn read_literal_length_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
// Includes 0 and 285, but not 286
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman);
let result = Huffman::new(&alphabet, &lengths);
println!("Literal/Length Huffman = {:?}", result);
result
}
fn read_distance_huffman(length: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Huffman<usize> {
let alphabet = (0..length).collect();
let lengths = DynamicBlock::read_code_lengths(length, input_stream, code_length_huffman);
let result = Huffman::new(&alphabet, &lengths);
println!("Distance Huffman = {:?}", result);
result
}
fn read_code_lengths(count: usize, input_stream: &mut BitStream, code_length_huffman: &Huffman<usize>) -> Vec<usize> {
let mut lengths = Vec::new();
while lengths.len() < count {
let length = DynamicBlock::get_next_huffman_encoded_value(code_length_huffman, input_stream);
println!("Found this length: {}", length);
// Literal value
if length <= 15 {
lengths.push(length);
continue
}
// Otherwise, it's a repeater of a previous value or zero
let (repeat_value, count) = match length {
16 => {
let value = (*lengths.last().expect("Cannot repeat at start of stream")).clone();
let count = input_stream.next_bits(2) + 3;
(value, count)
},
17 => (0, input_stream.next_bits(3) + 3),
18 => (0, input_stream.next_bits(7) + 11),
_ => panic!("Unsupported code length {}", length)
};
for _ in 0..count {
lengths.push(repeat_value)
}
}
// By the end, we should NOT have more or less than we want
// The encoding should generate exactly `count` entries into
// the list of code lengths
assert_eq!(lengths.len(), count);
println!("Lengths by alphabet: {:?}", lengths);
lengths
}
fn get_next_huffman_encoded_value<T : Copy + Eq>(huffman: &Huffman<T>, input_stream: &mut BitStream) -> T {
match huffman {
Huffman::Branch{zero, one} => {
if input_stream.next() {
DynamicBlock::get_next_huffman_encoded_value(one, input_stream)
} else |
},
Huffman::Leaf(value) => *value,
Huffman::DeadEnd => panic!("Reached dead end!"),
}
}
fn next(&mut self, bit_stream: &mut BitStream) -> DEFLATEResult {
if self.repeats_remaining > 0 {
self.repeats_remaining -= 1;
return DEFLATEResult::Repeat(self.last_repeat_distance)
}
let value = DynamicBlock::get_next_huffman_encoded_value(&self.literal_length_huffman, bit_stream);
println!("Found value: {}", value);
if value < 256 {
// This is a literal byte to emit to the output stream
// We know it's a byte because of the check above and
// it's defined that way by the standard
DEFLATEResult::Literal(value as u8)
} else if value == 256 {
println!("End of block encountered");
DEFLATEResult::EndOfBlock
} else if value <= 285 {
// The value is between [257, 285] inclusive on both ends
// This means it's a back reference so we have to copy
// from the buffer of written bytes some distance away
// and for some amount of repetition
let repeat_length = DynamicBlock::read_repeat_length(value, bit_stream);
let distance = DynamicBlock::read_repeat_distance(&self.distance_huffman, bit_stream);
self.last_repeat_distance = distance;
self.repeats_remaining = repeat_length - 1;
DEFLATEResult:: | {
DynamicBlock::get_next_huffman_encoded_value(zero, input_stream)
} | conditional_block |
pykernel.py | the PUB objects with the message about to be executed.
* Implement random port and security key logic.
* Implement control messages.
* Implement event loop and poll version.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports.
import __builtin__
from code import CommandCompiler
import sys
import time
import traceback
# System library imports.
import zmq
# Local imports.
from IPython.utils.traitlets import HasTraits, Instance
from completer import KernelCompleter
from entry_point import base_launch_kernel, make_default_main
from session import Session, Message
#-----------------------------------------------------------------------------
# Main kernel class
#-----------------------------------------------------------------------------
class Kernel(HasTraits):
# Private interface
# This is a dict of the port numbers that the kernel is listening on. It is set
# by record_ports and used by connect_request.
_recorded_ports = None
#---------------------------------------------------------------------------
# Kernel interface
#---------------------------------------------------------------------------
session = Instance(Session)
reply_socket = Instance('zmq.Socket')
pub_socket = Instance('zmq.Socket')
req_socket = Instance('zmq.Socket')
def __init__(self, **kwargs):
super(Kernel, self).__init__(**kwargs)
self.user_ns = {}
self.history = []
self.compiler = CommandCompiler()
self.completer = KernelCompleter(self.user_ns)
# Build dict of handlers for message types
msg_types = [ 'execute_request', 'complete_request',
'object_info_request', 'shutdown_request' ]
self.handlers = {}
for msg_type in msg_types:
self.handlers[msg_type] = getattr(self, msg_type)
def start(self):
""" Start the kernel main loop.
"""
while True:
ident = self.reply_socket.recv()
assert self.reply_socket.rcvmore(), "Missing message part."
msg = self.reply_socket.recv_json()
omsg = Message(msg)
print>>sys.__stdout__
print>>sys.__stdout__, omsg
handler = self.handlers.get(omsg.msg_type, None)
if handler is None:
print >> sys.__stderr__, "UNKNOWN MESSAGE TYPE:", omsg
else:
handler(ident, omsg)
def record_ports(self, xrep_port, pub_port, req_port, hb_port):
"""Record the ports that this kernel is using.
The creator of the Kernel instance must call this method if they
want the :meth:`connect_request` method to return the port numbers.
"""
self._recorded_ports = {
'xrep_port' : xrep_port,
'pub_port' : pub_port,
'req_port' : req_port,
'hb_port' : hb_port
}
#---------------------------------------------------------------------------
# Kernel request handlers
#---------------------------------------------------------------------------
def | (self, ident, parent):
try:
code = parent[u'content'][u'code']
except:
print>>sys.__stderr__, "Got bad msg: "
print>>sys.__stderr__, Message(parent)
return
pyin_msg = self.session.msg(u'pyin',{u'code':code}, parent=parent)
self.pub_socket.send_json(pyin_msg)
try:
comp_code = self.compiler(code, '<zmq-kernel>')
# Replace raw_input. Note that it is not sufficient to replace
# raw_input in the user namespace.
raw_input = lambda prompt='': self._raw_input(prompt, ident, parent)
__builtin__.raw_input = raw_input
# Set the parent message of the display hook and out streams.
sys.displayhook.set_parent(parent)
sys.stdout.set_parent(parent)
sys.stderr.set_parent(parent)
exec comp_code in self.user_ns, self.user_ns
except:
etype, evalue, tb = sys.exc_info()
tb = traceback.format_exception(etype, evalue, tb)
exc_content = {
u'status' : u'error',
u'traceback' : tb,
u'ename' : unicode(etype.__name__),
u'evalue' : unicode(evalue)
}
exc_msg = self.session.msg(u'pyerr', exc_content, parent)
self.pub_socket.send_json(exc_msg)
reply_content = exc_content
else:
reply_content = { 'status' : 'ok', 'payload' : {} }
# Flush output before sending the reply.
sys.stderr.flush()
sys.stdout.flush()
# Send the reply.
reply_msg = self.session.msg(u'execute_reply', reply_content, parent)
print>>sys.__stdout__, Message(reply_msg)
self.reply_socket.send(ident, zmq.SNDMORE)
self.reply_socket.send_json(reply_msg)
if reply_msg['content']['status'] == u'error':
self._abort_queue()
def complete_request(self, ident, parent):
matches = {'matches' : self._complete(parent),
'status' : 'ok'}
completion_msg = self.session.send(self.reply_socket, 'complete_reply',
matches, parent, ident)
print >> sys.__stdout__, completion_msg
def object_info_request(self, ident, parent):
context = parent['content']['oname'].split('.')
object_info = self._object_info(context)
msg = self.session.send(self.reply_socket, 'object_info_reply',
object_info, parent, ident)
print >> sys.__stdout__, msg
def shutdown_request(self, ident, parent):
content = dict(parent['content'])
msg = self.session.send(self.reply_socket, 'shutdown_reply',
content, parent, ident)
msg = self.session.send(self.pub_socket, 'shutdown_reply',
content, parent, ident)
print >> sys.__stdout__, msg
time.sleep(0.1)
sys.exit(0)
#---------------------------------------------------------------------------
# Protected interface
#---------------------------------------------------------------------------
def _abort_queue(self):
while True:
try:
ident = self.reply_socket.recv(zmq.NOBLOCK)
except zmq.ZMQError, e:
if e.errno == zmq.EAGAIN:
break
else:
assert self.reply_socket.rcvmore(), "Missing message part."
msg = self.reply_socket.recv_json()
print>>sys.__stdout__, "Aborting:"
print>>sys.__stdout__, Message(msg)
msg_type = msg['msg_type']
reply_type = msg_type.split('_')[0] + '_reply'
reply_msg = self.session.msg(reply_type, {'status':'aborted'}, msg)
print>>sys.__stdout__, Message(reply_msg)
self.reply_socket.send(ident,zmq.SNDMORE)
self.reply_socket.send_json(reply_msg)
# We need to wait a bit for requests to come in. This can probably
# be set shorter for true asynchronous clients.
time.sleep(0.1)
def _raw_input(self, prompt, ident, parent):
# Flush output before making the request.
sys.stderr.flush()
sys.stdout.flush()
# Send the input request.
content = dict(prompt=prompt)
msg = self.session.msg(u'input_request', content, parent)
self.req_socket.send_json(msg)
# Await a response.
reply = self.req_socket.recv_json()
try:
value = reply['content']['value']
except:
print>>sys.__stderr__, "Got bad raw_input reply: "
print>>sys.__stderr__, Message(parent)
value = ''
return value
def _complete(self, msg):
return self.completer.complete(msg.content.line, msg.content.text)
def _object_info(self, context):
symbol, leftover = self._symbol_from_context(context)
if symbol is not None and not leftover:
doc = getattr(symbol, '__doc__', '')
else:
doc = ''
object_info = dict(docstring = doc)
return object_info
def _symbol_from_context(self, context):
if not context:
return None, context
base_symbol_string = context[0]
symbol = self.user_ns.get(base_symbol_string, None)
if symbol is None:
symbol = __builtin__.__dict__.get(base_symbol_string, None)
if symbol is None:
return None, context
context = context[1:]
for i, name in enumerate(context):
new_symbol = getattr(symbol, name, None)
if new_symbol is None:
return symbol, context[i:]
else:
symbol = new_symbol
return symbol, []
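# Illustrative sketch (editor's addition): _symbol_from_context walks a dotted
# name attribute by attribute, so, assuming the user has already imported `os`
# into user_ns, a lookup such as
#
#     symbol, leftover = self._symbol_from_context(['os', 'path', 'nosuch'])
#
# would return the os.path module object together with ['nosuch'] as the
# unresolved remainder.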
#-----------------------------------------------------------------------------
# Kernel main and launch functions
#-----------------------------------------------------------------------------
def launch_kernel(ip=None, xrep_port=0, pub_port=0, req_port=0, hb_port=0,
independent=False):
""" Launches a localhost kernel, binding to the specified ports.
Parameters
----------
ip : str, optional
The ip address the kernel will bind to.
xrep_port : int, optional
The port to use for XREP channel.
pub_port : int, optional
The port to use for the SUB channel.
req_port : int, optional
The port to use for the REQ (raw input) channel.
hb_port : int, optional
The port to use for the heartbeat REP channel.
independent : bool, optional (default False)
If set, the kernel process is guaranteed to survive if this process
dies. If not set, an effort is made to ensure that the kernel is killed
when this process dies. Note that in this case it is still good practice
to kill kernels manually before exiting.
Returns
-------
A tuple of form:
(kernel_process, xrep_port, pub_port, | execute_request | identifier_name |
pykernel.py | the PUB objects with the message about to be executed.
* Implement random port and security key logic.
* Implement control messages.
* Implement event loop and poll version.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports.
import __builtin__
from code import CommandCompiler
import sys
import time
import traceback
# System library imports.
import zmq
# Local imports.
from IPython.utils.traitlets import HasTraits, Instance
from completer import KernelCompleter
from entry_point import base_launch_kernel, make_default_main
from session import Session, Message
#-----------------------------------------------------------------------------
# Main kernel class
#-----------------------------------------------------------------------------
class Kernel(HasTraits):
# Private interface
# This is a dict of port numbers that the kernel is listening on. It is set
# by record_ports and used by connect_request.
_recorded_ports = None
#---------------------------------------------------------------------------
# Kernel interface
#---------------------------------------------------------------------------
session = Instance(Session)
reply_socket = Instance('zmq.Socket')
pub_socket = Instance('zmq.Socket')
req_socket = Instance('zmq.Socket')
def __init__(self, **kwargs):
super(Kernel, self).__init__(**kwargs)
self.user_ns = {}
self.history = []
self.compiler = CommandCompiler()
self.completer = KernelCompleter(self.user_ns)
# Build dict of handlers for message types
msg_types = [ 'execute_request', 'complete_request',
'object_info_request', 'shutdown_request' ]
self.handlers = {}
for msg_type in msg_types:
self.handlers[msg_type] = getattr(self, msg_type)
def start(self):
""" Start the kernel main loop.
"""
while True:
ident = self.reply_socket.recv()
assert self.reply_socket.rcvmore(), "Missing message part."
msg = self.reply_socket.recv_json()
omsg = Message(msg)
print>>sys.__stdout__ | handler(ident, omsg)
def record_ports(self, xrep_port, pub_port, req_port, hb_port):
"""Record the ports that this kernel is using.
The creator of the Kernel instance must call this method if they
want the :meth:`connect_request` method to return the port numbers.
"""
self._recorded_ports = {
'xrep_port' : xrep_port,
'pub_port' : pub_port,
'req_port' : req_port,
'hb_port' : hb_port
}
#---------------------------------------------------------------------------
# Kernel request handlers
#---------------------------------------------------------------------------
def execute_request(self, ident, parent):
try:
code = parent[u'content'][u'code']
except:
print>>sys.__stderr__, "Got bad msg: "
print>>sys.__stderr__, Message(parent)
return
pyin_msg = self.session.msg(u'pyin',{u'code':code}, parent=parent)
self.pub_socket.send_json(pyin_msg)
try:
comp_code = self.compiler(code, '<zmq-kernel>')
# Replace raw_input. Note that it is not sufficient to replace
# raw_input in the user namespace.
raw_input = lambda prompt='': self._raw_input(prompt, ident, parent)
__builtin__.raw_input = raw_input
# Set the parent message of the display hook and out streams.
sys.displayhook.set_parent(parent)
sys.stdout.set_parent(parent)
sys.stderr.set_parent(parent)
exec comp_code in self.user_ns, self.user_ns
except:
etype, evalue, tb = sys.exc_info()
tb = traceback.format_exception(etype, evalue, tb)
exc_content = {
u'status' : u'error',
u'traceback' : tb,
u'ename' : unicode(etype.__name__),
u'evalue' : unicode(evalue)
}
exc_msg = self.session.msg(u'pyerr', exc_content, parent)
self.pub_socket.send_json(exc_msg)
reply_content = exc_content
else:
reply_content = { 'status' : 'ok', 'payload' : {} }
# Flush output before sending the reply.
sys.stderr.flush()
sys.stdout.flush()
# Send the reply.
reply_msg = self.session.msg(u'execute_reply', reply_content, parent)
print>>sys.__stdout__, Message(reply_msg)
self.reply_socket.send(ident, zmq.SNDMORE)
self.reply_socket.send_json(reply_msg)
if reply_msg['content']['status'] == u'error':
self._abort_queue()
def complete_request(self, ident, parent):
matches = {'matches' : self._complete(parent),
'status' : 'ok'}
completion_msg = self.session.send(self.reply_socket, 'complete_reply',
matches, parent, ident)
print >> sys.__stdout__, completion_msg
def object_info_request(self, ident, parent):
context = parent['content']['oname'].split('.')
object_info = self._object_info(context)
msg = self.session.send(self.reply_socket, 'object_info_reply',
object_info, parent, ident)
print >> sys.__stdout__, msg
def shutdown_request(self, ident, parent):
content = dict(parent['content'])
msg = self.session.send(self.reply_socket, 'shutdown_reply',
content, parent, ident)
msg = self.session.send(self.pub_socket, 'shutdown_reply',
content, parent, ident)
print >> sys.__stdout__, msg
time.sleep(0.1)
sys.exit(0)
#---------------------------------------------------------------------------
# Protected interface
#---------------------------------------------------------------------------
def _abort_queue(self):
while True:
try:
ident = self.reply_socket.recv(zmq.NOBLOCK)
except zmq.ZMQError, e:
if e.errno == zmq.EAGAIN:
break
else:
assert self.reply_socket.rcvmore(), "Missing message part."
msg = self.reply_socket.recv_json()
print>>sys.__stdout__, "Aborting:"
print>>sys.__stdout__, Message(msg)
msg_type = msg['msg_type']
reply_type = msg_type.split('_')[0] + '_reply'
reply_msg = self.session.msg(reply_type, {'status':'aborted'}, msg)
print>>sys.__stdout__, Message(reply_msg)
self.reply_socket.send(ident,zmq.SNDMORE)
self.reply_socket.send_json(reply_msg)
# We need to wait a bit for requests to come in. This can probably
# be set shorter for true asynchronous clients.
time.sleep(0.1)
def _raw_input(self, prompt, ident, parent):
# Flush output before making the request.
sys.stderr.flush()
sys.stdout.flush()
# Send the input request.
content = dict(prompt=prompt)
msg = self.session.msg(u'input_request', content, parent)
self.req_socket.send_json(msg)
# Await a response.
reply = self.req_socket.recv_json()
try:
value = reply['content']['value']
except:
print>>sys.__stderr__, "Got bad raw_input reply: "
print>>sys.__stderr__, Message(parent)
value = ''
return value
def _complete(self, msg):
return self.completer.complete(msg.content.line, msg.content.text)
def _object_info(self, context):
symbol, leftover = self._symbol_from_context(context)
if symbol is not None and not leftover:
doc = getattr(symbol, '__doc__', '')
else:
doc = ''
object_info = dict(docstring = doc)
return object_info
def _symbol_from_context(self, context):
if not context:
return None, context
base_symbol_string = context[0]
symbol = self.user_ns.get(base_symbol_string, None)
if symbol is None:
symbol = __builtin__.__dict__.get(base_symbol_string, None)
if symbol is None:
return None, context
context = context[1:]
for i, name in enumerate(context):
new_symbol = getattr(symbol, name, None)
if new_symbol is None:
return symbol, context[i:]
else:
symbol = new_symbol
return symbol, []
#-----------------------------------------------------------------------------
# Kernel main and launch functions
#-----------------------------------------------------------------------------
def launch_kernel(ip=None, xrep_port=0, pub_port=0, req_port=0, hb_port=0,
independent=False):
""" Launches a localhost kernel, binding to the specified ports.
Parameters
----------
ip : str, optional
The ip address the kernel will bind to.
xrep_port : int, optional
The port to use for XREP channel.
pub_port : int, optional
The port to use for the SUB channel.
req_port : int, optional
The port to use for the REQ (raw input) channel.
hb_port : int, optional
The port to use for the heartbeat REP channel.
independent : bool, optional (default False)
If set, the kernel process is guaranteed to survive if this process
dies. If not set, an effort is made to ensure that the kernel is killed
when this process dies. Note that in this case it is still good practice
to kill kernels manually before exiting.
Returns
-------
A tuple of form:
(kernel_process, xrep_port, pub_port, req | print>>sys.__stdout__, omsg
handler = self.handlers.get(omsg.msg_type, None)
if handler is None:
print >> sys.__stderr__, "UNKNOWN MESSAGE TYPE:", omsg
else: | random_line_split |
pykernel.py | the PUB objects with the message about to be executed.
* Implement random port and security key logic.
* Implement control messages.
* Implement event loop and poll version.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports.
import __builtin__
from code import CommandCompiler
import sys
import time
import traceback
# System library imports.
import zmq
# Local imports.
from IPython.utils.traitlets import HasTraits, Instance
from completer import KernelCompleter
from entry_point import base_launch_kernel, make_default_main
from session import Session, Message
#-----------------------------------------------------------------------------
# Main kernel class
#-----------------------------------------------------------------------------
class Kernel(HasTraits):
# Private interface
# This is a dict of port numbers that the kernel is listening on. It is set
# by record_ports and used by connect_request.
_recorded_ports = None
#---------------------------------------------------------------------------
# Kernel interface
#---------------------------------------------------------------------------
session = Instance(Session)
reply_socket = Instance('zmq.Socket')
pub_socket = Instance('zmq.Socket')
req_socket = Instance('zmq.Socket')
def __init__(self, **kwargs):
super(Kernel, self).__init__(**kwargs)
self.user_ns = {}
self.history = []
self.compiler = CommandCompiler()
self.completer = KernelCompleter(self.user_ns)
# Build dict of handlers for message types
msg_types = [ 'execute_request', 'complete_request',
'object_info_request', 'shutdown_request' ]
self.handlers = {}
for msg_type in msg_types:
self.handlers[msg_type] = getattr(self, msg_type)
def start(self):
""" Start the kernel main loop.
"""
while True:
ident = self.reply_socket.recv()
assert self.reply_socket.rcvmore(), "Missing message part."
msg = self.reply_socket.recv_json()
omsg = Message(msg)
print>>sys.__stdout__
print>>sys.__stdout__, omsg
handler = self.handlers.get(omsg.msg_type, None)
if handler is None:
print >> sys.__stderr__, "UNKNOWN MESSAGE TYPE:", omsg
else:
handler(ident, omsg)
def record_ports(self, xrep_port, pub_port, req_port, hb_port):
"""Record the ports that this kernel is using.
The creator of the Kernel instance must call this method if they
want the :meth:`connect_request` method to return the port numbers.
"""
self._recorded_ports = {
'xrep_port' : xrep_port,
'pub_port' : pub_port,
'req_port' : req_port,
'hb_port' : hb_port
}
#---------------------------------------------------------------------------
# Kernel request handlers
#---------------------------------------------------------------------------
def execute_request(self, ident, parent):
try:
code = parent[u'content'][u'code']
except:
print>>sys.__stderr__, "Got bad msg: "
print>>sys.__stderr__, Message(parent)
return
pyin_msg = self.session.msg(u'pyin',{u'code':code}, parent=parent)
self.pub_socket.send_json(pyin_msg)
try:
comp_code = self.compiler(code, '<zmq-kernel>')
# Replace raw_input. Note that it is not sufficient to replace
# raw_input in the user namespace.
raw_input = lambda prompt='': self._raw_input(prompt, ident, parent)
__builtin__.raw_input = raw_input
# Set the parent message of the display hook and out streams.
sys.displayhook.set_parent(parent)
sys.stdout.set_parent(parent)
sys.stderr.set_parent(parent)
exec comp_code in self.user_ns, self.user_ns
except:
etype, evalue, tb = sys.exc_info()
tb = traceback.format_exception(etype, evalue, tb)
exc_content = {
u'status' : u'error',
u'traceback' : tb,
u'ename' : unicode(etype.__name__),
u'evalue' : unicode(evalue)
}
exc_msg = self.session.msg(u'pyerr', exc_content, parent)
self.pub_socket.send_json(exc_msg)
reply_content = exc_content
else:
reply_content = { 'status' : 'ok', 'payload' : {} }
# Flush output before sending the reply.
sys.stderr.flush()
sys.stdout.flush()
# Send the reply.
reply_msg = self.session.msg(u'execute_reply', reply_content, parent)
print>>sys.__stdout__, Message(reply_msg)
self.reply_socket.send(ident, zmq.SNDMORE)
self.reply_socket.send_json(reply_msg)
if reply_msg['content']['status'] == u'error':
self._abort_queue()
def complete_request(self, ident, parent):
matches = {'matches' : self._complete(parent),
'status' : 'ok'}
completion_msg = self.session.send(self.reply_socket, 'complete_reply',
matches, parent, ident)
print >> sys.__stdout__, completion_msg
def object_info_request(self, ident, parent):
context = parent['content']['oname'].split('.')
object_info = self._object_info(context)
msg = self.session.send(self.reply_socket, 'object_info_reply',
object_info, parent, ident)
print >> sys.__stdout__, msg
def shutdown_request(self, ident, parent):
content = dict(parent['content'])
msg = self.session.send(self.reply_socket, 'shutdown_reply',
content, parent, ident)
msg = self.session.send(self.pub_socket, 'shutdown_reply',
content, parent, ident)
print >> sys.__stdout__, msg
time.sleep(0.1)
sys.exit(0)
#---------------------------------------------------------------------------
# Protected interface
#---------------------------------------------------------------------------
def _abort_queue(self):
while True:
try:
ident = self.reply_socket.recv(zmq.NOBLOCK)
except zmq.ZMQError, e:
if e.errno == zmq.EAGAIN:
break
else:
assert self.reply_socket.rcvmore(), "Missing message part."
msg = self.reply_socket.recv_json()
print>>sys.__stdout__, "Aborting:"
print>>sys.__stdout__, Message(msg)
msg_type = msg['msg_type']
reply_type = msg_type.split('_')[0] + '_reply'
reply_msg = self.session.msg(reply_type, {'status':'aborted'}, msg)
print>>sys.__stdout__, Message(reply_msg)
self.reply_socket.send(ident,zmq.SNDMORE)
self.reply_socket.send_json(reply_msg)
# We need to wait a bit for requests to come in. This can probably
# be set shorter for true asynchronous clients.
time.sleep(0.1)
def _raw_input(self, prompt, ident, parent):
# Flush output before making the request.
sys.stderr.flush()
sys.stdout.flush()
# Send the input request.
content = dict(prompt=prompt)
msg = self.session.msg(u'input_request', content, parent)
self.req_socket.send_json(msg)
# Await a response.
reply = self.req_socket.recv_json()
try:
value = reply['content']['value']
except:
print>>sys.__stderr__, "Got bad raw_input reply: "
print>>sys.__stderr__, Message(parent)
value = ''
return value
def _complete(self, msg):
return self.completer.complete(msg.content.line, msg.content.text)
def _object_info(self, context):
symbol, leftover = self._symbol_from_context(context)
if symbol is not None and not leftover:
doc = getattr(symbol, '__doc__', '')
else:
doc = ''
object_info = dict(docstring = doc)
return object_info
def _symbol_from_context(self, context):
if not context:
return None, context
base_symbol_string = context[0]
symbol = self.user_ns.get(base_symbol_string, None)
if symbol is None:
symbol = __builtin__.__dict__.get(base_symbol_string, None)
if symbol is None:
return None, context
context = context[1:]
for i, name in enumerate(context):
|
return symbol, []
#-----------------------------------------------------------------------------
# Kernel main and launch functions
#-----------------------------------------------------------------------------
def launch_kernel(ip=None, xrep_port=0, pub_port=0, req_port=0, hb_port=0,
independent=False):
""" Launches a localhost kernel, binding to the specified ports.
Parameters
----------
ip : str, optional
The ip address the kernel will bind to.
xrep_port : int, optional
The port to use for XREP channel.
pub_port : int, optional
The port to use for the SUB channel.
req_port : int, optional
The port to use for the REQ (raw input) channel.
hb_port : int, optional
The port to use for the heartbeat REP channel.
independent : bool, optional (default False)
If set, the kernel process is guaranteed to survive if this process
dies. If not set, an effort is made to ensure that the kernel is killed
when this process dies. Note that in this case it is still good practice
to kill kernels manually before exiting.
Returns
-------
A tuple of form:
(kernel_process, xrep_port, pub_port, req | new_symbol = getattr(symbol, name, None)
if new_symbol is None:
return symbol, context[i:]
else:
symbol = new_symbol | conditional_block |
pykernel.py | the PUB objects with the message about to be executed.
* Implement random port and security key logic.
* Implement control messages.
* Implement event loop and poll version.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports.
import __builtin__
from code import CommandCompiler
import sys
import time
import traceback
# System library imports.
import zmq
# Local imports.
from IPython.utils.traitlets import HasTraits, Instance
from completer import KernelCompleter
from entry_point import base_launch_kernel, make_default_main
from session import Session, Message
#-----------------------------------------------------------------------------
# Main kernel class
#-----------------------------------------------------------------------------
class Kernel(HasTraits):
# Private interface
# This is a dict of port numbers that the kernel is listening on. It is set
# by record_ports and used by connect_request.
_recorded_ports = None
#---------------------------------------------------------------------------
# Kernel interface
#---------------------------------------------------------------------------
session = Instance(Session)
reply_socket = Instance('zmq.Socket')
pub_socket = Instance('zmq.Socket')
req_socket = Instance('zmq.Socket')
def __init__(self, **kwargs):
|
def start(self):
""" Start the kernel main loop.
"""
while True:
ident = self.reply_socket.recv()
assert self.reply_socket.rcvmore(), "Missing message part."
msg = self.reply_socket.recv_json()
omsg = Message(msg)
print>>sys.__stdout__
print>>sys.__stdout__, omsg
handler = self.handlers.get(omsg.msg_type, None)
if handler is None:
print >> sys.__stderr__, "UNKNOWN MESSAGE TYPE:", omsg
else:
handler(ident, omsg)
def record_ports(self, xrep_port, pub_port, req_port, hb_port):
"""Record the ports that this kernel is using.
The creator of the Kernel instance must call this method if they
want the :meth:`connect_request` method to return the port numbers.
"""
self._recorded_ports = {
'xrep_port' : xrep_port,
'pub_port' : pub_port,
'req_port' : req_port,
'hb_port' : hb_port
}
#---------------------------------------------------------------------------
# Kernel request handlers
#---------------------------------------------------------------------------
def execute_request(self, ident, parent):
try:
code = parent[u'content'][u'code']
except:
print>>sys.__stderr__, "Got bad msg: "
print>>sys.__stderr__, Message(parent)
return
pyin_msg = self.session.msg(u'pyin',{u'code':code}, parent=parent)
self.pub_socket.send_json(pyin_msg)
try:
comp_code = self.compiler(code, '<zmq-kernel>')
# Replace raw_input. Note that it is not sufficient to replace
# raw_input in the user namespace.
raw_input = lambda prompt='': self._raw_input(prompt, ident, parent)
__builtin__.raw_input = raw_input
# Set the parent message of the display hook and out streams.
sys.displayhook.set_parent(parent)
sys.stdout.set_parent(parent)
sys.stderr.set_parent(parent)
exec comp_code in self.user_ns, self.user_ns
except:
etype, evalue, tb = sys.exc_info()
tb = traceback.format_exception(etype, evalue, tb)
exc_content = {
u'status' : u'error',
u'traceback' : tb,
u'ename' : unicode(etype.__name__),
u'evalue' : unicode(evalue)
}
exc_msg = self.session.msg(u'pyerr', exc_content, parent)
self.pub_socket.send_json(exc_msg)
reply_content = exc_content
else:
reply_content = { 'status' : 'ok', 'payload' : {} }
# Flush output before sending the reply.
sys.stderr.flush()
sys.stdout.flush()
# Send the reply.
reply_msg = self.session.msg(u'execute_reply', reply_content, parent)
print>>sys.__stdout__, Message(reply_msg)
self.reply_socket.send(ident, zmq.SNDMORE)
self.reply_socket.send_json(reply_msg)
if reply_msg['content']['status'] == u'error':
self._abort_queue()
def complete_request(self, ident, parent):
matches = {'matches' : self._complete(parent),
'status' : 'ok'}
completion_msg = self.session.send(self.reply_socket, 'complete_reply',
matches, parent, ident)
print >> sys.__stdout__, completion_msg
def object_info_request(self, ident, parent):
context = parent['content']['oname'].split('.')
object_info = self._object_info(context)
msg = self.session.send(self.reply_socket, 'object_info_reply',
object_info, parent, ident)
print >> sys.__stdout__, msg
def shutdown_request(self, ident, parent):
content = dict(parent['content'])
msg = self.session.send(self.reply_socket, 'shutdown_reply',
content, parent, ident)
msg = self.session.send(self.pub_socket, 'shutdown_reply',
content, parent, ident)
print >> sys.__stdout__, msg
time.sleep(0.1)
sys.exit(0)
#---------------------------------------------------------------------------
# Protected interface
#---------------------------------------------------------------------------
def _abort_queue(self):
while True:
try:
ident = self.reply_socket.recv(zmq.NOBLOCK)
except zmq.ZMQError, e:
if e.errno == zmq.EAGAIN:
break
else:
assert self.reply_socket.rcvmore(), "Missing message part."
msg = self.reply_socket.recv_json()
print>>sys.__stdout__, "Aborting:"
print>>sys.__stdout__, Message(msg)
msg_type = msg['msg_type']
reply_type = msg_type.split('_')[0] + '_reply'
reply_msg = self.session.msg(reply_type, {'status':'aborted'}, msg)
print>>sys.__stdout__, Message(reply_msg)
self.reply_socket.send(ident,zmq.SNDMORE)
self.reply_socket.send_json(reply_msg)
# We need to wait a bit for requests to come in. This can probably
# be set shorter for true asynchronous clients.
time.sleep(0.1)
def _raw_input(self, prompt, ident, parent):
# Flush output before making the request.
sys.stderr.flush()
sys.stdout.flush()
# Send the input request.
content = dict(prompt=prompt)
msg = self.session.msg(u'input_request', content, parent)
self.req_socket.send_json(msg)
# Await a response.
reply = self.req_socket.recv_json()
try:
value = reply['content']['value']
except:
print>>sys.__stderr__, "Got bad raw_input reply: "
print>>sys.__stderr__, Message(parent)
value = ''
return value
def _complete(self, msg):
return self.completer.complete(msg.content.line, msg.content.text)
def _object_info(self, context):
symbol, leftover = self._symbol_from_context(context)
if symbol is not None and not leftover:
doc = getattr(symbol, '__doc__', '')
else:
doc = ''
object_info = dict(docstring = doc)
return object_info
def _symbol_from_context(self, context):
if not context:
return None, context
base_symbol_string = context[0]
symbol = self.user_ns.get(base_symbol_string, None)
if symbol is None:
symbol = __builtin__.__dict__.get(base_symbol_string, None)
if symbol is None:
return None, context
context = context[1:]
for i, name in enumerate(context):
new_symbol = getattr(symbol, name, None)
if new_symbol is None:
return symbol, context[i:]
else:
symbol = new_symbol
return symbol, []
#-----------------------------------------------------------------------------
# Kernel main and launch functions
#-----------------------------------------------------------------------------
def launch_kernel(ip=None, xrep_port=0, pub_port=0, req_port=0, hb_port=0,
independent=False):
""" Launches a localhost kernel, binding to the specified ports.
Parameters
----------
ip : str, optional
The ip address the kernel will bind to.
xrep_port : int, optional
The port to use for XREP channel.
pub_port : int, optional
The port to use for the SUB channel.
req_port : int, optional
The port to use for the REQ (raw input) channel.
hb_port : int, optional
The port to use for the heartbeat REP channel.
independent : bool, optional (default False)
If set, the kernel process is guaranteed to survive if this process
dies. If not set, an effort is made to ensure that the kernel is killed
when this process dies. Note that in this case it is still good practice
to kill kernels manually before exiting.
Returns
-------
A tuple of form:
(kernel_process, xrep_port, pub_port, | super(Kernel, self).__init__(**kwargs)
self.user_ns = {}
self.history = []
self.compiler = CommandCompiler()
self.completer = KernelCompleter(self.user_ns)
# Build dict of handlers for message types
msg_types = [ 'execute_request', 'complete_request',
'object_info_request', 'shutdown_request' ]
self.handlers = {}
for msg_type in msg_types:
self.handlers[msg_type] = getattr(self, msg_type) | identifier_body |
E4418.py | not available on RS422', 'DTR/DSR is only available on the RS232 interface.'),
error_item(-222, 'Data out of range', 'A numeric parameter value is outside the valid range for the command. For example, SENS:FREQ 2KHZ.'),
error_item(-224, 'Illegal parameter value', 'A discrete parameter was received which was not a valid choice for the command. You may have used an invalid parameter choice. For example, TRIG:SOUR EXT.'),
error_item(-226, 'Lists not same length', 'This occurs when SENSe:CORRection:CSET[1]|CSET2:STATe is set to ON and the frequency and calibration/offset lists do not correspond in length.'),
error_item(-230, 'Data corrupt or stale', 'This occurs when a FETC? is attempted and either a reset has been received or the power meter state has changed such that the current measurement is invalidated (for example, a change of frequency setting or triggering conditions).'),
error_item(-230, 'Data corrupt or stale;Please zero and calibrate Channel A', 'When CAL[1|2]:RCAL is set to ON and the sensor currently connected to channel A has not been zeroed and calibrated, then any command which would normally return a measurement result (for example FETC?, READ? or MEAS?) will generate this error message.'),
error_item(-230, 'Data corrupt or stale;Please zero Channel A', 'When CAL[1|2]:RCAL is set to ON and the sensor currently connected to channel A has not been zeroed, then any command which would normally return a measurement result (for example FETC?, READ? or MEAS?) will generate this error message.'),
error_item(-230, 'Data corrupt or stale;Please calibrate Channel A', 'When CAL[1|2]:RCAL is set to ON and the sensor currently connected to channel A has not been calibrated, then any command which would normally return a measurement result (for example FETC?, READ? or MEAS?) will generate this error message'),
error_item(-231, 'Data questionable;CAL ERROR', 'Power meter calibration failed. The most likely cause is attempting to calibrate without applying a 1 mW power to the power sensor.'),
error_item(-231, 'Data questionable;Input Overload', 'The power input to Channel A exceeds the power sensor\'s maximum range.'),
error_item(-231, 'Data questionable;Lower window log error', 'This indicates that a difference measurement in the lower window has given a negative result when the units of measurement were logarithmic.'),
error_item(-231, 'Data questionable;Upper window log error', 'This indicates that a difference measurement in the upper window has given a negative result when the units of measurement were logarithmic.'),
error_item(-231, 'Data questionable;ZERO ERROR', 'Power meter zeroing failed. The most likely cause is attempting to zero when some power signal is being applied to the power sensor.'),
error_item(-241, 'Hardware missing', 'The power meter is unable to execute the command because either no power sensor is connected or it expects an Agilent E-Series or N8480 Series power sensor, and one is not connected.'),
error_item(-310, 'System error;Dty Cyc may impair accuracy with ECP sensor', 'This indicates that the sensor connected is for use with CW signals only.'),
error_item(-310, 'System error;Sensor EEPROM Read Failed - critical data not found or unreadable', 'This indicates a failure with your Agilent E-Series or N8480 Series power sensor. Refer to your power sensor manual for details on returning it for repair.'),
error_item(-310, 'System error;Sensor EEPROM Read Completed OK but optional data block(s) not found or unreadable', 'This indicates a failure with your Agilent E-Series or N8480 Series power sensor. Refer to your power sensor manual for details on returning it for repair.'),
error_item(-310, 'System error;Sensor EEPROM Read Failed - unknown EEPROM table format', 'This indicates a failure with your Agilent E-Series or N8480 Series power sensor. Refer to your power sensor manual for details on returning it for repair.'),
error_item(-310, 'System error;Sensor EEPROM < > data not found or unreadable', 'Where < > refers to the sensor data block covered, for example, Linearity, Temp - Comp (temperature compensation). This indicates a failure with your Agilent E-Series or N8480 Series power sensor. Refer to your power sensor manual for details on returning it for repair.'),
error_item(-310, 'System error;Option 001 Battery charger fault', 'The power meter is connected to an AC power source, the battery is not fully charged and it is not charging.'),
error_item(-310, 'System error;Sensors connected to both front and rear inputs. You cannot connect two power sensors to the one channel input. In this instance, the power', 'meter detects power sensors connected to both its front and rear channel inputs.'),
error_item(-320, 'Out of memory', 'The power meter required more memory than was available to run an internal operation.'),
error_item(-330, 'Self-test Failed;', 'The -330, "Self-test Failed" errors indicate that you have a problem with your power meter. Refer to "Contacting Agilent Technologies" on page 103 for details of what to do with your faulty power meter.'),
error_item(-330, 'Self-test Failed;Measurement Channel Fault', 'Refer to "Measurement Assembly" on page 98 if you require a description of the Measurement Assembly test.'),
error_item(-330, 'Self-test Failed;Option 001 Battery requires replacement', 'The Option 001 battery is not charging to a satisfactory level and should be replaced.'),
error_item(-330, 'Self-test Failed;RAM Battery Fault', 'Refer to "RAM Battery" on page 98 if you require a description of the battery test. '),
error_item(-330, 'Self-test Failed;Calibrator Fault', 'Refer to "Calibrator" on page 99 if you require a description of the calibrator test. '),
error_item(-330, 'Self-test Failed;ROM Check Failed', 'Refer to "ROM Checksum" on page 98 if you require a description of the ROM Checksum test. '),
error_item(-330, 'Self-test Failed;RAM Check Failed', 'Refer to "RAM" on page 98 if you require a description of the RAM test. '),
error_item(-330, 'Self-test Failed;Display Assy. Fault', 'Refer to "Display" on page 99 if you require a description of the Display test. '),
error_item(-330, 'Self-test Failed;Confidence Check Fault', 'Refer to "Confidence Check" on page 96 if you require a description of this test. '),
error_item(-330, 'Self-test Failed;Serial Interface Fault', 'Refer to "Serial Interface" on page 99 if you require a description of this test. '),
error_item(-350, 'Queue overflow', 'The error queue is full and another error has occurred which could not be recorded.'),
error_item(-361, 'Parity error in program', 'The serial port receiver has detected a parity error and consequently, data integrity cannot be guaranteed.'),
error_item(-362, 'Framing error in program', 'The serial port receiver has detected a framing error and consequently, data integrity cannot be guaranteed.'),
error_item(-363, 'Input buffer overrun', 'The serial port receiver has been overrun and consequently, data has been lost.'),
error_item(-410, 'Query INTERRUPTED', 'A command was received which sends data to the output buffer, but the output buffer contained data from a previous command (the previous data is not overwritten). The output buffer is cleared when power has been off or after *RST (reset) command has been executed.'),
error_item(-420, 'Query UNTERMINATED', 'The power meter was addressed to talk (that is, to send data over the interface) but a command has not been received which sends data to the output buffer. For example, you may have executed a CONFigure command (which does not generate data) and then attempted to read data from the remote interface.'),
error_item(-430, 'Query DEADLOCKED', 'A command was received which generates too much data to fit in the output buffer and the input buffer is also full. Command execution continues but data is lost. -440 Query UNTERMINATED after indefinite response The *IDN? command must be the last query command within a command string.'),
]
@classmethod
def check(cls, num, msg):
if num==0: return
for e in cls.error_list:
| if msg == e.msg:
emsg = '%s (%d)'%(e.msg, e.num)
msg = 'Power meter returned Error message.\n'
msg += '*'*len(emsg) + '\n'
msg += emsg + '\n'
msg += '*'*len(emsg) + '\n'
msg += e.txt + '\n'
raise StandardError(msg)
continue | conditional_block |
|
E4418.py | 'Illegal parameter value', 'A discrete parameter was received which was not a valid choice for the command. You may have used an invalid parameter choice. For example, TRIG:SOUR EXT.'),
error_item(-226, 'Lists not same length', 'This occurs when SENSe:CORRection:CSET[1]|CSET2:STATe is set to ON and the frequency and calibration/offset lists do not correspond in length.'),
error_item(-230, 'Data corrupt or stale', 'This occurs when a FETC? is attempted and either a reset has been received or the power meter state has changed such that the current measurement is invalidated (for example, a change of frequency setting or triggering conditions).'),
error_item(-230, 'Data corrupt or stale;Please zero and calibrate Channel A', 'When CAL[1|2]:RCAL is set to ON and the sensor currently connected to channel A has not been zeroed and calibrated, then any command which would normally return a measurement result (for example FETC?, READ? or MEAS?) will generate this error message.'),
error_item(-230, 'Data corrupt or stale;Please zero Channel A', 'When CAL[1|2]:RCAL is set to ON and the sensor currently connected to channel A has not been zeroed, then any command which would normally return a measurement result (for example FETC?, READ? or MEAS?) will generate this error message.'),
error_item(-230, 'Data corrupt or stale;Please calibrate Channel A', 'When CAL[1|2]:RCAL is set to ON and the sensor currently connected to channel A has not been calibrated, then any command which would normally return a measurement result (for example FETC?, READ? or MEAS?) will generate this error message'),
error_item(-231, 'Data questionable;CAL ERROR', 'Power meter calibration failed. The most likely cause is attempting to calibrate without applying a 1 mW power to the power sensor.'),
error_item(-231, 'Data questionable;Input Overload', 'The power input to Channel A exceeds the power sensor\'s maximum range.'),
error_item(-231, 'Data questionable;Lower window log error', 'This indicates that a difference measurement in the lower window has given a negative result when the units of measurement were logarithmic.'),
error_item(-231, 'Data questionable;Upper window log error', 'This indicates that a difference measurement in the upper window has given a negative result when the units of measurement were logarithmic.'),
error_item(-231, 'Data questionable;ZERO ERROR', 'Power meter zeroing failed. The most likely cause is attempting to zero when some power signal is being applied to the power sensor.'),
error_item(-241, 'Hardware missing', 'The power meter is unable to execute the command because either no power sensor is connected or it expects an Agilent E-Series or N8480 Series power sensor, and one is not connected.'),
error_item(-310, 'System error;Dty Cyc may impair accuracy with ECP sensor', 'This indicates that the sensor connected is for use with CW signals only.'),
error_item(-310, 'System error;Sensor EEPROM Read Failed - critical data not found or unreadable', 'This indicates a failure with your Agilent E-Series or N8480 Series power sensor. Refer to your power sensor manual for details on returning it for repair.'),
error_item(-310, 'System error;Sensor EEPROM Read Completed OK but optional data block(s) not found or unreadable', 'This indicates a failure with your Agilent E-Series or N8480 Series power sensor. Refer to your power sensor manual for details on returning it for repair.'),
error_item(-310, 'System error;Sensor EEPROM Read Failed - unknown EEPROM table format', 'This indicates a failure with your Agilent E-Series or N8480 Series power sensor. Refer to your power sensor manual for details on returning it for repair.'),
error_item(-310, 'System error;Sensor EEPROM < > data not found or unreadable', 'Where < > refers to the sensor data block covered, for example, Linearity, Temp - Comp (temperature compensation). This indicates a failure with your Agilent E-Series or N8480 Series power sensor. Refer to your power sensor manual for details on returning it for repair.'),
error_item(-310, 'System error;Option 001 Battery charger fault', 'The power meter is connected to an AC power source, the battery is not fully charged and it is not charging.'),
error_item(-310, 'System error;Sensors connected to both front and rear inputs. You cannot connect two power sensors to the one channel input. In this instance, the power', 'meter detects power sensors connected to both its front and rear channel inputs.'),
error_item(-320, 'Out of memory', 'The power meter required more memory than was available to run an internal operation.'),
error_item(-330, 'Self-test Failed;', 'The -330, "Self-test Failed" errors indicate that you have a problem with your power meter. Refer to "Contacting Agilent Technologies" on page 103 for details of what to do with your faulty power meter.'),
error_item(-330, 'Self-test Failed;Measurement Channel Fault', 'Refer to "Measurement Assembly" on page 98 if you require a description of the Measurement Assembly test.'),
error_item(-330, 'Self-test Failed;Option 001 Battery requires replacement', 'The Option 001 battery is not charging to a satisfactory level and should be replaced.'),
error_item(-330, 'Self-test Failed;RAM Battery Fault', 'Refer to "RAM Battery" on page 98 if you require a description of the battery test. '),
error_item(-330, 'Self-test Failed;Calibrator Fault', 'Refer to "Calibrator" on page 99 if you require a description of the calibrator test. '),
error_item(-330, 'Self-test Failed;ROM Check Failed', 'Refer to "ROM Checksum" on page 98 if you require a description of the ROM Checksum test. '),
error_item(-330, 'Self-test Failed;RAM Check Failed', 'Refer to "RAM" on page 98 if you require a description of the RAM test. '),
error_item(-330, 'Self-test Failed;Display Assy. Fault', 'Refer to "Display" on page 99 if you require a description of the Display test. '),
error_item(-330, 'Self-test Failed;Confidence Check Fault', 'Refer to "Confidence Check" on page 96 if you require a description of this test. '),
error_item(-330, 'Self-test Failed;Serial Interface Fault', 'Refer to "Serial Interface" on page 99 if you require a description of this test. '),
error_item(-350, 'Queue overflow', 'The error queue is full and another error has occurred which could not be recorded.'),
error_item(-361, 'Parity error in program', 'The serial port receiver has detected a parity error and consequently, data integrity cannot be guaranteed.'),
error_item(-362, 'Framing error in program', 'The serial port receiver has detected a framing error and consequently, data integrity cannot be guaranteed.'),
error_item(-363, 'Input buffer overrun', 'The serial port receiver has been overrun and consequently, data has been lost.'),
error_item(-410, 'Query INTERRUPTED', 'A command was received which sends data to the output buffer, but the output buffer contained data from a previous command (the previous data is not overwritten). The output buffer is cleared when power has been off or after *RST (reset) command has been executed.'),
error_item(-420, 'Query UNTERMINATED', 'The power meter was addressed to talk (that is, to send data over the interface) but a command has not been received which sends data to the output buffer. For example, you may have executed a CONFigure command (which does not generate data) and then attempted to read data from the remote interface.'),
error_item(-430, 'Query DEADLOCKED', 'A command was received which generates too much data to fit in the output buffer and the input buffer is also full. Command execution continues but data is lost. -440 Query UNTERMINATED after indefinite response The *IDN? command must be the last query command within a command string.'),
]
@classmethod
def check(cls, num, msg):
| if num==0: return
for e in cls.error_list:
if msg == e.msg:
emsg = '%s (%d)'%(e.msg, e.num)
msg = 'Power meter returned Error message.\n'
msg += '*'*len(emsg) + '\n'
msg += emsg + '\n'
msg += '*'*len(emsg) + '\n'
msg += e.txt + '\n'
raise StandardError(msg)
continue
_msg = 'Power meter returned Error message.\n'
emsg = '%s (%d)\n'%(msg, num)
_msg += '*'*len(emsg) + '\n'
_msg += emsg
_msg += '*'*len(emsg) + '\n'
raise StandardError(_msg)
return | identifier_body |
|
E4418.py | -------------------------------
This command is used to enable and disable averaging.
Args
====
< on_off : int or str : 0,1,'ON','OFF' >
Specify the averaging status.
0 = 'OFF', 1 = 'ON'
< ch : int : 1,2 >
Specify the channel to set an averaging status. (1, 2)
default = 1
Returns
========
Nothing.
Examples
========
>>> p.average_on_off(1)
>>> p.average_on_off('OFF', ch=2)
"""
self.com.send('SENS%d:AVER %s'%(ch, str(on_off)))
self._error_check()
return
def average_on_off_query(self, ch=1):
"""
SENSn:AVER? : Query Average ON/OFF
-----------------------------------
Check the average status.
Args
====
< ch : int : 1,2 >
Specify the channel to query an averaging status. (1, 2)
default = 1
Returns
========
< on_off : int : 1,0 >
Average status. 1 = ON, 0 = OFF
Examples
========
>>> p.average_on_off_query()
1
>>> p.average_on_off_query(ch=2)
0
"""
self.com.send('SENS%d:AVER?'%(ch))
ret = self.com.readline()
self._error_check()
ret = int(ret)
return ret
def average_count(self, count, ch=1):
"""
SENSn:AVER:COUN : Set Average Count
-----------------------------------
This command is used to enter a value for the filter length. If
[SENSe[1]]|SENSe2:AVERage:COUNt:AUTO is set to ON then entering a value
for the filter length automatically sets it to OFF. Increasing the
value of filter length increases measurement accuracy but also
increases the time taken to make a power measurement.
Entering a value using this command automatically turns the
[SENSe[1]]|SENSe2:AVERage:STATe command to ON.
Args
====
< count : int : >
Specify the count for averaging (filter length).
< ch : int : 1,2 >
Specify the channel to set an averaging count. (1, 2)
default = 1
Returns
========
Nothing.
Examples
========
>>> p.average_count(128)
>>> p.average_count(64, ch=2)
"""
self.com.send('SENS%d:AVER:COUN %d'%(ch, count))
self._error_check()
return
def average_count_query(self, ch=1):
"""
SENSn:AVER:COUN? : Query Average Count
---------------------------------------
Check the count for averaging.
Args
====
< ch : int : 1,2 >
Specify the channel to query an averaging count. (1, 2)
default = 1
Returns
========
< count : int : >
The count for averaging (filter length).
Examples
========
>>> p.average_count_query()
128
>>> p.average_count_query(ch=2)
64
"""
self.com.send('SENS%d:AVER:COUN?'%(ch))
ret = self.com.readline()
self._error_check()
ret = int(ret)
return ret
class EPM441A(E4418):
product_name = 'EPM-441A'
class EPM442A(E4418):
product_name = 'EPM-442A'
class E4418B(E4418):
product_name = 'E4418B'
class E4419B(E4418):
product_name = 'E4419B'
# ==============
# Helper Classes
# ==============
# Error Class
# ===========
class error_item(object):
num = 0
msg = ''
txt = ''
def __init__(self, num, msg, txt):
self.num = num
self.msg = msg
self.txt = txt
pass
class error_handler(object):
error_list = [
error_item(0, 'No error', ''),
error_item(-101, 'Invalid character', 'An invalid character was found in the command string. You may have inserted a character such as #, $ or % in the command header or within a parameter. For example, LIM:LOW O#.'),
error_item(-102, 'Syntax error', 'Invalid syntax was found in the command string. For example, LIM:CLE:AUTO, 1 or LIM:CLE:AUTO 1.'),
error_item(-103, 'Invalid separator', 'An invalid separator was found in the command string. You may have used a comma instead of a colon, semicolon or blank space; or you may have used a blank space instead of a comma. For example, OUTP:ROSC,1.'),
error_item(-105, 'GET not allowed', 'A Group Execute Trigger (GET) is not allowed within a command string.'),
error_item(-108, 'Parameter not allowed', 'More parameters were received than expected for the command. You may have entered an extra parameter or added a parameter to a command that does not accept a parameter. For example, CAL 10.'),
error_item(-109, 'Missing parameter', 'Fewer parameters were received than expected for the command. You omitted one or more parameters that are required for this command. For example, AVER:COUN.'),
error_item(-112, 'Program mnemonic too long', 'A command header was received which contained more than the maximum 12 characters allowed. For example, SENSeAVERageCOUNt 8.'),
error_item(-113, 'Undefined header', 'A command was received that is not valid for this power meter. You may have misspelled the command, it may not be a valid command or you may have the wrong interface selected. If you are using the short form of the command, remember that it may contain up to four letters. For example, TRIG:SOUR IMM.'),
error_item(-121, 'Invalid character in number', 'An invalid character was found in the number specified for a parameter value. For example, SENS:AVER:COUN 128#H.'),
error_item(-123, 'Exponent too large', 'A numeric parameter was found whose exponent was larger than 32,000. For example, SENS:COUN 1E34000.'),
error_item(-124, 'Too many digits', 'A numeric parameter was found whose mantissa contained more than 255 digits, excluding leading zeros.'),
error_item(-128, 'Numeric data not allowed', 'A numeric value was received within a command which does not accept a numeric value. For example, MEM:CLE 24.'),
error_item(-131, 'Invalid suffix', 'A suffix was incorrectly specified for a numeric parameter. You may have misspelled the suffix. For example, SENS:FREQ 200KZ.'),
error_item(-134, 'Suffix too long', 'A suffix used contained more than 12 characters. For example, SENS:FREQ 2MHZZZZZZZZZZZ.'),
error_item(-138, 'Suffix not allowed', 'A suffix was received following a numeric parameter which does not accept a suffix. For example, INIT:CONT 0Hz.'),
error_item(-148, 'Character data not allowed', 'A discrete parameter was received but a character string or a numeric parameter was expected. Check the list of parameters to verify that you have used a valid parameter type. For example, MEM:CLE CUSTOM_1.'),
error_item(-151, 'Invalid string data', 'An invalid string was received. Check to see if you have enclosed the character string in single or double quotes. For example, MEM:CLE "CUSTOM_1.'),
error_item(-158, 'String data not allowed', 'A character string was received but is not allowed for the command. Check the list of parameters to verify that you have used a valid parameter type. For example, LIM:STAT `ON\'.'),
error_item(-161, 'Invalid block data', 'A block data element was expected but was invalid for some reason. For example, *DDT #15FET. The 5 in the string indicates that 5 characters should follow, whereas in this example there are only 3.'),
error_item(-168, 'Block data not allowed', 'A legal block data element was encountered but not allowed by the power meter at this point. For example SYST:LANG #15FETC?.'),
error_item(-178, 'Expression data not allowed', 'A legal expression data was encountered but not allowed by the power meter at this point. For example SYST:LANG (5+2).'),
| def average_on_off(self, on_off, ch=1):
"""
SENSn:AVER : Set Average ON/OFF | random_line_split |
|
E4418.py | (self):
err_num, err_msg = self.error_query()
error_handler.check(err_num, err_msg)
return
def error_query(self):
"""
SYST:ERR? : Query Error Numbers
-------------------------------
This query returns error numbers and messages from the power meter's
error queue. When an error is generated by the power meter, it stores
an error number and corresponding message in the error queue. One error
is removed from the error queue each time the SYSTem:ERRor? command is
executed. The errors are cleared in the order of first-in first-out,
that is, the oldest errors are cleared first. To clear all the errors
from the error queue, execute the *CLS command. When the error queue is
empty, subsequent SYSTem:ERRor? queries return a +0, "No error"
message. The error queue has a maximum capacity of 30 errors.
Args
====
Nothing.
Returns
=======
< err_num : int : >
Error number. 0 = 'No Error'
< err_msg : str : >
Error message.
Examples
========
>>> p.error_query()
(0, 'No error.')
"""
self.com.send('SYST:ERR?')
ret = self.com.readline()
ret = ret.strip().split(',')
err_num = int(ret[0])
err_msg = ret[1].strip('"')
return err_num, err_msg
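# Illustrative sketch (editor's addition, not part of the original driver):
# because the instrument's queue holds up to 30 errors and reports +0,
# "No error" once it is empty, a caller could drain it like this (assuming
# `p` is an instance of this class):
#
#     while True:
#         num, msg = p.error_query()
#         if num == 0: break
#         print num, msg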
def zeroing(self, ch=1):
"""
CALn:ZERO:AUTO : Zeroing
------------------------
This command causes the power meter to perform its zeroing routine on
the specified channel when ONCE is selected. Zeroing takes
approximately 10 seconds. This adjusts the power meter for a zero power
reading with no power supplied to the power sensor. The 0|OFF parameter
is only required for the query response and is ignored in the command.
If 1|ON is selected, it causes the error -224, "Illegal parameter
value" to occur.
The command assumes that the power sensor is not connected to a power
source.
Args
====
< ch : int : 1,2 >
Specify the channel to perform a zeroing. (1, 2)
default = 1
Returns
=======
Nothing.
Examples
========
>>> p.zeroing()
>>> p.zeroing(ch=2)
"""
self.com.send('CAL%d:ZERO:AUTO ONCE'%(ch))
self._error_check()
time.sleep(10)
self._error_check()
return
def measure(self, ch=1, unit='DEF', resolution='DEF', wait=7):
"""
MEASn? : Measuring
------------------
This command sets the specified window's measurement function to single
channel with relative mode off, aborts, configures the window then
initiates channel A or B, calculates the measurement result and places
the result on the bus.
Args
====
< ch : int : 1,2 >
Specify the channel to perform a measuring. (1, 2)
default = 1
< unit : str : 'DEF','dBm','W' >
Specify the units of the returned power level.
'DEF', 'dBm', or 'W'. default = 'DEF'
< resolution : str, or int : 'DEF',1.0,0.1,0.01,0.001 >
Specify the resolution of the returned value.
default = 'DEF'
        Returns
========
< power : float : >
            Measured power level. The unit is specified by <unit>.
Examples
========
>>> p.measure()
>>> p.measure(ch=2)
"""
self.com.send('MEAS%d? %s, %s'%(ch, unit, resolution))
time.sleep(wait)
ret = self.com.readline()
self._error_check()
ret = float(ret.strip())
return ret
def average_on_off(self, on_off, ch=1):
"""
SENSn:AVER : Set Average ON/OFF
-------------------------------
This command is used to enable and disable averaging.
Args
====
< on_off : int or str : 0,1,'ON','OFF' >
Specify the averaging status.
0 = 'OFF', 1 = 'ON'
< ch : int : 1,2 >
            Specify the channel to set an averaging status. (1, 2)
default = 1
        Returns
========
Nothing.
Examples
========
>>> p.average_on_off(1)
>>> p.average_on_off('OFF', ch=2)
"""
self.com.send('SENS%d:AVER %s'%(ch, str(on_off)))
self._error_check()
return
def average_on_off_query(self, ch=1):
"""
SENSn:AVER? : Query Average ON/OFF
-----------------------------------
Check the average status.
Args
====
< ch : int : 1,2 >
            Specify the channel to query an averaging status. (1, 2)
default = 1
        Returns
========
< on_off : int : 1,0 >
Average status. 1 = ON, 0 = OFF
Examples
========
>>> p.average_on_off_query()
1
>>> p.average_on_off_query(ch=2)
0
"""
self.com.send('SENS%d:AVER?'%(ch))
ret = self.com.readline()
self._error_check()
ret = int(ret)
return ret
def average_count(self, count, ch=1):
"""
SENSn:AVER:COUN : Set Average Count
-----------------------------------
This command is used to enter a value for the filter length. If
[SENSe[1]]|SENSe2:AVERage:COUNt:AUTO is set to ON then entering a value
for the filter length automatically sets it to OFF. Increasing the
value of filter length increases measurement accuracy but also
increases the time taken to make a power measurement.
Entering a value using this command automatically turns the
[SENSe[1]]|SENSe2:AVERage:STATe command to ON.
Args
====
< count : int : >
Specify the count for averaging (filter length).
< ch : int : 1,2 >
            Specify the channel to set an averaging count. (1, 2)
default = 1
        Returns
========
Nothing.
Examples
========
>>> p.average_count(128)
>>> p.average_count(64, ch=2)
"""
self.com.send('SENS%d:AVER:COUN %d'%(ch, count))
self._error_check()
return
def average_count_query(self, ch=1):
"""
SENSn:AVER:COUN? : Query Average Count
---------------------------------------
Check the count for averaging.
Args
====
< ch : int : 1,2 >
            Specify the channel to query an averaging count. (1, 2)
default = 1
        Returns
========
< count : int : >
The count for averaging (filter length).
Examples
========
>>> p.average_count_query()
128
>>> p.average_count_query(ch=2)
64
"""
self.com.send('SENS%d:AVER:COUN?'%(ch))
ret = self.com.readline()
self._error_check()
ret = int(ret)
return ret
class EPM441A(E4418):
product_name = 'EPM-441A'
class EPM442A(E4418):
product_name = 'EPM-442A'
class E4418B(E4418):
product_name = 'E4418B'
class E4419B(E4418):
product_name = 'E4419B'
# ==============
# Helper Classes
# ==============
# Error Class
# ===========
class error_item(object):
num = 0
msg = ''
txt = ''
def __init__(self, num, msg, txt):
self.num = num
self.msg = msg
self.txt = txt
pass
class error_handler(object):
error_list = [
error_item(0, 'No error', ''),
error_item(-101, 'Invalid character', 'An invalid character was found in the command string. You may have inserted a character such as #, $ or % in the command header or within a parameter. For example, LIM:LOW O#.'),
error_item(-102, 'Syntax error', 'Invalid syntax was found in the command string. For example, LIM:CLE:AUTO, 1 or LIM:CLE:AUTO 1.'),
error_item(-103, 'Invalid separator', 'An invalid separator | _error_check | identifier_name |
|
main.rs | -> Self{
// init upyunconfig
UpYun{
UpYunConfig: config,
..Default::default()
}
}
fn set_httpc(mut self) -> Self{
self.httpc = "".to_string();
self
}
fn set_deprecated(mut self) -> Self{
self.deprecated = true;
self
}
fn build(self) -> Self{
self
}
// func (up *UpYun) Put(config *PutObjectConfig) (err error) {
// if config.LocalPath != "" {
// var fd *os.File
// if fd, err = os.Open(config.LocalPath); err != nil {
// return errorOperation("open file", err)
// }
// defer fd.Close()
// config.Reader = fd
// }
    //     if config.UseResumeUpload { // whether resumable upload is in use
// logrus.Info("up.resumePut")
// return up.resumePut(config)
// } else {
    //         logrus.Info("up.put") // normal upload
// return up.put(config)
// }
// }
fn Put(&mut self, config: PutObjectConfig){
if config.local_path != ""{
// file
}
if config.user_resume_upload{
            // resumable upload
            info!("resumable upload is not implemented yet")
} else {
            info!("normal upload")
}
}
fn put_file(
&mut self,
file_path: String,
filepath: String,
) -> Result<(), Box<dyn std::error::Error>> {
if file_path != "" {
match File::open(filepath) {
Ok(file) => {
println!("{:?}", file)
}
Err(e) => {
println!("open file error{:?}", e)
}
}
}
Ok(())
}
fn doGetEndpoint(&mut self, host: String) -> String {
match self.UpYunConfig.Hosts.get(&host){
Some(Value ) => Value.to_string(),
None => host
}
}
/// FIXME
fn doHTTPRequest(&mut self,method: hyper::Method, url:String, headers: HashMap<String,String>, body: Vec<u8>){
match hyper::Request::builder().method(method).uri(url).body(body){
Ok(req) => {
for (key,value) in headers{
if key.to_lowercase() == "host"{
// req.
} else {
}
}
},
Err(e) => {
println!("{:?}",e)
}
}
}
fn MakeRESTAuth(&mut self,config: RestAuthConfig) -> String{
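        // Builds "Upyun" + operator + ":" + md5(method&uri&date&length&password),
        // presumably used as the Authorization header value.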
let sign = vec![config.method, config.uri, config.DateStr, config.LengthStr, self.UpYunConfig.Password.clone()];
let mut tt = vec![];
tt.push(String::from("Upyun"));
tt.push(self.UpYunConfig.Operator.clone());
tt.push(":".to_string());
tt.push(md5str(sign.join("&")));
tt.concat()
}
fn doRESTRequest(&mut self, config: &RestReqConfig) -> Result<(), Box<dyn std::error::Error>> {
        // The uri passed in is already percent-encoded (utf-8 bytes to ascii), e.g. /sdk-test/xx/%E4%B8%AD%E6%96%87.log
// escUri := path.Join("/", up.Bucket, escapeUri(config.uri))
let mut escUri =
String::from("/") + &self.UpYunConfig.Bucket + &escapeUri(config.uri.clone());
if config.uri.chars().last().unwrap() == '/' {
escUri += '/'.to_string().as_str()
}
if config.query != "" {
escUri += ("?".to_owned() + &config.query).as_str()
}
let mut headers: HashMap<String, String> = HashMap::new();
let mut has_md5: bool = false;
let old_header: HashMap<String, String> = HashMap::new();
for (k, v) in old_header {
if k.to_lowercase() == "content-md5" && v != "" {
has_md5 = true
}
headers.insert(k, v).expect("header set error ");
}
headers.insert("Date".to_string(), makeRFC1123Date());
headers.insert("Host".to_string(), "v0.api.upyun.com".to_string()); // 为什么这个是固定的
if !has_md5 && config.useMD5 {
// config.httpBody.
            // need to check the type of httpBody here
//// FIXME: depend on config.httpBody.type
headers.insert("Content".to_string(), "xx".to_string());
}
if self.deprecated {
if let Some(value) = headers.get("Conetnt-Length") {
let size = 0;
} | }
fn md5str(s: String) -> String {
let mut hasher = md5::Md5::new();
hasher.input_str(&s);
hasher.result_str()
}
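// Percent-encodes every byte whose bit is set in the `escape` bitmap below
// (8 x u32 words = one bit per possible byte value).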
fn escapeUri(s: String) -> String {
// let s = String::from("/xx/中文.log");
if s == "" {
let _s = String::from("中文");
}
let escape: [u32; 8] = [
0xffffffff, 0xfc001fff, 0x78000001, 0xb8000001, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff,
];
let hexMap = "0123456789ABCDEF".as_bytes();
let mut size = 0;
let ss = s.as_bytes();
for i in 0..ss.len() {
let c = ss[i];
if escape.get((c >> 5) as usize).unwrap() & (1 << (c & 0x1f)) > 0 {
size += 3
} else {
size += 1
}
}
// let ret = [0u8;size]; // 静态 error
let mut ret = vec![0u8; size]; // 动态 success
let mut j = 0;
for i in 0..ss.len() {
let c = ss[i];
if escape[(c >> 5) as usize] & (1 << (c & 0x1f)) > 0 {
ret[j] = "%".as_bytes()[0];
// ret[j] = "%".parse::<u8>().unwrap();
ret[j + 1] = hexMap[(c >> 4) as usize];
ret[j + 2] = hexMap[(c & 0xf) as usize];
j += 3
} else {
ret[j] = c;
j += 1
}
}
from_utf8(&ret).unwrap().to_string()
}
fn unescapeUri(s: String) -> String {
println!("============");
    // locate '%' and convert it to its byte value
// let xx = "%";
// let xxx = xx.as_bytes();
// println!("change % to byte is ==> {:?}",xxx);
    // turn the incoming string into a character array
    // iterate over it and match '%'
// if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
// // if not correct, return original string
// return s
// }
// i += 3
let mut n: i32 = 0;
let s_vec: Vec<char> = s.chars().collect();
for mut _i in 0..s_vec.len() {
if s_vec[_i] == '%' {
if _i + 2 >= s_vec.len() || !ishex(s_vec[_i + 1] as u8) || !ishex(s_vec[_i + 2] as u8) {
return s;
}
_i += 3
} else {
_i += 1
}
n += 1
}
let mut t_vec: Vec<u8> = Vec::new();
let mut j = 0;
for mut _i in 0..s_vec.len() {
if s_vec[_i] == '%' {
t_vec[j] = unhex(s_vec[_i + 1] as u8) << 4 | unhex(s_vec[_i + 2] as u8);
_i += 3
} else {
t_vec[j] = s_vec[_i] as u8;
_i += 1
}
j += 1
}
from_utf8(&t_vec).unwrap().to_string()
}
// hexadecimal to decimal
fn unhex(c: u8) -> u8 {
if '0' as u8 <= c && c <= '9' as u8 {
c - '0' as u8
} else if 'a' as u8 <= c && c <= 'f' as u8 {
c - 'a' as u8 + 10
} else |
}
Ok(())
} | random_line_split |
main.rs | Self{
// init upyunconfig
UpYun{
UpYunConfig: config,
..Default::default()
}
}
fn set_httpc(mut self) -> Self{
self.httpc = "".to_string();
self
}
fn set_deprecated(mut self) -> Self{
self.deprecated = true;
self
}
fn build(self) -> Self{
self
}
// func (up *UpYun) Put(config *PutObjectConfig) (err error) {
// if config.LocalPath != "" {
// var fd *os.File
// if fd, err = os.Open(config.LocalPath); err != nil {
// return errorOperation("open file", err)
// }
// defer fd.Close()
// config.Reader = fd
// }
    //     if config.UseResumeUpload { // whether resumable upload is in use
// logrus.Info("up.resumePut")
// return up.resumePut(config)
// } else {
    //         logrus.Info("up.put") // normal upload
// return up.put(config)
// }
// }
fn Put(&mut self, config: PutObjectConfig){
if config.local_path != ""{
// file
}
if config.user_resume_upload{
            // resumable upload
            info!("resumable upload is not implemented yet")
} else {
            info!("normal upload")
}
}
fn put_file(
&mut self,
file_path: String,
filepath: String,
) -> Result<(), Box<dyn std::error::Error>> {
if file_path != "" {
match File::open(filepath) {
Ok(file) => {
println!("{:?}", file)
}
Err(e) => {
println!("open file error{:?}", e)
}
}
}
Ok(())
}
fn doGetEndpoint(&mut self, host: String) -> String {
match self.UpYunConfig.Hosts.get(&host){
| yper::Method, url:String, headers: HashMap<String,String>, body: Vec<u8>){
match hyper::Request::builder().method(method).uri(url).body(body){
Ok(req) => {
for (key,value) in headers{
if key.to_lowercase() == "host"{
// req.
} else {
}
}
},
Err(e) => {
println!("{:?}",e)
}
}
}
fn MakeRESTAuth(&mut self,config: RestAuthConfig) -> String{
let sign = vec![config.method, config.uri, config.DateStr, config.LengthStr, self.UpYunConfig.Password.clone()];
let mut tt = vec![];
tt.push(String::from("Upyun"));
tt.push(self.UpYunConfig.Operator.clone());
tt.push(":".to_string());
tt.push(md5str(sign.join("&")));
tt.concat()
}
fn doRESTRequest(&mut self, config: &RestReqConfig) -> Result<(), Box<dyn std::error::Error>> {
        // The uri passed in is already percent-encoded (utf-8 bytes to ascii), e.g. /sdk-test/xx/%E4%B8%AD%E6%96%87.log
// escUri := path.Join("/", up.Bucket, escapeUri(config.uri))
let mut escUri =
String::from("/") + &self.UpYunConfig.Bucket + &escapeUri(config.uri.clone());
if config.uri.chars().last().unwrap() == '/' {
escUri += '/'.to_string().as_str()
}
if config.query != "" {
escUri += ("?".to_owned() + &config.query).as_str()
}
let mut headers: HashMap<String, String> = HashMap::new();
let mut has_md5: bool = false;
let old_header: HashMap<String, String> = HashMap::new();
for (k, v) in old_header {
if k.to_lowercase() == "content-md5" && v != "" {
has_md5 = true
}
headers.insert(k, v).expect("header set error ");
}
headers.insert("Date".to_string(), makeRFC1123Date());
headers.insert("Host".to_string(), "v0.api.upyun.com".to_string()); // 为什么这个是固定的
if !has_md5 && config.useMD5 {
// config.httpBody.
            // need to check the type of httpBody here
//// FIXME: depend on config.httpBody.type
headers.insert("Content".to_string(), "xx".to_string());
}
if self.deprecated {
if let Some(value) = headers.get("Conetnt-Length") {
let size = 0;
}
}
Ok(())
}
}
fn md5str(s: String) -> String {
let mut hasher = md5::Md5::new();
hasher.input_str(&s);
hasher.result_str()
}
fn escapeUri(s: String) -> String {
// let s = String::from("/xx/中文.log");
if s == "" {
let _s = String::from("中文");
}
let escape: [u32; 8] = [
0xffffffff, 0xfc001fff, 0x78000001, 0xb8000001, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff,
];
let hexMap = "0123456789ABCDEF".as_bytes();
let mut size = 0;
let ss = s.as_bytes();
for i in 0..ss.len() {
let c = ss[i];
if escape.get((c >> 5) as usize).unwrap() & (1 << (c & 0x1f)) > 0 {
size += 3
} else {
size += 1
}
}
// let ret = [0u8;size]; // 静态 error
let mut ret = vec![0u8; size]; // 动态 success
let mut j = 0;
for i in 0..ss.len() {
let c = ss[i];
if escape[(c >> 5) as usize] & (1 << (c & 0x1f)) > 0 {
ret[j] = "%".as_bytes()[0];
// ret[j] = "%".parse::<u8>().unwrap();
ret[j + 1] = hexMap[(c >> 4) as usize];
ret[j + 2] = hexMap[(c & 0xf) as usize];
j += 3
} else {
ret[j] = c;
j += 1
}
}
from_utf8(&ret).unwrap().to_string()
}
fn unescapeUri(s: String) -> String {
println!("============");
    // locate '%' and convert it to its byte value
// let xx = "%";
// let xxx = xx.as_bytes();
// println!("change % to byte is ==> {:?}",xxx);
    // turn the incoming string into a character array
    // iterate over it and match '%'
// if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
// // if not correct, return original string
// return s
// }
// i += 3
let mut n: i32 = 0;
let s_vec: Vec<char> = s.chars().collect();
for mut _i in 0..s_vec.len() {
if s_vec[_i] == '%' {
if _i + 2 >= s_vec.len() || !ishex(s_vec[_i + 1] as u8) || !ishex(s_vec[_i + 2] as u8) {
return s;
}
_i += 3
} else {
_i += 1
}
n += 1
}
let mut t_vec: Vec<u8> = Vec::new();
let mut j = 0;
for mut _i in 0..s_vec.len() {
if s_vec[_i] == '%' {
t_vec[j] = unhex(s_vec[_i + 1] as u8) << 4 | unhex(s_vec[_i + 2] as u8);
_i += 3
} else {
t_vec[j] = s_vec[_i] as u8;
_i += 1
}
j += 1
}
from_utf8(&t_vec).unwrap().to_string()
}
// hexadecimal to decimal
fn unhex(c: u8) -> u8 {
if '0' as u8 <= c && c <= '9' as u8 {
c - '0' as u8
} else if 'a' as u8 <= c && c <= 'f' as u8 {
c - 'a' as u8 + 10
} else | Some(Value ) => Value.to_string(),
None => host
}
}
/// FIXME
fn doHTTPRequest(&mut self,method: h | identifier_body |
main.rs | (self) -> Self {
self
}
}
impl UpYunConfig {
fn new(Bucket: String, Operator: String, Password: String) -> Self{
UpYunConfig{
Bucket:Bucket,
Operator:Operator,
Password: Password,
..Default::default()
}
}
fn build(mut self) -> Self {
self
}
}
impl UpYun {
fn new(config: UpYunConfig) -> Self{
// init upyunconfig
UpYun{
UpYunConfig: config,
..Default::default()
}
}
fn set_httpc(mut self) -> Self{
self.httpc = "".to_string();
self
}
fn set_deprecated(mut self) -> Self{
self.deprecated = true;
self
}
fn build(self) -> Self{
self
}
// func (up *UpYun) Put(config *PutObjectConfig) (err error) {
// if config.LocalPath != "" {
// var fd *os.File
// if fd, err = os.Open(config.LocalPath); err != nil {
// return errorOperation("open file", err)
// }
// defer fd.Close()
// config.Reader = fd
// }
    //     if config.UseResumeUpload { // whether resumable upload is in use
// logrus.Info("up.resumePut")
// return up.resumePut(config)
// } else {
    //         logrus.Info("up.put") // normal upload
// return up.put(config)
// }
// }
fn Put(&mut self, config: PutObjectConfig){
if config.local_path != ""{
// file
}
if config.user_resume_upload{
            // resumable upload
            info!("resumable upload is not implemented yet")
} else {
            info!("normal upload")
}
}
fn put_file(
&mut self,
file_path: String,
filepath: String,
) -> Result<(), Box<dyn std::error::Error>> {
if file_path != "" {
match File::open(filepath) {
Ok(file) => {
println!("{:?}", file)
}
Err(e) => {
println!("open file error{:?}", e)
}
}
}
Ok(())
}
fn doGetEndpoint(&mut self, host: String) -> String {
match self.UpYunConfig.Hosts.get(&host){
Some(Value ) => Value.to_string(),
None => host
}
}
/// FIXME
fn doHTTPRequest(&mut self,method: hyper::Method, url:String, headers: HashMap<String,String>, body: Vec<u8>){
match hyper::Request::builder().method(method).uri(url).body(body){
Ok(req) => {
for (key,value) in headers{
if key.to_lowercase() == "host"{
// req.
} else {
}
}
},
Err(e) => {
println!("{:?}",e)
}
}
}
fn MakeRESTAuth(&mut self,config: RestAuthConfig) -> String{
let sign = vec![config.method, config.uri, config.DateStr, config.LengthStr, self.UpYunConfig.Password.clone()];
let mut tt = vec![];
tt.push(String::from("Upyun"));
tt.push(self.UpYunConfig.Operator.clone());
tt.push(":".to_string());
tt.push(md5str(sign.join("&")));
tt.concat()
}
fn doRESTRequest(&mut self, config: &RestReqConfig) -> Result<(), Box<dyn std::error::Error>> {
        // The uri passed in is already percent-encoded (utf-8 bytes to ascii), e.g. /sdk-test/xx/%E4%B8%AD%E6%96%87.log
// escUri := path.Join("/", up.Bucket, escapeUri(config.uri))
let mut escUri =
String::from("/") + &self.UpYunConfig.Bucket + &escapeUri(config.uri.clone());
if config.uri.chars().last().unwrap() == '/' {
escUri += '/'.to_string().as_str()
}
if config.query != "" {
escUri += ("?".to_owned() + &config.query).as_str()
}
let mut headers: HashMap<String, String> = HashMap::new();
let mut has_md5: bool = false;
let old_header: HashMap<String, String> = HashMap::new();
for (k, v) in old_header {
if k.to_lowercase() == "content-md5" && v != "" {
has_md5 = true
}
headers.insert(k, v).expect("header set error ");
}
headers.insert("Date".to_string(), makeRFC1123Date());
headers.insert("Host".to_string(), "v0.api.upyun.com".to_string()); // 为什么这个是固定的
if !has_md5 && config.useMD5 {
// config.httpBody.
            // need to check the type of httpBody here
//// FIXME: depend on config.httpBody.type
headers.insert("Content".to_string(), "xx".to_string());
}
if self.deprecated {
if let Some(value) = headers.get("Conetnt-Length") {
let size = 0;
}
}
Ok(())
}
}
fn md5str(s: String) -> String {
let mut hasher = md5::Md5::new();
hasher.input_str(&s);
hasher.result_str()
}
fn escapeUri(s: String) -> String {
// let s = String::from("/xx/中文.log");
if s == "" {
let _s = String::from("中文");
}
let escape: [u32; 8] = [
0xffffffff, 0xfc001fff, 0x78000001, 0xb8000001, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff,
];
let hexMap = "0123456789ABCDEF".as_bytes();
let mut size = 0;
let ss = s.as_bytes();
for i in 0..ss.len() {
let c = ss[i];
if escape.get((c >> 5) as usize).unwrap() & (1 << (c & 0x1f)) > 0 {
size += 3
} else {
size += 1
}
}
// let ret = [0u8;size]; // 静态 error
let mut ret = vec![0u8; size]; // 动态 success
let mut j = 0;
for i in 0..ss.len() {
let c = ss[i];
if escape[(c >> 5) as usize] & (1 << (c & 0x1f)) > 0 {
ret[j] = "%".as_bytes()[0];
// ret[j] = "%".parse::<u8>().unwrap();
ret[j + 1] = hexMap[(c >> 4) as usize];
ret[j + 2] = hexMap[(c & 0xf) as usize];
j += 3
} else {
ret[j] = c;
j += 1
}
}
from_utf8(&ret).unwrap().to_string()
}
fn unescapeUri(s: String) -> String {
println!("============");
    // locate '%' and convert it to its byte value
// let xx = "%";
// let xxx = xx.as_bytes();
// println!("change % to byte is ==> {:?}",xxx);
    // turn the incoming string into a character array
    // iterate over it and match '%'
// if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
// // if not correct, return original string
// return s
// }
// i += 3
let mut n: i32 = 0;
let s_vec: Vec<char> = s.chars().collect();
for mut _i in 0..s_vec.len() {
if s_vec[_i] == '%' {
if _i + 2 >= s_vec.len() || !ishex(s_vec[_i + 1] as u8) || !ishex(s_vec[_i + 2] as u8) {
return s;
}
_i += 3
} else {
_i += 1
}
n += 1
}
let mut t_vec: Vec<u8> = Vec::new();
let mut j = 0;
for mut _i in 0..s_vec.len() {
if s_vec[_i] == '%' {
t_vec[j] = unhex(s_vec[_i + 1] as u8) << 4 | unhex(s_vec[_i + 2] as u8);
_i += 3
} else {
t_vec[j] = s_vec[_i] as u8;
_i += 1
}
j += 1
}
from_utf8(&t_vec).unwrap().to | build | identifier_name |
|
main.rs | 0xb8000001, 0xffffffff, 0xffffffff, 0xffffffff,
0xffffffff,
];
let hexMap = "0123456789ABCDEF".as_bytes();
let mut size = 0;
let ss = s.as_bytes();
for i in 0..ss.len() {
let c = ss[i];
if escape.get((c >> 5) as usize).unwrap() & (1 << (c & 0x1f)) > 0 {
size += 3
} else {
size += 1
}
}
// let ret = [0u8;size]; // 静态 error
let mut ret = vec![0u8; size]; // 动态 success
let mut j = 0;
for i in 0..ss.len() {
let c = ss[i];
if escape[(c >> 5) as usize] & (1 << (c & 0x1f)) > 0 {
ret[j] = "%".as_bytes()[0];
// ret[j] = "%".parse::<u8>().unwrap();
ret[j + 1] = hexMap[(c >> 4) as usize];
ret[j + 2] = hexMap[(c & 0xf) as usize];
j += 3
} else {
ret[j] = c;
j += 1
}
}
from_utf8(&ret).unwrap().to_string()
}
fn unescapeUri(s: String) -> String {
println!("============");
    // locate '%' and convert it to its byte value
// let xx = "%";
// let xxx = xx.as_bytes();
// println!("change % to byte is ==> {:?}",xxx);
    // turn the incoming string into a character array
    // iterate over it and match '%'
// if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
// // if not correct, return original string
// return s
// }
// i += 3
let mut n: i32 = 0;
let s_vec: Vec<char> = s.chars().collect();
for mut _i in 0..s_vec.len() {
if s_vec[_i] == '%' {
if _i + 2 >= s_vec.len() || !ishex(s_vec[_i + 1] as u8) || !ishex(s_vec[_i + 2] as u8) {
return s;
}
_i += 3
} else {
_i += 1
}
n += 1
}
let mut t_vec: Vec<u8> = Vec::new();
let mut j = 0;
for mut _i in 0..s_vec.len() {
if s_vec[_i] == '%' {
t_vec[j] = unhex(s_vec[_i + 1] as u8) << 4 | unhex(s_vec[_i + 2] as u8);
_i += 3
} else {
t_vec[j] = s_vec[_i] as u8;
_i += 1
}
j += 1
}
from_utf8(&t_vec).unwrap().to_string()
}
// hexadecimal to decimal
fn unhex(c: u8) -> u8 {
if '0' as u8 <= c && c <= '9' as u8 {
c - '0' as u8
} else if 'a' as u8 <= c && c <= 'f' as u8 {
c - 'a' as u8 + 10
} else if 'A' as u8 <= c && c <= 'F' as u8 {
c - 'A' as u8 + 10
} else {
0
}
}
// check whether a byte is a hexadecimal digit
fn ishex(c: u8) -> bool {
if '0' as u8 <= c && c <= '9' as u8 {
true
} else if 'a' as u8 <= c && c <= 'f' as u8 {
true
} else if 'A' as u8 <= c && c <= 'F' as u8 {
true
} else {
false
}
}
// hash the content with SHA-1 wrapped in an HMAC
// func hmacSha1(key string, data []byte) []byte {
// hm := hmac.New(sha1.New, []byte(key))
// hm.Write(data)
// return hm.Sum(nil)
// }
fn hmacSha1(key: &[u8], value: &[u8]) -> String {
    // // first hash the key, similar to md5 but with a different algorithm
// let mut hasher = crypto::sha1::Sha1::new();
// hasher.input_str(&key);
// let result = hasher.result_str().as_bytes();
// let rr = vec![0u8;20];
// rr.copy_from_slice(&result);
    // then HMAC the hashed key together with the value
// let h_mac = NewMac::new(&result)
let mut mac = hmac::Hmac::new(crypto::sha1::Sha1::new(), key);
mac.input(value);
let result = mac.result();
let code = result.code();
// The correct hash is returned, it's just not in the representation you expected. The hash is returned as raw bytes, not as bytes converted to ASCII hexadecimal digits.
// If we print the hash code array as hex, like this
// println!("{:02x?}", code);
let code_vec = code
.iter()
.map(|b| format!("{:02x}", b))
.collect::<Vec<String>>();
code_vec.concat()
}
// func makeRFC1123Date(d time.Time) string {
// utc := d.UTC().Format(time.RFC1123)
// return strings.ReplaceAll(utc, "UTC", "GMT")
// }
fn makeRFC1123Date() -> String {
let time = Utc::now();
let time_utc = time.to_rfc2822();
let new_time_utf = time_utc.replace("+0000", "GMT");
new_time_utf
}
// base64 to string
// base64::decode_block(src)
#[cfg(test)]
mod tests {
use chrono::{Date, DateTime, Utc};
use hyper::http;
use std::{collections::HashMap, io::Read};
use crate::escapeUri;
use crate::hmacSha1;
use crate::makeRFC1123Date;
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
#[test]
fn parse_uri() {
let bucket = String::from("sdk-test");
let config = "/xx/中文.log/".to_string();
let query = "xxx";
let mut escUri = String::from("/") + &bucket + &escapeUri("/xx/中文.log".to_string());
if config.chars().last().unwrap() == '/' {
escUri += '/'.to_string().as_str()
}
if query != "" {
escUri += ("?".to_owned() + query).as_str()
}
// header set
// hasmd5 set
let mut headers: HashMap<String, String> = HashMap::new();
let mut has_md5: bool = false;
let old_header: HashMap<String, String> = HashMap::new();
for (k, v) in old_header {
if k.to_lowercase() == "content-md5" && v != "" {
has_md5 = true
}
headers.insert(k, v).expect("header set error ");
}
headers.insert("Date".to_string(), makeRFC1123Date());
headers.insert("Host".to_string(), "v0.api.upyun.com".to_string()); // 为什么这个是固定的
// headers["Date"] = makeRFC1123Date(time.Now());
// headers["Host"] = "v0.api.upyun.com"
if !has_md5 {
            // determine the body type
}
let deprecated = "";
// if deprecated {}
if let Some(value) = headers.get("Content-Length") {
let size = 0;
}
}
// use crate::base64ToStr;
#[test]
fn make_unified_auth() {
let sign: Vec<&'static str> = vec!["method", "uri", "DateStr", "Policy", "ContentMD5"];
let mut sign_no_empty: Vec<String> = Vec::new();
for item in sign {
if item != "" {
sign_no_empty.push(item.to_string());
}
}
let sign_bytes = sign_no_empty.join("&");
let password = "xx".as_bytes();
let sign_str =
openssl::base64::encode_block(hmacSha1(password, sign_bytes.as_bytes()).as_bytes());
let back_vec: Vec<St | ring> = vec![
"Upyun".to_string(),
"Operator" | conditional_block |
|
BankDataset.py | .drop(trainSet.index)
return trainSet, testSet
# ## Logistic function
# We are using sigmoid as a logistic function defined as <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/9537e778e229470d85a68ee0b099c08298a1a3f6">
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/8/88/Logistic-curve.svg/320px-Logistic-curve.svg.png">
# This function calculates probabilities using sigmoid function
#
# In[8]:
def logisticFunction(x):
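    # Sigmoid: 1 / (1 + e^(-x)); maps any real input into the open interval (0, 1).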
return 1.0/(1.0 + np.exp(-x))
# ## Regularization
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/d55221bf8c9b730ff7c4eddddb9473af47bb1d1c">
# ### L2 loss
# L2 loss or Tikhonov regularization
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/7328255ad4abce052b3f6f39c43e974808d0cdb6">
# Caution: Do not regularize B0 or bias term
# In[9]:
def l2Loss(regularizationParameter, weight):
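    # Gradient of the L2 penalty lambda * ||w||^2 is 2 * lambda * w; the bias entry is zeroed below.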
loss = 2 * regularizationParameter * weight
"Remove impact on bias"
loss[0] = 0
return loss
# ## Generate logLoss procedure
# For binary classification
# <img src="http://wiki.fast.ai/images/math/a/4/6/a4651d4ad311666c617d57c1dde37b28.png">
# In[10]:
def logLoss(y, p):
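    # Binary cross-entropy; logDelta (defined earlier) keeps the log() arguments away from zero.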
return -(y * np.log(p + logDelta) + (1 - y) * np.log(1 - p + logDelta)).sum()
# ## logisticSolverNewton
# In[11]:
def logisticSolverNewton(x, y, beta, regularizationParameter = None, regularization = None):
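    # One Newton-Raphson step: H = X'WX with W = diag(p(1-p)); the update is H^-1 (gradient + optional regularization).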
p = logisticFunction(np.dot(beta, x.T))
gradient = np.dot(x.T, p - y)
w = p * (1 - p)
identity = np.identity(len(w))
w = identity * w
hessian = np.dot(np.dot(x.T, w), x)
updateRule = None
if regularization == None:
updateRule = np.dot(np.linalg.inv(hessian), gradient)
else:
regFactor = regularization(regularizationParameter, beta)
updateRule = np.dot(np.linalg.inv(hessian + 2 * regularizationParameter * np.identity(len(hessian))) ,
(gradient + regFactor))
return updateRule
# ## miniBatchLogisticRegression
# Here log loss is being used. The objective is minimization of loss.
# X<sup>T</sup>(P-Y)
# In[12]:
"""If no step length controller is provided then values of alpha will be taken as step length.
Else the step length controller will be used. Additional parameters to the controller are
provided by stepLengthControllerParameters"""
def miniBatchLogisticRegression(xTrain, yTrain, xTest, yTest, beta, epochs = 5,
batchSize = 50, verbose = 0, alpha = 1.1e-5,
regularizationParameter = 9e-2, regularization = None, solver = None):
xTrain = np.insert(xTrain, 0, 1, axis = 1)
xTest = np.insert(xTest, 0, 1, axis = 1)
xTrain = xTrain * 1.0
yTrain = yTrain * 1.0
xTest = xTest * 1.0
yTest = yTest * 1.0
"""For plotting graphs"""
logLossTraining = []
logLossTest = []
indices = np.array(range(0, len(xTrain)))
for i in range(0, epochs):
if verbose:
|
"""Shuffle the indices"""
np.random.shuffle(indices)
"""Will split. May be uneven"""
batches = np.array_split(indices, batchSize)
if verbose:
print("Total batches created"+str(len(batches)))
index = 0
while index < len(xTrain):
batch = indices[index : index + batchSize]
index = index + batchSize
"""Select required x and y subsets"""
x = np.take(xTrain, batch, axis = 0)
y = np.take(yTrain, batch, axis = 0)
p = logisticFunction(np.dot(beta, x.T))
gradient = np.dot(x.T, p - y)
if solver == None:
"""Gradient descent"""
regFactor = 0
if regularization != None:
regFactor = regularization(regularizationParameter, beta)
beta = beta - (alpha * (gradient + regFactor))
else:
beta = beta - (alpha * solver(x, y, beta, regularizationParameter, regularization))
if verbose:
print beta
"""Calculating LogLoss for train and test set"""
xTrainPrediction = np.dot(beta, xTrain.T)
xTestPrediction = np.dot(beta, xTest.T)
logLossTraining.append(logLoss(yTrain, logisticFunction(np.dot(beta, xTrain.T))))
logLossTest.append(logLoss(yTest, logisticFunction(np.dot(beta, xTest.T))))
return beta, logLossTraining, logLossTest
# ## kFoldAnalysis
# In[13]:
def kFoldAnalysis(xTrain, yTrain, model, modelParameters, nFolds):
indices = np.array(range(0, len(xTrain)))
folds = np.array_split(indices, nFolds)
analysisMetricList = []
trainLossList = []
testLossList = []
for i in range(0, len(folds)):
validationSet = folds[i]
"""Set difference"""
trainSet = np.setdiff1d(indices, validationSet)
modelParameters['xTrain'] = np.take(xTrain, trainSet, axis = 0)
modelParameters['yTrain'] = np.take(yTrain, trainSet, axis = 0)
modelParameters['xTest'] = np.take(xTrain, validationSet, axis = 0)
modelParameters['yTest'] = np.take(yTrain, validationSet, axis = 0)
modelParams, trainLoss, testLoss = model(**modelParameters)
analysisMetricList.append(testLoss[-1])
trainLossList.append(trainLoss)
testLossList.append(testLoss)
return modelParams, trainLossList, testLossList, analysisMetricList
# ## GridSearch
# In[14]:
def gridSearch(xTrain, yTrain, xTest, yTest, model, modelParameters, hyperParameters,
nFolds = 1, reTrain = True, plotGraphs = False):
"""For storing is the best parameters"""
leastLoss = None
bestModel = None
bestHyperParams = None
"""Generate the parameter grid"""
parameterGrid = []
gridKeys = []
parameterGrid = list(product(*hyperParameters.values()))
hyperParameterKeys = hyperParameters.keys()
"""For plottong graphs"""
if plotGraphs:
plt.close()
plotHeight = 10
plotWidth = 20
index = 0
fig, axs = plt.subplots(len(parameterGrid), 2, figsize=(plotWidth, plotHeight * len(parameterGrid)))
fig = plt.figure()
fig.set_figheight(15)
fig.set_figwidth(15)
ax = fig.add_subplot(111, projection='3d')
"""Grid search for cartesian product of hyperParameters"""
for parameterMesh in parameterGrid:
hyperParameterMesh = {}
for k,v in zip(hyperParameterKeys, parameterMesh):
hyperParameterMesh[k] = v
"""Combine model Parameters"""
updatedParam = modelParameters.copy()
updatedParam.update(hyperParameterMesh)
"""Perform grid search with cross validation"""
if nFolds > 1:
modelParams, trainLossList, testLossList, analysisMetricList = kFoldAnalysis(model = model,
xTrain = xTrain,
yTrain = yTrain,
nFolds = nFolds,
modelParameters = updatedParam)
"""For storing best model"""
avg = np.average(analysisMetricList)
if leastLoss == None or avg < leastLoss:
leastLoss = avg
bestModel = modelParams
bestHyperParams = hyperParameterMesh
"""For plotting"""
if plotGraphs:
foldIndex = 1
ax.scatter(hyperParameterMesh['alpha'], hyperParameterMesh['regularizationParameter'],
avg, marker = 'o', label = str(hyperParameterMesh))
for train, test in zip(trainLossList, testLossList):
axs[index][0].plot(train, label = "Fold-" + str(foldIndex))
axs[index][1].plot(test, label = "Fold-" | print("Epoch-"+str(i)) | conditional_block |
BankDataset.py | B = B - B.mean()
return ((A * B).sum())/(sqrt((A * A).sum()) * sqrt((B * B).sum()))
# ### TextEncoder
#
# Here the data is mix of numbers and text. Text value cannot be directly used and should be converted to numeric data.<br>
# For this I have created a function text encoder which accepts a pandas series. Text encoder returns a lookUp dictionary for recreating the numeric value for text value and encoded text vector.
# For encoding I have applied a lambda function that will return value from dictionary.
# In[4]:
""" Converts the text features into numeric values so that they can be used by
the downstream algorithms.
Accepts pandas series and returns lookup dictionary and encoded vector"""
def textEncoder(textVector):
if type(textVector) == pd.core.series.Series:
lookUpDictionary = {}
lookupValue = 0
for key in textVector.unique():
lookUpDictionary[key] = lookupValue
lookupValue +=1
textVector = textVector.apply(lambda a: lookUpDictionary[a])
return lookUpDictionary,textVector
else:
raise TypeError("Expected a pandas series as an input")
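# Example with made-up values: textEncoder(pd.Series(['yes', 'no', 'yes']))
# would return ({'yes': 0, 'no': 1}, a series encoded as 0, 1, 0).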
# ## generateSpearmanCoefficient
# https://en.m.wikipedia.org/wiki/Spearman's_rank_correlation_coefficient
#
# If the data is categorical we cannot use Pearson's coefficient, as it assumes that the data comes from a normal distribution.<br>
# However Spearman's coefficient is appropriate for both continuous and discrete ordinal variables.<br>
#
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/a8dda555d22080d721679401fa13181cad3863f6"/>
#
# This is same as pearson's coefficient applied to ranked values
# In[5]:
def generateSpearmanCoefficient(x, y):
"""Rank the values"""
n = len(x)
xRanked = rankdata(x)
yRanked = rankdata(y)
"""Generate Paerson's constant on ranked data"""
return generatePearsonCoefficient(xRanked, yRanked)
# ## Feature scaling
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/b0aa2e7d203db1526c577192f2d9102b718eafd5">
# In[6]:
def scaleFeature(x):
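    # Z-score standardization: (x - mean) / std for every element of the series.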
mean = np.mean(x)
stdDeviation = np.std(x)
return x.apply(lambda y: ((y * 1.0) - mean)/(stdDeviation))
# ## SplitDataSet Procedure
# This method splits the dataset into trainset and testset based upon the trainSetSize value. For splitting the dataset, I am using pandas.sample to split the data. This gives me trainset. For testset I am calculating complement of the trainset. This I am doing by droping the index present in training set.
# In[7]:
"""Splits the provided pandas dataframe into training and test dataset"""
def splitDataSet(inputDataframe, trainSetSize):
trainSet = inputDataframe.sample(frac = trainSetSize)
testSet = inputDataframe.drop(trainSet.index)
return trainSet, testSet
# ## Logistic function
# We are using sigmoid as a logistic function defined as <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/9537e778e229470d85a68ee0b099c08298a1a3f6">
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/8/88/Logistic-curve.svg/320px-Logistic-curve.svg.png">
# This function calculates probabilities using sigmoid function
#
# In[8]:
def logisticFunction(x):
return 1.0/(1.0 + np.exp(-x))
# ## Regularization
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/d55221bf8c9b730ff7c4eddddb9473af47bb1d1c">
# ### L2 loss
# L2 loss or Tikhonov regularization
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/7328255ad4abce052b3f6f39c43e974808d0cdb6">
# Caution: Do not regularize B0 or bias term
# In[9]:
def l2Loss(regularizationParameter, weight):
loss = 2 * regularizationParameter * weight
"Remove impact on bias"
loss[0] = 0
return loss
# ## Generate logLoss procedure
# For binary classification
# <img src="http://wiki.fast.ai/images/math/a/4/6/a4651d4ad311666c617d57c1dde37b28.png">
# In[10]:
def logLoss(y, p):
return -(y * np.log(p + logDelta) + (1 - y) * np.log(1 - p + logDelta)).sum()
# ## logisticSolverNewton
# In[11]:
def logisticSolverNewton(x, y, beta, regularizationParameter = None, regularization = None):
p = logisticFunction(np.dot(beta, x.T))
gradient = np.dot(x.T, p - y)
w = p * (1 - p)
identity = np.identity(len(w))
w = identity * w
hessian = np.dot(np.dot(x.T, w), x)
updateRule = None
if regularization == None:
updateRule = np.dot(np.linalg.inv(hessian), gradient)
else:
regFactor = regularization(regularizationParameter, beta)
updateRule = np.dot(np.linalg.inv(hessian + 2 * regularizationParameter * np.identity(len(hessian))) ,
(gradient + regFactor))
return updateRule
# ## miniBatchLogisticRegression
# Here log loss is being used. The objective is minimization of loss.
# X<sup>T</sup>(P-Y)
# In[12]:
"""If no step length controller is provided then values of alpha will be taken as step length.
Else the step length controller will be used. Additional parameters to the controller are
provided by stepLengthControllerParameters"""
def miniBatchLogisticRegression(xTrain, yTrain, xTest, yTest, beta, epochs = 5,
batchSize = 50, verbose = 0, alpha = 1.1e-5,
regularizationParameter = 9e-2, regularization = None, solver = None):
xTrain = np.insert(xTrain, 0, 1, axis = 1)
xTest = np.insert(xTest, 0, 1, axis = 1)
xTrain = xTrain * 1.0
yTrain = yTrain * 1.0
xTest = xTest * 1.0
yTest = yTest * 1.0
"""For plotting graphs"""
logLossTraining = []
logLossTest = []
indices = np.array(range(0, len(xTrain)))
for i in range(0, epochs):
if verbose:
print("Epoch-"+str(i))
"""Shuffle the indices"""
np.random.shuffle(indices)
"""Will split. May be uneven"""
batches = np.array_split(indices, batchSize)
if verbose:
print("Total batches created"+str(len(batches)))
index = 0
while index < len(xTrain):
batch = indices[index : index + batchSize]
index = index + batchSize
"""Select required x and y subsets"""
x = np.take(xTrain, batch, axis = 0)
y = np.take(yTrain, batch, axis = 0)
p = logisticFunction(np.dot(beta, x.T))
gradient = np.dot(x.T, p - y)
if solver == None:
"""Gradient descent"""
regFactor = 0
if regularization != None:
regFactor = regularization(regularizationParameter, beta)
beta = beta - (alpha * (gradient + regFactor))
else:
beta = beta - (alpha * solver(x, y, beta, regularizationParameter, regularization))
if verbose:
print beta
"""Calculating LogLoss for train and test set"""
xTrainPrediction = np.dot(beta, xTrain.T)
xTestPrediction = np.dot(beta, xTest.T)
logLossTraining.append(logLoss(yTrain, logisticFunction(np.dot(beta, xTrain.T))))
logLossTest.append(logLoss(yTest, logisticFunction(np.dot(beta, xTest.T))))
return beta, logLossTraining, logLossTest
# ## kFoldAnalysis
# In[13]:
def kFoldAnalysis(xTrain, yTrain, model, modelParameters, nFolds):
indices = np.array(range(0, len(xTrain)))
folds = np.array_split(indices, nFolds)
analysisMetricList = | """Generate pearson's coefficient"""
def generatePearsonCoefficient(A, B):
A = A - A.mean() | random_line_split |
|
BankDataset.py | (x):
mean = np.mean(x)
stdDeviation = np.std(x)
return x.apply(lambda y: ((y * 1.0) - mean)/(stdDeviation))
# ## SplitDataSet Procedure
# This method splits the dataset into trainset and testset based upon the trainSetSize value. For splitting the dataset, I am using pandas.sample to split the data. This gives me trainset. For testset I am calculating complement of the trainset. This I am doing by droping the index present in training set.
# In[7]:
"""Splits the provided pandas dataframe into training and test dataset"""
def splitDataSet(inputDataframe, trainSetSize):
trainSet = inputDataframe.sample(frac = trainSetSize)
testSet = inputDataframe.drop(trainSet.index)
return trainSet, testSet
# ## Logistic function
# We are using sigmoid as a logistic function defined as <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/9537e778e229470d85a68ee0b099c08298a1a3f6">
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/8/88/Logistic-curve.svg/320px-Logistic-curve.svg.png">
# This function calculates probabilities using sigmoid function
#
# In[8]:
def logisticFunction(x):
return 1.0/(1.0 + np.exp(-x))
# ## Regularization
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/d55221bf8c9b730ff7c4eddddb9473af47bb1d1c">
# ### L2 loss
# L2 loss or Tikhonov regularization
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/7328255ad4abce052b3f6f39c43e974808d0cdb6">
# Caution: Do not regularize B0 or bias term
# In[9]:
def l2Loss(regularizationParameter, weight):
loss = 2 * regularizationParameter * weight
"Remove impact on bias"
loss[0] = 0
return loss
# ## Generate logLoss procedure
# For binary classification
# <img src="http://wiki.fast.ai/images/math/a/4/6/a4651d4ad311666c617d57c1dde37b28.png">
# In[10]:
def logLoss(y, p):
return -(y * np.log(p + logDelta) + (1 - y) * np.log(1 - p + logDelta)).sum()
# ## logisticSolverNewton
# In[11]:
def logisticSolverNewton(x, y, beta, regularizationParameter = None, regularization = None):
p = logisticFunction(np.dot(beta, x.T))
gradient = np.dot(x.T, p - y)
w = p * (1 - p)
identity = np.identity(len(w))
w = identity * w
hessian = np.dot(np.dot(x.T, w), x)
updateRule = None
if regularization == None:
updateRule = np.dot(np.linalg.inv(hessian), gradient)
else:
regFactor = regularization(regularizationParameter, beta)
updateRule = np.dot(np.linalg.inv(hessian + 2 * regularizationParameter * np.identity(len(hessian))) ,
(gradient + regFactor))
return updateRule
# ## miniBatchLogisticRegression
# Here log loss is being used. The objective is minimization of loss.
# X<sup>T</sup>(P-Y)
# In[12]:
"""If no step length controller is provided then values of alpha will be taken as step length.
Else the step length controller will be used. Additional parameters to the controller are
provided by stepLengthControllerParameters"""
def miniBatchLogisticRegression(xTrain, yTrain, xTest, yTest, beta, epochs = 5,
batchSize = 50, verbose = 0, alpha = 1.1e-5,
regularizationParameter = 9e-2, regularization = None, solver = None):
xTrain = np.insert(xTrain, 0, 1, axis = 1)
xTest = np.insert(xTest, 0, 1, axis = 1)
xTrain = xTrain * 1.0
yTrain = yTrain * 1.0
xTest = xTest * 1.0
yTest = yTest * 1.0
"""For plotting graphs"""
logLossTraining = []
logLossTest = []
indices = np.array(range(0, len(xTrain)))
for i in range(0, epochs):
if verbose:
print("Epoch-"+str(i))
"""Shuffle the indices"""
np.random.shuffle(indices)
"""Will split. May be uneven"""
batches = np.array_split(indices, batchSize)
if verbose:
print("Total batches created"+str(len(batches)))
index = 0
while index < len(xTrain):
batch = indices[index : index + batchSize]
index = index + batchSize
"""Select required x and y subsets"""
x = np.take(xTrain, batch, axis = 0)
y = np.take(yTrain, batch, axis = 0)
p = logisticFunction(np.dot(beta, x.T))
gradient = np.dot(x.T, p - y)
if solver == None:
"""Gradient descent"""
regFactor = 0
if regularization != None:
regFactor = regularization(regularizationParameter, beta)
beta = beta - (alpha * (gradient + regFactor))
else:
beta = beta - (alpha * solver(x, y, beta, regularizationParameter, regularization))
if verbose:
print beta
"""Calculating LogLoss for train and test set"""
xTrainPrediction = np.dot(beta, xTrain.T)
xTestPrediction = np.dot(beta, xTest.T)
logLossTraining.append(logLoss(yTrain, logisticFunction(np.dot(beta, xTrain.T))))
logLossTest.append(logLoss(yTest, logisticFunction(np.dot(beta, xTest.T))))
return beta, logLossTraining, logLossTest
# ## kFoldAnalysis
# In[13]:
def kFoldAnalysis(xTrain, yTrain, model, modelParameters, nFolds):
indices = np.array(range(0, len(xTrain)))
folds = np.array_split(indices, nFolds)
analysisMetricList = []
trainLossList = []
testLossList = []
for i in range(0, len(folds)):
validationSet = folds[i]
"""Set difference"""
trainSet = np.setdiff1d(indices, validationSet)
modelParameters['xTrain'] = np.take(xTrain, trainSet, axis = 0)
modelParameters['yTrain'] = np.take(yTrain, trainSet, axis = 0)
modelParameters['xTest'] = np.take(xTrain, validationSet, axis = 0)
modelParameters['yTest'] = np.take(yTrain, validationSet, axis = 0)
modelParams, trainLoss, testLoss = model(**modelParameters)
analysisMetricList.append(testLoss[-1])
trainLossList.append(trainLoss)
testLossList.append(testLoss)
return modelParams, trainLossList, testLossList, analysisMetricList
# ## GridSearch
# In[14]:
def gridSearch(xTrain, yTrain, xTest, yTest, model, modelParameters, hyperParameters,
nFolds = 1, reTrain = True, plotGraphs = False):
"""For storing is the best parameters"""
leastLoss = None
bestModel = None
bestHyperParams = None
"""Generate the parameter grid"""
parameterGrid = []
gridKeys = []
parameterGrid = list(product(*hyperParameters.values()))
hyperParameterKeys = hyperParameters.keys()
"""For plottong graphs"""
if plotGraphs:
plt.close()
plotHeight = 10
plotWidth = 20
index = 0
fig, axs = plt.subplots(len(parameterGrid), 2, figsize=(plotWidth, plotHeight * len(parameterGrid)))
fig = plt.figure()
fig.set_figheight(15)
fig.set_figwidth(15)
ax = fig.add_subplot(111, projection='3d')
"""Grid search for cartesian product of hyperParameters"""
for parameterMesh in parameterGrid:
hyperParameterMesh = {}
for k,v in zip(hyperParameterKeys, parameterMesh):
hyperParameterMesh[k] = v
"""Combine model Parameters"""
updatedParam = modelParameters.copy()
updatedParam.update(hyperParameterMesh)
"""Perform grid search with cross validation"""
if nFolds > 1:
modelParams, trainLossList, testLossList, analysisMetricList = kFoldAnalysis(model = model,
xTrain = xTrain,
yTrain = yTrain,
nFolds = nF | scaleFeature | identifier_name |
|
BankDataset.py | .drop(trainSet.index)
return trainSet, testSet
# ## Logistic function
# We are using sigmoid as a logistic function defined as <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/9537e778e229470d85a68ee0b099c08298a1a3f6">
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/8/88/Logistic-curve.svg/320px-Logistic-curve.svg.png">
# This function calculates probabilities using sigmoid function
#
# In[8]:
def logisticFunction(x):
|
# ## Regularization
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/d55221bf8c9b730ff7c4eddddb9473af47bb1d1c">
# ### L2 loss
# L2 loss or Tikhonov regularization
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/7328255ad4abce052b3f6f39c43e974808d0cdb6">
# Caution: Do not regularize B0 or bias term
# In[9]:
def l2Loss(regularizationParameter, weight):
loss = 2 * regularizationParameter * weight
"Remove impact on bias"
loss[0] = 0
return loss
# ## Generate logLoss procedure
# For binary classification
# <img src="http://wiki.fast.ai/images/math/a/4/6/a4651d4ad311666c617d57c1dde37b28.png">
# In[10]:
def logLoss(y, p):
return -(y * np.log(p + logDelta) + (1 - y) * np.log(1 - p + logDelta)).sum()
# ## logisticSolverNewton
# In[11]:
def logisticSolverNewton(x, y, beta, regularizationParameter = None, regularization = None):
p = logisticFunction(np.dot(beta, x.T))
gradient = np.dot(x.T, p - y)
w = p * (1 - p)
identity = np.identity(len(w))
w = identity * w
hessian = np.dot(np.dot(x.T, w), x)
updateRule = None
if regularization == None:
updateRule = np.dot(np.linalg.inv(hessian), gradient)
else:
regFactor = regularization(regularizationParameter, beta)
updateRule = np.dot(np.linalg.inv(hessian + 2 * regularizationParameter * np.identity(len(hessian))) ,
(gradient + regFactor))
return updateRule
# ## miniBatchLogisticRegression
# Here log loss is being used. The objective is minimization of loss.
# X<sup>T</sup>(P-Y)
# In[12]:
"""If no step length controller is provided then values of alpha will be taken as step length.
Else the step length controller will be used. Additional parameters to the controller are
provided by stepLengthControllerParameters"""
def miniBatchLogisticRegression(xTrain, yTrain, xTest, yTest, beta, epochs = 5,
batchSize = 50, verbose = 0, alpha = 1.1e-5,
regularizationParameter = 9e-2, regularization = None, solver = None):
xTrain = np.insert(xTrain, 0, 1, axis = 1)
xTest = np.insert(xTest, 0, 1, axis = 1)
xTrain = xTrain * 1.0
yTrain = yTrain * 1.0
xTest = xTest * 1.0
yTest = yTest * 1.0
"""For plotting graphs"""
logLossTraining = []
logLossTest = []
indices = np.array(range(0, len(xTrain)))
for i in range(0, epochs):
if verbose:
print("Epoch-"+str(i))
"""Shuffle the indices"""
np.random.shuffle(indices)
"""Will split. May be uneven"""
batches = np.array_split(indices, batchSize)
if verbose:
print("Total batches created"+str(len(batches)))
index = 0
while index < len(xTrain):
batch = indices[index : index + batchSize]
index = index + batchSize
"""Select required x and y subsets"""
x = np.take(xTrain, batch, axis = 0)
y = np.take(yTrain, batch, axis = 0)
p = logisticFunction(np.dot(beta, x.T))
gradient = np.dot(x.T, p - y)
if solver == None:
"""Gradient descent"""
regFactor = 0
if regularization != None:
regFactor = regularization(regularizationParameter, beta)
beta = beta - (alpha * (gradient + regFactor))
else:
beta = beta - (alpha * solver(x, y, beta, regularizationParameter, regularization))
if verbose:
print beta
"""Calculating LogLoss for train and test set"""
xTrainPrediction = np.dot(beta, xTrain.T)
xTestPrediction = np.dot(beta, xTest.T)
logLossTraining.append(logLoss(yTrain, logisticFunction(np.dot(beta, xTrain.T))))
logLossTest.append(logLoss(yTest, logisticFunction(np.dot(beta, xTest.T))))
return beta, logLossTraining, logLossTest
# ## kFoldAnalysis
# In[13]:
def kFoldAnalysis(xTrain, yTrain, model, modelParameters, nFolds):
indices = np.array(range(0, len(xTrain)))
folds = np.array_split(indices, nFolds)
analysisMetricList = []
trainLossList = []
testLossList = []
for i in range(0, len(folds)):
validationSet = folds[i]
"""Set difference"""
trainSet = np.setdiff1d(indices, validationSet)
modelParameters['xTrain'] = np.take(xTrain, trainSet, axis = 0)
modelParameters['yTrain'] = np.take(yTrain, trainSet, axis = 0)
modelParameters['xTest'] = np.take(xTrain, validationSet, axis = 0)
modelParameters['yTest'] = np.take(yTrain, validationSet, axis = 0)
modelParams, trainLoss, testLoss = model(**modelParameters)
analysisMetricList.append(testLoss[-1])
trainLossList.append(trainLoss)
testLossList.append(testLoss)
return modelParams, trainLossList, testLossList, analysisMetricList
# ## GridSearch
# In[14]:
def gridSearch(xTrain, yTrain, xTest, yTest, model, modelParameters, hyperParameters,
nFolds = 1, reTrain = True, plotGraphs = False):
"""For storing is the best parameters"""
leastLoss = None
bestModel = None
bestHyperParams = None
"""Generate the parameter grid"""
parameterGrid = []
gridKeys = []
parameterGrid = list(product(*hyperParameters.values()))
hyperParameterKeys = hyperParameters.keys()
"""For plottong graphs"""
if plotGraphs:
plt.close()
plotHeight = 10
plotWidth = 20
index = 0
fig, axs = plt.subplots(len(parameterGrid), 2, figsize=(plotWidth, plotHeight * len(parameterGrid)))
fig = plt.figure()
fig.set_figheight(15)
fig.set_figwidth(15)
ax = fig.add_subplot(111, projection='3d')
"""Grid search for cartesian product of hyperParameters"""
for parameterMesh in parameterGrid:
hyperParameterMesh = {}
for k,v in zip(hyperParameterKeys, parameterMesh):
hyperParameterMesh[k] = v
"""Combine model Parameters"""
updatedParam = modelParameters.copy()
updatedParam.update(hyperParameterMesh)
"""Perform grid search with cross validation"""
if nFolds > 1:
modelParams, trainLossList, testLossList, analysisMetricList = kFoldAnalysis(model = model,
xTrain = xTrain,
yTrain = yTrain,
nFolds = nFolds,
modelParameters = updatedParam)
"""For storing best model"""
avg = np.average(analysisMetricList)
if leastLoss == None or avg < leastLoss:
leastLoss = avg
bestModel = modelParams
bestHyperParams = hyperParameterMesh
"""For plotting"""
if plotGraphs:
foldIndex = 1
ax.scatter(hyperParameterMesh['alpha'], hyperParameterMesh['regularizationParameter'],
avg, marker = 'o', label = str(hyperParameterMesh))
for train, test in zip(trainLossList, testLossList):
axs[index][0].plot(train, label = "Fold-" + str(foldIndex))
axs[index][1].plot(test, label = "Fold | return 1.0/(1.0 + np.exp(-x)) | identifier_body |
tos.py | p = self._read()
sys.stdout.write(".")
if not self._debug:
sys.stdout.write("\n")
self._s.close()
self._s = serial.Serial(port, baudrate, rtscts=0, timeout=None)
thread.start_new_thread(self.run, ())
def run(self):
while True:
p = self._read()
self._read_counter += 1
if self._debug:
print "Serial:run: got a packet(%d): %s" % (self._read_counter, p)
ack = AckFrame(p.data)
if ack.protocol == self.SERIAL_PROTO_ACK:
if not self._ack:
self._ack = ack
if self._debug:
print "Serial:run: got an ack:", ack
self._ack = ack
# Wake up the writer
self._out_ack.acquire()
self._out_ack.notify()
self._out_ack.release()
else:
ampkt = ActiveMessage(NoAckDataFrame(p.data).data)
if ampkt.type == 100:
for t in "".join([chr(i) for i in ampkt.data]).strip('\n\0').split('\n'):
print "PRINTF:", t.strip('\n')
else:
if self._in_queue.full():
print "Warning: Buffer overflow"
self._in_queue.get()
self._in_queue.put(p, block=False)
# Returns the next incoming serial packet
def _read(self):
"""Wait for a packet and return it as a RawPacket."""
try:
d = self._get_byte()
ts = time.time()
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte()
ts = time.time()
packet = [d]
d = self._get_byte()
if d == self.HDLC_FLAG_BYTE:
d = self._get_byte()
ts = time.time()
else:
packet.append(d)
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte()
packet.append(d)
if self._debug == True:
print "Serial:_read: unescaped", packet
packet = self._unescape(packet)
crc = self._crc16(0, packet[1:-3])
packet_crc = self._decode(packet[-3:-1])
if crc != packet_crc:
print "Warning: wrong CRC! %x != %x %s" % (crc, packet_crc, ["%2x" % i for i in packet])
if self._debug:
if self._ts == None:
self._ts = ts
else:
print "Serial:_read: %.4f (%.4f) Recv:" % (ts, ts - self._ts), self._format_packet(packet[1:-3])
self._ts = ts
return RawPacket(ts, packet[1:-3], crc == packet_crc)
except socket.timeout:
return None
def read(self, timeout=None):
start = time.time();
done = False
while not done:
p = None
while p == None:
if timeout == 0 or time.time() - start < timeout:
try:
p = self._in_queue.get(True, timeout)
except Queue.Empty:
return None
else:
return None
if p.crc:
done = True
else:
p = None
# In the current TinyOS the packets from the mote are always NoAckDataFrame
return NoAckDataFrame(p.data)
def write(self, payload):
"""
Write a packet. If the payload argument is a list, it is
assumed to be exactly the payload. Otherwise the payload is
assume to be a Packet and the real payload is obtain by
calling the .payload().
"""
if type(payload) != type([]):
# Assume this will be derived from Packet
|
self._out_lock.acquire()
self._seqno = (self._seqno + 1) % 100
packet = DataFrame();
packet.protocol = self.SERIAL_PROTO_PACKET_ACK
packet.seqno = self._seqno
packet.dispatch = 0
packet.data = payload
packet = packet.payload()
crc = self._crc16(0, packet)
packet.append(crc & 0xff)
packet.append((crc >> 8) & 0xff)
packet = [self.HDLC_FLAG_BYTE] + self._escape(packet) + [self.HDLC_FLAG_BYTE]
while True:
self._put_bytes(packet)
self._write_counter += 1
if self._debug == True:
print "Send(%d/%d): %s" % (self._write_counter, self._write_counter_failures, packet)
print "Wait for ack %d ..." % (self._seqno)
self._out_ack.acquire()
self._out_ack.wait(0.2)
if self._debug:
print "Wait for ack %d done. Latest ack:" % (self._seqno), self._ack
self._out_ack.release()
if self._ack and self._ack.seqno == self._seqno:
if self._debug:
print "The packet was acked."
self._out_lock.release()
if self._debug:
print "Returning from Serial.write..."
return True
else:
self._write_counter_failures += 1
if self._debug:
print "The packet was not acked. Try again."
# break # make only one sending attempt
self._out_lock.release()
return False
def _format_packet(self, payload):
f = NoAckDataFrame(payload)
if f.protocol == self.SERIAL_PROTO_ACK:
rpacket = AckFrame(payload)
return "Ack seqno: %d" % (rpacket.seqno)
else:
rpacket = ActiveMessage(f.data)
return "D: %04x S: %04x L: %02x G: %02x T: %02x | %s" % \
(rpacket.destination, rpacket.source,
rpacket.length, rpacket.group, rpacket.type,
list2hex(rpacket.data))
def _crc16(self, base_crc, frame_data):
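        # CRC-16 with polynomial 0x1021 (CCITT), processed MSB-first over frame_data.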
crc = base_crc
for b in frame_data:
crc = crc ^ (b << 8)
for i in range(0, 8):
if crc & 0x8000 == 0x8000:
crc = (crc << 1) ^ 0x1021
else:
crc = crc << 1
crc = crc & 0xffff
return crc
def _encode(self, val, dim):
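        # Serialize val into dim bytes, least-significant byte first.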
output = []
for i in range(dim):
output.append(val & 0xFF)
val = val >> 8
return output
def _decode(self, v):
r = long(0)
for i in v[::-1]:
r = (r << 8) + i
return r
def _get_byte(self):
try:
r = struct.unpack("B", self._s.read())[0]
return r
except struct.error:
# Serial port read timeout
raise socket.timeout
def _put_bytes(self, data):
#print "DEBUG: _put_bytes:", data
for b in data:
self._s.write(struct.pack('B', b))
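# HDLC-style byte stuffing: any flag or escape byte inside the frame body is
# sent as HDLC_CTLESC_BYTE followed by the original byte XORed with 0x20;
# _unescape reverses the mapping on receive.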
def _unescape(self, packet):
r = []
esc = False
for b in packet:
if esc:
r.append(b ^ 0x20)
esc = False
elif b == self.HDLC_CTLESC_BYTE:
esc = True
else:
r.append(b)
return r
def _escape(self, packet):
r = []
for b in packet:
if b == self.HDLC_FLAG_BYTE or b == self.HDLC_CTLESC_BYTE:
r.append(self.HDLC_CTLESC_BYTE)
r.append(b ^ 0x20)
else:
r.append(b)
return r
def debug(self, debug):
self._debug = debug
class SFClient:
def __init__(self, host, port, qsize=10):
self._in_queue = Queue(qsize)
self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._s.connect((host, port))
data = self._s.recv(2)
if data != 'U ':
print "Wrong handshake"
self._s.send("U ")
print "Connected"
thread.start_new_thread(self.run, ())
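# Serial-forwarder framing as implemented here: after the 2-byte "U " version
# handshake, every frame is one length byte followed by that many bytes.
# run() drops the first payload byte (apparently the dispatch/type byte) and
# write() prepends a zero byte before sending.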
def run(self):
while True:
length = ord(self._s.recv(1))
data = self._s.recv(length)
data = [ord(c) for c in data][1:]
#print "Recv %d bytes" % (length), ActiveMessage(data)
if self._in_queue.full():
print "Warning: Buffer overflow"
self._in_queue.get()
p = RawPacket()
p.crc = 1
p.data = data
self._in_queue.put(p, block=False)
def read(self, timeout=0):
return self._in | payload = payload.payload() | conditional_block |
tos.py | ", self._s.read())[0]
return r
except struct.error:
# Serial port read timeout
raise socket.timeout
def _put_bytes(self, data):
#print "DEBUG: _put_bytes:", data
for b in data:
self._s.write(struct.pack('B', b))
def _unescape(self, packet):
r = []
esc = False
for b in packet:
if esc:
r.append(b ^ 0x20)
esc = False
elif b == self.HDLC_CTLESC_BYTE:
esc = True
else:
r.append(b)
return r
def _escape(self, packet):
r = []
for b in packet:
if b == self.HDLC_FLAG_BYTE or b == self.HDLC_CTLESC_BYTE:
r.append(self.HDLC_CTLESC_BYTE)
r.append(b ^ 0x20)
else:
r.append(b)
return r
def debug(self, debug):
self._debug = debug
class SFClient:
def __init__(self, host, port, qsize=10):
self._in_queue = Queue(qsize)
self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._s.connect((host, port))
data = self._s.recv(2)
if data != 'U ':
print "Wrong handshake"
self._s.send("U ")
print "Connected"
thread.start_new_thread(self.run, ())
def run(self):
while True:
length = ord(self._s.recv(1))
data = self._s.recv(length)
data = [ord(c) for c in data][1:]
#print "Recv %d bytes" % (length), ActiveMessage(data)
if self._in_queue.full():
print "Warning: Buffer overflow"
self._in_queue.get()
p = RawPacket()
p.crc = 1
p.data = data
self._in_queue.put(p, block=False)
def read(self, timeout=0):
return self._in_queue.get()
def write(self, payload):
print "SFClient: write:", payload
if type(payload) != type([]):
# Assume this will be derived from Packet
payload = payload.payload()
payload = [0] + payload
self._s.send(chr(len(payload)))
self._s.send(''.join([chr(c) for c in payload]))
return True
class AM:
def __init__(self, s):
self._s = s
def read(self, timeout=None):
frame = self._s.read(timeout)
if frame:
return ActiveMessage(frame.data)
return frame
def write(self, packet, amid):
return self._s.write(ActiveMessage(packet, amid=amid))
class Packet:
"""
The Packet class offers a handy way to pack and unpack
binary data based on a given pattern.
"""
def _decode(self, v):
r = long(0)
for i in v:
r = (r << 8) + i
return r
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(int(val & 0xFF))
val = val >> 8
output.reverse()
return output
def __init__(self, desc, packet = None):
offset = 0
boffset = 0
sum = 0
for i in range(len(desc)-1, -1, -1):
(n, t, s) = desc[i]
if s == None:
if sum > 0:
desc[i] = (n, t, -sum)
break
sum += s
self.__dict__['_schema'] = [(t, s) for (n, t, s) in desc]
self.__dict__['_names'] = [n for (n, t, s) in desc]
self.__dict__['_values'] = []
if type(packet) == type([]):
for (t, s) in self._schema:
if t == 'int':
self._values.append(self._decode(packet[offset:offset + s]))
offset += s
elif t == 'bint':
doffset = 8 - (boffset + s)
self._values.append((packet[offset] >> doffset) & ((1<<s) - 1))
boffset += s
if boffset == 8:
offset += 1
boffset = 0
elif t == 'string':
self._values.append(''.join([chr(i) for i in packet[offset:offset + s]]))
offset += s
elif t == 'blob':
if s:
if s > 0:
self._values.append(packet[offset:offset + s])
offset += s
else:
self._values.append(packet[offset:s])
offset = len(packet) + s
else:
self._values.append(packet[offset:])
elif type(packet) == type(()):
for i in packet:
self._values.append(i)
else:
for v in self._schema:
self._values.append(None)
def __repr__(self):
return self._values.__repr__()
def __str__(self):
r = ""
for i in range(len(self._names)):
r += "%s: %s " % (self._names[i], self._values[i])
for i in range(len(self._names), len(self._values)):
r += "%s" % self._values[i]
return r
# return self._values.__str__()
# Implement the map behavior
def __getitem__(self, key):
return self.__getattr__(key)
def __setitem__(self, key, value):
self.__setattr__(key, value)
def __len__(self):
return len(self._values)
def keys(self):
return self._names
def values(self):
return self._values
# Implement the struct behavior
def __getattr__(self, name):
#print "DEBUG: __getattr__", name
if type(name) == type(0):
return self._names[name]
else:
return self._values[self._names.index(name)]
def __setattr__(self, name, value):
if type(name) == type(0):
self._values[name] = value
else:
self._values[self._names.index(name)] = value
def __ne__(self, other):
if other.__class__ == self.__class__:
return self._values != other._values
else:
return True
def __eq__(self, other):
if other.__class__ == self.__class__:
return self._values == other._values
else:
return False
def __nonzero__(self):
return True;
# Custom
def names(self):
return self._names
def sizes(self):
return self._schema
def payload(self):
r = []
boffset = 0
for i in range(len(self._schema)):
(t, s) = self._schema[i]
if t == 'int':
r += self._encode(self._values[i], s)
boffset = 0
elif t == 'bint':
doffset = 8 - (boffset + s)
if boffset == 0:
r += [self._values[i] << doffset]
else:
r[-1] |= self._values[i] << doffset
boffset += s
if boffset == 8:
boffset = 0
elif self._values[i] != []:
r += self._values[i]
for i in self._values[len(self._schema):]:
r += i
return r
class RawPacket(Packet):
def __init__(self, ts = None, data = None, crc = None):
Packet.__init__(self,
[('ts' , 'int', 4),
('crc', 'int', 1),
('data', 'blob', None)],
None)
self.ts = ts;
self.data = data
self.crc = crc
class AckFrame(Packet):
def __init__(self, payload = None):
Packet.__init__(self,
[('protocol', 'int', 1),
('seqno', 'int', 1)],
payload)
class DataFrame(Packet):
def __init__(self, payload = None):
if payload != None and type(payload) != type([]):
# Assume it is a Packet
payload = payload.payload()
Packet.__init__(self,
[('protocol', 'int', 1),
('seqno', 'int', 1),
('dispatch', 'int', 1),
('data', 'blob', None)],
payload)
class NoAckDataFrame(Packet):
def __init__(self, payload = None):
if payload != None and type(payload) != type([]):
# Assume it is a Packet
payload = payload.payload()
Packet.__init__(self,
[('protocol', 'int', 1),
('dispatch', 'int', 1),
('data', 'blob', None)],
payload)
class | ActiveMessage | identifier_name |
|
tos.py | p = self._read()
sys.stdout.write(".")
if not self._debug:
sys.stdout.write("\n")
self._s.close()
self._s = serial.Serial(port, baudrate, rtscts=0, timeout=None)
thread.start_new_thread(self.run, ())
def run(self):
while True:
p = self._read()
self._read_counter += 1
if self._debug:
print "Serial:run: got a packet(%d): %s" % (self._read_counter, p)
ack = AckFrame(p.data)
if ack.protocol == self.SERIAL_PROTO_ACK:
if not self._ack:
self._ack = ack
if self._debug:
print "Serial:run: got an ack:", ack
self._ack = ack
# Wake up the writer
self._out_ack.acquire()
self._out_ack.notify()
self._out_ack.release()
else:
ampkt = ActiveMessage(NoAckDataFrame(p.data).data)
if ampkt.type == 100:
for t in "".join([chr(i) for i in ampkt.data]).strip('\n\0').split('\n'):
print "PRINTF:", t.strip('\n')
else:
if self._in_queue.full():
print "Warning: Buffer overflow"
self._in_queue.get()
self._in_queue.put(p, block=False)
# Returns the next incoming serial packet
def _read(self):
"""Wait for a packet and return it as a RawPacket."""
try:
d = self._get_byte()
ts = time.time()
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte()
ts = time.time()
packet = [d]
d = self._get_byte()
if d == self.HDLC_FLAG_BYTE:
d = self._get_byte()
ts = time.time()
else:
packet.append(d)
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte()
packet.append(d)
if self._debug == True:
print "Serial:_read: unescaped", packet
packet = self._unescape(packet)
crc = self._crc16(0, packet[1:-3])
packet_crc = self._decode(packet[-3:-1])
if crc != packet_crc:
print "Warning: wrong CRC! %x != %x %s" % (crc, packet_crc, ["%2x" % i for i in packet])
if self._debug:
if self._ts == None:
self._ts = ts
else:
print "Serial:_read: %.4f (%.4f) Recv:" % (ts, ts - self._ts), self._format_packet(packet[1:-3])
self._ts = ts
return RawPacket(ts, packet[1:-3], crc == packet_crc)
except socket.timeout:
return None
def read(self, timeout=None):
start = time.time();
done = False
while not done:
p = None
while p == None:
if timeout == 0 or time.time() - start < timeout:
try:
p = self._in_queue.get(True, timeout)
except Queue.Empty:
return None
else:
return None
if p.crc:
done = True
else:
p = None
# In the current TinyOS the packets from the mote are always NoAckDataFrame
return NoAckDataFrame(p.data)
def write(self, payload):
"""
Write a packet. If the payload argument is a list, it is
assumed to be exactly the payload. Otherwise the payload is
assumed to be a Packet and the real payload is obtained by
calling its .payload() method.
"""
if type(payload) != type([]):
# Assume this will be derived from Packet
payload = payload.payload()
self._out_lock.acquire()
self._seqno = (self._seqno + 1) % 100
packet = DataFrame();
packet.protocol = self.SERIAL_PROTO_PACKET_ACK
packet.seqno = self._seqno
packet.dispatch = 0
packet.data = payload
packet = packet.payload()
crc = self._crc16(0, packet)
packet.append(crc & 0xff)
packet.append((crc >> 8) & 0xff)
packet = [self.HDLC_FLAG_BYTE] + self._escape(packet) + [self.HDLC_FLAG_BYTE]
while True:
self._put_bytes(packet)
self._write_counter += 1
if self._debug == True:
print "Send(%d/%d): %s" % (self._write_counter, self._write_counter_failures, packet)
print "Wait for ack %d ..." % (self._seqno)
self._out_ack.acquire()
self._out_ack.wait(0.2)
if self._debug:
print "Wait for ack %d done. Latest ack:" % (self._seqno), self._ack
self._out_ack.release()
if self._ack and self._ack.seqno == self._seqno:
if self._debug:
print "The packet was acked."
self._out_lock.release()
if self._debug:
print "Returning from Serial.write..."
return True
else:
self._write_counter_failures += 1
if self._debug:
print "The packet was not acked. Try again."
# break # make only one sending attempt
self._out_lock.release()
return False
def _format_packet(self, payload):
f = NoAckDataFrame(payload)
if f.protocol == self.SERIAL_PROTO_ACK:
rpacket = AckFrame(payload)
return "Ack seqno: %d" % (rpacket.seqno)
else:
rpacket = ActiveMessage(f.data)
return "D: %04x S: %04x L: %02x G: %02x T: %02x | %s" % \
(rpacket.destination, rpacket.source,
rpacket.length, rpacket.group, rpacket.type,
list2hex(rpacket.data))
def _crc16(self, base_crc, frame_data):
crc = base_crc
for b in frame_data:
crc = crc ^ (b << 8)
for i in range(0, 8):
if crc & 0x8000 == 0x8000:
crc = (crc << 1) ^ 0x1021
else:
crc = crc << 1
crc = crc & 0xffff
return crc
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(val & 0xFF)
val = val >> 8
return output
def _decode(self, v):
r = long(0)
for i in v[::-1]:
r = (r << 8) + i
return r
def _get_byte(self):
try:
r = struct.unpack("B", self._s.read())[0]
return r
except struct.error:
# Serial port read timeout
raise socket.timeout
def _put_bytes(self, data):
#print "DEBUG: _put_bytes:", data
for b in data:
self._s.write(struct.pack('B', b))
def _unescape(self, packet):
r = []
esc = False
for b in packet:
if esc:
r.append(b ^ 0x20)
esc = False
elif b == self.HDLC_CTLESC_BYTE:
esc = True
else:
r.append(b)
return r
def _escape(self, packet):
r = []
for b in packet:
if b == self.HDLC_FLAG_BYTE or b == self.HDLC_CTLESC_BYTE:
r.append(self.HDLC_CTLESC_BYTE)
r.append(b ^ 0x20)
else:
r.append(b)
return r
def debug(self, debug):
self._debug = debug
| self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._s.connect((host, port))
data = self._s.recv(2)
if data != 'U ':
print "Wrong handshake"
self._s.send("U ")
print "Connected"
thread.start_new_thread(self.run, ())
def run(self):
while True:
length = ord(self._s.recv(1))
data = self._s.recv(length)
data = [ord(c) for c in data][1:]
#print "Recv %d bytes" % (length), ActiveMessage(data)
if self._in_queue.full():
print "Warning: Buffer overflow"
self._in_queue.get()
p = RawPacket()
p.crc = 1
p.data = data
self._in_queue.put(p, block=False)
def read(self, timeout=0):
return self._in_queue | class SFClient:
def __init__(self, host, port, qsize=10):
self._in_queue = Queue(qsize) | random_line_split |
tos.py | = 0
packet.data = payload
packet = packet.payload()
crc = self._crc16(0, packet)
packet.append(crc & 0xff)
packet.append((crc >> 8) & 0xff)
packet = [self.HDLC_FLAG_BYTE] + self._escape(packet) + [self.HDLC_FLAG_BYTE]
while True:
self._put_bytes(packet)
self._write_counter += 1
if self._debug == True:
print "Send(%d/%d): %s" % (self._write_counter, self._write_counter_failures, packet)
print "Wait for ack %d ..." % (self._seqno)
self._out_ack.acquire()
self._out_ack.wait(0.2)
if self._debug:
print "Wait for ack %d done. Latest ack:" % (self._seqno), self._ack
self._out_ack.release()
if self._ack and self._ack.seqno == self._seqno:
if self._debug:
print "The packet was acked."
self._out_lock.release()
if self._debug:
print "Returning from Serial.write..."
return True
else:
self._write_counter_failures += 1
if self._debug:
print "The packet was not acked. Try again."
# break # make only one sending attempt
self._out_lock.release()
return False
def _format_packet(self, payload):
f = NoAckDataFrame(payload)
if f.protocol == self.SERIAL_PROTO_ACK:
rpacket = AckFrame(payload)
return "Ack seqno: %d" % (rpacket.seqno)
else:
rpacket = ActiveMessage(f.data)
return "D: %04x S: %04x L: %02x G: %02x T: %02x | %s" % \
(rpacket.destination, rpacket.source,
rpacket.length, rpacket.group, rpacket.type,
list2hex(rpacket.data))
def _crc16(self, base_crc, frame_data):
crc = base_crc
for b in frame_data:
crc = crc ^ (b << 8)
for i in range(0, 8):
if crc & 0x8000 == 0x8000:
crc = (crc << 1) ^ 0x1021
else:
crc = crc << 1
crc = crc & 0xffff
return crc
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(val & 0xFF)
val = val >> 8
return output
def _decode(self, v):
r = long(0)
for i in v[::-1]:
r = (r << 8) + i
return r
def _get_byte(self):
try:
r = struct.unpack("B", self._s.read())[0]
return r
except struct.error:
# Serial port read timeout
raise socket.timeout
def _put_bytes(self, data):
#print "DEBUG: _put_bytes:", data
for b in data:
self._s.write(struct.pack('B', b))
def _unescape(self, packet):
r = []
esc = False
for b in packet:
if esc:
r.append(b ^ 0x20)
esc = False
elif b == self.HDLC_CTLESC_BYTE:
esc = True
else:
r.append(b)
return r
def _escape(self, packet):
r = []
for b in packet:
if b == self.HDLC_FLAG_BYTE or b == self.HDLC_CTLESC_BYTE:
r.append(self.HDLC_CTLESC_BYTE)
r.append(b ^ 0x20)
else:
r.append(b)
return r
def debug(self, debug):
self._debug = debug
class SFClient:
def __init__(self, host, port, qsize=10):
self._in_queue = Queue(qsize)
self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._s.connect((host, port))
data = self._s.recv(2)
if data != 'U ':
print "Wrong handshake"
self._s.send("U ")
print "Connected"
thread.start_new_thread(self.run, ())
def run(self):
while True:
length = ord(self._s.recv(1))
data = self._s.recv(length)
data = [ord(c) for c in data][1:]
#print "Recv %d bytes" % (length), ActiveMessage(data)
if self._in_queue.full():
print "Warning: Buffer overflow"
self._in_queue.get()
p = RawPacket()
p.crc = 1
p.data = data
self._in_queue.put(p, block=False)
def read(self, timeout=0):
return self._in_queue.get()
def write(self, payload):
print "SFClient: write:", payload
if type(payload) != type([]):
# Assume this will be derived from Packet
payload = payload.payload()
payload = [0] + payload
self._s.send(chr(len(payload)))
self._s.send(''.join([chr(c) for c in payload]))
return True
class AM:
def __init__(self, s):
self._s = s
def read(self, timeout=None):
frame = self._s.read(timeout)
if frame:
return ActiveMessage(frame.data)
return frame
def write(self, packet, amid):
return self._s.write(ActiveMessage(packet, amid=amid))
class Packet:
"""
The Packet class offers a handy way to pack and unpack
binary data based on a given pattern.
"""
def _decode(self, v):
r = long(0)
for i in v:
r = (r << 8) + i
return r
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(int(val & 0xFF))
val = val >> 8
output.reverse()
return output
def __init__(self, desc, packet = None):
offset = 0
boffset = 0
sum = 0
for i in range(len(desc)-1, -1, -1):
(n, t, s) = desc[i]
if s == None:
if sum > 0:
desc[i] = (n, t, -sum)
break
sum += s
self.__dict__['_schema'] = [(t, s) for (n, t, s) in desc]
self.__dict__['_names'] = [n for (n, t, s) in desc]
self.__dict__['_values'] = []
if type(packet) == type([]):
for (t, s) in self._schema:
if t == 'int':
self._values.append(self._decode(packet[offset:offset + s]))
offset += s
elif t == 'bint':
doffset = 8 - (boffset + s)
self._values.append((packet[offset] >> doffset) & ((1<<s) - 1))
boffset += s
if boffset == 8:
offset += 1
boffset = 0
elif t == 'string':
self._values.append(''.join([chr(i) for i in packet[offset:offset + s]]))
offset += s
elif t == 'blob':
if s:
if s > 0:
self._values.append(packet[offset:offset + s])
offset += s
else:
self._values.append(packet[offset:s])
offset = len(packet) + s
else:
self._values.append(packet[offset:])
elif type(packet) == type(()):
for i in packet:
self._values.append(i)
else:
for v in self._schema:
self._values.append(None)
def __repr__(self):
return self._values.__repr__()
def __str__(self):
r = ""
for i in range(len(self._names)):
r += "%s: %s " % (self._names[i], self._values[i])
for i in range(len(self._names), len(self._values)):
r += "%s" % self._values[i]
return r
# return self._values.__str__()
# Implement the map behavior
def __getitem__(self, key):
return self.__getattr__(key)
def __setitem__(self, key, value):
self.__setattr__(key, value)
def __len__(self):
return len(self._values)
def keys(self):
return self._names
def values(self):
return self._values
# Implement the struct behavior
def __getattr__(self, name):
#print "DEBUG: __getattr__", name
| if type(name) == type(0):
return self._names[name]
else:
return self._values[self._names.index(name)] | identifier_body |
|
screen.component.ts | Filter;
@Input() metricTree: INode;
@Input() metricMapping: IMetricMapping;
subscriptions: Subscription[] = [];
renderer: WebGLRenderer;
scene: Scene = new Scene();
// (see https://github.com/nicolaspanel/three-orbitcontrols-ts/issues/1)
camera: THREE.PerspectiveCamera;
controls: OrbitControls;
spatialCursor: Object3D;
highlightBoxes: Object3D[] = [];
interactionHandler: InteractionHandler;
changetypeSymbols: ChangetypeSymbols;
public displayTooltip = true;
// use THREE.PerspectiveCamera instead of the imported PerspectiveCamera to avoid the warning that panning and zooming are disabled
view: AbstractView;
private isMergedView = false;
private requestAnimationFrameId: number;
private renderingIsPaused = false;
private screenOffset: Vector2 = new Vector2();
private screenDimensions: Vector2 = new Vector2();
private highlightBoxGeometry: BoxGeometry;
private highlightBoxMaterial: MeshBasicMaterial;
ngOnChanges(changes: SimpleChanges) {
if (this.activeViewType !== null && this.metricTree !== null && this.activeFilter !== null) {
this.isMergedView = this.activeViewType === ViewType.MERGED;
this.interactionHandler.setIsMergedView(this.isMergedView);
if (this.isMergedView) {
this.view = new MergedView(this.screenType, this.metricMapping);
if (this.screenType === ScreenType.RIGHT) {
this.pauseRendering();
this.displayTooltip = false;
}
document.querySelector('#stage').classList.remove('split');
} else {
this.view = new SplitView(this.screenType, this.metricMapping);
if (this.screenType === ScreenType.RIGHT) {
this.resumeRendering();
this.displayTooltip = true;
}
document.querySelector('#stage').classList.add('split');
}
this.resetScene();
this.prepareView(this.metricTree);
this.applyFilter(this.activeFilter);
this.handleViewChanged();
}
if (
changes.metricTree
&& changes.metricTree.currentValue
&& ElementAnalyzer.hasMetricValuesForCurrentCommit(
changes.metricTree.currentValue,
this.activeViewType === ViewType.MERGED,
this.screenType
)
) {
this.resetCamera();
this.resetControls();
}
}
ngOnInit() {
this.tooltipService.addScreen(this);
this.screenInteractionService.addScreen(this);
this.view = new SplitView(this.screenType, this.metricMapping);
this.createCamera();
this.createControls();
this.createLight();
this.createRenderer();
this.create3DCursor();
this.createSelectionHighlightBox();
this.createInteractionHandler();
this.changetypeSymbols = new ChangetypeSymbols();
this.initializeEventListeners();
this.render();
this.subscriptions.push(
this.focusService.elementFocussed$.subscribe((elementName) => {
this.focusElementByName(elementName);
this.comparisonPanelService.show({
elementName,
foundElement: ElementAnalyzer.findElementByName(this.metricTree, elementName)
});
})
);
this.subscriptions.push(
this.screenInteractionService.highlightedElements$.subscribe((highlightedElements) => {
if (this.highlightBoxes.length < highlightedElements.length) {
for (let i = 0; i < highlightedElements.length - this.highlightBoxes.length; i++) {
this.highlightBoxes.push(this.createSelectionHighlightBox());
}
} | );
this.subscriptions.push(
this.screenInteractionService.cursorState$.subscribe((state => {
if (state.position) {
this.spatialCursor.position.copy(state.position);
this.tooltipService.setMousePosition(this.getTooltipPosition(), this.screenType);
}
this.spatialCursor.visible = state.visible;
if (state.scale) {
this.spatialCursor.scale.set(1, state.scale, 1);
this.spatialCursor.children[0].scale.set(state.scale, 1, state.scale);
}
}))
);
}
public highlightElement(element: Object3D, highlightBox: Object3D) {
const addedMargin = VisualizationConfig.HIGHLIGHT_BOX_MARGIN;
if (element) {
highlightBox.visible = true && element.visible;
highlightBox.position.copy(new Vector3(element.position.x + element.scale.x / 2,
element.position.y + element.scale.y / 2, element.position.z + element.scale.z / 2));
highlightBox.scale.copy(element.scale).addScalar(addedMargin);
} else {
highlightBox.visible = false;
}
}
ngOnDestroy() {
this.subscriptions.forEach((subscription: Subscription) => {
subscription.unsubscribe();
});
}
createRenderer() {
this.renderer = new WebGLRenderer({antialias: true, preserveDrawingBuffer: true, logarithmicDepthBuffer: true});
this.renderer.setClearColor(0xf0f0f0);
this.renderer.setSize(this.getScreenWidth() - 0, window.innerHeight);
document.querySelector('#stage').appendChild(this.renderer.domElement);
}
updateRenderer() {
this.renderer.setSize(this.getScreenWidth() - 0, window.innerHeight);
}
createLight() {
const ambientLight = new THREE.AmbientLight(0xcccccc, 0.5);
this.scene.add(ambientLight);
const directionalLight = new THREE.DirectionalLight(0xffffff, 0.4);
directionalLight.position.set(0, 1, 0);
this.scene.add(directionalLight);
}
createCamera() {
this.camera = new THREE.PerspectiveCamera(
45,
(this.getScreenWidth() - 0) / window.innerHeight,
VisualizationConfig.CAMERA_NEAR,
VisualizationConfig.CAMERA_FAR
);
this.scene.add(this.camera);
}
updateCamera() {
this.camera.aspect = (this.getScreenWidth() - 0) / window.innerHeight;
this.camera.updateProjectionMatrix();
}
resetCamera() {
const root = this.getRoot();
if (!root) {return; }
// pythagoras
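// (the diagonal of the root block's footprint scales with the scene size, so the
// camera starts far enough back to frame the whole visualization)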
const diagonal = Math.sqrt(Math.pow(root.scale.x, 2) + Math.pow(root.scale.z, 2));
this.camera.position.x = root.scale.x * 2;
this.camera.position.y = diagonal * 1.5;
this.camera.position.z = root.scale.z * 2;
}
createControls() {
this.controls = new OrbitControls(this.camera, document.querySelector('#stage') as HTMLElement);
}
resetControls() {
const centralCoordinates = this.getCentralCoordinates();
this.controls.target.x = centralCoordinates.x;
this.controls.target.y = centralCoordinates.y;
this.controls.target.z = centralCoordinates.z;
}
render() {
this.requestAnimationFrameId = requestAnimationFrame(() => {
this.render();
});
// Canvas object offset
this.screenOffset.set(this.renderer.domElement.getBoundingClientRect().left, this.renderer.domElement.getBoundingClientRect().top);
// Canvas object size
this.screenDimensions.set(this.renderer.domElement.getBoundingClientRect().width,
this.renderer.domElement.getBoundingClientRect().height);
this.controls.update();
this.renderer.render(this.scene, this.camera);
this.interactionHandler.update(this.camera);
TWEEN.update();
}
pauseRendering() {
if (this.requestAnimationFrameId) {
cancelAnimationFrame(this.requestAnimationFrameId);
this.resetScene();
this.renderingIsPaused = true;
}
}
resumeRendering() {
if (this.renderingIsPaused) {
this.render();
this.renderingIsPaused = false;
}
}
prepareView(metricTree) {
if (metricTree.children.length === 0) {
return;
}
this.view.setMetricTree(metricTree);
this.view.recalculate();
this.view.getBlockElements().forEach((element) => {
this.scene.add(element);
});
if (this.view instanceof MergedView) {
this.view.calculateConnections(this.scene);
this.view.getConnections().forEach((blockConnection: BlockConnection) => {
this.scene.add(blockConnection.getCurve());
});
} else {
this.changetypeSymbols.addChangeTypeSymbols(this.scene);
}
}
createInteractionHandler() {
this.interactionHandler = new InteractionHandler(
this.scene,
this.renderer,
this.screenType,
this.isMergedView,
this.focusService,
this.screenInteractionService,
this.tooltipService,
this.spatialCursor
);
}
resetScene() {
for (let i = this.scene.children.length - 1; i >= 0; i--) {
const child = this.scene.children[i];
// only remove Blocks and Lines. Don't remove lights, cameras etc.
if (child.type === 'Mesh' || child.type === 'Line') {
this.scene.remove(child);
}
}
}
focusElementByName(elementName) {
const element = this.scene.getObjectByName(elementName);
if (!element) {
return;
}
const root = this.getRoot();
// pythagoras
const diagonal = Math.sqrt(Math.pow(root.scale.x, 2) + Math.pow(root.scale.z, 2));
new TWEEN.Tween(this.camera.position)
.to({
x: element.position.x + root.scale.x / 5,
y: element.position.y + diagonal / 5,
z: element.position.z + root.scale.z / 5
}, VisualizationConfig.CAMERA_ANIMATION_DURATION)
.easing(TWEEN.Easing.Sinusoidal.InOut)
.start();
| this.highlightBoxes.forEach((value, index) => this.highlightElement(this.scene.getObjectByName(highlightedElements[index]), value));
}) | random_line_split |
screen.component.ts | ;
@Input() metricTree: INode;
@Input() metricMapping: IMetricMapping;
subscriptions: Subscription[] = [];
renderer: WebGLRenderer;
scene: Scene = new Scene();
// (see https://github.com/nicolaspanel/three-orbitcontrols-ts/issues/1)
camera: THREE.PerspectiveCamera;
controls: OrbitControls;
spatialCursor: Object3D;
highlightBoxes: Object3D[] = [];
interactionHandler: InteractionHandler;
changetypeSymbols: ChangetypeSymbols;
public displayTooltip = true;
// use THREE.PerspectiveCamera instead of the imported PerspectiveCamera to avoid the warning that panning and zooming are disabled
view: AbstractView;
private isMergedView = false;
private requestAnimationFrameId: number;
private renderingIsPaused = false;
private screenOffset: Vector2 = new Vector2();
private screenDimensions: Vector2 = new Vector2();
private highlightBoxGeometry: BoxGeometry;
private highlightBoxMaterial: MeshBasicMaterial;
| (changes: SimpleChanges) {
if (this.activeViewType !== null && this.metricTree !== null && this.activeFilter !== null) {
this.isMergedView = this.activeViewType === ViewType.MERGED;
this.interactionHandler.setIsMergedView(this.isMergedView);
if (this.isMergedView) {
this.view = new MergedView(this.screenType, this.metricMapping);
if (this.screenType === ScreenType.RIGHT) {
this.pauseRendering();
this.displayTooltip = false;
}
document.querySelector('#stage').classList.remove('split');
} else {
this.view = new SplitView(this.screenType, this.metricMapping);
if (this.screenType === ScreenType.RIGHT) {
this.resumeRendering();
this.displayTooltip = true;
}
document.querySelector('#stage').classList.add('split');
}
this.resetScene();
this.prepareView(this.metricTree);
this.applyFilter(this.activeFilter);
this.handleViewChanged();
}
if (
changes.metricTree
&& changes.metricTree.currentValue
&& ElementAnalyzer.hasMetricValuesForCurrentCommit(
changes.metricTree.currentValue,
this.activeViewType === ViewType.MERGED,
this.screenType
)
) {
this.resetCamera();
this.resetControls();
}
}
ngOnInit() {
this.tooltipService.addScreen(this);
this.screenInteractionService.addScreen(this);
this.view = new SplitView(this.screenType, this.metricMapping);
this.createCamera();
this.createControls();
this.createLight();
this.createRenderer();
this.create3DCursor();
this.createSelectionHighlightBox();
this.createInteractionHandler();
this.changetypeSymbols = new ChangetypeSymbols();
this.initializeEventListeners();
this.render();
this.subscriptions.push(
this.focusService.elementFocussed$.subscribe((elementName) => {
this.focusElementByName(elementName);
this.comparisonPanelService.show({
elementName,
foundElement: ElementAnalyzer.findElementByName(this.metricTree, elementName)
});
})
);
this.subscriptions.push(
this.screenInteractionService.highlightedElements$.subscribe((highlightedElements) => {
if (this.highlightBoxes.length < highlightedElements.length) {
for (let i = 0; i < highlightedElements.length - this.highlightBoxes.length; i++) {
this.highlightBoxes.push(this.createSelectionHighlightBox());
}
}
this.highlightBoxes.forEach((value, index) => this.highlightElement(this.scene.getObjectByName(highlightedElements[index]), value));
})
);
this.subscriptions.push(
this.screenInteractionService.cursorState$.subscribe((state => {
if (state.position) {
this.spatialCursor.position.copy(state.position);
this.tooltipService.setMousePosition(this.getTooltipPosition(), this.screenType);
}
this.spatialCursor.visible = state.visible;
if (state.scale) {
this.spatialCursor.scale.set(1, state.scale, 1);
this.spatialCursor.children[0].scale.set(state.scale, 1, state.scale);
}
}))
);
}
public highlightElement(element: Object3D, highlightBox: Object3D) {
const addedMargin = VisualizationConfig.HIGHLIGHT_BOX_MARGIN;
if (element) {
highlightBox.visible = true && element.visible;
highlightBox.position.copy(new Vector3(element.position.x + element.scale.x / 2,
element.position.y + element.scale.y / 2, element.position.z + element.scale.z / 2));
highlightBox.scale.copy(element.scale).addScalar(addedMargin);
} else {
highlightBox.visible = false;
}
}
ngOnDestroy() {
this.subscriptions.forEach((subscription: Subscription) => {
subscription.unsubscribe();
});
}
createRenderer() {
this.renderer = new WebGLRenderer({antialias: true, preserveDrawingBuffer: true, logarithmicDepthBuffer: true});
this.renderer.setClearColor(0xf0f0f0);
this.renderer.setSize(this.getScreenWidth() - 0, window.innerHeight);
document.querySelector('#stage').appendChild(this.renderer.domElement);
}
updateRenderer() {
this.renderer.setSize(this.getScreenWidth() - 0, window.innerHeight);
}
createLight() {
const ambientLight = new THREE.AmbientLight(0xcccccc, 0.5);
this.scene.add(ambientLight);
const directionalLight = new THREE.DirectionalLight(0xffffff, 0.4);
directionalLight.position.set(0, 1, 0);
this.scene.add(directionalLight);
}
createCamera() {
this.camera = new THREE.PerspectiveCamera(
45,
(this.getScreenWidth() - 0) / window.innerHeight,
VisualizationConfig.CAMERA_NEAR,
VisualizationConfig.CAMERA_FAR
);
this.scene.add(this.camera);
}
updateCamera() {
this.camera.aspect = (this.getScreenWidth() - 0) / window.innerHeight;
this.camera.updateProjectionMatrix();
}
resetCamera() {
const root = this.getRoot();
if (!root) {return; }
// pythagoras
const diagonal = Math.sqrt(Math.pow(root.scale.x, 2) + Math.pow(root.scale.z, 2));
this.camera.position.x = root.scale.x * 2;
this.camera.position.y = diagonal * 1.5;
this.camera.position.z = root.scale.z * 2;
}
createControls() {
this.controls = new OrbitControls(this.camera, document.querySelector('#stage') as HTMLElement);
}
resetControls() {
const centralCoordinates = this.getCentralCoordinates();
this.controls.target.x = centralCoordinates.x;
this.controls.target.y = centralCoordinates.y;
this.controls.target.z = centralCoordinates.z;
}
render() {
this.requestAnimationFrameId = requestAnimationFrame(() => {
this.render();
});
// Canvas object offset
this.screenOffset.set(this.renderer.domElement.getBoundingClientRect().left, this.renderer.domElement.getBoundingClientRect().top);
// Canvas object size
this.screenDimensions.set(this.renderer.domElement.getBoundingClientRect().width,
this.renderer.domElement.getBoundingClientRect().height);
this.controls.update();
this.renderer.render(this.scene, this.camera);
this.interactionHandler.update(this.camera);
TWEEN.update();
}
pauseRendering() {
if (this.requestAnimationFrameId) {
cancelAnimationFrame(this.requestAnimationFrameId);
this.resetScene();
this.renderingIsPaused = true;
}
}
resumeRendering() {
if (this.renderingIsPaused) {
this.render();
this.renderingIsPaused = false;
}
}
prepareView(metricTree) {
if (metricTree.children.length === 0) {
return;
}
this.view.setMetricTree(metricTree);
this.view.recalculate();
this.view.getBlockElements().forEach((element) => {
this.scene.add(element);
});
if (this.view instanceof MergedView) {
this.view.calculateConnections(this.scene);
this.view.getConnections().forEach((blockConnection: BlockConnection) => {
this.scene.add(blockConnection.getCurve());
});
} else {
this.changetypeSymbols.addChangeTypeSymbols(this.scene);
}
}
createInteractionHandler() {
this.interactionHandler = new InteractionHandler(
this.scene,
this.renderer,
this.screenType,
this.isMergedView,
this.focusService,
this.screenInteractionService,
this.tooltipService,
this.spatialCursor
);
}
resetScene() {
for (let i = this.scene.children.length - 1; i >= 0; i--) {
const child = this.scene.children[i];
// only remove Blocks and Lines. Don't remove lights, cameras etc.
if (child.type === 'Mesh' || child.type === 'Line') {
this.scene.remove(child);
}
}
}
focusElementByName(elementName) {
const element = this.scene.getObjectByName(elementName);
if (!element) {
return;
}
const root = this.getRoot();
// pythagoras
const diagonal = Math.sqrt(Math.pow(root.scale.x, 2) + Math.pow(root.scale.z, 2));
new TWEEN.Tween(this.camera.position)
.to({
x: element.position.x + root.scale.x / 5,
y: element.position.y + diagonal / 5,
z: element.position.z + root.scale.z / 5
}, VisualizationConfig.CAMERA_ANIMATION_DURATION)
.easing(TWEEN.Easing.Sinusoidal.InOut)
.start();
| ngOnChanges | identifier_name |
screen.component.ts | ;
@Input() metricTree: INode;
@Input() metricMapping: IMetricMapping;
subscriptions: Subscription[] = [];
renderer: WebGLRenderer;
scene: Scene = new Scene();
// (see https://github.com/nicolaspanel/three-orbitcontrols-ts/issues/1)
camera: THREE.PerspectiveCamera;
controls: OrbitControls;
spatialCursor: Object3D;
highlightBoxes: Object3D[] = [];
interactionHandler: InteractionHandler;
changetypeSymbols: ChangetypeSymbols;
public displayTooltip = true;
// use THREE.PerspectiveCamera instead of the imported PerspectiveCamera to avoid the warning that panning and zooming are disabled
view: AbstractView;
private isMergedView = false;
private requestAnimationFrameId: number;
private renderingIsPaused = false;
private screenOffset: Vector2 = new Vector2();
private screenDimensions: Vector2 = new Vector2();
private highlightBoxGeometry: BoxGeometry;
private highlightBoxMaterial: MeshBasicMaterial;
ngOnChanges(changes: SimpleChanges) {
if (this.activeViewType !== null && this.metricTree !== null && this.activeFilter !== null) {
this.isMergedView = this.activeViewType === ViewType.MERGED;
this.interactionHandler.setIsMergedView(this.isMergedView);
if (this.isMergedView) {
this.view = new MergedView(this.screenType, this.metricMapping);
if (this.screenType === ScreenType.RIGHT) {
this.pauseRendering();
this.displayTooltip = false;
}
document.querySelector('#stage').classList.remove('split');
} else {
this.view = new SplitView(this.screenType, this.metricMapping);
if (this.screenType === ScreenType.RIGHT) {
this.resumeRendering();
this.displayTooltip = true;
}
document.querySelector('#stage').classList.add('split');
}
this.resetScene();
this.prepareView(this.metricTree);
this.applyFilter(this.activeFilter);
this.handleViewChanged();
}
if (
changes.metricTree
&& changes.metricTree.currentValue
&& ElementAnalyzer.hasMetricValuesForCurrentCommit(
changes.metricTree.currentValue,
this.activeViewType === ViewType.MERGED,
this.screenType
)
) {
this.resetCamera();
this.resetControls();
}
}
ngOnInit() {
this.tooltipService.addScreen(this);
this.screenInteractionService.addScreen(this);
this.view = new SplitView(this.screenType, this.metricMapping);
this.createCamera();
this.createControls();
this.createLight();
this.createRenderer();
this.create3DCursor();
this.createSelectionHighlightBox();
this.createInteractionHandler();
this.changetypeSymbols = new ChangetypeSymbols();
this.initializeEventListeners();
this.render();
this.subscriptions.push(
this.focusService.elementFocussed$.subscribe((elementName) => {
this.focusElementByName(elementName);
this.comparisonPanelService.show({
elementName,
foundElement: ElementAnalyzer.findElementByName(this.metricTree, elementName)
});
})
);
this.subscriptions.push(
this.screenInteractionService.highlightedElements$.subscribe((highlightedElements) => {
if (this.highlightBoxes.length < highlightedElements.length) {
for (let i = 0; i < highlightedElements.length - this.highlightBoxes.length; i++) {
this.highlightBoxes.push(this.createSelectionHighlightBox());
}
}
this.highlightBoxes.forEach((value, index) => this.highlightElement(this.scene.getObjectByName(highlightedElements[index]), value));
})
);
this.subscriptions.push(
this.screenInteractionService.cursorState$.subscribe((state => {
if (state.position) {
this.spatialCursor.position.copy(state.position);
this.tooltipService.setMousePosition(this.getTooltipPosition(), this.screenType);
}
this.spatialCursor.visible = state.visible;
if (state.scale) {
this.spatialCursor.scale.set(1, state.scale, 1);
this.spatialCursor.children[0].scale.set(state.scale, 1, state.scale);
}
}))
);
}
public highlightElement(element: Object3D, highlightBox: Object3D) {
const addedMargin = VisualizationConfig.HIGHLIGHT_BOX_MARGIN;
if (element) {
highlightBox.visible = true && element.visible;
highlightBox.position.copy(new Vector3(element.position.x + element.scale.x / 2,
element.position.y + element.scale.y / 2, element.position.z + element.scale.z / 2));
highlightBox.scale.copy(element.scale).addScalar(addedMargin);
} else {
highlightBox.visible = false;
}
}
ngOnDestroy() {
this.subscriptions.forEach((subscription: Subscription) => {
subscription.unsubscribe();
});
}
createRenderer() {
this.renderer = new WebGLRenderer({antialias: true, preserveDrawingBuffer: true, logarithmicDepthBuffer: true});
this.renderer.setClearColor(0xf0f0f0);
this.renderer.setSize(this.getScreenWidth() - 0, window.innerHeight);
document.querySelector('#stage').appendChild(this.renderer.domElement);
}
updateRenderer() {
this.renderer.setSize(this.getScreenWidth() - 0, window.innerHeight);
}
createLight() {
const ambientLight = new THREE.AmbientLight(0xcccccc, 0.5);
this.scene.add(ambientLight);
const directionalLight = new THREE.DirectionalLight(0xffffff, 0.4);
directionalLight.position.set(0, 1, 0);
this.scene.add(directionalLight);
}
createCamera() {
this.camera = new THREE.PerspectiveCamera(
45,
(this.getScreenWidth() - 0) / window.innerHeight,
VisualizationConfig.CAMERA_NEAR,
VisualizationConfig.CAMERA_FAR
);
this.scene.add(this.camera);
}
updateCamera() {
this.camera.aspect = (this.getScreenWidth() - 0) / window.innerHeight;
this.camera.updateProjectionMatrix();
}
resetCamera() {
const root = this.getRoot();
if (!root) {return; }
// pythagoras
const diagonal = Math.sqrt(Math.pow(root.scale.x, 2) + Math.pow(root.scale.z, 2));
this.camera.position.x = root.scale.x * 2;
this.camera.position.y = diagonal * 1.5;
this.camera.position.z = root.scale.z * 2;
}
createControls() {
this.controls = new OrbitControls(this.camera, document.querySelector('#stage') as HTMLElement);
}
resetControls() {
const centralCoordinates = this.getCentralCoordinates();
this.controls.target.x = centralCoordinates.x;
this.controls.target.y = centralCoordinates.y;
this.controls.target.z = centralCoordinates.z;
}
render() {
this.requestAnimationFrameId = requestAnimationFrame(() => {
this.render();
});
// Canvas object offset
this.screenOffset.set(this.renderer.domElement.getBoundingClientRect().left, this.renderer.domElement.getBoundingClientRect().top);
// Canvas object size
this.screenDimensions.set(this.renderer.domElement.getBoundingClientRect().width,
this.renderer.domElement.getBoundingClientRect().height);
this.controls.update();
this.renderer.render(this.scene, this.camera);
this.interactionHandler.update(this.camera);
TWEEN.update();
}
pauseRendering() {
if (this.requestAnimationFrameId) |
}
resumeRendering() {
if (this.renderingIsPaused) {
this.render();
this.renderingIsPaused = false;
}
}
prepareView(metricTree) {
if (metricTree.children.length === 0) {
return;
}
this.view.setMetricTree(metricTree);
this.view.recalculate();
this.view.getBlockElements().forEach((element) => {
this.scene.add(element);
});
if (this.view instanceof MergedView) {
this.view.calculateConnections(this.scene);
this.view.getConnections().forEach((blockConnection: BlockConnection) => {
this.scene.add(blockConnection.getCurve());
});
} else {
this.changetypeSymbols.addChangeTypeSymbols(this.scene);
}
}
createInteractionHandler() {
this.interactionHandler = new InteractionHandler(
this.scene,
this.renderer,
this.screenType,
this.isMergedView,
this.focusService,
this.screenInteractionService,
this.tooltipService,
this.spatialCursor
);
}
resetScene() {
for (let i = this.scene.children.length - 1; i >= 0; i--) {
const child = this.scene.children[i];
// only remove Blocks and Lines. Don't remove lights, cameras etc.
if (child.type === 'Mesh' || child.type === 'Line') {
this.scene.remove(child);
}
}
}
focusElementByName(elementName) {
const element = this.scene.getObjectByName(elementName);
if (!element) {
return;
}
const root = this.getRoot();
// pythagoras
const diagonal = Math.sqrt(Math.pow(root.scale.x, 2) + Math.pow(root.scale.z, 2));
new TWEEN.Tween(this.camera.position)
.to({
x: element.position.x + root.scale.x / 5,
y: element.position.y + diagonal / 5,
z: element.position.z + root.scale.z / 5
}, VisualizationConfig.CAMERA_ANIMATION_DURATION)
.easing(TWEEN.Easing.Sinusoidal.InOut)
.start | {
cancelAnimationFrame(this.requestAnimationFrameId);
this.resetScene();
this.renderingIsPaused = true;
} | conditional_block |
screen.component.ts | this.activeViewType !== null && this.metricTree !== null && this.activeFilter !== null) {
this.isMergedView = this.activeViewType === ViewType.MERGED;
this.interactionHandler.setIsMergedView(this.isMergedView);
if (this.isMergedView) {
this.view = new MergedView(this.screenType, this.metricMapping);
if (this.screenType === ScreenType.RIGHT) {
this.pauseRendering();
this.displayTooltip = false;
}
document.querySelector('#stage').classList.remove('split');
} else {
this.view = new SplitView(this.screenType, this.metricMapping);
if (this.screenType === ScreenType.RIGHT) {
this.resumeRendering();
this.displayTooltip = true;
}
document.querySelector('#stage').classList.add('split');
}
this.resetScene();
this.prepareView(this.metricTree);
this.applyFilter(this.activeFilter);
this.handleViewChanged();
}
if (
changes.metricTree
&& changes.metricTree.currentValue
&& ElementAnalyzer.hasMetricValuesForCurrentCommit(
changes.metricTree.currentValue,
this.activeViewType === ViewType.MERGED,
this.screenType
)
) {
this.resetCamera();
this.resetControls();
}
}
ngOnInit() {
this.tooltipService.addScreen(this);
this.screenInteractionService.addScreen(this);
this.view = new SplitView(this.screenType, this.metricMapping);
this.createCamera();
this.createControls();
this.createLight();
this.createRenderer();
this.create3DCursor();
this.createSelectionHighlightBox();
this.createInteractionHandler();
this.changetypeSymbols = new ChangetypeSymbols();
this.initializeEventListeners();
this.render();
this.subscriptions.push(
this.focusService.elementFocussed$.subscribe((elementName) => {
this.focusElementByName(elementName);
this.comparisonPanelService.show({
elementName,
foundElement: ElementAnalyzer.findElementByName(this.metricTree, elementName)
});
})
);
this.subscriptions.push(
this.screenInteractionService.highlightedElements$.subscribe((highlightedElements) => {
if (this.highlightBoxes.length < highlightedElements.length) {
for (let i = 0; i < highlightedElements.length - this.highlightBoxes.length; i++) {
this.highlightBoxes.push(this.createSelectionHighlightBox());
}
}
this.highlightBoxes.forEach((value, index) => this.highlightElement(this.scene.getObjectByName(highlightedElements[index]), value));
})
);
this.subscriptions.push(
this.screenInteractionService.cursorState$.subscribe((state => {
if (state.position) {
this.spatialCursor.position.copy(state.position);
this.tooltipService.setMousePosition(this.getTooltipPosition(), this.screenType);
}
this.spatialCursor.visible = state.visible;
if (state.scale) {
this.spatialCursor.scale.set(1, state.scale, 1);
this.spatialCursor.children[0].scale.set(state.scale, 1, state.scale);
}
}))
);
}
public highlightElement(element: Object3D, highlightBox: Object3D) {
const addedMargin = VisualizationConfig.HIGHLIGHT_BOX_MARGIN;
if (element) {
highlightBox.visible = true && element.visible;
highlightBox.position.copy(new Vector3(element.position.x + element.scale.x / 2,
element.position.y + element.scale.y / 2, element.position.z + element.scale.z / 2));
highlightBox.scale.copy(element.scale).addScalar(addedMargin);
} else {
highlightBox.visible = false;
}
}
ngOnDestroy() {
this.subscriptions.forEach((subscription: Subscription) => {
subscription.unsubscribe();
});
}
createRenderer() {
this.renderer = new WebGLRenderer({antialias: true, preserveDrawingBuffer: true, logarithmicDepthBuffer: true});
this.renderer.setClearColor(0xf0f0f0);
this.renderer.setSize(this.getScreenWidth() - 0, window.innerHeight);
document.querySelector('#stage').appendChild(this.renderer.domElement);
}
updateRenderer() {
this.renderer.setSize(this.getScreenWidth() - 0, window.innerHeight);
}
createLight() {
const ambientLight = new THREE.AmbientLight(0xcccccc, 0.5);
this.scene.add(ambientLight);
const directionalLight = new THREE.DirectionalLight(0xffffff, 0.4);
directionalLight.position.set(0, 1, 0);
this.scene.add(directionalLight);
}
createCamera() {
this.camera = new THREE.PerspectiveCamera(
45,
(this.getScreenWidth() - 0) / window.innerHeight,
VisualizationConfig.CAMERA_NEAR,
VisualizationConfig.CAMERA_FAR
);
this.scene.add(this.camera);
}
updateCamera() {
this.camera.aspect = (this.getScreenWidth() - 0) / window.innerHeight;
this.camera.updateProjectionMatrix();
}
resetCamera() {
const root = this.getRoot();
if (!root) {return; }
// pythagoras
const diagonal = Math.sqrt(Math.pow(root.scale.x, 2) + Math.pow(root.scale.z, 2));
this.camera.position.x = root.scale.x * 2;
this.camera.position.y = diagonal * 1.5;
this.camera.position.z = root.scale.z * 2;
}
createControls() {
this.controls = new OrbitControls(this.camera, document.querySelector('#stage') as HTMLElement);
}
resetControls() {
const centralCoordinates = this.getCentralCoordinates();
this.controls.target.x = centralCoordinates.x;
this.controls.target.y = centralCoordinates.y;
this.controls.target.z = centralCoordinates.z;
}
render() {
this.requestAnimationFrameId = requestAnimationFrame(() => {
this.render();
});
// Canvas object offset
this.screenOffset.set(this.renderer.domElement.getBoundingClientRect().left, this.renderer.domElement.getBoundingClientRect().top);
// Canvas object size
this.screenDimensions.set(this.renderer.domElement.getBoundingClientRect().width,
this.renderer.domElement.getBoundingClientRect().height);
this.controls.update();
this.renderer.render(this.scene, this.camera);
this.interactionHandler.update(this.camera);
TWEEN.update();
}
pauseRendering() {
if (this.requestAnimationFrameId) {
cancelAnimationFrame(this.requestAnimationFrameId);
this.resetScene();
this.renderingIsPaused = true;
}
}
resumeRendering() {
if (this.renderingIsPaused) {
this.render();
this.renderingIsPaused = false;
}
}
prepareView(metricTree) {
if (metricTree.children.length === 0) {
return;
}
this.view.setMetricTree(metricTree);
this.view.recalculate();
this.view.getBlockElements().forEach((element) => {
this.scene.add(element);
});
if (this.view instanceof MergedView) {
this.view.calculateConnections(this.scene);
this.view.getConnections().forEach((blockConnection: BlockConnection) => {
this.scene.add(blockConnection.getCurve());
});
} else {
this.changetypeSymbols.addChangeTypeSymbols(this.scene);
}
}
createInteractionHandler() {
this.interactionHandler = new InteractionHandler(
this.scene,
this.renderer,
this.screenType,
this.isMergedView,
this.focusService,
this.screenInteractionService,
this.tooltipService,
this.spatialCursor
);
}
resetScene() {
for (let i = this.scene.children.length - 1; i >= 0; i--) {
const child = this.scene.children[i];
// only remove Blocks and Lines. Don't remove lights, cameras etc.
if (child.type === 'Mesh' || child.type === 'Line') {
this.scene.remove(child);
}
}
}
focusElementByName(elementName) {
const element = this.scene.getObjectByName(elementName);
if (!element) {
return;
}
const root = this.getRoot();
// pythagoras
const diagonal = Math.sqrt(Math.pow(root.scale.x, 2) + Math.pow(root.scale.z, 2));
new TWEEN.Tween(this.camera.position)
.to({
x: element.position.x + root.scale.x / 5,
y: element.position.y + diagonal / 5,
z: element.position.z + root.scale.z / 5
}, VisualizationConfig.CAMERA_ANIMATION_DURATION)
.easing(TWEEN.Easing.Sinusoidal.InOut)
.start();
new TWEEN.Tween(this.controls.target)
.to({
x: element.position.x + element.scale.x / 2,
y: element.position.y,
z: element.position.z + element.scale.z / 2
}, VisualizationConfig.CAMERA_ANIMATION_DURATION)
.easing(TWEEN.Easing.Sinusoidal.InOut)
.start();
}
private getCentralCoordinates() {
const root = this.getRoot();
if (!root) {
console.warn(`no root found in screen #${this.screenType}`);
return;
}
return {
x: root.scale.x / 2,
y: 0,
z: root.scale.z / 2
};
}
private getRoot(): Object3D {
return this.scene.getObjectByName(VisualizationConfig.ROOT_NAME);
}
private getScreenWidth() {
if (this.isMergedView) {
return window.innerWidth;
}
return window.innerWidth / 2;
}
private initializeEventListeners() | {
window.addEventListener('resize', this.handleViewChanged.bind(this), false);
} | identifier_body |
|
zun.go | fmt.Errorf("mysql op is error : %s ", err)
}
return err
}
func CreateZunContainerOpts(pod *v1.Pod) (createOpts zun_container.CreateOpts, err error) {
if pod == nil {
err = fmt.Errorf("Pod is null.CreateZunContainerOpts function in zun ")
return
}
for _, container := range pod.Spec.Containers {
// get pod name
podName := fmt.Sprintf("%s-%s", pod.Name, container.Name)
if podName != "" {
createOpts.Name = podName
}
//get pod image
createOpts.Image = container.Image
createOpts.ImageDriver = "glance"
// open container console in zun-ui
isInteractive := true
createOpts.Interactive = &isInteractive
// show numa info
createOpts.CpuPolicy = "shared"
//isprivileged := true
//createOpts.Privileged = &isprivileged
// Specify the compute node the container is created on
createOpts.Host = "compute1"
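// NOTE: the host is hard-coded, so every container is scheduled onto "compute1".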
//get pod env
env := make(map[string]string, len(container.Env))
for _, v := range container.Env {
env[v.Name] = v.Value
if v.Name == "HADOOP" || v.Name == "SPARK" {
tempName := pod.Name
clusterName := tempName[0:strings.LastIndex(tempName[0:strings.LastIndex(tempName, "-")], "-")]
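// The cluster name is derived by stripping the last two '-'-separated segments
// from the pod name (e.g. "mycluster-master-0" -> "mycluster"); the example name
// here is only illustrative.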
//clusterName = clusterName[0:strings.LastIndex(clusterName, "-")]
if !isExitHadoopCluster(pod.Namespace, clusterName) {
if numberOfNodes, err := strconv.Atoi(v.Value); err == nil {
initErr := initHadoopCluster(&HadoopCluster{
namespace: pod.Namespace,
clusterName: clusterName,
numberOfNodes: numberOfNodes,
availableNumber: 0,
createTime: time.Now().String(),
hadoopSlaveId: "",
hadoopMasterId: "",
clusterStatus: "InitCreating",
})
if initErr != nil {
fmt.Println(initErr)
}
}
} else {
continue
}
}
}
createOpts.Environment = env
//get pod labels
createOpts.Labels = pod.Labels
//get work dir
createOpts.Workdir = container.WorkingDir
//get image pull policy
createOpts.ImagePullPolicy = strings.ToLower(string(container.ImagePullPolicy))
//get pod command
command := ""
if len(container.Command) > 0 {
for _, v := range container.Command {
command = command + v + " "
}
}
if len(container.Args) > 0 {
for _, v := range container.Args {
command = command + v + " "
}
}
if command != "" {
createOpts.Command = command
}
//get pod resource
if container.Resources.Limits != nil {
cpuLimit := float64(1)
if _, ok := container.Resources.Limits[v1.ResourceCPU]; ok {
cpuLimit = float64(container.Resources.Limits.Cpu().MilliValue()) / 1000.00
}
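// Kubernetes reports memory in bytes; dividing by 1024*1024 converts it to MiB
// before it is handed to Zun.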
memoryLimit := 0.5
if _, ok := container.Resources.Limits[v1.ResourceMemory]; ok {
memoryLimit = float64(container.Resources.Limits.Memory().Value()) / (1024 * 1024)
}
createOpts.Cpu = cpuLimit
createOpts.Memory = int(memoryLimit)
} else if container.Resources.Requests != nil {
cpuRequests := float64(1)
if _, ok := container.Resources.Requests[v1.ResourceCPU]; ok {
cpuRequests = float64(container.Resources.Requests.Cpu().MilliValue()) / 1000.00
}
memoryRequests := 0.5
if _, ok := container.Resources.Requests[v1.ResourceMemory]; ok {
memoryRequests = float64(container.Resources.Requests.Memory().Value()) / (1024 * 1024)
}
createOpts.Cpu = cpuRequests
createOpts.Memory = int(memoryRequests)
}
}
return createOpts, nil
}
// UpdatePod takes a Kubernetes Pod and updates it within the provider.
func (p *ZunProvider) UpdatePod(ctx context.Context, pod *v1.Pod) error {
return nil
}
// DeletePod takes a Kubernetes Pod and deletes it from the provider.
func (p *ZunProvider) DeletePod(ctx context.Context, pod *v1.Pod) error {
nn := fmt.Sprintf("%s-%s", pod.Namespace, pod.Name)
if v := ContainerIdQuery(nn); v != "" {
err := zun_container.Delete(p.ZunClient, v, true).ExtractErr()
if err != nil {
return err
}
PodDelete(nn)
if pod.Namespace != "" && pod.Name != "" {
tempName := pod.Name
clusterName := tempName[0:strings.LastIndex(tempName[0:strings.LastIndex(tempName, "-")], "-")]
//clusterName = clusterName[0:strings.LastIndex(clusterName, "-")]
//deleteHadoopCluster(pod.Namespace, clusterName)
deletehadoopNode(pod.Namespace, clusterName, v)
}
return nil
}
return fmt.Errorf("Delete Pod is fail, pod is not found! ")
}
// GetPod retrieves a pod by name from the provider (can be cached).
func (p *ZunProvider) GetPod(ctx context.Context, namespace, name string) (*v1.Pod, error) {
nn := fmt.Sprintf("%s-%s", namespace, name)
if v := ContainerIdQuery(nn); v != "" {
container, err := zun_container.Get(p.ZunClient, v).Extract()
if err != nil {
return nil, fmt.Errorf("zun_container.Get(p.ZunClient,v.ContainerID).Extract() is error : %s ", err)
}
zunPod := PodQuery(nn)
podinfo := zunPodToPodinfo(zunPod)
if _, ok := container.Environment["HADOOP"]; ok {
p.ContainerHadoopNodeFactory(container, namespace, name,"HADOOP")
}else if _, ok := container.Environment["SPARK"]; ok {
p.ContainerHadoopNodeFactory(container, namespace, name,"SPARK")
}
return containerToPod(container, podinfo)
}
return nil, nil
}
func zunPodToPodinfo(zunPod *ZunPod) (podinfo *PodOTemplate) {
var podTemplate = new(PodOTemplate)
podTemplate.Kind = zunPod.podKind
var metadata Metadata
metadata.Labels = map[string]string{
"PodName": zunPod.Name,
"ClusterName": zunPod.podClustername,
"NodeName": zunPod.nodeName,
"Namespace": zunPod.NameSpace,
"UID": zunPod.podUid,
"CreationTimestamp": zunPod.podCreatetime,
}
// create container info
metadata.Name = zunPod.NamespaceName
podTemplate.Metadata = metadata
return podTemplate
}
func containerToPod(c *zun_container.Container, podInfo *PodOTemplate) (pod *v1.Pod, err error) {
containers := make([]v1.Container, 1)
containerStatuses := make([]v1.ContainerStatus, 0)
containerMemoryMB := 0
if c.Memory != "" {
containerMemory, err := strconv.Atoi(c.Memory)
if err != nil {
log.Println(err)
}
containerMemoryMB = containerMemory
}
container := v1.Container{
Name: c.Name,
Image: c.Image,
Command: c.Command,
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%g", float64(c.CPU))),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dM", containerMemoryMB)),
},
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%g", float64(c.CPU*1024/100))),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dM", containerMemoryMB)),
},
},
}
containers = append(containers, container)
containerStatus := v1.ContainerStatus{
Name: c.Name,
State: zunContainerStausToContainerStatus(c),
LastTerminationState: zunContainerStausToContainerStatus(c),
Ready: zunStatusToPodPhase(c.Status) == v1.PodRunning,
RestartCount: int32(0),
Image: c.Image,
ImageID: "",
ContainerID: c.UUID,
}
// Add to containerStatuses
containerStatuses = append(containerStatuses, containerStatus)
var containerStartTime metav1.Time
containerStartTime = metav1.NewTime(time.Time{})
p := v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v | containerToPod | identifier_name |
zun.go
fmt.Errorf("mysql op is error : %s ", err)
}
return err
}
func CreateZunContainerOpts(pod *v1.Pod) (createOpts zun_container.CreateOpts, err error) {
if pod == nil {
err = fmt.Errorf("Pod is null.CreateZunContainerOpts function in zun ")
return
}
for _, container := range pod.Spec.Containers {
// get pod name
podName := fmt.Sprintf("%s-%s", pod.Name, container.Name)
if podName != "" {
createOpts.Name = podName
}
//get pod image
createOpts.Image = container.Image
createOpts.ImageDriver = "glance"
// open container console in zun-ui
isInteractive := true
createOpts.Interactive = &isInteractive
// show numa info
createOpts.CpuPolicy = "shared"
//isprivileged := true
//createOpts.Privileged = &isprivileged
// Specify the node created by the container
createOpts.Host = "compute1"
//get pod env
env := make(map[string]string, len(container.Env))
for _, v := range container.Env {
env[v.Name] = v.Value
if v.Name == "HADOOP" || v.Name == "SPARK" {
tempName := pod.Name
clusterName := tempName[0:strings.LastIndex(tempName[0:strings.LastIndex(tempName, "-")], "-")]
//clusterName = clusterName[0:strings.LastIndex(clusterName, "-")]
if !isExitHadoopCluster(pod.Namespace, clusterName) {
if numberOfNodes, err := strconv.Atoi(v.Value); err == nil {
initErr := initHadoopCluster(&HadoopCluster{
namespace: pod.Namespace,
clusterName: clusterName,
numberOfNodes: numberOfNodes,
availableNumber: 0,
createTime: time.Now().String(),
hadoopSlaveId: "",
hadoopMasterId: "",
clusterStatus: "InitCreating",
})
if initErr != nil {
fmt.Println(initErr)
}
}
} else {
continue
}
}
}
createOpts.Environment = env
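// Note: for containers carrying a HADOOP or SPARK env var, the variable's value is
// interpreted above as the requested number of cluster nodes and a cluster record is
// initialised once per namespace/cluster pair; the env map itself is passed to Zun unchanged.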
//get pod labels
createOpts.Labels = pod.Labels
//get work dir
createOpts.Workdir = container.WorkingDir
//get image pull policy
createOpts.ImagePullPolicy = strings.ToLower(string(container.ImagePullPolicy))
//get pod command
command := ""
if len(container.Command) > 0 {
for _, v := range container.Command {
command = command + v + " "
}
}
if len(container.Args) > 0 {
for _, v := range container.Args {
command = command + v + " "
}
}
if command != "" {
createOpts.Command = command
}
//get pod resource
if container.Resources.Limits != nil {
cpuLimit := float64(1)
if _, ok := container.Resources.Limits[v1.ResourceCPU]; ok {
cpuLimit = float64(container.Resources.Limits.Cpu().MilliValue()) / 1000.00
}
memoryLimit := 0.5
if _, ok := container.Resources.Limits[v1.ResourceMemory]; ok {
memoryLimit = float64(container.Resources.Limits.Memory().Value()) / (1024 * 1024)
}
createOpts.Cpu = cpuLimit
createOpts.Memory = int(memoryLimit)
} else if container.Resources.Requests != nil {
cpuRequests := float64(1)
if _, ok := container.Resources.Requests[v1.ResourceCPU]; ok {
cpuRequests = float64(container.Resources.Requests.Cpu().MilliValue()) / 1000.00
}
memoryRequests := 0.5
if _, ok := container.Resources.Requests[v1.ResourceMemory]; ok {
memoryRequests = float64(container.Resources.Requests.Memory().Value()) / (1024 * 1024)
}
createOpts.Cpu = cpuRequests
createOpts.Memory = int(memoryRequests)
}
}
return createOpts, nil
}
// UpdatePod takes a Kubernetes Pod and updates it within the provider.
func (p *ZunProvider) UpdatePod(ctx context.Context, pod *v1.Pod) error {
return nil
}
// DeletePod takes a Kubernetes Pod and deletes it from the provider.
func (p *ZunProvider) DeletePod(ctx context.Context, pod *v1.Pod) error {
nn := fmt.Sprintf("%s-%s", pod.Namespace, pod.Name)
if v := ContainerIdQuery(nn); v != "" {
err := zun_container.Delete(p.ZunClient, v, true).ExtractErr()
if err != nil {
return err
}
PodDelete(nn)
if pod.Namespace != "" && pod.Name != "" {
tempName := pod.Name
clusterName := tempName[0:strings.LastIndex(tempName[0:strings.LastIndex(tempName, "-")], "-")]
//clusterName = clusterName[0:strings.LastIndex(clusterName, "-")]
//deleteHadoopCluster(pod.Namespace, clusterName)
deletehadoopNode(pod.Namespace, clusterName, v)
}
return nil
}
return fmt.Errorf("Delete Pod is fail, pod is not found! ")
}
// GetPod retrieves a pod by name from the provider (can be cached).
func (p *ZunProvider) GetPod(ctx context.Context, namespace, name string) (*v1.Pod, error) {
nn := fmt.Sprintf("%s-%s", namespace, name)
if v := ContainerIdQuery(nn); v != "" {
container, err := zun_container.Get(p.ZunClient, v).Extract()
if err != nil {
return nil, fmt.Errorf("zun_container.Get(p.ZunClient,v.ContainerID).Extract() is error : %s ", err)
}
zunPod := PodQuery(nn)
podinfo := zunPodToPodinfo(zunPod)
if _, ok := container.Environment["HADOOP"]; ok {
p.ContainerHadoopNodeFactory(container, namespace, name,"HADOOP")
}else if _, ok := container.Environment["SPARK"]; ok {
p.ContainerHadoopNodeFactory(container, namespace, name,"SPARK")
}
return containerToPod(container, podinfo)
}
return nil, nil
}
func zunPodToPodinfo(zunPod *ZunPod) (podinfo *PodOTemplate) {
var podTemplate = new(PodOTemplate)
podTemplate.Kind = zunPod.podKind
var metadata Metadata
metadata.Labels = map[string]string{
"PodName": zunPod.Name,
"ClusterName": zunPod.podClustername,
"NodeName": zunPod.nodeName,
"Namespace": zunPod.NameSpace,
"UID": zunPod.podUid,
"CreationTimestamp": zunPod.podCreatetime,
}
// create container info
metadata.Name = zunPod.NamespaceName
podTemplate.Metadata = metadata
return podTemplate
}
func containerToPod(c *zun_container.Container, podInfo *PodOTemplate) (pod *v1.Pod, err error) {
containers := make([]v1.Container, 1)
containerStatuses := make([]v1.ContainerStatus, 0)
containerMemoryMB := 0
if c.Memory != "" {
containerMemory, err := strconv.Atoi(c.Memory)
if err != nil {
log.Println(err)
}
containerMemoryMB = containerMemory
}
container := v1.Container{
Name: c.Name,
Image: c.Image,
Command: c.Command,
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%g", float64(c.CPU))),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dM", containerMemoryMB)),
},
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%g", float64(c.CPU*1024/100))),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dM", containerMemoryMB)),
},
},
}
containers = append(containers, container)
containerStatus := v1.ContainerStatus{
Name: c.Name,
State: zunContainerStausToContainerStatus(c),
LastTerminationState: zunContainerStausToContainerStatus(c),
Ready: zunStatusToPodPhase(c.Status) == v1.PodRunning,
RestartCount: int32(0),
Image: c.Image,
ImageID: "",
ContainerID: c.UUID,
}
// Add to containerStatuses
containerStatuses = append(containerStatuses, containerStatus)
var containerStartTime metav1.Time
containerStartTime = metav1.NewTime(time.Time{})
p := v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: " | {
memoryLimit = float64(container.Resources.Limits.Memory().Value()) / (1024 * 1024)
} | conditional_block |
zun.go
fmt.Errorf("mysql op is error : %s ", err)
}
return err
}
func CreateZunContainerOpts(pod *v1.Pod) (createOpts zun_container.CreateOpts, err error) {
if pod == nil {
err = fmt.Errorf("Pod is null.CreateZunContainerOpts function in zun ")
return
}
for _, container := range pod.Spec.Containers {
// get pod name
podName := fmt.Sprintf("%s-%s", pod.Name, container.Name)
if podName != "" {
createOpts.Name = podName
}
//get pod image
createOpts.Image = container.Image
createOpts.ImageDriver = "glance"
// open container console in zun-ui
isInteractive := true
createOpts.Interactive = &isInteractive
// show numa info
createOpts.CpuPolicy = "shared"
//isprivileged := true
//createOpts.Privileged = &isprivileged
// Specify the node created by the container
createOpts.Host = "compute1"
//get pod env
env := make(map[string]string, len(container.Env))
for _, v := range container.Env {
env[v.Name] = v.Value
if v.Name == "HADOOP" || v.Name == "SPARK" {
tempName := pod.Name
clusterName := tempName[0:strings.LastIndex(tempName[0:strings.LastIndex(tempName, "-")], "-")]
//clusterName = clusterName[0:strings.LastIndex(clusterName, "-")]
if !isExitHadoopCluster(pod.Namespace, clusterName) {
if numberOfNodes, err := strconv.Atoi(v.Value); err == nil {
initErr := initHadoopCluster(&HadoopCluster{
namespace: pod.Namespace,
clusterName: clusterName,
numberOfNodes: numberOfNodes,
availableNumber: 0,
createTime: time.Now().String(),
hadoopSlaveId: "",
hadoopMasterId: "",
clusterStatus: "InitCreating",
})
if initErr != nil {
fmt.Println(initErr)
}
}
} else {
continue
}
}
}
createOpts.Environment = env
//get pod labels
createOpts.Labels = pod.Labels
//get work dir
createOpts.Workdir = container.WorkingDir
//get image pull policy
createOpts.ImagePullPolicy = strings.ToLower(string(container.ImagePullPolicy))
//get pod command
command := ""
if len(container.Command) > 0 {
for _, v := range container.Command {
command = command + v + " "
}
}
if len(container.Args) > 0 {
for _, v := range container.Args {
command = command + v + " "
}
}
if command != "" {
createOpts.Command = command
}
//get pod resource
if container.Resources.Limits != nil {
cpuLimit := float64(1)
if _, ok := container.Resources.Limits[v1.ResourceCPU]; ok {
cpuLimit = float64(container.Resources.Limits.Cpu().MilliValue()) / 1000.00
}
memoryLimit := 0.5
if _, ok := container.Resources.Limits[v1.ResourceMemory]; ok {
memoryLimit = float64(container.Resources.Limits.Memory().Value()) / (1024 * 1024)
}
createOpts.Cpu = cpuLimit
createOpts.Memory = int(memoryLimit)
} else if container.Resources.Requests != nil {
cpuRequests := float64(1)
if _, ok := container.Resources.Requests[v1.ResourceCPU]; ok {
cpuRequests = float64(container.Resources.Requests.Cpu().MilliValue()) / 1000.00
}
memoryRequests := 0.5
if _, ok := container.Resources.Requests[v1.ResourceMemory]; ok {
memoryRequests = float64(container.Resources.Requests.Memory().Value()) / (1024 * 1024)
}
createOpts.Cpu = cpuRequests
createOpts.Memory = int(memoryRequests)
}
}
return createOpts, nil
}
// UpdatePod takes a Kubernetes Pod and updates it within the provider.
func (p *ZunProvider) UpdatePod(ctx context.Context, pod *v1.Pod) error {
return nil
}
// DeletePod takes a Kubernetes Pod and deletes it from the provider.
func (p *ZunProvider) DeletePod(ctx context.Context, pod *v1.Pod) error {
nn := fmt.Sprintf("%s-%s", pod.Namespace, pod.Name)
if v := ContainerIdQuery(nn); v != "" {
err := zun_container.Delete(p.ZunClient, v, true).ExtractErr()
if err != nil {
return err
}
PodDelete(nn)
if pod.Namespace != "" && pod.Name != "" {
tempName := pod.Name
clusterName := tempName[0:strings.LastIndex(tempName[0:strings.LastIndex(tempName, "-")], "-")]
//clusterName = clusterName[0:strings.LastIndex(clusterName, "-")]
//deleteHadoopCluster(pod.Namespace, clusterName)
deletehadoopNode(pod.Namespace, clusterName, v)
}
return nil
}
return fmt.Errorf("Delete Pod is fail, pod is not found! ")
}
// GetPod retrieves a pod by name from the provider (can be cached).
func (p *ZunProvider) GetPod(ctx context.Context, namespace, name string) (*v1.Pod, error) {
nn := fmt.Sprintf("%s-%s", namespace, name)
if v := ContainerIdQuery(nn); v != "" {
container, err := zun_container.Get(p.ZunClient, v).Extract()
if err != nil {
return nil, fmt.Errorf("zun_container.Get(p.ZunClient,v.ContainerID).Extract() is error : %s ", err)
}
zunPod := PodQuery(nn)
podinfo := zunPodToPodinfo(zunPod)
if _, ok := container.Environment["HADOOP"]; ok {
p.ContainerHadoopNodeFactory(container, namespace, name,"HADOOP")
}else if _, ok := container.Environment["SPARK"]; ok {
p.ContainerHadoopNodeFactory(container, namespace, name,"SPARK")
}
return containerToPod(container, podinfo)
}
return nil, nil
}
func zunPodToPodinfo(zunPod *ZunPod) (podinfo *PodOTemplate) {
var podTemplate = new(PodOTemplate)
podTemplate.Kind = zunPod.podKind
var metadata Metadata
metadata.Labels = map[string]string{
"PodName": zunPod.Name,
"ClusterName": zunPod.podClustername,
"NodeName": zunPod.nodeName,
"Namespace": zunPod.NameSpace,
"UID": zunPod.podUid,
"CreationTimestamp": zunPod.podCreatetime,
}
// create container info
metadata.Name = zunPod.NamespaceName
podTemplate.Metadata = metadata
return podTemplate
}
func containerToPod(c *zun_container.Container, podInfo *PodOTemplate) (pod *v1.Pod, err error) {
containers := make([]v1.Container, 1)
containerStatuses := make([]v1.ContainerStatus, 0)
containerMemoryMB := 0
if c.Memory != "" {
containerMemory, err := strconv.Atoi(c.Memory)
if err != nil {
log.Println(err)
}
containerMemoryMB = containerMemory
}
container := v1.Container{
Name: c.Name,
Image: c.Image,
Command: c.Command,
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%g", float64(c.CPU))),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dM", containerMemoryMB)),
},
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%g", float64(c.CPU*1024/100))),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dM", containerMemoryMB)),
},
},
}
containers = append(containers, container)
containerStatus := v1.ContainerStatus{
Name: c.Name,
State: zunContainerStausToContainerStatus(c),
LastTerminationState: zunContainerStausToContainerStatus(c),
Ready: zunStatusToPodPhase(c.Status) == v1.PodRunning,
RestartCount: int32(0),
Image: c.Image,
ImageID: "",
ContainerID: c.UUID,
}
// Add to containerStatuses
containerStatuses = append(containerStatuses, containerStatus)
var containerStartTime metav1.Time
containerStartTime = metav1.NewTime(time.Time{})
p := v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1 | if container.Resources.Limits != nil {
cpuLimit := float64(1)
if _, ok := container.Resources.Limits[v1.ResourceCPU]; ok { | random_line_split |
zun.go
"UID": zunPod.podUid,
"CreationTimestamp": zunPod.podCreatetime,
}
// create container info
metadata.Name = zunPod.NamespaceName
podTemplate.Metadata = metadata
return podTemplate
}
func containerToPod(c *zun_container.Container, podInfo *PodOTemplate) (pod *v1.Pod, err error) {
containers := make([]v1.Container, 1)
containerStatuses := make([]v1.ContainerStatus, 0)
containerMemoryMB := 0
if c.Memory != "" {
containerMemory, err := strconv.Atoi(c.Memory)
if err != nil {
log.Println(err)
}
containerMemoryMB = containerMemory
}
container := v1.Container{
Name: c.Name,
Image: c.Image,
Command: c.Command,
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%g", float64(c.CPU))),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dM", containerMemoryMB)),
},
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%g", float64(c.CPU*1024/100))),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dM", containerMemoryMB)),
},
},
}
containers = append(containers, container)
containerStatus := v1.ContainerStatus{
Name: c.Name,
State: zunContainerStausToContainerStatus(c),
LastTerminationState: zunContainerStausToContainerStatus(c),
Ready: zunStatusToPodPhase(c.Status) == v1.PodRunning,
RestartCount: int32(0),
Image: c.Image,
ImageID: "",
ContainerID: c.UUID,
}
// Add to containerStatuses
containerStatuses = append(containerStatuses, containerStatus)
var containerStartTime metav1.Time
containerStartTime = metav1.NewTime(time.Time{})
p := v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: podInfo.Metadata.Labels["PodName"],
Namespace: podInfo.Metadata.Labels["Namespace"],
ClusterName: podInfo.Metadata.Labels["ClusterName"],
UID: types.UID(podInfo.Metadata.Labels["UID"]),
CreationTimestamp: metav1.NewTime(time.Now()),
},
Spec: v1.PodSpec{
NodeName: "virtual-kubelet",
Volumes: []v1.Volume{},
Containers: containers,
},
Status: v1.PodStatus{
Phase: zunStatusToPodPhase(c.Status),
Conditions: zunPodStateToPodConditions(c.Status, metav1.NewTime(time.Now())),
Message: "",
Reason: "",
HostIP: "",
PodIP: c.GetIp(),
StartTime: &containerStartTime,
ContainerStatuses: containerStatuses,
},
}
return &p, nil
}
func zunPodStateToPodConditions(state string, transitionTime metav1.Time) []v1.PodCondition {
switch state {
case "Running", "Created":
return []v1.PodCondition{
v1.PodCondition{
Type: v1.PodReady,
Status: v1.ConditionTrue,
LastTransitionTime: transitionTime,
}, v1.PodCondition{
Type: v1.PodInitialized,
Status: v1.ConditionTrue,
LastTransitionTime: transitionTime,
}, v1.PodCondition{
Type: v1.PodScheduled,
Status: v1.ConditionTrue,
LastTransitionTime: transitionTime,
},
}
}
return []v1.PodCondition{}
}
// GetContainerLogs retrieves the logs of a container by name from the provider.
func (p *ZunProvider) GetContainerLogs(ctx context.Context, namespace, podName, containerName string, tail int) (string, error) {
return "not support in Zun Provider", nil
}
// ExecInContainer executes a command in a container in the pod, copying data
// between in/out/err and the container's stdin/stdout/stderr.
func (p *ZunProvider) ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
log.Printf("receive ExecInContainer %q\n", container)
return nil
}
// GetPodStatus retrieves the status of a pod by name from the provider.
func (p *ZunProvider) GetPodStatus(ctx context.Context, namespace, name string) (*v1.PodStatus, error) {
pod, err := p.GetPod(ctx, namespace, name)
if err != nil {
return nil, err
}
if pod == nil {
return nil, nil
}
return &pod.Status, nil
}
// GetPods retrieves a list of all pods running on the provider (can be cached).
func (p *ZunProvider) GetPods(context.Context) ([]*v1.Pod, error) {
pager := zun_container.List(p.ZunClient, nil)
pages := 0
err := pager.EachPage(func(page pagination.Page) (bool, error) {
pages++
return true, nil
})
if err != nil {
return nil, err
}
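// The pager is walked twice: the pass above only counts pages so the pods slice can be
// preallocated, and the pass below converts each Zun container into a v1.Pod.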
pods := make([]*v1.Pod, 0, pages)
err = pager.EachPage(func(page pagination.Page) (bool, error) {
containerList, err := zun_container.ExtractContainers(page)
if err != nil {
return false, err
}
for _, m := range containerList {
c := m
temp := new(PodOTemplate)
//for _, v := range podsDB {
// if v.ContainerID == c.UUID {
// temp = v.Podinfo
// }
//}
if zunPod := PodQueryByContainerId(c.UUID); zunPod != nil {
temp = zunPodToPodinfo(zunPod)
}
p, err := containerToPod(&c, temp)
if err != nil {
log.Println(err)
continue
}
pods = append(pods, p)
}
return true, nil
})
if err != nil {
return nil, err
}
return pods, nil
}
// Capacity returns a resource list with the capacity constraints of the provider.
func (p *ZunProvider) Capacity(context.Context) v1.ResourceList {
return v1.ResourceList{
"cpu": resource.MustParse(p.cpu),
"memory": resource.MustParse(p.memory),
"pods": resource.MustParse(p.pods),
}
}
// NodeConditions returns a list of conditions (Ready, OutOfDisk, etc), which is
// polled periodically to update the node status within Kubernetes.
func (p *ZunProvider) NodeConditions(context.Context) []v1.NodeCondition {
return []v1.NodeCondition{
{
Type: "Ready",
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletReady",
Message: "kubelet is ready.",
},
{
Type: "OutOfDisk",
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasSufficientDisk",
Message: "kubelet has sufficient disk space available",
},
{
Type: "MemoryPressure",
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasSufficientMemory",
Message: "kubelet has sufficient memory available",
},
{
Type: "DiskPressure",
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "KubeletHasNoDiskPressure",
Message: "kubelet has no disk pressure",
},
{
Type: "NetworkUnavailable",
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "RouteCreated",
Message: "RouteController created a route",
},
}
}
// NodeAddresses returns a list of addresses for the node status
// within Kubernetes.
func (p *ZunProvider) NodeAddresses(context.Context) []v1.NodeAddress {
return nil
}
// NodeDaemonEndpoints returns NodeDaemonEndpoints for the node status
// within Kubernetes.
func (p *ZunProvider) NodeDaemonEndpoints(context.Context) *v1.NodeDaemonEndpoints {
return &v1.NodeDaemonEndpoints{
KubeletEndpoint: v1.DaemonEndpoint{
Port: p.daemonEndpointPort,
},
}
}
|
EDA.py
h1_df.reset_index(inplace=True)
get_data = h1_df.loc[h1_df['symbol'] == cp]['mid']
data = h1_df
t=10 # Rolling time window, and calculate mean and standard error
rolmean = get_data.rolling(window=t).mean()
rolstd = get_data.rolling(window=t).std()
BollingerBand(cp, data, rolmean, rolstd)
if(chosenCP[0]<get_data.describe()['std']):
chosenCP[0] = get_data.describe()['std']
chosenCP[1] = cp
# Based on volatility, as measured by std, we chose our base currency pair: GBPJPY
# In[272]:
chosenCP
# In[274]:
h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid']
# In[275]:
# get number of rows, columns
data_size = h1.shape[0:2]
data_size
# In[276]:
# Split the dataset: First 70% for training, Last 30% for testing
data_size = h1.shape[0]
data_train = h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid'].iloc[0:int(data_size*0.7)]
data_test = h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid'].iloc[int(data_size*0.7):]
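# Note: the 70/30 split above is purely chronological (no shuffling), so the test period
# always follows the training period and there is no look-ahead leakage.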
# In[277]:
#plotting the time series for the 'mid' feature obtained from feature engineering
h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid'].plot()
# ### Rolling Time Window (t=3)
# In[279]:
t=3 # Rolling time window, and calculate mean and standard error
rolmean = data_train.rolling(window=t).mean()
rolstd = data_train.rolling(window=t).std()
rolmean, rolstd
# In[280]:
#h1_df.loc['GBPJPY']['mid'].plot()
rolmean.plot()
rolstd.plot()
# ### Dickey-Fuller Test for stationarity
# #### This section checks whether the data is stationary and removes trend & seasonality accordingly, so the analysis can focus on the residuals/noise.
# In[281]:
# Dickey-Fuller test on stationary condition
from statsmodels.tsa.stattools import adfuller
print ('Results of Dickey-Fuller Test:')
# Dickey-Fuller test
dftest = adfuller(data_train, autolag=None)
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
print (dfoutput)
# #### Dickey-Fuller test shows that null hypothesis (nonstationarity) cannot be rejected, so the time series may be nonstationary. If non-stationary, we cannot use AR(I)MA to fit the data, make inference or do forecasting. Thus, we need to make the time series stationary first.
#
# #### We have several ways to make time series stationary.
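# Illustrative sketch only (not from the original notebook): two common transformations that
# often help make a price series stationary; the rolling window of 10 is an arbitrary choice.
example_first_diff = data_train.diff().dropna()                        # first-order differencing
example_detrended = data_train - data_train.rolling(window=10).mean()  # remove a rolling trend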
# In[282]:
# Remove trend from original time series: Subtract this estimated trend
data_log_moving_avg_diff = data_train - rolmean
data_log_moving_avg_diff
# In[283]:
# Drop missing values
data_log_moving_avg_diff.dropna(inplace=True)
data_log_moving_avg_diff
# In[284]:
# Evaluating Trendless Time Series for Stationary: Using Dickey-Fuller test
print ('Results of Dickey-Fuller Test:')
# Dickey-Fuller test
dftest = adfuller(data_log_moving_avg_diff, autolag=None)
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
print (dfoutput)
# #### This time, we find that Dickey-Fuller test is significant (p-value<5%). This means, after removing general trend, the time series data probably become stationary.
#
# #### However, let us decompose the time series into trend, seasonality and residual to better understand the data
# ### Decomposition of Original Time Series
# #### into Trend, Seasonality and Residual/ Noise
# In[288]:
# Decomposing the Original Time Series
from statsmodels.tsa.seasonal import seasonal_decompose
decomposition = seasonal_decompose(list(data_train), freq=15)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
# Plot the components
plt.figure(figsize=(15,10))
plt.subplot(411)
plt.plot(data_train, label='Original')
plt.legend(loc='best')
plt.subplot(412)
plt.plot(trend, label='Trend')
plt.legend(loc='best')
plt.subplot(413)
plt.plot(seasonal,label='Seasonality')
plt.legend(loc='best')
plt.subplot(414)
plt.plot(residual, label='Residuals')
plt.legend(loc='best')
plt.tight_layout()
# #### From the above, we can make a few conclusions.
# #### 1. There is a generally decreasing trend.
# #### 2. There is seasonality, on a very small scale.
# #### 3. The residuals seem to play a significant role in the currency price.
# ### Removing Seasonality
# #### Even though seasonality is observed on a very small scale, our group has decided to remove it.
# In[289]:
# Evaluating the Residuals for Stationary
data_decompose = pd.Series(residual)
data_decompose.dropna(inplace=True)
t=30 # Rolling time window, and calculate mean and standard error
rolmean = data_decompose.rolling(window=t).mean()
rolstd = data_decompose.rolling(window=t).std()
# Visualize whether mean and standard error of time series are stationary over time
plt.figure(figsize=(15,5))
orig = plt.plot(data_decompose, color='blue',label='Original Residuals')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label = 'Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation of Residuals')
plt.show(block=False)
# Dickey-Fuller test for stationarity
print ('Results of Dickey-Fuller Test:')
# Dickey-Fuller test
dftest = adfuller(data_decompose, autolag=None)
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
print (dfoutput)
# ### ARIMA
# In[290]:
from statsmodels.tsa.arima_model import ARIMA
# In[292]:
# Suppose we use ARIMA(p,d,q) to fit the time series data: AR(p), MA(q) and d-order-differencing
# Remember: we have tested that first-order differencing is enough to make the non-stationary series stationary
# Here we can use ARIMA model, which fits ARMA model on (first-order) differencing time series data
from statsmodels.tsa.arima_model import ARIMA
#import seaborn as sns
# ARIMA(5, 1, 5): AR(5), MA(5) and first-order-differencing
model = ARIMA(data_train, order=(5,1,5))
model_fit = model.fit(disp=0)
print (model_fit.summary())
# Find AIC value
print ("AIC is: %s" %(model_fit.aic))
# Plot residual errors series
plt.figure()
residuals = pd.DataFrame(model_fit.resid)
residuals.plot()
plt.show()
# Plot distribution of residual errors
#sns.distplot(residuals, hist=True, kde=True, bins=50, color = 'darkblue', hist_kws={'edgecolor':'black'}, kde_kws={'linewidth': 4})
# In[294]:
# We can do a grid search on selecting autoregressive order or moving average order in ARIMA model
# Select the model with best performance
# Remember usually you need to further split training data to do validation, and find the model with best validation accuracy.
# In this example, to keep the illustration of the time-series modelling procedure simple and quick, we skip that step (a minimal validation-split sketch appears after the loop below).
p_list = [i for i in range(1, 11)] # AR order
best_AIC = float('inf') # You need to minimize AIC
best_model = None # Model with lowest AIC
for p in p_list:
# ARIMA(p, 1, 1): AR(p), MA(1) and first-order-differencing
model = ARIMA(data_train, order=(p,1,1))
model_fit = model.fit(disp=0)
if model_fit.aic <= best_AIC:
best_model, best_AIC = model, model_fit.aic
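# Minimal sketch (illustrative only; the original analysis skips this): a further chronological
# split of the training data that could support validation-based order selection instead of AIC.
val_cut = int(len(data_train) * 0.8)
train_part, val_part = data_train.iloc[:val_cut], data_train.iloc[val_cut:]
# A candidate ARIMA order would then be fitted on train_part and scored by its forecast error on val_part.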
|
EDA.py
EURUSD = qb.AddForex("EURUSD")
USDJPY = qb.AddForex("USDJPY")
GBPUSD = qb.AddForex("GBPUSD")
# ### III. Exploratory Data Analysis (EDA)
# #### This section analyses and justifies the various currency pairs chosen. Our group compared the volatility of the currency pairs using Bollinger Bands.
# #### Volatility Analysis of Chosen Currency (EURJPY)
# In[270]:
## modified BollingerBand function ##
def BollingerBand(cp, data, mean, std):
plt.style.use('fivethirtyeight')
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(111)
ax.set_title(cp)
ax.set_xlabel('Time')
ax.set_ylabel('Exchange Rate')
x_axis = list(data.loc[data['symbol'] == cp]['time'])
# x_axis = data.loc[data['symbol'] == cp].index.get_level_values(0)
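# Note: the next line overrides the `mean` argument with the pair's 'close' series, so only the
# rolling `std` passed in affects the band width while the centre line is the raw close price.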
mean = data.loc[data['symbol'] == cp]['close']
bollinger_upper = (mean + std*2)
bollinger_lower = (mean - std*2)
ax.fill_between(x_axis, bollinger_upper, bollinger_lower, facecolor='grey', alpha = 0.5)
ax.plot(x_axis, mean, color='blue', lw=2)
ax.plot(x_axis, bollinger_upper, color='green', lw=2)
ax.plot(x_axis, bollinger_lower, color='orange', lw=2)
ax.legend()
plt.show();
bollinger_upper = list(mean + std*2)
bollinger_lower = list(mean - std*2)
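# Band construction used above: centre line = close price, band edges = centre ± 2 rolling
# standard deviations -- the standard Bollinger Band definition.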
# In[271]:
## modified function for plotting BollingerBand ##
'''
Based off some research done online, our group has decided to drill down further
into the following 3 currency pairs due to their relatively higher volatility.
The pair with the highest volatility was eventually chosen as the base pair acting
as the basis for our portfolio.
'''
volatileCP = ['GBPJPY', 'EURNZD', 'GBPAUD']
chosenCP = [0,0];
for cp in volatileCP:
qb.AddForex(cp)
h1 = qb.History(qb.Securities.Keys, 180, Resolution.Hour)
h1_df = pd.DataFrame(h1)
# adding the new variable 'mid' into the dataframe (justification found in report)
h1_df['mid'] = (h1_df['high']+h1_df['low'])/2
h1 = h1_df
# convert row name 'time' from an index to a column
h1_df.index.name = 'time'
h1_df.reset_index(inplace=True)
get_data = h1_df.loc[h1_df['symbol'] == cp]['mid']
data = h1_df
t=10 # Rolling time window, and calculate mean and standard error
rolmean = get_data.rolling(window=t).mean()
rolstd = get_data.rolling(window=t).std()
BollingerBand(cp, data, rolmean, rolstd)
if(chosenCP[0]<get_data.describe()['std']):
chosenCP[0] = get_data.describe()['std']
chosenCP[1] = cp
# Based on volatility, as measured by std, we chose our base currency pair: GBPJPY
# In[272]:
chosenCP
# In[274]:
h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid']
# In[275]:
# get number of rows, columns
data_size = h1.shape[0:2]
data_size
# In[276]:
# Split the dataset: First 70% for training, Last 30% for testing
data_size = h1.shape[0]
data_train = h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid'].iloc[0:int(data_size*0.7)]
data_test = h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid'].iloc[int(data_size*0.7):]
# In[277]:
#plotting the time series for the 'mid' feature obtained from feature engineering
h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid'].plot()
# ### Rolling Time Window (t=3)
# In[279]:
t=3 # Rolling time window, and calculate mean and standard error
rolmean = data_train.rolling(window=t).mean()
rolstd = data_train.rolling(window=t).std()
rolmean, rolstd
# In[280]:
#h1_df.loc['GBPJPY']['mid'].plot()
rolmean.plot()
rolstd.plot()
# ### Dickey-Fuller Test for stationarity
# #### This section checks whether the data is stationary and removes trend & seasonality accordingly, so the analysis can focus on the residuals/noise.
# In[281]:
# Dickey-Fuller test on stationary condition
from statsmodels.tsa.stattools import adfuller
print ('Results of Dickey-Fuller Test:')
# Dickey-Fuller test
dftest = adfuller(data_train, autolag=None)
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
print (dfoutput)
# #### Dickey-Fuller test shows that null hypothesis (nonstationarity) cannot be rejected, so the time series may be nonstationary. If non-stationary, we cannot use AR(I)MA to fit the data, make inference or do forecasting. Thus, we need to make the time series stationary first.
#
# #### We have several ways to make time series stationary.
# In[282]:
# Remove trend from original time series: Subtract this estimated trend
data_log_moving_avg_diff = data_train - rolmean
data_log_moving_avg_diff
# In[283]:
# Drop missing values
data_log_moving_avg_diff.dropna(inplace=True)
data_log_moving_avg_diff
# In[284]:
# Evaluating Trendless Time Series for Stationary: Using Dickey-Fuller test
print ('Results of Dickey-Fuller Test:')
# Dickey-Fuller test
dftest = adfuller(data_log_moving_avg_diff, autolag=None)
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
print (dfoutput)
# #### This time, we find that Dickey-Fuller test is significant (p-value<5%). This means, after removing general trend, the time series data probably become stationary.
#
# #### However, let us decompose the time series into trend, seasonality and residual to better understand the data
# ### Decomposition of Original Time Series
# #### into Trend, Seasonality and Residual/ Noise
# In[288]:
# Decomposing the Original Time Series
from statsmodels.tsa.seasonal import seasonal_decompose
decomposition = seasonal_decompose(list(data_train), freq=15)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
# Plot the components
plt.figure(figsize=(15,10))
plt.subplot(411)
plt.plot(data_train, label='Original')
plt.legend(loc='best')
plt.subplot(412)
plt.plot(trend, label='Trend')
plt.legend(loc='best')
plt.subplot(413)
plt.plot(seasonal,label='Seasonality')
plt.legend(loc='best')
plt.subplot(414)
plt.plot(residual, label='Residuals')
plt.legend(loc='best')
plt.tight_layout()
# #### From the above, we can make a few conclusions.
# #### 1. There is a generally decreasing trend.
# #### 2. There is seasonality, on a very small scale.
# #### 3. The residuals seems to play a significant role in the currency price.
# ### Removing Seasonality
# #### Even though seasonality is observed on a very small scale, our group has decided to remove it.
# In[289]:
# Evaluating the Residuals for Stationary
data_decompose = pd.Series(residual)
data_decompose.dropna(inplace=True)
t=30 # Rolling time window, and calculate mean and standard error
rolmean = data_decompose.rolling(window=t).mean()
rolstd = data_decompose.rolling(window=t).std()
# Visualize whether mean and standard error of time series are stationary over time
plt.figure(figsize=(15,5))
orig = plt.plot(data_decompose, color='blue',label='Original Residuals')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label = 'Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation of Residuals')
plt.show(block=False)
# Dickey-Fuller test for stationarity
print ('Results of Dickey-Fuller Test:')
EDA.py
EURUSD = qb.AddForex("EURUSD")
USDJPY = qb.AddForex("USDJPY")
GBPUSD = qb.AddForex("GBPUSD")
# ### III. Exploratory Data Analysis (EDA)
# #### This section analyses and justifies the various currency pairs chosen. Our group compared the volatility of the currency pairs using Bollinger Bands.
# #### Volatility Analysis of Chosen Currency (EURJPY)
# In[270]:
## modified BollingerBand function ##
def BollingerBand(cp, data, mean, std):
plt.style.use('fivethirtyeight')
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(111)
ax.set_title(cp)
ax.set_xlabel('Time')
ax.set_ylabel('Exchange Rate')
x_axis = list(data.loc[data['symbol'] == cp]['time'])
# x_axis = data.loc[data['symbol'] == cp].index.get_level_values(0)
mean = data.loc[data['symbol'] == cp]['close']
bollinger_upper = (mean + std*2)
bollinger_lower = (mean - std*2)
ax.fill_between(x_axis, bollinger_upper, bollinger_lower, facecolor='grey', alpha = 0.5)
ax.plot(x_axis, mean, color='blue', lw=2)
ax.plot(x_axis, bollinger_upper, color='green', lw=2)
ax.plot(x_axis, bollinger_lower, color='orange', lw=2)
ax.legend()
plt.show();
bollinger_upper = list(mean + std*2)
bollinger_lower = list(mean - std*2)
# In[271]:
## modified function for plotting BollingerBand ##
'''
Based off some research done online, our group has decided to drill down further
into the following 3 currency pairs due to their relatively higher volatility.
The pair with the highest volatility was eventually chosen as the base pair acting
as the basis for our portfolio.
'''
volatileCP = ['GBPJPY', 'EURNZD', 'GBPAUD']
chosenCP = [0,0];
for cp in volatileCP:
qb.AddForex(cp)
h1 = qb.History(qb.Securities.Keys, 180, Resolution.Hour)
h1_df = pd.DataFrame(h1)
# adding the new variable 'mid' into the dataframe (justification found in report)
h1_df['mid'] = (h1_df['high']+h1_df['low'])/2
h1 = h1_df
# convert row name 'time' from an index to a column
h1_df.index.name = 'time'
h1_df.reset_index(inplace=True)
get_data = h1_df.loc[h1_df['symbol'] == cp]['mid']
data = h1_df
t=10 # Rolling time window, and calculate mean and standard error
rolmean = get_data.rolling(window=t).mean()
rolstd = get_data.rolling(window=t).std()
BollingerBand(cp, data, rolmean, rolstd)
if(chosenCP[0]<get_data.describe()['std']):
chosenCP[0] = get_data.describe()['std']
chosenCP[1] = cp
# Based on volatility, as measured by std, we chose our base currency pair: GBPJPY
# In[272]:
chosenCP
# In[274]:
h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid']
# In[275]:
# get number of rows, columns
data_size = h1.shape[0:2]
data_size
# In[276]:
# Split the dataset: First 70% for training, Last 30% for testing
data_size = h1.shape[0]
data_train = h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid'].iloc[0:int(data_size*0.7)]
data_test = h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid'].iloc[int(data_size*0.7):]
# In[277]:
#plotting the time series for the 'mid' feature obtained from feature engineering
h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid'].plot()
# ### Rolling Time Window (t=3)
# In[279]:
t=3 # Rolling time window, and calculate mean and standard error
rolmean = data_train.rolling(window=t).mean()
rolstd = data_train.rolling(window=t).std()
rolmean, rolstd
# In[280]:
#h1_df.loc['GBPJPY']['mid'].plot()
rolmean.plot()
rolstd.plot()
# ### Dickey-Fuller Test for stationarity
# #### This section checks whether the data is stationary and removes trend & seasonality accordingly, so the analysis can focus on the residuals/noise.
# In[281]:
# Dickey-Fuller test on stationary condition
from statsmodels.tsa.stattools import adfuller
print ('Results of Dickey-Fuller Test:')
# Dickey-Fuller test
dftest = adfuller(data_train, autolag=None)
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
print (dfoutput)
# #### Dickey-Fuller test shows that null hypothesis (nonstationarity) cannot be rejected, so the time series may be nonstationary. If non-stationary, we cannot use AR(I)MA to fit the data, make inference or do forecasting. Thus, we need to make the time series stationary first.
#
# #### We have several ways to make time series stationary.
# In[282]:
# Remove trend from original time series: Subtract this estimated trend
data_log_moving_avg_diff = data_train - rolmean
data_log_moving_avg_diff
# In[283]:
# Drop missing values
data_log_moving_avg_diff.dropna(inplace=True)
data_log_moving_avg_diff
# In[284]:
# Evaluating Trendless Time Series for Stationary: Using Dickey-Fuller test
print ('Results of Dickey-Fuller Test:')
# Dickey-Fuller test
dftest = adfuller(data_log_moving_avg_diff, autolag=None)
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
print (dfoutput)
# #### This time, we find that Dickey-Fuller test is significant (p-value<5%). This means, after removing general trend, the time series data probably become stationary.
#
# #### However, let us decompose the time series into trend, seasonality and residual to better understand the data
# ### Decomposition of Original Time Series
# #### into Trend, Seasonality and Residual/ Noise
# In[288]:
# Decomposing the Original Time Series
from statsmodels.tsa.seasonal import seasonal_decompose
decomposition = seasonal_decompose(list(data_train), freq=15)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
# Plot the components
plt.figure(figsize=(15,10))
plt.subplot(411)
plt.plot(data_train, label='Original')
plt.legend(loc='best')
plt.subplot(412)
plt.plot(trend, label='Trend')
plt.legend(loc='best')
plt.subplot(413)
plt.plot(seasonal,label='Seasonality')
plt.legend(loc='best')
plt.subplot(414)
plt.plot(residual, label='Residuals')
plt.legend(loc='best')
plt.tight_layout()
# #### From the above, we can make a few conclusions.
# #### 1. There is a generally decreasing trend.
# #### 2. There is seasonality, on a very small scale.
# #### 3. The residuals seem to play a significant role in the currency price.
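# Note: seasonal_decompose above uses its default additive model, i.e. observed = trend + seasonal + residual.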
# ### Removing Seasonality
# #### Even though seasonality is observed on a very small scale, our group has decided to remove it.
# In[289]:
# Evaluating the Residuals for Stationary
data_decompose = pd.Series(residual)
data_decompose.dropna(inplace=True)
t=30 # Rolling time window, and calculate mean and standard error
rolmean = data_decompose.rolling(window=t).mean()
rolstd = data_decompose.rolling(window=t).std()
# Visualize whether mean and standard error of time series are stationary over time
plt.figure(figsize=(15,5))
orig = plt.plot(data_decompose, color='blue',label='Original Residuals')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label = 'Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation of Residuals')
plt.show(block=False)
# Dickey-Fuller test for stationarity
print ('Results of Dickey-Fuller Test:')
EDA.py
EURUSD = qb.AddForex("EURUSD")
USDJPY = qb.AddForex("USDJPY")
GBPUSD = qb.AddForex("GBPUSD")
# ### III. Exploratory Data Analysis (EDA)
# #### This section analyses and justifies the various currency pairs chosen. Our group compared the volatility of the currency pairs using Bollinger Bands.
# #### Volatility Analysis of Chosen Currency (EURJPY)
# In[270]:
## modified BollingerBand function ##
def BollingerBand(cp, data, mean, std):
plt.style.use('fivethirtyeight')
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(111)
ax.set_title(cp)
ax.set_xlabel('Time')
ax.set_ylabel('Exchange Rate')
x_axis = list(data.loc[data['symbol'] == cp]['time'])
# x_axis = data.loc[data['symbol'] == cp].index.get_level_values(0)
mean = data.loc[data['symbol'] == cp]['close']
bollinger_upper = (mean + std*2)
bollinger_lower = (mean - std*2)
ax.fill_between(x_axis, bollinger_upper, bollinger_lower, facecolor='grey', alpha = 0.5)
ax.plot(x_axis, mean, color='blue', lw=2)
ax.plot(x_axis, bollinger_upper, color='green', lw=2)
ax.plot(x_axis, bollinger_lower, color='orange', lw=2)
ax.legend()
plt.show();
bollinger_upper = list(mean + std*2)
bollinger_lower = list(mean - std*2)
# In[271]:
## modified function for plotting BollingerBand ##
'''
Based off some research done online, our group has decided to drill down further
into the following 3 currency pairs due to their relatively higher volatility.
The pair with the highest volatility was eventually chosen as the base pair acting
as the basis for our portfolio.
'''
volatileCP = ['GBPJPY', 'EURNZD', 'GBPAUD']
chosenCP = [0,0];
for cp in volatileCP:
qb.AddForex(cp)
h1 = qb.History(qb.Securities.Keys, 180, Resolution.Hour)
h1_df = pd.DataFrame(h1)
# adding the new variable 'mid' into the dataframe (justification found in report)
h1_df['mid'] = (h1_df['high']+h1_df['low'])/2
h1 = h1_df
# convert row name 'time' from an index to a column
h1_df.index.name = 'time'
h1_df.reset_index(inplace=True)
get_data = h1_df.loc[h1_df['symbol'] == cp]['mid']
data = h1_df
t=10 # Rolling time window, and calculate mean and standard error
rolmean = get_data.rolling(window=t).mean()
rolstd = get_data.rolling(window=t).std()
BollingerBand(cp, data, rolmean, rolstd)
if(chosenCP[0]<get_data.describe()['std']):
chosenCP[0] = get_data.describe()['std']
chosenCP[1] = cp
# Based on volatility, as measured by std, we chose our base currency pair: GBPJPY
# In[272]:
chosenCP
# In[274]:
h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid']
# In[275]:
# get number of rows, columns
data_size = h1.shape[0:2]
data_size
# In[276]:
# Split the dataset: First 70% for training, Last 30% for testing
data_size = h1.shape[0]
data_train = h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid'].iloc[0:int(data_size*0.7)]
data_test = h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid'].iloc[int(data_size*0.7):]
# In[277]:
#plotting the time series for the 'mid' feature obtained from feature engineering
h1_df.loc[h1_df['symbol'] == 'GBPJPY']['mid'].plot()
# ### Rolling Time Window (t=3)
# In[279]:
t=3 # Rolling time window, and calculate mean and standard error
rolmean = data_train.rolling(window=t).mean()
rolstd = data_train.rolling(window=t).std()
rolmean, rolstd
# In[280]:
#h1_df.loc['GBPJPY']['mid'].plot()
rolmean.plot()
rolstd.plot()
# ### Dickey-Fuller Test for stationarity
# #### This section checks whether the data is stationary and removes trend & seasonality accordingly, so the analysis can focus on the residuals/noise.
# In[281]:
# Dickey-Fuller test on stationary condition
from statsmodels.tsa.stattools import adfuller
print ('Results of Dickey-Fuller Test:')
# Dickey-Fuller test
dftest = adfuller(data_train, autolag=None)
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
print (dfoutput)
# #### Dickey-Fuller test shows that null hypothesis (nonstationarity) cannot be rejected, so the time series may be nonstationary. If non-stationary, we cannot use AR(I)MA to fit the data, make inference or do forecasting. Thus, we need to make the time series stationary first.
#
# #### We have several ways to make time series stationary.
# In[282]:
# Remove trend from original time series: Subtract this estimated trend
data_log_moving_avg_diff = data_train - rolmean
data_log_moving_avg_diff
# In[283]:
# Drop missing values
data_log_moving_avg_diff.dropna(inplace=True)
data_log_moving_avg_diff
# In[284]:
# Evaluating Trendless Time Series for Stationary: Using Dickey-Fuller test
print ('Results of Dickey-Fuller Test:')
# Dickey-Fuller test
dftest = adfuller(data_log_moving_avg_diff, autolag=None)
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
print (dfoutput)
# #### This time, we find that Dickey-Fuller test is significant (p-value<5%). This means, after removing general trend, the time series data probably become stationary.
#
# #### However, let us decompose the time series into trend, seasonality and residual to better understand the data
# ### Decomposition of Original Time Series
# #### into Trend, Seasonality and Residual/ Noise
# In[288]:
# Decomposing the Original Time Series
from statsmodels.tsa.seasonal import seasonal_decompose
decomposition = seasonal_decompose(list(data_train), freq=15)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
# Plot the components
plt.figure(figsize=(15,10))
plt.subplot(411)
plt.plot(data_train, label='Original')
plt.legend(loc='best')
plt.subplot(412)
plt.plot(trend, label='Trend')
plt.legend(loc='best')
plt.subplot(413)
plt.plot(seasonal,label='Seasonality')
plt.legend(loc='best')
plt.subplot(414)
plt.plot(residual, label='Residuals')
plt.legend(loc='best')
plt.tight_layout()
# #### From the above, we can make a few conclusions.
# #### 1. There is a generally decreasing trend.
# #### 2. There is seasonality, on a very small scale.
# #### 3. The residuals seem to play a significant role in the currency price.
# ### Removing Seasonality
# #### Even though seasonality is observed on a very small scale, our group has decided to remove it.
# In[289]:
# Evaluating the Residuals for Stationary
data_decompose = pd.Series(residual)
data_decompose.dropna(inplace=True)
t=30 # Rolling time window, and calculate mean and standard error
rolmean = data_decompose.rolling(window=t).mean()
rolstd = data_decompose.rolling(window=t).std()
# Visualize whether mean and standard error of time series are stationary over time
plt.figure(figsize=(15,5))
orig = plt.plot(data_decompose, color='blue',label='Original Residuals')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label = 'Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation of Residuals')
plt.show(block=False)
# Dickey-Fuller test for stationarity
print ('Results of Dickey-Fuller Test:')
inference.py
'sequence': one_hots,
'sequence_length': row_lengths,
},
signature=signature,
as_dict=True).values())[0]
def in_graph_inferrer(sequences,
savedmodel_dir_path,
signature,
name_scope='inferrer'):
| """
# Add variable to make it easier to refactor with multiple tags in future.
tags = [tf.saved_model.tag_constants.SERVING]
# Tokenization
residues = tf.strings.unicode_split(sequences, 'UTF-8')
# Convert to one-hots and pad.
one_hots, row_lengths = utils.in_graph_residues_to_onehot(residues)
module_spec = hub.saved_model_module.create_module_spec_from_saved_model(
savedmodel_dir_path)
module = hub.Module(module_spec, trainable=False, tags=tags, name=name_scope)
return call_module(module, one_hots, row_lengths, signature)
@functools.lru_cache(maxsize=None)
def memoized_inferrer(
savedmodel_dir_path,
activation_type=tf.saved_model.signature_constants
.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
batch_size=16,
use_tqdm=False,
session_config=None,
memoize_inference_results=False,
use_latest_savedmodel=False,
):
"""Alternative constructor for Inferrer that is memoized."""
return Inferrer(
savedmodel_dir_path=savedmodel_dir_path,
activation_type=activation_type,
batch_size=batch_size,
use_tqdm=use_tqdm,
session_config=session_config,
memoize_inference_results=memoize_inference_results,
use_latest_savedmodel=use_latest_savedmodel,
)
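# Because of the functools.lru_cache decorator above, repeated calls with identical arguments
# return the same Inferrer instance, so the SavedModel (and its tf.Session) is only built once
# per configuration.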
class Inferrer(object):
"""Uses a SavedModel to provide batched inference."""
def __init__(
self,
savedmodel_dir_path,
activation_type=tf.saved_model.signature_constants
.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
batch_size=64,
use_tqdm=False,
session_config=None,
memoize_inference_results=False,
use_latest_savedmodel=False,
):
"""Construct Inferrer.
Args:
savedmodel_dir_path: path to directory where a SavedModel pb or
pbtxt is stored. The SavedModel must only have one input per signature
and only one output per signature.
activation_type: one of the keys in saved_model.signature_def.keys().
batch_size: batch size to use for individual inference ops.
use_tqdm: Whether to print progress using tqdm.
session_config: tf.ConfigProto for tf.Session creation.
memoize_inference_results: if True, calls to inference.get_activations
will be memoized.
use_latest_savedmodel: If True, the model will be loaded from
latest_savedmodel_path_from_base_path(savedmodel_dir_path).
Raises:
ValueError: if activation_type is not the name of a signature_def in the
SavedModel.
ValueError: if SavedModel.signature_def[activation_type] has an input
other than 'sequence'.
ValueError: if SavedModel.signature_def[activation_type] has more than
one output.
"""
if use_latest_savedmodel:
savedmodel_dir_path = latest_savedmodel_path_from_base_path(
savedmodel_dir_path)
self.batch_size = batch_size
self._graph = tf.Graph()
self._model_name_scope = 'inferrer'
with self._graph.as_default():
self._sequences = tf.placeholder(
shape=[None], dtype=tf.string, name='sequences')
self._fetch = in_graph_inferrer(
self._sequences,
savedmodel_dir_path,
activation_type,
name_scope=self._model_name_scope)
self._sess = tf.Session(
config=session_config if session_config else tf.ConfigProto())
self._sess.run([
tf.initializers.global_variables(),
tf.initializers.local_variables(),
tf.initializers.tables_initializer(),
])
self._savedmodel_dir_path = savedmodel_dir_path
self.activation_type = activation_type
self._use_tqdm = use_tqdm
if memoize_inference_results:
self._get_activations_for_batch = self._get_activations_for_batch_memoized
else:
self._get_activations_for_batch = self._get_activations_for_batch_unmemoized
def __repr__(self):
return ('{} with feed tensors savedmodel_dir_path {} and '
'activation_type {}').format(
type(self).__name__, self._savedmodel_dir_path,
self.activation_type)
def _get_tensor_by_name(self, name):
return self._graph.get_tensor_by_name('{}/{}'.format(
self._model_name_scope, name))
def _get_activations_for_batch_unmemoized(self,
seqs,
custom_tensor_to_retrieve=None):
"""Gets activations for each sequence in list_of_seqs.
[
[activation_1, activation_2, ...] # For list_of_seqs[0]
[activation_1, activation_2, ...] # For list_of_seqs[1]
...
]
In the case that the activations are the normalized probabilities that a
sequence belongs to a class, entry `i, j` of
`inferrer.get_activations(batch)` contains the probability that
sequence `i` is in family `j`.
Args:
seqs: tuple of strings, with characters that are amino
acids.
custom_tensor_to_retrieve: string name for a tensor to retrieve, if unset
uses default for signature.
Returns:
np.array of floats containing the value from fetch_op.
"""
if custom_tensor_to_retrieve:
fetch = self._get_tensor_by_name(custom_tensor_to_retrieve)
else:
fetch = self._fetch
with self._graph.as_default():
return self._sess.run(fetch, {self._sequences: seqs})
@functools.lru_cache(maxsize=None)
def _get_activations_for_batch_memoized(self,
seqs,
custom_tensor_to_retrieve=None):
return self._get_activations_for_batch_unmemoized(
seqs, custom_tensor_to_retrieve)
def get_activations(self, list_of_seqs, custom_tensor_to_retrieve=None):
"""Gets activations where batching may be needed to avoid OOM.
Inputs are strings of amino acids, outputs are activations from the network.
Args:
list_of_seqs: iterable of strings as input for inference.
custom_tensor_to_retrieve: string name for a tensor to retrieve, if unset
uses default for signature.
Returns:
np.array of scipy sparse coo matrices of shape [num_of_seqs_in_batch, ...]
where ... is the shape of the fetch tensor.
"""
np_seqs = np.array(list_of_seqs, dtype=np.object)
if np_seqs.size == 0:
return np.array([], dtype=float)
if len(np_seqs.shape) != 1:
raise ValueError('`list_of_seqs` should be convertible to a numpy vector '
'of strings. Got {}'.format(np_seqs))
logging.debug('Predicting for %d sequences', len(list_of_seqs))
lengths = np.array([len(seq) for seq in np_seqs])
# Sort by reverse length, so that the longest element is first.
# This is because the longest element can cause memory issues, and we'd like
# to fail-fast in this case.
sorter = np.argsort(lengths)[::-1]
# The inverse of a permutation A is the permutation B such that B(A) is the
# identity permutation (a sorted list).
reverser = np.argsort(sorter)
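# Concrete example: lengths [3, 10, 5] give sorter [1, 2, 0] (longest first); applying
# `reverser` to the concatenated results restores the caller's original ordering.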
activation_list = []
batches = np.array_split(np_seqs[sorter],
np.ceil(len(np_seqs) / self.batch_size))
if self._use_tqdm:
batches = tqdm.tqdm(
batches,
position=0,
desc='Annotating batches of sequences',
leave=True,
dynamic_ncols=True)
for batch in batches:
batch_activations = self._get_activations_for_batch(
tuple(batch), custom_tensor_to_retrieve=custom_tensor_to_retrieve)
batch_activations_sparse = [scipy.sparse.coo_matrix(x) for x in batch_activations]
activation_list.append(batch_activations_sparse)
activations = np.concatenate(activation_list, axis=0)[reverser]
return activations
def get_variable(self, variable_name):
"""Gets the value of a variable from the graph.
Args:
variable_name: string name for retrieval. E.g. "vocab_name:0"
Returns:
output from TensorFlow from attempt to retrieve this value.
"""
with self._graph.as_default():
return self._sess.run(self._get_tensor_by_name(variable_name))
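# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original file). The savedmodel path, the
# example sequences, and the "row i, column j = probability of family j"
# reading are assumptions for illustration; the real output semantics depend
# on the exported signature.
def _example_get_activations_usage(savedmodel_dir_path):
  """Minimal sketch: run batched inference and inspect the per-sequence rows."""
  inferrer = Inferrer(savedmodel_dir_path, batch_size=4)
  seqs = ['MKTAYIAKQR', 'GAVLILVW', 'MSTNPKPQRK']
  activations = inferrer.get_activations(seqs)
  for seq, sparse_row in zip(seqs, activations):
    # Each entry is a scipy.sparse.coo_matrix holding the fetch tensor for one
    # sequence; densify it to inspect the values.
    dense = np.asarray(sparse_row.todense()).ravel()
    print(seq, int(dense.argmax()), float(dense.max()))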
def latest_savedmodel_path_from_base_path(base_path):
"""Get the most recent savedmodel from a base directory path."""
protein_export_base_path = os.path | """Add an in-graph inferrer to the active default graph.
Additionally performs in-graph preprocessing, splitting strings, and encoding
residues.
Args:
sequences: A tf.string Tensor representing a batch of sequences with shape
[None].
savedmodel_dir_path: Path to the directory with the SavedModel binary.
signature: Name of the signature to use in `savedmodel_dir_path`. e.g.
'pooled_representation'
name_scope: Name scope to use for the loaded saved model.
Returns:
Output Tensor
Raises:
ValueError if signature does not conform to
('sequence',
'sequence_length') -> output
or if the specified signature is not present. | identifier_body |
inference.py | sequence': one_hots,
'sequence_length': row_lengths,
},
signature=signature,
as_dict=True).values())[0]
def in_graph_inferrer(sequences,
savedmodel_dir_path,
signature,
name_scope='inferrer'):
"""Add an in-graph inferrer to the active default graph.
Additionally performs in-graph preprocessing, splitting strings, and encoding
residues.
Args:
sequences: A tf.string Tensor representing a batch of sequences with shape
[None].
savedmodel_dir_path: Path to the directory with the SavedModel binary.
signature: Name of the signature to use in `savedmodel_dir_path`. e.g.
'pooled_representation'
name_scope: Name scope to use for the loaded saved model.
Returns:
Output Tensor
Raises:
ValueError if signature does not conform to
('sequence',
'sequence_length') -> output
or if the specified signature is not present.
"""
# Add variable to make it easier to refactor with multiple tags in future.
tags = [tf.saved_model.tag_constants.SERVING]
# Tokenization
residues = tf.strings.unicode_split(sequences, 'UTF-8')
# Convert to one-hots and pad.
one_hots, row_lengths = utils.in_graph_residues_to_onehot(residues)
module_spec = hub.saved_model_module.create_module_spec_from_saved_model(
savedmodel_dir_path)
module = hub.Module(module_spec, trainable=False, tags=tags, name=name_scope)
return call_module(module, one_hots, row_lengths, signature)
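# ---------------------------------------------------------------------------
# Added sketch (not part of the original file): the minimal graph/session
# wiring needed to use in_graph_inferrer directly, mirroring what
# Inferrer.__init__ below does. The path and the example sequence are
# placeholders.
def _sketch_in_graph_inferrer_wiring(savedmodel_dir_path):
  graph = tf.Graph()
  with graph.as_default():
    sequences = tf.placeholder(shape=[None], dtype=tf.string, name='sequences')
    fetch = in_graph_inferrer(
        sequences,
        savedmodel_dir_path,
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
    with tf.Session() as sess:
      sess.run([
          tf.initializers.global_variables(),
          tf.initializers.local_variables(),
          tf.initializers.tables_initializer(),
      ])
      return sess.run(fetch, {sequences: ['MKTAYIAKQR']})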
@functools.lru_cache(maxsize=None)
def memoized_inferrer(
savedmodel_dir_path,
activation_type=tf.saved_model.signature_constants
.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
batch_size=16,
use_tqdm=False,
session_config=None,
memoize_inference_results=False,
use_latest_savedmodel=False,
):
"""Alternative constructor for Inferrer that is memoized."""
return Inferrer(
savedmodel_dir_path=savedmodel_dir_path,
activation_type=activation_type,
batch_size=batch_size,
use_tqdm=use_tqdm,
session_config=session_config,
memoize_inference_results=memoize_inference_results,
use_latest_savedmodel=use_latest_savedmodel,
)
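# Added note: because of @functools.lru_cache, two calls with identical
# (hashable) arguments return the very same Inferrer instance, e.g.
#   inferrer_a = memoized_inferrer('/path/to/savedmodel')  # placeholder path
#   inferrer_b = memoized_inferrer('/path/to/savedmodel')
#   assert inferrer_a is inferrer_b
# which avoids re-loading the SavedModel when several callers need one model.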
class Inferrer(object):
"""Uses a SavedModel to provide batched inference."""
def __init__(
self,
savedmodel_dir_path,
activation_type=tf.saved_model.signature_constants
.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
batch_size=64,
use_tqdm=False,
session_config=None,
memoize_inference_results=False,
use_latest_savedmodel=False,
):
"""Construct Inferrer.
Args:
savedmodel_dir_path: path to directory where a SavedModel pb or
pbtxt is stored. The SavedModel must only have one input per signature
and only one output per signature.
activation_type: one of the keys in saved_model.signature_def.keys().
batch_size: batch size to use for individual inference ops.
use_tqdm: Whether to print progress using tqdm.
session_config: tf.ConfigProto for tf.Session creation.
memoize_inference_results: if True, calls to inference.get_activations
will be memoized.
use_latest_savedmodel: If True, the model will be loaded from
latest_savedmodel_path_from_base_path(savedmodel_dir_path).
Raises:
ValueError: if activation_type is not the name of a signature_def in the
SavedModel.
ValueError: if SavedModel.signature_def[activation_type] has an input
other than 'sequence'.
ValueError: if SavedModel.signature_def[activation_type] has more than
one output.
"""
if use_latest_savedmodel:
savedmodel_dir_path = latest_savedmodel_path_from_base_path(
savedmodel_dir_path)
self.batch_size = batch_size
self._graph = tf.Graph()
self._model_name_scope = 'inferrer'
with self._graph.as_default():
self._sequences = tf.placeholder(
shape=[None], dtype=tf.string, name='sequences')
self._fetch = in_graph_inferrer(
self._sequences,
savedmodel_dir_path,
activation_type,
name_scope=self._model_name_scope)
self._sess = tf.Session(
config=session_config if session_config else tf.ConfigProto())
self._sess.run([
tf.initializers.global_variables(),
tf.initializers.local_variables(),
tf.initializers.tables_initializer(),
])
self._savedmodel_dir_path = savedmodel_dir_path
self.activation_type = activation_type
self._use_tqdm = use_tqdm
if memoize_inference_results:
self._get_activations_for_batch = self._get_activations_for_batch_memoized
else:
self._get_activations_for_batch = self._get_activations_for_batch_unmemoized
def __repr__(self):
return ('{} with feed tensors savedmodel_dir_path {} and '
'activation_type {}').format(
type(self).__name__, self._savedmodel_dir_path,
self.activation_type)
def | (self, name):
return self._graph.get_tensor_by_name('{}/{}'.format(
self._model_name_scope, name))
def _get_activations_for_batch_unmemoized(self,
seqs,
custom_tensor_to_retrieve=None):
"""Gets activations for each sequence in list_of_seqs.
[
[activation_1, activation_2, ...] # For list_of_seqs[0]
[activation_1, activation_2, ...] # For list_of_seqs[1]
...
]
In the case that the activations are the normalized probabilities that a
sequence belongs to a class, entry `i, j` of
`inferrer.get_activations(batch)` contains the probability that
sequence `i` is in family `j`.
Args:
seqs: tuple of strings, with characters that are amino
acids.
custom_tensor_to_retrieve: string name for a tensor to retrieve, if unset
uses default for signature.
Returns:
np.array of floats containing the value from fetch_op.
"""
if custom_tensor_to_retrieve:
fetch = self._get_tensor_by_name(custom_tensor_to_retrieve)
else:
fetch = self._fetch
with self._graph.as_default():
return self._sess.run(fetch, {self._sequences: seqs})
@functools.lru_cache(maxsize=None)
def _get_activations_for_batch_memoized(self,
seqs,
custom_tensor_to_retrieve=None):
return self._get_activations_for_batch_unmemoized(
seqs, custom_tensor_to_retrieve)
def get_activations(self, list_of_seqs, custom_tensor_to_retrieve=None):
"""Gets activations where batching may be needed to avoid OOM.
Inputs are strings of amino acids, outputs are activations from the network.
Args:
list_of_seqs: iterable of strings as input for inference.
custom_tensor_to_retrieve: string name for a tensor to retrieve, if unset
uses default for signature.
Returns:
np.array of scipy sparse coo matrices of shape [num_of_seqs_in_batch, ...]
where ... is the shape of the fetch tensor.
"""
np_seqs = np.array(list_of_seqs, dtype=np.object)
if np_seqs.size == 0:
return np.array([], dtype=float)
if len(np_seqs.shape) != 1:
raise ValueError('`list_of_seqs` should be convertible to a numpy vector '
'of strings. Got {}'.format(np_seqs))
logging.debug('Predicting for %d sequences', len(list_of_seqs))
lengths = np.array([len(seq) for seq in np_seqs])
# Sort by reverse length, so that the longest element is first.
# This is because the longest element can cause memory issues, and we'd like
# to fail-fast in this case.
sorter = np.argsort(lengths)[::-1]
    # The inverse of a permutation A is the permutation B such that B(A) is
    # the identity permutation (a sorted list).
reverser = np.argsort(sorter)
activation_list = []
batches = np.array_split(np_seqs[sorter],
np.ceil(len(np_seqs) / self.batch_size))
if self._use_tqdm:
batches = tqdm.tqdm(
batches,
position=0,
desc='Annotating batches of sequences',
leave=True,
dynamic_ncols=True)
for batch in batches:
batch_activations = self._get_activations_for_batch(
tuple(batch), custom_tensor_to_retrieve=custom_tensor_to_retrieve)
batch_activations_sparse = [scipy.sparse.coo_matrix(x) for x in batch_activations]
activation_list.append(batch_activations_sparse)
activations = np.concatenate(activation_list, axis=0)[reverser]
return activations
def get_variable(self, variable_name):
"""Gets the value of a variable from the graph.
Args:
variable_name: string name for retrieval. E.g. "vocab_name:0"
Returns:
output from TensorFlow from attempt to retrieve this value.
"""
with self._graph.as_default():
return self._sess.run(self._get_tensor_by_name(variable_name))
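  # Added illustration: get_variable fetches arbitrary graph tensors by name
  # under the 'inferrer' name scope, e.g. the vocabulary tensor suggested by
  # the docstring above:
  #   vocab = inferrer.get_variable('vocab_name:0')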
def latest_savedmodel_path_from_base_path(base_path):
"""Get the most recent savedmodel from a base directory path."""
protein_export_base_path = os.path | _get_tensor_by_name | identifier_name |
inference.py |
"""Compute activations for trained model from input sequences."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import functools
import gzip
import io
import itertools
import os
from typing import Dict, FrozenSet, Iterator, List, Text, Tuple
from absl import logging
import numpy as np
import pandas as pd
import utils
import six
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
import tqdm
import scipy.sparse
def call_module(module, one_hots, row_lengths, signature):
"""Call a tf_hub.Module using the standard blundell signature.
This expects that `module` has a signature named `signature` which conforms to
('sequence',
'sequence_length') -> output
To use an existing SavedModel
file you may want to create a module_spec with
`tensorflow_hub.saved_model_module.create_module_spec_from_saved_model`.
Args:
module: a tf_hub.Module to call.
one_hots: a rank 3 tensor with one-hot encoded sequences of residues.
row_lengths: a rank 1 tensor with sequence lengths.
signature: the graph signature to validate and call.
Returns:
The output tensor of `module`.
"""
if signature not in module.get_signature_names():
raise ValueError('signature not in ' +
six.ensure_str(str(module.get_signature_names())) +
'. Was ' + six.ensure_str(signature) + '.')
inputs = module.get_input_info_dict(signature=signature)
expected_inputs = [
'sequence',
'sequence_length',
]
if set(inputs.keys()) != set(expected_inputs):
raise ValueError(
'The signature_def does not have the expected inputs. Please '
'reconfigure your saved model to only export signatures '
'with sequence and length inputs. (Inputs were %s, expected %s)' %
(str(inputs), str(expected_inputs)))
outputs = module.get_output_info_dict(signature=signature)
if len(outputs) > 1:
raise ValueError('The signature_def given has more than one output. Please '
'reconfigure your saved model to only export signatures '
'with one output. (Outputs were %s)' % str(outputs))
return list(
module({
'sequence': one_hots,
'sequence_length': row_lengths,
},
signature=signature,
as_dict=True).values())[0]
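# Added note (not in the original file): per the docstring above, `one_hots` is
# a rank-3 tensor of one-hot encoded residues -- presumably
# [batch, padded_length, vocabulary] -- and `row_lengths` is a rank-1 tensor of
# the unpadded sequence lengths. The named signature must take exactly
# {'sequence', 'sequence_length'} and produce a single output, otherwise
# call_module raises ValueError.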
def in_graph_inferrer(sequences,
savedmodel_dir_path,
signature,
name_scope='inferrer'):
"""Add an in-graph inferrer to the active default graph.
Additionally performs in-graph preprocessing, splitting strings, and encoding
residues.
Args:
sequences: A tf.string Tensor representing a batch of sequences with shape
[None].
savedmodel_dir_path: Path to the directory with the SavedModel binary.
signature: Name of the signature to use in `savedmodel_dir_path`. e.g.
'pooled_representation'
name_scope: Name scope to use for the loaded saved model.
Returns:
Output Tensor
Raises:
ValueError if signature does not conform to
('sequence',
'sequence_length') -> output
or if the specified signature is not present.
"""
# Add variable to make it easier to refactor with multiple tags in future.
tags = [tf.saved_model.tag_constants.SERVING]
# Tokenization
residues = tf.strings.unicode_split(sequences, 'UTF-8')
# Convert to one-hots and pad.
one_hots, row_lengths = utils.in_graph_residues_to_onehot(residues)
module_spec = hub.saved_model_module.create_module_spec_from_saved_model(
savedmodel_dir_path)
module = hub.Module(module_spec, trainable=False, tags=tags, name=name_scope)
return call_module(module, one_hots, row_lengths, signature)
@functools.lru_cache(maxsize=None)
def memoized_inferrer(
savedmodel_dir_path,
activation_type=tf.saved_model.signature_constants
.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
batch_size=16,
use_tqdm=False,
session_config=None,
memoize_inference_results=False,
use_latest_savedmodel=False,
):
"""Alternative constructor for Inferrer that is memoized."""
return Inferrer(
savedmodel_dir_path=savedmodel_dir_path,
activation_type=activation_type,
batch_size=batch_size,
use_tqdm=use_tqdm,
session_config=session_config,
memoize_inference_results=memoize_inference_results,
use_latest_savedmodel=use_latest_savedmodel,
)
class Inferrer(object):
"""Uses a SavedModel to provide batched inference."""
def __init__(
self,
savedmodel_dir_path,
activation_type=tf.saved_model.signature_constants
.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
batch_size=64,
use_tqdm=False,
session_config=None,
memoize_inference_results=False,
use_latest_savedmodel=False,
):
"""Construct Inferrer.
Args:
savedmodel_dir_path: path to directory where a SavedModel pb or
pbtxt is stored. The SavedModel must only have one input per signature
and only one output per signature.
activation_type: one of the keys in saved_model.signature_def.keys().
batch_size: batch size to use for individual inference ops.
use_tqdm: Whether to print progress using tqdm.
session_config: tf.ConfigProto for tf.Session creation.
memoize_inference_results: if True, calls to inference.get_activations
will be memoized.
use_latest_savedmodel: If True, the model will be loaded from
latest_savedmodel_path_from_base_path(savedmodel_dir_path).
Raises:
ValueError: if activation_type is not the name of a signature_def in the
SavedModel.
ValueError: if SavedModel.signature_def[activation_type] has an input
other than 'sequence'.
ValueError: if SavedModel.signature_def[activation_type] has more than
one output.
"""
if use_latest_savedmodel:
savedmodel_dir_path = latest_savedmodel_path_from_base_path(
savedmodel_dir_path)
self.batch_size = batch_size
self._graph = tf.Graph()
self._model_name_scope = 'inferrer'
with self._graph.as_default():
self._sequences = tf.placeholder(
shape=[None], dtype=tf.string, name='sequences')
self._fetch = in_graph_inferrer(
self._sequences,
savedmodel_dir_path,
activation_type,
name_scope=self._model_name_scope)
self._sess = tf.Session(
config=session_config if session_config else tf.ConfigProto())
self._sess.run([
tf.initializers.global_variables(),
tf.initializers.local_variables(),
tf.initializers.tables_initializer(),
])
self._savedmodel_dir_path = savedmodel_dir_path
self.activation_type = activation_type
self._use_tqdm = use_tqdm
if memoize_inference_results:
self._get_activations_for_batch = self._get_activations_for_batch_memoized
else:
self._get_activations_for_batch = self._get_activations_for_batch_unmemoized
def __repr__(self):
return ('{} with feed tensors savedmodel_dir_path {} and '
'activation_type {}').format(
type(self).__name__, self._savedmodel_dir_path,
self.activation_type)
def _get_tensor_by_name(self, name):
return self._graph.get_tensor_by_name('{}/{}'.format(
self._model_name_scope, name))
def _get_activations_for_batch_unmemoized(self,
seqs,
custom_tensor_to_retrieve=None):
"""Gets activations for each sequence in list_of_seqs.
[
[activation_1, activation_2, ...] # For list_of_seqs[0]
[activation_1, activation_2, ...] # For list_of_seqs[1]
...
]
In the case that the activations are the normalized probabilities that a
sequence belongs to a class, entry `i, j` of
`inferrer.get_activations(batch)` contains the probability that
sequence `i` is in family `j`.
Args:
seqs: tuple of strings, with characters that are amino
acids.
custom_tensor_to_retrieve: string name for a tensor to retrieve, if unset
uses default for signature.
Returns:
np.array of floats containing the value from fetch_op.
"""
if custom_tensor_to_retrieve:
fetch = self._get_tensor_by_name(custom_tensor_to_retrieve)
else:
fetch = self._fetch
with self._graph.as_default():
return self._sess.run(fetch, {self._sequences: seqs})
@functools.lru_cache(maxsize=None)
def _get_activations_for_batch_memoized(self,
seqs,
custom_tensor_to_retrieve=None):
return self._get_activations_for_batch_unmemoized(
seqs, custom_tensor_to_retrieve)
def get_activations(self, list_of_seqs, custom_tensor_to_retrieve=None):
"""Gets activations where batching may be needed to avoid OOM.
Inputs are strings of amino acids, outputs are activations | # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | random_line_split |
|
inference.py | .
Additionally performs in-graph preprocessing, splitting strings, and encoding
residues.
Args:
sequences: A tf.string Tensor representing a batch of sequences with shape
[None].
savedmodel_dir_path: Path to the directory with the SavedModel binary.
signature: Name of the signature to use in `savedmodel_dir_path`. e.g.
'pooled_representation'
name_scope: Name scope to use for the loaded saved model.
Returns:
Output Tensor
Raises:
ValueError if signature does not conform to
('sequence',
'sequence_length') -> output
or if the specified signature is not present.
"""
# Add variable to make it easier to refactor with multiple tags in future.
tags = [tf.saved_model.tag_constants.SERVING]
# Tokenization
residues = tf.strings.unicode_split(sequences, 'UTF-8')
# Convert to one-hots and pad.
one_hots, row_lengths = utils.in_graph_residues_to_onehot(residues)
module_spec = hub.saved_model_module.create_module_spec_from_saved_model(
savedmodel_dir_path)
module = hub.Module(module_spec, trainable=False, tags=tags, name=name_scope)
return call_module(module, one_hots, row_lengths, signature)
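# ---------------------------------------------------------------------------
# Added sketch (not part of the original file) of the tokenization step used
# above. utils.in_graph_residues_to_onehot is not shown in this excerpt, so
# only the string-splitting half is illustrated; the RaggedTensor methods
# assume a TF >= 1.14 / tf.compat.v1 environment.
def _sketch_sequence_splitting():
  graph = tf.Graph()
  with graph.as_default():
    seqs = tf.constant(['MKTAYIAKQR', 'GAVL'])
    residues = tf.strings.unicode_split(seqs, 'UTF-8')  # RaggedTensor of chars
    row_lengths = residues.row_lengths()                 # -> [10, 4]
    padded = residues.to_tensor(default_value='')        # dense, padded strings
    with tf.Session() as sess:
      return sess.run([padded, row_lengths])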
@functools.lru_cache(maxsize=None)
def memoized_inferrer(
savedmodel_dir_path,
activation_type=tf.saved_model.signature_constants
.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
batch_size=16,
use_tqdm=False,
session_config=None,
memoize_inference_results=False,
use_latest_savedmodel=False,
):
"""Alternative constructor for Inferrer that is memoized."""
return Inferrer(
savedmodel_dir_path=savedmodel_dir_path,
activation_type=activation_type,
batch_size=batch_size,
use_tqdm=use_tqdm,
session_config=session_config,
memoize_inference_results=memoize_inference_results,
use_latest_savedmodel=use_latest_savedmodel,
)
class Inferrer(object):
"""Uses a SavedModel to provide batched inference."""
def __init__(
self,
savedmodel_dir_path,
activation_type=tf.saved_model.signature_constants
.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
batch_size=64,
use_tqdm=False,
session_config=None,
memoize_inference_results=False,
use_latest_savedmodel=False,
):
"""Construct Inferrer.
Args:
savedmodel_dir_path: path to directory where a SavedModel pb or
pbtxt is stored. The SavedModel must only have one input per signature
and only one output per signature.
activation_type: one of the keys in saved_model.signature_def.keys().
batch_size: batch size to use for individual inference ops.
use_tqdm: Whether to print progress using tqdm.
session_config: tf.ConfigProto for tf.Session creation.
memoize_inference_results: if True, calls to inference.get_activations
will be memoized.
use_latest_savedmodel: If True, the model will be loaded from
latest_savedmodel_path_from_base_path(savedmodel_dir_path).
Raises:
ValueError: if activation_type is not the name of a signature_def in the
SavedModel.
ValueError: if SavedModel.signature_def[activation_type] has an input
other than 'sequence'.
ValueError: if SavedModel.signature_def[activation_type] has more than
one output.
"""
if use_latest_savedmodel:
savedmodel_dir_path = latest_savedmodel_path_from_base_path(
savedmodel_dir_path)
self.batch_size = batch_size
self._graph = tf.Graph()
self._model_name_scope = 'inferrer'
with self._graph.as_default():
self._sequences = tf.placeholder(
shape=[None], dtype=tf.string, name='sequences')
self._fetch = in_graph_inferrer(
self._sequences,
savedmodel_dir_path,
activation_type,
name_scope=self._model_name_scope)
self._sess = tf.Session(
config=session_config if session_config else tf.ConfigProto())
self._sess.run([
tf.initializers.global_variables(),
tf.initializers.local_variables(),
tf.initializers.tables_initializer(),
])
self._savedmodel_dir_path = savedmodel_dir_path
self.activation_type = activation_type
self._use_tqdm = use_tqdm
if memoize_inference_results:
self._get_activations_for_batch = self._get_activations_for_batch_memoized
else:
self._get_activations_for_batch = self._get_activations_for_batch_unmemoized
def __repr__(self):
return ('{} with feed tensors savedmodel_dir_path {} and '
'activation_type {}').format(
type(self).__name__, self._savedmodel_dir_path,
self.activation_type)
def _get_tensor_by_name(self, name):
return self._graph.get_tensor_by_name('{}/{}'.format(
self._model_name_scope, name))
def _get_activations_for_batch_unmemoized(self,
seqs,
custom_tensor_to_retrieve=None):
"""Gets activations for each sequence in list_of_seqs.
[
[activation_1, activation_2, ...] # For list_of_seqs[0]
[activation_1, activation_2, ...] # For list_of_seqs[1]
...
]
In the case that the activations are the normalized probabilities that a
sequence belongs to a class, entry `i, j` of
`inferrer.get_activations(batch)` contains the probability that
sequence `i` is in family `j`.
Args:
seqs: tuple of strings, with characters that are amino
acids.
custom_tensor_to_retrieve: string name for a tensor to retrieve, if unset
uses default for signature.
Returns:
np.array of floats containing the value from fetch_op.
"""
if custom_tensor_to_retrieve:
fetch = self._get_tensor_by_name(custom_tensor_to_retrieve)
else:
fetch = self._fetch
with self._graph.as_default():
return self._sess.run(fetch, {self._sequences: seqs})
@functools.lru_cache(maxsize=None)
def _get_activations_for_batch_memoized(self,
seqs,
custom_tensor_to_retrieve=None):
return self._get_activations_for_batch_unmemoized(
seqs, custom_tensor_to_retrieve)
def get_activations(self, list_of_seqs, custom_tensor_to_retrieve=None):
"""Gets activations where batching may be needed to avoid OOM.
Inputs are strings of amino acids, outputs are activations from the network.
Args:
list_of_seqs: iterable of strings as input for inference.
custom_tensor_to_retrieve: string name for a tensor to retrieve, if unset
uses default for signature.
Returns:
np.array of scipy sparse coo matrices of shape [num_of_seqs_in_batch, ...]
where ... is the shape of the fetch tensor.
"""
np_seqs = np.array(list_of_seqs, dtype=np.object)
if np_seqs.size == 0:
return np.array([], dtype=float)
if len(np_seqs.shape) != 1:
raise ValueError('`list_of_seqs` should be convertible to a numpy vector '
'of strings. Got {}'.format(np_seqs))
logging.debug('Predicting for %d sequences', len(list_of_seqs))
lengths = np.array([len(seq) for seq in np_seqs])
# Sort by reverse length, so that the longest element is first.
# This is because the longest element can cause memory issues, and we'd like
# to fail-fast in this case.
sorter = np.argsort(lengths)[::-1]
    # The inverse of a permutation A is the permutation B such that B(A) is
    # the identity permutation (a sorted list).
reverser = np.argsort(sorter)
activation_list = []
batches = np.array_split(np_seqs[sorter],
np.ceil(len(np_seqs) / self.batch_size))
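    # Added worked example: with 17 sequences and batch_size=4,
    # np.ceil(17 / 4) = 5.0 and np.array_split yields 5 batches of sizes
    # [4, 4, 3, 3, 3]; array_split accepts the float section count.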
if self._use_tqdm:
batches = tqdm.tqdm(
batches,
position=0,
desc='Annotating batches of sequences',
leave=True,
dynamic_ncols=True)
for batch in batches:
batch_activations = self._get_activations_for_batch(
tuple(batch), custom_tensor_to_retrieve=custom_tensor_to_retrieve)
batch_activations_sparse = [scipy.sparse.coo_matrix(x) for x in batch_activations]
activation_list.append(batch_activations_sparse)
activations = np.concatenate(activation_list, axis=0)[reverser]
return activations
def get_variable(self, variable_name):
"""Gets the value of a variable from the graph.
Args:
variable_name: string name for retrieval. E.g. "vocab_name:0"
Returns:
output from TensorFlow from attempt to retrieve this value.
"""
with self._graph.as_default():
return self._sess.run(self._get_tensor_by_name(variable_name))
def latest_savedmodel_path_from_base_path(base_path):
"""Get the most recent savedmodel from a base directory path."""
protein_export_base_path = os.path.join(base_path, 'export/protein_exporter')
suffixes = [
x for x in tf.io.gfile.listdir(protein_export_base_path)
if 'temp-' not in x
]
if not suffixes:
| raise ValueError('No SavedModels found in %s' % protein_export_base_path) | conditional_block |
|
session.go | .
type Session struct {
*zoo.Zoo
id int
context context.Context
log *log.Entry
started time.Time
manager SessionManager
session netproto.Session
pumpErrors chan error
inactivityTimer *time.Timer
inactivityTimeout time.Duration
localIdentity *identity.ParsedIdentity
caCert *x509.Certificate
tlsConfig *tls.Config
inter *network.NetworkInterface
closedCallbacks []func(s *Session, err error)
childContext context.Context
childContextCancel context.CancelFunc
streamHandlersMtx sync.Mutex
streamHandlers map[uint32]StreamHandler
streamHandlerBuilders StreamHandlerBuilders
}
// SessionReadyDetails contains information about the session becoming ready.
type SessionReadyDetails struct {
// Session is the session that became ready.
Session *Session
// InitiatedTimestamp is when this session was initiated.
InitiatedTimestamp time.Time
// PeerIdentity is the parsed peer identity.
PeerIdentity *identity.ParsedIdentity
}
// SessionManager manages a session.
type SessionManager interface {
// OnSessionReady is called when the session is finished initializing.
// Returning an error will terminate the session with the error.
OnSessionReady(details *SessionReadyDetails) error
// OnSessionClosed is called when a session is closed.
OnSessionClosed(sess *Session, err error)
}
// SessionConfig contains arguments to build a session.
type SessionConfig struct {
// Manager is the session manager.
Manager SessionManager
// Context, when cancelled will close the session.
Context context.Context
// Session to wrap.
Session netproto.Session
// Stream handler builders
HandlerBuilders StreamHandlerBuilders
// Identity of the local node
LocalIdentity *identity.ParsedIdentity
// CaCertificate is the CA cert.
CaCertificate *x509.Certificate
// TLSConfig is the local TLS config.
TLSConfig *tls.Config
}
// NewSession builds a new session.
func NewSession(config SessionConfig) (*Session, error) {
s := &Session{
Zoo: zoo.NewZoo(),
id: sessionIdCtr,
context: config.Context,
session: config.Session,
manager: config.Manager,
localIdentity: config.LocalIdentity,
caCert: config.CaCertificate,
tlsConfig: config.TLSConfig,
streamHandlerBuilders: config.HandlerBuilders,
started: time.Now(),
inactivityTimer: time.NewTimer(handshakeTimeout),
inactivityTimeout: handshakeTimeout,
pumpErrors: make(chan error, 2),
streamHandlers: make(map[uint32]StreamHandler),
log: log.WithField("session", sessionIdCtr),
}
sessionIdCtr++
if config.LocalIdentity == nil || config.LocalIdentity.GetPrivateKey() == nil {
return nil, errors.New("local identity must be set with a private key")
}
localCertChain, err := config.LocalIdentity.ParseCertificates()
if err != nil {
return nil, err
}
if config.CaCertificate == nil {
return nil, errors.New("ca certificate must be given")
}
if err := localCertChain.Validate(config.CaCertificate); err != nil {
return nil, err
}
s.childContext, s.childContextCancel = context.WithCancel(config.Context)
s.StartPump(s.acceptStreamPump)
go s.manageCloseConditions()
return s, nil
}
// IsInitiator returns if the session was initiated by the local host.
func (s *Session) IsInitiator() bool {
return s.session.Initiator()
}
// GetId gets the incremented ID of this session
func (s *Session) GetId() int {
return s.id
}
// GetStartTime returns the time the session started.
func (s *Session) GetStartTime() time.Time {
return s.started
}
// GetContext returns the context for this session.
func (s *Session) GetContext() context.Context {
return s.childContext
}
// SetStartTime overrides the built-in start time.
func (s *Session) SetStartTime(t time.Time) {
s.started = t
}
// CloseWithErr forces the session to close early.
func (s *Session) CloseWithErr(err error) {
select {
case s.pumpErrors <- err:
default:
}
}
// GetManager returns the SessionManager for this session
func (s *Session) GetManager() SessionManager {
return s.manager
}
// ResetInactivityTimeout resets the timeout.
// If zero is passed, maintains last duration.
func (s *Session) ResetInactivityTimeout(dur time.Duration) {
if dur == 0 {
dur = s.inactivityTimeout
} else {
s.inactivityTimeout = dur
}
s.inactivityTimer.Reset(dur)
}
// OpenStream attempts to open a stream with a handler.
func (s *Session) OpenStream(streamType StreamType) (handler StreamHandler, err error) {
handlerBuilder, ok := s.streamHandlerBuilders[streamType]
if !ok |
l := log.WithField("streamType", streamType)
stream, err := s.session.OpenStream()
if err != nil {
return nil, err
}
streamId := stream.ID()
l = l.WithField("stream", streamId)
l.Debug("Stream opened (by us)")
rw := packet.NewPacketReadWriter(stream)
err = rw.WriteProtoPacket(&StreamInit{StreamType: uint32(streamType)})
if err != nil {
return nil, err
}
shConfig := s.buildBaseStreamHandlerConfig(true)
shConfig.Log = s.log.WithField("stream", uint32(streamId))
shConfig.Session = s
shConfig.NetSession = s.session
shConfig.PacketRw = rw
shConfig.Stream = stream
handler, err = handlerBuilder.BuildHandler(s.context, shConfig)
if err != nil {
return nil, err
}
go s.runStreamHandler(handler, stream)
l.Debug("Stream initialized")
return handler, nil
}
func (s *Session) buildBaseStreamHandlerConfig(initiator bool) *StreamHandlerConfig {
return &StreamHandlerConfig{
Initiator: initiator,
Session: s,
NetSession: s.session,
LocalIdentity: s.localIdentity,
CaCert: s.caCert,
TLSConfig: s.tlsConfig,
}
}
// handleIncomingStream handles an incoming stream.
func (s *Session) handleIncomingStream(stream netproto.Stream) error {
l := s.log.WithField("stream", stream.ID())
l.Debug("Stream opened")
rw := packet.NewPacketReadWriter(stream)
si := &StreamInit{}
_, _, err := rw.ReadPacket(func(packetType packet.PacketType) (packet.ProtoPacket, error) {
if packetType != 1 {
return nil, fmt.Errorf("Expected packet type 1, got %d", packetType)
}
return si, nil
})
if err != nil {
return err
}
handlerBuilder, ok := s.streamHandlerBuilders[StreamType(si.StreamType)]
if !ok {
return fmt.Errorf("Unknown stream type: %d", si.StreamType)
}
shConfig := s.buildBaseStreamHandlerConfig(false)
shConfig.Log = l
shConfig.Session = s
shConfig.PacketRw = rw
shConfig.Stream = stream
handler, err := handlerBuilder.BuildHandler(s.context, shConfig)
if err != nil {
return err
}
l.WithField("streamType", si.StreamType).Debug("Stream initialized")
go s.runStreamHandler(handler, stream)
return nil
}
// runStreamHandler manages a stream handler.
func (s *Session) runStreamHandler(handler StreamHandler, stream netproto.Stream) {
id := stream.ID()
s.streamHandlersMtx.Lock()
s.streamHandlers[uint32(id)] = handler
s.streamHandlersMtx.Unlock()
ctx, ctxCancel := context.WithCancel(s.childContext)
defer ctxCancel()
err := handler.Handle(ctx)
l := s.log.WithField("stream", uint32(id))
select {
case <-s.childContext.Done():
return // Don't print or bother removing the stream handler when we're done with the session.
default:
}
if err != nil && err != io.EOF && err != context.Canceled {
l.WithError(err).Warn("Stream closed with error")
} else {
l.Debug("Stream closed")
}
s.streamHandlersMtx.Lock()
delete(s.streamHandlers, uint32(id))
s.streamHandlersMtx.Unlock()
stream.Close()
}
// StartPump runs pump in a goroutine; when pump returns, its result is pushed
// onto pumpErrors, which ends the session.
func (s *Session) StartPump(pump func() error) {
go func() {
select {
case s.pumpErrors <- pump():
default:
}
}()
}
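// Added note: pumpErrors is a buffered channel with capacity 2 and the send in
// StartPump is non-blocking (select with default), so only the first couple of
// pump results are recorded; later pump goroutines drop their result and exit
// rather than leaking while blocked on the channel.
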
// GetInterface attempts to determine the interface this session is running on.
func (s *Session) GetInterface() *network.NetworkInterface {
if s.inter != nil {
return s.inter
}
remAddr := s.session.RemoteAddr()
uadr, ok := remAddr.(*net.UDPAddr)
if !ok {
return nil
}
inter, _ := network.FromAddr(uadr.IP)
s.inter = inter
return inter
}
// GetLocalAddr returns the local address.
func (s *Session) GetLocalAddr() net.Addr {
return s.session.LocalAddr()
}
// GetRemoteAddr returns the remote address.
func (s *Session) GetRemoteAddr() net.Addr | {
return nil, fmt.Errorf("Unknown stream type: %d", streamType)
} | conditional_block |
session.go | peer.
type Session struct {
*zoo.Zoo
id int
context context.Context
log *log.Entry
started time.Time
manager SessionManager
session netproto.Session
pumpErrors chan error
inactivityTimer *time.Timer
inactivityTimeout time.Duration
localIdentity *identity.ParsedIdentity
caCert *x509.Certificate
tlsConfig *tls.Config
inter *network.NetworkInterface
closedCallbacks []func(s *Session, err error)
childContext context.Context
childContextCancel context.CancelFunc
streamHandlersMtx sync.Mutex
streamHandlers map[uint32]StreamHandler
streamHandlerBuilders StreamHandlerBuilders
}
// SessionReadyDetails contains information about the session becoming ready.
type SessionReadyDetails struct {
// Session is the session that became ready.
Session *Session
// InitiatedTimestamp is when this session was initiated.
InitiatedTimestamp time.Time
// PeerIdentity is the parsed peer identity.
PeerIdentity *identity.ParsedIdentity
}
// SessionManager manages a session.
type SessionManager interface {
// OnSessionReady is called when the session is finished initializing.
// Returning an error will terminate the session with the error.
OnSessionReady(details *SessionReadyDetails) error
// OnSessionClosed is called when a session is closed.
OnSessionClosed(sess *Session, err error)
}
// SessionConfig contains arguments to build a session.
type SessionConfig struct {
// Manager is the session manager.
Manager SessionManager
// Context, when cancelled will close the session.
Context context.Context
// Session to wrap.
Session netproto.Session
// Stream handler builders
HandlerBuilders StreamHandlerBuilders
// Identity of the local node
LocalIdentity *identity.ParsedIdentity
// CaCertificate is the CA cert.
CaCertificate *x509.Certificate
// TLSConfig is the local TLS config.
TLSConfig *tls.Config
}
// NewSession builds a new session.
func NewSession(config SessionConfig) (*Session, error) {
s := &Session{
Zoo: zoo.NewZoo(),
id: sessionIdCtr,
context: config.Context,
session: config.Session,
manager: config.Manager,
localIdentity: config.LocalIdentity,
caCert: config.CaCertificate,
tlsConfig: config.TLSConfig,
streamHandlerBuilders: config.HandlerBuilders,
started: time.Now(),
inactivityTimer: time.NewTimer(handshakeTimeout),
inactivityTimeout: handshakeTimeout,
pumpErrors: make(chan error, 2),
streamHandlers: make(map[uint32]StreamHandler),
log: log.WithField("session", sessionIdCtr),
}
sessionIdCtr++
if config.LocalIdentity == nil || config.LocalIdentity.GetPrivateKey() == nil {
return nil, errors.New("local identity must be set with a private key")
}
localCertChain, err := config.LocalIdentity.ParseCertificates()
if err != nil {
return nil, err
}
if config.CaCertificate == nil {
return nil, errors.New("ca certificate must be given")
}
if err := localCertChain.Validate(config.CaCertificate); err != nil {
return nil, err
}
s.childContext, s.childContextCancel = context.WithCancel(config.Context)
s.StartPump(s.acceptStreamPump)
go s.manageCloseConditions()
return s, nil
}
// IsInitiator returns if the session was initiated by the local host.
func (s *Session) IsInitiator() bool {
return s.session.Initiator()
}
// GetId gets the incremented ID of this session
func (s *Session) GetId() int {
return s.id
}
// GetStartTime returns the time the session started.
func (s *Session) GetStartTime() time.Time {
return s.started
}
// GetContext returns the context for this session.
func (s *Session) GetContext() context.Context {
return s.childContext
}
// SetStartTime overrides the built-in start time.
func (s *Session) SetStartTime(t time.Time) {
s.started = t
}
// CloseWithErr forces the session to close early.
func (s *Session) CloseWithErr(err error) {
select {
case s.pumpErrors <- err:
default:
}
}
// GetManager returns the SessionManager for this session
func (s *Session) GetManager() SessionManager {
return s.manager
}
// ResetInactivityTimeout resets the timeout.
// If zero is passed, maintains last duration.
func (s *Session) ResetInactivityTimeout(dur time.Duration) {
if dur == 0 {
dur = s.inactivityTimeout
} else {
s.inactivityTimeout = dur
}
s.inactivityTimer.Reset(dur)
}
// OpenStream attempts to open a stream with a handler.
func (s *Session) OpenStream(streamType StreamType) (handler StreamHandler, err error) {
handlerBuilder, ok := s.streamHandlerBuilders[streamType]
if !ok {
return nil, fmt.Errorf("Unknown stream type: %d", streamType)
}
l := log.WithField("streamType", streamType)
stream, err := s.session.OpenStream()
if err != nil {
return nil, err
}
streamId := stream.ID()
l = l.WithField("stream", streamId)
l.Debug("Stream opened (by us)")
rw := packet.NewPacketReadWriter(stream)
err = rw.WriteProtoPacket(&StreamInit{StreamType: uint32(streamType)})
if err != nil {
return nil, err
}
shConfig := s.buildBaseStreamHandlerConfig(true)
shConfig.Log = s.log.WithField("stream", uint32(streamId))
shConfig.Session = s
shConfig.NetSession = s.session
shConfig.PacketRw = rw
shConfig.Stream = stream
handler, err = handlerBuilder.BuildHandler(s.context, shConfig)
if err != nil {
return nil, err
}
go s.runStreamHandler(handler, stream)
l.Debug("Stream initialized")
return handler, nil
}
func (s *Session) buildBaseStreamHandlerConfig(initiator bool) *StreamHandlerConfig {
return &StreamHandlerConfig{
Initiator: initiator,
Session: s,
NetSession: s.session,
LocalIdentity: s.localIdentity,
CaCert: s.caCert,
TLSConfig: s.tlsConfig,
}
}
// handleIncomingStream handles an incoming stream.
func (s *Session) handleIncomingStream(stream netproto.Stream) error {
l := s.log.WithField("stream", stream.ID())
l.Debug("Stream opened")
rw := packet.NewPacketReadWriter(stream)
si := &StreamInit{}
_, _, err := rw.ReadPacket(func(packetType packet.PacketType) (packet.ProtoPacket, error) {
if packetType != 1 {
return nil, fmt.Errorf("Expected packet type 1, got %d", packetType)
}
return si, nil
})
if err != nil {
return err
}
handlerBuilder, ok := s.streamHandlerBuilders[StreamType(si.StreamType)]
if !ok {
return fmt.Errorf("Unknown stream type: %d", si.StreamType)
}
shConfig := s.buildBaseStreamHandlerConfig(false)
shConfig.Log = l
shConfig.Session = s
shConfig.PacketRw = rw
shConfig.Stream = stream
handler, err := handlerBuilder.BuildHandler(s.context, shConfig)
if err != nil {
return err
}
l.WithField("streamType", si.StreamType).Debug("Stream initialized")
go s.runStreamHandler(handler, stream)
return nil
}
// runStreamHandler manages a stream handler.
func (s *Session) runStreamHandler(handler StreamHandler, stream netproto.Stream) {
id := stream.ID()
s.streamHandlersMtx.Lock()
s.streamHandlers[uint32(id)] = handler
s.streamHandlersMtx.Unlock()
ctx, ctxCancel := context.WithCancel(s.childContext)
defer ctxCancel()
err := handler.Handle(ctx)
l := s.log.WithField("stream", uint32(id))
select {
case <-s.childContext.Done():
return // Don't print or bother removing the stream handler when we're done with the session.
default:
}
if err != nil && err != io.EOF && err != context.Canceled {
l.WithError(err).Warn("Stream closed with error")
} else {
l.Debug("Stream closed")
}
s.streamHandlersMtx.Lock()
delete(s.streamHandlers, uint32(id))
s.streamHandlersMtx.Unlock()
stream.Close()
}
// StartPump runs pump in a goroutine; when pump returns, its result is pushed
// onto pumpErrors, which ends the session.
func (s *Session) StartPump(pump func() error) {
go func() {
select {
case s.pumpErrors <- pump():
default:
}
}()
}
// GetInterface attempts to determine the interface this session is running on.
func (s *Session) GetInterface() *network.NetworkInterface {
if s.inter != nil {
return s.inter
}
remAddr := s.session.RemoteAddr()
uadr, ok := remAddr.(*net.UDPAddr)
if !ok {
return nil
}
inter, _ := network.FromAddr(uadr.IP)
s.inter = inter
return inter | // GetLocalAddr returns the local address.
func (s *Session) GetLocalAddr() net.Addr {
return s.session.LocalAddr()
}
// GetRemoteAddr returns the remote address.
func (s *Session) GetRemoteAddr() net.Addr {
return | }
| random_line_split |
session.go | .
type Session struct {
*zoo.Zoo
id int
context context.Context
log *log.Entry
started time.Time
manager SessionManager
session netproto.Session
pumpErrors chan error
inactivityTimer *time.Timer
inactivityTimeout time.Duration
localIdentity *identity.ParsedIdentity
caCert *x509.Certificate
tlsConfig *tls.Config
inter *network.NetworkInterface
closedCallbacks []func(s *Session, err error)
childContext context.Context
childContextCancel context.CancelFunc
streamHandlersMtx sync.Mutex
streamHandlers map[uint32]StreamHandler
streamHandlerBuilders StreamHandlerBuilders
}
// SessionReadyDetails contains information about the session becoming ready.
type SessionReadyDetails struct {
// Session is the session that became ready.
Session *Session
// InitiatedTimestamp is when this session was initiated.
InitiatedTimestamp time.Time
// PeerIdentity is the parsed peer identity.
PeerIdentity *identity.ParsedIdentity
}
// SessionManager manages a session.
type SessionManager interface {
// OnSessionReady is called when the session is finished initializing.
// Returning an error will terminate the session with the error.
OnSessionReady(details *SessionReadyDetails) error
// OnSessionClosed is called when a session is closed.
OnSessionClosed(sess *Session, err error)
}
// SessionConfig contains arguments to build a session.
type SessionConfig struct {
// Manager is the session manager.
Manager SessionManager
// Context, when cancelled will close the session.
Context context.Context
// Session to wrap.
Session netproto.Session
// Stream handler builders
HandlerBuilders StreamHandlerBuilders
// Identity of the local node
LocalIdentity *identity.ParsedIdentity
// CaCertificate is the CA cert.
CaCertificate *x509.Certificate
// TLSConfig is the local TLS config.
TLSConfig *tls.Config
}
// NewSession builds a new session.
func NewSession(config SessionConfig) (*Session, error) {
s := &Session{
Zoo: zoo.NewZoo(),
id: sessionIdCtr,
context: config.Context,
session: config.Session,
manager: config.Manager,
localIdentity: config.LocalIdentity,
caCert: config.CaCertificate,
tlsConfig: config.TLSConfig,
streamHandlerBuilders: config.HandlerBuilders,
started: time.Now(),
inactivityTimer: time.NewTimer(handshakeTimeout),
inactivityTimeout: handshakeTimeout,
pumpErrors: make(chan error, 2),
streamHandlers: make(map[uint32]StreamHandler),
log: log.WithField("session", sessionIdCtr),
}
sessionIdCtr++
if config.LocalIdentity == nil || config.LocalIdentity.GetPrivateKey() == nil {
return nil, errors.New("local identity must be set with a private key")
}
localCertChain, err := config.LocalIdentity.ParseCertificates()
if err != nil {
return nil, err
}
if config.CaCertificate == nil {
return nil, errors.New("ca certificate must be given")
}
if err := localCertChain.Validate(config.CaCertificate); err != nil {
return nil, err
}
s.childContext, s.childContextCancel = context.WithCancel(config.Context)
s.StartPump(s.acceptStreamPump)
go s.manageCloseConditions()
return s, nil
}
// IsInitiator returns if the session was initiated by the local host.
func (s *Session) IsInitiator() bool {
return s.session.Initiator()
}
// GetId gets the incremented ID of this session
func (s *Session) GetId() int {
return s.id
}
// GetStartTime returns the time the session started.
func (s *Session) GetStartTime() time.Time {
return s.started
}
// GetContext returns the context for this session.
func (s *Session) GetContext() context.Context {
return s.childContext
}
// SetStartTime overrides the built-in start time.
func (s *Session) SetStartTime(t time.Time) {
s.started = t
}
// CloseWithErr forces the session to close early.
func (s *Session) CloseWithErr(err error) {
select {
case s.pumpErrors <- err:
default:
}
}
// GetManager returns the SessionManager for this session
func (s *Session) GetManager() SessionManager {
return s.manager
}
// ResetInactivityTimeout resets the timeout.
// If zero is passed, maintains last duration.
func (s *Session) ResetInactivityTimeout(dur time.Duration) {
if dur == 0 {
dur = s.inactivityTimeout
} else {
s.inactivityTimeout = dur
}
s.inactivityTimer.Reset(dur)
}
// OpenStream attempts to open a stream with a handler.
func (s *Session) OpenStream(streamType StreamType) (handler StreamHandler, err error) {
handlerBuilder, ok := s.streamHandlerBuilders[streamType]
if !ok {
return nil, fmt.Errorf("Unknown stream type: %d", streamType)
}
l := log.WithField("streamType", streamType)
stream, err := s.session.OpenStream()
if err != nil {
return nil, err
}
streamId := stream.ID()
l = l.WithField("stream", streamId)
l.Debug("Stream opened (by us)")
rw := packet.NewPacketReadWriter(stream)
err = rw.WriteProtoPacket(&StreamInit{StreamType: uint32(streamType)})
if err != nil {
return nil, err
}
shConfig := s.buildBaseStreamHandlerConfig(true)
shConfig.Log = s.log.WithField("stream", uint32(streamId))
shConfig.Session = s
shConfig.NetSession = s.session
shConfig.PacketRw = rw
shConfig.Stream = stream
handler, err = handlerBuilder.BuildHandler(s.context, shConfig)
if err != nil {
return nil, err
}
go s.runStreamHandler(handler, stream)
l.Debug("Stream initialized")
return handler, nil
}
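// Added note: the StreamInit packet written in OpenStream is the one-frame
// handshake for new streams -- the initiator sends StreamInit{StreamType}
// first, and handleIncomingStream on the remote side reads it to pick the
// matching StreamHandlerBuilder before any application data is exchanged.
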
func (s *Session) buildBaseStreamHandlerConfig(initiator bool) *StreamHandlerConfig {
return &StreamHandlerConfig{
Initiator: initiator,
Session: s,
NetSession: s.session,
LocalIdentity: s.localIdentity,
CaCert: s.caCert,
TLSConfig: s.tlsConfig,
}
}
// handleIncomingStream handles an incoming stream.
func (s *Session) handleIncomingStream(stream netproto.Stream) error {
l := s.log.WithField("stream", stream.ID())
l.Debug("Stream opened")
rw := packet.NewPacketReadWriter(stream)
si := &StreamInit{}
_, _, err := rw.ReadPacket(func(packetType packet.PacketType) (packet.ProtoPacket, error) {
if packetType != 1 {
return nil, fmt.Errorf("Expected packet type 1, got %d", packetType)
}
return si, nil
})
if err != nil {
return err
}
handlerBuilder, ok := s.streamHandlerBuilders[StreamType(si.StreamType)]
if !ok {
return fmt.Errorf("Unknown stream type: %d", si.StreamType)
}
shConfig := s.buildBaseStreamHandlerConfig(false)
shConfig.Log = l
shConfig.Session = s
shConfig.PacketRw = rw
shConfig.Stream = stream
handler, err := handlerBuilder.BuildHandler(s.context, shConfig)
if err != nil {
return err
}
l.WithField("streamType", si.StreamType).Debug("Stream initialized")
go s.runStreamHandler(handler, stream)
return nil
}
// runStreamHandler manages a stream handler.
func (s *Session) runStreamHandler(handler StreamHandler, stream netproto.Stream) {
id := stream.ID()
s.streamHandlersMtx.Lock()
s.streamHandlers[uint32(id)] = handler
s.streamHandlersMtx.Unlock()
ctx, ctxCancel := context.WithCancel(s.childContext)
defer ctxCancel()
err := handler.Handle(ctx)
l := s.log.WithField("stream", uint32(id))
select {
case <-s.childContext.Done():
return // Don't print or bother removing the stream handler when we're done with the session.
default:
}
if err != nil && err != io.EOF && err != context.Canceled {
l.WithError(err).Warn("Stream closed with error")
} else {
l.Debug("Stream closed")
}
s.streamHandlersMtx.Lock()
delete(s.streamHandlers, uint32(id))
s.streamHandlersMtx.Unlock()
stream.Close()
}
// StartPump runs pump in a goroutine; when pump returns, its result is pushed
// onto pumpErrors, which ends the session.
func (s *Session) StartPump(pump func() error) {
go func() {
select {
case s.pumpErrors <- pump():
default:
}
}()
}
// GetInterface attempts to determine the interface this session is running on.
func (s *Session) GetInterface() *network.NetworkInterface |
// GetLocalAddr returns the local address.
func (s *Session) GetLocalAddr() net.Addr {
return s.session.LocalAddr()
}
// GetRemoteAddr returns the remote address.
func (s *Session) GetRemoteAddr() net.Addr | {
if s.inter != nil {
return s.inter
}
remAddr := s.session.RemoteAddr()
uadr, ok := remAddr.(*net.UDPAddr)
if !ok {
return nil
}
inter, _ := network.FromAddr(uadr.IP)
s.inter = inter
return inter
} | identifier_body |
session.go | .
type Session struct {
*zoo.Zoo
id int
context context.Context
log *log.Entry
started time.Time
manager SessionManager
session netproto.Session
pumpErrors chan error
inactivityTimer *time.Timer
inactivityTimeout time.Duration
localIdentity *identity.ParsedIdentity
caCert *x509.Certificate
tlsConfig *tls.Config
inter *network.NetworkInterface
closedCallbacks []func(s *Session, err error)
childContext context.Context
childContextCancel context.CancelFunc
streamHandlersMtx sync.Mutex
streamHandlers map[uint32]StreamHandler
streamHandlerBuilders StreamHandlerBuilders
}
// SessionReadyDetails contains information about the session becoming ready.
type SessionReadyDetails struct {
// Session is the session that became ready.
Session *Session
// InitiatedTimestamp is when this session was initiated.
InitiatedTimestamp time.Time
// PeerIdentity is the parsed peer identity.
PeerIdentity *identity.ParsedIdentity
}
// SessionManager manages a session.
type SessionManager interface {
// OnSessionReady is called when the session is finished initializing.
// Returning an error will terminate the session with the error.
OnSessionReady(details *SessionReadyDetails) error
// OnSessionClosed is called when a session is closed.
OnSessionClosed(sess *Session, err error)
}
// SessionConfig contains arguments to build a session.
type SessionConfig struct {
// Manager is the session manager.
Manager SessionManager
// Context, when cancelled will close the session.
Context context.Context
// Session to wrap.
Session netproto.Session
// Stream handler builders
HandlerBuilders StreamHandlerBuilders
// Identity of the local node
LocalIdentity *identity.ParsedIdentity
// CaCertificate is the CA cert.
CaCertificate *x509.Certificate
// TLSConfig is the local TLS config.
TLSConfig *tls.Config
}
// NewSession builds a new session.
func NewSession(config SessionConfig) (*Session, error) {
s := &Session{
Zoo: zoo.NewZoo(),
id: sessionIdCtr,
context: config.Context,
session: config.Session,
manager: config.Manager,
localIdentity: config.LocalIdentity,
caCert: config.CaCertificate,
tlsConfig: config.TLSConfig,
streamHandlerBuilders: config.HandlerBuilders,
started: time.Now(),
inactivityTimer: time.NewTimer(handshakeTimeout),
inactivityTimeout: handshakeTimeout,
pumpErrors: make(chan error, 2),
streamHandlers: make(map[uint32]StreamHandler),
log: log.WithField("session", sessionIdCtr),
}
sessionIdCtr++
if config.LocalIdentity == nil || config.LocalIdentity.GetPrivateKey() == nil {
return nil, errors.New("local identity must be set with a private key")
}
localCertChain, err := config.LocalIdentity.ParseCertificates()
if err != nil {
return nil, err
}
if config.CaCertificate == nil {
return nil, errors.New("ca certificate must be given")
}
if err := localCertChain.Validate(config.CaCertificate); err != nil {
return nil, err
}
s.childContext, s.childContextCancel = context.WithCancel(config.Context)
s.StartPump(s.acceptStreamPump)
go s.manageCloseConditions()
return s, nil
}
// IsInitiator returns if the session was initiated by the local host.
func (s *Session) IsInitiator() bool {
return s.session.Initiator()
}
// GetId gets the incremented ID of this session
func (s *Session) GetId() int {
return s.id
}
// GetStartTime returns the time the session started.
func (s *Session) GetStartTime() time.Time {
return s.started
}
// GetContext returns the context for this session.
func (s *Session) GetContext() context.Context {
return s.childContext
}
// SetStartTime overrides the built-in start time.
func (s *Session) SetStartTime(t time.Time) {
s.started = t
}
// CloseWithErr forces the session to close early.
func (s *Session) CloseWithErr(err error) {
select {
case s.pumpErrors <- err:
default:
}
}
// GetManager returns the SessionManager for this session
func (s *Session) GetManager() SessionManager {
return s.manager
}
// ResetInactivityTimeout resets the timeout.
// If zero is passed, maintains last duration.
func (s *Session) ResetInactivityTimeout(dur time.Duration) {
if dur == 0 {
dur = s.inactivityTimeout
} else {
s.inactivityTimeout = dur
}
s.inactivityTimer.Reset(dur)
}
// OpenStream attempts to open a stream with a handler.
func (s *Session) OpenStream(streamType StreamType) (handler StreamHandler, err error) {
handlerBuilder, ok := s.streamHandlerBuilders[streamType]
if !ok {
return nil, fmt.Errorf("Unknown stream type: %d", streamType)
}
l := log.WithField("streamType", streamType)
stream, err := s.session.OpenStream()
if err != nil {
return nil, err
}
streamId := stream.ID()
l = l.WithField("stream", streamId)
l.Debug("Stream opened (by us)")
rw := packet.NewPacketReadWriter(stream)
err = rw.WriteProtoPacket(&StreamInit{StreamType: uint32(streamType)})
if err != nil {
return nil, err
}
shConfig := s.buildBaseStreamHandlerConfig(true)
shConfig.Log = s.log.WithField("stream", uint32(streamId))
shConfig.Session = s
shConfig.NetSession = s.session
shConfig.PacketRw = rw
shConfig.Stream = stream
handler, err = handlerBuilder.BuildHandler(s.context, shConfig)
if err != nil {
return nil, err
}
go s.runStreamHandler(handler, stream)
l.Debug("Stream initialized")
return handler, nil
}
func (s *Session) buildBaseStreamHandlerConfig(initiator bool) *StreamHandlerConfig {
return &StreamHandlerConfig{
Initiator: initiator,
Session: s,
NetSession: s.session,
LocalIdentity: s.localIdentity,
CaCert: s.caCert,
TLSConfig: s.tlsConfig,
}
}
// handleIncomingStream handles an incoming stream.
func (s *Session) handleIncomingStream(stream netproto.Stream) error {
l := s.log.WithField("stream", stream.ID())
l.Debug("Stream opened")
rw := packet.NewPacketReadWriter(stream)
si := &StreamInit{}
_, _, err := rw.ReadPacket(func(packetType packet.PacketType) (packet.ProtoPacket, error) {
if packetType != 1 {
return nil, fmt.Errorf("Expected packet type 1, got %d", packetType)
}
return si, nil
})
if err != nil {
return err
}
handlerBuilder, ok := s.streamHandlerBuilders[StreamType(si.StreamType)]
if !ok {
return fmt.Errorf("Unknown stream type: %d", si.StreamType)
}
shConfig := s.buildBaseStreamHandlerConfig(false)
shConfig.Log = l
shConfig.Session = s
shConfig.PacketRw = rw
shConfig.Stream = stream
handler, err := handlerBuilder.BuildHandler(s.context, shConfig)
if err != nil {
return err
}
l.WithField("streamType", si.StreamType).Debug("Stream initialized")
go s.runStreamHandler(handler, stream)
return nil
}
// runStreamHandler manages a stream handler.
func (s *Session) | (handler StreamHandler, stream netproto.Stream) {
id := stream.ID()
s.streamHandlersMtx.Lock()
s.streamHandlers[uint32(id)] = handler
s.streamHandlersMtx.Unlock()
ctx, ctxCancel := context.WithCancel(s.childContext)
defer ctxCancel()
err := handler.Handle(ctx)
l := s.log.WithField("stream", uint32(id))
select {
case <-s.childContext.Done():
return // Don't print or bother removing the stream handler when we're done with the session.
default:
}
if err != nil && err != io.EOF && err != context.Canceled {
l.WithError(err).Warn("Stream closed with error")
} else {
l.Debug("Stream closed")
}
s.streamHandlersMtx.Lock()
delete(s.streamHandlers, uint32(id))
s.streamHandlersMtx.Unlock()
stream.Close()
}
// StartPump runs pump in a goroutine; when pump returns, its result is pushed
// onto pumpErrors, which ends the session.
func (s *Session) StartPump(pump func() error) {
go func() {
select {
case s.pumpErrors <- pump():
default:
}
}()
}
// GetInterface attempts to determine the interface this session is running on.
func (s *Session) GetInterface() *network.NetworkInterface {
if s.inter != nil {
return s.inter
}
remAddr := s.session.RemoteAddr()
uadr, ok := remAddr.(*net.UDPAddr)
if !ok {
return nil
}
inter, _ := network.FromAddr(uadr.IP)
s.inter = inter
return inter
}
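// Added note: the UDP type assertion above means GetInterface only resolves an
// interface for UDP-backed transports; any other net.Addr yields nil. A
// successful lookup is cached in s.inter, while a nil result is recomputed on
// the next call.
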
// GetLocalAddr returns the local address.
func (s *Session) GetLocalAddr() net.Addr {
return s.session.LocalAddr()
}
// GetRemoteAddr returns the remote address.
func (s *Session) GetRemoteAddr() net.Addr {
| runStreamHandler | identifier_name |
rpnet.py | out_channels=192, kernel_size=5, padding=2),
nn.BatchNorm2d(num_features=192),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
nn.Dropout(0.2)
)
hidden8 = nn.Sequential(
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=5, padding=2),
nn.BatchNorm2d(num_features=192),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=1, padding=1),
nn.Dropout(0.2)
)
hidden9 = nn.Sequential(
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=192),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
nn.Dropout(0.2)
)
hidden10 = nn.Sequential(
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=192),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=1, padding=1),
nn.Dropout(0.2)
)
self.features = nn.Sequential(
hidden1,
hidden2,
hidden3,
hidden4,
hidden5,
hidden6,
hidden7,
hidden8,
hidden9,
hidden10
)
self.classifier = nn.Sequential(
nn.Linear(23232, 100),
# nn.ReLU(inplace=True),
nn.Linear(100, 100),
# nn.ReLU(inplace=True),
nn.Linear(100, num_classes),# num_classes is 4, i.e. the bounding box given by its left-top and right-bottom corners
)
def forward(self, x):
x1 = self.features(x)
x11 = x1.view(x1.size(0), -1)
x = self.classifier(x11)
return x
class fh02(nn.Module):
def __init__(self, num_points, num_classes, wrPath=None):
super(fh02, self).__init__()
self.load_wR2(wrPath)
self.classifier1 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, provNum),
)
self.classifier2 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, alphaNum),
)
self.classifier3 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
self.classifier4 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
self.classifier5 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
self.classifier6 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
self.classifier7 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
def load_wR2(self, path):
self.wR2 = wR2(numPoints)
if use_gpu:
self.wR2 = torch.nn.DataParallel(self.wR2, device_ids=range( | x2 = self.wR2.module.features[2](_x1)
_x3 = self.wR2.module.features[3](x2)
x4 = self.wR2.module.features[4](_x3)
_x5 = self.wR2.module.features[5](x4)
x6 = self.wR2.module.features[6](_x5)
x7 = self.wR2.module.features[7](x6)
x8 = self.wR2.module.features[8](x7)
x9 = self.wR2.module.features[9](x8)
x9 = x9.view(x9.size(0), -1)
boxLoc = self.wR2.module.classifier(x9)#(n,4)
h1, w1 = _x1.data.size()[2], _x1.data.size()[3]
h2, w2 = _x3.data.size()[2], _x3.data.size()[3]
h3, w3 = _x5.data.size()[2], _x5.data.size()[3]
if use_gpu:
p1 = Variable(torch.FloatTensor([[w1,0,0,0],[0,h1,0,0],[0,0,w1,0],[0,0,0,h1]]).cuda(), requires_grad=False)
p2 = Variable(torch.FloatTensor([[w2,0,0,0],[0,h2,0,0],[0,0,w2,0],[0,0,0,h2]]).cuda(), requires_grad=False)
p3 = Variable(torch.FloatTensor([[w3,0,0,0],[0,h3,0,0],[0,0,w3,0],[0,0,0,h3]]).cuda(), requires_grad=False)
else:
p1 = Variable(torch.FloatTensor([[w1,0,0,0],[0,h1,0,0],[0,0,w1,0],[0,0,0,h1]]), requires_grad=False)
p2 = Variable(torch.FloatTensor([[w2,0,0,0],[0,h2,0,0],[0,0,w2,0],[0,0,0,h2]]), requires_grad=False)
p3 = Variable(torch.FloatTensor([[w3,0,0,0],[0,h3,0,0],[0,0,w3,0],[0,0,0,h3]]), requires_grad=False)
# x, y, w, h --> x1, y1, x2, y2
assert boxLoc.data.size()[1] == 4
if use_gpu:
postfix = Variable(torch.FloatTensor([[1,0,1,0],[0,1,0,1],[-0.5,0,0.5,0],[0,-0.5,0,0.5]]).cuda(), requires_grad=False)
else:
postfix = Variable(torch.FloatTensor([[1,0,1,0],[0,1,0,1],[-0.5,0,0.5,0],[0,-0.5,0,0.5]]), requires_grad=False)
#(n,4)*(4,4)->(n,4)
boxNew = boxLoc.mm(postfix).clamp(min=0, max=1)# boxLoc.mm(postfix) converts (cx, cy, w, h) into the left-top and right-bottom corner points
# input = Variable(torch.rand(2, 1, 10, 10), requires_grad=True)
# rois = Variable(torch.LongTensor([[0, 1, 2, 7, 8], [0, 3, 3, 8, 8], [1, 3, 3, 8, 8]]), requires_grad=False)
roi1 = roi_pooling_ims(_x1, boxNew.mm(p1), size=(16, 8))# boxNew.mm(p1) gives the corresponding left-top and right-bottom corner coordinates (n,4) on this feature map
roi2 = roi_pooling_ims(_x3, boxNew.mm(p2), size=(16, 8))
roi3 = roi_pooling_ims(_x5, boxNew.mm(p3), size=(16, 8))
rois = torch.cat((roi1, roi2, roi3), 1)#(n,(c1+c2+c3),h,w)
| torch.cuda.device_count()))
else:
self.wR2 = torch.nn.DataParallel(self.wR2)
if not path is None:
if use_gpu:
self.wR2.load_state_dict(torch.load(path))
else:
self.wR2.load_state_dict(torch.load(path,map_location='cpu'))
# self.wR2 = self.wR2.cuda()
# for param in self.wR2.parameters():
# param.requires_grad = False
def forward(self, x):
x0 = self.wR2.module.features[0](x)
_x1 = self.wR2.module.features[1](x0)
| identifier_body |
rpnet.py | out_channels=192, kernel_size=5, padding=2),
nn.BatchNorm2d(num_features=192),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
nn.Dropout(0.2)
)
hidden8 = nn.Sequential(
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=5, padding=2),
nn.BatchNorm2d(num_features=192),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=1, padding=1),
nn.Dropout(0.2)
)
hidden9 = nn.Sequential(
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=192),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
nn.Dropout(0.2)
)
hidden10 = nn.Sequential(
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=192),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=1, padding=1),
nn.Dropout(0.2)
)
self.features = nn.Sequential(
hidden1,
hidden2,
hidden3,
hidden4,
hidden5,
hidden6,
hidden7,
hidden8,
hidden9,
hidden10
)
self.classifier = nn.Sequential(
nn.Linear(23232, 100),
# nn.ReLU(inplace=True),
nn.Linear(100, 100),
# nn.ReLU(inplace=True),
nn.Linear(100, num_classes),# num_classes is 4, i.e. the bounding box given by its left-top and right-bottom corners
)
|
class fh02(nn.Module):
def __init__(self, num_points, num_classes, wrPath=None):
super(fh02, self).__init__()
self.load_wR2(wrPath)
self.classifier1 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, provNum),
)
self.classifier2 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, alphaNum),
)
self.classifier3 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
self.classifier4 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
self.classifier5 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
self.classifier6 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
self.classifier7 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
def load_wR2(self, path):
self.wR2 = wR2(numPoints)
if use_gpu:
self.wR2 = torch.nn.DataParallel(self.wR2, device_ids=range(torch.cuda.device_count()))
else:
self.wR2 = torch.nn.DataParallel(self.wR2)
if not path is None:
if use_gpu:
self.wR2.load_state_dict(torch.load(path))
else:
self.wR2.load_state_dict(torch.load(path,map_location='cpu'))
# self.wR2 = self.wR2.cuda()
# for param in self.wR2.parameters():
# param.requires_grad = False
def forward(self, x):
x0 = self.wR2.module.features[0](x)
_x1 = self.wR2.module.features[1](x0)
x2 = self.wR2.module.features[2](_x1)
_x3 = self.wR2.module.features[3](x2)
x4 = self.wR2.module.features[4](_x3)
_x5 = self.wR2.module.features[5](x4)
x6 = self.wR2.module.features[6](_x5)
x7 = self.wR2.module.features[7](x6)
x8 = self.wR2.module.features[8](x7)
x9 = self.wR2.module.features[9](x8)
x9 = x9.view(x9.size(0), -1)
boxLoc = self.wR2.module.classifier(x9)#(n,4)
h1, w1 = _x1.data.size()[2], _x1.data.size()[3]
h2, w2 = _x3.data.size()[2], _x3.data.size()[3]
h3, w3 = _x5.data.size()[2], _x5.data.size()[3]
if use_gpu:
p1 = Variable(torch.FloatTensor([[w1,0,0,0],[0,h1,0,0],[0,0,w1,0],[0,0,0,h1]]).cuda(), requires_grad=False)
p2 = Variable(torch.FloatTensor([[w2,0,0,0],[0,h2,0,0],[0,0,w2,0],[0,0,0,h2]]).cuda(), requires_grad=False)
p3 = Variable(torch.FloatTensor([[w3,0,0,0],[0,h3,0,0],[0,0,w3,0],[0,0,0,h3]]).cuda(), requires_grad=False)
else:
p1 = Variable(torch.FloatTensor([[w1,0,0,0],[0,h1,0,0],[0,0,w1,0],[0,0,0,h1]]), requires_grad=False)
p2 = Variable(torch.FloatTensor([[w2,0,0,0],[0,h2,0,0],[0,0,w2,0],[0,0,0,h2]]), requires_grad=False)
p3 = Variable(torch.FloatTensor([[w3,0,0,0],[0,h3,0,0],[0,0,w3,0],[0,0,0,h3]]), requires_grad=False)
# x, y, w, h --> x1, y1, x2, y2
assert boxLoc.data.size()[1] == 4
if use_gpu:
postfix = Variable(torch.FloatTensor([[1,0,1,0],[0,1,0,1],[-0.5,0,0.5,0],[0,-0.5,0,0.5]]).cuda(), requires_grad=False)
else:
postfix = Variable(torch.FloatTensor([[1,0,1,0],[0,1,0,1],[-0.5,0,0.5,0],[0,-0.5,0,0.5]]), requires_grad=False)
#(n,4)*(4,4)->(n,4)
boxNew = boxLoc.mm(postfix).clamp(min=0, max=1)# boxLoc.mm(postfix) converts (cx, cy, w, h) into the left-top and right-bottom corner points
# input = Variable(torch.rand(2, 1, 10, 10), requires_grad=True)
# rois = Variable(torch.LongTensor([[0, 1, 2, 7, 8], [0, 3, 3, 8, 8], [1, 3, 3, 8, 8]]), requires_grad=False)
roi1 = roi_pooling_ims(_x1, boxNew.mm(p1), size=(16, 8))# boxNew.mm(p1) gives the corresponding left-top and right-bottom corner coordinates (n,4) on this feature map
roi2 = roi_pooling_ims(_x3, boxNew.mm(p2), size=(16, 8))
roi3 = roi_pooling_ims(_x5, boxNew.mm(p3), size=(16, 8))
rois = torch.cat((roi1, roi2, roi3), 1)#(n,(c1+c2+c3),h,w)
| def forward(self, x):
x1 = self.features(x)
x11 = x1.view(x1.size(0), -1)
x = self.classifier(x11)
return x | random_line_split |
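A note on the box transform used in fh02.forward above: boxLoc holds one (cx, cy, w, h) box per sample, normalised to [0, 1]; right-multiplying by the 4x4 postfix matrix turns it into (x1, y1, x2, y2), and multiplying by p1/p2/p3 scales those corners to each feature map's width and height before ROI pooling. A minimal standalone sketch of the same arithmetic (plain PyTorch, no Variable wrapper; the tensor values and feature-map size are illustrative, not taken from the dataset):

```python
import torch

# One box per row: (cx, cy, w, h), all normalised to [0, 1].
boxLoc = torch.tensor([[0.50, 0.40, 0.20, 0.10]])

# Right-multiplying by this matrix maps (cx, cy, w, h) -> (x1, y1, x2, y2):
# x1 = cx - 0.5*w, y1 = cy - 0.5*h, x2 = cx + 0.5*w, y2 = cy + 0.5*h
postfix = torch.tensor([[ 1.0,  0.0, 1.0, 0.0],
                        [ 0.0,  1.0, 0.0, 1.0],
                        [-0.5,  0.0, 0.5, 0.0],
                        [ 0.0, -0.5, 0.0, 0.5]])
boxNew = boxLoc.mm(postfix).clamp(min=0, max=1)
# tensor([[0.4000, 0.3500, 0.6000, 0.4500]])

# Scaling by a diagonal-style matrix built from the feature map's width and
# height gives the pixel coordinates that roi_pooling_ims crops.
w1, h1 = 122.0, 122.0  # illustrative feature-map size only
p1 = torch.tensor([[w1, 0.0, 0.0, 0.0],
                   [0.0, h1, 0.0, 0.0],
                   [0.0, 0.0, w1, 0.0],
                   [0.0, 0.0, 0.0, h1]])
rois_px = boxNew.mm(p1)  # corner coordinates on that feature map
```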
rpnet.py | tial(
nn.Conv2d(in_channels=3, out_channels=48, kernel_size=5, padding=2, stride=2),
nn.BatchNorm2d(num_features=48),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
nn.Dropout(0.2)
)
hidden2 = nn.Sequential(
nn.Conv2d(in_channels=48, out_channels=64, kernel_size=5, padding=2),
nn.BatchNorm2d(num_features=64),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=1, padding=1),
nn.Dropout(0.2)
)
hidden3 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, padding=2),
nn.BatchNorm2d(num_features=128),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
nn.Dropout(0.2)
)
hidden4 = nn.Sequential(
nn.Conv2d(in_channels=128, out_channels=160, kernel_size=5, padding=2),
nn.BatchNorm2d(num_features=160),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=1, padding=1),
nn.Dropout(0.2)
)
hidden5 = nn.Sequential(
nn.Conv2d(in_channels=160, out_channels=192, kernel_size=5, padding=2),
nn.BatchNorm2d(num_features=192),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
nn.Dropout(0.2)
)
hidden6 = nn.Sequential(
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=5, padding=2),
nn.BatchNorm2d(num_features=192),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=1, padding=1),
nn.Dropout(0.2)
)
hidden7 = nn.Sequential(
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=5, padding=2),
nn.BatchNorm2d(num_features=192),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
nn.Dropout(0.2)
)
hidden8 = nn.Sequential(
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=5, padding=2),
nn.BatchNorm2d(num_features=192),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=1, padding=1),
nn.Dropout(0.2)
)
hidden9 = nn.Sequential(
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=192),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
nn.Dropout(0.2)
)
hidden10 = nn.Sequential(
nn.Conv2d(in_channels=192, out_channels=192, kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=192),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=1, padding=1),
nn.Dropout(0.2)
)
self.features = nn.Sequential(
hidden1,
hidden2,
hidden3,
hidden4,
hidden5,
hidden6,
hidden7,
hidden8,
hidden9,
hidden10
)
self.classifier = nn.Sequential(
nn.Linear(23232, 100),
# nn.ReLU(inplace=True),
nn.Linear(100, 100),
# nn.ReLU(inplace=True),
nn.Linear(100, num_classes),# num_classes is 4, i.e. the bounding box given by its left-top and right-bottom corners
)
def forward(self, x):
x1 = self.features(x)
x11 = x1.view(x1.size(0), -1)
x = self.classifier(x11)
return x
class fh02(nn.Module):
def __init__(self, num_points, num_classes, wrPath=None):
super(fh02, self).__init__()
self.load_wR2(wrPath)
self.classifier1 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, provNum),
)
self.classifier2 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, alphaNum),
)
self.classifier3 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
self.classifier4 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
self.classifier5 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
self.classifier6 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
self.classifier7 = nn.Sequential(
# nn.Dropout(),
nn.Linear(53248, 128),
# nn.ReLU(inplace=True),
# nn.Dropout(),
nn.Linear(128, adNum),
)
def load_wR2(self, path):
self.wR2 = wR2(numPoints)
if use_gpu:
self.wR2 = torch.nn.DataParallel(self.wR2, device_ids=range(torch.cuda.device_count()))
else:
self.wR2 = torch.nn.DataParallel(self.wR2)
if not path is None:
if use_gpu:
self.wR2.load_state_dict(torch.load(path))
else:
self.wR2.load_state_dict(torch.load(path,map_location='cpu'))
# self.wR2 = self.wR2.cuda()
# for param in self.wR2.parameters():
# param.requires_grad = False
def forward(self, x):
x0 = self.wR2.module.features[0](x)
_x1 = self.wR2.module.features[1](x0)
x2 = self.wR2.module.features[2](_x1)
_x3 = self.wR2.module.features[3](x2)
x4 = self.wR2.module.features[4](_x3)
_x5 = self.wR2.module.features[5](x4)
x6 = self.wR2.module.features[6](_x5)
x7 = self.wR2.module.features[7](x6)
x8 = self.wR2.module.features[8](x7)
x9 = self.wR2.module.features[9](x8)
x9 = x9.view(x9.size(0), -1)
boxLoc = self.wR2.module.classifier(x9)#(n,4)
h1, w1 = _x1.data.size()[2], _x1.data.size()[3]
h2, w2 = _x3.data.size()[2], _x3.data.size()[3]
h3, w3 = _x5.data.size()[2], _x5.data.size()[3]
if use_gpu:
p1 = Variable(torch.FloatTensor([[w1,0,0,0],[0,h1,0,0],[0,0,w1,0],[0,0,0,h1]]).cuda(), requires_grad=False)
p2 = Variable(torch.FloatTensor([[w2,0,0,0],[0,h2,0,0],[0,0,w2,0],[0,0,0,h2]]).cuda(), requires_grad=False)
p3 = Variable(torch.FloatTensor([[w3,0,0,0],[0,h3,0,0],[0,0,w3,0],[0,0,0,h3]]).cuda(), requires_grad=False)
else:
p1 = Variable(torch.FloatTensor([[w1,0,0,0],[0,h1,0,0],[0,0,w1,0],[0,0,0,h1]]), requires_grad=False)
p2 = Variable(torch.FloatTensor([[w2,0,0,0],[0,h2,0,0],[0,0,w2,0],[0, | uen | identifier_name |
|
rpnet.py | reflects
(top, left, bottom, right)
:param rec2: (y0, x0, y1, x1)
:return: scalar value of IoU
"""
# computing area of each rectangles
S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
# computing the sum_area
sum_area = S_rec1 + S_rec2
# find the each edge of intersect rectangle
left_line = max(rec1[1], rec2[1])
right_line = min(rec1[3], rec2[3])
top_line = max(rec1[0], rec2[0])
bottom_line = min(rec1[2], rec2[2])
# judge if there is an intersect
if left_line >= right_line or top_line >= bottom_line:
return 0
else:
intersect = (right_line - left_line) * (bottom_line - top_line)
return intersect / (sum_area - intersect)
def eval(model, test_dirs):
count, detectionCorrect, recognitionCorrect = 0, 0, 0
dst = labelTestDataLoader(test_dirs, imgSize)
if use_gpu:
testloader = DataLoader(dst, batch_size=1, shuffle=True, num_workers=8)
else:
testloader = DataLoader(dst, batch_size=1, shuffle=True, num_workers=0)
start = time()
for i, (XI, corner, labels, ims) in enumerate(testloader):
count += 1
# evaluate recognition accuracy
YI = [[int(ee) for ee in el.split('_')[:7]] for el in labels]
if use_gpu:
x = Variable(XI.cuda(0))
else:
x = Variable(XI)
# Forward pass: Compute predicted y by passing x to the model
fps_pred, y_pred = model(x)
outputY = [el.data.cpu().numpy().tolist() for el in y_pred]
labelPred = [t[0].index(max(t[0])) for t in outputY]
# compare YI, outputY
try:
if isEqual(labelPred, YI[0]) == 7:
recognitionCorrect += 1
else:
pass
except:
pass
img = cv2.imread(ims[0])
# evaluate detection accuracy
[cx, cy, w, h] = fps_pred.data.cpu().numpy()[0].tolist()
left_up = [(cx - w/2)*img.shape[1], (cy - h/2)*img.shape[0]]
right_down = [(cx + w/2)*img.shape[1], (cy + h/2)*img.shape[0]]
prediction = (left_up[1],left_up[0],right_down[1],right_down[0])
group_truth = (corner[0][1].item(),corner[0][0].item(),corner[1][1].item(),corner[1][0].item())
IOU = compute_iou(prediction,group_truth)
#print("iou is:",IOU)
if(IOU>=0.7):
detectionCorrect+=1
return detectionCorrect/count, recognitionCorrect/count, (time() - start) / count
epoch_start = int(args["start_epoch"])
resume_file = str(args["resume"])
#if not resume_file == '111':# resume training from the existing model
# # epoch_start = int(resume_file[resume_file.find('pth') + 3:]) + 1
# if not os.path.isfile(resume_file):
# print ("fail to load existed model! Existing ...")
# exit(0)
# print ("Load existed model! %s" % resume_file)
# model_conv = fh02(numPoints, numClasses, wR2Path)
# if use_gpu:
# model_conv = torch.nn.DataParallel(model_conv, device_ids=range(torch.cuda.device_count()))
# model_conv.load_state_dict(torch.load(resume_file))
# model_conv = model_conv.cuda()
# else:
# model_conv = torch.nn.DataParallel(model_conv)
# model_conv.load_state_dict(torch.load(resume_file,map_location='cpu'))
#
#else:# train the model from scratch
model_conv = fh02(numPoints, numClasses, wR2Path)
if use_gpu:
model_conv = torch.nn.DataParallel(model_conv, device_ids=range(torch.cuda.device_count()))
model_conv = model_conv.cuda()
else:
model_conv = torch.nn.DataParallel(model_conv)
print(model_conv)# print the model structure
print(get_n_params(model_conv))# print the number of parameters
criterion = nn.CrossEntropyLoss()# use the cross-entropy loss
# optimizer_conv = optim.RMSprop(model_conv.parameters(), lr=0.01, momentum=0.9)
optimizer_conv = optim.SGD(model_conv.parameters(), lr=0.001, momentum=0.9)
# load the dataset
dst = labelFpsDataLoader(trainDirs, imgSize)#(480,480)
if use_gpu:
trainloader = DataLoader(dst, batch_size=batchSize, shuffle=True, num_workers=8)
else:
trainloader = DataLoader(dst, batch_size=batchSize, shuffle=True, num_workers=0)
lrScheduler = lr_scheduler.StepLR(optimizer_conv, step_size=5, gamma=0.1)
def train_model(model, criterion, optimizer, num_epochs=25):
# since = time.time()
best_detectionAp = 0
loss_log = []
for epoch in range(epoch_start, num_epochs):
print("epoch:",epoch)
lossAver = []
model.train(True)
lrScheduler.step()
start = time()
for i, (XI, Y, labels, ims, total) in enumerate(trainloader):
#test and debug
# if(i==2):sys.exit(0)
# print(Y,labels,ims)
#Y: the 4 coordinate values of the bounding-box corners
#labels: e.g. 0_0_8_9_24_30_32
#ims: the image file paths
if not len(XI) == batchSize:
continue
# split the label string and store the parts in a list
YI = [[int(ee) for ee in el.split('_')[:7]] for el in labels]
# print("YI:",YI)
# convert x1,y1,x2,y2 into a numpy array
Y = np.array([el.numpy() for el in Y]).T
# print("Y:",Y)
if use_gpu:
x = Variable(XI.cuda(0))
y = Variable(torch.FloatTensor(Y).cuda(0), requires_grad=False)
else:
x = Variable(XI)
y = Variable(torch.FloatTensor(Y), requires_grad=False)
# Forward pass: Compute predicted y by passing x to the model
fps_pred, y_pred = model(x)
# print("fps_pred:",fps_pred)
# y_pred_list = []
# for yi in y_pred:
# y_pred_list.append(np.argmax(yi.detach().numpy(),1))
# print("y_pred:",y_pred)
# print("torch.FloatTensor(np.hsplit(fps_pred.numpy(),2)[0]):",Variable(torch.FloatTensor(np.hsplit(fps_pred.detach().numpy(),2)[0])))
# Compute and print loss
detection_loss = 0.0
if use_gpu:
detection_loss += 0.8 * nn.L1Loss().cuda()(fps_pred[:,:2], y[:,:2])# cx and cy are weighted 0.8, w and h are weighted 0.2
detection_loss += 0.2 * nn.L1Loss().cuda()(fps_pred[:,2:], y[:,2:])# NOTE: the non-GPU branch below is probably wrong - fps_pred[:][:2] takes the first two samples of the batch instead of the first two coordinates of every sample
else:
# changed by sz
detection_loss += 0.8 * nn.L1Loss().cuda()(fps_pred[:][:2], y[:][:2])
detection_loss += 0.2 * nn.L1Loss().cuda()(fps_pred[:][2:], y[:][2:])
# print("fps_pred[:][:2]:",fps_pred[:][:2])
# print("y[:][:2]:",y[:][:2])
# print("loss:",loss)
classfication_loss = 0.0
for j in range(7):
if use_gpu:
l = Variable(torch.LongTensor([el[j] for el in YI]).cuda(0))
else:
l = Variable(torch.LongTensor([el[j] for el in YI]))
classfication_loss += criterion(y_pred[j], l)# accumulate each character position's loss directly
loss = detection_loss + classfication_loss
# Zero gradients, perform a backward pass, and update the weights.
optimizer.zero_grad()# clear gradients
loss.backward()# backpropagation
optimizer.step()# update the parameters
lossAver.append(loss.item())
if i%500==0:
print ('epoch:{}[ | {}/{}]===>train average loss:{} = [detection_loss : {} + cl | conditional_block |
|
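The translated note in the training loop above flags the slicing in the non-GPU branch: for a 2-D tensor, fps_pred[:][:2] is just fps_pred[:2] (the first two samples of the batch), while fps_pred[:, :2] selects the first two columns (cx, cy) for every sample. A small sketch of the difference and of the intended 0.8/0.2 weighting; the shapes and random values are illustrative only:

```python
import torch
import torch.nn as nn

batch = 4
fps_pred = torch.rand(batch, 4)   # predicted (cx, cy, w, h) per sample
y = torch.rand(batch, 4)          # ground-truth (cx, cy, w, h) per sample

print(fps_pred[:][:2].shape)      # torch.Size([2, 4]) -> first two samples (unintended)
print(fps_pred[:, :2].shape)      # torch.Size([4, 2]) -> cx, cy of every sample

l1 = nn.L1Loss()
detection_loss = 0.8 * l1(fps_pred[:, :2], y[:, :2])                    # centre coordinates
detection_loss = detection_loss + 0.2 * l1(fps_pred[:, 2:], y[:, 2:])   # width and height
```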
LatexGenerator.py | (nested_dict)
self.data = nested_dict ()
def organize (self):
self._group (self.truth_rs)
self._group (self.build_rs)
def _group (self, result_set):
for weakness in result_set.weaknesses ():
for suite in weakness.suites:
# Handle the flaws
for flaw in suite.flaws:
# Find the target, this will create the necessary keys if required
target = self._get_target (weakness, suite, flaw)
if not target:
# First time seeing this file/function/line, need to set its value
newdict = self._get_default_data (weakness, suite, flaw)
target.update (newdict)
else:
|
# Handle the bugs
for bug in suite.bugs:
# Find the target, this will create the necessary keys if required
target = self._get_target (weakness, suite, bug)
if not target:
# First time seeing this file/function/line, need to set its value
newdict = self._get_default_data (weakness, suite, bug)
target.update (newdict)
else:
target['bugs'].append (bug)
#
# Get the appropriate target based on the grainularity
#
def _get_target (self, weakness, suite, obj):
if self.grainularity == Organizer.Grainularity.FILENAME:
return self.data[weakness.name][suite.directory][obj.filename]
elif self.grainularity == Organizer.Grainularity.FUNCTION:
return self.data[weakness.name][suite.directory][obj.filename][obj.function]
else:
return self.data[weakness.name][suite.directory][obj.filename][obj.function][obj.line]
#
# Make the default data dictionary based on the object provided
#
def _get_default_data (self, weakness, suite, obj):
# We always have flaws, bugs, weakness, directory, and filename
result = {'flaws': [], 'bugs': [], 'weakness': weakness.name, 'directory': suite.directory,
'filename': obj.filename, 'function': '', 'line': ''}
# Populate the function/line if we used that grainularity
if self.grainularity == Organizer.Grainularity.FUNCTION:
result['function'] = obj.function
elif self.grainularity == Organizer.Grainularity.LINE:
result['function'] = obj.function
result['line'] = obj.line
# Append the provided object to the correct list
if isinstance (obj, Bug):
result['bugs'].append (obj)
else:
result['flaws'].append (obj)
return result
#
# Find the leaves in the provided organized dictionary
#
def find_leaves (self, dictionary):
if isinstance (dictionary.get ('flaws'), list):
return [dictionary]
result = []
for key in dictionary.keys ():
result.extend (self.find_leaves (dictionary.get (key)))
return result
class DataPoint:
def __init__ (self):
self.tp = 0
self.fp = 0
self.fn = 0
self.weakness = None
self.directory = None
self.flaws = []
self.bugs = []
self.tool = None
self.truth = None
def precision (self):
try:
return self.tp / (self.tp + self.fp * 1.0)
except Exception as e:
return 0
def recall (self):
try:
return self.tp / (self.tp + self.fn * 1.0)
except Exception as e:
return 0
#
# Factory for Report Generation
# @returns LatexGenerator object
#
def __create__():
return LatexGenerator()
#
# Concrete class - PDFLatexGenerator
#
class LatexGenerator(ReportGenerator):
#
# Initialize the parser
#
@staticmethod
def init_parser (parser):
latex_parser = parser.add_parser ('latex', help='Convert evidence into a latex file')
latex_parser.set_defaults (generator=LatexGenerator)
#
# Initialize the generator
#
def parse_args (self, args):
# Call the base class (Command) init
super (LatexGenerator, self).parse_args (args)
self.pages = []
self.appendix = None
self.load_pages ()
#
# Load the pages
#
def load_pages (self):
# Some pages are repeated per grainularity (i.e. Summary). These should
# be in the correct order.
page_order = ['Methodology', 'Summary', 'Detail']
script_path = os.path.dirname (os.path.abspath (sys.argv[0]))
for cls in Utilities.import_classes (script_path, 'lib/ReportGenerators/LatexPages', 'name'):
if cls.name () == 'Appendix':
self.appendix = cls ()
elif cls.name () in page_order:
self.pages.insert (page_order.index (cls.name ()), cls ())
else:
logging.warning ("WARNING: Found unexpected Latex Page [%s], skipping..." % cls.name ())
logging.debug ('Loaded LatexPages [%s]' % self.pages)
#
# Generate Report of respective type
#
def generate (self, truth, build):
# Construct the appropriate Tool object which was used for the build
self.tool = self.get_tool (build)()
# The same appendix is used for each permutation, so we initialize
# and finalize it here instead of in write_report
self.appendix.parse_args (build.GetSource ())
logging.info ('Generating report based on filename')
self.write_report (truth, build, Organizer.Grainularity.FILENAME)
logging.info ('Generating report based on function')
self.write_report (truth, build, Organizer.Grainularity.FUNCTION)
logging.info ('Generating report based on line')
self.write_report (truth, build, Organizer.Grainularity.LINE)
self.appendix.fini ()
def write_report (self, truth, build, grainularity):
# Organize the truth and build result sets
organizer = Organizer (grainularity,
truth,
build)
organizer.organize ()
# We create two permutations for each grainularity:
# One where Bugs with wrong checkers count as false positives ([tool].[grainularity].tex)
# One where Bugs with wrong checkers don't count as false positives ([tool].[grainularity].skip.tex)
permutations = [('%s.%s.tex' % (build.GetSource (), str (grainularity).split ('.')[-1]), True),
('%s.%s.skip.tex' % (build.GetSource (), str (grainularity).split ('.')[-1]), False)]
for (filename, wrong_checker_fp) in permutations:
# Initialize the pages
for page in self.pages:
page.parse_args (organizer, wrong_checker_fp)
# Get the leaves
for data in organizer.find_leaves (organizer.data):
# Build datapoint
datapoint = self.build_datapoint (organizer, data, wrong_checker_fp)
# If there are 0 expected flaws, don't report on this datapoint.
# This can happen when there are only incidental flaws for
# the grainularity (i.e. line-based reporting where a line
# only has an incidental flaw)
if (0 == datapoint.tp + datapoint.fn):
logging.debug ('Skipping datapoint with 0 expected flaws')
continue
self.appendix.visit (datapoint)
for page in self.pages:
page.visit (datapoint)
# Write the main report page
outfile = open (filename, 'w')
self.init_latex (outfile, build.GetSource (), truth.GetSource ())
# Finalize the pages
for page in self.pages:
self.include_page (outfile, page)
page.fini ()
self.include_page (outfile, self.appendix)
self.fini_latex (outfile)
#
# Build a datapoint from the provided data structure
#
def build_datapoint (self, organizer, data, wrong_checker_is_fp):
# Get the probability matrix
(tp, fp, fn) = self.compute_probability (data, wrong_checker_is_fp)
# Build a data point
result = DataPoint ()
result.tp = tp
result.fp = fp
result.fn = fn
result.weakness = data['weakness']
result.directory = data['directory']
result.flaws = data['flaws']
result.bugs = data['bugs']
result.tool = organizer.build_rs.GetName ()
result.truth = organizer.truth_rs.GetName ()
return result
#
# Compute the probability matrix from the provided data
#
def compute_probability (self, data, wrong_checker_is_fp):
# Build a list of incidental CWEs which appear in the data
incidentals = [f for f in data['flaws'] if f.severity == FlawType.INCIDENTAL]
incidental_cwes = set ()
for flaw in incidentals:
cwe = flaw.description.replace (' ', '')
if cwe.startswith ('CWE'):
incidental_cwes.add (cwe[:6])
right_checker = 0
wrong_checker = 0
# Check the bugs to | target['flaws'].append (flaw) | conditional_block |
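The Organizer rows above call nested_dict () and then index self.data several levels deep (weakness, directory, filename, ...) without creating intermediate keys. The definition of nested_dict itself lies outside the captured text, so the following is only an assumption about how such a helper is usually declared, namely a recursive defaultdict:

```python
from collections import defaultdict

# A defaultdict whose missing values are themselves nested_dict instances,
# so arbitrarily deep indexing never raises KeyError.
nested_dict = lambda: defaultdict(nested_dict)

data = nested_dict()
data['CWE-121']['suite-01']['foo.c']   # every level is created on first access
print('CWE-121' in data)               # True
```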
LatexGenerator.py | (nested_dict)
self.data = nested_dict ()
def organize (self):
self._group (self.truth_rs)
self._group (self.build_rs)
def _group (self, result_set):
for weakness in result_set.weaknesses ():
for suite in weakness.suites:
# Handle the flaws
for flaw in suite.flaws:
# Find the target, this will create the necessary keys if required
target = self._get_target (weakness, suite, flaw)
if not target:
# First time seeing this file/function/line, need to set its value
newdict = self._get_default_data (weakness, suite, flaw)
target.update (newdict)
else:
target['flaws'].append (flaw)
# Handle the bugs
for bug in suite.bugs:
# Find the target, this will create the necessary keys if required
target = self._get_target (weakness, suite, bug)
if not target:
# First time seeing this file/function/line, need to set its value
newdict = self._get_default_data (weakness, suite, bug)
target.update (newdict)
else:
target['bugs'].append (bug)
#
# Get the appropriate target based on the grainularity
#
def _get_target (self, weakness, suite, obj):
if self.grainularity == Organizer.Grainularity.FILENAME:
return self.data[weakness.name][suite.directory][obj.filename]
elif self.grainularity == Organizer.Grainularity.FUNCTION:
return self.data[weakness.name][suite.directory][obj.filename][obj.function]
else:
return self.data[weakness.name][suite.directory][obj.filename][obj.function][obj.line]
#
# Make the default data dictionary based on the object provided
#
def _get_default_data (self, weakness, suite, obj):
# We always have flaws, bugs, weakness, directory, and filename
result = {'flaws': [], 'bugs': [], 'weakness': weakness.name, 'directory': suite.directory,
'filename': obj.filename, 'function': '', 'line': ''}
# Populate the function/line if we used that grainularity
if self.grainularity == Organizer.Grainularity.FUNCTION:
result['function'] = obj.function
elif self.grainularity == Organizer.Grainularity.LINE:
result['function'] = obj.function
result['line'] = obj.line
# Append the provided object to the correct list
if isinstance (obj, Bug):
result['bugs'].append (obj)
else:
result['flaws'].append (obj)
return result
#
# Find the leaves in the provided organized dictionary
#
def find_leaves (self, dictionary):
if isinstance (dictionary.get ('flaws'), list):
return [dictionary]
result = []
for key in dictionary.keys ():
result.extend (self.find_leaves (dictionary.get (key)))
return result
class DataPoint:
def __init__ (self):
self.tp = 0
self.fp = 0
self.fn = 0
self.weakness = None
self.directory = None
self.flaws = []
self.bugs = []
self.tool = None
self.truth = None
def precision (self):
try:
return self.tp / (self.tp + self.fp * 1.0)
except Exception as e:
return 0
def recall (self):
try:
return self.tp / (self.tp + self.fn * 1.0)
except Exception as e:
return 0
#
# Factory for Report Generation
# @returns LatexGenerator object
#
def __create__():
return LatexGenerator()
#
# Concrete class - PDFLatexGenerator
#
class LatexGenerator(ReportGenerator):
#
# Initialize the parser
#
@staticmethod
def init_parser (parser):
latex_parser = parser.add_parser ('latex', help='Convert evidence into a latex file')
latex_parser.set_defaults (generator=LatexGenerator)
#
# Initialize the generator
#
def parse_args (self, args):
# Call the base class (Command) init
super (LatexGenerator, self).parse_args (args)
self.pages = []
self.appendix = None
self.load_pages ()
#
# Load the pages
#
def load_pages (self):
# Some pages are repeated per grainularity (i.e. Summary). These should
# be in the correct order.
page_order = ['Methodology', 'Summary', 'Detail']
script_path = os.path.dirname (os.path.abspath (sys.argv[0]))
for cls in Utilities.import_classes (script_path, 'lib/ReportGenerators/LatexPages', 'name'):
if cls.name () == 'Appendix':
self.appendix = cls ()
elif cls.name () in page_order:
self.pages.insert (page_order.index (cls.name ()), cls ())
else:
logging.warning ("WARNING: Found unexpected Latex Page [%s], skipping..." % cls.name ())
logging.debug ('Loaded LatexPages [%s]' % self.pages)
#
# Generate Report of respective type
#
def generate (self, truth, build):
# Construct the appropriate Tool object which was used for the build
self.tool = self.get_tool (build)()
# The same appendix is used for each permutation, so we initialize
# and finalize it here instead of in write_report
self.appendix.parse_args (build.GetSource ())
logging.info ('Generating report based on filename')
self.write_report (truth, build, Organizer.Grainularity.FILENAME)
logging.info ('Generating report based on function')
self.write_report (truth, build, Organizer.Grainularity.FUNCTION)
logging.info ('Generating report based on line')
self.write_report (truth, build, Organizer.Grainularity.LINE)
self.appendix.fini ()
def write_report (self, truth, build, grainularity):
# Organize the truth and build result sets
|
# If there are 0 expected flaws, don't report on this datapoint.
# This can happen when there are only incidental flaws for
# the grainularity (i.e. line-based reporting where a line
# only has an incidental flaw)
if (0 == datapoint.tp + datapoint.fn):
logging.debug ('Skipping datapoint with 0 expected flaws')
continue
self.appendix.visit (datapoint)
for page in self.pages:
page.visit (datapoint)
# Write the main report page
outfile = open (filename, 'w')
self.init_latex (outfile, build.GetSource (), truth.GetSource ())
# Finalize the pages
for page in self.pages:
self.include_page (outfile, page)
page.fini ()
self.include_page (outfile, self.appendix)
self.fini_latex (outfile)
#
# Build a datapoint from the provided data structure
#
def build_datapoint (self, organizer, data, wrong_checker_is_fp):
# Get the probability matrix
(tp, fp, fn) = self.compute_probability (data, wrong_checker_is_fp)
# Build a data point
result = DataPoint ()
result.tp = tp
result.fp = fp
result.fn = fn
result.weakness = data['weakness']
result.directory = data['directory']
result.flaws = data['flaws']
result.bugs = data['bugs']
result.tool = organizer.build_rs.GetName ()
result.truth = organizer.truth_rs.GetName ()
return result
#
# Compute the probability matrix from the provided data
#
def compute_probability (self, data, wrong_checker_is_fp):
# Build a list of incidental CWEs which appear in the data
incidentals = [f for f in data['flaws'] if f.severity == FlawType.INCIDENTAL]
incidental_cwes = set ()
for flaw in incidentals:
cwe = flaw.description.replace (' ', '')
if cwe.startswith ('CWE'):
incidental_cwes.add (cwe[:6])
right_checker = 0
wrong_checker = 0
# Check the bugs | organizer = Organizer (grainularity,
truth,
build)
organizer.organize ()
# We create two permutations for each grainularity:
# One where Bugs with wrong checkers count as false positives ([tool].[grainularity].tex)
# One where Bugs with wrong checkers don't count as false positives ([tool].[grainularity].skip.tex)
permutations = [('%s.%s.tex' % (build.GetSource (), str (grainularity).split ('.')[-1]), True),
('%s.%s.skip.tex' % (build.GetSource (), str (grainularity).split ('.')[-1]), False)]
for (filename, wrong_checker_fp) in permutations:
# Initialize the pages
for page in self.pages:
page.parse_args (organizer, wrong_checker_fp)
# Get the leaves
for data in organizer.find_leaves (organizer.data):
# Build datapoint
datapoint = self.build_datapoint (organizer, data, wrong_checker_fp) | identifier_body |
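find_leaves in the rows above treats any sub-dictionary that carries a list under the 'flaws' key as a leaf record and recurses into every value otherwise. A compact, self-contained rendering of the same idea; the plain-dict test data and its names are invented for the example:

```python
def find_leaves(d):
    # A leaf is a record dict carrying a 'flaws' list; anything else is a branch.
    if isinstance(d.get('flaws'), list):
        return [d]
    leaves = []
    for value in d.values():
        leaves.extend(find_leaves(value))
    return leaves

tree = {
    'CWE-121': {
        'suite-01': {
            'a.c': {'flaws': [], 'bugs': [], 'filename': 'a.c'},
            'b.c': {'flaws': [], 'bugs': [], 'filename': 'b.c'},
        }
    }
}
print([leaf['filename'] for leaf in find_leaves(tree)])   # ['a.c', 'b.c']
```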
LatexGenerator.py | (nested_dict)
self.data = nested_dict ()
def organize (self):
self._group (self.truth_rs)
self._group (self.build_rs)
def _group (self, result_set):
for weakness in result_set.weaknesses ():
for suite in weakness.suites:
# Handle the flaws
for flaw in suite.flaws:
# Find the target, this will create the necessary keys if required
target = self._get_target (weakness, suite, flaw)
if not target:
# First time seeing this file/function/line, need to set its value
newdict = self._get_default_data (weakness, suite, flaw)
target.update (newdict)
else:
target['flaws'].append (flaw)
# Handle the bugs
for bug in suite.bugs:
# Find the target, this will create the necessary keys if required
target = self._get_target (weakness, suite, bug)
if not target:
# First time seeing this file/function/line, need to set its value
newdict = self._get_default_data (weakness, suite, bug)
target.update (newdict)
else:
target['bugs'].append (bug)
#
# Get the appropriate target based on the grainularity
#
def _get_target (self, weakness, suite, obj):
if self.grainularity == Organizer.Grainularity.FILENAME:
return self.data[weakness.name][suite.directory][obj.filename]
elif self.grainularity == Organizer.Grainularity.FUNCTION:
return self.data[weakness.name][suite.directory][obj.filename][obj.function]
else:
return self.data[weakness.name][suite.directory][obj.filename][obj.function][obj.line]
#
# Make the default data dictionary based on the object provided
#
def _get_default_data (self, weakness, suite, obj):
# We always have flaws, bugs, weakness, directory, and filename
result = {'flaws': [], 'bugs': [], 'weakness': weakness.name, 'directory': suite.directory,
'filename': obj.filename, 'function': '', 'line': ''}
# Populate the function/line if we used that grainularity
if self.grainularity == Organizer.Grainularity.FUNCTION:
result['function'] = obj.function
elif self.grainularity == Organizer.Grainularity.LINE:
result['function'] = obj.function
result['line'] = obj.line
# Append the provided object to the correct list
if isinstance (obj, Bug):
result['bugs'].append (obj)
else:
result['flaws'].append (obj)
return result
#
# Find the leaves in the provided organized dictionary
#
def find_leaves (self, dictionary):
if isinstance (dictionary.get ('flaws'), list):
return [dictionary]
result = []
for key in dictionary.keys ():
result.extend (self.find_leaves (dictionary.get (key)))
return result
class DataPoint:
def __init__ (self):
self.tp = 0
self.fp = 0
self.fn = 0
self.weakness = None
self.directory = None
self.flaws = []
self.bugs = []
self.tool = None
self.truth = None
def precision (self):
try:
return self.tp / (self.tp + self.fp * 1.0)
except Exception as e:
return 0
def recall (self):
try:
return self.tp / (self.tp + self.fn * 1.0)
except Exception as e:
return 0
#
# Factory for Report Generation
# @returns LatexGenerator object
#
def __create__():
return LatexGenerator()
#
# Concrete class - PDFLatexGenerator
#
class LatexGenerator(ReportGenerator):
#
# Initialize the parser
#
@staticmethod
def init_parser (parser):
latex_parser = parser.add_parser ('latex', help='Convert evidence into a latex file')
latex_parser.set_defaults (generator=LatexGenerator)
#
# Initialize the generator
#
def | (self, args):
# Call the base class (Command) init
super (LatexGenerator, self).parse_args (args)
self.pages = []
self.appendix = None
self.load_pages ()
#
# Load the pages
#
def load_pages (self):
# Some pages are repeated per grainularity (i.e. Summary). These should
# be in the correct order.
page_order = ['Methodology', 'Summary', 'Detail']
script_path = os.path.dirname (os.path.abspath (sys.argv[0]))
for cls in Utilities.import_classes (script_path, 'lib/ReportGenerators/LatexPages', 'name'):
if cls.name () == 'Appendix':
self.appendix = cls ()
elif cls.name () in page_order:
self.pages.insert (page_order.index (cls.name ()), cls ())
else:
logging.warning ("WARNING: Found unexpected Latex Page [%s], skipping..." % cls.name ())
logging.debug ('Loaded LatexPages [%s]' % self.pages)
#
# Generate Report of respective type
#
def generate (self, truth, build):
# Construct the appropriate Tool object which was used for the build
self.tool = self.get_tool (build)()
# The same appendix is used for each permutation, so we initialize
# and finalize it here instead of in write_report
self.appendix.parse_args (build.GetSource ())
logging.info ('Generating report based on filename')
self.write_report (truth, build, Organizer.Grainularity.FILENAME)
logging.info ('Generating report based on function')
self.write_report (truth, build, Organizer.Grainularity.FUNCTION)
logging.info ('Generating report based on line')
self.write_report (truth, build, Organizer.Grainularity.LINE)
self.appendix.fini ()
def write_report (self, truth, build, grainularity):
# Organize the truth and build result sets
organizer = Organizer (grainularity,
truth,
build)
organizer.organize ()
# We create two permutations for each grainularity:
# One where Bugs with wrong checkers count as false positives ([tool].[grainularity].tex)
# One where Bugs with wrong checkers don't count as false positives ([tool].[grainularity].skip.tex)
permutations = [('%s.%s.tex' % (build.GetSource (), str (grainularity).split ('.')[-1]), True),
('%s.%s.skip.tex' % (build.GetSource (), str (grainularity).split ('.')[-1]), False)]
for (filename, wrong_checker_fp) in permutations:
# Initialize the pages
for page in self.pages:
page.parse_args (organizer, wrong_checker_fp)
# Get the leaves
for data in organizer.find_leaves (organizer.data):
# Build datapoint
datapoint = self.build_datapoint (organizer, data, wrong_checker_fp)
# If there are 0 expected flaws, don't report on this datapoint.
# This can happen when there are only incidental flaws for
# the grainularity (i.e. line-based reporting where a line
# only has an incidental flaw)
if (0 == datapoint.tp + datapoint.fn):
logging.debug ('Skipping datapoint with 0 expected flaws')
continue
self.appendix.visit (datapoint)
for page in self.pages:
page.visit (datapoint)
# Write the main report page
outfile = open (filename, 'w')
self.init_latex (outfile, build.GetSource (), truth.GetSource ())
# Finalize the pages
for page in self.pages:
self.include_page (outfile, page)
page.fini ()
self.include_page (outfile, self.appendix)
self.fini_latex (outfile)
#
# Build a datapoint from the provided data structure
#
def build_datapoint (self, organizer, data, wrong_checker_is_fp):
# Get the probability matrix
(tp, fp, fn) = self.compute_probability (data, wrong_checker_is_fp)
# Build a data point
result = DataPoint ()
result.tp = tp
result.fp = fp
result.fn = fn
result.weakness = data['weakness']
result.directory = data['directory']
result.flaws = data['flaws']
result.bugs = data['bugs']
result.tool = organizer.build_rs.GetName ()
result.truth = organizer.truth_rs.GetName ()
return result
#
# Compute the probability matrix from the provided data
#
def compute_probability (self, data, wrong_checker_is_fp):
# Build a list of incidental CWEs which appear in the data
incidentals = [f for f in data['flaws'] if f.severity == FlawType.INCIDENTAL]
incidental_cwes = set ()
for flaw in incidentals:
cwe = flaw.description.replace (' ', '')
if cwe.startswith ('CWE'):
incidental_cwes.add (cwe[:6])
right_checker = 0
wrong_checker = 0
# Check the bugs to | parse_args | identifier_name |
LatexGenerator.py | (nested_dict)
self.data = nested_dict ()
| def organize (self):
self._group (self.truth_rs)
self._group (self.build_rs)
def _group (self, result_set):
for weakness in result_set.weaknesses ():
for suite in weakness.suites:
# Handle the flaws
for flaw in suite.flaws:
# Find the target, this will create the necessary keys if required
target = self._get_target (weakness, suite, flaw)
if not target:
# First time seeing this file/function/line, need to set its value
newdict = self._get_default_data (weakness, suite, flaw)
target.update (newdict)
else:
target['flaws'].append (flaw)
# Handle the bugs
for bug in suite.bugs:
# Find the target, this will create the necessary keys if required
target = self._get_target (weakness, suite, bug)
if not target:
# First time seeing this file/function/line, need to set its value
newdict = self._get_default_data (weakness, suite, bug)
target.update (newdict)
else:
target['bugs'].append (bug)
#
# Get the appropriate target based on the grainularity
#
def _get_target (self, weakness, suite, obj):
if self.grainularity == Organizer.Grainularity.FILENAME:
return self.data[weakness.name][suite.directory][obj.filename]
elif self.grainularity == Organizer.Grainularity.FUNCTION:
return self.data[weakness.name][suite.directory][obj.filename][obj.function]
else:
return self.data[weakness.name][suite.directory][obj.filename][obj.function][obj.line]
#
# Make the default data dictionary based on the object provided
#
def _get_default_data (self, weakness, suite, obj):
# We always have flaws, bugs, weakness, directory, and filename
result = {'flaws': [], 'bugs': [], 'weakness': weakness.name, 'directory': suite.directory,
'filename': obj.filename, 'function': '', 'line': ''}
# Populate the function/line if we used that grainularity
if self.grainularity == Organizer.Grainularity.FUNCTION:
result['function'] = obj.function
elif self.grainularity == Organizer.Grainularity.LINE:
result['function'] = obj.function
result['line'] = obj.line
# Append the provided object to the correct list
if isinstance (obj, Bug):
result['bugs'].append (obj)
else:
result['flaws'].append (obj)
return result
#
# Find the leaves in the provided organized dictionary
#
def find_leaves (self, dictionary):
if isinstance (dictionary.get ('flaws'), list):
return [dictionary]
result = []
for key in dictionary.keys ():
result.extend (self.find_leaves (dictionary.get (key)))
return result
class DataPoint:
def __init__ (self):
self.tp = 0
self.fp = 0
self.fn = 0
self.weakness = None
self.directory = None
self.flaws = []
self.bugs = []
self.tool = None
self.truth = None
def precision (self):
try:
return self.tp / (self.tp + self.fp * 1.0)
except Exception as e:
return 0
def recall (self):
try:
return self.tp / (self.tp + self.fn * 1.0)
except Exception as e:
return 0
#
# Factory for Report Generation
# @returns LatexGenerator object
#
def __create__():
return LatexGenerator()
#
# Concrete class - PDFLatexGenerator
#
class LatexGenerator(ReportGenerator):
#
# Initialize the parser
#
@staticmethod
def init_parser (parser):
latex_parser = parser.add_parser ('latex', help='Convert evidence into a latex file')
latex_parser.set_defaults (generator=LatexGenerator)
#
# Initialize the generator
#
def parse_args (self, args):
# Call the base class (Command) init
super (LatexGenerator, self).parse_args (args)
self.pages = []
self.appendix = None
self.load_pages ()
#
# Load the pages
#
def load_pages (self):
# Some pages are repeated per grainularity (i.e. Summary). These should
# be in the correct order.
page_order = ['Methodology', 'Summary', 'Detail']
script_path = os.path.dirname (os.path.abspath (sys.argv[0]))
for cls in Utilities.import_classes (script_path, 'lib/ReportGenerators/LatexPages', 'name'):
if cls.name () == 'Appendix':
self.appendix = cls ()
elif cls.name () in page_order:
self.pages.insert (page_order.index (cls.name ()), cls ())
else:
logging.warning ("WARNING: Found unexpected Latex Page [%s], skipping..." % cls.name ())
logging.debug ('Loaded LatexPages [%s]' % self.pages)
#
# Generate Report of respective type
#
def generate (self, truth, build):
# Construct the appropriate Tool object which was used for the build
self.tool = self.get_tool (build)()
# The same appendix is used for each permutation, so we initialize
# and finalize it here instead of in write_report
self.appendix.parse_args (build.GetSource ())
logging.info ('Generating report based on filename')
self.write_report (truth, build, Organizer.Grainularity.FILENAME)
logging.info ('Generating report based on function')
self.write_report (truth, build, Organizer.Grainularity.FUNCTION)
logging.info ('Generating report based on line')
self.write_report (truth, build, Organizer.Grainularity.LINE)
self.appendix.fini ()
def write_report (self, truth, build, grainularity):
# Organize the truth and build result sets
organizer = Organizer (grainularity,
truth,
build)
organizer.organize ()
# We create two permutations for each grainularity:
# One where Bugs with wrong checkers count as false positives ([tool].[grainularity].tex)
# One where Bugs with wrong checkers don't count as false positives ([tool].[grainularity].skip.tex)
permutations = [('%s.%s.tex' % (build.GetSource (), str (grainularity).split ('.')[-1]), True),
('%s.%s.skip.tex' % (build.GetSource (), str (grainularity).split ('.')[-1]), False)]
for (filename, wrong_checker_fp) in permutations:
# Initialize the pages
for page in self.pages:
page.parse_args (organizer, wrong_checker_fp)
# Get the leaves
for data in organizer.find_leaves (organizer.data):
# Build datapoint
datapoint = self.build_datapoint (organizer, data, wrong_checker_fp)
# If there are 0 expected flaws, don't report on this datapoint.
# This can happen when there are only incidental flaws for
# the grainularity (i.e. line-based reporting where a line
# only has an incidental flaw)
if (0 == datapoint.tp + datapoint.fn):
logging.debug ('Skipping datapoint with 0 expected flaws')
continue
self.appendix.visit (datapoint)
for page in self.pages:
page.visit (datapoint)
# Write the main report page
outfile = open (filename, 'w')
self.init_latex (outfile, build.GetSource (), truth.GetSource ())
# Finalize the pages
for page in self.pages:
self.include_page (outfile, page)
page.fini ()
self.include_page (outfile, self.appendix)
self.fini_latex (outfile)
#
# Build a datapoint from the provided data structure
#
def build_datapoint (self, organizer, data, wrong_checker_is_fp):
# Get the probability matrix
(tp, fp, fn) = self.compute_probability (data, wrong_checker_is_fp)
# Build a data point
result = DataPoint ()
result.tp = tp
result.fp = fp
result.fn = fn
result.weakness = data['weakness']
result.directory = data['directory']
result.flaws = data['flaws']
result.bugs = data['bugs']
result.tool = organizer.build_rs.GetName ()
result.truth = organizer.truth_rs.GetName ()
return result
#
# Compute the probability matrix from the provided data
#
def compute_probability (self, data, wrong_checker_is_fp):
# Build a list of incidental CWEs which appear in the data
incidentals = [f for f in data['flaws'] if f.severity == FlawType.INCIDENTAL]
incidental_cwes = set ()
for flaw in incidentals:
cwe = flaw.description.replace (' ', '')
if cwe.startswith ('CWE'):
incidental_cwes.add (cwe[:6])
right_checker = 0
wrong_checker = 0
# Check the bugs to see | random_line_split |
|
admin.js | (e) {
dropArea.classList.add('highlight')
}
function unhighlight(e) {
dropArea.classList.remove('active')
}
function handleDrop(e) {
e.preventDefault();
e.stopPropagation();
var items = e.dataTransfer.items;
var files = e.dataTransfer.files;
console.log(items, files);
let p = Promise.resolve();
let fileInfos = [];
for (var i = 0; i < items.length; ++i) {
let item = items[i];
// Skip this one if we didn't get a file.
if (item.kind != 'file') {
continue;
}
let entry = item.webkitGetAsEntry();
p = p.then(() => readEntry(entry, fileInfos));
// console.log(item, entry);
// if (entry.isDirectory) {
// console.log('start');
// // entry.createReader().readEntries((entries) => {
// // console.log(entries);
// // });
// readEntry(entry, []).then(fileInfos => {
// console.log('ssss')
// console.log(fileInfos);
// renderTable(fileInfos);
// });
// console.log('end');
// } else {
// p = p.then()
// }
}
p.then(f => {
console.log('ssss')
console.log(fileInfos);
renderTable(fileInfos);
});
}
// function handleDrop(e) {
// var dt = e.dataTransfer
// var files = dt.files
// console.log(dt);
// handleFiles(files)
// }
let uploadProgress = []
let progressBar = document.getElementById('progress-bar')
function initializeProgress(numFiles) {
progressBar.value = 0
uploadProgress = []
for (let i = numFiles; i > 0; i--) {
uploadProgress.push(0)
}
}
function updateProgress(fileNumber, percent) {
uploadProgress[fileNumber] = percent
let total = uploadProgress.reduce((tot, curr) => tot + curr, 0) / uploadProgress.length
console.debug('update', fileNumber, percent, total)
progressBar.value = total
}
function handleFiles(files) {
files = [...files]
// initializeProgress(files.length)
renderTable(files);
// files.forEach(uploadFile)
// files.forEach(previewFile)
}
function previewFile(file) {
let reader = new FileReader()
reader.readAsDataURL(file)
reader.onloadend = function () {
let img = document.createElement('img')
img.src = reader.result
document.getElementById('gallery').appendChild(img)
}
}
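// renderTable: hashes each dropped file (md5 of its bytes) and lists only the files whose
// content differs from the copy already recorded in serverFiles; unchanged files are removed
// from uploadFiles so they are not uploaded again.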
function renderTable(files) {
console.log(files);
let tableContent = document.querySelector("#filesUploaded tbody");
// tableContent.innerHTML = "";
let rowIndex = tableContent.children.length + 1;
// uploadFiles = {};
for (let i = 0; i < files.length; i++) {
let file = files[i].file;
let filename = file.name;
let fullpath = files[i].path;
let path = fullpath.substr(1, fullpath.indexOf(filename) - 1);
let key = fullpath.substr(1);
uploadFiles[key] = {
file: file,
name: filename,
path: path
};
console.log(JSON.stringify(uploadFiles));
let reader = new FileReader();
reader.onload = function () {
var arrayBuffer = this.result,
array = new Uint8Array(arrayBuffer);
// binaryString = String.fromCharCode.apply(null, array);
let file_md5 = md5(array);
// console.log(key, JSON.stringify(serverFiles[key]), file_md5);
if (serverFiles[key] && serverFiles[key].md5 == file_md5) {
delete uploadFiles[key];
} else {
uploadFiles[key].md5 = file_md5;
let row = document.createElement("tr");
row.innerHTML =
`<th>${rowIndex++}</th><td>${filename}</td><td>${path}</td><td>${(file.size/1000).toFixed(2)}kb</td><td>${file_md5}</td>`;
tableContent.appendChild(row);
}
}
reader.readAsArrayBuffer(file);
}
}
// # filename size MD5
// 0 2.jpg 48517.08kb d0fc35af50238da5ef20b5b511328a60
// 1 3.jpg 47221.85kb f523c41e133f83f5d3d082deb94bccad
// 2 1.jpg 49291.56kb ee224e07441a6b6f27d951d9bbfb2745
// function uploadFile(file, i) {
// var url = './upload.php'
// var xhr = new XMLHttpRequest()
// var formData = new FormData()
// xhr.open('POST', url, true)
// xhr.setRequestHeader('X-Requested-With', 'XMLHttpRequest')
// // Update progress (can be used to show progress indicator)
// xhr.upload.addEventListener("progress", function (e) {
// updateProgress(i, (e.loaded * 100.0 / e.total) || 100)
// })
// xhr.addEventListener('readystatechange', function (e) {
// if (xhr.readyState == 4 && xhr.status == 200) {
// updateProgress(i, 100); // <- Add this
// console.log(xhr);
// console.log(this.responseText);
// } else if (xhr.readyState == 4 && xhr.status != 200) {
// // Error. Inform the user
// }
// })
// formData.append('fileToUpload', file)
// xhr.send(formData)
// }
function onClearFiles(){
let tableContent = document.querySelector("#filesUploaded tbody");
tableContent.innerHTML = "";
uploadFiles = {};
}
function onUploadFiles() {
let version = document.getElementById("publishVersion").value;
if (!version) {
console.log('version is empty');
return;
}
if (Object.keys(uploadFiles).length == 0) {
handlerError('nothing to upload');
return;
}
$('#exampleModal').modal('show');
let promise = Promise.resolve();
$('#btn-progress-done').prop('disabled', true);
console.log(uploadFiles);
let numFiles = Object.keys(uploadFiles).length;
let fileIdx = 1;
for (let key in uploadFiles) {
promise = promise.then(() => new Promise((resolve, reject) => {
let file = uploadFiles[key].file;
let path = uploadFiles[key].path;
$("#upload-file-info").text(file.name + ` (${fileIdx++}/${numFiles})`);
console.log(file);
var formData = new FormData();
formData.append('fileToUpload', file);
formData.append('path', path);
$.ajax({
xhr: function () {
var xhr = new window.XMLHttpRequest();
//Upload progress
xhr.upload.addEventListener("progress", function (evt) {
// console.log(evt);
if (evt.lengthComputable) {
var percentComplete = evt.loaded / evt.total;
//Do something with upload progress
$('#progressbar').css("width", (percentComplete * 100) + "%");
console.log(percentComplete);
}
}, false);
return xhr;
},
type: "POST",
url: "./upload.php",
processData: false,
contentType: false,
success: function (data) {
console.log(data);
if (data.err) {
reject(data.err);
} else {
resolve();
}
},
data: formData,
dataType: "json"
})
}));
}
promise.then(() => {
console.log('uploads, done');
$('#btn-progress-done').prop('disabled', false);
onUploadFilesDone();
}).catch(err => {
handlerError(err);
});
}
function onUploadFilesDone() {
let data = {
version: document.getElementById("publishVersion").value,
update_teacher: document.getElementById("checkbox-update-teacher").checked,
update_student: document.getElementById("checkbox-update-student").checked,
update_server: document.getElementById("checkbox-update-server").checked,
files: []
};
for (let key in uploadFiles) {
let file = uploadFiles[key].file;
data.files.push({
name: file.name,
path: uploadFiles[key].path,
size: (file.size / 1000).toFixed(2),
md5: uploadFiles[key].md5,
});
}
console.log(data);
$.ajax({
type: "POST",
url: "./upload_done.php",
success: function (data) {
initVersionPanel();
handlerError(data.err);
},
data: {
data: JSON.stringify(data)
},
dataType: "json"
})
}
// function graphStyleRadioClick(e) {
// console.log('clicked', e);
// }
let serverFiles;
let uploadFiles;
function initVersionPanel() {
serverFiles = {};
$.ajax({
url: "../version_last.php",
success: function (data) {
console.log(data);
if (!data.version) {
document.getElementById("publishVersion").value = "1.0. | highlight | identifier_name |
|
admin.js | .value = 0
uploadProgress = []
for (let i = numFiles; i > 0; i--) {
uploadProgress.push(0)
}
}
function updateProgress(fileNumber, percent) {
uploadProgress[fileNumber] = percent
let total = uploadProgress.reduce((tot, curr) => tot + curr, 0) / uploadProgress.length
console.debug('update', fileNumber, percent, total)
progressBar.value = total
}
function handleFiles(files) {
files = [...files]
// initializeProgress(files.length)
renderTable(files);
// files.forEach(uploadFile)
// files.forEach(previewFile)
}
function previewFile(file) {
let reader = new FileReader()
reader.readAsDataURL(file)
reader.onloadend = function () {
let img = document.createElement('img')
img.src = reader.result
document.getElementById('gallery').appendChild(img)
}
}
function renderTable(files) {
console.log(files);
let tableContent = document.querySelector("#filesUploaded tbody");
// tableContent.innerHTML = "";
let rowIndex = tableContent.children.length + 1;
// uploadFiles = {};
for (let i = 0; i < files.length; i++) {
let file = files[i].file;
let filename = file.name;
let fullpath = files[i].path;
let path = fullpath.substr(1, fullpath.indexOf(filename) - 1);
let key = fullpath.substr(1);
uploadFiles[key] = {
file: file,
name: filename,
path: path
};
console.log(JSON.stringify(uploadFiles));
let reader = new FileReader();
reader.onload = function () {
var arrayBuffer = this.result,
array = new Uint8Array(arrayBuffer);
// binaryString = String.fromCharCode.apply(null, array);
let file_md5 = md5(array);
// console.log(key, JSON.stringify(serverFiles[key]), file_md5);
if (serverFiles[key] && serverFiles[key].md5 == file_md5) {
delete uploadFiles[key];
} else {
uploadFiles[key].md5 = file_md5;
let row = document.createElement("tr");
row.innerHTML =
`<th>${rowIndex++}</th><td>${filename}</td><td>${path}</td><td>${(file.size/1000).toFixed(2)}kb</td><td>${file_md5}</td>`;
tableContent.appendChild(row);
}
}
reader.readAsArrayBuffer(file);
}
}
			// # Filename  Size  MD5
// 0 2.jpg 48517.08kb d0fc35af50238da5ef20b5b511328a60
// 1 3.jpg 47221.85kb f523c41e133f83f5d3d082deb94bccad
// 2 1.jpg 49291.56kb ee224e07441a6b6f27d951d9bbfb2745
// function uploadFile(file, i) {
// var url = './upload.php'
// var xhr = new XMLHttpRequest()
// var formData = new FormData()
// xhr.open('POST', url, true)
// xhr.setRequestHeader('X-Requested-With', 'XMLHttpRequest')
// // Update progress (can be used to show progress indicator)
// xhr.upload.addEventListener("progress", function (e) {
// updateProgress(i, (e.loaded * 100.0 / e.total) || 100)
// })
// xhr.addEventListener('readystatechange', function (e) {
// if (xhr.readyState == 4 && xhr.status == 200) {
// updateProgress(i, 100); // <- Add this
// console.log(xhr);
// console.log(this.responseText);
// } else if (xhr.readyState == 4 && xhr.status != 200) {
// // Error. Inform the user
// }
// })
// formData.append('fileToUpload', file)
// xhr.send(formData)
// }
function onClearFiles(){
let tableContent = document.querySelector("#filesUploaded tbody");
tableContent.innerHTML = "";
uploadFiles = {};
}
function onUploadFiles() {
let version = document.getElementById("publishVersion").value;
if (!version) {
console.log('version is empty');
return;
}
if (Object.keys(uploadFiles).length == 0) {
handlerError('nothing to upload');
return;
}
$('#exampleModal').modal('show');
let promise = Promise.resolve();
$('#btn-progress-done').prop('disabled', true);
console.log(uploadFiles);
let numFiles = Object.keys(uploadFiles).length;
let fileIdx = 1;
for (let key in uploadFiles) {
promise = promise.then(() => new Promise((resolve, reject) => {
let file = uploadFiles[key].file;
let path = uploadFiles[key].path;
$("#upload-file-info").text(file.name + ` (${fileIdx++}/${numFiles})`);
console.log(file);
var formData = new FormData();
formData.append('fileToUpload', file);
formData.append('path', path);
$.ajax({
xhr: function () {
var xhr = new window.XMLHttpRequest();
//Upload progress
xhr.upload.addEventListener("progress", function (evt) {
// console.log(evt);
if (evt.lengthComputable) {
var percentComplete = evt.loaded / evt.total;
//Do something with upload progress
$('#progressbar').css("width", (percentComplete * 100) + "%");
console.log(percentComplete);
}
}, false);
return xhr;
},
type: "POST",
url: "./upload.php",
processData: false,
contentType: false,
success: function (data) {
console.log(data);
if (data.err) {
reject(data.err);
} else {
resolve();
}
},
data: formData,
dataType: "json"
})
}));
}
promise.then(() => {
console.log('uploads, done');
$('#btn-progress-done').prop('disabled', false);
onUploadFilesDone();
}).catch(err => {
handlerError(err);
});
}
function onUploadFilesDone() {
let data = {
version: document.getElementById("publishVersion").value,
update_teacher: document.getElementById("checkbox-update-teacher").checked,
update_student: document.getElementById("checkbox-update-student").checked,
update_server: document.getElementById("checkbox-update-server").checked,
files: []
};
for (let key in uploadFiles) {
let file = uploadFiles[key].file;
data.files.push({
name: file.name,
path: uploadFiles[key].path,
size: (file.size / 1000).toFixed(2),
md5: uploadFiles[key].md5,
});
}
console.log(data);
$.ajax({
type: "POST",
url: "./upload_done.php",
success: function (data) {
initVersionPanel();
handlerError(data.err);
},
data: {
data: JSON.stringify(data)
},
dataType: "json"
})
}
// function graphStyleRadioClick(e) {
// console.log('clicked', e);
// }
let serverFiles;
let uploadFiles;
function initVersionPanel() {
serverFiles = {};
$.ajax({
url: "../version_last.php",
success: function (data) {
console.log(data);
if (!data.version) {
document.getElementById("publishVersion").value = "1.0.0";
document.getElementById("checkbox-update-teacher").checked = true;
document.getElementById("checkbox-update-student").checked = true;
document.getElementById("checkbox-update-server").checked = true;
return;
}
let update = '';
if (data.update_server == "1")
update += ",更新服务器";
if (data.update_teacher == "1")
update += ",更新教师端";
if (data.update_student == "1")
update += ",更新学生端";
if (update.length > 0) {
update = ' <h6>(' + update.substr(1) + ')</h6>';
}
document.getElementById("currentVersion").innerHTML = "当前版本号:" + data.version + update;
let vArr = data.version.split(".");
vArr[vArr.length - 1] = parseInt(vArr[vArr.length - 1]) + 1;
document.getElementById("publishVersion").value = vArr.join(".");
let table = document.querySelector("#filesServer tbody");
table.innerHTML = "";
let rowIndex = 0;
for (let i = 0; i < data.files.length; i++) {
let file = data.files[i];
let row = document.createElement("tr");
row.innerHTML =
`<th>${rowIndex++}</th><td>${file.name}</td><td>${file.path}</td><td>${file.size}</td><td>${file.md5}</td>`;
table.appendChild(row);
serverFiles[file.path + file.name] = file;
}
},
dataType: "json"
});
onClearFiles();
}
function handlerError(str) {
if (str)
alert(str);
}
$(function () | {
initVersionPanel();
initFi | identifier_body |
|
admin.js | {
dropArea.classList.add('highlight')
}
function unhighlight(e) {
dropArea.classList.remove('active')
}
function handleDrop(e) {
e.preventDefault();
e.stopPropagation();
var items = e.dataTransfer.items;
var files = e.dataTransfer.files;
console.log(items, files);
let p = Promise.resolve();
let fileInfos = [];
for (var i = 0; i < items.length; ++i) {
let item = items[i];
// Skip this one if we didn't get a file.
if (item.kind != 'file') {
continue;
}
let entry = item.webkitGetAsEntry();
p = p.then(() => readEntry(entry, fileInfos));
// console.log(item, entry);
// if (entry.isDirectory) {
// console.log('start');
// // entry.createReader().readEntries((entries) => {
// // console.log(entries);
// // });
// readEntry(entry, []).then(fileInfos => {
// console.log('ssss')
// console.log(fileInfos);
// renderTable(fileInfos);
// });
// console.log('end');
// } else {
// p = p.then()
// }
}
p.then(f => {
console.log('ssss')
console.log(fileInfos);
renderTable(fileInfos);
});
}
// function handleDrop(e) {
// var dt = e.dataTransfer
// var files = dt.files
// console.log(dt);
// handleFiles(files)
// }
let uploadProgress = []
let progressBar = document.getElementById('progress-bar')
function initializeProgress(numFiles) {
progressBar.value = 0
uploadProgress = []
for (let i = numFiles; i > 0; i--) {
uploadProgress.push(0)
}
}
function updateProgress(fileNumber, percent) {
uploadProgress[fileNumber] = percent
let total = uploadProgress.reduce((tot, curr) => tot + curr, 0) / uploadProgress.length
console.debug('update', fileNumber, percent, total)
progressBar.value = total
}
function handleFiles(files) {
files = [...files]
// initializeProgress(files.length)
renderTable(files);
// files.forEach(uploadFile)
// files.forEach(previewFile)
}
function previewFile(file) {
let reader = new FileReader()
reader.readAsDataURL(file)
reader.onloadend = function () {
let img = document.createElement('img')
img.src = reader.result
document.getElementById('gallery').appendChild(img)
}
}
function renderTable(files) {
console.log(files);
let tableContent = document.querySelector("#filesUploaded tbody");
// tableContent.innerHTML = "";
let rowIndex = tableContent.children.length + 1;
// uploadFiles = {};
for (let i = 0; i < files.length; i++) {
let file = files[i].file;
let filename = file.name;
let fullpath = files[i].path;
let path = fullpath.substr(1, fullpath.indexOf(filename) - 1);
let key = fullpath.substr(1);
uploadFiles[key] = {
file: file,
name: filename,
path: path
};
console.log(JSON.stringify(uploadFiles));
let reader = new FileReader();
reader.onload = function () {
var arrayBuffer = this.result,
array = new Uint8Array(arrayBuffer);
// binaryString = String.fromCharCode.apply(null, array);
let file_md5 = md5(array);
// console.log(key, JSON.stringify(serverFiles[key]), file_md5);
if (serverFiles[key] && serverFiles[key].md5 == file_md5) {
delete uploadFiles[key];
} else {
uploadFiles[key].md5 = file_md5;
let row = document.createElement("tr");
row.innerHTML =
`<th>${rowIndex++}</th><td>${filename}</td><td>${path}</td><td>${(file.size/1000).toFixed(2)}kb</td><td>${file_md5}</td>`;
tableContent.appendChild(row);
}
}
reader.readAsArrayBuffer(file);
}
}
			// # Filename  Size  MD5
// 0 2.jpg 48517.08kb d0fc35af50238da5ef20b5b511328a60
// 1 3.jpg 47221.85kb f523c41e133f83f5d3d082deb94bccad
// 2 1.jpg 49291.56kb ee224e07441a6b6f27d951d9bbfb2745
// function uploadFile(file, i) {
// var url = './upload.php'
// var xhr = new XMLHttpRequest()
// var formData = new FormData()
// xhr.open('POST', url, true)
// xhr.setRequestHeader('X-Requested-With', 'XMLHttpRequest')
// // Update progress (can be used to show progress indicator)
// xhr.upload.addEventListener("progress", function (e) {
// updateProgress(i, (e.loaded * 100.0 / e.total) || 100)
// })
// xhr.addEventListener('readystatechange', function (e) {
// if (xhr.readyState == 4 && xhr.status == 200) {
// updateProgress(i, 100); // <- Add this
// console.log(xhr);
// console.log(this.responseText);
// } else if (xhr.readyState == 4 && xhr.status != 200) {
// // Error. Inform the user
// }
// })
// formData.append('fileToUpload', file)
// xhr.send(formData)
// }
function onClearFiles(){
let tableContent = document.querySelector("#filesUploaded tbody");
tableContent.innerHTML = "";
uploadFiles = {};
}
function onUploadFiles() {
let version = document.getElementById("publishVersion").value;
if (!version) {
console.log('version is empty');
return;
}
if (Object.keys(uploadFiles).length == 0) {
handlerError('nothing to upload');
return;
}
$('#exampleModal').modal('show');
let promise = Promise.resolve();
$('#btn-progress-done').prop('disabled', true);
console.log(uploadFiles);
let numFiles = Object.keys(uploadFiles).length;
let fileIdx = 1;
for (let key in uploadFiles) {
promise = promise.then(() => new Promise((resolve, reject) => {
let file = uploadFiles[key].file;
let path = uploadFiles[key].path;
$("#upload-file-info").text(file.name + ` (${fileIdx++}/${numFiles})`);
console.log(file);
var formData = new FormData();
formData.append('fileToUpload', file);
formData.append('path', path);
$.ajax({
xhr: function () {
var xhr = new window.XMLHttpRequest();
//Upload progress
xhr.upload.addEventListener("progress", function (evt) {
// console.log(evt);
if (evt.lengthComputable) {
var percentComplete = evt.loaded / evt.total;
//Do something with upload progress
$('#progressbar').css("width", (percentComplete * 100) + "%");
console.log(percentComplete);
}
}, false);
return xhr;
},
type: "POST",
url: "./upload.php",
processData: false,
contentType: false,
success: function (data) {
console.log(data);
if (data.err) {
reject(data.err);
} else {
resolve();
}
},
data: formData,
dataType: "json"
})
}));
}
promise.then(() => {
console.log('uploads, done');
$('#btn-progress-done').prop('disabled', false);
onUploadFilesDone();
}).catch(err => {
handlerError(err); | });
}
function onUploadFilesDone() {
let data = {
version: document.getElementById("publishVersion").value,
update_teacher: document.getElementById("checkbox-update-teacher").checked,
update_student: document.getElementById("checkbox-update-student").checked,
update_server: document.getElementById("checkbox-update-server").checked,
files: []
};
for (let key in uploadFiles) {
let file = uploadFiles[key].file;
data.files.push({
name: file.name,
path: uploadFiles[key].path,
size: (file.size / 1000).toFixed(2),
md5: uploadFiles[key].md5,
});
}
console.log(data);
$.ajax({
type: "POST",
url: "./upload_done.php",
success: function (data) {
initVersionPanel();
handlerError(data.err);
},
data: {
data: JSON.stringify(data)
},
dataType: "json"
})
}
// function graphStyleRadioClick(e) {
// console.log('clicked', e);
// }
let serverFiles;
let uploadFiles;
function initVersionPanel() {
serverFiles = {};
$.ajax({
url: "../version_last.php",
success: function (data) {
console.log(data);
if (!data.version) {
document.getElementById("publishVersion").value = "1.0.0";
| random_line_split |
|
admin.js | dropArea.classList.add('highlight')
}
function unhighlight(e) {
dropArea.classList.remove('active')
}
function handleDrop(e) {
e.preventDefault();
e.stopPropagation();
var items = e.dataTransfer.items;
var files = e.dataTransfer.files;
console.log(items, files);
let p = Promise.resolve();
let fileInfos = [];
for (var i = 0; i < items.length; ++i) {
let item = items[i];
// Skip this one if we didn't get a file.
if (item.kind != 'file') {
continue;
}
let entry = item.webkitGetAsEntry();
p = p.then(() => readEntry(entry, fileInfos));
// console.log(item, entry);
// if (entry.isDirectory) {
// console.log('start');
// // entry.createReader().readEntries((entries) => {
// // console.log(entries);
// // });
// readEntry(entry, []).then(fileInfos => {
// console.log('ssss')
// console.log(fileInfos);
// renderTable(fileInfos);
// });
// console.log('end');
// } else {
// p = p.then()
// }
}
p.then(f => {
console.log('ssss')
console.log(fileInfos);
renderTable(fileInfos);
});
}
// function handleDrop(e) {
// var dt = e.dataTransfer
// var files = dt.files
// console.log(dt);
// handleFiles(files)
// }
let uploadProgress = []
let progressBar = document.getElementById('progress-bar')
function initializeProgress(numFiles) {
progressBar.value = 0
uploadProgress = []
for (let i = numFiles; i > 0; i--) {
uploadProgress.push(0)
}
}
function updateProgress(fileNumber, percent) {
uploadProgress[fileNumber] = percent
let total = uploadProgress.reduce((tot, curr) => tot + curr, 0) / uploadProgress.length
console.debug('update', fileNumber, percent, total)
progressBar.value = total
}
function handleFiles(files) {
files = [...files]
// initializeProgress(files.length)
renderTable(files);
// files.forEach(uploadFile)
// files.forEach(previewFile)
}
function previewFile(file) {
let reader = new FileReader()
reader.readAsDataURL(file)
reader.onloadend = function () {
let img = document.createElement('img')
img.src = reader.result
document.getElementById('gallery').appendChild(img)
}
}
function renderTable(files) {
console.log(files);
let tableContent = document.querySelector("#filesUploaded tbody");
// tableContent.innerHTML = "";
let rowIndex = tableContent.children.length + 1;
// uploadFiles = {};
for (let i = 0; i < files.length; i++) {
let file = files[i].file;
let filename = file.name;
let fullpath = files[i].path;
let path = fullpath.substr(1, fullpath.indexOf(filename) - 1);
let key = fullpath.substr(1);
uploadFiles[key] = {
file: file,
name: filename,
path: path
};
console.log(JSON.stringify(uploadFiles));
let reader = new FileReader();
reader.onload = function () {
var arrayBuffer = this.result,
array = new Uint8Array(arrayBuffer);
// binaryString = String.fromCharCode.apply(null, array);
let file_md5 = md5(array);
// console.log(key, JSON.stringify(serverFiles[key]), file_md5);
if (serverFiles[key] && serverFiles[key].md5 == file_md5) {
delete uploadFiles[key];
} else {
uploadFiles[key].md5 = file_md5;
let row = document.createElement("tr");
row.innerHTML =
`<th>${rowIndex++}</th><td>${filename}</td><td>${path}</td><td>${(file.size/1000).toFixed(2)}kb</td><td>${file_md5}</td>`;
tableContent.appendChild(row);
}
}
reader.readAsArrayBuffer(file);
}
}
			// # Filename  Size  MD5
// 0 2.jpg 48517.08kb d0fc35af50238da5ef20b5b511328a60
// 1 3.jpg 47221.85kb f523c41e133f83f5d3d082deb94bccad
// 2 1.jpg 49291.56kb ee224e07441a6b6f27d951d9bbfb2745
// function uploadFile(file, i) {
// var url = './upload.php'
// var xhr = new XMLHttpRequest()
// var formData = new FormData()
// xhr.open('POST', url, true)
// xhr.setRequestHeader('X-Requested-With', 'XMLHttpRequest')
// // Update progress (can be used to show progress indicator)
// xhr.upload.addEventListener("progress", function (e) {
// updateProgress(i, (e.loaded * 100.0 / e.total) || 100)
// })
// xhr.addEventListener('readystatechange', function (e) {
// if (xhr.readyState == 4 && xhr.status == 200) {
// updateProgress(i, 100); // <- Add this
// console.log(xhr);
// console.log(this.responseText);
// } else if (xhr.readyState == 4 && xhr.status != 200) {
// // Error. Inform the user
// }
// })
// formData.append('fileToUpload', file)
// xhr.send(formData)
// }
function onClearFiles(){
let tableContent = document.querySelector("#filesUploaded tbody");
tableContent.innerHTML = "";
uploadFiles = {};
}
function onUploadFiles() {
let version = document.getElementById("publishVersion").value;
if (!version) {
console.log('version is empty');
return;
}
if (Object.keys(uploadFiles).length == 0) {
handlerError('nothing to upload');
return;
}
$('#exampleModal').modal('show');
let promise = Promise.resolve();
$('#btn-progress-done').prop('disabled', true);
console.log(uploadFiles);
let numFiles = Object.keys(uploadFiles).length;
let fileIdx = 1;
for (let key in uploadFiles) {
promise = promise.then(() => new Promise((resolve, reject) => {
let file = uploadFiles[key].file;
let path = uploadFiles[key].path;
$("#upload-file-info").text(file.name + ` (${fileIdx++}/${numFiles})`);
console.log(file);
var formData = new FormData();
formData.append('fileToUpload', file);
formData.append('path', path);
$.ajax({
xhr: function () {
var xhr = new window.XMLHttpRequest();
//Upload progress
xhr.upload.addEventListener("progress", function (evt) {
// console.log(evt);
if (evt.lengthComputable) {
| }, false);
return xhr;
},
type: "POST",
url: "./upload.php",
processData: false,
contentType: false,
success: function (data) {
console.log(data);
if (data.err) {
reject(data.err);
} else {
resolve();
}
},
data: formData,
dataType: "json"
})
}));
}
promise.then(() => {
console.log('uploads, done');
$('#btn-progress-done').prop('disabled', false);
onUploadFilesDone();
}).catch(err => {
handlerError(err);
});
}
function onUploadFilesDone() {
let data = {
version: document.getElementById("publishVersion").value,
update_teacher: document.getElementById("checkbox-update-teacher").checked,
update_student: document.getElementById("checkbox-update-student").checked,
update_server: document.getElementById("checkbox-update-server").checked,
files: []
};
for (let key in uploadFiles) {
let file = uploadFiles[key].file;
data.files.push({
name: file.name,
path: uploadFiles[key].path,
size: (file.size / 1000).toFixed(2),
md5: uploadFiles[key].md5,
});
}
console.log(data);
$.ajax({
type: "POST",
url: "./upload_done.php",
success: function (data) {
initVersionPanel();
handlerError(data.err);
},
data: {
data: JSON.stringify(data)
},
dataType: "json"
})
}
// function graphStyleRadioClick(e) {
// console.log('clicked', e);
// }
let serverFiles;
let uploadFiles;
function initVersionPanel() {
serverFiles = {};
$.ajax({
url: "../version_last.php",
success: function (data) {
console.log(data);
if (!data.version) {
document.getElementById("publishVersion").value = "1.0.0";
| var percentComplete = evt.loaded / evt.total;
//Do something with upload progress
$('#progressbar').css("width", (percentComplete * 100) + "%");
console.log(percentComplete);
}
| conditional_block |
demo.py | Initialized biophysical model", modelID
print '''
Please select from the following options:
1 - Run test pulse on model
2 - Fit model parameter to data
3 - Display static neuron model
4 - Visualize model dynamics
5 - Quit
'''
try:
selection=int(raw_input('Please choose an option above: '))
except ValueError:
| continue
# test pulse example
if selection == 1:
# Run the model with a test pulse of the 'long square' type
print "Running model with a long square current injection pulse of 210pA"
output = currModel.long_square(0.21)
currModel.plot_output()
# fit parameter example
elif selection == 2:
if not currModel.bp.cache_stimulus:
print "Current model was not instantiated with NWB data cached. Please reload the current model and cache experimental stimulus data."
continue
print "Fitting somatic sodium conductance for model", modelID, "to experimental data in sweep 41."
print "Please be patient, this may take some time."
# Define which section and which parameter to fit.
# Here we'll fit the somatic sodium conductance.
currModel.set_fit_section('soma', 0)
currModel.set_parameter_to_fit('gbar_NaV')
# Running the model with an NWB pulse as stimulus takes a
# very long time because of the high sampling rate.
# As a computationally-cheaper approximation for stimuli of
# type Long Square pulse, we can rebuild the stimulus with the
# default (lower) sampling rate in h.IClamp
# currModel.run_nwb_pulse(41) # too slow
output = currModel.long_square(0.21)
# Set the experimental reference sweep and set up the variables for the objective function
currModel.set_reference_sweep(ref_index=41)
currModel.set_up_objective(measure='spike frequency')
# Use SciPy's minimize functions to fit the specified parameter
#results = minimize(currModel.objective_function, currModel.theta, method='Nelder-Mead', tol=1e-3)
#results = minimize(currModel.objective_function, currModel.theta, method='Powell', tol=1e-3)
#results = minimize(currModel.objective_function, currModel.theta, method='COBYLA', tol=1e-5)
currModel.gradient_descent(alpha=0.00005, epsilon=0.001, threshold=0.01, max_cycles=1000)
currModel.plot_fit()
output = currModel.long_square(0.21)
currModel.plot_output()
times = np.array(output['t'])/1000
spikes = detect_putative_spikes(np.array(output['v']), times, 0.1, 1.1)
avg_rate = currModel.average_rate_from_delays(times, spikes, 0.1, 1.1)
print "spike rate for theta of", currModel.theta, ":", avg_rate
# static visualization example
elif selection == 3:
run_visualization(currModel)
elif selection == 4:
run_visualization(currModel, show_simulation_dynamics = True)
elif selection == 5:
quit()
else:
print "Invalid selection."
continue
def run_visualization(currModel, show_simulation_dynamics = False):
print "Setting up visualization..."
morphology = currModel.get_reconstruction()
# Prepare model coordinates for uploading to OpenGL.
tempIndices = []
tempVertices = []
n_index = 0
tempX = []
tempY = []
tempZ = []
tempCol = []
if not show_simulation_dynamics:
print '''
Soma - Red
Axon - Green
Dendrites - Blue
Apical Dendrites - Purple'''
# array of colors to denote individual compartment types
compartmentColors=[[0.0,0.0,0.0,0.0], # padding for index convenience
[1.0, 0.0, 0.0, 1.0], #1: soma - red
[0.0, 1.0, 0.0, 1.0], #2: axon - green
[0.0, 0.0, 1.0, 1.0], #3: dendrites - blue
[1.0, 0.0, 1.0, 1.0]] #4: apical dendrites - purple
color_dim = 4
# used to set up section monitoring for visualization of dynamics
compartmentNames=['none', # padding for index convenience
'soma', #1: soma
'axon', #2: axon
'dend', #3: dendrites - blue
'dend'] #4: apical dendrites - purple
sectionIndices=[0,0,0,0,0]
segmentsPerSection = {}
sec_name = ''
# initialize storage arrays for each vertex.
index = 0
n_compartments = len(morphology.compartment_list)
tempX = [0] * n_compartments
tempY = [0] * n_compartments
tempZ = [0] * n_compartments
tempCol = [0] * n_compartments * color_dim
for n in morphology.compartment_list:
# add parent coords
tempX[n['id']] = n['x']
tempY[n['id']] = -n['y']
tempZ[n['id']] = n['z']
# add color data for parent
col_i = 0
offset = n['id']*color_dim
for cval in compartmentColors[n['type']]:
tempCol[offset+col_i] = cval
col_i += 1
# if at a branch point or an end of a section, set up a vector to monitor that segment's voltage
type = compartmentNames[n['type']]
sec_index = sectionIndices[n['type']]
if not (len(morphology.children_of(n)) == 1): #either branch pt or end
sec_name = type + '[' + str(sec_index) + ']'
sectionIndices[n['type']] += 1
currModel.monitor_section_voltage(type, sec_index)
segmentsPerSection[sec_name] = 1
else:
segmentsPerSection[sec_name] += 1
index += 1
for c in morphology.children_of(n):
			# add child coords
tempX[c['id']] = c['x']
tempY[c['id']] = -c['y']
tempZ[c['id']] = c['z']
# add index data:
# draw from parent to child, for each child
tempIndices.append(n['id'])
tempIndices.append(c['id'])
index += 1
# add color data for child
col_i = 0
offset = c['id']*color_dim
for cval in compartmentColors[c['type']]:
tempCol[offset+col_i] = cval
col_i += 1
segmentsPerSection[sec_name] += 1
# get ranges for scaling
maxX = max(tempX)
maxY = max(tempY)
maxZ = max(tempZ)
minX = min(tempX)
minY = min(tempY)
minZ = min(tempZ)
xHalfRange = (maxX - minX)/2.0
yHalfRange = (maxY - minY)/2.0
zHalfRange = (maxZ - minZ)/2.0
longestDimLen = max(xHalfRange, yHalfRange, zHalfRange)
# center coords about 0,0,0, with range -1 to 1
tempX = [((((x-minX)*(2*xHalfRange))/(2*xHalfRange)) - xHalfRange)/longestDimLen for x in tempX]
tempY = [((((y-minY)*(2*yHalfRange))/(2*yHalfRange)) - yHalfRange)/longestDimLen for y in tempY]
tempZ = [((((z-minZ)*(2*zHalfRange))/(2*zHalfRange)) - zHalfRange)/longestDimLen for z in tempZ]
# convert everything to a numpy array so OpenGL can use it
indexData = np.array(tempIndices, dtype='uint16')
vertexData = np.array([tempX,tempY,tempZ], dtype='float32')
tempCol = np.array(tempCol, dtype='float32')
vertexData = np.append(vertexData.transpose().flatten(), tempCol)
#################### /Preparing Model Coords
# Set up the Visualization instance
n_vertices = len(tempX)
currVis = Visualization(data=vertexData, indices=indexData, nVert=n_vertices, colorDim=color_dim)
if show_simulation_dynamics:
currModel.run_test_pulse(amp=0.25, delay=20. | print "Invalid selection."
| random_line_split |
demo.py | except ValueError:
print "Invalid selection."
continue
# test pulse example
if selection == 1:
# Run the model with a test pulse of the 'long square' type
print "Running model with a long square current injection pulse of 210pA"
output = currModel.long_square(0.21)
currModel.plot_output()
# fit parameter example
elif selection == 2:
if not currModel.bp.cache_stimulus:
print "Current model was not instantiated with NWB data cached. Please reload the current model and cache experimental stimulus data."
continue
print "Fitting somatic sodium conductance for model", modelID, "to experimental data in sweep 41."
print "Please be patient, this may take some time."
# Define which section and which parameter to fit.
# Here we'll fit the somatic sodium conductance.
currModel.set_fit_section('soma', 0)
currModel.set_parameter_to_fit('gbar_NaV')
# Running the model with an NWB pulse as stimulus takes a
# very long time because of the high sampling rate.
# As a computationally-cheaper approximation for stimuli of
# type Long Square pulse, we can rebuild the stimulus with the
# default (lower) sampling rate in h.IClamp
# currModel.run_nwb_pulse(41) # too slow
output = currModel.long_square(0.21)
# Set the experimental reference sweep and set up the variables for the objective function
currModel.set_reference_sweep(ref_index=41)
currModel.set_up_objective(measure='spike frequency')
# Use SciPy's minimize functions to fit the specified parameter
#results = minimize(currModel.objective_function, currModel.theta, method='Nelder-Mead', tol=1e-3)
#results = minimize(currModel.objective_function, currModel.theta, method='Powell', tol=1e-3)
#results = minimize(currModel.objective_function, currModel.theta, method='COBYLA', tol=1e-5)
currModel.gradient_descent(alpha=0.00005, epsilon=0.001, threshold=0.01, max_cycles=1000)
currModel.plot_fit()
output = currModel.long_square(0.21)
currModel.plot_output()
times = np.array(output['t'])/1000
spikes = detect_putative_spikes(np.array(output['v']), times, 0.1, 1.1)
avg_rate = currModel.average_rate_from_delays(times, spikes, 0.1, 1.1)
print "spike rate for theta of", currModel.theta, ":", avg_rate
# static visualization example
elif selection == 3:
run_visualization(currModel)
elif selection == 4:
run_visualization(currModel, show_simulation_dynamics = True)
elif selection == 5:
quit()
else:
print "Invalid selection."
continue
def run_visualization(currModel, show_simulation_dynamics = False):
print "Setting up visualization..."
morphology = currModel.get_reconstruction()
# Prepare model coordinates for uploading to OpenGL.
tempIndices = []
tempVertices = []
n_index = 0
tempX = []
tempY = []
tempZ = []
tempCol = []
if not show_simulation_dynamics:
print '''
Soma - Red
Axon - Green
Dendrites - Blue
Apical Dendrites - Purple'''
# array of colors to denote individual compartment types
compartmentColors=[[0.0,0.0,0.0,0.0], # padding for index convenience
[1.0, 0.0, 0.0, 1.0], #1: soma - red
[0.0, 1.0, 0.0, 1.0], #2: axon - green
[0.0, 0.0, 1.0, 1.0], #3: dendrites - blue
[1.0, 0.0, 1.0, 1.0]] #4: apical dendrites - purple
color_dim = 4
# used to set up section monitoring for visualization of dynamics
compartmentNames=['none', # padding for index convenience
'soma', #1: soma
'axon', #2: axon
'dend', #3: dendrites - blue
'dend'] #4: apical dendrites - purple
sectionIndices=[0,0,0,0,0]
segmentsPerSection = {}
sec_name = ''
# initialize storage arrays for each vertex.
index = 0
n_compartments = len(morphology.compartment_list)
tempX = [0] * n_compartments
tempY = [0] * n_compartments
tempZ = [0] * n_compartments
tempCol = [0] * n_compartments * color_dim
for n in morphology.compartment_list:
# add parent coords
tempX[n['id']] = n['x']
tempY[n['id']] = -n['y']
tempZ[n['id']] = n['z']
# add color data for parent
col_i = 0
offset = n['id']*color_dim
for cval in compartmentColors[n['type']]:
tempCol[offset+col_i] = cval
col_i += 1
# if at a branch point or an end of a section, set up a vector to monitor that segment's voltage
type = compartmentNames[n['type']]
sec_index = sectionIndices[n['type']]
if not (len(morphology.children_of(n)) == 1): #either branch pt or end
sec_name = type + '[' + str(sec_index) + ']'
sectionIndices[n['type']] += 1
currModel.monitor_section_voltage(type, sec_index)
segmentsPerSection[sec_name] = 1
else:
segmentsPerSection[sec_name] += 1
index += 1
for c in morphology.children_of(n):
			# add child coords
tempX[c['id']] = c['x']
tempY[c['id']] = -c['y']
tempZ[c['id']] = c['z']
# add index data:
# draw from parent to child, for each child
tempIndices.append(n['id'])
tempIndices.append(c['id'])
index += 1
# add color data for child
col_i = 0
offset = c['id']*color_dim
for cval in compartmentColors[c['type']]:
tempCol[offset+col_i] = cval
col_i += 1
segmentsPerSection[sec_name] += 1
# get ranges for scaling
maxX = max(tempX)
maxY = max(tempY)
maxZ = max(tempZ)
minX = min(tempX)
minY = min(tempY)
minZ = min(tempZ)
xHalfRange = (maxX - minX)/2.0
yHalfRange = (maxY - minY)/2.0
zHalfRange = (maxZ - minZ)/2.0
longestDimLen = max(xHalfRange, yHalfRange, zHalfRange)
# center coords about 0,0,0, with range -1 to 1
tempX = [((((x-minX)*(2*xHalfRange))/(2*xHalfRange)) - xHalfRange)/longestDimLen for x in tempX]
tempY = [((((y-minY)*(2*yHalfRange))/(2*yHalfRange)) - yHalfRange)/longestDimLen for y in tempY]
tempZ = [((((z-minZ)*(2*zHalfRange))/(2*zHalfRange)) - zHalfRange)/longestDimLen for z in tempZ]
# convert everything to a numpy array so OpenGL can use it
indexData = np.array(tempIndices, dtype='uint16')
vertexData = np.array([tempX,tempY,tempZ], dtype=' | print "Loading parameters for model", modelID
selection=raw_input('Would you like to download NWB data for model? [Y/N] ')
if selection[0] == 'y' or selection[0] == 'Y':
currModel = Model(modelID, cache_stim = True)
if selection[0] == 'n' or selection[0] == 'N':
currModel = Model(modelID, cache_stim = False)
currModel.init_model()
while(True):
print "Initialized biophysical model", modelID
print '''
Please select from the following options:
1 - Run test pulse on model
2 - Fit model parameter to data
3 - Display static neuron model
4 - Visualize model dynamics
5 - Quit
'''
try:
selection=int(raw_input('Please choose an option above: '))
| identifier_body |