file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
App.js | 'use strict';
import React,{ Component } from 'react';
import { StyleSheet, AppState, Dimensions, Image } from 'react-native';
import CodePush from 'react-native-code-push';
import { Container, Text, View, InputGroup, Input, Icon } from 'native-base';
import Modal from 'react-native-modalbox';
import AppNavigator from './AppNavigator';
import ProgressBar from './components/loaders/ProgressBar';
import theme from './themes/base-theme';
var height = Dimensions.get('window').height;
let styles = StyleSheet.create({
container: {
flex: 1,
width: null,
height: null
},
box: {
padding: 10,
backgroundColor: 'transparent',
flex: 1,
height: height-70
},
space: {
marginTop: 10,
marginBottom: 10,
justifyContent: 'center'
},
modal: {
justifyContent: 'center',
alignItems: 'center'
},
modal1: {
height: 300,
width: 300
}
});
class App extends Component {
constructor(props) {
super(props);
this.state = {
showDownloadingModal: false,
showInstalling: false,
downloadProgress: 0
}
}
componentDidMount() {
CodePush.sync({ updateDialog: true, installMode: CodePush.InstallMode.IMMEDIATE },
(status) => {
switch (status) {
case CodePush.SyncStatus.DOWNLOADING_PACKAGE:
this.setState({showDownloadingModal: true});
this.refs.modal.open();
break;
case CodePush.SyncStatus.INSTALLING_UPDATE:
this.setState({showInstalling: true});
break;
case CodePush.SyncStatus.UPDATE_INSTALLED:
this.refs.modal.close();
this.setState({showDownloadingModal: false});
break;
}
},
({ receivedBytes, totalBytes, }) => {
this.setState({downloadProgress: receivedBytes / totalBytes * 100});
}
);
}
render() |
/>
</InputGroup>
</View>
);
else
return(
<AppNavigator store={this.props.store} />
);
}
}
export default App
| {
if(this.state.showDownloadingModal)
return (
<View style={{backgroundColor: theme.brandSecondary}}>
<InputGroup
borderType='rounded'
>
<Icon name='ios-person-outline' />
<Input placeholder='Username' />
</InputGroup>
<InputGroup
borderType='rounded'
>
<Icon name='ios-unlock-outline' />
<Input
placeholder='Password'
secureTextEntry={true} | identifier_body |
timer.rs | it
//! really needs to. Enabling vsync by setting
//! [`conf.window_setup.vsync`](../conf/struct.WindowSetup.html#structfield.vsync)
//! in your [`Conf`](../conf/struct.Conf.html) object is generally the best
//! way to cap your displayed framerate.
//!
//! For a more detailed tutorial in how to handle frame timings in games,
//! see <http://gafferongames.com/game-physics/fix-your-timestep/>
use crate::context::Context;
use std::cmp;
use std::convert::TryFrom;
use std::f64;
use std::thread;
use std::time;
/// A simple buffer that fills
/// up to a limit and then holds the last
/// N items that have been inserted into it,
/// overwriting old ones in a round-robin fashion.
///
/// It's not quite a ring buffer 'cause you can't
/// remove items from it, it just holds the last N
/// things.
#[derive(Debug, Clone)]
struct LogBuffer<T>
where
T: Clone,
{
head: usize,
size: usize,
/// The number of actual samples inserted, used for
/// smarter averaging.
samples: usize,
contents: Vec<T>,
}
impl<T> LogBuffer<T>
where
T: Clone + Copy,
{
fn new(size: usize, init_val: T) -> LogBuffer<T> {
LogBuffer {
head: 0,
size,
contents: vec![init_val; size],
// Never divide by 0
samples: 1,
}
}
/// Pushes a new item into the `LogBuffer`, overwriting
/// the oldest item in it.
fn push(&mut self, item: T) {
self.head = (self.head + 1) % self.contents.len();
self.contents[self.head] = item;
self.size = cmp::min(self.size + 1, self.contents.len());
self.samples += 1;
}
/// Returns a slice pointing at the contents of the buffer.
/// They are in *no particular order*, and if not all the
/// slots are filled, the empty slots will be present but
/// contain the initial value given to [`new()`](#method.new).
///
/// We're only using this to log FPS for a short time,
/// so we don't care for the second or so when it's inaccurate.
fn contents(&self) -> &[T] {
if self.samples > self.size {
&self.contents
} else {
&self.contents[..self.samples]
}
}
/// Returns the most recent value in the buffer.
fn latest(&self) -> T {
self.contents[self.head]
}
}
/// A structure that contains our time-tracking state.
#[derive(Debug)]
pub struct TimeContext {
init_instant: time::Instant,
last_instant: time::Instant,
frame_durations: LogBuffer<time::Duration>,
residual_update_dt: time::Duration,
frame_count: usize,
}
/// How many frames we log update times for.
const TIME_LOG_FRAMES: usize = 200;
impl TimeContext {
/// Creates a new `TimeContext` and initializes the start to this instant.
pub fn new() -> TimeContext {
let initial_dt = time::Duration::from_millis(16);
TimeContext {
init_instant: time::Instant::now(),
last_instant: time::Instant::now(),
frame_durations: LogBuffer::new(TIME_LOG_FRAMES, initial_dt),
residual_update_dt: time::Duration::from_secs(0),
frame_count: 0,
}
}
/// Update the state of the `TimeContext` to record that
/// another frame has taken place. Necessary for the FPS
/// tracking and [`check_update_time()`](fn.check_update_time.html)
/// functions to work.
///
/// It's usually not necessary to call this function yourself,
/// [`event::run()`](../event/fn.run.html) will do it for you.
pub fn tick(&mut self) {
let now = time::Instant::now();
let time_since_last = now - self.last_instant;
self.frame_durations.push(time_since_last);
self.last_instant = now;
self.frame_count += 1;
self.residual_update_dt += time_since_last;
}
}
impl Default for TimeContext {
fn default() -> Self {
Self::new()
}
}
/// Get the time between the start of the last frame and the current one;
/// in other words, the length of the last frame.
pub fn delta(ctx: &Context) -> time::Duration {
let tc = &ctx.timer_context;
tc.frame_durations.latest()
}
/// Gets the average time of a frame, averaged
/// over the last 200 frames.
pub fn average_delta(ctx: &Context) -> time::Duration {
let tc = &ctx.timer_context;
let sum: time::Duration = tc.frame_durations.contents().iter().sum();
// If our buffer is actually full, divide by its size.
// Otherwise divide by the number of samples we've added
if tc.frame_durations.samples > tc.frame_durations.size {
sum / u32::try_from(tc.frame_durations.size).unwrap()
} else |
}
/// A convenience function to convert a Rust `Duration` type
/// to a (less precise but more useful) `f64`.
///
/// Does not make sure that the `Duration` is within the bounds
/// of the `f64`.
pub fn duration_to_f64(d: time::Duration) -> f64 {
let seconds = d.as_secs() as f64;
let nanos = f64::from(d.subsec_nanos());
seconds + (nanos * 1e-9)
}
/// A convenience function to create a Rust `Duration` type
/// from a (less precise but more useful) `f64`.
///
/// Only handles positive numbers correctly.
pub fn f64_to_duration(t: f64) -> time::Duration {
debug_assert!(t > 0.0, "f64_to_duration passed a negative number!");
let seconds = t.trunc();
let nanos = t.fract() * 1e9;
time::Duration::new(seconds as u64, nanos as u32)
}
/// Returns a `Duration` representing how long each
/// frame should be to match the given fps.
///
/// Approximately.
fn fps_as_duration(fps: u32) -> time::Duration {
let target_dt_seconds = 1.0 / f64::from(fps);
f64_to_duration(target_dt_seconds)
}
/// Gets the FPS of the game, averaged over the last
/// 200 frames.
pub fn fps(ctx: &Context) -> f64 {
let duration_per_frame = average_delta(ctx);
let seconds_per_frame = duration_to_f64(duration_per_frame);
1.0 / seconds_per_frame
}
/// Returns the time since the game was initialized,
/// as reported by the system clock.
pub fn time_since_start(ctx: &Context) -> time::Duration {
let tc = &ctx.timer_context;
time::Instant::now() - tc.init_instant
}
/// Check whether or not the desired amount of time has elapsed
/// since the last frame.
///
/// The intention is to use this in your `update` call to control
/// how often game logic is updated per frame (see [the astroblasto example](https://github.com/ggez/ggez/blob/30ea4a4ead67557d2ebb39550e17339323fc9c58/examples/05_astroblasto.rs#L438-L442)).
///
/// Calling this decreases a timer inside the context if the function returns true.
/// If called in a loop it may therefore return true once, twice or not at all, depending on
/// how much time elapsed since the last frame.
///
/// For more info on the idea behind this see <http://gafferongames.com/game-physics/fix-your-timestep/>.
///
/// Due to the global nature of this timer it's desirable to only use this function at one point
/// of your code. If you want to limit the frame rate in both game logic and drawing consider writing
/// your own event loop, or using a dirty bit for when to redraw graphics, which is set whenever the game
/// logic runs.
pub fn check_update_time(ctx: &mut Context, target_fps: u32) -> bool {
let timedata = &mut ctx.timer_context;
let target_dt = fps_as_duration(target_fps);
if timedata.residual_update_dt > target_dt {
timedata.residual_update_dt -= target_dt;
true
} else {
false
}
}
/// Returns the fractional amount of a frame not consumed
/// by [`check_update_time()`](fn.check_update_time.html).
/// For example, if the desired
/// update frame time is 40 ms (25 fps), and 45 ms have
/// passed since the last frame, [`check_update_time()`](fn.check_update_time.html)
/// will return `true` and `remaining_update_time()` will
/// return 5 ms -- the amount of | {
sum / u32::try_from(tc.frame_durations.samples).unwrap()
} | conditional_block |
timer.rs | it
//! really needs to. Enabling vsync by setting
//! [`conf.window_setup.vsync`](../conf/struct.WindowSetup.html#structfield.vsync)
//! in your [`Conf`](../conf/struct.Conf.html) object is generally the best
//! way to cap your displayed framerate.
//!
//! For a more detailed tutorial in how to handle frame timings in games,
//! see <http://gafferongames.com/game-physics/fix-your-timestep/>
use crate::context::Context;
use std::cmp;
use std::convert::TryFrom;
use std::f64;
use std::thread;
use std::time;
/// A simple buffer that fills
/// up to a limit and then holds the last
/// N items that have been inserted into it,
/// overwriting old ones in a round-robin fashion.
///
/// It's not quite a ring buffer 'cause you can't
/// remove items from it, it just holds the last N
/// things.
#[derive(Debug, Clone)]
struct LogBuffer<T>
where
T: Clone,
{
head: usize,
size: usize,
/// The number of actual samples inserted, used for
/// smarter averaging.
samples: usize,
contents: Vec<T>,
}
impl<T> LogBuffer<T>
where
T: Clone + Copy,
{
fn new(size: usize, init_val: T) -> LogBuffer<T> {
LogBuffer {
head: 0,
size,
contents: vec![init_val; size],
// Never divide by 0
samples: 1,
}
}
/// Pushes a new item into the `LogBuffer`, overwriting
/// the oldest item in it.
fn push(&mut self, item: T) {
self.head = (self.head + 1) % self.contents.len();
self.contents[self.head] = item;
self.size = cmp::min(self.size + 1, self.contents.len());
self.samples += 1;
}
/// Returns a slice pointing at the contents of the buffer.
/// They are in *no particular order*, and if not all the
/// slots are filled, the empty slots will be present but
/// contain the initial value given to [`new()`](#method.new).
///
/// We're only using this to log FPS for a short time,
/// so we don't care for the second or so when it's inaccurate.
fn contents(&self) -> &[T] {
if self.samples > self.size {
&self.contents
} else {
&self.contents[..self.samples]
}
}
/// Returns the most recent value in the buffer.
fn latest(&self) -> T {
self.contents[self.head]
}
}
/// A structure that contains our time-tracking state.
#[derive(Debug)]
pub struct TimeContext {
init_instant: time::Instant,
last_instant: time::Instant,
frame_durations: LogBuffer<time::Duration>,
residual_update_dt: time::Duration,
frame_count: usize,
}
/// How many frames we log update times for.
const TIME_LOG_FRAMES: usize = 200;
impl TimeContext {
/// Creates a new `TimeContext` and initializes the start to this instant.
pub fn new() -> TimeContext { | frame_durations: LogBuffer::new(TIME_LOG_FRAMES, initial_dt),
residual_update_dt: time::Duration::from_secs(0),
frame_count: 0,
}
}
/// Update the state of the `TimeContext` to record that
/// another frame has taken place. Necessary for the FPS
/// tracking and [`check_update_time()`](fn.check_update_time.html)
/// functions to work.
///
/// It's usually not necessary to call this function yourself,
/// [`event::run()`](../event/fn.run.html) will do it for you.
pub fn tick(&mut self) {
let now = time::Instant::now();
let time_since_last = now - self.last_instant;
self.frame_durations.push(time_since_last);
self.last_instant = now;
self.frame_count += 1;
self.residual_update_dt += time_since_last;
}
}
impl Default for TimeContext {
fn default() -> Self {
Self::new()
}
}
/// Get the time between the start of the last frame and the current one;
/// in other words, the length of the last frame.
pub fn delta(ctx: &Context) -> time::Duration {
let tc = &ctx.timer_context;
tc.frame_durations.latest()
}
/// Gets the average time of a frame, averaged
/// over the last 200 frames.
pub fn average_delta(ctx: &Context) -> time::Duration {
let tc = &ctx.timer_context;
let sum: time::Duration = tc.frame_durations.contents().iter().sum();
// If our buffer is actually full, divide by its size.
// Otherwise divide by the number of samples we've added
if tc.frame_durations.samples > tc.frame_durations.size {
sum / u32::try_from(tc.frame_durations.size).unwrap()
} else {
sum / u32::try_from(tc.frame_durations.samples).unwrap()
}
}
/// A convenience function to convert a Rust `Duration` type
/// to a (less precise but more useful) `f64`.
///
/// Does not make sure that the `Duration` is within the bounds
/// of the `f64`.
pub fn duration_to_f64(d: time::Duration) -> f64 {
let seconds = d.as_secs() as f64;
let nanos = f64::from(d.subsec_nanos());
seconds + (nanos * 1e-9)
}
/// A convenience function to create a Rust `Duration` type
/// from a (less precise but more useful) `f64`.
///
/// Only handles positive numbers correctly.
pub fn f64_to_duration(t: f64) -> time::Duration {
debug_assert!(t > 0.0, "f64_to_duration passed a negative number!");
let seconds = t.trunc();
let nanos = t.fract() * 1e9;
time::Duration::new(seconds as u64, nanos as u32)
}
/// Returns a `Duration` representing how long each
/// frame should be to match the given fps.
///
/// Approximately.
fn fps_as_duration(fps: u32) -> time::Duration {
let target_dt_seconds = 1.0 / f64::from(fps);
f64_to_duration(target_dt_seconds)
}
/// Gets the FPS of the game, averaged over the last
/// 200 frames.
pub fn fps(ctx: &Context) -> f64 {
let duration_per_frame = average_delta(ctx);
let seconds_per_frame = duration_to_f64(duration_per_frame);
1.0 / seconds_per_frame
}
/// Returns the time since the game was initialized,
/// as reported by the system clock.
pub fn time_since_start(ctx: &Context) -> time::Duration {
let tc = &ctx.timer_context;
time::Instant::now() - tc.init_instant
}
/// Check whether or not the desired amount of time has elapsed
/// since the last frame.
///
/// The intention is to use this in your `update` call to control
/// how often game logic is updated per frame (see [the astroblasto example](https://github.com/ggez/ggez/blob/30ea4a4ead67557d2ebb39550e17339323fc9c58/examples/05_astroblasto.rs#L438-L442)).
///
/// Calling this decreases a timer inside the context if the function returns true.
/// If called in a loop it may therefore return true once, twice or not at all, depending on
/// how much time elapsed since the last frame.
///
/// For more info on the idea behind this see <http://gafferongames.com/game-physics/fix-your-timestep/>.
///
/// Due to the global nature of this timer it's desirable to only use this function at one point
/// of your code. If you want to limit the frame rate in both game logic and drawing consider writing
/// your own event loop, or using a dirty bit for when to redraw graphics, which is set whenever the game
/// logic runs.
pub fn check_update_time(ctx: &mut Context, target_fps: u32) -> bool {
let timedata = &mut ctx.timer_context;
let target_dt = fps_as_duration(target_fps);
if timedata.residual_update_dt > target_dt {
timedata.residual_update_dt -= target_dt;
true
} else {
false
}
}
/// Returns the fractional amount of a frame not consumed
/// by [`check_update_time()`](fn.check_update_time.html).
/// For example, if the desired
/// update frame time is 40 ms (25 fps), and 45 ms have
/// passed since the last frame, [`check_update_time()`](fn.check_update_time.html)
/// will return `true` and `remaining_update_time()` will
/// return 5 ms -- the amount of time " | let initial_dt = time::Duration::from_millis(16);
TimeContext {
init_instant: time::Instant::now(),
last_instant: time::Instant::now(), | random_line_split |
timer.rs | it
//! really needs to. Enabling vsync by setting
//! [`conf.window_setup.vsync`](../conf/struct.WindowSetup.html#structfield.vsync)
//! in your [`Conf`](../conf/struct.Conf.html) object is generally the best
//! way to cap your displayed framerate.
//!
//! For a more detailed tutorial in how to handle frame timings in games,
//! see <http://gafferongames.com/game-physics/fix-your-timestep/>
use crate::context::Context;
use std::cmp;
use std::convert::TryFrom;
use std::f64;
use std::thread;
use std::time;
/// A simple buffer that fills
/// up to a limit and then holds the last
/// N items that have been inserted into it,
/// overwriting old ones in a round-robin fashion.
///
/// It's not quite a ring buffer 'cause you can't
/// remove items from it, it just holds the last N
/// things.
#[derive(Debug, Clone)]
struct LogBuffer<T>
where
T: Clone,
{
head: usize,
size: usize,
/// The number of actual samples inserted, used for
/// smarter averaging.
samples: usize,
contents: Vec<T>,
}
impl<T> LogBuffer<T>
where
T: Clone + Copy,
{
fn new(size: usize, init_val: T) -> LogBuffer<T> {
LogBuffer {
head: 0,
size,
contents: vec![init_val; size],
// Never divide by 0
samples: 1,
}
}
/// Pushes a new item into the `LogBuffer`, overwriting
/// the oldest item in it.
fn push(&mut self, item: T) {
self.head = (self.head + 1) % self.contents.len();
self.contents[self.head] = item;
self.size = cmp::min(self.size + 1, self.contents.len());
self.samples += 1;
}
/// Returns a slice pointing at the contents of the buffer.
/// They are in *no particular order*, and if not all the
/// slots are filled, the empty slots will be present but
/// contain the initial value given to [`new()`](#method.new).
///
/// We're only using this to log FPS for a short time,
/// so we don't care for the second or so when it's inaccurate.
fn contents(&self) -> &[T] {
if self.samples > self.size {
&self.contents
} else {
&self.contents[..self.samples]
}
}
/// Returns the most recent value in the buffer.
fn latest(&self) -> T {
self.contents[self.head]
}
}
/// A structure that contains our time-tracking state.
#[derive(Debug)]
pub struct TimeContext {
init_instant: time::Instant,
last_instant: time::Instant,
frame_durations: LogBuffer<time::Duration>,
residual_update_dt: time::Duration,
frame_count: usize,
}
/// How many frames we log update times for.
const TIME_LOG_FRAMES: usize = 200;
impl TimeContext {
/// Creates a new `TimeContext` and initializes the start to this instant.
pub fn | () -> TimeContext {
let initial_dt = time::Duration::from_millis(16);
TimeContext {
init_instant: time::Instant::now(),
last_instant: time::Instant::now(),
frame_durations: LogBuffer::new(TIME_LOG_FRAMES, initial_dt),
residual_update_dt: time::Duration::from_secs(0),
frame_count: 0,
}
}
/// Update the state of the `TimeContext` to record that
/// another frame has taken place. Necessary for the FPS
/// tracking and [`check_update_time()`](fn.check_update_time.html)
/// functions to work.
///
/// It's usually not necessary to call this function yourself,
/// [`event::run()`](../event/fn.run.html) will do it for you.
pub fn tick(&mut self) {
let now = time::Instant::now();
let time_since_last = now - self.last_instant;
self.frame_durations.push(time_since_last);
self.last_instant = now;
self.frame_count += 1;
self.residual_update_dt += time_since_last;
}
}
impl Default for TimeContext {
fn default() -> Self {
Self::new()
}
}
/// Get the time between the start of the last frame and the current one;
/// in other words, the length of the last frame.
pub fn delta(ctx: &Context) -> time::Duration {
let tc = &ctx.timer_context;
tc.frame_durations.latest()
}
/// Gets the average time of a frame, averaged
/// over the last 200 frames.
pub fn average_delta(ctx: &Context) -> time::Duration {
let tc = &ctx.timer_context;
let sum: time::Duration = tc.frame_durations.contents().iter().sum();
// If our buffer is actually full, divide by its size.
// Otherwise divide by the number of samples we've added
if tc.frame_durations.samples > tc.frame_durations.size {
sum / u32::try_from(tc.frame_durations.size).unwrap()
} else {
sum / u32::try_from(tc.frame_durations.samples).unwrap()
}
}
/// A convenience function to convert a Rust `Duration` type
/// to a (less precise but more useful) `f64`.
///
/// Does not make sure that the `Duration` is within the bounds
/// of the `f64`.
pub fn duration_to_f64(d: time::Duration) -> f64 {
let seconds = d.as_secs() as f64;
let nanos = f64::from(d.subsec_nanos());
seconds + (nanos * 1e-9)
}
/// A convenience function to create a Rust `Duration` type
/// from a (less precise but more useful) `f64`.
///
/// Only handles positive numbers correctly.
pub fn f64_to_duration(t: f64) -> time::Duration {
debug_assert!(t > 0.0, "f64_to_duration passed a negative number!");
let seconds = t.trunc();
let nanos = t.fract() * 1e9;
time::Duration::new(seconds as u64, nanos as u32)
}
/// Returns a `Duration` representing how long each
/// frame should be to match the given fps.
///
/// Approximately.
fn fps_as_duration(fps: u32) -> time::Duration {
let target_dt_seconds = 1.0 / f64::from(fps);
f64_to_duration(target_dt_seconds)
}
/// Gets the FPS of the game, averaged over the last
/// 200 frames.
pub fn fps(ctx: &Context) -> f64 {
let duration_per_frame = average_delta(ctx);
let seconds_per_frame = duration_to_f64(duration_per_frame);
1.0 / seconds_per_frame
}
/// Returns the time since the game was initialized,
/// as reported by the system clock.
pub fn time_since_start(ctx: &Context) -> time::Duration {
let tc = &ctx.timer_context;
time::Instant::now() - tc.init_instant
}
/// Check whether or not the desired amount of time has elapsed
/// since the last frame.
///
/// The intention is to use this in your `update` call to control
/// how often game logic is updated per frame (see [the astroblasto example](https://github.com/ggez/ggez/blob/30ea4a4ead67557d2ebb39550e17339323fc9c58/examples/05_astroblasto.rs#L438-L442)).
///
/// Calling this decreases a timer inside the context if the function returns true.
/// If called in a loop it may therefore return true once, twice or not at all, depending on
/// how much time elapsed since the last frame.
///
/// For more info on the idea behind this see <http://gafferongames.com/game-physics/fix-your-timestep/>.
///
/// Due to the global nature of this timer it's desirable to only use this function at one point
/// of your code. If you want to limit the frame rate in both game logic and drawing consider writing
/// your own event loop, or using a dirty bit for when to redraw graphics, which is set whenever the game
/// logic runs.
pub fn check_update_time(ctx: &mut Context, target_fps: u32) -> bool {
let timedata = &mut ctx.timer_context;
let target_dt = fps_as_duration(target_fps);
if timedata.residual_update_dt > target_dt {
timedata.residual_update_dt -= target_dt;
true
} else {
false
}
}
/// Returns the fractional amount of a frame not consumed
/// by [`check_update_time()`](fn.check_update_time.html).
/// For example, if the desired
/// update frame time is 40 ms (25 fps), and 45 ms have
/// passed since the last frame, [`check_update_time()`](fn.check_update_time.html)
/// will return `true` and `remaining_update_time()` will
/// return 5 ms -- the amount of time | new | identifier_name |
volumetric_data.py | """Module for reading volumetric data from VASP calculations.
Charge density and dipole moment
Local potential
Electron localization function
"""
import os
import numpy as np
from ase.calculators.vasp import Vasp, VaspChargeDensity
from POTCAR import get_ZVAL
def get_volumetric_data(self, filename='CHG', **kwargs):
"""Read filename to read the volumetric data in it.
Supported filenames are CHG, CHGCAR, and LOCPOT.
""" | n0, n1, n2 = data[0].shape
s0 = np.linspace(0, 1, num=n0, endpoint=False)
s1 = np.linspace(0, 1, num=n1, endpoint=False)
s2 = np.linspace(0, 1, num=n2, endpoint=False)
X, Y, Z = np.meshgrid(s0, s1, s2)
C = np.column_stack([X.ravel(),
Y.ravel(),
Z.ravel()])
uc = atoms.get_cell()
real = np.dot(C, uc)
# now convert arrays back to unitcell shape
x = np.reshape(real[:, 0], (n0, n1, n2))
y = np.reshape(real[:, 1], (n0, n1, n2))
z = np.reshape(real[:, 2], (n0, n1, n2))
return x, y, z, data
def get_charge_density(self, spin=0, filename='CHG'):
"""Returns x, y, and z coordinate and charge density arrays.
Supported file formats: CHG, CHGCAR
:param int spin: an integer
:returns: x, y, z, charge density arrays
:rtype: 3-d numpy arrays
Relies on :func:`ase.calculators.vasp.VaspChargeDensity`.
"""
x, y, z, data = get_volumetric_data(self, filename=filename)
return x, y, z, data[spin]
Vasp.get_charge_density = get_charge_density
def get_local_potential(self):
"""Returns x, y, z, and local potential arrays
is there a spin for this?
We multiply the data by the volume because we are reusing the
charge density code which divides by volume.
"""
x, y, z, data = get_volumetric_data(self, filename='LOCPOT')
atoms = self.get_atoms()
return x, y, z, data[0] * atoms.get_volume()
Vasp.get_local_potential = get_local_potential
def get_elf(self):
"""Returns x, y, z and electron localization function arrays."""
x, y, z, data = get_volumetric_data(self, filename='ELFCAR')
atoms = self.get_atoms()
return x, y, z, data[0] * atoms.get_volume()
Vasp.get_elf = get_elf
def get_electron_density_center(self, spin=0, scaled=True):
"""Returns center of electron density.
If scaled, use scaled coordinates, otherwise use cartesian
coordinates.
"""
atoms = self.get_atoms()
x, y, z, cd = self.get_charge_density(spin)
n0, n1, n2 = cd.shape
nelements = n0 * n1 * n2
voxel_volume = atoms.get_volume() / nelements
total_electron_charge = cd.sum() * voxel_volume
electron_density_center = np.array([(cd * x).sum(),
(cd * y).sum(),
(cd * z).sum()])
electron_density_center *= voxel_volume
electron_density_center /= total_electron_charge
if scaled:
uc = atoms.get_cell()
return np.dot(np.linalg.inv(uc.T), electron_density_center.T).T
else:
return electron_density_center
def get_dipole_moment(self, atoms=None):
"""Tries to return the dipole vector of the unit cell in atomic units.
Returns None when CHG file is empty/not-present.
To get the dipole moment, use this formula:
dipole_moment = ((dipole_vector**2).sum())**0.5/Debye
"""
if atoms is None:
atoms = self.get_atoms()
try:
x, y, z, cd = self.get_charge_density()
except (IOError, IndexError):
# IOError: no CHG file, function called outside context manager
# IndexError: Empty CHG file, Vasp run with lcharg=False
return None
n0, n1, n2 = cd.shape
nelements = n0 * n1 * n2
voxel_volume = atoms.get_volume() / nelements
total_electron_charge = -cd.sum() * voxel_volume
electron_density_center = np.array([(cd*x).sum(),
(cd*y).sum(),
(cd*z).sum()])
electron_density_center *= voxel_volume
electron_density_center /= total_electron_charge
electron_dipole_moment = electron_density_center * total_electron_charge
electron_dipole_moment *= -1.0
# now the ion charge center
LOP = self.get_pseudopotentials()
ppp = os.environ['VASP_PP_PATH']
# make dictionary for ease of use
zval = {}
for sym, ppath, hash in LOP:
# out a bug above. os.path.join discards the root if the
# second path starts with /, which makes it look like an
# absolute path. the get_pseudopotentials code returns a path
# with a / in the beginning.
fullpath = ppp + ppath
z = get_ZVAL(fullpath)
zval[sym] = z
ion_charge_center = np.array([0.0, 0.0, 0.0])
total_ion_charge = 0.0
for atom in atoms:
Z = zval[atom.symbol]
total_ion_charge += Z
pos = atom.position
ion_charge_center += Z*pos
ion_charge_center /= total_ion_charge
ion_dipole_moment = ion_charge_center * total_ion_charge
dipole_vector = (ion_dipole_moment + electron_dipole_moment)
return dipole_vector
Vasp.get_dipole_moment = get_dipole_moment | atoms = self.get_atoms()
vd = VaspChargeDensity(filename)
data = np.array(vd.chg) | random_line_split |
volumetric_data.py | """Module for reading volumetric data from VASP calculations.
Charge density and dipole moment
Local potential
Electron localization function
"""
import os
import numpy as np
from ase.calculators.vasp import Vasp, VaspChargeDensity
from POTCAR import get_ZVAL
def get_volumetric_data(self, filename='CHG', **kwargs):
"""Read filename to read the volumetric data in it.
Supported filenames are CHG, CHGCAR, and LOCPOT.
"""
atoms = self.get_atoms()
vd = VaspChargeDensity(filename)
data = np.array(vd.chg)
n0, n1, n2 = data[0].shape
s0 = np.linspace(0, 1, num=n0, endpoint=False)
s1 = np.linspace(0, 1, num=n1, endpoint=False)
s2 = np.linspace(0, 1, num=n2, endpoint=False)
X, Y, Z = np.meshgrid(s0, s1, s2)
C = np.column_stack([X.ravel(),
Y.ravel(),
Z.ravel()])
uc = atoms.get_cell()
real = np.dot(C, uc)
# now convert arrays back to unitcell shape
x = np.reshape(real[:, 0], (n0, n1, n2))
y = np.reshape(real[:, 1], (n0, n1, n2))
z = np.reshape(real[:, 2], (n0, n1, n2))
return x, y, z, data
def get_charge_density(self, spin=0, filename='CHG'):
"""Returns x, y, and z coordinate and charge density arrays.
Supported file formats: CHG, CHGCAR
:param int spin: an integer
:returns: x, y, z, charge density arrays
:rtype: 3-d numpy arrays
Relies on :func:`ase.calculators.vasp.VaspChargeDensity`.
"""
x, y, z, data = get_volumetric_data(self, filename=filename)
return x, y, z, data[spin]
Vasp.get_charge_density = get_charge_density
def get_local_potential(self):
"""Returns x, y, z, and local potential arrays
is there a spin for this?
We multiply the data by the volume because we are reusing the
charge density code which divides by volume.
"""
x, y, z, data = get_volumetric_data(self, filename='LOCPOT')
atoms = self.get_atoms()
return x, y, z, data[0] * atoms.get_volume()
Vasp.get_local_potential = get_local_potential
def get_elf(self):
"""Returns x, y, z and electron localization function arrays."""
x, y, z, data = get_volumetric_data(self, filename='ELFCAR')
atoms = self.get_atoms()
return x, y, z, data[0] * atoms.get_volume()
Vasp.get_elf = get_elf
def | (self, spin=0, scaled=True):
"""Returns center of electron density.
If scaled, use scaled coordinates, otherwise use cartesian
coordinates.
"""
atoms = self.get_atoms()
x, y, z, cd = self.get_charge_density(spin)
n0, n1, n2 = cd.shape
nelements = n0 * n1 * n2
voxel_volume = atoms.get_volume() / nelements
total_electron_charge = cd.sum() * voxel_volume
electron_density_center = np.array([(cd * x).sum(),
(cd * y).sum(),
(cd * z).sum()])
electron_density_center *= voxel_volume
electron_density_center /= total_electron_charge
if scaled:
uc = atoms.get_cell()
return np.dot(np.linalg.inv(uc.T), electron_density_center.T).T
else:
return electron_density_center
def get_dipole_moment(self, atoms=None):
"""Tries to return the dipole vector of the unit cell in atomic units.
Returns None when CHG file is empty/not-present.
To get the dipole moment, use this formula:
dipole_moment = ((dipole_vector**2).sum())**0.5/Debye
"""
if atoms is None:
atoms = self.get_atoms()
try:
x, y, z, cd = self.get_charge_density()
except (IOError, IndexError):
# IOError: no CHG file, function called outside context manager
# IndexError: Empty CHG file, Vasp run with lcharg=False
return None
n0, n1, n2 = cd.shape
nelements = n0 * n1 * n2
voxel_volume = atoms.get_volume() / nelements
total_electron_charge = -cd.sum() * voxel_volume
electron_density_center = np.array([(cd*x).sum(),
(cd*y).sum(),
(cd*z).sum()])
electron_density_center *= voxel_volume
electron_density_center /= total_electron_charge
electron_dipole_moment = electron_density_center * total_electron_charge
electron_dipole_moment *= -1.0
# now the ion charge center
LOP = self.get_pseudopotentials()
ppp = os.environ['VASP_PP_PATH']
# make dictionary for ease of use
zval = {}
for sym, ppath, hash in LOP:
# out a bug above. os.path.join discards the root if the
# second path starts with /, which makes it look like an
# absolute path. the get_pseudopotentials code returns a path
# with a / in the beginning.
fullpath = ppp + ppath
z = get_ZVAL(fullpath)
zval[sym] = z
ion_charge_center = np.array([0.0, 0.0, 0.0])
total_ion_charge = 0.0
for atom in atoms:
Z = zval[atom.symbol]
total_ion_charge += Z
pos = atom.position
ion_charge_center += Z*pos
ion_charge_center /= total_ion_charge
ion_dipole_moment = ion_charge_center * total_ion_charge
dipole_vector = (ion_dipole_moment + electron_dipole_moment)
return dipole_vector
Vasp.get_dipole_moment = get_dipole_moment
| get_electron_density_center | identifier_name |
volumetric_data.py | """Module for reading volumetric data from VASP calculations.
Charge density and dipole moment
Local potential
Electron localization function
"""
import os
import numpy as np
from ase.calculators.vasp import Vasp, VaspChargeDensity
from POTCAR import get_ZVAL
def get_volumetric_data(self, filename='CHG', **kwargs):
"""Read filename to read the volumetric data in it.
Supported filenames are CHG, CHGCAR, and LOCPOT.
"""
atoms = self.get_atoms()
vd = VaspChargeDensity(filename)
data = np.array(vd.chg)
n0, n1, n2 = data[0].shape
s0 = np.linspace(0, 1, num=n0, endpoint=False)
s1 = np.linspace(0, 1, num=n1, endpoint=False)
s2 = np.linspace(0, 1, num=n2, endpoint=False)
X, Y, Z = np.meshgrid(s0, s1, s2)
C = np.column_stack([X.ravel(),
Y.ravel(),
Z.ravel()])
uc = atoms.get_cell()
real = np.dot(C, uc)
# now convert arrays back to unitcell shape
x = np.reshape(real[:, 0], (n0, n1, n2))
y = np.reshape(real[:, 1], (n0, n1, n2))
z = np.reshape(real[:, 2], (n0, n1, n2))
return x, y, z, data
def get_charge_density(self, spin=0, filename='CHG'):
|
Vasp.get_charge_density = get_charge_density
def get_local_potential(self):
"""Returns x, y, z, and local potential arrays
is there a spin for this?
We multiply the data by the volume because we are reusing the
charge density code which divides by volume.
"""
x, y, z, data = get_volumetric_data(self, filename='LOCPOT')
atoms = self.get_atoms()
return x, y, z, data[0] * atoms.get_volume()
Vasp.get_local_potential = get_local_potential
def get_elf(self):
"""Returns x, y, z and electron localization function arrays."""
x, y, z, data = get_volumetric_data(self, filename='ELFCAR')
atoms = self.get_atoms()
return x, y, z, data[0] * atoms.get_volume()
Vasp.get_elf = get_elf
def get_electron_density_center(self, spin=0, scaled=True):
"""Returns center of electron density.
If scaled, use scaled coordinates, otherwise use cartesian
coordinates.
"""
atoms = self.get_atoms()
x, y, z, cd = self.get_charge_density(spin)
n0, n1, n2 = cd.shape
nelements = n0 * n1 * n2
voxel_volume = atoms.get_volume() / nelements
total_electron_charge = cd.sum() * voxel_volume
electron_density_center = np.array([(cd * x).sum(),
(cd * y).sum(),
(cd * z).sum()])
electron_density_center *= voxel_volume
electron_density_center /= total_electron_charge
if scaled:
uc = atoms.get_cell()
return np.dot(np.linalg.inv(uc.T), electron_density_center.T).T
else:
return electron_density_center
def get_dipole_moment(self, atoms=None):
"""Tries to return the dipole vector of the unit cell in atomic units.
Returns None when CHG file is empty/not-present.
To get the dipole moment, use this formula:
dipole_moment = ((dipole_vector**2).sum())**0.5/Debye
"""
if atoms is None:
atoms = self.get_atoms()
try:
x, y, z, cd = self.get_charge_density()
except (IOError, IndexError):
# IOError: no CHG file, function called outside context manager
# IndexError: Empty CHG file, Vasp run with lcharg=False
return None
n0, n1, n2 = cd.shape
nelements = n0 * n1 * n2
voxel_volume = atoms.get_volume() / nelements
total_electron_charge = -cd.sum() * voxel_volume
electron_density_center = np.array([(cd*x).sum(),
(cd*y).sum(),
(cd*z).sum()])
electron_density_center *= voxel_volume
electron_density_center /= total_electron_charge
electron_dipole_moment = electron_density_center * total_electron_charge
electron_dipole_moment *= -1.0
# now the ion charge center
LOP = self.get_pseudopotentials()
ppp = os.environ['VASP_PP_PATH']
# make dictionary for ease of use
zval = {}
for sym, ppath, hash in LOP:
# out a bug above. os.path.join discards the root if the
# second path starts with /, which makes it look like an
# absolute path. the get_pseudopotentials code returns a path
# with a / in the beginning.
fullpath = ppp + ppath
z = get_ZVAL(fullpath)
zval[sym] = z
ion_charge_center = np.array([0.0, 0.0, 0.0])
total_ion_charge = 0.0
for atom in atoms:
Z = zval[atom.symbol]
total_ion_charge += Z
pos = atom.position
ion_charge_center += Z*pos
ion_charge_center /= total_ion_charge
ion_dipole_moment = ion_charge_center * total_ion_charge
dipole_vector = (ion_dipole_moment + electron_dipole_moment)
return dipole_vector
Vasp.get_dipole_moment = get_dipole_moment
| """Returns x, y, and z coordinate and charge density arrays.
Supported file formats: CHG, CHGCAR
:param int spin: an integer
:returns: x, y, z, charge density arrays
:rtype: 3-d numpy arrays
Relies on :func:`ase.calculators.vasp.VaspChargeDensity`.
"""
x, y, z, data = get_volumetric_data(self, filename=filename)
return x, y, z, data[spin] | identifier_body |
volumetric_data.py | """Module for reading volumetric data from VASP calculations.
Charge density and dipole moment
Local potential
Electron localization function
"""
import os
import numpy as np
from ase.calculators.vasp import Vasp, VaspChargeDensity
from POTCAR import get_ZVAL
def get_volumetric_data(self, filename='CHG', **kwargs):
"""Read filename to read the volumetric data in it.
Supported filenames are CHG, CHGCAR, and LOCPOT.
"""
atoms = self.get_atoms()
vd = VaspChargeDensity(filename)
data = np.array(vd.chg)
n0, n1, n2 = data[0].shape
s0 = np.linspace(0, 1, num=n0, endpoint=False)
s1 = np.linspace(0, 1, num=n1, endpoint=False)
s2 = np.linspace(0, 1, num=n2, endpoint=False)
X, Y, Z = np.meshgrid(s0, s1, s2)
C = np.column_stack([X.ravel(),
Y.ravel(),
Z.ravel()])
uc = atoms.get_cell()
real = np.dot(C, uc)
# now convert arrays back to unitcell shape
x = np.reshape(real[:, 0], (n0, n1, n2))
y = np.reshape(real[:, 1], (n0, n1, n2))
z = np.reshape(real[:, 2], (n0, n1, n2))
return x, y, z, data
def get_charge_density(self, spin=0, filename='CHG'):
"""Returns x, y, and z coordinate and charge density arrays.
Supported file formats: CHG, CHGCAR
:param int spin: an integer
:returns: x, y, z, charge density arrays
:rtype: 3-d numpy arrays
Relies on :func:`ase.calculators.vasp.VaspChargeDensity`.
"""
x, y, z, data = get_volumetric_data(self, filename=filename)
return x, y, z, data[spin]
Vasp.get_charge_density = get_charge_density
def get_local_potential(self):
"""Returns x, y, z, and local potential arrays
is there a spin for this?
We multiply the data by the volume because we are reusing the
charge density code which divides by volume.
"""
x, y, z, data = get_volumetric_data(self, filename='LOCPOT')
atoms = self.get_atoms()
return x, y, z, data[0] * atoms.get_volume()
Vasp.get_local_potential = get_local_potential
def get_elf(self):
"""Returns x, y, z and electron localization function arrays."""
x, y, z, data = get_volumetric_data(self, filename='ELFCAR')
atoms = self.get_atoms()
return x, y, z, data[0] * atoms.get_volume()
Vasp.get_elf = get_elf
def get_electron_density_center(self, spin=0, scaled=True):
"""Returns center of electron density.
If scaled, use scaled coordinates, otherwise use cartesian
coordinates.
"""
atoms = self.get_atoms()
x, y, z, cd = self.get_charge_density(spin)
n0, n1, n2 = cd.shape
nelements = n0 * n1 * n2
voxel_volume = atoms.get_volume() / nelements
total_electron_charge = cd.sum() * voxel_volume
electron_density_center = np.array([(cd * x).sum(),
(cd * y).sum(),
(cd * z).sum()])
electron_density_center *= voxel_volume
electron_density_center /= total_electron_charge
if scaled:
uc = atoms.get_cell()
return np.dot(np.linalg.inv(uc.T), electron_density_center.T).T
else:
|
def get_dipole_moment(self, atoms=None):
"""Tries to return the dipole vector of the unit cell in atomic units.
Returns None when CHG file is empty/not-present.
To get the dipole moment, use this formula:
dipole_moment = ((dipole_vector**2).sum())**0.5/Debye
"""
if atoms is None:
atoms = self.get_atoms()
try:
x, y, z, cd = self.get_charge_density()
except (IOError, IndexError):
# IOError: no CHG file, function called outside context manager
# IndexError: Empty CHG file, Vasp run with lcharg=False
return None
n0, n1, n2 = cd.shape
nelements = n0 * n1 * n2
voxel_volume = atoms.get_volume() / nelements
total_electron_charge = -cd.sum() * voxel_volume
electron_density_center = np.array([(cd*x).sum(),
(cd*y).sum(),
(cd*z).sum()])
electron_density_center *= voxel_volume
electron_density_center /= total_electron_charge
electron_dipole_moment = electron_density_center * total_electron_charge
electron_dipole_moment *= -1.0
# now the ion charge center
LOP = self.get_pseudopotentials()
ppp = os.environ['VASP_PP_PATH']
# make dictionary for ease of use
zval = {}
for sym, ppath, hash in LOP:
# out a bug above. os.path.join discards the root if the
# second path starts with /, which makes it look like an
# absolute path. the get_pseudopotentials code returns a path
# with a / in the beginning.
fullpath = ppp + ppath
z = get_ZVAL(fullpath)
zval[sym] = z
ion_charge_center = np.array([0.0, 0.0, 0.0])
total_ion_charge = 0.0
for atom in atoms:
Z = zval[atom.symbol]
total_ion_charge += Z
pos = atom.position
ion_charge_center += Z*pos
ion_charge_center /= total_ion_charge
ion_dipole_moment = ion_charge_center * total_ion_charge
dipole_vector = (ion_dipole_moment + electron_dipole_moment)
return dipole_vector
Vasp.get_dipole_moment = get_dipole_moment
| return electron_density_center | conditional_block |
reference.rs | use crate::real_std::{any::Any, fmt, marker::PhantomData, sync::Mutex};
use crate::{
api::{generic::A, Generic, Unrooted, Userdata, WithVM, IO},
gc::{CloneUnrooted, GcPtr, GcRef, Move, Trace},
thread::ThreadInternal,
value::{Cloner, Value},
vm::Thread,
ExternModule, Result,
};
#[derive(VmType)]
#[gluon(gluon_vm)]
#[gluon(vm_type = "std.reference.Reference")]
pub struct Reference<T> {
value: Mutex<Value>,
thread: GcPtr<Thread>,
_marker: PhantomData<T>,
}
impl<T> Userdata for Reference<T>
where
T: Any + Send + Sync,
{
fn deep_clone<'gc>(
&self,
deep_cloner: &'gc mut Cloner,
) -> Result<GcRef<'gc, Box<dyn Userdata>>> {
let value = self.value.lock().unwrap();
// SAFETY During the `alloc` call the unrooted values are scanned through the `DataDef`
unsafe {
let cloned_value = deep_cloner.deep_clone(&value)?.unrooted();
let data: Box<dyn Userdata> = Box::new(Reference {
value: Mutex::new(cloned_value),
thread: GcPtr::from_raw(deep_cloner.thread()),
_marker: PhantomData::<A>,
});
deep_cloner.gc().alloc(Move(data))
}
}
}
impl<T> fmt::Debug for Reference<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Ref({:?})", *self.value.lock().unwrap())
}
}
unsafe impl<T> Trace for Reference<T> {
impl_trace_fields! { self, gc; value }
}
fn set(r: &Reference<A>, a: Generic<A>) -> IO<()> {
match r.thread.deep_clone_value(&r.thread, a.get_value()) {
// SAFETY Rooted when stored in the reference
Ok(a) => unsafe {
*r.value.lock().unwrap() = a.get_value().clone_unrooted();
IO::Value(())
},
Err(err) => IO::Exception(format!("{}", err)),
}
}
fn get(r: &Reference<A>) -> IO<Unrooted<A>> {
// SAFETY The returned, unrooted value gets pushed immediately to the stack
IO::Value(unsafe { Unrooted::from(r.value.lock().unwrap().clone_unrooted()) })
}
fn make_ref(a: WithVM<Generic<A>>) -> IO<Reference<A>> {
// SAFETY The returned, unrooted value gets pushed immediately to the stack
unsafe {
IO::Value(Reference {
value: Mutex::new(a.value.get_value().clone_unrooted()),
thread: GcPtr::from_raw(a.vm),
_marker: PhantomData,
})
}
}
| pub mod reference {
pub use crate::reference as prim;
}
}
pub fn load(vm: &Thread) -> Result<ExternModule> {
let _ = vm.register_type::<Reference<A>>("std.reference.Reference", &["a"]);
ExternModule::new(
vm,
record! {
type Reference a => Reference<A>,
(store "<-") => primitive!(2, "std.reference.prim.(<-)", std::reference::prim::set),
load => primitive!(1, "std.reference.prim.load", std::reference::prim::get),
(ref_ "ref") => primitive!(1, "std.reference.prim.ref", std::reference::prim::make_ref),
},
)
}
pub mod st {
use super::*;
use crate::api::RuntimeResult;
fn set(r: &Reference<A>, a: Generic<A>) -> RuntimeResult<(), String> {
match r.thread.deep_clone_value(&r.thread, a.get_value()) {
// SAFETY Rooted when stored in the reference
Ok(a) => unsafe {
*r.value.lock().unwrap() = a.get_value().clone_unrooted();
RuntimeResult::Return(())
},
Err(err) => RuntimeResult::Panic(format!("{}", err)),
}
}
fn get(r: &Reference<A>) -> Unrooted<A> {
// SAFETY The returned, unrooted value gets pushed immediately to the stack
unsafe { Unrooted::from(r.value.lock().unwrap().clone_unrooted()) }
}
fn make_ref(a: WithVM<Generic<A>>) -> Reference<A> {
// SAFETY The returned, unrooted value gets pushed immediately to the stack
unsafe {
Reference {
value: Mutex::new(a.value.get_value().clone_unrooted()),
thread: GcPtr::from_raw(a.vm),
_marker: PhantomData,
}
}
}
mod std {
pub mod st {
pub mod reference {
pub use crate::reference::st as prim;
}
}
}
pub fn load(vm: &Thread) -> Result<ExternModule> {
ExternModule::new(
vm,
record! {
type Reference a => Reference<A>,
(store "<-") => primitive!(2, "std.st.reference.prim.(<-)", std::st::reference::prim::set),
load => primitive!(1, "std.st.reference.prim.load", std::st::reference::prim::get),
(ref_ "ref") => primitive!(1, "std.st.reference.prim.ref", std::st::reference::prim::make_ref),
},
)
}
} | mod std { | random_line_split |
reference.rs | use crate::real_std::{any::Any, fmt, marker::PhantomData, sync::Mutex};
use crate::{
api::{generic::A, Generic, Unrooted, Userdata, WithVM, IO},
gc::{CloneUnrooted, GcPtr, GcRef, Move, Trace},
thread::ThreadInternal,
value::{Cloner, Value},
vm::Thread,
ExternModule, Result,
};
#[derive(VmType)]
#[gluon(gluon_vm)]
#[gluon(vm_type = "std.reference.Reference")]
pub struct Reference<T> {
value: Mutex<Value>,
thread: GcPtr<Thread>,
_marker: PhantomData<T>,
}
impl<T> Userdata for Reference<T>
where
T: Any + Send + Sync,
{
fn deep_clone<'gc>(
&self,
deep_cloner: &'gc mut Cloner,
) -> Result<GcRef<'gc, Box<dyn Userdata>>> {
let value = self.value.lock().unwrap();
// SAFETY During the `alloc` call the unrooted values are scanned through the `DataDef`
unsafe {
let cloned_value = deep_cloner.deep_clone(&value)?.unrooted();
let data: Box<dyn Userdata> = Box::new(Reference {
value: Mutex::new(cloned_value),
thread: GcPtr::from_raw(deep_cloner.thread()),
_marker: PhantomData::<A>,
});
deep_cloner.gc().alloc(Move(data))
}
}
}
impl<T> fmt::Debug for Reference<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Ref({:?})", *self.value.lock().unwrap())
}
}
unsafe impl<T> Trace for Reference<T> {
impl_trace_fields! { self, gc; value }
}
fn set(r: &Reference<A>, a: Generic<A>) -> IO<()> {
match r.thread.deep_clone_value(&r.thread, a.get_value()) {
// SAFETY Rooted when stored in the reference
Ok(a) => unsafe {
*r.value.lock().unwrap() = a.get_value().clone_unrooted();
IO::Value(())
},
Err(err) => IO::Exception(format!("{}", err)),
}
}
fn get(r: &Reference<A>) -> IO<Unrooted<A>> {
// SAFETY The returned, unrooted value gets pushed immediately to the stack
IO::Value(unsafe { Unrooted::from(r.value.lock().unwrap().clone_unrooted()) })
}
fn | (a: WithVM<Generic<A>>) -> IO<Reference<A>> {
// SAFETY The returned, unrooted value gets pushed immediately to the stack
unsafe {
IO::Value(Reference {
value: Mutex::new(a.value.get_value().clone_unrooted()),
thread: GcPtr::from_raw(a.vm),
_marker: PhantomData,
})
}
}
mod std {
pub mod reference {
pub use crate::reference as prim;
}
}
pub fn load(vm: &Thread) -> Result<ExternModule> {
let _ = vm.register_type::<Reference<A>>("std.reference.Reference", &["a"]);
ExternModule::new(
vm,
record! {
type Reference a => Reference<A>,
(store "<-") => primitive!(2, "std.reference.prim.(<-)", std::reference::prim::set),
load => primitive!(1, "std.reference.prim.load", std::reference::prim::get),
(ref_ "ref") => primitive!(1, "std.reference.prim.ref", std::reference::prim::make_ref),
},
)
}
pub mod st {
use super::*;
use crate::api::RuntimeResult;
fn set(r: &Reference<A>, a: Generic<A>) -> RuntimeResult<(), String> {
match r.thread.deep_clone_value(&r.thread, a.get_value()) {
// SAFETY Rooted when stored in the reference
Ok(a) => unsafe {
*r.value.lock().unwrap() = a.get_value().clone_unrooted();
RuntimeResult::Return(())
},
Err(err) => RuntimeResult::Panic(format!("{}", err)),
}
}
fn get(r: &Reference<A>) -> Unrooted<A> {
// SAFETY The returned, unrooted value gets pushed immediately to the stack
unsafe { Unrooted::from(r.value.lock().unwrap().clone_unrooted()) }
}
fn make_ref(a: WithVM<Generic<A>>) -> Reference<A> {
// SAFETY The returned, unrooted value gets pushed immediately to the stack
unsafe {
Reference {
value: Mutex::new(a.value.get_value().clone_unrooted()),
thread: GcPtr::from_raw(a.vm),
_marker: PhantomData,
}
}
}
mod std {
pub mod st {
pub mod reference {
pub use crate::reference::st as prim;
}
}
}
pub fn load(vm: &Thread) -> Result<ExternModule> {
ExternModule::new(
vm,
record! {
type Reference a => Reference<A>,
(store "<-") => primitive!(2, "std.st.reference.prim.(<-)", std::st::reference::prim::set),
load => primitive!(1, "std.st.reference.prim.load", std::st::reference::prim::get),
(ref_ "ref") => primitive!(1, "std.st.reference.prim.ref", std::st::reference::prim::make_ref),
},
)
}
}
| make_ref | identifier_name |
plugins.ts | /*
* Copyright 2022 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {CleanWebpackPlugin} from "clean-webpack-plugin";
import ESLintPlugin from "eslint-webpack-plugin";
import ForkTsCheckerWebpackPlugin from "fork-ts-checker-webpack-plugin";
import fs from "fs";
import HtmlWebpackPlugin from "html-webpack-plugin";
import _ from "lodash";
import MiniCssExtractPlugin from "mini-css-extract-plugin";
import path from "path";
import webpack from "webpack";
import {ConfigOptions, getEntries} from "./variables";
import {LicensePlugins} from "./webpack-license-plugin";
const jasmineCore = require("jasmine-core");
const StatsPlugin = require("stats-webpack-plugin");
const SassLintPlugin = require("sass-lint-webpack");
const UnusedWebpackPlugin = require("unused-webpack-plugin");
const WebpackBuildNotifierPlugin = require("webpack-build-notifier");
export function plugins(configOptions: ConfigOptions): webpack.Plugin[] {
const plugins = [
new ESLintPlugin({
extensions: ["js", "msx"],
exclude: ["node_modules", "webpack/gen"],
failOnWarning: true,
threads: true
}),
new CleanWebpackPlugin(),
new UnusedWebpackPlugin({
directories: [
path.join(configOptions.railsRoot, "webpack"),
path.join(configOptions.railsRoot, "spec", "webpack")
],
exclude: ["config/**/*.*", "*.d.ts", 'tsconfig.json'],
}) as webpack.Plugin,
new SassLintPlugin({configPath: path.join(configOptions.assetsDir, ".sasslintrc")}) as webpack.Plugin,
new StatsPlugin("manifest.json", {
chunkModules: false,
source: false,
chunks: false,
modules: false,
assets: true
}) as webpack.Plugin,
new webpack.ProvidePlugin({
"$": "jquery",
"jQuery": "jquery",
"window.jQuery": "jquery"
}) as webpack.Plugin,
new LicensePlugins(configOptions.licenseReportFile),
new ForkTsCheckerWebpackPlugin({
typescript: { diagnosticOptions: { semantic: true, syntactic: true } }
})
];
if (configOptions.production) {
plugins.push(new MiniCssExtractPlugin({
// Options similar to the same options in webpackOptions.output
// both options are optional
filename: "[name]-[hash].css",
chunkFilename: "[id]-[hash].css",
ignoreOrder: true
}) as unknown as webpack.Plugin);
} else {
const jasmineFiles = jasmineCore.files;
const entries = getEntries(configOptions);
delete entries.specRoot;
const jasmineIndexPage = {
// rebuild every time; without this, `_specRunner.html` disappears in webpack-watch
// after a code change (because of the `clean-webpack-plugin`), unless the template
// itself changes.
cache: false,
| filename: "_specRunner.html",
template: path.join(configOptions.railsRoot, "spec", "webpack", "_specRunner.html.ejs"),
jasmineJsFiles: _.map(jasmineFiles.jsFiles.concat(jasmineFiles.bootFiles), (file) => {
return `__jasmine/${file}`;
}),
jasmineCssFiles: _.map(jasmineFiles.cssFiles, (file) => {
return `__jasmine/${file}`;
}),
excludeChunks: _.keys(entries)
};
class JasmineAssetsPlugin {
apply(compiler: webpack.Compiler) {
compiler.hooks.emit.tapAsync("JasmineAssetsPlugin",
(compilation: webpack.compilation.Compilation, callback: () => any) => {
const allJasmineAssets = jasmineFiles.jsFiles.concat(jasmineFiles.bootFiles)
.concat(jasmineFiles.cssFiles);
_.each(allJasmineAssets, (asset) => {
const file = path.join(jasmineFiles.path, asset);
const contents = fs.readFileSync(file).toString();
compilation.assets[`__jasmine/${asset}`] = {
source() {
return contents;
},
size() {
return contents.length;
}
};
});
callback();
});
}
}
plugins.push(new HtmlWebpackPlugin(jasmineIndexPage));
plugins.push(new JasmineAssetsPlugin());
// in Windows Server Core containers, this causes webpack to hang indefinitely.
// it's not critical for builds anyway, just a nice dev utility.
if (process.platform !== "win32") {
plugins.push(new WebpackBuildNotifierPlugin({
suppressSuccess: true,
suppressWarning: true
})
);
}
}
return plugins;
} | inject: true,
xhtml: true, | random_line_split |
plugins.ts | /*
* Copyright 2022 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {CleanWebpackPlugin} from "clean-webpack-plugin";
import ESLintPlugin from "eslint-webpack-plugin";
import ForkTsCheckerWebpackPlugin from "fork-ts-checker-webpack-plugin";
import fs from "fs";
import HtmlWebpackPlugin from "html-webpack-plugin";
import _ from "lodash";
import MiniCssExtractPlugin from "mini-css-extract-plugin";
import path from "path";
import webpack from "webpack";
import {ConfigOptions, getEntries} from "./variables";
import {LicensePlugins} from "./webpack-license-plugin";
const jasmineCore = require("jasmine-core");
const StatsPlugin = require("stats-webpack-plugin");
const SassLintPlugin = require("sass-lint-webpack");
const UnusedWebpackPlugin = require("unused-webpack-plugin");
const WebpackBuildNotifierPlugin = require("webpack-build-notifier");
export function plugins(configOptions: ConfigOptions): webpack.Plugin[] {
const plugins = [
new ESLintPlugin({
extensions: ["js", "msx"],
exclude: ["node_modules", "webpack/gen"],
failOnWarning: true,
threads: true
}),
new CleanWebpackPlugin(),
new UnusedWebpackPlugin({
directories: [
path.join(configOptions.railsRoot, "webpack"),
path.join(configOptions.railsRoot, "spec", "webpack")
],
exclude: ["config/**/*.*", "*.d.ts", 'tsconfig.json'],
}) as webpack.Plugin,
new SassLintPlugin({configPath: path.join(configOptions.assetsDir, ".sasslintrc")}) as webpack.Plugin,
new StatsPlugin("manifest.json", {
chunkModules: false,
source: false,
chunks: false,
modules: false,
assets: true
}) as webpack.Plugin,
new webpack.ProvidePlugin({
"$": "jquery",
"jQuery": "jquery",
"window.jQuery": "jquery"
}) as webpack.Plugin,
new LicensePlugins(configOptions.licenseReportFile),
new ForkTsCheckerWebpackPlugin({
typescript: { diagnosticOptions: { semantic: true, syntactic: true } }
})
];
if (configOptions.production) {
plugins.push(new MiniCssExtractPlugin({
// Options similar to the same options in webpackOptions.output
// both options are optional
filename: "[name]-[hash].css",
chunkFilename: "[id]-[hash].css",
ignoreOrder: true
}) as unknown as webpack.Plugin);
} else {
const jasmineFiles = jasmineCore.files;
const entries = getEntries(configOptions);
delete entries.specRoot;
const jasmineIndexPage = {
// rebuild every time; without this, `_specRunner.html` disappears in webpack-watch
// after a code change (because of the `clean-webpack-plugin`), unless the template
// itself changes.
cache: false,
inject: true,
xhtml: true,
filename: "_specRunner.html",
template: path.join(configOptions.railsRoot, "spec", "webpack", "_specRunner.html.ejs"),
jasmineJsFiles: _.map(jasmineFiles.jsFiles.concat(jasmineFiles.bootFiles), (file) => {
return `__jasmine/${file}`;
}),
jasmineCssFiles: _.map(jasmineFiles.cssFiles, (file) => {
return `__jasmine/${file}`;
}),
excludeChunks: _.keys(entries)
};
class JasmineAssetsPlugin {
apply(compiler: webpack.Compiler) |
callback();
});
}
}
plugins.push(new HtmlWebpackPlugin(jasmineIndexPage));
plugins.push(new JasmineAssetsPlugin());
// in Windows Server Core containers, this causes webpack to hang indefinitely.
// it's not critical for builds anyway, just a nice dev utility.
if (process.platform !== "win32") {
plugins.push(new WebpackBuildNotifierPlugin({
suppressSuccess: true,
suppressWarning: true
})
);
}
}
return plugins;
}
| {
compiler.hooks.emit.tapAsync("JasmineAssetsPlugin",
(compilation: webpack.compilation.Compilation, callback: () => any) => {
const allJasmineAssets = jasmineFiles.jsFiles.concat(jasmineFiles.bootFiles)
.concat(jasmineFiles.cssFiles);
_.each(allJasmineAssets, (asset) => {
const file = path.join(jasmineFiles.path, asset);
const contents = fs.readFileSync(file).toString();
compilation.assets[`__jasmine/${asset}`] = {
source() {
return contents;
},
size() {
return contents.length;
}
};
}); | identifier_body |
plugins.ts | /*
* Copyright 2022 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {CleanWebpackPlugin} from "clean-webpack-plugin";
import ESLintPlugin from "eslint-webpack-plugin";
import ForkTsCheckerWebpackPlugin from "fork-ts-checker-webpack-plugin";
import fs from "fs";
import HtmlWebpackPlugin from "html-webpack-plugin";
import _ from "lodash";
import MiniCssExtractPlugin from "mini-css-extract-plugin";
import path from "path";
import webpack from "webpack";
import {ConfigOptions, getEntries} from "./variables";
import {LicensePlugins} from "./webpack-license-plugin";
const jasmineCore = require("jasmine-core");
const StatsPlugin = require("stats-webpack-plugin");
const SassLintPlugin = require("sass-lint-webpack");
const UnusedWebpackPlugin = require("unused-webpack-plugin");
const WebpackBuildNotifierPlugin = require("webpack-build-notifier");
export function plugins(configOptions: ConfigOptions): webpack.Plugin[] {
const plugins = [
new ESLintPlugin({
extensions: ["js", "msx"],
exclude: ["node_modules", "webpack/gen"],
failOnWarning: true,
threads: true
}),
new CleanWebpackPlugin(),
new UnusedWebpackPlugin({
directories: [
path.join(configOptions.railsRoot, "webpack"),
path.join(configOptions.railsRoot, "spec", "webpack")
],
exclude: ["config/**/*.*", "*.d.ts", 'tsconfig.json'],
}) as webpack.Plugin,
new SassLintPlugin({configPath: path.join(configOptions.assetsDir, ".sasslintrc")}) as webpack.Plugin,
new StatsPlugin("manifest.json", {
chunkModules: false,
source: false,
chunks: false,
modules: false,
assets: true
}) as webpack.Plugin,
new webpack.ProvidePlugin({
"$": "jquery",
"jQuery": "jquery",
"window.jQuery": "jquery"
}) as webpack.Plugin,
new LicensePlugins(configOptions.licenseReportFile),
new ForkTsCheckerWebpackPlugin({
typescript: { diagnosticOptions: { semantic: true, syntactic: true } }
})
];
if (configOptions.production) {
plugins.push(new MiniCssExtractPlugin({
// Options similar to the same options in webpackOptions.output
// both options are optional
filename: "[name]-[hash].css",
chunkFilename: "[id]-[hash].css",
ignoreOrder: true
}) as unknown as webpack.Plugin);
} else {
const jasmineFiles = jasmineCore.files;
const entries = getEntries(configOptions);
delete entries.specRoot;
const jasmineIndexPage = {
// rebuild every time; without this, `_specRunner.html` disappears in webpack-watch
// after a code change (because of the `clean-webpack-plugin`), unless the template
// itself changes.
cache: false,
inject: true,
xhtml: true,
filename: "_specRunner.html",
template: path.join(configOptions.railsRoot, "spec", "webpack", "_specRunner.html.ejs"),
jasmineJsFiles: _.map(jasmineFiles.jsFiles.concat(jasmineFiles.bootFiles), (file) => {
return `__jasmine/${file}`;
}),
jasmineCssFiles: _.map(jasmineFiles.cssFiles, (file) => {
return `__jasmine/${file}`;
}),
excludeChunks: _.keys(entries)
};
class JasmineAssetsPlugin {
apply(compiler: webpack.Compiler) {
compiler.hooks.emit.tapAsync("JasmineAssetsPlugin",
(compilation: webpack.compilation.Compilation, callback: () => any) => {
const allJasmineAssets = jasmineFiles.jsFiles.concat(jasmineFiles.bootFiles)
.concat(jasmineFiles.cssFiles);
_.each(allJasmineAssets, (asset) => {
const file = path.join(jasmineFiles.path, asset);
const contents = fs.readFileSync(file).toString();
compilation.assets[`__jasmine/${asset}`] = {
source() {
return contents;
},
size() {
return contents.length;
}
};
});
callback();
});
}
}
plugins.push(new HtmlWebpackPlugin(jasmineIndexPage));
plugins.push(new JasmineAssetsPlugin());
// in Windows Server Core containers, this causes webpack to hang indefinitely.
// it's not critical for builds anyway, just a nice dev utility.
if (process.platform !== "win32") |
}
return plugins;
}
| {
plugins.push(new WebpackBuildNotifierPlugin({
suppressSuccess: true,
suppressWarning: true
})
);
} | conditional_block |
plugins.ts | /*
* Copyright 2022 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {CleanWebpackPlugin} from "clean-webpack-plugin";
import ESLintPlugin from "eslint-webpack-plugin";
import ForkTsCheckerWebpackPlugin from "fork-ts-checker-webpack-plugin";
import fs from "fs";
import HtmlWebpackPlugin from "html-webpack-plugin";
import _ from "lodash";
import MiniCssExtractPlugin from "mini-css-extract-plugin";
import path from "path";
import webpack from "webpack";
import {ConfigOptions, getEntries} from "./variables";
import {LicensePlugins} from "./webpack-license-plugin";
const jasmineCore = require("jasmine-core");
const StatsPlugin = require("stats-webpack-plugin");
const SassLintPlugin = require("sass-lint-webpack");
const UnusedWebpackPlugin = require("unused-webpack-plugin");
const WebpackBuildNotifierPlugin = require("webpack-build-notifier");
export function | (configOptions: ConfigOptions): webpack.Plugin[] {
const plugins = [
new ESLintPlugin({
extensions: ["js", "msx"],
exclude: ["node_modules", "webpack/gen"],
failOnWarning: true,
threads: true
}),
new CleanWebpackPlugin(),
new UnusedWebpackPlugin({
directories: [
path.join(configOptions.railsRoot, "webpack"),
path.join(configOptions.railsRoot, "spec", "webpack")
],
exclude: ["config/**/*.*", "*.d.ts", 'tsconfig.json'],
}) as webpack.Plugin,
new SassLintPlugin({configPath: path.join(configOptions.assetsDir, ".sasslintrc")}) as webpack.Plugin,
new StatsPlugin("manifest.json", {
chunkModules: false,
source: false,
chunks: false,
modules: false,
assets: true
}) as webpack.Plugin,
new webpack.ProvidePlugin({
"$": "jquery",
"jQuery": "jquery",
"window.jQuery": "jquery"
}) as webpack.Plugin,
new LicensePlugins(configOptions.licenseReportFile),
new ForkTsCheckerWebpackPlugin({
typescript: { diagnosticOptions: { semantic: true, syntactic: true } }
})
];
if (configOptions.production) {
plugins.push(new MiniCssExtractPlugin({
// Options similar to the same options in webpackOptions.output
// both options are optional
filename: "[name]-[hash].css",
chunkFilename: "[id]-[hash].css",
ignoreOrder: true
}) as unknown as webpack.Plugin);
} else {
const jasmineFiles = jasmineCore.files;
const entries = getEntries(configOptions);
delete entries.specRoot;
const jasmineIndexPage = {
// rebuild every time; without this, `_specRunner.html` disappears in webpack-watch
// after a code change (because of the `clean-webpack-plugin`), unless the template
// itself changes.
cache: false,
inject: true,
xhtml: true,
filename: "_specRunner.html",
template: path.join(configOptions.railsRoot, "spec", "webpack", "_specRunner.html.ejs"),
jasmineJsFiles: _.map(jasmineFiles.jsFiles.concat(jasmineFiles.bootFiles), (file) => {
return `__jasmine/${file}`;
}),
jasmineCssFiles: _.map(jasmineFiles.cssFiles, (file) => {
return `__jasmine/${file}`;
}),
excludeChunks: _.keys(entries)
};
class JasmineAssetsPlugin {
apply(compiler: webpack.Compiler) {
compiler.hooks.emit.tapAsync("JasmineAssetsPlugin",
(compilation: webpack.compilation.Compilation, callback: () => any) => {
const allJasmineAssets = jasmineFiles.jsFiles.concat(jasmineFiles.bootFiles)
.concat(jasmineFiles.cssFiles);
_.each(allJasmineAssets, (asset) => {
const file = path.join(jasmineFiles.path, asset);
const contents = fs.readFileSync(file).toString();
compilation.assets[`__jasmine/${asset}`] = {
source() {
return contents;
},
size() {
return contents.length;
}
};
});
callback();
});
}
}
plugins.push(new HtmlWebpackPlugin(jasmineIndexPage));
plugins.push(new JasmineAssetsPlugin());
// in Windows Server Core containers, this causes webpack to hang indefinitely.
// it's not critical for builds anyway, just a nice dev utility.
if (process.platform !== "win32") {
plugins.push(new WebpackBuildNotifierPlugin({
suppressSuccess: true,
suppressWarning: true
})
);
}
}
return plugins;
}
| plugins | identifier_name |
test_transport.py | import unittest
from node.openbazaar_daemon import OpenBazaarContext
import mock
from node import transport
def get_mock_open_bazaar_context():
return OpenBazaarContext.create_default_instance()
class TestTransportLayerCallbacks(unittest.TestCase):
"""Test the callback features of the TransportLayer class."""
def setUp(self):
# For testing sections
self.callback1 = mock.Mock()
self.callback2 = mock.Mock()
self.callback3 = mock.Mock()
self.validator1 = mock.Mock()
self.validator2 = mock.Mock()
self.validator3 = mock.Mock()
ob_ctx = get_mock_open_bazaar_context() | self.transport_layer = transport.TransportLayer(ob_ctx, guid, nickname)
self.transport_layer.add_callback('section_one', {'cb': self.callback1, 'validator_cb': self.validator1})
self.transport_layer.add_callback('section_one', {'cb': self.callback2, 'validator_cb': self.validator2})
self.transport_layer.add_callback('all', {'cb': self.callback3, 'validator_cb': self.validator3})
# For testing validators
self.callback4 = mock.Mock()
self.callback5 = mock.Mock()
self.validator4 = mock.Mock(return_value=True)
self.validator5 = mock.Mock(return_value=False)
self.transport_layer.add_callback('section_two', {'cb': self.callback4, 'validator_cb': self.validator4})
self.transport_layer.add_callback('section_two', {'cb': self.callback5, 'validator_cb': self.validator5})
def _assert_called(self, one, two, three):
self.assertEqual(self.callback1.call_count, one)
self.assertEqual(self.callback2.call_count, two)
self.assertEqual(self.callback3.call_count, three)
def test_fixture(self):
self._assert_called(0, 0, 0)
def test_callbacks(self):
self.transport_layer.trigger_callbacks('section_one', None)
self._assert_called(1, 1, 1)
def test_all_callback(self):
self.transport_layer.trigger_callbacks('section_with_no_register', None)
self._assert_called(0, 0, 1)
def test_validators(self):
self.transport_layer.trigger_callbacks('section_two', None)
self.assertEqual(self.validator4.call_count, 1)
self.assertEqual(self.validator5.call_count, 1)
self.assertEqual(self.callback4.call_count, 1)
self.assertEqual(self.callback5.call_count, 0)
if __name__ == "__main__":
unittest.main() | ob_ctx.nat_status = {'nat_type': 'Restric NAT'}
guid = 1
nickname = None
| random_line_split |
test_transport.py | import unittest
from node.openbazaar_daemon import OpenBazaarContext
import mock
from node import transport
def get_mock_open_bazaar_context():
|
class TestTransportLayerCallbacks(unittest.TestCase):
"""Test the callback features of the TransportLayer class."""
def setUp(self):
# For testing sections
self.callback1 = mock.Mock()
self.callback2 = mock.Mock()
self.callback3 = mock.Mock()
self.validator1 = mock.Mock()
self.validator2 = mock.Mock()
self.validator3 = mock.Mock()
ob_ctx = get_mock_open_bazaar_context()
ob_ctx.nat_status = {'nat_type': 'Restric NAT'}
guid = 1
nickname = None
self.transport_layer = transport.TransportLayer(ob_ctx, guid, nickname)
self.transport_layer.add_callback('section_one', {'cb': self.callback1, 'validator_cb': self.validator1})
self.transport_layer.add_callback('section_one', {'cb': self.callback2, 'validator_cb': self.validator2})
self.transport_layer.add_callback('all', {'cb': self.callback3, 'validator_cb': self.validator3})
# For testing validators
self.callback4 = mock.Mock()
self.callback5 = mock.Mock()
self.validator4 = mock.Mock(return_value=True)
self.validator5 = mock.Mock(return_value=False)
self.transport_layer.add_callback('section_two', {'cb': self.callback4, 'validator_cb': self.validator4})
self.transport_layer.add_callback('section_two', {'cb': self.callback5, 'validator_cb': self.validator5})
def _assert_called(self, one, two, three):
self.assertEqual(self.callback1.call_count, one)
self.assertEqual(self.callback2.call_count, two)
self.assertEqual(self.callback3.call_count, three)
def test_fixture(self):
self._assert_called(0, 0, 0)
def test_callbacks(self):
self.transport_layer.trigger_callbacks('section_one', None)
self._assert_called(1, 1, 1)
def test_all_callback(self):
self.transport_layer.trigger_callbacks('section_with_no_register', None)
self._assert_called(0, 0, 1)
def test_validators(self):
self.transport_layer.trigger_callbacks('section_two', None)
self.assertEqual(self.validator4.call_count, 1)
self.assertEqual(self.validator5.call_count, 1)
self.assertEqual(self.callback4.call_count, 1)
self.assertEqual(self.callback5.call_count, 0)
if __name__ == "__main__":
unittest.main()
| return OpenBazaarContext.create_default_instance() | identifier_body |
test_transport.py | import unittest
from node.openbazaar_daemon import OpenBazaarContext
import mock
from node import transport
def get_mock_open_bazaar_context():
return OpenBazaarContext.create_default_instance()
class TestTransportLayerCallbacks(unittest.TestCase):
"""Test the callback features of the TransportLayer class."""
def | (self):
# For testing sections
self.callback1 = mock.Mock()
self.callback2 = mock.Mock()
self.callback3 = mock.Mock()
self.validator1 = mock.Mock()
self.validator2 = mock.Mock()
self.validator3 = mock.Mock()
ob_ctx = get_mock_open_bazaar_context()
ob_ctx.nat_status = {'nat_type': 'Restric NAT'}
guid = 1
nickname = None
self.transport_layer = transport.TransportLayer(ob_ctx, guid, nickname)
self.transport_layer.add_callback('section_one', {'cb': self.callback1, 'validator_cb': self.validator1})
self.transport_layer.add_callback('section_one', {'cb': self.callback2, 'validator_cb': self.validator2})
self.transport_layer.add_callback('all', {'cb': self.callback3, 'validator_cb': self.validator3})
# For testing validators
self.callback4 = mock.Mock()
self.callback5 = mock.Mock()
self.validator4 = mock.Mock(return_value=True)
self.validator5 = mock.Mock(return_value=False)
self.transport_layer.add_callback('section_two', {'cb': self.callback4, 'validator_cb': self.validator4})
self.transport_layer.add_callback('section_two', {'cb': self.callback5, 'validator_cb': self.validator5})
def _assert_called(self, one, two, three):
self.assertEqual(self.callback1.call_count, one)
self.assertEqual(self.callback2.call_count, two)
self.assertEqual(self.callback3.call_count, three)
def test_fixture(self):
self._assert_called(0, 0, 0)
def test_callbacks(self):
self.transport_layer.trigger_callbacks('section_one', None)
self._assert_called(1, 1, 1)
def test_all_callback(self):
self.transport_layer.trigger_callbacks('section_with_no_register', None)
self._assert_called(0, 0, 1)
def test_validators(self):
self.transport_layer.trigger_callbacks('section_two', None)
self.assertEqual(self.validator4.call_count, 1)
self.assertEqual(self.validator5.call_count, 1)
self.assertEqual(self.callback4.call_count, 1)
self.assertEqual(self.callback5.call_count, 0)
if __name__ == "__main__":
unittest.main()
| setUp | identifier_name |
test_transport.py | import unittest
from node.openbazaar_daemon import OpenBazaarContext
import mock
from node import transport
def get_mock_open_bazaar_context():
return OpenBazaarContext.create_default_instance()
class TestTransportLayerCallbacks(unittest.TestCase):
"""Test the callback features of the TransportLayer class."""
def setUp(self):
# For testing sections
self.callback1 = mock.Mock()
self.callback2 = mock.Mock()
self.callback3 = mock.Mock()
self.validator1 = mock.Mock()
self.validator2 = mock.Mock()
self.validator3 = mock.Mock()
ob_ctx = get_mock_open_bazaar_context()
ob_ctx.nat_status = {'nat_type': 'Restric NAT'}
guid = 1
nickname = None
self.transport_layer = transport.TransportLayer(ob_ctx, guid, nickname)
self.transport_layer.add_callback('section_one', {'cb': self.callback1, 'validator_cb': self.validator1})
self.transport_layer.add_callback('section_one', {'cb': self.callback2, 'validator_cb': self.validator2})
self.transport_layer.add_callback('all', {'cb': self.callback3, 'validator_cb': self.validator3})
# For testing validators
self.callback4 = mock.Mock()
self.callback5 = mock.Mock()
self.validator4 = mock.Mock(return_value=True)
self.validator5 = mock.Mock(return_value=False)
self.transport_layer.add_callback('section_two', {'cb': self.callback4, 'validator_cb': self.validator4})
self.transport_layer.add_callback('section_two', {'cb': self.callback5, 'validator_cb': self.validator5})
def _assert_called(self, one, two, three):
self.assertEqual(self.callback1.call_count, one)
self.assertEqual(self.callback2.call_count, two)
self.assertEqual(self.callback3.call_count, three)
def test_fixture(self):
self._assert_called(0, 0, 0)
def test_callbacks(self):
self.transport_layer.trigger_callbacks('section_one', None)
self._assert_called(1, 1, 1)
def test_all_callback(self):
self.transport_layer.trigger_callbacks('section_with_no_register', None)
self._assert_called(0, 0, 1)
def test_validators(self):
self.transport_layer.trigger_callbacks('section_two', None)
self.assertEqual(self.validator4.call_count, 1)
self.assertEqual(self.validator5.call_count, 1)
self.assertEqual(self.callback4.call_count, 1)
self.assertEqual(self.callback5.call_count, 0)
if __name__ == "__main__":
| unittest.main() | conditional_block |
|
also-watched.component.spec.ts | import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { AlsoWatchedComponent } from './also-watched.component';
import { NO_ERRORS_SCHEMA } from '@angular/core';
import { RouterTestingModule } from '@angular/router/testing';
import { Response, ResponseOptions, Http, ConnectionBackend, BaseRequestOptions, RequestOptions, HttpModule } from '@angular/http';
import { MockBackend, MockConnection } from '@angular/http/testing';
| import { SearchPageComponent } from '../search-page/search-page.component';
import { FilmsService } from '../films.service';
describe('AlsoWatchedComponent', () => {
let component: AlsoWatchedComponent;
let fixture: ComponentFixture<AlsoWatchedComponent>;
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [
AlsoWatchedComponent,
MyLibraryComponent,
SearchPageComponent
],
imports: [
RouterTestingModule,
HttpModule
],
providers: [
Http,
FilmsService,
{provide: RequestOptions, useClass: BaseRequestOptions},
{provide: ConnectionBackend, useClass: MockBackend},
],
schemas: [ NO_ERRORS_SCHEMA ]
})
.compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(AlsoWatchedComponent);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
expect(component).toBeTruthy();
});
it('should exists title var', () => {
expect(component.title).toEqual('Viewers also watched');
});
}); | import { MyLibraryComponent } from '../my-library/my-library.component'; | random_line_split |
translator.js | 'use strict';
var messages = {
chinese : {
"Device not initialized or passphrase request cancelled" : "设备未初始化或已取消输入密码",
"Invalid signature" : "无效的签名",
"Not enough funds" : "资金不足",
"PIN Cancelled" : "PIN码输入已取消",
"Invalid PIN" : "PIN码错误",
"PIN removal cancelled" : "PIN码删除已取消",
"Ping cancelled" : "Ping已取消",
"PIN change cancelled" : "PIN码修改已取消",
"PIN change failed" : "PIN码修改失败",
"Wipe cancelled" : "重置已取消",
"Entropy cancelled" : "Entropy已取消",
"Fee over threshold. Signing cancelled." : "手续费超过阈值。签名已取消。",
"Signing cancelled by user" : "签名已取消",
"Apply settings cancelled" : "修改标签与语言已取消",
"Show address cancelled" : "地址显示已取消",
"Sign message cancelled" : "签名消息已取消",
"Load cancelled" : "加载已取消",
"CipherKeyValue cancelled" : "CipherKeyValue已取消",
"Reset cancelled" : "设置已取消",
"Not in bootloader mode" : "不在升级模式",
"Device is already initialized. Use Wipe first." : "设备已经初始化。请先重置设备。",
"Unknown message" : "消息不可识别",
"Not in Recovery mode" : "不在恢复模式",
"Not in Reset mode" : "不在设置模式",
"Not in Signing mode" : "不在签名模式",
"No transaction provided" : "没有提供交易",
"No key provided" : "没有提供键",
"No value provided" : "没有提供值",
"Value length must be a multiple of 16" : "值的长度必须是16的倍数",
"No setting provided" : "没有提供配置",
"No public key provided" : "没有提供公钥",
"Invalid public key provided" : "提供的公钥无效",
"No message provided" : "没有提供消息",
"Message length must be a multiple of 16" : "消息长度必须是16的倍数",
"Message too big" : "消息长度过大",
"Invalid word count (has to be 12, 18 or 24 bits)" : "无效的单词数量(必须是12、18或24个)",
"Wrong word retyped" : "单词输入错误",
"Word not found in a wordlist" : "单词表中不存在的单词",
"Invalid mnemonic, are words in correct order?" : "无效的种子,单词排列顺序是否正确?",
"Invalid strength (has to be 128, 192 or 256 bits)" : "无效的强度(必须是128、192或256位)",
"Failed to serialize input" : "系列化输入失败",
"Failed to serialize output" : "系列化输出失败",
"Encountered invalid prevhash" : "无效的prevhash",
"Failed to compile input" : "编制输入失败",
"Only one change output allowed" : "只允许一个找零输出",
"Transaction has changed during signing" : "签名期间交易已经改变了",
"Failed to compile output" : "编制输出失败",
"Signing error" : "签名出错了",
"Transaction must have at least one output" : "交易必须至少有一个输出",
"Transaction must have at least one input" : "交易必须至少有一个输入",
"Invalid coin name" : "无效的币种",
"Error signing message" : "签名消息出错了"
}
};
var translator = function(language, key) {
if (messages[language] && messages[language][key]) {
return messages[language][key];
} else {
|
module.exports = translator; | return key;
}
};
| random_line_split |
translator.js | 'use strict';
var messages = {
chinese : {
"Device not initialized or passphrase request cancelled" : "设备未初始化或已取消输入密码",
"Invalid signature" : "无效的签名",
"Not enough funds" : "资金不足",
"PIN Cancelled" : "PIN码输入已取消",
"Invalid PIN" : "PIN码错误",
"PIN removal cancelled" : "PIN码删除已取消",
"Ping cancelled" : "Ping已取消",
"PIN change cancelled" : "PIN码修改已取消",
"PIN change failed" : "PIN码修改失败",
"Wipe cancelled" : "重置已取消",
"Entropy cancelled" : "Entropy已取消",
"Fee over threshold. Signing cancelled." : "手续费超过阈值。签名已取消。",
"Signing cancelled by user" : "签名已取消",
"Apply settings cancelled" : "修改标签与语言已取消",
"Show address cancelled" : "地址显示已取消",
"Sign message cancelled" : "签名消息已取消",
"Load cancelled" : "加载已取消",
"CipherKeyValue cancelled" : "CipherKeyValue已取消",
"Reset cancelled" : "设置已取消",
"Not in bootloader mode" : "不在升级模式",
"Device is already initialized. Use Wipe first." : "设备已经初始化。请先重置设备。",
"Unknown message" : "消息不可识别",
"Not in Recovery mode" : "不在恢复模式",
"Not in Reset mode" : "不在设置模式",
"Not in Signing mode" : "不在签名模式",
"No transaction provided" : "没有提供交易",
"No key provided" : "没有提供键",
"No value provided" : "没有提供值",
"Value length must be a multiple of 16" : "值的长度必须是16的倍数",
"No setting provided" : "没有提供配置",
"No public key provided" : "没有提供公钥",
"Invalid public key provided" : "提供的公钥无效",
"No message provided" : "没有提供消息",
"Message length must be a multiple of 16" : "消息长度必须是16的倍数",
"Message too big" : "消息长度过大",
"Invalid word count (has to be 12, 18 or 24 bits)" : "无效的单词数量(必须是12、18或24个)",
"Wrong word retyped" : "单词输入错误",
"Word not found in a wordlist" : "单词表中不存在的单词",
"Invalid mnemonic, are words in correct order?" : "无效的种子,单词排列顺序是否正确?",
"Invalid strength (has to be 128, 192 or 256 bits)" : "无效的强度(必须是128、192或256位)",
"Failed to serialize input" : "系列化输入失败",
"Failed to serialize output" : "系列化输出失败",
"Encountered invalid prevhash" : "无效的prevhash",
"Failed to compile input" : "编制输入失败",
"Only one change output allowed" : "只允许一个找零输出",
"Transaction has changed during signing" : "签名期间交易已经改变了",
"Failed to compile output" : "编制输出失败",
"Signing error" : "签名出错了",
"Transaction must have at least one output" : "交易必须至少有一个输出",
"Transaction must have at least one input" : "交易必须至少有一个输入",
"Invalid coin name" : "无效的币种",
"Error signing message" : "签名消息出错了"
}
};
var translator = function(language, key) {
if (messages[language] && messages[language][key]) {
return messages[language][key];
} else {
return key;
}
};
module.exports = translator; | conditional_block |
||
eval_kanungo_est.py | import env
import numpy as np
import metaomr
import metaomr.kanungo as kan
from metaomr.page import Page
import glob
import pandas as pd
import itertools
import os.path
import sys
from datetime import datetime
from random import random, randint
IDEAL = [path for path in sorted(glob.glob('testset/modern/*.png'))
if 'nostaff' not in path]
def random_params():
|
columns = pd.MultiIndex.from_product([['real', 'estimate'], 'nu a0 a b0 b k'.split()])
columns = columns.append(pd.MultiIndex.from_product([['estimate'],['stat','time','status','nfev']]))
cols = []
results = []
fun = 'ks'
method = 'Nelder-Mead'
for image in IDEAL:
name = os.path.basename(image).split('.')[0]
page, = metaomr.open(image)
kimg = kan.KanungoImage(kan.normalized_page(page)[0])
for i in xrange(3):
params = random_params()
synth = Page(kimg.degrade(params))
synth.staff_dist = 8
for maxfev in [25, 50]:
start = datetime.now()
est_params = kan.est_parameters(synth, test_fn=kan.test_hists_ks if fun == 'ks' else kan.test_hists_chisq, opt_method=method, maxfev=maxfev)
end = datetime.now()
cols.append((name, fun, maxfev, i))
results.append(list(params) + list(est_params.x) + [est_params.fun, (end - start).total_seconds(), est_params.status, est_params.nfev])
sys.stderr.write('.')
res = pd.DataFrame(results, columns=columns)
res.index = pd.MultiIndex.from_tuples(cols)
res.index.names = 'doc test maxfev num'.split()
res.to_csv('kanungo_eval.csv')
sys.stderr.write('\n')
| if random() < 0.25:
nu = 0
else:
nu = random() * 0.05
if random() < 0.25:
a0 = a = 0
else:
a0 = random() * 0.2
a = 0.5 + random() * 2
if random() < 0.25:
b0 = b = 0
else:
b0 = random() * 0.2
b = 0.5 + random() * 2
k = randint(0, 4)
return nu, a0, a, b0, b, k | identifier_body |
eval_kanungo_est.py | import env
import numpy as np
import metaomr
import metaomr.kanungo as kan
from metaomr.page import Page
import glob
import pandas as pd
import itertools
import os.path
import sys
from datetime import datetime
from random import random, randint
IDEAL = [path for path in sorted(glob.glob('testset/modern/*.png'))
if 'nostaff' not in path]
def random_params():
if random() < 0.25:
|
else:
nu = random() * 0.05
if random() < 0.25:
a0 = a = 0
else:
a0 = random() * 0.2
a = 0.5 + random() * 2
if random() < 0.25:
b0 = b = 0
else:
b0 = random() * 0.2
b = 0.5 + random() * 2
k = randint(0, 4)
return nu, a0, a, b0, b, k
columns = pd.MultiIndex.from_product([['real', 'estimate'], 'nu a0 a b0 b k'.split()])
columns = columns.append(pd.MultiIndex.from_product([['estimate'],['stat','time','status','nfev']]))
cols = []
results = []
fun = 'ks'
method = 'Nelder-Mead'
for image in IDEAL:
name = os.path.basename(image).split('.')[0]
page, = metaomr.open(image)
kimg = kan.KanungoImage(kan.normalized_page(page)[0])
for i in xrange(3):
params = random_params()
synth = Page(kimg.degrade(params))
synth.staff_dist = 8
for maxfev in [25, 50]:
start = datetime.now()
est_params = kan.est_parameters(synth, test_fn=kan.test_hists_ks if fun == 'ks' else kan.test_hists_chisq, opt_method=method, maxfev=maxfev)
end = datetime.now()
cols.append((name, fun, maxfev, i))
results.append(list(params) + list(est_params.x) + [est_params.fun, (end - start).total_seconds(), est_params.status, est_params.nfev])
sys.stderr.write('.')
res = pd.DataFrame(results, columns=columns)
res.index = pd.MultiIndex.from_tuples(cols)
res.index.names = 'doc test maxfev num'.split()
res.to_csv('kanungo_eval.csv')
sys.stderr.write('\n')
| nu = 0 | conditional_block |
eval_kanungo_est.py | import env
import numpy as np
import metaomr
import metaomr.kanungo as kan
from metaomr.page import Page
import glob
import pandas as pd
import itertools
import os.path
import sys
from datetime import datetime
from random import random, randint
IDEAL = [path for path in sorted(glob.glob('testset/modern/*.png'))
if 'nostaff' not in path]
def | ():
if random() < 0.25:
nu = 0
else:
nu = random() * 0.05
if random() < 0.25:
a0 = a = 0
else:
a0 = random() * 0.2
a = 0.5 + random() * 2
if random() < 0.25:
b0 = b = 0
else:
b0 = random() * 0.2
b = 0.5 + random() * 2
k = randint(0, 4)
return nu, a0, a, b0, b, k
columns = pd.MultiIndex.from_product([['real', 'estimate'], 'nu a0 a b0 b k'.split()])
columns = columns.append(pd.MultiIndex.from_product([['estimate'],['stat','time','status','nfev']]))
cols = []
results = []
fun = 'ks'
method = 'Nelder-Mead'
for image in IDEAL:
name = os.path.basename(image).split('.')[0]
page, = metaomr.open(image)
kimg = kan.KanungoImage(kan.normalized_page(page)[0])
for i in xrange(3):
params = random_params()
synth = Page(kimg.degrade(params))
synth.staff_dist = 8
for maxfev in [25, 50]:
start = datetime.now()
est_params = kan.est_parameters(synth, test_fn=kan.test_hists_ks if fun == 'ks' else kan.test_hists_chisq, opt_method=method, maxfev=maxfev)
end = datetime.now()
cols.append((name, fun, maxfev, i))
results.append(list(params) + list(est_params.x) + [est_params.fun, (end - start).total_seconds(), est_params.status, est_params.nfev])
sys.stderr.write('.')
res = pd.DataFrame(results, columns=columns)
res.index = pd.MultiIndex.from_tuples(cols)
res.index.names = 'doc test maxfev num'.split()
res.to_csv('kanungo_eval.csv')
sys.stderr.write('\n')
| random_params | identifier_name |
eval_kanungo_est.py | import env
import numpy as np
import metaomr
import metaomr.kanungo as kan
from metaomr.page import Page
import glob
import pandas as pd
import itertools
import os.path
import sys
from datetime import datetime
from random import random, randint
IDEAL = [path for path in sorted(glob.glob('testset/modern/*.png'))
if 'nostaff' not in path]
def random_params():
if random() < 0.25:
nu = 0
else:
nu = random() * 0.05
if random() < 0.25:
a0 = a = 0
else:
a0 = random() * 0.2
a = 0.5 + random() * 2
if random() < 0.25:
b0 = b = 0
else:
b0 = random() * 0.2
b = 0.5 + random() * 2
k = randint(0, 4)
return nu, a0, a, b0, b, k
columns = pd.MultiIndex.from_product([['real', 'estimate'], 'nu a0 a b0 b k'.split()])
columns = columns.append(pd.MultiIndex.from_product([['estimate'],['stat','time','status','nfev']]))
cols = []
results = []
fun = 'ks'
method = 'Nelder-Mead'
for image in IDEAL:
name = os.path.basename(image).split('.')[0]
page, = metaomr.open(image)
kimg = kan.KanungoImage(kan.normalized_page(page)[0])
for i in xrange(3):
params = random_params()
synth = Page(kimg.degrade(params))
synth.staff_dist = 8
for maxfev in [25, 50]:
start = datetime.now()
est_params = kan.est_parameters(synth, test_fn=kan.test_hists_ks if fun == 'ks' else kan.test_hists_chisq, opt_method=method, maxfev=maxfev)
end = datetime.now()
cols.append((name, fun, maxfev, i))
results.append(list(params) + list(est_params.x) + [est_params.fun, (end - start).total_seconds(), est_params.status, est_params.nfev])
sys.stderr.write('.') | res.index = pd.MultiIndex.from_tuples(cols)
res.index.names = 'doc test maxfev num'.split()
res.to_csv('kanungo_eval.csv')
sys.stderr.write('\n') | res = pd.DataFrame(results, columns=columns) | random_line_split |
SimpleSidebarListItemHeader.tsx | import { ReactNode, useRef } from 'react';
import { noop } from '@proton/shared/lib/helpers/function';
import Icon from '../icon/Icon';
import { classnames } from '../../helpers';
import SidebarListItem from './SidebarListItem';
import { HotkeyTuple, useHotkeys } from '../../hooks';
interface Props {
toggle: boolean;
onToggle: (display: boolean) => void;
hasCaret?: boolean;
right?: ReactNode;
text: string;
title?: string;
onFocus?: (id: string) => void;
id?: string;
}
const SimpleSidebarListItemHeader = ({
toggle,
onToggle,
hasCaret = true,
right,
text,
id,
title,
onFocus = noop,
}: Props) => {
const buttonRef = useRef<HTMLButtonElement>(null);
const shortcutHandlers: HotkeyTuple[] = [
[
'ArrowRight',
(e) => {
e.stopPropagation();
onToggle(true);
},
],
[
'ArrowLeft',
() => {
onToggle(false);
},
],
];
useHotkeys(buttonRef, shortcutHandlers);
| <SidebarListItem className="navigation-link-header-group">
<div className="flex flex-nowrap">
<button
ref={buttonRef}
className="text-uppercase flex-item-fluid text-left navigation-link-header-group-link"
type="button"
onClick={() => onToggle(!toggle)}
title={title}
aria-expanded={toggle}
onFocus={() => onFocus(id || '')}
data-shortcut-target={id}
>
<span className="mr0-5 text-sm">{text}</span>
{hasCaret && (
<Icon
name="angle-down"
className={classnames(['navigation-icon--expand', toggle && 'rotateX-180'])}
/>
)}
</button>
{right}
</div>
</SidebarListItem>
);
};
export default SimpleSidebarListItemHeader; | return ( | random_line_split |
remove.rs | use cli::parse_args;
use Slate;
use message::Message;
use results::CommandResult;
use errors::CommandError;
const USAGE: &'static str = "
Slate: Remove an element.
Usage:
slate remove ([options] | <key>)
Options:
-h --help Show this screen.
-a --all Remove all keys.
Examples:
slate remove --all
#=> All keys have been removed
slate remove foo
#=> The key has been removed
";
#[derive(Debug, Deserialize)]
struct Args {
arg_key: Option<String>,
flag_all: bool,
}
pub fn | (slate: &Slate, argv: &Vec<String>) -> CommandResult {
let args: Args = parse_args(USAGE, argv).unwrap_or_else(|e| e.exit());
if args.flag_all {
try!(slate.clear());
Ok(Some(Message::Info("All keys have been removed".to_string())))
} else {
let key: String = match args.arg_key {
Some(string) => string,
None => {
return Err(CommandError::Argument("You must provide the name of a key".to_string()))
}
};
try!(slate.remove(&key));
Ok(Some(Message::Info("The key has been removed".to_string())))
}
}
| run | identifier_name |
remove.rs | use cli::parse_args;
use Slate;
use message::Message;
use results::CommandResult;
use errors::CommandError;
const USAGE: &'static str = "
Slate: Remove an element.
Usage:
slate remove ([options] | <key>)
Options:
-h --help Show this screen.
-a --all Remove all keys.
Examples:
slate remove --all
#=> All keys have been removed
slate remove foo
#=> The key has been removed
";
#[derive(Debug, Deserialize)]
struct Args {
arg_key: Option<String>,
flag_all: bool,
}
pub fn run(slate: &Slate, argv: &Vec<String>) -> CommandResult | {
let args: Args = parse_args(USAGE, argv).unwrap_or_else(|e| e.exit());
if args.flag_all {
try!(slate.clear());
Ok(Some(Message::Info("All keys have been removed".to_string())))
} else {
let key: String = match args.arg_key {
Some(string) => string,
None => {
return Err(CommandError::Argument("You must provide the name of a key".to_string()))
}
};
try!(slate.remove(&key));
Ok(Some(Message::Info("The key has been removed".to_string())))
}
} | identifier_body |
|
remove.rs | use cli::parse_args;
use Slate;
use message::Message;
use results::CommandResult;
use errors::CommandError;
const USAGE: &'static str = "
Slate: Remove an element.
Usage:
slate remove ([options] | <key>)
Options:
-h --help Show this screen.
-a --all Remove all keys.
Examples: |
slate remove foo
#=> The key has been removed
";
#[derive(Debug, Deserialize)]
struct Args {
arg_key: Option<String>,
flag_all: bool,
}
pub fn run(slate: &Slate, argv: &Vec<String>) -> CommandResult {
let args: Args = parse_args(USAGE, argv).unwrap_or_else(|e| e.exit());
if args.flag_all {
try!(slate.clear());
Ok(Some(Message::Info("All keys have been removed".to_string())))
} else {
let key: String = match args.arg_key {
Some(string) => string,
None => {
return Err(CommandError::Argument("You must provide the name of a key".to_string()))
}
};
try!(slate.remove(&key));
Ok(Some(Message::Info("The key has been removed".to_string())))
}
} | slate remove --all
#=> All keys have been removed | random_line_split |
majority_voting_test.py | #to get some base functionality for free, including the methods get_params and set_params
#to set and return the classifier's parameters as well as the score method to calculate the
#prediction accuracy,respectively
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.preprocessing import LabelEncoder
#import six too make the MajorityVoteClassifier compatible with python2.7
from sklearn.externals import six
from sklearn.base import clone
from sklearn.pipeline import _name_estimators
import numpy as np
import operator
class MajorityVoteClassifier(BaseEstimator,ClassifierMixin):
""" A majority vote ensemble classifier
Parameters
----------
classifiers : array-like, shape = [n_classifiers]
Different classifiers for the ensemble
vote : str, {'classlabel', 'probability'}
Default: 'classlabel'
If 'classlabel' the prediction is based on
the argmax of class labels. Else if
'probability', the argmax of the sum of
probabilities is used to predict the class label
(recommended for calibrated classifiers).
weights : array-like, shape = [n_classifiers]
Optional, default: None
If a list of `int` or `float` values are
provided , the classifiers are weithed by importance;
Uses uniform weights if 'weights = None'
"""
def __init__(self, classifiers,vote='classlabel', weights=None):
self.classifiers = classifiers
self.named_classifiers = {key: value for key, value in _name_estimators(classifiers)}
self.vote = vote
self.weights = weights
def fit(self, X, y):
""" Fit classifiers.
Parameters
----------
X : {array-like, sparse matrix},
shape = [n_samples, n_features]
Matrix of training samples.
y : array-like, shape = [n_samples]
Vector of target class labels.
Returns
-------
self : object
"""
# Use LabelEncoder to ensure class labels start
# with 0, which is important for np.argmax
# call in self.predict
self.lablenc_ = LabelEncoder()
self.lablenc_.fit(y)
self.classes_ = self.lablenc_.classes_
self.classifiers_ = []
for clf in self.classifiers:
fitted_clf = clone(clf).fit(X,self.lablenc_.transform(y))
self.classifiers_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix},
Shape = [n_samples, n_features]
Matrix of training samples
Returns
----------
maj_vote : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.vote == 'probability':
maj_vote = np.argmax(self.predict_proba(X),axis=1)
else:
# 'classlabel' vote
# Collect results from clf.predict calls
predictions = np.asarray([clf.predict(X) for clf in self.classifiers_]).T
maj_vote = np.apply_along_axis(lambda x: np.argmax(np.bincount(x,weights=self.weights)),axis=1,arr=predictions)
maj_vote = self.lablenc_.inverse_transform(maj_vote)
return maj_vote
def predict_proba(self, X):
""" Predict class probabilities for X.
Parameters
----------
X : {array-like, sparse matrix},
shape = [n_samples, n_features]
Training vectors, where n_samples is
the number of samples and
n_features is the number of features.
Returns
----------
avg_proba : array-like,
shape = [n_samples, n_classes]
Weighted average probability for
each class per sample.
"""
probas = np.asarray([clf.predict_proba(X) for clf in self.classifiers_])
avg_proba = np.average(probas,axis=0, weights=self.weights)
return avg_proba
def get_params(self, deep=True):
""" Get classifier parameter names for GridSearch"""
if not deep:
return super(MajorityVoteClassifier,self).get_params(deep=False)
else:
out = self.named_classifiers.copy()
for name, step in six.iteritems(self.named_classifiers):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
#get datas
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
iris = datasets.load_iris()
X,y = iris.data[50:,[1,2]],iris.target[50:]
le = LabelEncoder()
y = le.fit_transform(y)
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.5,random_state = 1)
#train logistic regression classifier, decision tree, k-nearest neightbor respectively
from sklearn.cross_validation import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
import numpy as np
clf1 = LogisticRegression(penalty = 'l2',C = 0.001,random_state = 0)
clf2 = DecisionTreeClassifier(max_depth = 1,criterion = 'entropy',random_state = 0)
clf3 = KNeighborsClassifier(n_neighbors = 1,p=2,metric = 'minkowski')
pipe1 = Pipeline([['sc',StandardScaler()],['clf',clf1]])
pipe3 = Pipeline([['sc',StandardScaler()],['clf',clf3]])
clf_labels = ['Logistic Regression','Decision Tree','KNN']
print('10-fold cross validation:\n')
for clf,label in zip([pipe1,clf2,pipe3],clf_labels):
scores = cross_val_score(estimator = clf,
X=X_train,
y=y_train,
cv=10,
scoring = 'roc_auc')
print ("ROC AUC: %0.2f (+/- %0.2f) [%s]" % (scores.mean(),scores.std(),label))
#combine the individual classifiers for majority rule voting in our MajorityVoteClassifier
#import os
#pwd = os.getcwd()
#os.chdir('E:\\machine-learning\\19-Ensemble Learning\\')
#from majority_voting import MajorityVoteClassifier
mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])
clf_labels += ['Majority Voting']
all_clf = [pipe1, clf2, pipe3, mv_clf]
for clf, label in zip(all_clf, clf_labels):
scores = cross_val_score(estimator=clf,X=X_train,y=y_train,cv=10,scoring='roc_auc')
print("Accuracy: %0.2f (+/- %0.2f) [%s]"% (scores.mean(), scores.std(), label))
#os.chdir(pwd)
#compute the ROC curves from the test set to check if the MajorityVoteClassifier generalizes well to unseen data
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import matplotlib.pyplot as plt
colors = ['black','orange','blue','green']
linestyles = [':', '--', '-.', '-']
for clf,label,clr,ls in zip(all_clf,clf_labels,colors,linestyles):
#assuming the label of the positive class is 1
y_pred = clf.fit(X_train,y_train).predict_proba(X_test)[:,1]
fpr,tpr,thresholds = roc_curve(y_true = y_test,y_score = y_pred)
roc_auc = auc(x= fpr,y=tpr)
plt.plot(fpr,tpr,color = clr,linestyle = ls,label = '%s (auc = %0.2f)' % (label,roc_auc))
plt.legend(loc = 'lower right') | plt.ylabel('True Positive Rate')
plt.show()
#tune the inverse regularization parameter C of the logistic regression classifier and the decision tree
#depth via a grid search for demonstration purposes
from sklearn.grid_search import GridSearchCV
params = {'decisiontreeclassifier__max_depth':[1,2],'pipeline-1__clf__C':[0.001,0.1,100.0]}
grid = GridSearchCV(estimator = mv_clf,param_grid=params,cv = 10,scoring = 'roc_auc')
grid.fit(X_train,y_train)
for params,mean_score,scores in grid.grid_scores_:
print('%0.3f +/- %0.2f %r' % (mean_score,scores.std()/2,params))
print('Best parameters : %s' % grid.best_params_)
print('Accuracy: %.2f' % grid.best_score_) | plt.plot([0, 1], [0, 1],linestyle='--',color='gray',linewidth=2)
plt.xlim([-0.1, 1.1])
plt.ylim([-0.1, 1.1])
plt.grid()
plt.xlabel('False Positive Rate') | random_line_split |
majority_voting_test.py | #to get some base functionality for free, including the methods get_params and set_params
#to set and return the classifier's parameters as well as the score method to calculate the
#prediction accuracy,respectively
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.preprocessing import LabelEncoder
#import six too make the MajorityVoteClassifier compatible with python2.7
from sklearn.externals import six
from sklearn.base import clone
from sklearn.pipeline import _name_estimators
import numpy as np
import operator
class MajorityVoteClassifier(BaseEstimator,ClassifierMixin):
""" A majority vote ensemble classifier
Parameters
----------
classifiers : array-like, shape = [n_classifiers]
Different classifiers for the ensemble
vote : str, {'classlabel', 'probability'}
Default: 'classlabel'
If 'classlabel' the prediction is based on
the argmax of class labels. Else if
'probability', the argmax of the sum of
probabilities is used to predict the class label
(recommended for calibrated classifiers).
weights : array-like, shape = [n_classifiers]
Optional, default: None
If a list of `int` or `float` values are
provided , the classifiers are weithed by importance;
Uses uniform weights if 'weights = None'
"""
def __init__(self, classifiers,vote='classlabel', weights=None):
self.classifiers = classifiers
self.named_classifiers = {key: value for key, value in _name_estimators(classifiers)}
self.vote = vote
self.weights = weights
def fit(self, X, y):
""" Fit classifiers.
Parameters
----------
X : {array-like, sparse matrix},
shape = [n_samples, n_features]
Matrix of training samples.
y : array-like, shape = [n_samples]
Vector of target class labels.
Returns
-------
self : object
"""
# Use LabelEncoder to ensure class labels start
# with 0, which is important for np.argmax
# call in self.predict
self.lablenc_ = LabelEncoder()
self.lablenc_.fit(y)
self.classes_ = self.lablenc_.classes_
self.classifiers_ = []
for clf in self.classifiers:
fitted_clf = clone(clf).fit(X,self.lablenc_.transform(y))
self.classifiers_.append(fitted_clf)
return self
def predict(self, X):
| maj_vote = np.apply_along_axis(lambda x: np.argmax(np.bincount(x,weights=self.weights)),axis=1,arr=predictions)
maj_vote = self.lablenc_.inverse_transform(maj_vote)
return maj_vote
def predict_proba(self, X):
""" Predict class probabilities for X.
Parameters
----------
X : {array-like, sparse matrix},
shape = [n_samples, n_features]
Training vectors, where n_samples is
the number of samples and
n_features is the number of features.
Returns
----------
avg_proba : array-like,
shape = [n_samples, n_classes]
Weighted average probability for
each class per sample.
"""
probas = np.asarray([clf.predict_proba(X) for clf in self.classifiers_])
avg_proba = np.average(probas,axis=0, weights=self.weights)
return avg_proba
def get_params(self, deep=True):
""" Get classifier parameter names for GridSearch"""
if not deep:
return super(MajorityVoteClassifier,self).get_params(deep=False)
else:
out = self.named_classifiers.copy()
for name, step in six.iteritems(self.named_classifiers):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
#get datas
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
iris = datasets.load_iris()
X,y = iris.data[50:,[1,2]],iris.target[50:]
le = LabelEncoder()
y = le.fit_transform(y)
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.5,random_state = 1)
#train logistic regression classifier, decision tree, k-nearest neightbor respectively
from sklearn.cross_validation import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
import numpy as np
clf1 = LogisticRegression(penalty = 'l2',C = 0.001,random_state = 0)
clf2 = DecisionTreeClassifier(max_depth = 1,criterion = 'entropy',random_state = 0)
clf3 = KNeighborsClassifier(n_neighbors = 1,p=2,metric = 'minkowski')
pipe1 = Pipeline([['sc',StandardScaler()],['clf',clf1]])
pipe3 = Pipeline([['sc',StandardScaler()],['clf',clf3]])
clf_labels = ['Logistic Regression','Decision Tree','KNN']
print('10-fold cross validation:\n')
for clf,label in zip([pipe1,clf2,pipe3],clf_labels):
scores = cross_val_score(estimator = clf,
X=X_train,
y=y_train,
cv=10,
scoring = 'roc_auc')
print ("ROC AUC: %0.2f (+/- %0.2f) [%s]" % (scores.mean(),scores.std(),label))
#combine the individual classifiers for majority rule voting in our MajorityVoteClassifier
#import os
#pwd = os.getcwd()
#os.chdir('E:\\machine-learning\\19-Ensemble Learning\\')
#from majority_voting import MajorityVoteClassifier
mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])
clf_labels += ['Majority Voting']
all_clf = [pipe1, clf2, pipe3, mv_clf]
for clf, label in zip(all_clf, clf_labels):
scores = cross_val_score(estimator=clf,X=X_train,y=y_train,cv=10,scoring='roc_auc')
print("Accuracy: %0.2f (+/- %0.2f) [%s]"% (scores.mean(), scores.std(), label))
#os.chdir(pwd)
#compute the ROC curves from the test set to check if the MajorityVoteClassifier generalizes well to unseen data
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import matplotlib.pyplot as plt
colors = ['black','orange','blue','green']
linestyles = [':', '--', '-.', '-']
for clf,label,clr,ls in zip(all_clf,clf_labels,colors,linestyles):
#assuming the label of the positive class is 1
y_pred = clf.fit(X_train,y_train).predict_proba(X_test)[:,1]
fpr,tpr,thresholds = roc_curve(y_true = y_test,y_score = y_pred)
roc_auc = auc(x= fpr,y=tpr)
plt.plot(fpr,tpr,color = clr,linestyle = ls,label = '%s (auc = %0.2f)' % (label,roc_auc))
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],linestyle='--',color='gray',linewidth=2)
plt.xlim([-0.1, 1.1])
plt.ylim([-0.1, 1.1])
plt.grid()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.show()
#tune the inverse regularization parameter C of the logistic regression classifier and the decision tree
#depth via a grid search for demonstration purposes
from sklearn.grid_search import GridSearchCV
params = {'decisiontreeclassifier__max_depth':[1,2],'pipeline-1__clf__C':[0.001,0.1,100.0]}
grid = GridSearchCV(estimator = mv_clf,param_grid=params,cv = 10,scoring = 'roc_auc')
grid.fit(X_train,y_train)
for params,mean_score,scores in grid.grid_scores_:
print('%0.3f +/- %0.2f %r' % (mean_score,scores.std()/2,params))
print('Best parameters : %s' % grid.best_params_)
print('Accuracy: %.2f' % grid.best_score_)
| """ Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix},
Shape = [n_samples, n_features]
Matrix of training samples
Returns
----------
maj_vote : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.vote == 'probability':
maj_vote = np.argmax(self.predict_proba(X),axis=1)
else:
# 'classlabel' vote
# Collect results from clf.predict calls
predictions = np.asarray([clf.predict(X) for clf in self.classifiers_]).T | identifier_body |
majority_voting_test.py | #to get some base functionality for free, including the methods get_params and set_params
#to set and return the classifier's parameters as well as the score method to calculate the
#prediction accuracy,respectively
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.preprocessing import LabelEncoder
#import six too make the MajorityVoteClassifier compatible with python2.7
from sklearn.externals import six
from sklearn.base import clone
from sklearn.pipeline import _name_estimators
import numpy as np
import operator
class MajorityVoteClassifier(BaseEstimator,ClassifierMixin):
""" A majority vote ensemble classifier
Parameters
----------
classifiers : array-like, shape = [n_classifiers]
Different classifiers for the ensemble
vote : str, {'classlabel', 'probability'}
Default: 'classlabel'
If 'classlabel' the prediction is based on
the argmax of class labels. Else if
'probability', the argmax of the sum of
probabilities is used to predict the class label
(recommended for calibrated classifiers).
weights : array-like, shape = [n_classifiers]
Optional, default: None
If a list of `int` or `float` values are
provided , the classifiers are weithed by importance;
Uses uniform weights if 'weights = None'
"""
def __init__(self, classifiers,vote='classlabel', weights=None):
self.classifiers = classifiers
self.named_classifiers = {key: value for key, value in _name_estimators(classifiers)}
self.vote = vote
self.weights = weights
def | (self, X, y):
""" Fit classifiers.
Parameters
----------
X : {array-like, sparse matrix},
shape = [n_samples, n_features]
Matrix of training samples.
y : array-like, shape = [n_samples]
Vector of target class labels.
Returns
-------
self : object
"""
# Use LabelEncoder to ensure class labels start
# with 0, which is important for np.argmax
# call in self.predict
self.lablenc_ = LabelEncoder()
self.lablenc_.fit(y)
self.classes_ = self.lablenc_.classes_
self.classifiers_ = []
for clf in self.classifiers:
fitted_clf = clone(clf).fit(X,self.lablenc_.transform(y))
self.classifiers_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix},
Shape = [n_samples, n_features]
Matrix of training samples
Returns
----------
maj_vote : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.vote == 'probability':
maj_vote = np.argmax(self.predict_proba(X),axis=1)
else:
# 'classlabel' vote
# Collect results from clf.predict calls
predictions = np.asarray([clf.predict(X) for clf in self.classifiers_]).T
maj_vote = np.apply_along_axis(lambda x: np.argmax(np.bincount(x,weights=self.weights)),axis=1,arr=predictions)
maj_vote = self.lablenc_.inverse_transform(maj_vote)
return maj_vote
def predict_proba(self, X):
""" Predict class probabilities for X.
Parameters
----------
X : {array-like, sparse matrix},
shape = [n_samples, n_features]
Training vectors, where n_samples is
the number of samples and
n_features is the number of features.
Returns
----------
avg_proba : array-like,
shape = [n_samples, n_classes]
Weighted average probability for
each class per sample.
"""
probas = np.asarray([clf.predict_proba(X) for clf in self.classifiers_])
avg_proba = np.average(probas,axis=0, weights=self.weights)
return avg_proba
def get_params(self, deep=True):
""" Get classifier parameter names for GridSearch"""
if not deep:
return super(MajorityVoteClassifier,self).get_params(deep=False)
else:
out = self.named_classifiers.copy()
for name, step in six.iteritems(self.named_classifiers):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
#get datas
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
iris = datasets.load_iris()
X,y = iris.data[50:,[1,2]],iris.target[50:]
le = LabelEncoder()
y = le.fit_transform(y)
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.5,random_state = 1)
#train logistic regression classifier, decision tree, k-nearest neightbor respectively
from sklearn.cross_validation import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
import numpy as np
clf1 = LogisticRegression(penalty = 'l2',C = 0.001,random_state = 0)
clf2 = DecisionTreeClassifier(max_depth = 1,criterion = 'entropy',random_state = 0)
clf3 = KNeighborsClassifier(n_neighbors = 1,p=2,metric = 'minkowski')
pipe1 = Pipeline([['sc',StandardScaler()],['clf',clf1]])
pipe3 = Pipeline([['sc',StandardScaler()],['clf',clf3]])
clf_labels = ['Logistic Regression','Decision Tree','KNN']
print('10-fold cross validation:\n')
for clf,label in zip([pipe1,clf2,pipe3],clf_labels):
scores = cross_val_score(estimator = clf,
X=X_train,
y=y_train,
cv=10,
scoring = 'roc_auc')
print ("ROC AUC: %0.2f (+/- %0.2f) [%s]" % (scores.mean(),scores.std(),label))
#combine the individual classifiers for majority rule voting in our MajorityVoteClassifier
#import os
#pwd = os.getcwd()
#os.chdir('E:\\machine-learning\\19-Ensemble Learning\\')
#from majority_voting import MajorityVoteClassifier
mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])
clf_labels += ['Majority Voting']
all_clf = [pipe1, clf2, pipe3, mv_clf]
for clf, label in zip(all_clf, clf_labels):
scores = cross_val_score(estimator=clf,X=X_train,y=y_train,cv=10,scoring='roc_auc')
print("Accuracy: %0.2f (+/- %0.2f) [%s]"% (scores.mean(), scores.std(), label))
#os.chdir(pwd)
#compute the ROC curves from the test set to check if the MajorityVoteClassifier generalizes well to unseen data
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import matplotlib.pyplot as plt
colors = ['black','orange','blue','green']
linestyles = [':', '--', '-.', '-']
for clf,label,clr,ls in zip(all_clf,clf_labels,colors,linestyles):
#assuming the label of the positive class is 1
y_pred = clf.fit(X_train,y_train).predict_proba(X_test)[:,1]
fpr,tpr,thresholds = roc_curve(y_true = y_test,y_score = y_pred)
roc_auc = auc(x= fpr,y=tpr)
plt.plot(fpr,tpr,color = clr,linestyle = ls,label = '%s (auc = %0.2f)' % (label,roc_auc))
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],linestyle='--',color='gray',linewidth=2)
plt.xlim([-0.1, 1.1])
plt.ylim([-0.1, 1.1])
plt.grid()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.show()
#tune the inverse regularization parameter C of the logistic regression classifier and the decision tree
#depth via a grid search for demonstration purposes
from sklearn.grid_search import GridSearchCV
params = {'decisiontreeclassifier__max_depth':[1,2],'pipeline-1__clf__C':[0.001,0.1,100.0]}
grid = GridSearchCV(estimator = mv_clf,param_grid=params,cv = 10,scoring = 'roc_auc')
grid.fit(X_train,y_train)
for params,mean_score,scores in grid.grid_scores_:
print('%0.3f +/- %0.2f %r' % (mean_score,scores.std()/2,params))
print('Best parameters : %s' % grid.best_params_)
print('Accuracy: %.2f' % grid.best_score_)
| fit | identifier_name |
majority_voting_test.py | #to get some base functionality for free, including the methods get_params and set_params
#to set and return the classifier's parameters as well as the score method to calculate the
#prediction accuracy,respectively
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.preprocessing import LabelEncoder
#import six too make the MajorityVoteClassifier compatible with python2.7
from sklearn.externals import six
from sklearn.base import clone
from sklearn.pipeline import _name_estimators
import numpy as np
import operator
class MajorityVoteClassifier(BaseEstimator,ClassifierMixin):
""" A majority vote ensemble classifier
Parameters
----------
classifiers : array-like, shape = [n_classifiers]
Different classifiers for the ensemble
vote : str, {'classlabel', 'probability'}
Default: 'classlabel'
If 'classlabel' the prediction is based on
the argmax of class labels. Else if
'probability', the argmax of the sum of
probabilities is used to predict the class label
(recommended for calibrated classifiers).
weights : array-like, shape = [n_classifiers]
Optional, default: None
If a list of `int` or `float` values are
provided , the classifiers are weithed by importance;
Uses uniform weights if 'weights = None'
"""
def __init__(self, classifiers,vote='classlabel', weights=None):
self.classifiers = classifiers
self.named_classifiers = {key: value for key, value in _name_estimators(classifiers)}
self.vote = vote
self.weights = weights
def fit(self, X, y):
""" Fit classifiers.
Parameters
----------
X : {array-like, sparse matrix},
shape = [n_samples, n_features]
Matrix of training samples.
y : array-like, shape = [n_samples]
Vector of target class labels.
Returns
-------
self : object
"""
# Use LabelEncoder to ensure class labels start
# with 0, which is important for np.argmax
# call in self.predict
self.lablenc_ = LabelEncoder()
self.lablenc_.fit(y)
self.classes_ = self.lablenc_.classes_
self.classifiers_ = []
for clf in self.classifiers:
fitted_clf = clone(clf).fit(X,self.lablenc_.transform(y))
self.classifiers_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix},
Shape = [n_samples, n_features]
Matrix of training samples
Returns
----------
maj_vote : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.vote == 'probability':
maj_vote = np.argmax(self.predict_proba(X),axis=1)
else:
# 'classlabel' vote
# Collect results from clf.predict calls
predictions = np.asarray([clf.predict(X) for clf in self.classifiers_]).T
maj_vote = np.apply_along_axis(lambda x: np.argmax(np.bincount(x,weights=self.weights)),axis=1,arr=predictions)
maj_vote = self.lablenc_.inverse_transform(maj_vote)
return maj_vote
def predict_proba(self, X):
""" Predict class probabilities for X.
Parameters
----------
X : {array-like, sparse matrix},
shape = [n_samples, n_features]
Training vectors, where n_samples is
the number of samples and
n_features is the number of features.
Returns
----------
avg_proba : array-like,
shape = [n_samples, n_classes]
Weighted average probability for
each class per sample.
"""
probas = np.asarray([clf.predict_proba(X) for clf in self.classifiers_])
avg_proba = np.average(probas,axis=0, weights=self.weights)
return avg_proba
def get_params(self, deep=True):
""" Get classifier parameter names for GridSearch"""
if not deep:
return super(MajorityVoteClassifier,self).get_params(deep=False)
else:
out = self.named_classifiers.copy()
for name, step in six.iteritems(self.named_classifiers):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
#get datas
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
iris = datasets.load_iris()
X,y = iris.data[50:,[1,2]],iris.target[50:]
le = LabelEncoder()
y = le.fit_transform(y)
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.5,random_state = 1)
#train logistic regression classifier, decision tree, k-nearest neightbor respectively
from sklearn.cross_validation import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
import numpy as np
clf1 = LogisticRegression(penalty = 'l2',C = 0.001,random_state = 0)
clf2 = DecisionTreeClassifier(max_depth = 1,criterion = 'entropy',random_state = 0)
clf3 = KNeighborsClassifier(n_neighbors = 1,p=2,metric = 'minkowski')
pipe1 = Pipeline([['sc',StandardScaler()],['clf',clf1]])
pipe3 = Pipeline([['sc',StandardScaler()],['clf',clf3]])
clf_labels = ['Logistic Regression','Decision Tree','KNN']
print('10-fold cross validation:\n')
for clf,label in zip([pipe1,clf2,pipe3],clf_labels):
|
#combine the individual classifiers for majority rule voting in our MajorityVoteClassifier
#import os
#pwd = os.getcwd()
#os.chdir('E:\\machine-learning\\19-Ensemble Learning\\')
#from majority_voting import MajorityVoteClassifier
mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])
clf_labels += ['Majority Voting']
all_clf = [pipe1, clf2, pipe3, mv_clf]
for clf, label in zip(all_clf, clf_labels):
scores = cross_val_score(estimator=clf,X=X_train,y=y_train,cv=10,scoring='roc_auc')
print("Accuracy: %0.2f (+/- %0.2f) [%s]"% (scores.mean(), scores.std(), label))
#os.chdir(pwd)
#compute the ROC curves from the test set to check if the MajorityVoteClassifier generalizes well to unseen data
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import matplotlib.pyplot as plt
colors = ['black','orange','blue','green']
linestyles = [':', '--', '-.', '-']
for clf,label,clr,ls in zip(all_clf,clf_labels,colors,linestyles):
#assuming the label of the positive class is 1
y_pred = clf.fit(X_train,y_train).predict_proba(X_test)[:,1]
fpr,tpr,thresholds = roc_curve(y_true = y_test,y_score = y_pred)
roc_auc = auc(x= fpr,y=tpr)
plt.plot(fpr,tpr,color = clr,linestyle = ls,label = '%s (auc = %0.2f)' % (label,roc_auc))
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],linestyle='--',color='gray',linewidth=2)
plt.xlim([-0.1, 1.1])
plt.ylim([-0.1, 1.1])
plt.grid()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.show()
#tune the inverse regularization parameter C of the logistic regression classifier and the decision tree
#depth via a grid search for demonstration purposes
from sklearn.grid_search import GridSearchCV
params = {'decisiontreeclassifier__max_depth':[1,2],'pipeline-1__clf__C':[0.001,0.1,100.0]}
grid = GridSearchCV(estimator = mv_clf,param_grid=params,cv = 10,scoring = 'roc_auc')
grid.fit(X_train,y_train)
for params,mean_score,scores in grid.grid_scores_:
print('%0.3f +/- %0.2f %r' % (mean_score,scores.std()/2,params))
print('Best parameters : %s' % grid.best_params_)
print('Accuracy: %.2f' % grid.best_score_)
| scores = cross_val_score(estimator = clf,
X=X_train,
y=y_train,
cv=10,
scoring = 'roc_auc')
print ("ROC AUC: %0.2f (+/- %0.2f) [%s]" % (scores.mean(),scores.std(),label)) | conditional_block |
yarrharr.py | # -*- coding: utf-8 -*-
# Copyright © 2013, 2014, 2017, 2020 Tom Most <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this Program, or any covered work, by linking or
# combining it with OpenSSL (or a modified version of that library),
# containing parts covered by the terms of the OpenSSL License, the
# licensors of this Program grant you additional permission to convey
# the resulting work. Corresponding Source for a non-source form of
# such a combination shall include the source code for the parts of
# OpenSSL used as well as that of the covered work.
from __future__ import absolute_import
import argparse
import os
import sys
import yarrharr
def m | argv=sys.argv[1:]):
parser = argparse.ArgumentParser(description="Yarrharr feed reader")
parser.add_argument("--version", action="version", version=yarrharr.__version__)
parser.parse_args(argv)
os.environ["DJANGO_SETTINGS_MODULE"] = "yarrharr.settings"
from yarrharr.application import run
run()
| ain( | identifier_name |
yarrharr.py | # -*- coding: utf-8 -*-
# Copyright © 2013, 2014, 2017, 2020 Tom Most <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this Program, or any covered work, by linking or
# combining it with OpenSSL (or a modified version of that library),
# containing parts covered by the terms of the OpenSSL License, the
# licensors of this Program grant you additional permission to convey
# the resulting work. Corresponding Source for a non-source form of
# such a combination shall include the source code for the parts of
# OpenSSL used as well as that of the covered work.
from __future__ import absolute_import
import argparse
import os
import sys
import yarrharr
def main(argv=sys.argv[1:]):
p | arser = argparse.ArgumentParser(description="Yarrharr feed reader")
parser.add_argument("--version", action="version", version=yarrharr.__version__)
parser.parse_args(argv)
os.environ["DJANGO_SETTINGS_MODULE"] = "yarrharr.settings"
from yarrharr.application import run
run()
| identifier_body |
|
yarrharr.py | # -*- coding: utf-8 -*-
# Copyright © 2013, 2014, 2017, 2020 Tom Most <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this Program, or any covered work, by linking or
# combining it with OpenSSL (or a modified version of that library),
# containing parts covered by the terms of the OpenSSL License, the
# licensors of this Program grant you additional permission to convey | # the resulting work. Corresponding Source for a non-source form of
# such a combination shall include the source code for the parts of
# OpenSSL used as well as that of the covered work.
from __future__ import absolute_import
import argparse
import os
import sys
import yarrharr
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(description="Yarrharr feed reader")
parser.add_argument("--version", action="version", version=yarrharr.__version__)
parser.parse_args(argv)
os.environ["DJANGO_SETTINGS_MODULE"] = "yarrharr.settings"
from yarrharr.application import run
run() | random_line_split |
|
transform.rs | use geometry::Transformable;
use math;
#[derive(Deserialize, Debug)]
#[serde(tag = "type")]
pub enum Transform {
Translate { value: [f32; 3] },
Scale { value: [f32; 3] },
RotateX { value: f32 },
RotateY { value: f32 },
RotateZ { value: f32 },
}
impl Transform {
pub fn to_transform(&self) -> math::Transform {
match *self {
Transform::Translate { value } => {
math::Transform::new(math::Matrix4::translate(value[0], value[1], value[2]))
}
Transform::Scale { value } => {
math::Transform::new(math::Matrix4::scale(value[0], value[1], value[2]))
}
Transform::RotateX { value } => math::Transform::new(math::Matrix4::rot_x(value)),
Transform::RotateY { value } => math::Transform::new(math::Matrix4::rot_y(value)),
Transform::RotateZ { value } => math::Transform::new(math::Matrix4::rot_z(value)),
}
}
pub fn perform(&self, transformable: &mut dyn Transformable) |
}
| {
let transform = self.to_transform();
transformable.transform(&transform);
} | identifier_body |
transform.rs | use geometry::Transformable;
use math;
#[derive(Deserialize, Debug)]
#[serde(tag = "type")]
pub enum Transform {
Translate { value: [f32; 3] },
Scale { value: [f32; 3] },
RotateX { value: f32 },
RotateY { value: f32 },
RotateZ { value: f32 },
}
impl Transform {
pub fn | (&self) -> math::Transform {
match *self {
Transform::Translate { value } => {
math::Transform::new(math::Matrix4::translate(value[0], value[1], value[2]))
}
Transform::Scale { value } => {
math::Transform::new(math::Matrix4::scale(value[0], value[1], value[2]))
}
Transform::RotateX { value } => math::Transform::new(math::Matrix4::rot_x(value)),
Transform::RotateY { value } => math::Transform::new(math::Matrix4::rot_y(value)),
Transform::RotateZ { value } => math::Transform::new(math::Matrix4::rot_z(value)),
}
}
pub fn perform(&self, transformable: &mut dyn Transformable) {
let transform = self.to_transform();
transformable.transform(&transform);
}
}
| to_transform | identifier_name |
transform.rs | use geometry::Transformable;
use math;
#[derive(Deserialize, Debug)]
#[serde(tag = "type")]
pub enum Transform {
Translate { value: [f32; 3] },
Scale { value: [f32; 3] },
RotateX { value: f32 },
RotateY { value: f32 },
RotateZ { value: f32 },
}
impl Transform {
pub fn to_transform(&self) -> math::Transform {
match *self {
Transform::Translate { value } => {
math::Transform::new(math::Matrix4::translate(value[0], value[1], value[2]))
}
Transform::Scale { value } => |
Transform::RotateX { value } => math::Transform::new(math::Matrix4::rot_x(value)),
Transform::RotateY { value } => math::Transform::new(math::Matrix4::rot_y(value)),
Transform::RotateZ { value } => math::Transform::new(math::Matrix4::rot_z(value)),
}
}
pub fn perform(&self, transformable: &mut dyn Transformable) {
let transform = self.to_transform();
transformable.transform(&transform);
}
}
| {
math::Transform::new(math::Matrix4::scale(value[0], value[1], value[2]))
} | conditional_block |
transform.rs | use geometry::Transformable;
use math;
#[derive(Deserialize, Debug)]
#[serde(tag = "type")]
pub enum Transform {
Translate { value: [f32; 3] },
Scale { value: [f32; 3] },
RotateX { value: f32 },
RotateY { value: f32 },
RotateZ { value: f32 },
}
impl Transform {
pub fn to_transform(&self) -> math::Transform {
match *self {
Transform::Translate { value } => {
math::Transform::new(math::Matrix4::translate(value[0], value[1], value[2]))
}
Transform::Scale { value } => {
math::Transform::new(math::Matrix4::scale(value[0], value[1], value[2]))
}
Transform::RotateX { value } => math::Transform::new(math::Matrix4::rot_x(value)),
Transform::RotateY { value } => math::Transform::new(math::Matrix4::rot_y(value)),
Transform::RotateZ { value } => math::Transform::new(math::Matrix4::rot_z(value)),
}
} |
pub fn perform(&self, transformable: &mut dyn Transformable) {
let transform = self.to_transform();
transformable.transform(&transform);
}
} | random_line_split |
|
textbox.js | instead of from right to left.
* ' The start or end of a literal part.
* " The start or end of a literal part.
* > Converts all characters that follow to uppercase.
* < Converts all characters that follow to lowercase.
* \ Cancel the special meaning of a character.
* Example:
* An american style phone number.
* <code>
* <a:textbox mask="(000)0000-0000;;_" />
* </code>
* Example:
* A dutch postal code
* <code>
* <a:textbox mask="0000 AA;;_" />
* </code>
* Example:
* A date
* <code>
* <a:textbox mask="00-00-0000;;_" datatype="xsd:date" />
* </code>
* Example:
* A serial number
* <code>
* <a:textbox mask="'WCS74'0000-00000;1;_" />
* </code>
* Example:
* A MAC address
* <code>
* <a:textbox mask="XX-XX-XX-XX-XX-XX;;_" />
* </code>
*/
this.$propHandlers["mask"] = function(value){
if (this.mask.toLowerCase() == "password")// || !apf.hasMsRangeObject)
return;
if (!value) {
throw new Error("Not Implemented");
}
if (!this.$masking) {
this.$masking = true;
this.implement(apf.textbox.masking);
this.focusselect = false;
//this.realtime = false;
}
this.setMask(this.mask);
};
//this.$propHandlers["ref"] = function(value) {
// this.$input.setAttribute("name", value.split("/").pop().split("::").pop()
// .replace(/[\@\.\(\)]*/g, ""));
//};
/**
* @attribute {String} initial-message the message displayed by this element
* when it doesn't have a value set. This property is inherited from parent
* nodes. When none is found it is looked for on the appsettings element.
*/
this.$propHandlers["initial-message"] = function(value){
if (value) {
//#ifdef __WITH_WINDOW_FOCUS
if (apf.hasFocusBug)
this.$input.onblur();
//#endif
//this.$propHandlers["value"].call(this, value, null, true);
}
if (!this.value)
this.$clear(true);
if (this.type == "password" && this.$inputInitFix) {
this.$inputInitFix.innerHTML = value;
apf.setStyleClass(this.$inputInitFix, "initFxEnabled");
}
};
/**
* @attribute {Boolean} focusselect whether the text in this element is
* selected when this element receives focus.
*/
this.$propHandlers["focusselect"] = function(value){
var _self = this;
this.$input.onmousedown = function(){
_self.focusselect = false;
};
this.$input.onmouseup =
this.$input.onmouseout = function(){
_self.focusselect = value;
};
};
/**
* @attribute {String} type the type or function this element represents.
* This can be any arbitrary name. Although there are some special values.
* Possible values:
* username this element is used to type in the name part of login credentials.
* password this element is used to type in the password part of login credentials.
*/
this.$propHandlers["type"] = function(value){
if (value && "password|username".indexOf(value) > -1
&& typeof this.focusselect == "undefined") {
this.focusselect = true;
this.$propHandlers["focusselect"].call(this, true);
}
};
this.$isTextInput = function(e){
return true;
};
/**** Public Methods ****/
//#ifdef __WITH_CONVENIENCE_API
/**
* Sets the value of this element. This should be one of the values
* specified in the values attribute.
* @param {String} value the new value of this element
*/
this.setValue = function(value){
return this.setProperty("value", value, false, true);
};
this.clear = function(){
this.setProperty("value", "");
}
//@todo cleanup and put initial-message behaviour in one location
this.$clear = function(noEvent){
if (this["initial-message"]) {
apf.setStyleClass(this.$ext, this.$baseCSSname + "Initial");
this.$propHandlers["value"].call(this, this["initial-message"], null, null, true);
}
else {
this.$propHandlers["value"].call(this, "", null, null, true);
}
if (!noEvent)
this.dispatchEvent("clear");//@todo this should work via value change
}
/**
* Returns the current value of this element.
* @return {String}
*/
this.getValue = function(){
var v = this.isHTMLBox ? this.$input.innerHTML : this.$input.value;
return v == this["initial-message"] ? "" : v.replace(/\r/g, "");
};
//#endif
/**
* Selects the text in this element.
*/
this.select = function(){
try {
this.$input.select();
}
catch(e){}
};
/**
* Deselects the text in this element.
*/
this.deselect = function(){this.$input.deselect();};
/**** Private Methods *****/
this.$enable = function(){this.$input.disabled = false;};
this.$disable = function(){this.$input.disabled = true;};
this.$insertData = function(str){
return this.setValue(str);
};
/**
* @private
*/
this.insert = function(text){
if (apf.hasMsRangeObject) {
try {
this.$input.focus();
}
catch(e) {}
var range = document.selection.createRange();
if (this.oninsert)
text = this.oninsert(text);
range.pasteHTML(text);
range.collapse(true);
range.select();
}
else {
this.$input.value += text;
}
};
this.addEventListener("$clear", function(){
this.value = "";//@todo what about property binding?
if (this["initial-message"] && apf.document.activeElement != this) {
this.$propHandlers["value"].call(this, this["initial-message"], null, null, true);
apf.setStyleClass(this.$ext, this.$baseCSSname + "Initial");
}
else {
this.$propHandlers["value"].call(this, "");
}
if (!this.$input.tagName.toLowerCase().match(/input|textarea/i)) {
if (apf.hasMsRangeObject) {
try {
var range = document.selection.createRange();
range.moveStart("sentence", -1);
//range.text = "";
range.select();
}
catch(e) {}
}
}
this.dispatchEvent("clear"); //@todo apf3.0
});
this.$keyHandler = function(key, ctrlKey, shiftKey, altKey, e){
if (this.$button && key == 27) {
//this.$clear();
if (this.value) {
this.change("");
e.stopPropagation();
}
//this.focus({mouse:true});
}
/*if (this.dispatchEvent("keydown", {
keyCode : key,
ctrlKey : ctrlKey,
shiftKey : shiftKey,
altKey : altKey,
htmlEvent : e}) === false)
return false;
// @todo: revisit this IF statement - dead code?
if (false && apf.isIE && (key == 86 && ctrlKey || key == 45 && shiftKey)) {
var text = window.clipboardData.getData("Text");
if ((text = this.dispatchEvent("keydown", {
text : this.onpaste(text)}) === false))
return false;
if (!text)
text = window.clipboardData.getData("Text");
this.$input.focus();
var range = document.selection.createRange();
range.text = "";
range.collapse();
range.pasteHTML(text.replace(/\n/g, "<br />").replace(/\t/g, " "));
return false;
}*/
};
this.$registerElement = function(oNode) {
if (!oNode) return;
if (oNode.localName == "autocomplete")
this.$autoComplete = oNode;
};
var fTimer;
this.$focus = function(e){
if (!this.$ext || this.$ext.disabled)
return;
this.$setStyleClass(this.$ext, this.$baseCSSname + "Focus");
if (this["initial-message"] && this.$input.value == this["initial-message"]) {
this.$propHandlers["value"].call(this, "", null, null, true);
apf.setStyleClass(this.$ext, "", [this.$baseCSSname + "Initial"]);
}
var _self = this;
function | delay | identifier_name |
|
textbox.js | Example:
* An american style phone number.
* <code>
* <a:textbox mask="(000)0000-0000;;_" />
* </code>
* Example:
* A dutch postal code
* <code>
* <a:textbox mask="0000 AA;;_" />
* </code>
* Example:
* A date
* <code>
* <a:textbox mask="00-00-0000;;_" datatype="xsd:date" />
* </code>
* Example:
* A serial number
* <code>
* <a:textbox mask="'WCS74'0000-00000;1;_" />
* </code>
* Example:
* A MAC address
* <code>
* <a:textbox mask="XX-XX-XX-XX-XX-XX;;_" />
* </code>
*/
this.$propHandlers["mask"] = function(value){
if (this.mask.toLowerCase() == "password")// || !apf.hasMsRangeObject)
return;
if (!value) {
throw new Error("Not Implemented");
}
if (!this.$masking) {
this.$masking = true;
this.implement(apf.textbox.masking);
this.focusselect = false;
//this.realtime = false;
}
this.setMask(this.mask);
};
//this.$propHandlers["ref"] = function(value) {
// this.$input.setAttribute("name", value.split("/").pop().split("::").pop()
// .replace(/[\@\.\(\)]*/g, ""));
//};
/**
* @attribute {String} initial-message the message displayed by this element
* when it doesn't have a value set. This property is inherited from parent
* nodes. When none is found it is looked for on the appsettings element.
*/
this.$propHandlers["initial-message"] = function(value){
if (value) {
//#ifdef __WITH_WINDOW_FOCUS
if (apf.hasFocusBug)
this.$input.onblur();
//#endif
//this.$propHandlers["value"].call(this, value, null, true);
}
if (!this.value)
this.$clear(true);
if (this.type == "password" && this.$inputInitFix) {
this.$inputInitFix.innerHTML = value;
apf.setStyleClass(this.$inputInitFix, "initFxEnabled");
}
};
/**
* @attribute {Boolean} focusselect whether the text in this element is
* selected when this element receives focus.
*/
this.$propHandlers["focusselect"] = function(value){
var _self = this;
this.$input.onmousedown = function(){
_self.focusselect = false;
};
this.$input.onmouseup =
this.$input.onmouseout = function(){
_self.focusselect = value;
};
};
/**
* @attribute {String} type the type or function this element represents.
* This can be any arbitrary name. Although there are some special values.
* Possible values:
* username this element is used to type in the name part of login credentials.
* password this element is used to type in the password part of login credentials.
*/
this.$propHandlers["type"] = function(value){
if (value && "password|username".indexOf(value) > -1
&& typeof this.focusselect == "undefined") {
this.focusselect = true;
this.$propHandlers["focusselect"].call(this, true);
}
};
this.$isTextInput = function(e){
return true;
};
/**** Public Methods ****/
//#ifdef __WITH_CONVENIENCE_API
/**
* Sets the value of this element. This should be one of the values
* specified in the values attribute.
* @param {String} value the new value of this element
*/
this.setValue = function(value){
return this.setProperty("value", value, false, true);
};
this.clear = function(){
this.setProperty("value", "");
}
//@todo cleanup and put initial-message behaviour in one location
this.$clear = function(noEvent){
if (this["initial-message"]) {
apf.setStyleClass(this.$ext, this.$baseCSSname + "Initial");
this.$propHandlers["value"].call(this, this["initial-message"], null, null, true);
}
else {
this.$propHandlers["value"].call(this, "", null, null, true);
}
if (!noEvent)
this.dispatchEvent("clear");//@todo this should work via value change
}
/**
* Returns the current value of this element.
* @return {String}
*/
this.getValue = function(){
var v = this.isHTMLBox ? this.$input.innerHTML : this.$input.value;
return v == this["initial-message"] ? "" : v.replace(/\r/g, "");
};
//#endif
/**
* Selects the text in this element.
*/
this.select = function(){
try {
this.$input.select();
}
catch(e){}
};
/**
* Deselects the text in this element.
*/
this.deselect = function(){this.$input.deselect();};
/**** Private Methods *****/
this.$enable = function(){this.$input.disabled = false;};
this.$disable = function(){this.$input.disabled = true;};
this.$insertData = function(str){
return this.setValue(str);
};
/**
* @private
*/
this.insert = function(text){
if (apf.hasMsRangeObject) {
try {
this.$input.focus();
}
catch(e) {}
var range = document.selection.createRange();
if (this.oninsert)
text = this.oninsert(text);
range.pasteHTML(text);
range.collapse(true);
range.select();
}
else {
this.$input.value += text;
}
};
this.addEventListener("$clear", function(){
this.value = "";//@todo what about property binding?
if (this["initial-message"] && apf.document.activeElement != this) {
this.$propHandlers["value"].call(this, this["initial-message"], null, null, true);
apf.setStyleClass(this.$ext, this.$baseCSSname + "Initial");
}
else {
this.$propHandlers["value"].call(this, "");
}
if (!this.$input.tagName.toLowerCase().match(/input|textarea/i)) {
if (apf.hasMsRangeObject) {
try {
var range = document.selection.createRange();
range.moveStart("sentence", -1);
//range.text = "";
range.select();
}
catch(e) {}
}
}
this.dispatchEvent("clear"); //@todo apf3.0
});
this.$keyHandler = function(key, ctrlKey, shiftKey, altKey, e){
if (this.$button && key == 27) {
//this.$clear();
if (this.value) {
this.change("");
e.stopPropagation();
}
//this.focus({mouse:true});
}
/*if (this.dispatchEvent("keydown", {
keyCode : key,
ctrlKey : ctrlKey,
shiftKey : shiftKey,
altKey : altKey,
htmlEvent : e}) === false)
return false;
// @todo: revisit this IF statement - dead code?
if (false && apf.isIE && (key == 86 && ctrlKey || key == 45 && shiftKey)) {
var text = window.clipboardData.getData("Text");
if ((text = this.dispatchEvent("keydown", {
text : this.onpaste(text)}) === false))
return false;
if (!text)
text = window.clipboardData.getData("Text");
this.$input.focus();
var range = document.selection.createRange();
range.text = "";
range.collapse();
range.pasteHTML(text.replace(/\n/g, "<br />").replace(/\t/g, " "));
return false;
}*/
};
this.$registerElement = function(oNode) {
if (!oNode) return;
if (oNode.localName == "autocomplete")
this.$autoComplete = oNode;
};
var fTimer;
this.$focus = function(e){
if (!this.$ext || this.$ext.disabled)
return;
this.$setStyleClass(this.$ext, this.$baseCSSname + "Focus");
if (this["initial-message"] && this.$input.value == this["initial-message"]) {
this.$propHandlers["value"].call(this, "", null, null, true);
apf.setStyleClass(this.$ext, "", [this.$baseCSSname + "Initial"]);
}
var _self = this;
function delay() | {
try {
if (!fTimer || document.activeElement != _self.$input) {
_self.$input.focus();
}
else {
clearInterval(fTimer);
return;
}
}
catch(e) {}
if (_self.$masking)
_self.setPosition();
if (_self.focusselect)
_self.select();
} | identifier_body |
|
textbox.js | == "autocomplete")
this.$autoComplete = oNode;
};
var fTimer;
this.$focus = function(e){
if (!this.$ext || this.$ext.disabled)
return;
this.$setStyleClass(this.$ext, this.$baseCSSname + "Focus");
if (this["initial-message"] && this.$input.value == this["initial-message"]) {
this.$propHandlers["value"].call(this, "", null, null, true);
apf.setStyleClass(this.$ext, "", [this.$baseCSSname + "Initial"]);
}
var _self = this;
function delay(){
try {
if (!fTimer || document.activeElement != _self.$input) {
_self.$input.focus();
}
else {
clearInterval(fTimer);
return;
}
}
catch(e) {}
if (_self.$masking)
_self.setPosition();
if (_self.focusselect)
_self.select();
};
if ((!e || e.mouse) && apf.isIE) {
clearInterval(fTimer);
fTimer = setInterval(delay, 1);
}
else
delay();
};
this.$blur = function(e){
if (!this.$ext)
return;
if (!this.realtime)
this.change(this.getValue());
this.$setStyleClass(this.$ext, "", [this.$baseCSSname + "Focus", "capsLock"]);
if (this["initial-message"] && this.$input.value == "") {
this.$propHandlers["value"].call(this, this["initial-message"], null, null, true);
apf.setStyleClass(this.$ext, this.$baseCSSname + "Initial");
}
/*if (apf.hasMsRangeObject) {
var r = this.$input.createTextRange();
r.collapse();
r.select();
}*/
try {
if (apf.isIE || !e || e.srcElement != apf.window)
this.$input.blur();
}
catch(e) {}
// check if we clicked on the oContainer. ifso dont hide it
if (this.oContainer) {
$setTimeout("var o = apf.lookup(" + this.$uniqueId + ");\
o.oContainer.style.display = 'none'", 100);
}
clearInterval(fTimer);
};
/**** Init ****/
this.$draw = function(){
var _self = this,
typedBefore = false;
//#ifdef __AMLCODEEDITOR
if (this.localName == "codeeditor") {
this.skin = "textarea";
this.$loadSkin();
}
//#endif
//Build Main Skin
this.$ext = this.$getExternal(null, null, function(oExt){
var mask = this.getAttribute("mask");
if ((typeof mask == "string" && mask.toLowerCase() == "password")
|| "secret|password".indexOf(this.localName) > -1) {
this.type = "password";
this.$getLayoutNode("main", "input").setAttribute("type", "password");
}
//#ifdef __WITH_HTML5
else if (this.localName == "email") {
this.datatype = (this.prefix ? this.prefix + ":" : "") + "email";
this.$propHandlers["datatype"].call(this, this.datatype, "datatype");
}
else if (this.localName == "url") {
this.datatype = (this.prefix ? this.prefix + ":" : "") + "url";
this.$propHandlers["datatype"].call(this, this.datatype, "datatype");
}
//#endif
oExt.setAttribute("onmousedown", "if (!this.host.disabled) \
this.host.dispatchEvent('mousedown', {htmlEvent : event});");
oExt.setAttribute("onmouseup", "if (!this.host.disabled) \
this.host.dispatchEvent('mouseup', {htmlEvent : event});");
oExt.setAttribute("onclick", "if (!this.host.disabled) \
this.host.dispatchEvent('click', {htmlEvent : event});");
});
this.$input = this.$getLayoutNode("main", "input", this.$ext);
this.$button = this.$getLayoutNode("main", "button", this.$ext);
this.$inputInitFix = this.$getLayoutNode("main", "initialfix", this.$ext);
if (this.type == "password")
this.$propHandlers["type"].call(this, "password");
if (!apf.hasContentEditable && "input|textarea".indexOf(this.$input.tagName.toLowerCase()) == -1) {
var node = this.$input;
this.$input = node.parentNode.insertBefore(document.createElement("textarea"), node);
node.parentNode.removeChild(node);
this.$input.className = node.className;
if (this.$ext == node)
this.$ext = this.$input;
}
if (this.$button) {
this.$button.onmousedown = function(){
_self.$clear(); //@todo why are both needed for doc filter
_self.change(""); //@todo only this one should be needed
_self.focus({mouse:true});
}
}
//@todo for skin switching this should be removed
if (this.$input.tagName.toLowerCase() == "textarea") {
this.addEventListener("focus", function(e){
//if (this.multiline != "optional")
//e.returnValue = false
});
}
this.$input.onselectstart = function(e){
if (!e) e = event;
e.cancelBubble = true;
}
this.$input.host = this;
this.$input.onkeydown = function(e){
e = e || window.event;
if (this.host.disabled) {
e.returnValue = false;
return false;
}
//Change
if (!_self.realtime) {
var value = _self.getValue();
if (e.keyCode == 13 && value != _self.value)
_self.change(value);
}
else if (apf.isWebkit && _self.xmlRoot && _self.getValue() != _self.value) //safari issue (only old??)
$setTimeout("var o = apf.lookup(" + _self.$uniqueId + ");\
o.change(o.getValue())");
if (_self.multiline == "optional" && e.keyCode == 13 && !e.shiftKey
|| e.ctrlKey && (e.keyCode == 66 || e.keyCode == 73
|| e.keyCode == 85)) {
e.returnValue = false;
return false;
}
if (typedBefore && this.getAttribute("type") == "password" && this.value != "") {
var hasClass = (_self.$ext.className.indexOf("capsLock") > -1),
capsKey = (e.keyCode === 20);
if (capsKey) // caps off
apf.setStyleClass(_self.$ext, hasClass ? null : "capsLock", hasClass ? ["capsLock"] : null);
}
//Autocomplete
if (_self.$autoComplete || _self.oContainer) {
var keyCode = e.keyCode;
$setTimeout(function(){
if (_self.$autoComplete)
_self.$autoComplete.fillAutocomplete(keyCode);
else
_self.fillAutocomplete(keyCode);
});
}
//Non this.$masking
if (!_self.mask) {
return _self.$keyHandler(e.keyCode, e.ctrlKey,
e.shiftKey, e.altKey, e);
}
};
this.$input.onkeyup = function(e){
if (!e)
e = event;
if (this.host.disabled)
return false;
var keyCode = e.keyCode;
if (_self.$button)
_self.$button.style.display = this.value ? "block" : "none";
if (_self.realtime) {
$setTimeout(function(){
var v;
if (!_self.mask && (v = _self.getValue()) != _self.value)
_self.change(v);
_self.dispatchEvent("keyup", {keyCode : keyCode});//@todo
});
}
else {
_self.dispatchEvent("keyup", {keyCode : keyCode});//@todo
}
//#ifdef __WITH_VALIDATION
if (_self.isValid && _self.isValid() && e.keyCode != 13 && e.keyCode != 17)
_self.clearError();
//#endif
};
//#ifdef __WITH_WINDOW_FOCUS
if (apf.hasFocusBug)
apf.sanitizeTextbox(this.$input);
//#endif
if (apf.hasAutocompleteXulBug)
this.$input.setAttribute("autocomplete", "off");
if ("INPUT|TEXTAREA".indexOf(this.$input.tagName) == -1) {
this.isHTMLBox = true;
this.$input.unselectable = "Off";
this.$input.contentEditable = true;
this.$input.style.width = "1px";
this.$input.select = function(){
var r = document.selection.createRange();
r.moveToElementText(this);
r.select();
}
};
this.$input.deselect = function(){
if (!document.selection) return;
var r = document.selection.createRange();
r.collapse();
r.select();
};
var f;
apf.addListener(this.$input, "keypress", f = function(e) {
if (_self.$input.getAttribute("type") != "password")
return apf.removeListener(_self.$input, "keypress", f);
e = e || window.event; | random_line_split |
||
index_object.py | import numpy as np
import pandas.util.testing as tm
from pandas import (Series, date_range, DatetimeIndex, Index, RangeIndex,
Float64Index)
from .pandas_vb_common import setup # noqa
class SetOperations(object):
goal_time = 0.2
params = (['datetime', 'date_string', 'int', 'strings'],
['intersection', 'union', 'symmetric_difference'])
param_names = ['dtype', 'method']
def setup(self, dtype, method):
N = 10**5
dates_left = date_range('1/1/2000', periods=N, freq='T')
fmt = '%Y-%m-%d %H:%M:%S'
date_str_left = Index(dates_left.strftime(fmt))
int_left = Index(np.arange(N))
str_left = tm.makeStringIndex(N)
data = {'datetime': {'left': dates_left, 'right': dates_left[:-1]},
'date_string': {'left': date_str_left,
'right': date_str_left[:-1]},
'int': {'left': int_left, 'right': int_left[:-1]},
'strings': {'left': str_left, 'right': str_left[:-1]}}
self.left = data[dtype]['left']
self.right = data[dtype]['right']
def time_operation(self, dtype, method):
getattr(self.left, method)(self.right)
class SetDisjoint(object):
goal_time = 0.2
def setup(self):
N = 10**5
B = N + 20000
self.datetime_left = DatetimeIndex(range(N))
self.datetime_right = DatetimeIndex(range(N, B))
def time_datetime_difference_disjoint(self):
self.datetime_left.difference(self.datetime_right)
class Datetime(object):
goal_time = 0.2
def setup(self):
self.dr = date_range('20000101', freq='D', periods=10000)
def time_is_dates_only(self):
self.dr._is_dates_only
class Ops(object):
sample_time = 0.2
params = ['float', 'int']
param_names = ['dtype']
def setup(self, dtype):
N = 10**6
indexes = {'int': 'makeIntIndex', 'float': 'makeFloatIndex'}
self.index = getattr(tm, indexes[dtype])(N)
def time_add(self, dtype):
self.index + 2
def time_subtract(self, dtype):
self.index - 2
def time_multiply(self, dtype):
self.index * 2
def time_divide(self, dtype):
self.index / 2
def time_modulo(self, dtype):
self.index % 2
class Range(object): | self.idx_dec = RangeIndex(start=10**7, stop=-1, step=-3)
def time_max(self):
self.idx_inc.max()
def time_max_trivial(self):
self.idx_dec.max()
def time_min(self):
self.idx_dec.min()
def time_min_trivial(self):
self.idx_inc.min()
class IndexAppend(object):
goal_time = 0.2
def setup(self):
N = 10000
self.range_idx = RangeIndex(0, 100)
self.int_idx = self.range_idx.astype(int)
self.obj_idx = self.int_idx.astype(str)
self.range_idxs = []
self.int_idxs = []
self.object_idxs = []
for i in range(1, N):
r_idx = RangeIndex(i * 100, (i + 1) * 100)
self.range_idxs.append(r_idx)
i_idx = r_idx.astype(int)
self.int_idxs.append(i_idx)
o_idx = i_idx.astype(str)
self.object_idxs.append(o_idx)
def time_append_range_list(self):
self.range_idx.append(self.range_idxs)
def time_append_int_list(self):
self.int_idx.append(self.int_idxs)
def time_append_obj_list(self):
self.obj_idx.append(self.object_idxs)
class Indexing(object):
goal_time = 0.2
params = ['String', 'Float', 'Int']
param_names = ['dtype']
def setup(self, dtype):
N = 10**6
self.idx = getattr(tm, 'make{}Index'.format(dtype))(N)
self.array_mask = (np.arange(N) % 3) == 0
self.series_mask = Series(self.array_mask)
self.sorted = self.idx.sort_values()
half = N // 2
self.non_unique = self.idx[:half].append(self.idx[:half])
self.non_unique_sorted = self.sorted[:half].append(self.sorted[:half])
self.key = self.sorted[N // 4]
def time_boolean_array(self, dtype):
self.idx[self.array_mask]
def time_boolean_series(self, dtype):
self.idx[self.series_mask]
def time_get(self, dtype):
self.idx[1]
def time_slice(self, dtype):
self.idx[:-1]
def time_slice_step(self, dtype):
self.idx[::2]
def time_get_loc(self, dtype):
self.idx.get_loc(self.key)
def time_get_loc_sorted(self, dtype):
self.sorted.get_loc(self.key)
def time_get_loc_non_unique(self, dtype):
self.non_unique.get_loc(self.key)
def time_get_loc_non_unique_sorted(self, dtype):
self.non_unique_sorted.get_loc(self.key)
class Float64IndexMethod(object):
# GH 13166
goal_time = 0.2
def setup(self):
N = 100000
a = np.arange(N)
self.ind = Float64Index(a * 4.8000000418824129e-08)
def time_get_loc(self):
self.ind.get_loc(0) |
goal_time = 0.2
def setup(self):
self.idx_inc = RangeIndex(start=0, stop=10**7, step=3) | random_line_split |
index_object.py | import numpy as np
import pandas.util.testing as tm
from pandas import (Series, date_range, DatetimeIndex, Index, RangeIndex,
Float64Index)
from .pandas_vb_common import setup # noqa
class SetOperations(object):
goal_time = 0.2
params = (['datetime', 'date_string', 'int', 'strings'],
['intersection', 'union', 'symmetric_difference'])
param_names = ['dtype', 'method']
def setup(self, dtype, method):
N = 10**5
dates_left = date_range('1/1/2000', periods=N, freq='T')
fmt = '%Y-%m-%d %H:%M:%S'
date_str_left = Index(dates_left.strftime(fmt))
int_left = Index(np.arange(N))
str_left = tm.makeStringIndex(N)
data = {'datetime': {'left': dates_left, 'right': dates_left[:-1]},
'date_string': {'left': date_str_left,
'right': date_str_left[:-1]},
'int': {'left': int_left, 'right': int_left[:-1]},
'strings': {'left': str_left, 'right': str_left[:-1]}}
self.left = data[dtype]['left']
self.right = data[dtype]['right']
def time_operation(self, dtype, method):
getattr(self.left, method)(self.right)
class SetDisjoint(object):
goal_time = 0.2
def setup(self):
N = 10**5
B = N + 20000
self.datetime_left = DatetimeIndex(range(N))
self.datetime_right = DatetimeIndex(range(N, B))
def time_datetime_difference_disjoint(self):
self.datetime_left.difference(self.datetime_right)
class Datetime(object):
goal_time = 0.2
def setup(self):
self.dr = date_range('20000101', freq='D', periods=10000)
def time_is_dates_only(self):
self.dr._is_dates_only
class Ops(object):
sample_time = 0.2
params = ['float', 'int']
param_names = ['dtype']
def setup(self, dtype):
N = 10**6
indexes = {'int': 'makeIntIndex', 'float': 'makeFloatIndex'}
self.index = getattr(tm, indexes[dtype])(N)
def time_add(self, dtype):
self.index + 2
def time_subtract(self, dtype):
self.index - 2
def time_multiply(self, dtype):
self.index * 2
def time_divide(self, dtype):
self.index / 2
def time_modulo(self, dtype):
self.index % 2
class Range(object):
goal_time = 0.2
def setup(self):
self.idx_inc = RangeIndex(start=0, stop=10**7, step=3)
self.idx_dec = RangeIndex(start=10**7, stop=-1, step=-3)
def time_max(self):
self.idx_inc.max()
def time_max_trivial(self):
self.idx_dec.max()
def time_min(self):
self.idx_dec.min()
def time_min_trivial(self):
self.idx_inc.min()
class IndexAppend(object):
goal_time = 0.2
def setup(self):
N = 10000
self.range_idx = RangeIndex(0, 100)
self.int_idx = self.range_idx.astype(int)
self.obj_idx = self.int_idx.astype(str)
self.range_idxs = []
self.int_idxs = []
self.object_idxs = []
for i in range(1, N):
r_idx = RangeIndex(i * 100, (i + 1) * 100)
self.range_idxs.append(r_idx)
i_idx = r_idx.astype(int)
self.int_idxs.append(i_idx)
o_idx = i_idx.astype(str)
self.object_idxs.append(o_idx)
def time_append_range_list(self):
self.range_idx.append(self.range_idxs)
def time_append_int_list(self):
self.int_idx.append(self.int_idxs)
def time_append_obj_list(self):
self.obj_idx.append(self.object_idxs)
class Indexing(object):
goal_time = 0.2
params = ['String', 'Float', 'Int']
param_names = ['dtype']
def setup(self, dtype):
N = 10**6
self.idx = getattr(tm, 'make{}Index'.format(dtype))(N)
self.array_mask = (np.arange(N) % 3) == 0
self.series_mask = Series(self.array_mask)
self.sorted = self.idx.sort_values()
half = N // 2
self.non_unique = self.idx[:half].append(self.idx[:half])
self.non_unique_sorted = self.sorted[:half].append(self.sorted[:half])
self.key = self.sorted[N // 4]
def time_boolean_array(self, dtype):
self.idx[self.array_mask]
def time_boolean_series(self, dtype):
self.idx[self.series_mask]
def time_get(self, dtype):
self.idx[1]
def time_slice(self, dtype):
self.idx[:-1]
def time_slice_step(self, dtype):
self.idx[::2]
def time_get_loc(self, dtype):
self.idx.get_loc(self.key)
def time_get_loc_sorted(self, dtype):
self.sorted.get_loc(self.key)
def time_get_loc_non_unique(self, dtype):
self.non_unique.get_loc(self.key)
def time_get_loc_non_unique_sorted(self, dtype):
self.non_unique_sorted.get_loc(self.key)
class Float64IndexMethod(object):
# GH 13166
goal_time = 0.2
def setup(self):
|
def time_get_loc(self):
self.ind.get_loc(0)
| N = 100000
a = np.arange(N)
self.ind = Float64Index(a * 4.8000000418824129e-08) | identifier_body |
index_object.py | import numpy as np
import pandas.util.testing as tm
from pandas import (Series, date_range, DatetimeIndex, Index, RangeIndex,
Float64Index)
from .pandas_vb_common import setup # noqa
class SetOperations(object):
goal_time = 0.2
params = (['datetime', 'date_string', 'int', 'strings'],
['intersection', 'union', 'symmetric_difference'])
param_names = ['dtype', 'method']
def setup(self, dtype, method):
N = 10**5
dates_left = date_range('1/1/2000', periods=N, freq='T')
fmt = '%Y-%m-%d %H:%M:%S'
date_str_left = Index(dates_left.strftime(fmt))
int_left = Index(np.arange(N))
str_left = tm.makeStringIndex(N)
data = {'datetime': {'left': dates_left, 'right': dates_left[:-1]},
'date_string': {'left': date_str_left,
'right': date_str_left[:-1]},
'int': {'left': int_left, 'right': int_left[:-1]},
'strings': {'left': str_left, 'right': str_left[:-1]}}
self.left = data[dtype]['left']
self.right = data[dtype]['right']
def time_operation(self, dtype, method):
getattr(self.left, method)(self.right)
class SetDisjoint(object):
goal_time = 0.2
def setup(self):
N = 10**5
B = N + 20000
self.datetime_left = DatetimeIndex(range(N))
self.datetime_right = DatetimeIndex(range(N, B))
def time_datetime_difference_disjoint(self):
self.datetime_left.difference(self.datetime_right)
class Datetime(object):
goal_time = 0.2
def setup(self):
self.dr = date_range('20000101', freq='D', periods=10000)
def time_is_dates_only(self):
self.dr._is_dates_only
class Ops(object):
sample_time = 0.2
params = ['float', 'int']
param_names = ['dtype']
def setup(self, dtype):
N = 10**6
indexes = {'int': 'makeIntIndex', 'float': 'makeFloatIndex'}
self.index = getattr(tm, indexes[dtype])(N)
def time_add(self, dtype):
self.index + 2
def time_subtract(self, dtype):
self.index - 2
def time_multiply(self, dtype):
self.index * 2
def time_divide(self, dtype):
self.index / 2
def time_modulo(self, dtype):
self.index % 2
class Range(object):
goal_time = 0.2
def setup(self):
self.idx_inc = RangeIndex(start=0, stop=10**7, step=3)
self.idx_dec = RangeIndex(start=10**7, stop=-1, step=-3)
def time_max(self):
self.idx_inc.max()
def time_max_trivial(self):
self.idx_dec.max()
def time_min(self):
self.idx_dec.min()
def time_min_trivial(self):
self.idx_inc.min()
class IndexAppend(object):
goal_time = 0.2
def setup(self):
N = 10000
self.range_idx = RangeIndex(0, 100)
self.int_idx = self.range_idx.astype(int)
self.obj_idx = self.int_idx.astype(str)
self.range_idxs = []
self.int_idxs = []
self.object_idxs = []
for i in range(1, N):
r_idx = RangeIndex(i * 100, (i + 1) * 100)
self.range_idxs.append(r_idx)
i_idx = r_idx.astype(int)
self.int_idxs.append(i_idx)
o_idx = i_idx.astype(str)
self.object_idxs.append(o_idx)
def time_append_range_list(self):
self.range_idx.append(self.range_idxs)
def time_append_int_list(self):
self.int_idx.append(self.int_idxs)
def time_append_obj_list(self):
self.obj_idx.append(self.object_idxs)
class Indexing(object):
goal_time = 0.2
params = ['String', 'Float', 'Int']
param_names = ['dtype']
def setup(self, dtype):
N = 10**6
self.idx = getattr(tm, 'make{}Index'.format(dtype))(N)
self.array_mask = (np.arange(N) % 3) == 0
self.series_mask = Series(self.array_mask)
self.sorted = self.idx.sort_values()
half = N // 2
self.non_unique = self.idx[:half].append(self.idx[:half])
self.non_unique_sorted = self.sorted[:half].append(self.sorted[:half])
self.key = self.sorted[N // 4]
def time_boolean_array(self, dtype):
self.idx[self.array_mask]
def time_boolean_series(self, dtype):
self.idx[self.series_mask]
def time_get(self, dtype):
self.idx[1]
def time_slice(self, dtype):
self.idx[:-1]
def time_slice_step(self, dtype):
self.idx[::2]
def time_get_loc(self, dtype):
self.idx.get_loc(self.key)
def time_get_loc_sorted(self, dtype):
self.sorted.get_loc(self.key)
def time_get_loc_non_unique(self, dtype):
self.non_unique.get_loc(self.key)
def | (self, dtype):
self.non_unique_sorted.get_loc(self.key)
class Float64IndexMethod(object):
# GH 13166
goal_time = 0.2
def setup(self):
N = 100000
a = np.arange(N)
self.ind = Float64Index(a * 4.8000000418824129e-08)
def time_get_loc(self):
self.ind.get_loc(0)
| time_get_loc_non_unique_sorted | identifier_name |
index_object.py | import numpy as np
import pandas.util.testing as tm
from pandas import (Series, date_range, DatetimeIndex, Index, RangeIndex,
Float64Index)
from .pandas_vb_common import setup # noqa
class SetOperations(object):
goal_time = 0.2
params = (['datetime', 'date_string', 'int', 'strings'],
['intersection', 'union', 'symmetric_difference'])
param_names = ['dtype', 'method']
def setup(self, dtype, method):
N = 10**5
dates_left = date_range('1/1/2000', periods=N, freq='T')
fmt = '%Y-%m-%d %H:%M:%S'
date_str_left = Index(dates_left.strftime(fmt))
int_left = Index(np.arange(N))
str_left = tm.makeStringIndex(N)
data = {'datetime': {'left': dates_left, 'right': dates_left[:-1]},
'date_string': {'left': date_str_left,
'right': date_str_left[:-1]},
'int': {'left': int_left, 'right': int_left[:-1]},
'strings': {'left': str_left, 'right': str_left[:-1]}}
self.left = data[dtype]['left']
self.right = data[dtype]['right']
def time_operation(self, dtype, method):
getattr(self.left, method)(self.right)
class SetDisjoint(object):
goal_time = 0.2
def setup(self):
N = 10**5
B = N + 20000
self.datetime_left = DatetimeIndex(range(N))
self.datetime_right = DatetimeIndex(range(N, B))
def time_datetime_difference_disjoint(self):
self.datetime_left.difference(self.datetime_right)
class Datetime(object):
goal_time = 0.2
def setup(self):
self.dr = date_range('20000101', freq='D', periods=10000)
def time_is_dates_only(self):
self.dr._is_dates_only
class Ops(object):
sample_time = 0.2
params = ['float', 'int']
param_names = ['dtype']
def setup(self, dtype):
N = 10**6
indexes = {'int': 'makeIntIndex', 'float': 'makeFloatIndex'}
self.index = getattr(tm, indexes[dtype])(N)
def time_add(self, dtype):
self.index + 2
def time_subtract(self, dtype):
self.index - 2
def time_multiply(self, dtype):
self.index * 2
def time_divide(self, dtype):
self.index / 2
def time_modulo(self, dtype):
self.index % 2
class Range(object):
goal_time = 0.2
def setup(self):
self.idx_inc = RangeIndex(start=0, stop=10**7, step=3)
self.idx_dec = RangeIndex(start=10**7, stop=-1, step=-3)
def time_max(self):
self.idx_inc.max()
def time_max_trivial(self):
self.idx_dec.max()
def time_min(self):
self.idx_dec.min()
def time_min_trivial(self):
self.idx_inc.min()
class IndexAppend(object):
goal_time = 0.2
def setup(self):
N = 10000
self.range_idx = RangeIndex(0, 100)
self.int_idx = self.range_idx.astype(int)
self.obj_idx = self.int_idx.astype(str)
self.range_idxs = []
self.int_idxs = []
self.object_idxs = []
for i in range(1, N):
|
def time_append_range_list(self):
self.range_idx.append(self.range_idxs)
def time_append_int_list(self):
self.int_idx.append(self.int_idxs)
def time_append_obj_list(self):
self.obj_idx.append(self.object_idxs)
class Indexing(object):
goal_time = 0.2
params = ['String', 'Float', 'Int']
param_names = ['dtype']
def setup(self, dtype):
N = 10**6
self.idx = getattr(tm, 'make{}Index'.format(dtype))(N)
self.array_mask = (np.arange(N) % 3) == 0
self.series_mask = Series(self.array_mask)
self.sorted = self.idx.sort_values()
half = N // 2
self.non_unique = self.idx[:half].append(self.idx[:half])
self.non_unique_sorted = self.sorted[:half].append(self.sorted[:half])
self.key = self.sorted[N // 4]
def time_boolean_array(self, dtype):
self.idx[self.array_mask]
def time_boolean_series(self, dtype):
self.idx[self.series_mask]
def time_get(self, dtype):
self.idx[1]
def time_slice(self, dtype):
self.idx[:-1]
def time_slice_step(self, dtype):
self.idx[::2]
def time_get_loc(self, dtype):
self.idx.get_loc(self.key)
def time_get_loc_sorted(self, dtype):
self.sorted.get_loc(self.key)
def time_get_loc_non_unique(self, dtype):
self.non_unique.get_loc(self.key)
def time_get_loc_non_unique_sorted(self, dtype):
self.non_unique_sorted.get_loc(self.key)
class Float64IndexMethod(object):
# GH 13166
goal_time = 0.2
def setup(self):
N = 100000
a = np.arange(N)
self.ind = Float64Index(a * 4.8000000418824129e-08)
def time_get_loc(self):
self.ind.get_loc(0)
| r_idx = RangeIndex(i * 100, (i + 1) * 100)
self.range_idxs.append(r_idx)
i_idx = r_idx.astype(int)
self.int_idxs.append(i_idx)
o_idx = i_idx.astype(str)
self.object_idxs.append(o_idx) | conditional_block |
ut_daemon.py | #!/usr/bin/env python3
"""
test/unit_tests_d/ut_daemon.py: unit test for the MMGen suite's Daemon class
"""
from subprocess import run,DEVNULL
from mmgen.common import *
from mmgen.daemon import *
from mmgen.protocol import init_proto
def test_flags():
d = CoinDaemon('eth')
vmsg(f'Available opts: {fmt_list(d.avail_opts,fmt="bare")}')
vmsg(f'Available flags: {fmt_list(d.avail_flags,fmt="bare")}')
vals = namedtuple('vals',['online','no_daemonize','keep_cfg_file'])
def gen():
for opts,flags,val in (
(None,None, vals(False,False,False)),
(None,['keep_cfg_file'], vals(False,False,True)),
(['online'],['keep_cfg_file'], vals(True,False,True)),
(['online','no_daemonize'],['keep_cfg_file'], vals(True,True,True)),
):
d = CoinDaemon('eth',opts=opts,flags=flags)
assert d.flag.keep_cfg_file == val.keep_cfg_file
assert d.opt.online == val.online
assert d.opt.no_daemonize == val.no_daemonize
d.flag.keep_cfg_file = not val.keep_cfg_file
d.flag.keep_cfg_file = val.keep_cfg_file
yield d
return tuple(gen())
def test_flags_err(ut,d):
def bad1(): d[0].flag.foo = False
def bad2(): d[0].opt.foo = False
def bad3(): d[0].opt.no_daemonize = True
def bad4(): d[0].flag.keep_cfg_file = 'x'
def bad5(): d[0].opt.no_daemonize = 'x'
def bad6(): d[0].flag.keep_cfg_file = False
def bad7(): d[1].flag.keep_cfg_file = True
ut.process_bad_data((
('flag (1)', 'ClassFlagsError', 'unrecognized flag', bad1 ),
('opt (1)', 'ClassFlagsError', 'unrecognized opt', bad2 ),
('opt (2)', 'AttributeError', 'is read-only', bad3 ),
('flag (2)', 'AssertionError', 'not boolean', bad4 ),
('opt (3)', 'AttributeError', 'is read-only', bad5 ),
('flag (3)', 'ClassFlagsError', 'not set', bad6 ),
('flag (4)', 'ClassFlagsError', 'already set', bad7 ),
))
arm_skip_daemons = ('openethereum','parity')
def test_cmds(op):
network_ids = CoinDaemon.get_network_ids()
import mmgen.daemon as daemon_mod
for test_suite in [True,False] if op == 'print' else [True]:
vmsg(orange(f'Start commands (op={op}, test_suite={test_suite}):'))
for coin,data in CoinDaemon.coins.items():
for daemon_id in data.daemon_ids:
| vmsg('{:16} {}'.format(
d.exec_fn+':',
cp.stdout.decode().splitlines()[0] ))
else:
if opt.quiet:
msg_r('.')
if op == 'stop' and hasattr(d,'rpc'):
run_session(d.rpc.stop_daemon(quiet=opt.quiet))
else:
getattr(d,op)(silent=opt.quiet)
class unit_tests:
win_skip = ('start','status','stop')
def flags(self,name,ut):
qmsg_r('Testing flags and opts...')
vmsg('')
daemons = test_flags()
qmsg('OK')
qmsg_r('Testing error handling for flags and opts...')
vmsg('')
test_flags_err(ut,daemons)
qmsg('OK')
return True
def cmds(self,name,ut):
qmsg_r('Testing start commands for coin daemons...')
vmsg('')
test_cmds('print')
qmsg('OK')
return True
def exec(self,name,ut):
qmsg_r('Testing availability of coin daemons...')
vmsg('')
test_cmds('check')
qmsg('OK')
return True
def start(self,name,ut):
msg_r('Starting coin daemons...')
qmsg('')
test_cmds('start')
msg('OK')
return True
def status(self,name,ut):
msg_r('Checking status of coin daemons...')
qmsg('')
test_cmds('start')
msg('OK')
return True
def stop(self,name,ut):
msg_r('Stopping coin daemons...')
qmsg('')
test_cmds('stop')
msg('OK')
return True
| if daemon_id in arm_skip_daemons:
continue
for network in data.networks:
if opt.no_altcoin_deps and coin != 'BTC':
continue
d = CoinDaemon(
proto=init_proto(coin=coin,network=network),
daemon_id = daemon_id,
test_suite = test_suite )
if op == 'print':
for cmd in d.start_cmds:
vmsg(' '.join(cmd))
elif op == 'check':
try:
cp = run([d.exec_fn,'--help'],stdout=PIPE,stderr=PIPE)
except:
die(2,f'Unable to execute {d.exec_fn}')
if cp.returncode:
die(2,f'Unable to execute {d.exec_fn}')
else: | conditional_block |
ut_daemon.py | #!/usr/bin/env python3
"""
test/unit_tests_d/ut_daemon.py: unit test for the MMGen suite's Daemon class
"""
from subprocess import run,DEVNULL
from mmgen.common import *
from mmgen.daemon import *
from mmgen.protocol import init_proto
def test_flags():
d = CoinDaemon('eth')
vmsg(f'Available opts: {fmt_list(d.avail_opts,fmt="bare")}')
vmsg(f'Available flags: {fmt_list(d.avail_flags,fmt="bare")}')
vals = namedtuple('vals',['online','no_daemonize','keep_cfg_file'])
def | ():
for opts,flags,val in (
(None,None, vals(False,False,False)),
(None,['keep_cfg_file'], vals(False,False,True)),
(['online'],['keep_cfg_file'], vals(True,False,True)),
(['online','no_daemonize'],['keep_cfg_file'], vals(True,True,True)),
):
d = CoinDaemon('eth',opts=opts,flags=flags)
assert d.flag.keep_cfg_file == val.keep_cfg_file
assert d.opt.online == val.online
assert d.opt.no_daemonize == val.no_daemonize
d.flag.keep_cfg_file = not val.keep_cfg_file
d.flag.keep_cfg_file = val.keep_cfg_file
yield d
return tuple(gen())
def test_flags_err(ut,d):
def bad1(): d[0].flag.foo = False
def bad2(): d[0].opt.foo = False
def bad3(): d[0].opt.no_daemonize = True
def bad4(): d[0].flag.keep_cfg_file = 'x'
def bad5(): d[0].opt.no_daemonize = 'x'
def bad6(): d[0].flag.keep_cfg_file = False
def bad7(): d[1].flag.keep_cfg_file = True
ut.process_bad_data((
('flag (1)', 'ClassFlagsError', 'unrecognized flag', bad1 ),
('opt (1)', 'ClassFlagsError', 'unrecognized opt', bad2 ),
('opt (2)', 'AttributeError', 'is read-only', bad3 ),
('flag (2)', 'AssertionError', 'not boolean', bad4 ),
('opt (3)', 'AttributeError', 'is read-only', bad5 ),
('flag (3)', 'ClassFlagsError', 'not set', bad6 ),
('flag (4)', 'ClassFlagsError', 'already set', bad7 ),
))
arm_skip_daemons = ('openethereum','parity')
def test_cmds(op):
network_ids = CoinDaemon.get_network_ids()
import mmgen.daemon as daemon_mod
for test_suite in [True,False] if op == 'print' else [True]:
vmsg(orange(f'Start commands (op={op}, test_suite={test_suite}):'))
for coin,data in CoinDaemon.coins.items():
for daemon_id in data.daemon_ids:
if daemon_id in arm_skip_daemons:
continue
for network in data.networks:
if opt.no_altcoin_deps and coin != 'BTC':
continue
d = CoinDaemon(
proto=init_proto(coin=coin,network=network),
daemon_id = daemon_id,
test_suite = test_suite )
if op == 'print':
for cmd in d.start_cmds:
vmsg(' '.join(cmd))
elif op == 'check':
try:
cp = run([d.exec_fn,'--help'],stdout=PIPE,stderr=PIPE)
except:
die(2,f'Unable to execute {d.exec_fn}')
if cp.returncode:
die(2,f'Unable to execute {d.exec_fn}')
else:
vmsg('{:16} {}'.format(
d.exec_fn+':',
cp.stdout.decode().splitlines()[0] ))
else:
if opt.quiet:
msg_r('.')
if op == 'stop' and hasattr(d,'rpc'):
run_session(d.rpc.stop_daemon(quiet=opt.quiet))
else:
getattr(d,op)(silent=opt.quiet)
class unit_tests:
win_skip = ('start','status','stop')
def flags(self,name,ut):
qmsg_r('Testing flags and opts...')
vmsg('')
daemons = test_flags()
qmsg('OK')
qmsg_r('Testing error handling for flags and opts...')
vmsg('')
test_flags_err(ut,daemons)
qmsg('OK')
return True
def cmds(self,name,ut):
qmsg_r('Testing start commands for coin daemons...')
vmsg('')
test_cmds('print')
qmsg('OK')
return True
def exec(self,name,ut):
qmsg_r('Testing availability of coin daemons...')
vmsg('')
test_cmds('check')
qmsg('OK')
return True
def start(self,name,ut):
msg_r('Starting coin daemons...')
qmsg('')
test_cmds('start')
msg('OK')
return True
def status(self,name,ut):
msg_r('Checking status of coin daemons...')
qmsg('')
test_cmds('start')
msg('OK')
return True
def stop(self,name,ut):
msg_r('Stopping coin daemons...')
qmsg('')
test_cmds('stop')
msg('OK')
return True
| gen | identifier_name |
ut_daemon.py | #!/usr/bin/env python3
"""
test/unit_tests_d/ut_daemon.py: unit test for the MMGen suite's Daemon class
"""
from subprocess import run,DEVNULL
from mmgen.common import *
from mmgen.daemon import *
from mmgen.protocol import init_proto
def test_flags():
d = CoinDaemon('eth')
vmsg(f'Available opts: {fmt_list(d.avail_opts,fmt="bare")}')
vmsg(f'Available flags: {fmt_list(d.avail_flags,fmt="bare")}')
vals = namedtuple('vals',['online','no_daemonize','keep_cfg_file'])
def gen():
for opts,flags,val in (
(None,None, vals(False,False,False)),
(None,['keep_cfg_file'], vals(False,False,True)),
(['online'],['keep_cfg_file'], vals(True,False,True)),
(['online','no_daemonize'],['keep_cfg_file'], vals(True,True,True)),
):
d = CoinDaemon('eth',opts=opts,flags=flags)
assert d.flag.keep_cfg_file == val.keep_cfg_file
assert d.opt.online == val.online
assert d.opt.no_daemonize == val.no_daemonize
d.flag.keep_cfg_file = not val.keep_cfg_file
d.flag.keep_cfg_file = val.keep_cfg_file
yield d
return tuple(gen())
def test_flags_err(ut,d):
def bad1(): d[0].flag.foo = False
def bad2(): d[0].opt.foo = False
def bad3(): d[0].opt.no_daemonize = True
def bad4(): d[0].flag.keep_cfg_file = 'x'
def bad5(): d[0].opt.no_daemonize = 'x'
def bad6(): d[0].flag.keep_cfg_file = False
def bad7(): d[1].flag.keep_cfg_file = True
ut.process_bad_data((
('flag (1)', 'ClassFlagsError', 'unrecognized flag', bad1 ),
('opt (1)', 'ClassFlagsError', 'unrecognized opt', bad2 ),
('opt (2)', 'AttributeError', 'is read-only', bad3 ),
('flag (2)', 'AssertionError', 'not boolean', bad4 ),
('opt (3)', 'AttributeError', 'is read-only', bad5 ),
('flag (3)', 'ClassFlagsError', 'not set', bad6 ),
('flag (4)', 'ClassFlagsError', 'already set', bad7 ),
))
arm_skip_daemons = ('openethereum','parity')
def test_cmds(op):
network_ids = CoinDaemon.get_network_ids()
import mmgen.daemon as daemon_mod
for test_suite in [True,False] if op == 'print' else [True]:
vmsg(orange(f'Start commands (op={op}, test_suite={test_suite}):'))
for coin,data in CoinDaemon.coins.items():
for daemon_id in data.daemon_ids:
if daemon_id in arm_skip_daemons:
continue
for network in data.networks:
if opt.no_altcoin_deps and coin != 'BTC': | proto=init_proto(coin=coin,network=network),
daemon_id = daemon_id,
test_suite = test_suite )
if op == 'print':
for cmd in d.start_cmds:
vmsg(' '.join(cmd))
elif op == 'check':
try:
cp = run([d.exec_fn,'--help'],stdout=PIPE,stderr=PIPE)
except:
die(2,f'Unable to execute {d.exec_fn}')
if cp.returncode:
die(2,f'Unable to execute {d.exec_fn}')
else:
vmsg('{:16} {}'.format(
d.exec_fn+':',
cp.stdout.decode().splitlines()[0] ))
else:
if opt.quiet:
msg_r('.')
if op == 'stop' and hasattr(d,'rpc'):
run_session(d.rpc.stop_daemon(quiet=opt.quiet))
else:
getattr(d,op)(silent=opt.quiet)
class unit_tests:
win_skip = ('start','status','stop')
def flags(self,name,ut):
qmsg_r('Testing flags and opts...')
vmsg('')
daemons = test_flags()
qmsg('OK')
qmsg_r('Testing error handling for flags and opts...')
vmsg('')
test_flags_err(ut,daemons)
qmsg('OK')
return True
def cmds(self,name,ut):
qmsg_r('Testing start commands for coin daemons...')
vmsg('')
test_cmds('print')
qmsg('OK')
return True
def exec(self,name,ut):
qmsg_r('Testing availability of coin daemons...')
vmsg('')
test_cmds('check')
qmsg('OK')
return True
def start(self,name,ut):
msg_r('Starting coin daemons...')
qmsg('')
test_cmds('start')
msg('OK')
return True
def status(self,name,ut):
msg_r('Checking status of coin daemons...')
qmsg('')
test_cmds('start')
msg('OK')
return True
def stop(self,name,ut):
msg_r('Stopping coin daemons...')
qmsg('')
test_cmds('stop')
msg('OK')
return True | continue
d = CoinDaemon( | random_line_split |
ut_daemon.py | #!/usr/bin/env python3
"""
test/unit_tests_d/ut_daemon.py: unit test for the MMGen suite's Daemon class
"""
from subprocess import run,DEVNULL
from mmgen.common import *
from mmgen.daemon import *
from mmgen.protocol import init_proto
def test_flags():
d = CoinDaemon('eth')
vmsg(f'Available opts: {fmt_list(d.avail_opts,fmt="bare")}')
vmsg(f'Available flags: {fmt_list(d.avail_flags,fmt="bare")}')
vals = namedtuple('vals',['online','no_daemonize','keep_cfg_file'])
def gen():
for opts,flags,val in (
(None,None, vals(False,False,False)),
(None,['keep_cfg_file'], vals(False,False,True)),
(['online'],['keep_cfg_file'], vals(True,False,True)),
(['online','no_daemonize'],['keep_cfg_file'], vals(True,True,True)),
):
d = CoinDaemon('eth',opts=opts,flags=flags)
assert d.flag.keep_cfg_file == val.keep_cfg_file
assert d.opt.online == val.online
assert d.opt.no_daemonize == val.no_daemonize
d.flag.keep_cfg_file = not val.keep_cfg_file
d.flag.keep_cfg_file = val.keep_cfg_file
yield d
return tuple(gen())
def test_flags_err(ut,d):
def bad1(): d[0].flag.foo = False
def bad2(): d[0].opt.foo = False
def bad3(): d[0].opt.no_daemonize = True
def bad4(): d[0].flag.keep_cfg_file = 'x'
def bad5(): d[0].opt.no_daemonize = 'x'
def bad6(): |
def bad7(): d[1].flag.keep_cfg_file = True
ut.process_bad_data((
('flag (1)', 'ClassFlagsError', 'unrecognized flag', bad1 ),
('opt (1)', 'ClassFlagsError', 'unrecognized opt', bad2 ),
('opt (2)', 'AttributeError', 'is read-only', bad3 ),
('flag (2)', 'AssertionError', 'not boolean', bad4 ),
('opt (3)', 'AttributeError', 'is read-only', bad5 ),
('flag (3)', 'ClassFlagsError', 'not set', bad6 ),
('flag (4)', 'ClassFlagsError', 'already set', bad7 ),
))
arm_skip_daemons = ('openethereum','parity')
def test_cmds(op):
network_ids = CoinDaemon.get_network_ids()
import mmgen.daemon as daemon_mod
for test_suite in [True,False] if op == 'print' else [True]:
vmsg(orange(f'Start commands (op={op}, test_suite={test_suite}):'))
for coin,data in CoinDaemon.coins.items():
for daemon_id in data.daemon_ids:
if daemon_id in arm_skip_daemons:
continue
for network in data.networks:
if opt.no_altcoin_deps and coin != 'BTC':
continue
d = CoinDaemon(
proto=init_proto(coin=coin,network=network),
daemon_id = daemon_id,
test_suite = test_suite )
if op == 'print':
for cmd in d.start_cmds:
vmsg(' '.join(cmd))
elif op == 'check':
try:
cp = run([d.exec_fn,'--help'],stdout=PIPE,stderr=PIPE)
except:
die(2,f'Unable to execute {d.exec_fn}')
if cp.returncode:
die(2,f'Unable to execute {d.exec_fn}')
else:
vmsg('{:16} {}'.format(
d.exec_fn+':',
cp.stdout.decode().splitlines()[0] ))
else:
if opt.quiet:
msg_r('.')
if op == 'stop' and hasattr(d,'rpc'):
run_session(d.rpc.stop_daemon(quiet=opt.quiet))
else:
getattr(d,op)(silent=opt.quiet)
class unit_tests:
win_skip = ('start','status','stop')
def flags(self,name,ut):
qmsg_r('Testing flags and opts...')
vmsg('')
daemons = test_flags()
qmsg('OK')
qmsg_r('Testing error handling for flags and opts...')
vmsg('')
test_flags_err(ut,daemons)
qmsg('OK')
return True
def cmds(self,name,ut):
qmsg_r('Testing start commands for coin daemons...')
vmsg('')
test_cmds('print')
qmsg('OK')
return True
def exec(self,name,ut):
qmsg_r('Testing availability of coin daemons...')
vmsg('')
test_cmds('check')
qmsg('OK')
return True
def start(self,name,ut):
msg_r('Starting coin daemons...')
qmsg('')
test_cmds('start')
msg('OK')
return True
def status(self,name,ut):
msg_r('Checking status of coin daemons...')
qmsg('')
test_cmds('start')
msg('OK')
return True
def stop(self,name,ut):
msg_r('Stopping coin daemons...')
qmsg('')
test_cmds('stop')
msg('OK')
return True
| d[0].flag.keep_cfg_file = False | identifier_body |
driver.py | """
Dummy Salesforce driver that simulates some parts of DB API 2
https://www.python.org/dev/peps/pep-0249/
should be independent on Django.db
and if possible should be independent on django.conf.settings
Code at lower level than DB API should be also here.
"""
from collections import namedtuple
import requests
import socket
from django.conf import settings
from django.utils.six import PY3
try:
import beatbox
except ImportError:
beatbox = None
import logging
log = logging.getLogger(__name__)
apilevel = "2.0"
# threadsafety = ...
# uses '%s' style parameters
paramstyle = 'format'
API_STUB = '/services/data/v35.0'
request_count = 0 # global counter
# All error types described in DB API 2 are implemented the same way as in
# Django 1.6, otherwise some exceptions are not correctly reported in it.
class Error(Exception if PY3 else StandardError):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class SalesforceError(DatabaseError):
"""
DatabaseError that usually gets detailed error information from SF response
in the second parameter, decoded from REST, that frequently need not to be
displayed.
"""
def __init__(self, message='', data=None, response=None, verbose=False):
DatabaseError.__init__(self, message)
self.data = data
self.response = response
self.verbose = verbose
if verbose:
log.info("Error (debug details) %s\n%s", response.text,
response.__dict__)
class Connection(object):
# close and commit can be safely ignored because everything is
# committed automatically and REST is stateles.
def close(self):
pass
def commit(self):
|
def rollback(self):
log.info("Rollback is not implemented.")
# DB API function
def connect(**params):
return Connection()
# LOW LEVEL
def getaddrinfo_wrapper(host, port, family=socket.AF_INET, socktype=0, proto=0, flags=0):
"""Patched 'getaddrinfo' with default family IPv4 (enabled by settings IPV4_ONLY=True)"""
return orig_getaddrinfo(host, port, family, socktype, proto, flags)
# patch to IPv4 if required and not patched by anything other yet
if getattr(settings, 'IPV4_ONLY', False) and socket.getaddrinfo.__module__ in ('socket', '_socket'):
log.info("Patched socket to IPv4 only")
orig_getaddrinfo = socket.getaddrinfo
# replace the original socket.getaddrinfo by our version
socket.getaddrinfo = getaddrinfo_wrapper
# ----
def handle_api_exceptions(url, f, *args, **kwargs):
"""Call REST API and handle exceptions
Params:
f: requests.get or requests.post...
_cursor: sharing the debug information in cursor
"""
#import pdb; pdb.set_trace()
#print("== REQUEST %s | %s | %s | %s" % (url, f, args, kwargs))
global request_count
# The 'verify' option is about verifying SSL certificates
kwargs_in = {'timeout': getattr(settings, 'SALESFORCE_QUERY_TIMEOUT', 3),
'verify': True}
kwargs_in.update(kwargs)
_cursor = kwargs_in.pop('_cursor', None)
log.debug('Request API URL: %s' % url)
request_count += 1
try:
response = f(url, *args, **kwargs_in)
# TODO some timeouts can be rarely raised as "SSLError: The read operation timed out"
except requests.exceptions.Timeout:
raise SalesforceError("Timeout, URL=%s" % url)
if response.status_code == 401:
# Unauthorized (expired or invalid session ID or OAuth)
data = response.json()[0]
if(data['errorCode'] == 'INVALID_SESSION_ID'):
token = f.__self__.auth.reauthenticate()
if('headers' in kwargs):
kwargs['headers'].update(dict(Authorization='OAuth %s' % token))
try:
response = f(url, *args, **kwargs_in)
except requests.exceptions.Timeout:
raise SalesforceError("Timeout, URL=%s" % url)
if response.status_code in (200, 201, 204):
return response
# TODO Remove this verbose setting after tuning of specific messages.
# Currently it is better more or less.
# http://www.salesforce.com/us/developer/docs/api_rest/Content/errorcodes.htm
verbose = not getattr(getattr(_cursor, 'query', None), 'debug_silent', False)
# Errors are reported in the body
data = response.json()[0]
if response.status_code == 404: # ResourceNotFound
if (f.__func__.__name__ == 'delete') and data['errorCode'] in (
'ENTITY_IS_DELETED', 'INVALID_CROSS_REFERENCE_KEY'):
# It is a delete command and the object is in trash bin or
# completely deleted or it only could be a valid Id for this type
# then is ignored similarly to delete by a classic database query:
# DELETE FROM xy WHERE id = 'something_deleted_yet'
return None
else:
# if this Id can not be ever valid.
raise SalesforceError("Couldn't connect to API (404): %s, URL=%s"
% (response.text, url), data, response, verbose
)
if(data['errorCode'] == 'INVALID_FIELD'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'MALFORMED_QUERY'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'INVALID_FIELD_FOR_INSERT_UPDATE'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'METHOD_NOT_ALLOWED'):
raise SalesforceError('%s: %s' % (url, data['message']), data, response, verbose)
# some kind of failed query
else:
raise SalesforceError('%s' % data, data, response, verbose)
| pass | identifier_body |
driver.py | """
Dummy Salesforce driver that simulates some parts of DB API 2
https://www.python.org/dev/peps/pep-0249/
should be independent on Django.db
and if possible should be independent on django.conf.settings
Code at lower level than DB API should be also here.
"""
from collections import namedtuple
import requests
import socket
from django.conf import settings
from django.utils.six import PY3
try: | import beatbox
except ImportError:
beatbox = None
import logging
log = logging.getLogger(__name__)
apilevel = "2.0"
# threadsafety = ...
# uses '%s' style parameters
paramstyle = 'format'
API_STUB = '/services/data/v35.0'
request_count = 0 # global counter
# All error types described in DB API 2 are implemented the same way as in
# Django 1.6, otherwise some exceptions are not correctly reported in it.
class Error(Exception if PY3 else StandardError):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class SalesforceError(DatabaseError):
"""
DatabaseError that usually gets detailed error information from SF response
in the second parameter, decoded from REST, that frequently need not to be
displayed.
"""
def __init__(self, message='', data=None, response=None, verbose=False):
DatabaseError.__init__(self, message)
self.data = data
self.response = response
self.verbose = verbose
if verbose:
log.info("Error (debug details) %s\n%s", response.text,
response.__dict__)
class Connection(object):
# close and commit can be safely ignored because everything is
# committed automatically and REST is stateles.
def close(self):
pass
def commit(self):
pass
def rollback(self):
log.info("Rollback is not implemented.")
# DB API function
def connect(**params):
return Connection()
# LOW LEVEL
def getaddrinfo_wrapper(host, port, family=socket.AF_INET, socktype=0, proto=0, flags=0):
"""Patched 'getaddrinfo' with default family IPv4 (enabled by settings IPV4_ONLY=True)"""
return orig_getaddrinfo(host, port, family, socktype, proto, flags)
# patch to IPv4 if required and not patched by anything other yet
if getattr(settings, 'IPV4_ONLY', False) and socket.getaddrinfo.__module__ in ('socket', '_socket'):
log.info("Patched socket to IPv4 only")
orig_getaddrinfo = socket.getaddrinfo
# replace the original socket.getaddrinfo by our version
socket.getaddrinfo = getaddrinfo_wrapper
# ----
def handle_api_exceptions(url, f, *args, **kwargs):
"""Call REST API and handle exceptions
Params:
f: requests.get or requests.post...
_cursor: sharing the debug information in cursor
"""
#import pdb; pdb.set_trace()
#print("== REQUEST %s | %s | %s | %s" % (url, f, args, kwargs))
global request_count
# The 'verify' option is about verifying SSL certificates
kwargs_in = {'timeout': getattr(settings, 'SALESFORCE_QUERY_TIMEOUT', 3),
'verify': True}
kwargs_in.update(kwargs)
_cursor = kwargs_in.pop('_cursor', None)
log.debug('Request API URL: %s' % url)
request_count += 1
try:
response = f(url, *args, **kwargs_in)
# TODO some timeouts can be rarely raised as "SSLError: The read operation timed out"
except requests.exceptions.Timeout:
raise SalesforceError("Timeout, URL=%s" % url)
if response.status_code == 401:
# Unauthorized (expired or invalid session ID or OAuth)
data = response.json()[0]
if(data['errorCode'] == 'INVALID_SESSION_ID'):
token = f.__self__.auth.reauthenticate()
if('headers' in kwargs):
kwargs['headers'].update(dict(Authorization='OAuth %s' % token))
try:
response = f(url, *args, **kwargs_in)
except requests.exceptions.Timeout:
raise SalesforceError("Timeout, URL=%s" % url)
if response.status_code in (200, 201, 204):
return response
# TODO Remove this verbose setting after tuning of specific messages.
# Currently it is better more or less.
# http://www.salesforce.com/us/developer/docs/api_rest/Content/errorcodes.htm
verbose = not getattr(getattr(_cursor, 'query', None), 'debug_silent', False)
# Errors are reported in the body
data = response.json()[0]
if response.status_code == 404: # ResourceNotFound
if (f.__func__.__name__ == 'delete') and data['errorCode'] in (
'ENTITY_IS_DELETED', 'INVALID_CROSS_REFERENCE_KEY'):
# It is a delete command and the object is in trash bin or
# completely deleted or it only could be a valid Id for this type
# then is ignored similarly to delete by a classic database query:
# DELETE FROM xy WHERE id = 'something_deleted_yet'
return None
else:
# if this Id can not be ever valid.
raise SalesforceError("Couldn't connect to API (404): %s, URL=%s"
% (response.text, url), data, response, verbose
)
if(data['errorCode'] == 'INVALID_FIELD'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'MALFORMED_QUERY'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'INVALID_FIELD_FOR_INSERT_UPDATE'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'METHOD_NOT_ALLOWED'):
raise SalesforceError('%s: %s' % (url, data['message']), data, response, verbose)
# some kind of failed query
else:
raise SalesforceError('%s' % data, data, response, verbose) | random_line_split |
|
driver.py | """
Dummy Salesforce driver that simulates some parts of DB API 2
https://www.python.org/dev/peps/pep-0249/
should be independent on Django.db
and if possible should be independent on django.conf.settings
Code at lower level than DB API should be also here.
"""
from collections import namedtuple
import requests
import socket
from django.conf import settings
from django.utils.six import PY3
try:
import beatbox
except ImportError:
beatbox = None
import logging
log = logging.getLogger(__name__)
apilevel = "2.0"
# threadsafety = ...
# uses '%s' style parameters
paramstyle = 'format'
API_STUB = '/services/data/v35.0'
request_count = 0 # global counter
# All error types described in DB API 2 are implemented the same way as in
# Django 1.6, otherwise some exceptions are not correctly reported in it.
class Error(Exception if PY3 else StandardError):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class SalesforceError(DatabaseError):
"""
DatabaseError that usually gets detailed error information from SF response
in the second parameter, decoded from REST, that frequently need not to be
displayed.
"""
def __init__(self, message='', data=None, response=None, verbose=False):
DatabaseError.__init__(self, message)
self.data = data
self.response = response
self.verbose = verbose
if verbose:
log.info("Error (debug details) %s\n%s", response.text,
response.__dict__)
class Connection(object):
# close and commit can be safely ignored because everything is
# committed automatically and REST is stateles.
def close(self):
pass
def commit(self):
pass
def rollback(self):
log.info("Rollback is not implemented.")
# DB API function
def connect(**params):
return Connection()
# LOW LEVEL
def getaddrinfo_wrapper(host, port, family=socket.AF_INET, socktype=0, proto=0, flags=0):
"""Patched 'getaddrinfo' with default family IPv4 (enabled by settings IPV4_ONLY=True)"""
return orig_getaddrinfo(host, port, family, socktype, proto, flags)
# patch to IPv4 if required and not patched by anything other yet
if getattr(settings, 'IPV4_ONLY', False) and socket.getaddrinfo.__module__ in ('socket', '_socket'):
log.info("Patched socket to IPv4 only")
orig_getaddrinfo = socket.getaddrinfo
# replace the original socket.getaddrinfo by our version
socket.getaddrinfo = getaddrinfo_wrapper
# ----
def handle_api_exceptions(url, f, *args, **kwargs):
"""Call REST API and handle exceptions
Params:
f: requests.get or requests.post...
_cursor: sharing the debug information in cursor
"""
#import pdb; pdb.set_trace()
#print("== REQUEST %s | %s | %s | %s" % (url, f, args, kwargs))
global request_count
# The 'verify' option is about verifying SSL certificates
kwargs_in = {'timeout': getattr(settings, 'SALESFORCE_QUERY_TIMEOUT', 3),
'verify': True}
kwargs_in.update(kwargs)
_cursor = kwargs_in.pop('_cursor', None)
log.debug('Request API URL: %s' % url)
request_count += 1
try:
response = f(url, *args, **kwargs_in)
# TODO some timeouts can be rarely raised as "SSLError: The read operation timed out"
except requests.exceptions.Timeout:
raise SalesforceError("Timeout, URL=%s" % url)
if response.status_code == 401:
# Unauthorized (expired or invalid session ID or OAuth)
data = response.json()[0]
if(data['errorCode'] == 'INVALID_SESSION_ID'):
token = f.__self__.auth.reauthenticate()
if('headers' in kwargs):
kwargs['headers'].update(dict(Authorization='OAuth %s' % token))
try:
response = f(url, *args, **kwargs_in)
except requests.exceptions.Timeout:
raise SalesforceError("Timeout, URL=%s" % url)
if response.status_code in (200, 201, 204):
return response
# TODO Remove this verbose setting after tuning of specific messages.
# Currently it is better more or less.
# http://www.salesforce.com/us/developer/docs/api_rest/Content/errorcodes.htm
verbose = not getattr(getattr(_cursor, 'query', None), 'debug_silent', False)
# Errors are reported in the body
data = response.json()[0]
if response.status_code == 404: # ResourceNotFound
if (f.__func__.__name__ == 'delete') and data['errorCode'] in (
'ENTITY_IS_DELETED', 'INVALID_CROSS_REFERENCE_KEY'):
# It is a delete command and the object is in trash bin or
# completely deleted or it only could be a valid Id for this type
# then is ignored similarly to delete by a classic database query:
# DELETE FROM xy WHERE id = 'something_deleted_yet'
return None
else:
# if this Id can not be ever valid.
|
if(data['errorCode'] == 'INVALID_FIELD'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'MALFORMED_QUERY'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'INVALID_FIELD_FOR_INSERT_UPDATE'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'METHOD_NOT_ALLOWED'):
raise SalesforceError('%s: %s' % (url, data['message']), data, response, verbose)
# some kind of failed query
else:
raise SalesforceError('%s' % data, data, response, verbose)
| raise SalesforceError("Couldn't connect to API (404): %s, URL=%s"
% (response.text, url), data, response, verbose
) | conditional_block |
driver.py | """
Dummy Salesforce driver that simulates some parts of DB API 2
https://www.python.org/dev/peps/pep-0249/
should be independent on Django.db
and if possible should be independent on django.conf.settings
Code at lower level than DB API should be also here.
"""
from collections import namedtuple
import requests
import socket
from django.conf import settings
from django.utils.six import PY3
try:
import beatbox
except ImportError:
beatbox = None
import logging
log = logging.getLogger(__name__)
apilevel = "2.0"
# threadsafety = ...
# uses '%s' style parameters
paramstyle = 'format'
API_STUB = '/services/data/v35.0'
request_count = 0 # global counter
# All error types described in DB API 2 are implemented the same way as in
# Django 1.6, otherwise some exceptions are not correctly reported in it.
class Error(Exception if PY3 else StandardError):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class SalesforceError(DatabaseError):
"""
DatabaseError that usually gets detailed error information from SF response
in the second parameter, decoded from REST, that frequently need not to be
displayed.
"""
def __init__(self, message='', data=None, response=None, verbose=False):
DatabaseError.__init__(self, message)
self.data = data
self.response = response
self.verbose = verbose
if verbose:
log.info("Error (debug details) %s\n%s", response.text,
response.__dict__)
class | (object):
# close and commit can be safely ignored because everything is
# committed automatically and REST is stateles.
def close(self):
pass
def commit(self):
pass
def rollback(self):
log.info("Rollback is not implemented.")
# DB API function
def connect(**params):
return Connection()
# LOW LEVEL
def getaddrinfo_wrapper(host, port, family=socket.AF_INET, socktype=0, proto=0, flags=0):
"""Patched 'getaddrinfo' with default family IPv4 (enabled by settings IPV4_ONLY=True)"""
return orig_getaddrinfo(host, port, family, socktype, proto, flags)
# patch to IPv4 if required and not patched by anything other yet
if getattr(settings, 'IPV4_ONLY', False) and socket.getaddrinfo.__module__ in ('socket', '_socket'):
log.info("Patched socket to IPv4 only")
orig_getaddrinfo = socket.getaddrinfo
# replace the original socket.getaddrinfo by our version
socket.getaddrinfo = getaddrinfo_wrapper
# ----
def handle_api_exceptions(url, f, *args, **kwargs):
"""Call REST API and handle exceptions
Params:
f: requests.get or requests.post...
_cursor: sharing the debug information in cursor
"""
#import pdb; pdb.set_trace()
#print("== REQUEST %s | %s | %s | %s" % (url, f, args, kwargs))
global request_count
# The 'verify' option is about verifying SSL certificates
kwargs_in = {'timeout': getattr(settings, 'SALESFORCE_QUERY_TIMEOUT', 3),
'verify': True}
kwargs_in.update(kwargs)
_cursor = kwargs_in.pop('_cursor', None)
log.debug('Request API URL: %s' % url)
request_count += 1
try:
response = f(url, *args, **kwargs_in)
# TODO some timeouts can be rarely raised as "SSLError: The read operation timed out"
except requests.exceptions.Timeout:
raise SalesforceError("Timeout, URL=%s" % url)
if response.status_code == 401:
# Unauthorized (expired or invalid session ID or OAuth)
data = response.json()[0]
if(data['errorCode'] == 'INVALID_SESSION_ID'):
token = f.__self__.auth.reauthenticate()
if('headers' in kwargs):
kwargs['headers'].update(dict(Authorization='OAuth %s' % token))
try:
response = f(url, *args, **kwargs_in)
except requests.exceptions.Timeout:
raise SalesforceError("Timeout, URL=%s" % url)
if response.status_code in (200, 201, 204):
return response
# TODO Remove this verbose setting after tuning of specific messages.
# Currently it is better more or less.
# http://www.salesforce.com/us/developer/docs/api_rest/Content/errorcodes.htm
verbose = not getattr(getattr(_cursor, 'query', None), 'debug_silent', False)
# Errors are reported in the body
data = response.json()[0]
if response.status_code == 404: # ResourceNotFound
if (f.__func__.__name__ == 'delete') and data['errorCode'] in (
'ENTITY_IS_DELETED', 'INVALID_CROSS_REFERENCE_KEY'):
# It is a delete command and the object is in trash bin or
# completely deleted or it only could be a valid Id for this type
# then is ignored similarly to delete by a classic database query:
# DELETE FROM xy WHERE id = 'something_deleted_yet'
return None
else:
# if this Id can not be ever valid.
raise SalesforceError("Couldn't connect to API (404): %s, URL=%s"
% (response.text, url), data, response, verbose
)
if(data['errorCode'] == 'INVALID_FIELD'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'MALFORMED_QUERY'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'INVALID_FIELD_FOR_INSERT_UPDATE'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'METHOD_NOT_ALLOWED'):
raise SalesforceError('%s: %s' % (url, data['message']), data, response, verbose)
# some kind of failed query
else:
raise SalesforceError('%s' % data, data, response, verbose)
| Connection | identifier_name |
test_favicon.py | from django.test import TestCase
from django.test.utils import override_settings | """
Tests of the courseware favicon.
"""
shard = 1
def test_favicon_redirect(self):
resp = self.client.get("/favicon.ico")
self.assertEqual(resp.status_code, 301)
self.assertRedirects(
resp,
"/static/images/favicon.ico",
status_code=301, target_status_code=404 # @@@ how to avoid 404?
)
@override_settings(FAVICON_PATH="images/foo.ico")
def test_favicon_redirect_with_favicon_path_setting(self):
self.reset_urls()
resp = self.client.get("/favicon.ico")
self.assertEqual(resp.status_code, 301)
self.assertRedirects(
resp,
"/static/images/foo.ico",
status_code=301, target_status_code=404 # @@@ how to avoid 404?
) |
from util.testing import UrlResetMixin
class FaviconTestCase(UrlResetMixin, TestCase): | random_line_split |
test_favicon.py | from django.test import TestCase
from django.test.utils import override_settings
from util.testing import UrlResetMixin
class FaviconTestCase(UrlResetMixin, TestCase):
"""
Tests of the courseware favicon.
"""
shard = 1
def test_favicon_redirect(self):
resp = self.client.get("/favicon.ico")
self.assertEqual(resp.status_code, 301)
self.assertRedirects(
resp,
"/static/images/favicon.ico",
status_code=301, target_status_code=404 # @@@ how to avoid 404?
)
@override_settings(FAVICON_PATH="images/foo.ico")
def | (self):
self.reset_urls()
resp = self.client.get("/favicon.ico")
self.assertEqual(resp.status_code, 301)
self.assertRedirects(
resp,
"/static/images/foo.ico",
status_code=301, target_status_code=404 # @@@ how to avoid 404?
)
| test_favicon_redirect_with_favicon_path_setting | identifier_name |
test_favicon.py | from django.test import TestCase
from django.test.utils import override_settings
from util.testing import UrlResetMixin
class FaviconTestCase(UrlResetMixin, TestCase):
"""
Tests of the courseware favicon.
"""
shard = 1
def test_favicon_redirect(self):
|
@override_settings(FAVICON_PATH="images/foo.ico")
def test_favicon_redirect_with_favicon_path_setting(self):
self.reset_urls()
resp = self.client.get("/favicon.ico")
self.assertEqual(resp.status_code, 301)
self.assertRedirects(
resp,
"/static/images/foo.ico",
status_code=301, target_status_code=404 # @@@ how to avoid 404?
)
| resp = self.client.get("/favicon.ico")
self.assertEqual(resp.status_code, 301)
self.assertRedirects(
resp,
"/static/images/favicon.ico",
status_code=301, target_status_code=404 # @@@ how to avoid 404?
) | identifier_body |
aspirasi.module.ts | import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common'; |
import { routing } from './aspirasi.routing';
import { Aspirasi } from './aspirasi.component';
import { Tables } from './components/tables/tables.component';
import { CustomEditorComponent } from '../../shared/custom-editor.component';
import { CustomRenderComponent } from '../../shared/custom-render.component';
@NgModule({
imports: [
CommonModule,
FormsModule,
NgaModule,
routing,
DropdownModule.forRoot(),
Ng2SmartTableModule
],
entryComponents: [CustomEditorComponent, CustomRenderComponent],
declarations: [
Aspirasi,
Tables,
CustomEditorComponent,
CustomRenderComponent
],
providers: [
]
})
export class AspirasiModule {} | import { FormsModule } from '@angular/forms';
import { NgaModule } from '../../theme/nga.module';
import { Ng2SmartTableModule } from 'ng2-smart-table';
import { DropdownModule} from 'ng2-bootstrap'; | random_line_split |
aspirasi.module.ts | import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { FormsModule } from '@angular/forms';
import { NgaModule } from '../../theme/nga.module';
import { Ng2SmartTableModule } from 'ng2-smart-table';
import { DropdownModule} from 'ng2-bootstrap';
import { routing } from './aspirasi.routing';
import { Aspirasi } from './aspirasi.component';
import { Tables } from './components/tables/tables.component';
import { CustomEditorComponent } from '../../shared/custom-editor.component';
import { CustomRenderComponent } from '../../shared/custom-render.component';
@NgModule({
imports: [
CommonModule,
FormsModule,
NgaModule,
routing,
DropdownModule.forRoot(),
Ng2SmartTableModule
],
entryComponents: [CustomEditorComponent, CustomRenderComponent],
declarations: [
Aspirasi,
Tables,
CustomEditorComponent,
CustomRenderComponent
],
providers: [
]
})
export class | {}
| AspirasiModule | identifier_name |
recover.py | # This file is part of VoltDB.
# Copyright (C) 2008-2014 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
@VOLT.Command(
# Uses all default except last is safemode switch availability
bundles = VOLT.ServerBundle('recover',
needs_catalog=False,
supports_live=False,
default_host=True,
safemode_available=True,
supports_daemon=True,
supports_multiple_daemons=True),
description = 'Start the database and recover the previous state.'
)
def | (runner):
runner.go()
| recover | identifier_name |
recover.py | # This file is part of VoltDB.
# Copyright (C) 2008-2014 VoltDB Inc.
#
# This file contains original code and/or modifications of original code. | # Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
@VOLT.Command(
# Uses all default except last is safemode switch availability
bundles = VOLT.ServerBundle('recover',
needs_catalog=False,
supports_live=False,
default_host=True,
safemode_available=True,
supports_daemon=True,
supports_multiple_daemons=True),
description = 'Start the database and recover the previous state.'
)
def recover(runner):
runner.go() | random_line_split |
|
recover.py | # This file is part of VoltDB.
# Copyright (C) 2008-2014 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
@VOLT.Command(
# Uses all default except last is safemode switch availability
bundles = VOLT.ServerBundle('recover',
needs_catalog=False,
supports_live=False,
default_host=True,
safemode_available=True,
supports_daemon=True,
supports_multiple_daemons=True),
description = 'Start the database and recover the previous state.'
)
def recover(runner):
| runner.go() | identifier_body |
|
rt.rs | // Copyright 2013-2015, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
//! General — Library initialization and miscellaneous functions
use std::cell::Cell;
use std::ptr;
use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
use glib::translate::*;
use ffi;
thread_local! {
static IS_MAIN_THREAD: Cell<bool> = Cell::new(false)
}
static INITIALIZED: AtomicBool = ATOMIC_BOOL_INIT;
/// Asserts that this is the main thread and either `gdk::init` or `gtk::init` has been called.
macro_rules! assert_initialized_main_thread {
() => (
if !::rt::is_initialized_main_thread() {
if ::rt::is_initialized() {
panic!("GDK may only be used from the main thread.");
}
else {
panic!("GDK has not been initialized. Call `gdk::init` or `gtk::init` first.");
}
}
)
}
/// No-op.
macro_rules! skip_assert_initialized {
() => ()
}
/// Asserts that neither `gdk::init` nor `gtk::init` has been called.
macro_rules! assert_not_initialized {
() => (
if ::rt::is_initialized() {
panic!("This function has to be called before `gdk::init` or `gtk::init`.");
}
)
}
/// Returns `true` if GDK has been initialized.
#[inline]
pub fn is_initialized() -> bool {
skip_assert_initialized!();
INITIALIZED.load(Ordering::Acquire)
}
/// Returns `true` if GDK has been initialized and this is the main thread.
#[inline]
pub fn is_initialized_main_thread() -> bool {
skip_assert_initialized!();
IS_MAIN_THREAD.with(|c| c.get())
}
/// Informs this crate that GDK has been initialized and the current thread is the main one.
pub unsafe fn set_initialized() {
skip_assert_initialized!();
if is_initialized_main_thread() {
return;
}
else if is_initialized() {
panic!("Attempted to initialize GDK from two different threads.");
}
INITIALIZED.store(true, Ordering::Release);
IS_MAIN_THREAD.with(|c| c.set(true));
}
pub fn init() {
assert_not_initialized!();
unsafe {
ffi::gdk_init(ptr::null_mut(), ptr::null_mut());
set_initialized();
}
}
pub fn get_display_arg_name() -> Option<String> {
assert_initialized_main_thread!();
unsafe {
from_glib_none(ffi::gdk_get_display_arg_name())
}
}
pub fn notify_startup_complete() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_notify_startup_complete() }
}
pub fn notify_startup_complete_with_id(startup_id: &str) {
assert_initialized_main_thread!();
unsafe {
ffi::gdk_notify_startup_complete_with_id(startup_id.to_glib_none().0);
}
}
#[cfg(feature = "3.10")]
pub fn set_allowed_backends(backends: &str) {
assert_not_initialized!();
unsafe {
ffi::gdk_set_allowed_backends(backends.to_glib_none().0)
}
}
pub fn get_program_class() -> Option<String> {
assert_initialized_main_thread!();
unsafe {
from_glib_none(ffi::gdk_get_program_class())
}
}
pub fn set_program_class(program_class: &str) {
assert_initialized_main_thread!();
unsafe {
ffi::gdk_set_program_class(program_class.to_glib_none().0)
}
}
pub fn flush() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_flush() }
}
pub fn screen_width() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_width() }
}
pub fn screen_height() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_height() }
}
pub fn screen_width_mm() -> i32 {
assert_initialized_main_thread!(); | assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_height_mm() }
}
pub fn beep() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_flush() }
}
pub fn error_trap_push() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_error_trap_push() }
}
pub fn error_trap_pop() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_error_trap_pop() }
}
pub fn error_trap_pop_ignored() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_error_trap_pop_ignored() }
} | unsafe { ffi::gdk_screen_width_mm() }
}
pub fn screen_height_mm() -> i32 { | random_line_split |
rt.rs | // Copyright 2013-2015, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
//! General — Library initialization and miscellaneous functions
use std::cell::Cell;
use std::ptr;
use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
use glib::translate::*;
use ffi;
thread_local! {
static IS_MAIN_THREAD: Cell<bool> = Cell::new(false)
}
static INITIALIZED: AtomicBool = ATOMIC_BOOL_INIT;
/// Asserts that this is the main thread and either `gdk::init` or `gtk::init` has been called.
macro_rules! assert_initialized_main_thread {
() => (
if !::rt::is_initialized_main_thread() {
if ::rt::is_initialized() {
panic!("GDK may only be used from the main thread.");
}
else {
panic!("GDK has not been initialized. Call `gdk::init` or `gtk::init` first.");
}
}
)
}
/// No-op.
macro_rules! skip_assert_initialized {
() => ()
}
/// Asserts that neither `gdk::init` nor `gtk::init` has been called.
macro_rules! assert_not_initialized {
() => (
if ::rt::is_initialized() {
panic!("This function has to be called before `gdk::init` or `gtk::init`.");
}
)
}
/// Returns `true` if GDK has been initialized.
#[inline]
pub fn is_initialized() -> bool {
skip_assert_initialized!();
INITIALIZED.load(Ordering::Acquire)
}
/// Returns `true` if GDK has been initialized and this is the main thread.
#[inline]
pub fn is_initialized_main_thread() -> bool {
skip_assert_initialized!();
IS_MAIN_THREAD.with(|c| c.get())
}
/// Informs this crate that GDK has been initialized and the current thread is the main one.
pub unsafe fn set_initialized() {
skip_assert_initialized!();
if is_initialized_main_thread() {
return;
}
else if is_initialized() {
panic!("Attempted to initialize GDK from two different threads.");
}
INITIALIZED.store(true, Ordering::Release);
IS_MAIN_THREAD.with(|c| c.set(true));
}
pub fn init() {
assert_not_initialized!();
unsafe {
ffi::gdk_init(ptr::null_mut(), ptr::null_mut());
set_initialized();
}
}
pub fn ge | -> Option<String> {
assert_initialized_main_thread!();
unsafe {
from_glib_none(ffi::gdk_get_display_arg_name())
}
}
pub fn notify_startup_complete() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_notify_startup_complete() }
}
pub fn notify_startup_complete_with_id(startup_id: &str) {
assert_initialized_main_thread!();
unsafe {
ffi::gdk_notify_startup_complete_with_id(startup_id.to_glib_none().0);
}
}
#[cfg(feature = "3.10")]
pub fn set_allowed_backends(backends: &str) {
assert_not_initialized!();
unsafe {
ffi::gdk_set_allowed_backends(backends.to_glib_none().0)
}
}
pub fn get_program_class() -> Option<String> {
assert_initialized_main_thread!();
unsafe {
from_glib_none(ffi::gdk_get_program_class())
}
}
pub fn set_program_class(program_class: &str) {
assert_initialized_main_thread!();
unsafe {
ffi::gdk_set_program_class(program_class.to_glib_none().0)
}
}
pub fn flush() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_flush() }
}
pub fn screen_width() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_width() }
}
pub fn screen_height() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_height() }
}
pub fn screen_width_mm() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_width_mm() }
}
pub fn screen_height_mm() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_height_mm() }
}
pub fn beep() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_flush() }
}
pub fn error_trap_push() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_error_trap_push() }
}
pub fn error_trap_pop() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_error_trap_pop() }
}
pub fn error_trap_pop_ignored() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_error_trap_pop_ignored() }
}
| t_display_arg_name() | identifier_name |
rt.rs | // Copyright 2013-2015, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
//! General — Library initialization and miscellaneous functions
use std::cell::Cell;
use std::ptr;
use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
use glib::translate::*;
use ffi;
thread_local! {
static IS_MAIN_THREAD: Cell<bool> = Cell::new(false)
}
static INITIALIZED: AtomicBool = ATOMIC_BOOL_INIT;
/// Asserts that this is the main thread and either `gdk::init` or `gtk::init` has been called.
macro_rules! assert_initialized_main_thread {
() => (
if !::rt::is_initialized_main_thread() {
if ::rt::is_initialized() {
panic!("GDK may only be used from the main thread.");
}
else {
panic!("GDK has not been initialized. Call `gdk::init` or `gtk::init` first.");
}
}
)
}
/// No-op.
macro_rules! skip_assert_initialized {
() => ()
}
/// Asserts that neither `gdk::init` nor `gtk::init` has been called.
macro_rules! assert_not_initialized {
() => (
if ::rt::is_initialized() {
panic!("This function has to be called before `gdk::init` or `gtk::init`.");
}
)
}
/// Returns `true` if GDK has been initialized.
#[inline]
pub fn is_initialized() -> bool {
skip_assert_initialized!();
INITIALIZED.load(Ordering::Acquire)
}
/// Returns `true` if GDK has been initialized and this is the main thread.
#[inline]
pub fn is_initialized_main_thread() -> bool {
skip_assert_initialized!();
IS_MAIN_THREAD.with(|c| c.get())
}
/// Informs this crate that GDK has been initialized and the current thread is the main one.
pub unsafe fn set_initialized() {
skip_assert_initialized!();
if is_initialized_main_thread() {
| else if is_initialized() {
panic!("Attempted to initialize GDK from two different threads.");
}
INITIALIZED.store(true, Ordering::Release);
IS_MAIN_THREAD.with(|c| c.set(true));
}
pub fn init() {
assert_not_initialized!();
unsafe {
ffi::gdk_init(ptr::null_mut(), ptr::null_mut());
set_initialized();
}
}
pub fn get_display_arg_name() -> Option<String> {
assert_initialized_main_thread!();
unsafe {
from_glib_none(ffi::gdk_get_display_arg_name())
}
}
pub fn notify_startup_complete() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_notify_startup_complete() }
}
pub fn notify_startup_complete_with_id(startup_id: &str) {
assert_initialized_main_thread!();
unsafe {
ffi::gdk_notify_startup_complete_with_id(startup_id.to_glib_none().0);
}
}
#[cfg(feature = "3.10")]
pub fn set_allowed_backends(backends: &str) {
assert_not_initialized!();
unsafe {
ffi::gdk_set_allowed_backends(backends.to_glib_none().0)
}
}
pub fn get_program_class() -> Option<String> {
assert_initialized_main_thread!();
unsafe {
from_glib_none(ffi::gdk_get_program_class())
}
}
pub fn set_program_class(program_class: &str) {
assert_initialized_main_thread!();
unsafe {
ffi::gdk_set_program_class(program_class.to_glib_none().0)
}
}
pub fn flush() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_flush() }
}
pub fn screen_width() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_width() }
}
pub fn screen_height() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_height() }
}
pub fn screen_width_mm() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_width_mm() }
}
pub fn screen_height_mm() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_height_mm() }
}
pub fn beep() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_flush() }
}
pub fn error_trap_push() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_error_trap_push() }
}
pub fn error_trap_pop() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_error_trap_pop() }
}
pub fn error_trap_pop_ignored() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_error_trap_pop_ignored() }
}
| return;
}
| conditional_block |
rt.rs | // Copyright 2013-2015, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
//! General — Library initialization and miscellaneous functions
use std::cell::Cell;
use std::ptr;
use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
use glib::translate::*;
use ffi;
thread_local! {
static IS_MAIN_THREAD: Cell<bool> = Cell::new(false)
}
static INITIALIZED: AtomicBool = ATOMIC_BOOL_INIT;
/// Asserts that this is the main thread and either `gdk::init` or `gtk::init` has been called.
macro_rules! assert_initialized_main_thread {
() => (
if !::rt::is_initialized_main_thread() {
if ::rt::is_initialized() {
panic!("GDK may only be used from the main thread.");
}
else {
panic!("GDK has not been initialized. Call `gdk::init` or `gtk::init` first.");
}
}
)
}
/// No-op.
macro_rules! skip_assert_initialized {
() => ()
}
/// Asserts that neither `gdk::init` nor `gtk::init` has been called.
macro_rules! assert_not_initialized {
() => (
if ::rt::is_initialized() {
panic!("This function has to be called before `gdk::init` or `gtk::init`.");
}
)
}
/// Returns `true` if GDK has been initialized.
#[inline]
pub fn is_initialized() -> bool {
skip_assert_initialized!();
INITIALIZED.load(Ordering::Acquire)
}
/// Returns `true` if GDK has been initialized and this is the main thread.
#[inline]
pub fn is_initialized_main_thread() -> bool {
skip_assert_initialized!();
IS_MAIN_THREAD.with(|c| c.get())
}
/// Informs this crate that GDK has been initialized and the current thread is the main one.
pub unsafe fn set_initialized() {
| pub fn init() {
assert_not_initialized!();
unsafe {
ffi::gdk_init(ptr::null_mut(), ptr::null_mut());
set_initialized();
}
}
pub fn get_display_arg_name() -> Option<String> {
assert_initialized_main_thread!();
unsafe {
from_glib_none(ffi::gdk_get_display_arg_name())
}
}
pub fn notify_startup_complete() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_notify_startup_complete() }
}
pub fn notify_startup_complete_with_id(startup_id: &str) {
assert_initialized_main_thread!();
unsafe {
ffi::gdk_notify_startup_complete_with_id(startup_id.to_glib_none().0);
}
}
#[cfg(feature = "3.10")]
pub fn set_allowed_backends(backends: &str) {
assert_not_initialized!();
unsafe {
ffi::gdk_set_allowed_backends(backends.to_glib_none().0)
}
}
pub fn get_program_class() -> Option<String> {
assert_initialized_main_thread!();
unsafe {
from_glib_none(ffi::gdk_get_program_class())
}
}
pub fn set_program_class(program_class: &str) {
assert_initialized_main_thread!();
unsafe {
ffi::gdk_set_program_class(program_class.to_glib_none().0)
}
}
pub fn flush() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_flush() }
}
pub fn screen_width() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_width() }
}
pub fn screen_height() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_height() }
}
pub fn screen_width_mm() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_width_mm() }
}
pub fn screen_height_mm() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_screen_height_mm() }
}
pub fn beep() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_flush() }
}
pub fn error_trap_push() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_error_trap_push() }
}
pub fn error_trap_pop() -> i32 {
assert_initialized_main_thread!();
unsafe { ffi::gdk_error_trap_pop() }
}
pub fn error_trap_pop_ignored() {
assert_initialized_main_thread!();
unsafe { ffi::gdk_error_trap_pop_ignored() }
}
| skip_assert_initialized!();
if is_initialized_main_thread() {
return;
}
else if is_initialized() {
panic!("Attempted to initialize GDK from two different threads.");
}
INITIALIZED.store(true, Ordering::Release);
IS_MAIN_THREAD.with(|c| c.set(true));
}
| identifier_body |
cpNbnet.py | 节
self.need_write = 0 # 需要写的字节
self.buff_read = "" # 读缓存
self.buff_write = "" # 写缓存
self.sock_obj = "" # sock对象
def printState(self):
if DEBUG:
dbgPrint('\n - current state of fd: %d' % self.sock_obj.fileno())
dbgPrint(" - - state: %s" % self.state)
dbgPrint(" - - have_read: %s" % self.have_read)
dbgPrint(" - - need_read: %s" % self.need_read)
dbgPrint(" - - have_write: %s" % self.have_write)
dbgPrint(" - - need_write: %s" % self.need_write)
dbgPrint(" - - buff_read: %s" % self.buff_read)
dbgPrint(" - - buff_write: %s" % self.buff_write)
dbgPrint(" - - sock_obj: %s" % self.sock_obj)
class nbNetBase:
def setFd(self, sock):
dbgPrint("\n setFd start")
tmp_state = STATE() # 实例化类
tmp_state.sock_obj = sock # 定义类中sock
self.conn_state[sock.fileno()] = tmp_state # 把sock加入到字典中
self.conn_state[sock.fileno()].printState()
dbgPrint("\n setFd end")
def accept(self, fd):
dbgPrint("\n accept start!")
sock_state = self.conn_state[fd] # 取出fd对应连接
sock = sock_state.sock_obj # 取出fd的sock
conn, addr = sock.accept() # 取出连接请求
conn.setblocking(0) # 设置非阻塞模式
return conn # 返回连接
def close(self, fd):
try:
sock = self.conn_state[fd].sock_obj # 取出fd的sock
sock.close() # 关闭sock
except:
dbgPrint("Close fd: %s" % fd)
finally:
self.epoll_sock.unregister(fd) # 将fd重epoll中注销
self.conn_state.pop(fd) # 踢出字典
def read(self, fd):
try:
sock_state = self.conn_state[fd] # 取出fd对应连接
conn = sock_state.sock_obj # 取出fd连接请求
if sock_state.need_read <= 0: # 需要读取字节为空报错
raise socket.error
one_read = conn.recv(sock_state.need_read) # 读取传输的字符
dbgPrint("\n func fd: %d, one_read: %s, need_read: %d" %
(fd, one_read, sock_state.need_read))
if len(one_re | 0报错
raise socket.error
sock_state.buff_read += one_read # 把读取数据存到读缓存中
sock_state.have_read += len(one_read) # 已经读取完的数据量
sock_state.need_read -= len(one_read) # 还需要读取数据的量
sock_state.printState()
if sock_state.have_read == 10: # 10字节为头文件处理
header_said_need_read = int(sock_state.have_read) # 读取数据的量
if header_said_need_read <= 0: # 如果还需读0字节报错
raise socket.error
sock_state.need_read += header_said_need_read # 还需读取数量变化
sock_state.buff_read = '' # 读缓存清空
sock_state.printState()
return "readcontent" # 还需读取数据
elif sock_state.need_read == 0:
return "process" # 读取数据完成,转换状态
else:
return "readmore" # 还需读取数据
except (socket.error, ValueError), msg:
try:
if msg.errno == 11: # errno等于11,尝试进行一次读取
dbgPrint("11" + msg)
return "retry"
except:
pass
return "closing"
def write(self, fd):
sock_state = self.conn_state[fd] # 取出fd对应的连接构造体
conn = sock_state.sock_obj # 取出fd对于连接
last_have_send = sock_state.have_write # 已经写数据的量
try:
have_send = conn.send(
sock_state.buff_write[last_have_send:]) # 发送剩下的数据
sock_state.have_write += have_send # 已经写的数据量
sock_state.need_write -= have_send # 还需写的数据量
if sock_state.need_write == 0 and sock_state.have_write != 0: # 写数据完成
sock_state.printState()
dbgPrint("\n write date end")
return "writecomplete" # 返回写入完成
else:
return "writemore" # 返回计算写入
except socket.error, msg:
return "closing"
def run(self):
while True:
epoll_list = self.epoll_sock.poll() # 定义poll()事件发生的list
for fd, events in epoll_list:
sock_state = self.conn_state[fd] # 取出fd构造体
if select.EPOLLHUP & events: # 文件描述符挂断
dbgPrint("EPOLLHUP")
sock_state.state = "closing" # fd状态设置为closing
elif select.EPOLLERR & events:
dbgPrint("EPOLLERR") # 文件描述符出错
sock_state.state = "closing" # 对应fd状态为closing
self.state_machine(fd) # 状态机调用
def state_machine(self, fd):
sock_state = self.conn_state[fd] # fd构造体
self.sm[sock_state.state](fd) # 通过sm字典调用对应状态的函数
class nbNet(nbNetBase):
def __init__(self, addr, port, logic):
dbgPrint('\n__init__: start!')
self.conn_state = {} # 定义字典保存每个连接状态
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listen_sock.bind((addr, port))
self.listen_sock.listen(10) # 排队长度
self.setFd(self.listen_sock) # 定义listen socket 放入字典conn_state
self.epoll_sock = select.epoll() # 初始化fd的epoll
self.epoll_sock.register(
self.listen_sock.fileno(), select.EPOLLIN) # linten可以读的描述符
self.logic = logic # 业务处理
self.sm = {
"accept": self.accept2read,
"read": self.read2process,
"write": self.write2read,
"process": self.process,
"closing": self.close,
} # 状态调用机的字典
dbgPrint('\n__init__: end, register no: %s' %
self.listen_sock.fileno())
def process(self, fd):
sock_state = self.conn_state[fd]
response = self.logic(sock_state.buff_read) # 业务函数处理
sock_state.buff_write = "%010d%s" % (len(response), response) # 发送的数据
sock_state.need_write = len(sock_state.buff_write) # 需要发送的长度
sock_state.state = "write" # fd对应的状态
self.epoll_sock.modify(fd, select.EPOLLOUT) # fd对应的epoll为改写模式
sock_state.printState()
def accept2read(self, fd):
conn = self.accept(fd)
self.epoll_sock.register(
conn.fileno(), select.EPOLLIN) # 发送数据后重新将fd的epoll改成读
self.setFd(conn) # fd生成构造体
self.conn_state[conn.fileno()].state = "read" # fd状态为read
dbgPrint("\n -- accept end!")
def read2process(self, fd):
read_ret = ""
# 状态转换
try:
read_ret = self.read(fd) # read函数返回值
except (Exception), msg:
dbgPrint(msg)
read_ret = "closing"
if read_ret == "process": # 读取完成,转换到process
self.process(fd)
elif read_ret == "readcontent": # readcontent、readmore、retry 继续读取
pass
elif read_ret == "readmore":
pass
elif read_ret == "retry":
pass
elif read_ret == "closing":
self.conn_state[fd].state = 'closing' # 状态为 | ad) == 0: # 读取数据为 | conditional_block |
cpNbnet.py | 字节
self.need_write = 0 # 需要写的字节
self.buff_read = "" # 读缓存
self.buff_write = "" # 写缓存
self.sock_obj = "" # sock对象
def printState(self):
if DEBUG:
dbgPrint('\n - current state of fd: %d' % self.sock_obj.fileno())
dbgPrint(" - - state: %s" % self.state)
dbgPrint(" - - have_read: %s" % self.have_read)
dbgPrint(" - - need_read: %s" % self.need_read)
dbgPrint(" - - have_write: %s" % self.have_write)
dbgPrint(" - - need_write: %s" % self.need_write)
dbgPrint(" - - buff_read: %s" % self.buff_read)
dbgPrint(" - - buff_write: %s" % self.buff_write)
dbgPrint(" - - sock_obj: %s" % self.sock_obj)
class nbNetBase:
def setFd(self, sock):
dbgPrint("\n setFd start")
tmp_state = STATE() # 实例化类
tmp_state.sock_obj = sock # 定义类中sock
self.conn_state[sock.fileno()] = tmp_state # 把sock加入到字典中
self.conn_state[sock.fileno()].printState()
dbgPrint("\n setFd end")
def accept(self, fd):
dbgPrint("\n accept start!")
sock_state = self.conn_state[fd] # 取出fd对应连接
sock = sock_state.sock_obj # 取出fd的sock
conn, addr = sock.accept() # 取出连接请求
conn.setblocking(0) # 设置非阻塞模式
return conn # 返回连接
def close(self, fd):
try:
sock = self.conn_state[fd].sock_obj # 取出fd的sock
sock.close() # 关闭sock
except:
dbgPrint("Close fd: %s" % fd)
finally:
self.epoll_sock.unregister(fd) # 将fd重epoll中注销
self.conn_state.pop(fd) # 踢出字典
def read(self, fd):
try:
sock_state = self.conn_state[fd] # 取出fd对应连接
conn = sock_state.sock_obj # 取出fd连接请求
if sock_state.need_read <= 0: # 需要读取字节为空报错
raise socket.error
one_read = conn.recv(sock_state.need_read) # 读取传输的字符
dbgPrint("\n func fd: %d, one_read: %s, need_read: %d" %
(fd, one_read, sock_state.need_read))
if len(one_read) == 0: # 读取数据为0报错 | sock_state.printState()
if sock_state.have_read == 10: # 10字节为头文件处理
header_said_need_read = int(sock_state.have_read) # 读取数据的量
if header_said_need_read <= 0: # 如果还需读0字节报错
raise socket.error
sock_state.need_read += header_said_need_read # 还需读取数量变化
sock_state.buff_read = '' # 读缓存清空
sock_state.printState()
return "readcontent" # 还需读取数据
elif sock_state.need_read == 0:
return "process" # 读取数据完成,转换状态
else:
return "readmore" # 还需读取数据
except (socket.error, ValueError), msg:
try:
if msg.errno == 11: # errno等于11,尝试进行一次读取
dbgPrint("11" + msg)
return "retry"
except:
pass
return "closing"
def write(self, fd):
sock_state = self.conn_state[fd] # 取出fd对应的连接构造体
conn = sock_state.sock_obj # 取出fd对于连接
last_have_send = sock_state.have_write # 已经写数据的量
try:
have_send = conn.send(
sock_state.buff_write[last_have_send:]) # 发送剩下的数据
sock_state.have_write += have_send # 已经写的数据量
sock_state.need_write -= have_send # 还需写的数据量
if sock_state.need_write == 0 and sock_state.have_write != 0: # 写数据完成
sock_state.printState()
dbgPrint("\n write date end")
return "writecomplete" # 返回写入完成
else:
return "writemore" # 返回计算写入
except socket.error, msg:
return "closing"
def run(self):
while True:
epoll_list = self.epoll_sock.poll() # 定义poll()事件发生的list
for fd, events in epoll_list:
sock_state = self.conn_state[fd] # 取出fd构造体
if select.EPOLLHUP & events: # 文件描述符挂断
dbgPrint("EPOLLHUP")
sock_state.state = "closing" # fd状态设置为closing
elif select.EPOLLERR & events:
dbgPrint("EPOLLERR") # 文件描述符出错
sock_state.state = "closing" # 对应fd状态为closing
self.state_machine(fd) # 状态机调用
def state_machine(self, fd):
sock_state = self.conn_state[fd] # fd构造体
self.sm[sock_state.state](fd) # 通过sm字典调用对应状态的函数
class nbNet(nbNetBase):
def __init__(self, addr, port, logic):
dbgPrint('\n__init__: start!')
self.conn_state = {} # 定义字典保存每个连接状态
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listen_sock.bind((addr, port))
self.listen_sock.listen(10) # 排队长度
self.setFd(self.listen_sock) # 定义listen socket 放入字典conn_state
self.epoll_sock = select.epoll() # 初始化fd的epoll
self.epoll_sock.register(
self.listen_sock.fileno(), select.EPOLLIN) # linten可以读的描述符
self.logic = logic # 业务处理
self.sm = {
"accept": self.accept2read,
"read": self.read2process,
"write": self.write2read,
"process": self.process,
"closing": self.close,
} # 状态调用机的字典
dbgPrint('\n__init__: end, register no: %s' %
self.listen_sock.fileno())
def process(self, fd):
sock_state = self.conn_state[fd]
response = self.logic(sock_state.buff_read) # 业务函数处理
sock_state.buff_write = "%010d%s" % (len(response), response) # 发送的数据
sock_state.need_write = len(sock_state.buff_write) # 需要发送的长度
sock_state.state = "write" # fd对应的状态
self.epoll_sock.modify(fd, select.EPOLLOUT) # fd对应的epoll为改写模式
sock_state.printState()
def accept2read(self, fd):
conn = self.accept(fd)
self.epoll_sock.register(
conn.fileno(), select.EPOLLIN) # 发送数据后重新将fd的epoll改成读
self.setFd(conn) # fd生成构造体
self.conn_state[conn.fileno()].state = "read" # fd状态为read
dbgPrint("\n -- accept end!")
def read2process(self, fd):
read_ret = ""
# 状态转换
try:
read_ret = self.read(fd) # read函数返回值
except (Exception), msg:
dbgPrint(msg)
read_ret = "closing"
if read_ret == "process": # 读取完成,转换到process
self.process(fd)
elif read_ret == "readcontent": # readcontent、readmore、retry 继续读取
pass
elif read_ret == "readmore":
pass
elif read_ret == "retry":
pass
elif read_ret == "closing":
self.conn_state[fd].state = 'closing' # 状态为closing关闭 | raise socket.error
sock_state.buff_read += one_read # 把读取数据存到读缓存中
sock_state.have_read += len(one_read) # 已经读取完的数据量
sock_state.need_read -= len(one_read) # 还需要读取数据的量 | random_line_split |
cpNbnet.py | 节
self.need_write = 0 # 需要写的字节
self.buff_read = "" # 读缓存
self.buff_write = "" # 写缓存
self.sock_obj = "" # sock对象
def printState(self):
if DEBUG:
dbgPrint('\n - current state of fd: %d' % self.sock_obj.fileno())
dbgPrint(" - - state: %s" % self.state)
dbgPrint(" - - have_read: %s" % self.have_read)
dbgPrint(" - - need_read: %s" % self.need_read)
dbgPrint(" - - have_write: %s" % self.have_write)
dbgPrint(" - - need_write: %s" % self.need_write)
dbgPrint(" - - buff_read: %s" % self.buff_read)
dbgPrint(" - - buff_write: %s" % self.buff_write)
dbgPrint(" - - sock_obj: %s" % self.sock_obj)
class nbNetBase:
def setFd(self, sock):
dbgPrint("\n setFd start")
tmp_state = STATE() # 实例化类
tmp_state.sock_obj = sock # 定义类中sock
self.conn_state[sock.fileno()] = tmp_state # 把sock加入到字典中
self.conn_state[sock.fileno()].printState()
dbgPrint("\n setFd end")
def accept(self, fd):
dbgPrint("\n accept start!")
sock_state = self.conn_state[fd] # 取出fd对应连接
sock = sock_state.sock_obj # 取出fd的sock
conn, addr = sock.accept() # 取出连接请求
conn.setblocking(0) # 设置非阻塞模式
return conn # 返回连接
def close(self, fd):
try:
sock = self.conn_state[fd].sock_obj # 取出fd的sock
sock.close() # 关闭sock
except:
dbgPrint("Close fd: %s" % fd)
finally:
self.epoll_sock.unregister(fd) # 将fd重epoll中注销
self.conn_state.pop(fd) # 踢出字典
def read(self, fd):
try:
sock_state = self.conn_state[fd] # 取出fd对应连接
conn = sock_state.sock_obj # 取出fd连接请求
if sock_state.need_read <= 0: # 需要读取字节为空报错
raise socket.error
one_read = conn.recv(sock_state.need_read) # 读取传输的字符
dbgPrint("\n func fd: %d, one_read: %s, need_read: %d" %
(fd, one_read, sock_state.need_read))
if len(one_read) == 0: # 读取数据为0报错
raise socket.error
sock_state.buff_read += one_read # 把读取数据存到读缓存中
sock_state.have_read += len(one_read) # 已经读取完的数据量
sock_state.need_read -= len(one_read) # 还需要读取数据的量
sock_state.printState()
if sock_state.have_read == 10: # 10字节为头文件处理
header_said_need_read = int(sock_state.have_read) # 读取数据的量
if header_said_need_read <= 0: # 如果还需读0字节报错
raise socket.error
sock_state.need_read += header_said_need_read # 还需读取数量变化
sock_state.buff_read = '' # 读缓存清空
sock_state.printState()
return "readcontent" # 还需读取数据
elif sock_state.need_read == 0:
return "process" # 读取数据完成,转换状态
else:
return "readmore" # 还需读取数据
except (socket.error, ValueError), msg:
try:
if msg.errno == 11: # errno等于11,尝试进行一次读取
dbgPrint("11" + msg)
return "retry"
except:
pass
return "closing"
def write(self, fd):
sock_state = self.conn_state[fd] # 取出fd对应的连接构造体
conn = sock_state.sock_obj # 取出fd对于连接
last_have_send = sock_state.have_write # 已经写数据的量
try:
have_send = conn.send(
sock_state.buff_write[last_have_send:]) # 发送剩下的数据
sock_state.have_write += have_send # 已经写的数据量
sock_state.need_write -= have_send # 还需写的数据量
if sock_state.need_write == 0 and sock_state.have_write != 0: # 写数据完成
sock_state.printState()
dbgPrint("\n write date end")
return "writecomplete" # 返回写入完成
else:
return "writemore" # 返回计算写入
except socket.error, msg:
return "closing"
def run(self):
while True:
epoll_list = self.epoll_sock.poll() # 定义poll()事件发生的list
for fd, events in epoll_list:
sock_state = self.conn_state[fd] # 取出fd构造体
if select.EPOLLHUP & events: # 文件描述符挂断
dbgPrint("EPOLLHUP")
sock_state.state = "closing" # fd状态设置为closing
elif select.EPOLLERR & events:
dbgPrint("EPOLLERR") # 文件描述符出错
sock_state.state = "closing" # 对应fd状态为closing
self.state_machine(fd) # 状态机调用
def state_machine(self, fd):
sock_state = self.conn_state[fd] # fd构造体
self.sm[sock_state.state](fd) # 通过sm字典调用对应状态的函数
class nbNet(nbNetBase):
def __init__(self, addr, port, logic):
dbgPrint('\n__init__: start!')
self.conn_state = {} # 定义字典保存每个连接状态
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listen_sock.bind((addr, port))
self.listen_sock.listen(10) # 排队长度
self.setFd(self.listen_sock) # 定义listen socket 放入字典conn_state
self.epoll_sock = select.epoll() # 初始化fd的epoll
self.epoll_sock.register(
self.listen_sock.fileno(), select.EPOLLIN) # linten可以读的描述符
self.logic = logic # 业务处理
self.sm = {
"accept": self.accept2read,
"read": self.read2process,
"write": self.write2read,
"process": self.process,
"closing": self.close,
} # 状态调用机的字典
dbgPrint('\n__init__: end, register no: %s' %
self.listen_sock.fileno())
def process(self, fd):
sock_state = self.conn_state[fd]
response = self.logic(sock_state.buff_read) # 业务函数处理
sock_state.buff_write = "%010d%s" % (len(response), response) # 发送的数据
sock_state.need_write = len(sock_state.buff_write) # 需要发送的长度
sock_state.state = "write" # fd对应的状态
self.epoll_sock.modify(fd, select.EPOLLOUT) # fd对应的epoll为改写模式
sock_state.printState()
def accept2read(self, fd):
conn = self.accept(fd)
self.epoll_sock.register(
conn.fileno(), select.EPOLLIN) # 发送数据后重新将fd的epoll改成读
self.setFd(conn) # fd生成构造体
self.conn_state[conn.fileno()].state = "read" # fd状态为read
dbgPrint("\n -- accept end!")
def read2process(self, fd):
read_ret = ""
# 状态转换
try:
| self.conn_state[fd].state = 'closing' # 状态 | read_ret = self.read(fd) # read函数返回值
except (Exception), msg:
dbgPrint(msg)
read_ret = "closing"
if read_ret == "process": # 读取完成,转换到process
self.process(fd)
elif read_ret == "readcontent": # readcontent、readmore、retry 继续读取
pass
elif read_ret == "readmore":
pass
elif read_ret == "retry":
pass
elif read_ret == "closing":
| identifier_body |
cpNbnet.py | :
def __init__(self):
self.state = "accept" # 定义状态
self.have_read = 0 # 记录读了的字节
self.need_read = 10 # 头文件需要读取10个字节
self.have_write = 0 # 记录读了的字节
self.need_write = 0 # 需要写的字节
self.buff_read = "" # 读缓存
self.buff_write = "" # 写缓存
self.sock_obj = "" # sock对象
def printState(self):
if DEBUG:
dbgPrint('\n - current state of fd: %d' % self.sock_obj.fileno())
dbgPrint(" - - state: %s" % self.state)
dbgPrint(" - - have_read: %s" % self.have_read)
dbgPrint(" - - need_read: %s" % self.need_read)
dbgPrint(" - - have_write: %s" % self.have_write)
dbgPrint(" - - need_write: %s" % self.need_write)
dbgPrint(" - - buff_read: %s" % self.buff_read)
dbgPrint(" - - buff_write: %s" % self.buff_write)
dbgPrint(" - - sock_obj: %s" % self.sock_obj)
class nbNetBase:
def setFd(self, sock):
dbgPrint("\n setFd start")
tmp_state = STATE() # 实例化类
tmp_state.sock_obj = sock # 定义类中sock
self.conn_state[sock.fileno()] = tmp_state # 把sock加入到字典中
self.conn_state[sock.fileno()].printState()
dbgPrint("\n setFd end")
def accept(self, fd):
dbgPrint("\n accept start!")
sock_state = self.conn_state[fd] # 取出fd对应连接
sock = sock_state.sock_obj # 取出fd的sock
conn, addr = sock.accept() # 取出连接请求
conn.setblocking(0) # 设置非阻塞模式
return conn # 返回连接
def close(self, fd):
try:
sock = self.conn_state[fd].sock_obj # 取出fd的sock
sock.close() # 关闭sock
except:
dbgPrint("Close fd: %s" % fd)
finally:
self.epoll_sock.unregister(fd) # 将fd重epoll中注销
self.conn_state.pop(fd) # 踢出字典
def read(self, fd):
try:
sock_state = self.conn_state[fd] # 取出fd对应连接
conn = sock_state.sock_obj # 取出fd连接请求
if sock_state.need_read <= 0: # 需要读取字节为空报错
raise socket.error
one_read = conn.recv(sock_state.need_read) # 读取传输的字符
dbgPrint("\n func fd: %d, one_read: %s, need_read: %d" %
(fd, one_read, sock_state.need_read))
if len(one_read) == 0: # 读取数据为0报错
raise socket.error
sock_state.buff_read += one_read # 把读取数据存到读缓存中
sock_state.have_read += len(one_read) # 已经读取完的数据量
sock_state.need_read -= len(one_read) # 还需要读取数据的量
sock_state.printState()
if sock_state.have_read == 10: # 10字节为头文件处理
header_said_need_read = int(sock_state.have_read) # 读取数据的量
if header_said_need_read <= 0: # 如果还需读0字节报错
raise socket.error
sock_state.need_read += header_said_need_read # 还需读取数量变化
sock_state.buff_read = '' # 读缓存清空
sock_state.printState()
return "readcontent" # 还需读取数据
elif sock_state.need_read == 0:
return "process" # 读取数据完成,转换状态
else:
return "readmore" # 还需读取数据
except (socket.error, ValueError), msg:
try:
if msg.errno == 11: # errno等于11,尝试进行一次读取
dbgPrint("11" + msg)
return "retry"
except:
pass
return "closing"
def write(self, fd):
sock_state = self.conn_state[fd] # 取出fd对应的连接构造体
conn = sock_state.sock_obj # 取出fd对于连接
last_have_send = sock_state.have_write # 已经写数据的量
try:
have_send = conn.send(
sock_state.buff_write[last_have_send:]) # 发送剩下的数据
sock_state.have_write += have_send # 已经写的数据量
sock_state.need_write -= have_send # 还需写的数据量
if sock_state.need_write == 0 and sock_state.have_write != 0: # 写数据完成
sock_state.printState()
dbgPrint("\n write date end")
return "writecomplete" # 返回写入完成
else:
return "writemore" # 返回计算写入
except socket.error, msg:
return "closing"
def run(self):
while True:
epoll_list = self.epoll_sock.poll() # 定义poll()事件发生的list
for fd, events in epoll_list:
sock_state = self.conn_state[fd] # 取出fd构造体
if select.EPOLLHUP & events: # 文件描述符挂断
dbgPrint("EPOLLHUP")
sock_state.state = "closing" # fd状态设置为closing
elif select.EPOLLERR & events:
dbgPrint("EPOLLERR") # 文件描述符出错
sock_state.state = "closing" # 对应fd状态为closing
self.state_machine(fd) # 状态机调用
def state_machine(self, fd):
sock_state = self.conn_state[fd] # fd构造体
self.sm[sock_state.state](fd) # 通过sm字典调用对应状态的函数
class nbNet(nbNetBase):
def __init__(self, addr, port, logic):
dbgPrint('\n__init__: start!')
self.conn_state = {} # 定义字典保存每个连接状态
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listen_sock.bind((addr, port))
self.listen_sock.listen(10) # 排队长度
self.setFd(self.listen_sock) # 定义listen socket 放入字典conn_state
self.epoll_sock = select.epoll() # 初始化fd的epoll
self.epoll_sock.register(
self.listen_sock.fileno(), select.EPOLLIN) # linten可以读的描述符
self.logic = logic # 业务处理
self.sm = {
"accept": self.accept2read,
"read": self.read2process,
"write": self.write2read,
"process": self.process,
"closing": self.close,
} # 状态调用机的字典
dbgPrint('\n__init__: end, register no: %s' %
self.listen_sock.fileno())
def process(self, fd):
sock_state = self.conn_state[fd]
response = self.logic(sock_state.buff_read) # 业务函数处理
sock_state.buff_write = "%010d%s" % (len(response), response) # 发送的数据
sock_state.need_write = len(sock_state.buff_write) # 需要发送的长度
sock_state.state = "write" # fd对应的状态
self.epoll_sock.modify(fd, select.EPOLLOUT) # fd对应的epoll为改写模式
sock_state.printState()
def accept2read(self, fd):
conn = self.accept(fd)
self.epoll_sock.register(
conn.fileno(), select.EPOLLIN) # 发送数据后重新将fd的epoll改成读
self.setFd(conn) # fd生成构造体
self.conn_state[conn.fileno()].state = "read" # fd状态为read
dbgPrint("\n -- accept end!")
def read2process(self, fd):
read_ret = ""
# 状态转换
try:
read_ret = self.read(fd) # read函数返回值
except (Exception), msg:
dbgPrint(msg)
read_ret = "closing"
if read_ret == "process": # 读取完成,转换到process
| STATE | identifier_name |
|
basic_client.rs | // use std::io::prelude::*;
use std::net::{Shutdown, TcpStream};
use std::ffi::CString;
use std::thread;
use std::str;
extern crate mbedtls;
use mbedtls::mbed;
use mbedtls::mbed::ssl::error::SSLError;
const TO_WRITE : &'static [u8] = b"GET / HTTP/1.1\r\n\r\n";
fn main() {
let mut stream = TcpStream::connect("216.58.210.78:443").unwrap();
{
let mut entropy = mbedtls::mbed::entropy::EntropyContext::new();
let mut entropy_func = |d : &mut[u8] | entropy.entropy_func(d);
let mut ctr_drbg = mbed::ctr_drbg::CtrDrbgContext::with_seed(
&mut entropy_func, None
).unwrap();
let mut random_func = |f: &mut[u8] | ctr_drbg.random(f);
let mut ssl_config = mbed::ssl::SSLConfig::new();
let mut ssl_context = mbed::ssl::SSLContext::new();
ssl_config.set_rng(&mut random_func);
ssl_config.set_defaults(
mbed::ssl::EndpointType::Client,
mbed::ssl::TransportType::Stream,
mbed::ssl::SSLPreset::Default,
).unwrap();
ssl_config.set_authmode(mbed::ssl::AuthMode::VerifyNone);
ssl_context.setup(&ssl_config).unwrap();
ssl_context.set_hostname(&CString::new("mbed TLS Server 1").unwrap()).unwrap();
ssl_context.set_bio_async(&mut stream);
attempt_io(|| ssl_context.handshake());
let size_written = attempt_io(|| ssl_context.write(TO_WRITE));
assert!(size_written == TO_WRITE.len());
let mut buffer = [0; 4096];
let size_read = attempt_io(|| ssl_context.read(&mut buffer));
println!(
"Read: {} bytes:\n---\n{}\n---",
size_read, str::from_utf8(&buffer[..size_read]).unwrap() |
stream.shutdown(Shutdown::Both).unwrap();
}
fn attempt_io<I, F: FnMut() -> Result<I, SSLError>>(mut f: F) -> I {
loop {
match f() {
Ok(i) => return i,
Err(SSLError::WantRead) | Err(SSLError::WantWrite) => {
thread::sleep_ms(100);
continue
},
Err(e) => panic!("Got error: {}", e),
}
}
} | );
attempt_io(|| ssl_context.close_notify());
} | random_line_split |
basic_client.rs | // use std::io::prelude::*;
use std::net::{Shutdown, TcpStream};
use std::ffi::CString;
use std::thread;
use std::str;
extern crate mbedtls;
use mbedtls::mbed;
use mbedtls::mbed::ssl::error::SSLError;
const TO_WRITE : &'static [u8] = b"GET / HTTP/1.1\r\n\r\n";
fn | () {
let mut stream = TcpStream::connect("216.58.210.78:443").unwrap();
{
let mut entropy = mbedtls::mbed::entropy::EntropyContext::new();
let mut entropy_func = |d : &mut[u8] | entropy.entropy_func(d);
let mut ctr_drbg = mbed::ctr_drbg::CtrDrbgContext::with_seed(
&mut entropy_func, None
).unwrap();
let mut random_func = |f: &mut[u8] | ctr_drbg.random(f);
let mut ssl_config = mbed::ssl::SSLConfig::new();
let mut ssl_context = mbed::ssl::SSLContext::new();
ssl_config.set_rng(&mut random_func);
ssl_config.set_defaults(
mbed::ssl::EndpointType::Client,
mbed::ssl::TransportType::Stream,
mbed::ssl::SSLPreset::Default,
).unwrap();
ssl_config.set_authmode(mbed::ssl::AuthMode::VerifyNone);
ssl_context.setup(&ssl_config).unwrap();
ssl_context.set_hostname(&CString::new("mbed TLS Server 1").unwrap()).unwrap();
ssl_context.set_bio_async(&mut stream);
attempt_io(|| ssl_context.handshake());
let size_written = attempt_io(|| ssl_context.write(TO_WRITE));
assert!(size_written == TO_WRITE.len());
let mut buffer = [0; 4096];
let size_read = attempt_io(|| ssl_context.read(&mut buffer));
println!(
"Read: {} bytes:\n---\n{}\n---",
size_read, str::from_utf8(&buffer[..size_read]).unwrap()
);
attempt_io(|| ssl_context.close_notify());
}
stream.shutdown(Shutdown::Both).unwrap();
}
fn attempt_io<I, F: FnMut() -> Result<I, SSLError>>(mut f: F) -> I {
loop {
match f() {
Ok(i) => return i,
Err(SSLError::WantRead) | Err(SSLError::WantWrite) => {
thread::sleep_ms(100);
continue
},
Err(e) => panic!("Got error: {}", e),
}
}
}
| main | identifier_name |
basic_client.rs | // use std::io::prelude::*;
use std::net::{Shutdown, TcpStream};
use std::ffi::CString;
use std::thread;
use std::str;
extern crate mbedtls;
use mbedtls::mbed;
use mbedtls::mbed::ssl::error::SSLError;
const TO_WRITE : &'static [u8] = b"GET / HTTP/1.1\r\n\r\n";
fn main() {
let mut stream = TcpStream::connect("216.58.210.78:443").unwrap();
{
let mut entropy = mbedtls::mbed::entropy::EntropyContext::new();
let mut entropy_func = |d : &mut[u8] | entropy.entropy_func(d);
let mut ctr_drbg = mbed::ctr_drbg::CtrDrbgContext::with_seed(
&mut entropy_func, None
).unwrap();
let mut random_func = |f: &mut[u8] | ctr_drbg.random(f);
let mut ssl_config = mbed::ssl::SSLConfig::new();
let mut ssl_context = mbed::ssl::SSLContext::new();
ssl_config.set_rng(&mut random_func);
ssl_config.set_defaults(
mbed::ssl::EndpointType::Client,
mbed::ssl::TransportType::Stream,
mbed::ssl::SSLPreset::Default,
).unwrap();
ssl_config.set_authmode(mbed::ssl::AuthMode::VerifyNone);
ssl_context.setup(&ssl_config).unwrap();
ssl_context.set_hostname(&CString::new("mbed TLS Server 1").unwrap()).unwrap();
ssl_context.set_bio_async(&mut stream);
attempt_io(|| ssl_context.handshake());
let size_written = attempt_io(|| ssl_context.write(TO_WRITE));
assert!(size_written == TO_WRITE.len());
let mut buffer = [0; 4096];
let size_read = attempt_io(|| ssl_context.read(&mut buffer));
println!(
"Read: {} bytes:\n---\n{}\n---",
size_read, str::from_utf8(&buffer[..size_read]).unwrap()
);
attempt_io(|| ssl_context.close_notify());
}
stream.shutdown(Shutdown::Both).unwrap();
}
fn attempt_io<I, F: FnMut() -> Result<I, SSLError>>(mut f: F) -> I | {
loop {
match f() {
Ok(i) => return i,
Err(SSLError::WantRead) | Err(SSLError::WantWrite) => {
thread::sleep_ms(100);
continue
},
Err(e) => panic!("Got error: {}", e),
}
}
} | identifier_body |
|
tests.py | from datetime import datetime, timedelta
import time
import unittest
from django.http import HttpRequest, HttpResponse, parse_cookie
from django.core.handlers.wsgi import WSGIRequest
from django.core.handlers.modpython import ModPythonRequest
from django.utils.http import cookie_date
class RequestsTests(unittest.TestCase):
| super(FakeModPythonRequest, self).__init__(*args, **kwargs)
self._get = self._post = self._meta = self._cookies = {}
class Dummy:
def get_options(self):
return {}
req = Dummy()
req.uri = 'bogus'
request = FakeModPythonRequest(req)
self.assertEqual(request.path, 'bogus')
self.assertEqual(request.GET.keys(), [])
self.assertEqual(request.POST.keys(), [])
self.assertEqual(request.COOKIES.keys(), [])
self.assertEqual(request.META.keys(), [])
def test_parse_cookie(self):
self.assertEqual(parse_cookie('invalid:key=true'), {})
def test_httprequest_location(self):
request = HttpRequest()
self.assertEqual(request.build_absolute_uri(location="https://www.example.com/asdf"),
'https://www.example.com/asdf')
request.get_host = lambda: 'www.example.com'
request.path = ''
self.assertEqual(request.build_absolute_uri(location="/path/with:colons"),
'http://www.example.com/path/with:colons')
def test_near_expiration(self):
"Cookie will expire when an near expiration time is provided"
response = HttpResponse()
# There is a timing weakness in this test; The
# expected result for max-age requires that there be
# a very slight difference between the evaluated expiration
# time, and the time evaluated in set_cookie(). If this
# difference doesn't exist, the cookie time will be
# 1 second larger. To avoid the problem, put in a quick sleep,
# which guarantees that there will be a time difference.
expires = datetime.utcnow() + timedelta(seconds=10)
time.sleep(0.001)
response.set_cookie('datetime', expires=expires)
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['max-age'], 10)
def test_far_expiration(self):
"Cookie will expire when an distant expiration time is provided"
response = HttpResponse()
response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6))
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['expires'], 'Sat, 01-Jan-2028 04:05:06 GMT')
def test_max_age_expiration(self):
"Cookie will expire if max_age is provided"
response = HttpResponse()
response.set_cookie('max_age', max_age=10)
max_age_cookie = response.cookies['max_age']
self.assertEqual(max_age_cookie['max-age'], 10)
self.assertEqual(max_age_cookie['expires'], cookie_date(time.time()+10))
| def test_httprequest(self):
request = HttpRequest()
self.assertEqual(request.GET.keys(), [])
self.assertEqual(request.POST.keys(), [])
self.assertEqual(request.COOKIES.keys(), [])
self.assertEqual(request.META.keys(), [])
def test_wsgirequest(self):
request = WSGIRequest({'PATH_INFO': 'bogus', 'REQUEST_METHOD': 'bogus'})
self.assertEqual(request.GET.keys(), [])
self.assertEqual(request.POST.keys(), [])
self.assertEqual(request.COOKIES.keys(), [])
self.assertEqual(set(request.META.keys()), set(['PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME']))
self.assertEqual(request.META['PATH_INFO'], 'bogus')
self.assertEqual(request.META['REQUEST_METHOD'], 'bogus')
self.assertEqual(request.META['SCRIPT_NAME'], '')
def test_modpythonrequest(self):
class FakeModPythonRequest(ModPythonRequest):
def __init__(self, *args, **kwargs): | identifier_body |
tests.py | from datetime import datetime, timedelta
import time
import unittest
from django.http import HttpRequest, HttpResponse, parse_cookie
from django.core.handlers.wsgi import WSGIRequest
from django.core.handlers.modpython import ModPythonRequest
from django.utils.http import cookie_date
class RequestsTests(unittest.TestCase):
def test_httprequest(self):
request = HttpRequest()
self.assertEqual(request.GET.keys(), [])
self.assertEqual(request.POST.keys(), [])
self.assertEqual(request.COOKIES.keys(), [])
self.assertEqual(request.META.keys(), [])
def test_wsgirequest(self):
request = WSGIRequest({'PATH_INFO': 'bogus', 'REQUEST_METHOD': 'bogus'})
self.assertEqual(request.GET.keys(), [])
self.assertEqual(request.POST.keys(), [])
self.assertEqual(request.COOKIES.keys(), [])
self.assertEqual(set(request.META.keys()), set(['PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME']))
self.assertEqual(request.META['PATH_INFO'], 'bogus')
self.assertEqual(request.META['REQUEST_METHOD'], 'bogus')
self.assertEqual(request.META['SCRIPT_NAME'], '')
def test_modpythonrequest(self):
class FakeModPythonRequest(ModPythonRequest):
def __init__(self, *args, **kwargs):
super(FakeModPythonRequest, self).__init__(*args, **kwargs)
self._get = self._post = self._meta = self._cookies = {}
class Dummy:
def get_options(self):
return {}
req = Dummy()
req.uri = 'bogus'
request = FakeModPythonRequest(req)
self.assertEqual(request.path, 'bogus')
self.assertEqual(request.GET.keys(), [])
self.assertEqual(request.POST.keys(), [])
self.assertEqual(request.COOKIES.keys(), [])
self.assertEqual(request.META.keys(), [])
def test_parse_cookie(self):
self.assertEqual(parse_cookie('invalid:key=true'), {})
def test_httprequest_location(self):
request = HttpRequest()
self.assertEqual(request.build_absolute_uri(location="https://www.example.com/asdf"),
'https://www.example.com/asdf')
request.get_host = lambda: 'www.example.com'
request.path = ''
self.assertEqual(request.build_absolute_uri(location="/path/with:colons"),
'http://www.example.com/path/with:colons')
def test_near_expiration(self):
"Cookie will expire when an near expiration time is provided"
response = HttpResponse()
# There is a timing weakness in this test; The
# expected result for max-age requires that there be
# a very slight difference between the evaluated expiration
# time, and the time evaluated in set_cookie(). If this
# difference doesn't exist, the cookie time will be
# 1 second larger. To avoid the problem, put in a quick sleep,
# which guarantees that there will be a time difference.
expires = datetime.utcnow() + timedelta(seconds=10)
time.sleep(0.001)
response.set_cookie('datetime', expires=expires)
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['max-age'], 10)
def test_far_expiration(self):
"Cookie will expire when an distant expiration time is provided"
response = HttpResponse()
response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6))
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['expires'], 'Sat, 01-Jan-2028 04:05:06 GMT')
def | (self):
"Cookie will expire if max_age is provided"
response = HttpResponse()
response.set_cookie('max_age', max_age=10)
max_age_cookie = response.cookies['max_age']
self.assertEqual(max_age_cookie['max-age'], 10)
self.assertEqual(max_age_cookie['expires'], cookie_date(time.time()+10))
| test_max_age_expiration | identifier_name |
tests.py | from datetime import datetime, timedelta
import time
import unittest
from django.http import HttpRequest, HttpResponse, parse_cookie
from django.core.handlers.wsgi import WSGIRequest
from django.core.handlers.modpython import ModPythonRequest
from django.utils.http import cookie_date
class RequestsTests(unittest.TestCase):
def test_httprequest(self):
request = HttpRequest()
self.assertEqual(request.GET.keys(), [])
self.assertEqual(request.POST.keys(), [])
self.assertEqual(request.COOKIES.keys(), [])
self.assertEqual(request.META.keys(), [])
def test_wsgirequest(self):
request = WSGIRequest({'PATH_INFO': 'bogus', 'REQUEST_METHOD': 'bogus'})
self.assertEqual(request.GET.keys(), [])
self.assertEqual(request.POST.keys(), [])
self.assertEqual(request.COOKIES.keys(), [])
self.assertEqual(set(request.META.keys()), set(['PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME']))
self.assertEqual(request.META['PATH_INFO'], 'bogus')
self.assertEqual(request.META['REQUEST_METHOD'], 'bogus')
self.assertEqual(request.META['SCRIPT_NAME'], '')
def test_modpythonrequest(self):
class FakeModPythonRequest(ModPythonRequest):
def __init__(self, *args, **kwargs):
super(FakeModPythonRequest, self).__init__(*args, **kwargs)
self._get = self._post = self._meta = self._cookies = {}
class Dummy:
def get_options(self):
return {}
req = Dummy()
req.uri = 'bogus'
request = FakeModPythonRequest(req)
self.assertEqual(request.path, 'bogus')
self.assertEqual(request.GET.keys(), [])
self.assertEqual(request.POST.keys(), [])
self.assertEqual(request.COOKIES.keys(), [])
self.assertEqual(request.META.keys(), [])
def test_parse_cookie(self):
self.assertEqual(parse_cookie('invalid:key=true'), {})
def test_httprequest_location(self):
request = HttpRequest()
self.assertEqual(request.build_absolute_uri(location="https://www.example.com/asdf"),
'https://www.example.com/asdf')
request.get_host = lambda: 'www.example.com'
request.path = ''
self.assertEqual(request.build_absolute_uri(location="/path/with:colons"),
'http://www.example.com/path/with:colons')
| "Cookie will expire when an near expiration time is provided"
response = HttpResponse()
# There is a timing weakness in this test; The
# expected result for max-age requires that there be
# a very slight difference between the evaluated expiration
# time, and the time evaluated in set_cookie(). If this
# difference doesn't exist, the cookie time will be
# 1 second larger. To avoid the problem, put in a quick sleep,
# which guarantees that there will be a time difference.
expires = datetime.utcnow() + timedelta(seconds=10)
time.sleep(0.001)
response.set_cookie('datetime', expires=expires)
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['max-age'], 10)
def test_far_expiration(self):
"Cookie will expire when an distant expiration time is provided"
response = HttpResponse()
response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6))
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['expires'], 'Sat, 01-Jan-2028 04:05:06 GMT')
def test_max_age_expiration(self):
"Cookie will expire if max_age is provided"
response = HttpResponse()
response.set_cookie('max_age', max_age=10)
max_age_cookie = response.cookies['max_age']
self.assertEqual(max_age_cookie['max-age'], 10)
self.assertEqual(max_age_cookie['expires'], cookie_date(time.time()+10)) | def test_near_expiration(self): | random_line_split |
group___f_l_a_s_h___exported___constants.js | var group___f_l_a_s_h___exported___constants =
[
[ "Flash_Latency", "group___flash___latency.html", "group___flash___latency" ],
[ "Half_Cycle_Enable_Disable", "group___half___cycle___enable___disable.html", "group___half___cycle___enable___disable" ],
[ "Prefetch_Buffer_Enable_Disable", "group___prefetch___buffer___enable___disable.html", "group___prefetch___buffer___enable___disable" ],
[ "Option_Bytes_Write_Protection", "group___option___bytes___write___protection.html", "group___option___bytes___write___protection" ],
[ "Option_Bytes_IWatchdog", "group___option___bytes___i_watchdog.html", "group___option___bytes___i_watchdog" ],
[ "Option_Bytes_nRST_STOP", "group___option___bytes__n_r_s_t___s_t_o_p.html", "group___option___bytes__n_r_s_t___s_t_o_p" ],
[ "Option_Bytes_nRST_STDBY", "group___option___bytes__n_r_s_t___s_t_d_b_y.html", "group___option___bytes__n_r_s_t___s_t_d_b_y" ],
[ "FLASH_Interrupts", "group___f_l_a_s_h___interrupts.html", "group___f_l_a_s_h___interrupts" ],
[ "FLASH_Flags", "group___f_l_a_s_h___flags.html", "group___f_l_a_s_h___flags" ] | ]; | random_line_split |
|
node.py | """ EPYNET Classes """
from . import epanet2
from .objectcollection import ObjectCollection
from .baseobject import BaseObject, lazy_property
from .pattern import Pattern
class Node(BaseObject):
""" Base EPANET Node class """
static_properties = {'elevation': epanet2.EN_ELEVATION}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE}
def __init__(self, uid, network):
super(Node, self).__init__(uid, network)
self.links = ObjectCollection()
def get_index(self, uid):
if not self._index:
self._index = self.network().ep.ENgetnodeindex(uid)
return self._index
def set_object_value(self, code, value):
return self.network().ep.ENsetnodevalue(self.index, code, value)
def get_object_value(self, code):
return self.network().ep.ENgetnodevalue(self.index, code)
@property
def index(self):
return self.get_index(self.uid)
@lazy_property
def coordinates(self):
return self.network().ep.ENgetcoord(self.index)
# extra functionality
@lazy_property
def upstream_links(self):
""" return a list of upstream links """
if self.results != {}:
raise ValueError("This method is only supported for steady state simulations")
links = ObjectCollection()
for link in self.links:
if (link.to_node == self and link.flow >= 1e-3) or (link.from_node == self and link.flow < -1e-3):
links[link.uid] = link
return links
@lazy_property
def downstream_links(self):
""" return a list of downstream nodes """
if self.results != {}:
raise ValueError("This method is only supported for steady state simulations")
links = ObjectCollection()
for link in self.links:
if (link.from_node == self and link.flow >= 1e-3) or (link.to_node == self and link.flow < 1e-3):
links[link.uid] = link
return links
@lazy_property
def inflow(self):
outflow = 0
for link in self.upstream_links:
outflow += abs(link.flow)
return outflow
@lazy_property
def outflow(self):
outflow = 0
for link in self.downstream_links:
outflow += abs(link.flow)
return outflow
""" calculates all the water flowing out of the node """
class Reservoir(Node):
""" EPANET Reservoir Class """
node_type = "Reservoir"
class Junction(Node):
| self.network().solved = False
self.set_object_value(epanet2.EN_PATTERN, pattern_index)
class Tank(Node):
""" EPANET Tank Class """
node_type = "Tank"
static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND,
'initvolume': epanet2.EN_INITVOLUME, 'diameter': epanet2.EN_TANKDIAM,
'minvolume': epanet2.EN_MINVOLUME, 'minlevel': epanet2.EN_MINLEVEL,
'maxlevel': epanet2.EN_MAXLEVEL, 'maxvolume': 25, 'tanklevel': epanet2.EN_TANKLEVEL}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE,
'demand': epanet2.EN_DEMAND, 'volume': 24, 'level': epanet2.EN_TANKLEVEL}
| """ EPANET Junction Class """
static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND, 'emitter': epanet2.EN_EMITTER}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE, 'demand': epanet2.EN_DEMAND}
node_type = "Junction"
@property
def pattern(self):
pattern_index = int(self.get_property(epanet2.EN_PATTERN))
uid = self.network().ep.ENgetpatternid(pattern_index)
return Pattern(uid, self.network())
@pattern.setter
def pattern(self, value):
if isinstance(value, int):
pattern_index = value
elif isinstance(value, str):
pattern_index = self.network().ep.ENgetpatternindex(value)
else:
pattern_index = value.index
| identifier_body |
node.py | """ EPYNET Classes """
from . import epanet2
from .objectcollection import ObjectCollection
from .baseobject import BaseObject, lazy_property
from .pattern import Pattern
class Node(BaseObject):
""" Base EPANET Node class """
static_properties = {'elevation': epanet2.EN_ELEVATION}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE}
def __init__(self, uid, network):
super(Node, self).__init__(uid, network)
self.links = ObjectCollection()
def get_index(self, uid):
if not self._index:
self._index = self.network().ep.ENgetnodeindex(uid)
return self._index
def set_object_value(self, code, value):
return self.network().ep.ENsetnodevalue(self.index, code, value)
def get_object_value(self, code):
return self.network().ep.ENgetnodevalue(self.index, code)
@property
def index(self):
return self.get_index(self.uid)
@lazy_property
def coordinates(self):
return self.network().ep.ENgetcoord(self.index)
# extra functionality
@lazy_property
def upstream_links(self):
""" return a list of upstream links """
if self.results != {}:
raise ValueError("This method is only supported for steady state simulations")
links = ObjectCollection()
for link in self.links:
if (link.to_node == self and link.flow >= 1e-3) or (link.from_node == self and link.flow < -1e-3):
links[link.uid] = link
return links
@lazy_property
def downstream_links(self):
""" return a list of downstream nodes """
if self.results != {}:
raise ValueError("This method is only supported for steady state simulations")
links = ObjectCollection()
for link in self.links:
if (link.from_node == self and link.flow >= 1e-3) or (link.to_node == self and link.flow < 1e-3):
links[link.uid] = link
return links
@lazy_property
def inflow(self):
outflow = 0
for link in self.upstream_links:
outflow += abs(link.flow)
return outflow
@lazy_property
def outflow(self):
outflow = 0
for link in self.downstream_links:
outflow += abs(link.flow)
return outflow
""" calculates all the water flowing out of the node """
class Reservoir(Node):
""" EPANET Reservoir Class """
node_type = "Reservoir"
class Junction(Node):
""" EPANET Junction Class """
static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND, 'emitter': epanet2.EN_EMITTER}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE, 'demand': epanet2.EN_DEMAND}
node_type = "Junction"
@property
def | (self):
pattern_index = int(self.get_property(epanet2.EN_PATTERN))
uid = self.network().ep.ENgetpatternid(pattern_index)
return Pattern(uid, self.network())
@pattern.setter
def pattern(self, value):
if isinstance(value, int):
pattern_index = value
elif isinstance(value, str):
pattern_index = self.network().ep.ENgetpatternindex(value)
else:
pattern_index = value.index
self.network().solved = False
self.set_object_value(epanet2.EN_PATTERN, pattern_index)
class Tank(Node):
""" EPANET Tank Class """
node_type = "Tank"
static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND,
'initvolume': epanet2.EN_INITVOLUME, 'diameter': epanet2.EN_TANKDIAM,
'minvolume': epanet2.EN_MINVOLUME, 'minlevel': epanet2.EN_MINLEVEL,
'maxlevel': epanet2.EN_MAXLEVEL, 'maxvolume': 25, 'tanklevel': epanet2.EN_TANKLEVEL}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE,
'demand': epanet2.EN_DEMAND, 'volume': 24, 'level': epanet2.EN_TANKLEVEL}
| pattern | identifier_name |
node.py | """ EPYNET Classes """
from . import epanet2
from .objectcollection import ObjectCollection
from .baseobject import BaseObject, lazy_property
from .pattern import Pattern
class Node(BaseObject):
""" Base EPANET Node class """
static_properties = {'elevation': epanet2.EN_ELEVATION}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE}
def __init__(self, uid, network):
super(Node, self).__init__(uid, network)
self.links = ObjectCollection()
def get_index(self, uid):
if not self._index:
self._index = self.network().ep.ENgetnodeindex(uid)
return self._index
def set_object_value(self, code, value):
return self.network().ep.ENsetnodevalue(self.index, code, value)
def get_object_value(self, code):
return self.network().ep.ENgetnodevalue(self.index, code)
@property
def index(self):
return self.get_index(self.uid)
@lazy_property
def coordinates(self):
return self.network().ep.ENgetcoord(self.index)
# extra functionality
@lazy_property
def upstream_links(self):
""" return a list of upstream links """
if self.results != {}:
raise ValueError("This method is only supported for steady state simulations")
links = ObjectCollection()
for link in self.links:
if (link.to_node == self and link.flow >= 1e-3) or (link.from_node == self and link.flow < -1e-3):
links[link.uid] = link
return links
@lazy_property
def downstream_links(self):
""" return a list of downstream nodes """
if self.results != {}:
raise ValueError("This method is only supported for steady state simulations")
links = ObjectCollection()
for link in self.links:
if (link.from_node == self and link.flow >= 1e-3) or (link.to_node == self and link.flow < 1e-3):
links[link.uid] = link
return links
@lazy_property
def inflow(self):
outflow = 0
for link in self.upstream_links:
outflow += abs(link.flow)
return outflow
@lazy_property
def outflow(self):
outflow = 0
for link in self.downstream_links:
outflow += abs(link.flow)
return outflow
""" calculates all the water flowing out of the node """
class Reservoir(Node):
""" EPANET Reservoir Class """
node_type = "Reservoir"
class Junction(Node):
""" EPANET Junction Class """
static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND, 'emitter': epanet2.EN_EMITTER}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE, 'demand': epanet2.EN_DEMAND}
node_type = "Junction"
@property
def pattern(self):
pattern_index = int(self.get_property(epanet2.EN_PATTERN))
uid = self.network().ep.ENgetpatternid(pattern_index)
return Pattern(uid, self.network())
@pattern.setter
def pattern(self, value):
if isinstance(value, int):
|
elif isinstance(value, str):
pattern_index = self.network().ep.ENgetpatternindex(value)
else:
pattern_index = value.index
self.network().solved = False
self.set_object_value(epanet2.EN_PATTERN, pattern_index)
class Tank(Node):
""" EPANET Tank Class """
node_type = "Tank"
static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND,
'initvolume': epanet2.EN_INITVOLUME, 'diameter': epanet2.EN_TANKDIAM,
'minvolume': epanet2.EN_MINVOLUME, 'minlevel': epanet2.EN_MINLEVEL,
'maxlevel': epanet2.EN_MAXLEVEL, 'maxvolume': 25, 'tanklevel': epanet2.EN_TANKLEVEL}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE,
'demand': epanet2.EN_DEMAND, 'volume': 24, 'level': epanet2.EN_TANKLEVEL}
| pattern_index = value | conditional_block |
node.py | """ EPYNET Classes """
from . import epanet2
from .objectcollection import ObjectCollection
from .baseobject import BaseObject, lazy_property
from .pattern import Pattern
class Node(BaseObject):
""" Base EPANET Node class """
static_properties = {'elevation': epanet2.EN_ELEVATION}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE}
def __init__(self, uid, network):
super(Node, self).__init__(uid, network)
self.links = ObjectCollection()
def get_index(self, uid):
if not self._index:
self._index = self.network().ep.ENgetnodeindex(uid)
return self._index
def set_object_value(self, code, value):
return self.network().ep.ENsetnodevalue(self.index, code, value)
def get_object_value(self, code):
return self.network().ep.ENgetnodevalue(self.index, code)
@property
def index(self):
return self.get_index(self.uid)
@lazy_property
def coordinates(self):
return self.network().ep.ENgetcoord(self.index)
# extra functionality
@lazy_property | raise ValueError("This method is only supported for steady state simulations")
links = ObjectCollection()
for link in self.links:
if (link.to_node == self and link.flow >= 1e-3) or (link.from_node == self and link.flow < -1e-3):
links[link.uid] = link
return links
@lazy_property
def downstream_links(self):
""" return a list of downstream nodes """
if self.results != {}:
raise ValueError("This method is only supported for steady state simulations")
links = ObjectCollection()
for link in self.links:
if (link.from_node == self and link.flow >= 1e-3) or (link.to_node == self and link.flow < 1e-3):
links[link.uid] = link
return links
@lazy_property
def inflow(self):
outflow = 0
for link in self.upstream_links:
outflow += abs(link.flow)
return outflow
@lazy_property
def outflow(self):
outflow = 0
for link in self.downstream_links:
outflow += abs(link.flow)
return outflow
""" calculates all the water flowing out of the node """
class Reservoir(Node):
""" EPANET Reservoir Class """
node_type = "Reservoir"
class Junction(Node):
""" EPANET Junction Class """
static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND, 'emitter': epanet2.EN_EMITTER}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE, 'demand': epanet2.EN_DEMAND}
node_type = "Junction"
@property
def pattern(self):
pattern_index = int(self.get_property(epanet2.EN_PATTERN))
uid = self.network().ep.ENgetpatternid(pattern_index)
return Pattern(uid, self.network())
@pattern.setter
def pattern(self, value):
if isinstance(value, int):
pattern_index = value
elif isinstance(value, str):
pattern_index = self.network().ep.ENgetpatternindex(value)
else:
pattern_index = value.index
self.network().solved = False
self.set_object_value(epanet2.EN_PATTERN, pattern_index)
class Tank(Node):
""" EPANET Tank Class """
node_type = "Tank"
static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND,
'initvolume': epanet2.EN_INITVOLUME, 'diameter': epanet2.EN_TANKDIAM,
'minvolume': epanet2.EN_MINVOLUME, 'minlevel': epanet2.EN_MINLEVEL,
'maxlevel': epanet2.EN_MAXLEVEL, 'maxvolume': 25, 'tanklevel': epanet2.EN_TANKLEVEL}
properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE,
'demand': epanet2.EN_DEMAND, 'volume': 24, 'level': epanet2.EN_TANKLEVEL} | def upstream_links(self):
""" return a list of upstream links """
if self.results != {}: | random_line_split |
procGOME_L2.py | #!/usr/bin/env python
import numpy.ma as ma
import os,sys, subprocess, math, datetime
from os.path import basename
import numpy as np
import time as tt
import gdal
import h5py
from datetime import timedelta
from gdalconst import GDT_Float32, GA_Update
from osgeo import ogr, osr
import logging
#TODO: change for handling of SUB data
#def extractProduct(lat, lon, time, data, dataAttributes, attributes):
def createImgGOME_L2(fileAbsPath, pixelSize=0.25):
hdf = h5py.File(fileAbsPath, 'r')
driver = gdal.GetDriverByName('GTiff')
lat = np.array(hdf['/GEOLOCATION/LatitudeCentre'])
lon = np.array(hdf['/GEOLOCATION/LongitudeCentre'])
lon = -180*(lon.astype(int)/180) + (lon%180)
time = np.array(hdf['/GEOLOCATION/Time'])
data = np.array(hdf['/TOTAL_COLUMNS/SO2'])
dataAttributes = hdf['/TOTAL_COLUMNS/SO2'].attrs
metadata = hdf['/META_DATA'].attrs
time_computed = []
for i in range(len(time)):
time_computed.append(tt.mktime((datetime.datetime(1950, 1, 1)+timedelta(days=int(time['Day'][i]))+timedelta(milliseconds=int(time['MillisecondOfDay'][i]))).timetuple()))
instrument = metadata['InstrumentID'][0]
level = 'L'+metadata['ProcessingLevel'][0]
fillValue = dataAttributes['FillValue'][0]
satellite = metadata['SatelliteID'][0]
dataType = GDT_Float32
stepSize = 1500
cnt = 0
ySize = 1
workingDir = os.path.dirname(os.path.realpath(__file__)) + '/../'
processes = []
outFileList = []
for i in range(0,len(lat),stepSize):
if i + stepSize > len(lat):
stepSize = len(lat)-i
timeSlice = time_computed[i:stepSize+i]
timeAvg = np.average(timeSlice)
date = datetime.datetime.fromtimestamp(timeAvg).strftime('%Y%m%d.%H%M%S')
timeStart = timeSlice[0]
timeEnd = timeSlice[-1]
filenameCoords = 'GOME_Coords_' + date + '.tif'
coord_ds = driver.Create(filenameCoords, stepSize, ySize, 2, dataType)
coord_ds.GetRasterBand(1).WriteArray(np.reshape(lat[i:stepSize+i],(stepSize,1)).transpose())
coord_ds.GetRasterBand(2).WriteArray(np.reshape(lon[i:stepSize+i],(stepSize,1)).transpose())
coord_ds = None
filename = instrument + '_' +satellite + '_' + level + '_SO2_' + date + '_tmp.tif'
filenameOutput = filename[0:-8] + '.tif'
data_ds = driver.Create(filename, stepSize, ySize, 1, dataType)
band = np.reshape(data[i:stepSize+i],(stepSize,1)).transpose() | data_ds = None
window = str(stepSize)+'x'+str(ySize)
processes.append((subprocess.Popen([workingDir + '/bin/remap', '-i', filename, '-o', filenameOutput, '-a', filenameCoords,'-q','-s', str(pixelSize),'-w',window ,'-f','80000','-n','-9999'], stdout=open(os.devnull, 'wb')), filenameOutput,timeStart, timeEnd,maxValue,minValue ))
outFileList.append(filenameOutput)
cnt += 1
# print subprocess.Popen([workingDir + '/bin/remap', '-i', filename, '-o', filenameOutput, '-a', filenameCoords,'-q','-s', str(pixelSize),'-w',window ,'-f','80000','-n','-9999'], stdout=open(os.devnull, 'wb')), filenameOutput,timeStart, timeEnd,maxValue,minValue
numProcesses = cnt-1
while processes:
for p in processes:
if p[0].poll() != None:
logging.debug( 'Finished remapping for ' + p[1])
dst_ds = gdal.Open(p[1], GA_Update)
dst_ds.SetMetadataItem('GLOBAL_MAX', str(p[4]))
dst_ds.SetMetadataItem('GLOBAL_MIN', str(p[5]))
timeStart = datetime.datetime.fromtimestamp(p[2])
timeStart = timeStart.strftime('%Y-%m-%dT%H:%M:%SZ')
timeEnd = datetime.datetime.fromtimestamp(p[3])
timeEnd = timeEnd.strftime('%Y-%m-%dT%H:%M:%SZ')
dst_ds.SetMetadataItem('TIME_START', timeStart)
dst_ds.SetMetadataItem('TIME_END', timeEnd)
dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
dst_ds.GetRasterBand(1).ComputeStatistics(False)
dst_ds = None
processes.remove(p)
tt.sleep(0.1)
os.system('rm ' + filenameCoords[0:11] + '*')
os.system('rm ' + filename[0:11] + '*_tmp.tif')
return outFileList
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.exit('\nUsage: %s GOME_file \n' % sys.argv[0] )
else:
if not os.path.exists(sys.argv[1]):
sys.exit('\nERROR: File %s was not found!\n' % sys.argv[1])
fileAbsPath = sys.argv[1]
createImgGOME_L2(fileAbsPath)
exit(0) | band[band == fillValue] = -9999
maxValue=np.max(ma.masked_equal(band,-9999))
minValue=np.min(ma.masked_equal(band,-9999))
data_ds.GetRasterBand(1).WriteArray(band) | random_line_split |
procGOME_L2.py | #!/usr/bin/env python
import numpy.ma as ma
import os,sys, subprocess, math, datetime
from os.path import basename
import numpy as np
import time as tt
import gdal
import h5py
from datetime import timedelta
from gdalconst import GDT_Float32, GA_Update
from osgeo import ogr, osr
import logging
#TODO: change for handling of SUB data
#def extractProduct(lat, lon, time, data, dataAttributes, attributes):
def createImgGOME_L2(fileAbsPath, pixelSize=0.25):
hdf = h5py.File(fileAbsPath, 'r')
driver = gdal.GetDriverByName('GTiff')
lat = np.array(hdf['/GEOLOCATION/LatitudeCentre'])
lon = np.array(hdf['/GEOLOCATION/LongitudeCentre'])
lon = -180*(lon.astype(int)/180) + (lon%180)
time = np.array(hdf['/GEOLOCATION/Time'])
data = np.array(hdf['/TOTAL_COLUMNS/SO2'])
dataAttributes = hdf['/TOTAL_COLUMNS/SO2'].attrs
metadata = hdf['/META_DATA'].attrs
time_computed = []
for i in range(len(time)):
|
instrument = metadata['InstrumentID'][0]
level = 'L'+metadata['ProcessingLevel'][0]
fillValue = dataAttributes['FillValue'][0]
satellite = metadata['SatelliteID'][0]
dataType = GDT_Float32
stepSize = 1500
cnt = 0
ySize = 1
workingDir = os.path.dirname(os.path.realpath(__file__)) + '/../'
processes = []
outFileList = []
for i in range(0,len(lat),stepSize):
if i + stepSize > len(lat):
stepSize = len(lat)-i
timeSlice = time_computed[i:stepSize+i]
timeAvg = np.average(timeSlice)
date = datetime.datetime.fromtimestamp(timeAvg).strftime('%Y%m%d.%H%M%S')
timeStart = timeSlice[0]
timeEnd = timeSlice[-1]
filenameCoords = 'GOME_Coords_' + date + '.tif'
coord_ds = driver.Create(filenameCoords, stepSize, ySize, 2, dataType)
coord_ds.GetRasterBand(1).WriteArray(np.reshape(lat[i:stepSize+i],(stepSize,1)).transpose())
coord_ds.GetRasterBand(2).WriteArray(np.reshape(lon[i:stepSize+i],(stepSize,1)).transpose())
coord_ds = None
filename = instrument + '_' +satellite + '_' + level + '_SO2_' + date + '_tmp.tif'
filenameOutput = filename[0:-8] + '.tif'
data_ds = driver.Create(filename, stepSize, ySize, 1, dataType)
band = np.reshape(data[i:stepSize+i],(stepSize,1)).transpose()
band[band == fillValue] = -9999
maxValue=np.max(ma.masked_equal(band,-9999))
minValue=np.min(ma.masked_equal(band,-9999))
data_ds.GetRasterBand(1).WriteArray(band)
data_ds = None
window = str(stepSize)+'x'+str(ySize)
processes.append((subprocess.Popen([workingDir + '/bin/remap', '-i', filename, '-o', filenameOutput, '-a', filenameCoords,'-q','-s', str(pixelSize),'-w',window ,'-f','80000','-n','-9999'], stdout=open(os.devnull, 'wb')), filenameOutput,timeStart, timeEnd,maxValue,minValue ))
outFileList.append(filenameOutput)
cnt += 1
# print subprocess.Popen([workingDir + '/bin/remap', '-i', filename, '-o', filenameOutput, '-a', filenameCoords,'-q','-s', str(pixelSize),'-w',window ,'-f','80000','-n','-9999'], stdout=open(os.devnull, 'wb')), filenameOutput,timeStart, timeEnd,maxValue,minValue
numProcesses = cnt-1
while processes:
for p in processes:
if p[0].poll() != None:
logging.debug( 'Finished remapping for ' + p[1])
dst_ds = gdal.Open(p[1], GA_Update)
dst_ds.SetMetadataItem('GLOBAL_MAX', str(p[4]))
dst_ds.SetMetadataItem('GLOBAL_MIN', str(p[5]))
timeStart = datetime.datetime.fromtimestamp(p[2])
timeStart = timeStart.strftime('%Y-%m-%dT%H:%M:%SZ')
timeEnd = datetime.datetime.fromtimestamp(p[3])
timeEnd = timeEnd.strftime('%Y-%m-%dT%H:%M:%SZ')
dst_ds.SetMetadataItem('TIME_START', timeStart)
dst_ds.SetMetadataItem('TIME_END', timeEnd)
dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
dst_ds.GetRasterBand(1).ComputeStatistics(False)
dst_ds = None
processes.remove(p)
tt.sleep(0.1)
os.system('rm ' + filenameCoords[0:11] + '*')
os.system('rm ' + filename[0:11] + '*_tmp.tif')
return outFileList
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.exit('\nUsage: %s GOME_file \n' % sys.argv[0] )
else:
if not os.path.exists(sys.argv[1]):
sys.exit('\nERROR: File %s was not found!\n' % sys.argv[1])
fileAbsPath = sys.argv[1]
createImgGOME_L2(fileAbsPath)
exit(0)
| time_computed.append(tt.mktime((datetime.datetime(1950, 1, 1)+timedelta(days=int(time['Day'][i]))+timedelta(milliseconds=int(time['MillisecondOfDay'][i]))).timetuple())) | conditional_block |
procGOME_L2.py | #!/usr/bin/env python
import numpy.ma as ma
import os,sys, subprocess, math, datetime
from os.path import basename
import numpy as np
import time as tt
import gdal
import h5py
from datetime import timedelta
from gdalconst import GDT_Float32, GA_Update
from osgeo import ogr, osr
import logging
#TODO: change for handling of SUB data
#def extractProduct(lat, lon, time, data, dataAttributes, attributes):
def createImgGOME_L2(fileAbsPath, pixelSize=0.25):
|
dataType = GDT_Float32
stepSize = 1500
cnt = 0
ySize = 1
workingDir = os.path.dirname(os.path.realpath(__file__)) + '/../'
processes = []
outFileList = []
for i in range(0,len(lat),stepSize):
if i + stepSize > len(lat):
stepSize = len(lat)-i
timeSlice = time_computed[i:stepSize+i]
timeAvg = np.average(timeSlice)
date = datetime.datetime.fromtimestamp(timeAvg).strftime('%Y%m%d.%H%M%S')
timeStart = timeSlice[0]
timeEnd = timeSlice[-1]
filenameCoords = 'GOME_Coords_' + date + '.tif'
coord_ds = driver.Create(filenameCoords, stepSize, ySize, 2, dataType)
coord_ds.GetRasterBand(1).WriteArray(np.reshape(lat[i:stepSize+i],(stepSize,1)).transpose())
coord_ds.GetRasterBand(2).WriteArray(np.reshape(lon[i:stepSize+i],(stepSize,1)).transpose())
coord_ds = None
filename = instrument + '_' +satellite + '_' + level + '_SO2_' + date + '_tmp.tif'
filenameOutput = filename[0:-8] + '.tif'
data_ds = driver.Create(filename, stepSize, ySize, 1, dataType)
band = np.reshape(data[i:stepSize+i],(stepSize,1)).transpose()
band[band == fillValue] = -9999
maxValue=np.max(ma.masked_equal(band,-9999))
minValue=np.min(ma.masked_equal(band,-9999))
data_ds.GetRasterBand(1).WriteArray(band)
data_ds = None
window = str(stepSize)+'x'+str(ySize)
processes.append((subprocess.Popen([workingDir + '/bin/remap', '-i', filename, '-o', filenameOutput, '-a', filenameCoords,'-q','-s', str(pixelSize),'-w',window ,'-f','80000','-n','-9999'], stdout=open(os.devnull, 'wb')), filenameOutput,timeStart, timeEnd,maxValue,minValue ))
outFileList.append(filenameOutput)
cnt += 1
# print subprocess.Popen([workingDir + '/bin/remap', '-i', filename, '-o', filenameOutput, '-a', filenameCoords,'-q','-s', str(pixelSize),'-w',window ,'-f','80000','-n','-9999'], stdout=open(os.devnull, 'wb')), filenameOutput,timeStart, timeEnd,maxValue,minValue
numProcesses = cnt-1
while processes:
for p in processes:
if p[0].poll() != None:
logging.debug( 'Finished remapping for ' + p[1])
dst_ds = gdal.Open(p[1], GA_Update)
dst_ds.SetMetadataItem('GLOBAL_MAX', str(p[4]))
dst_ds.SetMetadataItem('GLOBAL_MIN', str(p[5]))
timeStart = datetime.datetime.fromtimestamp(p[2])
timeStart = timeStart.strftime('%Y-%m-%dT%H:%M:%SZ')
timeEnd = datetime.datetime.fromtimestamp(p[3])
timeEnd = timeEnd.strftime('%Y-%m-%dT%H:%M:%SZ')
dst_ds.SetMetadataItem('TIME_START', timeStart)
dst_ds.SetMetadataItem('TIME_END', timeEnd)
dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
dst_ds.GetRasterBand(1).ComputeStatistics(False)
dst_ds = None
processes.remove(p)
tt.sleep(0.1)
os.system('rm ' + filenameCoords[0:11] + '*')
os.system('rm ' + filename[0:11] + '*_tmp.tif')
return outFileList
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.exit('\nUsage: %s GOME_file \n' % sys.argv[0] )
else:
if not os.path.exists(sys.argv[1]):
sys.exit('\nERROR: File %s was not found!\n' % sys.argv[1])
fileAbsPath = sys.argv[1]
createImgGOME_L2(fileAbsPath)
exit(0)
| hdf = h5py.File(fileAbsPath, 'r')
driver = gdal.GetDriverByName('GTiff')
lat = np.array(hdf['/GEOLOCATION/LatitudeCentre'])
lon = np.array(hdf['/GEOLOCATION/LongitudeCentre'])
lon = -180*(lon.astype(int)/180) + (lon%180)
time = np.array(hdf['/GEOLOCATION/Time'])
data = np.array(hdf['/TOTAL_COLUMNS/SO2'])
dataAttributes = hdf['/TOTAL_COLUMNS/SO2'].attrs
metadata = hdf['/META_DATA'].attrs
time_computed = []
for i in range(len(time)):
time_computed.append(tt.mktime((datetime.datetime(1950, 1, 1)+timedelta(days=int(time['Day'][i]))+timedelta(milliseconds=int(time['MillisecondOfDay'][i]))).timetuple()))
instrument = metadata['InstrumentID'][0]
level = 'L'+metadata['ProcessingLevel'][0]
fillValue = dataAttributes['FillValue'][0]
satellite = metadata['SatelliteID'][0] | identifier_body |
procGOME_L2.py | #!/usr/bin/env python
import numpy.ma as ma
import os,sys, subprocess, math, datetime
from os.path import basename
import numpy as np
import time as tt
import gdal
import h5py
from datetime import timedelta
from gdalconst import GDT_Float32, GA_Update
from osgeo import ogr, osr
import logging
#TODO: change for handling of SUB data
#def extractProduct(lat, lon, time, data, dataAttributes, attributes):
def | (fileAbsPath, pixelSize=0.25):
hdf = h5py.File(fileAbsPath, 'r')
driver = gdal.GetDriverByName('GTiff')
lat = np.array(hdf['/GEOLOCATION/LatitudeCentre'])
lon = np.array(hdf['/GEOLOCATION/LongitudeCentre'])
lon = -180*(lon.astype(int)/180) + (lon%180)
time = np.array(hdf['/GEOLOCATION/Time'])
data = np.array(hdf['/TOTAL_COLUMNS/SO2'])
dataAttributes = hdf['/TOTAL_COLUMNS/SO2'].attrs
metadata = hdf['/META_DATA'].attrs
time_computed = []
for i in range(len(time)):
time_computed.append(tt.mktime((datetime.datetime(1950, 1, 1)+timedelta(days=int(time['Day'][i]))+timedelta(milliseconds=int(time['MillisecondOfDay'][i]))).timetuple()))
instrument = metadata['InstrumentID'][0]
level = 'L'+metadata['ProcessingLevel'][0]
fillValue = dataAttributes['FillValue'][0]
satellite = metadata['SatelliteID'][0]
dataType = GDT_Float32
stepSize = 1500
cnt = 0
ySize = 1
workingDir = os.path.dirname(os.path.realpath(__file__)) + '/../'
processes = []
outFileList = []
for i in range(0,len(lat),stepSize):
if i + stepSize > len(lat):
stepSize = len(lat)-i
timeSlice = time_computed[i:stepSize+i]
timeAvg = np.average(timeSlice)
date = datetime.datetime.fromtimestamp(timeAvg).strftime('%Y%m%d.%H%M%S')
timeStart = timeSlice[0]
timeEnd = timeSlice[-1]
filenameCoords = 'GOME_Coords_' + date + '.tif'
coord_ds = driver.Create(filenameCoords, stepSize, ySize, 2, dataType)
coord_ds.GetRasterBand(1).WriteArray(np.reshape(lat[i:stepSize+i],(stepSize,1)).transpose())
coord_ds.GetRasterBand(2).WriteArray(np.reshape(lon[i:stepSize+i],(stepSize,1)).transpose())
coord_ds = None
filename = instrument + '_' +satellite + '_' + level + '_SO2_' + date + '_tmp.tif'
filenameOutput = filename[0:-8] + '.tif'
data_ds = driver.Create(filename, stepSize, ySize, 1, dataType)
band = np.reshape(data[i:stepSize+i],(stepSize,1)).transpose()
band[band == fillValue] = -9999
maxValue=np.max(ma.masked_equal(band,-9999))
minValue=np.min(ma.masked_equal(band,-9999))
data_ds.GetRasterBand(1).WriteArray(band)
data_ds = None
window = str(stepSize)+'x'+str(ySize)
processes.append((subprocess.Popen([workingDir + '/bin/remap', '-i', filename, '-o', filenameOutput, '-a', filenameCoords,'-q','-s', str(pixelSize),'-w',window ,'-f','80000','-n','-9999'], stdout=open(os.devnull, 'wb')), filenameOutput,timeStart, timeEnd,maxValue,minValue ))
outFileList.append(filenameOutput)
cnt += 1
# print subprocess.Popen([workingDir + '/bin/remap', '-i', filename, '-o', filenameOutput, '-a', filenameCoords,'-q','-s', str(pixelSize),'-w',window ,'-f','80000','-n','-9999'], stdout=open(os.devnull, 'wb')), filenameOutput,timeStart, timeEnd,maxValue,minValue
numProcesses = cnt-1
while processes:
for p in processes:
if p[0].poll() != None:
logging.debug( 'Finished remapping for ' + p[1])
dst_ds = gdal.Open(p[1], GA_Update)
dst_ds.SetMetadataItem('GLOBAL_MAX', str(p[4]))
dst_ds.SetMetadataItem('GLOBAL_MIN', str(p[5]))
timeStart = datetime.datetime.fromtimestamp(p[2])
timeStart = timeStart.strftime('%Y-%m-%dT%H:%M:%SZ')
timeEnd = datetime.datetime.fromtimestamp(p[3])
timeEnd = timeEnd.strftime('%Y-%m-%dT%H:%M:%SZ')
dst_ds.SetMetadataItem('TIME_START', timeStart)
dst_ds.SetMetadataItem('TIME_END', timeEnd)
dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
dst_ds.GetRasterBand(1).ComputeStatistics(False)
dst_ds = None
processes.remove(p)
tt.sleep(0.1)
os.system('rm ' + filenameCoords[0:11] + '*')
os.system('rm ' + filename[0:11] + '*_tmp.tif')
return outFileList
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.exit('\nUsage: %s GOME_file \n' % sys.argv[0] )
else:
if not os.path.exists(sys.argv[1]):
sys.exit('\nERROR: File %s was not found!\n' % sys.argv[1])
fileAbsPath = sys.argv[1]
createImgGOME_L2(fileAbsPath)
exit(0)
| createImgGOME_L2 | identifier_name |
app.js | /**
* @ngdoc overview
* @name hciApp
* @description
* # hciApp
*
* Main module of the application.
*/
angular
.module('hciApp', [
'ngAnimate',
'ngCookies',
'ngResource',
'ngRoute',
'ngSanitize',
'ngTouch',
'ui.bootstrap',
'ngMaterial'
])
.config(function ($routeProvider) {
$routeProvider
.when('/main', {
templateUrl: 'views/main.html',
controller: 'MainCtrl'
})
.when('/main/:mode', {
templateUrl: 'views/main.html',
controller: 'MainCtrl'
})
.when('/about', {
templateUrl: 'views/about.html',
controller: 'AboutCtrl'
})
.when('/ideas', {
templateUrl: 'views/ideas.html',
controller: 'IdeasCtrl'
})
.when('/details', {
templateUrl: 'views/details.html',
controller: 'DetailsCtrl'
})
.otherwise({
redirectTo: '/main'
});
}); | 'use strict';
| random_line_split |
|
map.spec.ts | import { map } from './map';
describe('map function', () => {
const input = [1, 2, 3, 4, 5];
const incr: (x: number) => number = x => 1 + x;
test('is a pure function', () => {
map(incr)(input);
expect(input).toMatchSnapshot();
}); |
test('[1, 2, 3, 4, 5] |> map(incr) === [2, 3, 4, 5, 6]', () => {
expect(map(incr)(input)).toMatchSnapshot();
});
test('[] |> map(incr) === []', () => {
expect(map(incr)([])).toMatchSnapshot();
});
test('call the iteree callback with correct inputs', () => {
const cb = jest.fn((x: number) => 1 + x);
const link = { fake: 'fake' };
map<number, number>(cb)(input, link);
input.forEach((i, index) => {
expect(cb.mock.calls[index][0]).toBe(i);
expect(cb.mock.calls[index][1]).toBe(index);
expect(cb.mock.calls[index][2]).toEqual(input);
expect(cb.mock.calls[index][3]).toBe(link);
});
});
}); | random_line_split |
|
lib.rs | 57, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929,
937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
];
const INITIAL_CAPACITY: usize = 10000;
struct PrimeInner {
data: Vec<u64>,
}
impl PrimeInner {
#[inline]
fn new() -> PrimeInner {
PrimeInner::with_capacity(INITIAL_CAPACITY)
}
#[inline]
fn new_empty() -> PrimeInner {
let mut data = Vec::with_capacity(INITIAL_CAPACITY);
data.push(2);
data.push(3);
PrimeInner { data }
}
#[inline]
fn with_capacity(capacity: usize) -> PrimeInner {
let mut data = Vec::with_capacity(capacity + SMALL_PRIMES.len());
data.extend(SMALL_PRIMES.iter().cloned());
PrimeInner { data }
}
#[inline]
fn max_prime(&self) -> u64 {
*self.data.last().unwrap()
}
#[inline]
fn nth(&mut self, n: usize) -> u64 {
self.grow(n + 1);
self.data[n]
}
#[inline]
fn contains(&mut self, n: u64) -> bool {
if n < self.max_prime() {
return self.data.binary_search(&n).is_ok();
}
if !self.is_coprime(n) {
return false;
}
(self.data.len()..)
.map(|i| self.nth(i))
.take_while(|&p| p * p <= n)
.all(|p| !n.is_multiple_of(&p))
}
#[inline]
fn is_coprime(&self, n: u64) -> bool {
self.data
.iter()
.take_while(|&&p| p * p <= n)
.all(|&p| !n.is_multiple_of(&p))
}
#[inline]
fn grow(&mut self, len: usize) {
if self.data.len() >= len {
return;
}
for n in (self.max_prime() + 2..).step_by(2) {
if self.is_coprime(n) {
self.data.push(n);
}
if self.data.len() >= len {
return;
}
}
}
}
/// Prime number set
#[derive(Clone)]
pub struct PrimeSet {
data: Rc<RefCell<PrimeInner>>,
}
impl Default for PrimeSet {
fn default() -> Self {
Self::new()
}
}
impl PrimeSet {
/// Create a new prime number generator.
#[inline]
pub fn new() -> Self {
Self::from_inner(PrimeInner::new())
}
/// Create a new prime number generator with empty buffers.
#[inline]
pub fn new_empty() -> Self {
Self::from_inner(PrimeInner::new_empty())
}
/// Create a new prime number generator with specifying buffer capacity.
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
Self::from_inner(PrimeInner::with_capacity(capacity))
}
/// Get nth prime.
///
/// # Example
///
/// ```
/// use prime::PrimeSet;
/// let ps = PrimeSet::new();
/// assert_eq!(2, ps.nth(0));
/// assert_eq!(3, ps.nth(1));
/// assert_eq!(5, ps.nth(2));
/// assert_eq!(743, ps.nth(131));
/// ```
#[inline]
pub fn nth(&self, n: usize) -> u64 {
self.data.borrow_mut().nth(n)
}
/// An iterator visiting all prime numbers in ascending order.
///
/// # Example
///
/// ```
/// use prime::PrimeSet;
/// let mut it = PrimeSet::new().iter();
/// assert_eq!(Some(2), it.next());
/// assert_eq!(Some(3), it.next());
/// assert_eq!(Some(5), it.next());
/// assert_eq!(Some(7), it.next());
/// ```
#[inline]
pub fn iter(&self) -> Nums {
Nums {
idx: 0,
data: self.data.clone(),
}
}
/// Return `true` if the given number is prime.
#[inline]
pub fn contains(&self, n: u64) -> bool {
self.data.borrow_mut().contains(n)
}
/// Calculates the combination of the number
#[inline]
pub fn combination(&self, n: u64, r: u64) -> u64 {
let mut fac = Factorized::<u64>::new(self);
for n in (r + 1)..(n + 1) {
fac.mul_assign(n);
}
for n in 1..(n - r + 1) {
fac.div_assign(n);
}
fac.into_integer()
}
fn from_inner(inner: PrimeInner) -> PrimeSet {
PrimeSet {
data: Rc::new(RefCell::new(inner)),
}
}
}
impl<'a> IntoIterator for &'a PrimeSet {
type Item = u64;
type IntoIter = Nums;
fn into_iter(self) -> Nums {
self.iter()
}
}
/// Prime number iterator
pub struct Nums {
idx: usize,
data: Rc<RefCell<PrimeInner>>,
}
impl Iterator for Nums {
type Item = u64;
#[inline]
fn next(&mut self) -> Option<u64> {
let p = self.data.borrow_mut().nth(self.idx);
self.idx += 1;
Some(p)
}
}
/// The base and exponent that represents factor.
pub type Factor<T> = (T, i32);
/// Numbers which can be factorized.
pub trait Factorize: Integer + FromPrimitive + Clone {
/// An iterator visiting all factors in ascending order.
fn factorize(&self, ps: &PrimeSet) -> Factors<Self>;
/// Calculates the number of all positive divisors.
fn num_of_divisor(&self, ps: &PrimeSet) -> u64 {
if self.is_zero() {
return Zero::zero();
}
self.factorize(ps)
.map(|(_base, exp)| (exp as u64) + 1)
.product()
}
/// Calculates the sum of all positive divisors.
fn sum_of_divisor(&self, ps: &PrimeSet) -> Self {
if self.is_zero() {
return Zero::zero();
}
let one: Self = One::one();
self.factorize(ps)
.map(|(base, exp)| {
let denom = base.clone() - one.clone();
(num_traits::pow(base, (exp as usize) + 1) - one.clone()) / denom
})
.fold(num_traits::one::<Self>(), |acc, n| acc * n)
}
/// Calculates the number of proper positive divisors.
#[inline]
fn num_of_proper_divisor(&self, ps: &PrimeSet) -> u64 {
self.num_of_divisor(ps) - 1
}
/// Caluculates the sum of all positive divisors.
#[inline]
fn sum_of_proper_divisor(&self, ps: &PrimeSet) -> Self {
self.sum_of_divisor(ps) - self.clone()
}
}
macro_rules! trait_impl_unsigned {
($($t:ty)*) => ($(
impl Factorize for $t {
#[inline]
fn factorize(&self, ps: &PrimeSet) -> Factors<$t> {
Factors { num: *self, iter: ps.iter() }
}
}
)*)
}
macro_rules! trait_impl_signed {
($($t:ty)*) => ($(
impl Factorize for $t {
#[inline]
fn factorize(&self, ps: &PrimeSet) -> Factors<$t> {
if *self < 0 {
Factors { num: -*self, iter: ps.iter() }
} else {
Factors { num: *self, iter: ps.iter() }
}
}
}
)*)
}
trait_impl_unsigned!(usize u8 u16 u32 u64);
trait_impl_signed!(isize i8 i16 i32 i64);
/// Factors iterator.
pub struct Factors<T> {
num: T,
iter: Nums,
}
impl<T: Integer + FromPrimitive + Clone> Iterator for Factors<T> {
type Item = Factor<T>;
#[inline]
fn next(&mut self) -> Option<Factor<T>> {
if self.num <= One::one() |
while let Some(p) = self.iter.next() {
let p: T = FromPrimitive::from_u64(p).unwrap();
if p.clone() * p.clone() > self.num {
let n = mem::replace(&mut self.num, | {
return None;
} | conditional_block |
lib.rs | return;
}
for n in (self.max_prime() + 2..).step_by(2) {
if self.is_coprime(n) {
self.data.push(n);
}
if self.data.len() >= len {
return;
}
}
}
}
/// Prime number set
#[derive(Clone)]
pub struct PrimeSet {
data: Rc<RefCell<PrimeInner>>,
}
impl Default for PrimeSet {
fn default() -> Self {
Self::new()
}
}
impl PrimeSet {
/// Create a new prime number generator.
#[inline]
pub fn new() -> Self {
Self::from_inner(PrimeInner::new())
}
/// Create a new prime number generator with empty buffers.
#[inline]
pub fn new_empty() -> Self {
Self::from_inner(PrimeInner::new_empty())
}
/// Create a new prime number generator with specifying buffer capacity.
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
Self::from_inner(PrimeInner::with_capacity(capacity))
}
/// Get nth prime.
///
/// # Example
///
/// ```
/// use prime::PrimeSet;
/// let ps = PrimeSet::new();
/// assert_eq!(2, ps.nth(0));
/// assert_eq!(3, ps.nth(1));
/// assert_eq!(5, ps.nth(2));
/// assert_eq!(743, ps.nth(131));
/// ```
#[inline]
pub fn nth(&self, n: usize) -> u64 {
self.data.borrow_mut().nth(n)
}
/// An iterator visiting all prime numbers in ascending order.
///
/// # Example
///
/// ```
/// use prime::PrimeSet;
/// let mut it = PrimeSet::new().iter();
/// assert_eq!(Some(2), it.next());
/// assert_eq!(Some(3), it.next());
/// assert_eq!(Some(5), it.next());
/// assert_eq!(Some(7), it.next());
/// ```
#[inline]
pub fn iter(&self) -> Nums {
Nums {
idx: 0,
data: self.data.clone(),
}
}
/// Return `true` if the given number is prime.
#[inline]
pub fn contains(&self, n: u64) -> bool {
self.data.borrow_mut().contains(n)
}
/// Calculates the combination of the number
#[inline]
pub fn combination(&self, n: u64, r: u64) -> u64 {
let mut fac = Factorized::<u64>::new(self);
for n in (r + 1)..(n + 1) {
fac.mul_assign(n);
}
for n in 1..(n - r + 1) {
fac.div_assign(n);
}
fac.into_integer()
}
fn from_inner(inner: PrimeInner) -> PrimeSet {
PrimeSet {
data: Rc::new(RefCell::new(inner)),
}
}
}
impl<'a> IntoIterator for &'a PrimeSet {
type Item = u64;
type IntoIter = Nums;
fn into_iter(self) -> Nums {
self.iter()
}
}
/// Prime number iterator
pub struct Nums {
idx: usize,
data: Rc<RefCell<PrimeInner>>,
}
impl Iterator for Nums {
type Item = u64;
#[inline]
fn next(&mut self) -> Option<u64> {
let p = self.data.borrow_mut().nth(self.idx);
self.idx += 1;
Some(p)
}
}
/// The base and exponent that represents factor.
pub type Factor<T> = (T, i32);
/// Numbers which can be factorized.
pub trait Factorize: Integer + FromPrimitive + Clone {
/// An iterator visiting all factors in ascending order.
fn factorize(&self, ps: &PrimeSet) -> Factors<Self>;
/// Calculates the number of all positive divisors.
fn num_of_divisor(&self, ps: &PrimeSet) -> u64 {
if self.is_zero() {
return Zero::zero();
}
self.factorize(ps)
.map(|(_base, exp)| (exp as u64) + 1)
.product()
}
/// Calculates the sum of all positive divisors.
fn sum_of_divisor(&self, ps: &PrimeSet) -> Self {
if self.is_zero() {
return Zero::zero();
}
let one: Self = One::one();
self.factorize(ps)
.map(|(base, exp)| {
let denom = base.clone() - one.clone();
(num_traits::pow(base, (exp as usize) + 1) - one.clone()) / denom
})
.fold(num_traits::one::<Self>(), |acc, n| acc * n)
}
/// Calculates the number of proper positive divisors.
#[inline]
fn num_of_proper_divisor(&self, ps: &PrimeSet) -> u64 {
self.num_of_divisor(ps) - 1
}
/// Caluculates the sum of all positive divisors.
#[inline]
fn sum_of_proper_divisor(&self, ps: &PrimeSet) -> Self {
self.sum_of_divisor(ps) - self.clone()
}
}
macro_rules! trait_impl_unsigned {
($($t:ty)*) => ($(
impl Factorize for $t {
#[inline]
fn factorize(&self, ps: &PrimeSet) -> Factors<$t> {
Factors { num: *self, iter: ps.iter() }
}
}
)*)
}
macro_rules! trait_impl_signed {
($($t:ty)*) => ($(
impl Factorize for $t {
#[inline]
fn factorize(&self, ps: &PrimeSet) -> Factors<$t> {
if *self < 0 {
Factors { num: -*self, iter: ps.iter() }
} else {
Factors { num: *self, iter: ps.iter() }
}
}
}
)*)
}
trait_impl_unsigned!(usize u8 u16 u32 u64);
trait_impl_signed!(isize i8 i16 i32 i64);
/// Factors iterator.
pub struct Factors<T> {
num: T,
iter: Nums,
}
impl<T: Integer + FromPrimitive + Clone> Iterator for Factors<T> {
type Item = Factor<T>;
#[inline]
fn next(&mut self) -> Option<Factor<T>> {
if self.num <= One::one() {
return None;
}
while let Some(p) = self.iter.next() {
let p: T = FromPrimitive::from_u64(p).unwrap();
if p.clone() * p.clone() > self.num {
let n = mem::replace(&mut self.num, One::one());
return Some((n, 1));
}
if self.num.is_multiple_of(&p) {
let mut exp = 1;
self.num = self.num.clone() / p.clone();
while self.num.is_multiple_of(&p) {
exp += 1;
self.num = self.num.clone() / p.clone();
}
return Some((p, exp));
}
}
unreachable!()
}
}
/// Factorized number providing multiple or divide operation without causing
/// overflow.
///
/// # Example
///
/// ```
/// use prime::{Factorized, PrimeSet};
/// use std::iter;
///
/// // Calculates 40C20
/// let ps = PrimeSet::new();
/// let mut fac = Factorized::<u64>::new(&ps);
/// for n in 21..41 {
/// fac.mul_assign(n);
/// }
/// for n in 1..21 {
/// fac.div_assign(n);
/// }
/// assert_eq!(137846528820, fac.into_integer());
/// ```
pub struct Factorized<'a, T> {
ps: &'a PrimeSet,
map: HashMap<T, i32>,
}
impl<'a, T: Factorize + Eq + Hash> Factorized<'a, T> {
/// Creates new empty factorized number.
///
/// The empty factorized number represents `1`.
pub fn new(ps: &PrimeSet) -> Factorized<'_, T> {
Factorized {
ps,
map: HashMap::new(),
}
}
/// Creates a factorized number from an integer type.
pub fn from_integer(ps: &PrimeSet, n: T) -> Factorized<'_, T> {
Factorized {
ps,
map: n.factorize(ps).collect(),
}
}
/// Converts the factorized number into an integer type.
pub fn into_integer(self) -> T {
self.map
.into_iter()
.fold::<T, _>(One::one(), |prod, (base, exp)| {
if exp > 0 {
prod * num_traits::pow(base, exp as usize)
} else {
prod / num_traits::pow(base, (-exp) as usize)
}
})
}
/// Takes LCM (lowest common multiple) with given number and the factorized
/// number.
pub fn | lcm_with | identifier_name |
|
lib.rs | it.next());
/// ```
#[inline]
pub fn iter(&self) -> Nums {
Nums {
idx: 0,
data: self.data.clone(),
}
}
/// Return `true` if the given number is prime.
#[inline]
pub fn contains(&self, n: u64) -> bool {
self.data.borrow_mut().contains(n)
}
/// Calculates the combination of the number
#[inline]
pub fn combination(&self, n: u64, r: u64) -> u64 {
let mut fac = Factorized::<u64>::new(self);
for n in (r + 1)..(n + 1) {
fac.mul_assign(n);
}
for n in 1..(n - r + 1) {
fac.div_assign(n);
}
fac.into_integer()
}
fn from_inner(inner: PrimeInner) -> PrimeSet {
PrimeSet {
data: Rc::new(RefCell::new(inner)),
}
}
}
impl<'a> IntoIterator for &'a PrimeSet {
type Item = u64;
type IntoIter = Nums;
fn into_iter(self) -> Nums {
self.iter()
}
}
/// Prime number iterator
pub struct Nums {
idx: usize,
data: Rc<RefCell<PrimeInner>>,
}
impl Iterator for Nums {
type Item = u64;
#[inline]
fn next(&mut self) -> Option<u64> {
let p = self.data.borrow_mut().nth(self.idx);
self.idx += 1;
Some(p)
}
}
/// The base and exponent that represents factor.
pub type Factor<T> = (T, i32);
/// Numbers which can be factorized.
pub trait Factorize: Integer + FromPrimitive + Clone {
/// An iterator visiting all factors in ascending order.
fn factorize(&self, ps: &PrimeSet) -> Factors<Self>;
/// Calculates the number of all positive divisors.
fn num_of_divisor(&self, ps: &PrimeSet) -> u64 {
if self.is_zero() {
return Zero::zero();
}
self.factorize(ps)
.map(|(_base, exp)| (exp as u64) + 1)
.product()
}
/// Calculates the sum of all positive divisors.
fn sum_of_divisor(&self, ps: &PrimeSet) -> Self {
if self.is_zero() {
return Zero::zero();
}
let one: Self = One::one();
self.factorize(ps)
.map(|(base, exp)| {
let denom = base.clone() - one.clone();
(num_traits::pow(base, (exp as usize) + 1) - one.clone()) / denom
})
.fold(num_traits::one::<Self>(), |acc, n| acc * n)
}
/// Calculates the number of proper positive divisors.
#[inline]
fn num_of_proper_divisor(&self, ps: &PrimeSet) -> u64 {
self.num_of_divisor(ps) - 1
}
/// Caluculates the sum of all positive divisors.
#[inline]
fn sum_of_proper_divisor(&self, ps: &PrimeSet) -> Self {
self.sum_of_divisor(ps) - self.clone()
}
}
macro_rules! trait_impl_unsigned {
($($t:ty)*) => ($(
impl Factorize for $t {
#[inline]
fn factorize(&self, ps: &PrimeSet) -> Factors<$t> {
Factors { num: *self, iter: ps.iter() }
}
}
)*)
}
macro_rules! trait_impl_signed {
($($t:ty)*) => ($(
impl Factorize for $t {
#[inline]
fn factorize(&self, ps: &PrimeSet) -> Factors<$t> {
if *self < 0 {
Factors { num: -*self, iter: ps.iter() }
} else {
Factors { num: *self, iter: ps.iter() }
}
}
}
)*)
}
trait_impl_unsigned!(usize u8 u16 u32 u64);
trait_impl_signed!(isize i8 i16 i32 i64);
/// Factors iterator.
pub struct Factors<T> {
num: T,
iter: Nums,
}
impl<T: Integer + FromPrimitive + Clone> Iterator for Factors<T> {
type Item = Factor<T>;
#[inline]
fn next(&mut self) -> Option<Factor<T>> {
if self.num <= One::one() {
return None;
}
while let Some(p) = self.iter.next() {
let p: T = FromPrimitive::from_u64(p).unwrap();
if p.clone() * p.clone() > self.num {
let n = mem::replace(&mut self.num, One::one());
return Some((n, 1));
}
if self.num.is_multiple_of(&p) {
let mut exp = 1;
self.num = self.num.clone() / p.clone();
while self.num.is_multiple_of(&p) {
exp += 1;
self.num = self.num.clone() / p.clone();
}
return Some((p, exp));
}
}
unreachable!()
}
}
/// Factorized number providing multiple or divide operation without causing
/// overflow.
///
/// # Example
///
/// ```
/// use prime::{Factorized, PrimeSet};
/// use std::iter;
///
/// // Calculates 40C20
/// let ps = PrimeSet::new();
/// let mut fac = Factorized::<u64>::new(&ps);
/// for n in 21..41 {
/// fac.mul_assign(n);
/// }
/// for n in 1..21 {
/// fac.div_assign(n);
/// }
/// assert_eq!(137846528820, fac.into_integer());
/// ```
pub struct Factorized<'a, T> {
ps: &'a PrimeSet,
map: HashMap<T, i32>,
}
impl<'a, T: Factorize + Eq + Hash> Factorized<'a, T> {
/// Creates new empty factorized number.
///
/// The empty factorized number represents `1`.
pub fn new(ps: &PrimeSet) -> Factorized<'_, T> {
Factorized {
ps,
map: HashMap::new(),
}
}
/// Creates a factorized number from an integer type.
pub fn from_integer(ps: &PrimeSet, n: T) -> Factorized<'_, T> {
Factorized {
ps,
map: n.factorize(ps).collect(),
}
}
/// Converts the factorized number into an integer type.
pub fn into_integer(self) -> T {
self.map
.into_iter()
.fold::<T, _>(One::one(), |prod, (base, exp)| {
if exp > 0 {
prod * num_traits::pow(base, exp as usize)
} else {
prod / num_traits::pow(base, (-exp) as usize)
}
})
}
/// Takes LCM (lowest common multiple) with given number and the factorized
/// number.
pub fn lcm_with(&mut self, n: T) {
for (b, e) in n.factorize(self.ps) {
match self.map.entry(b) {
Vacant(entry) => {
let _ = entry.insert(e);
}
Occupied(entry) => {
let p = entry.into_mut();
*p = cmp::max(e, *p);
}
}
}
}
/// Multiples the factorized number and given number.
pub fn mul_assign(&mut self, n: T) {
for (b, e) in n.factorize(self.ps) {
match self.map.entry(b) {
Vacant(entry) => {
let _ = entry.insert(e);
}
Occupied(entry) => {
*entry.into_mut() += e;
}
}
}
}
/// Divides the factorized number by given number.
pub fn div_assign(&mut self, n: T) {
for (b, e) in n.factorize(self.ps) {
match self.map.entry(b) {
Vacant(entry) => {
let _ = entry.insert(-e);
}
Occupied(entry) => {
*entry.into_mut() -= e;
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::{Factor, Factorize, PrimeSet};
#[test]
fn iter() {
let p1 = PrimeSet::new_empty();
assert_eq!(
super::SMALL_PRIMES,
&p1.iter()
.take(super::SMALL_PRIMES.len())
.collect::<Vec<_>>()[..]
)
}
#[test]
fn contains() | {
let ps = PrimeSet::new();
assert!(!ps.contains(0));
assert!(!ps.contains(1));
assert!(ps.contains(2));
assert!(ps.contains(3));
assert!(!ps.contains(4));
assert!(ps.contains(5));
assert!(!ps.contains(6));
assert!(ps.contains(7));
assert!(!ps.contains(100));
} | identifier_body |
|
lib.rs | ps: &PrimeSet) -> Self {
if self.is_zero() {
return Zero::zero();
}
let one: Self = One::one();
self.factorize(ps)
.map(|(base, exp)| {
let denom = base.clone() - one.clone();
(num_traits::pow(base, (exp as usize) + 1) - one.clone()) / denom
})
.fold(num_traits::one::<Self>(), |acc, n| acc * n)
}
/// Calculates the number of proper positive divisors.
#[inline]
fn num_of_proper_divisor(&self, ps: &PrimeSet) -> u64 {
self.num_of_divisor(ps) - 1
}
/// Caluculates the sum of all positive divisors.
#[inline]
fn sum_of_proper_divisor(&self, ps: &PrimeSet) -> Self {
self.sum_of_divisor(ps) - self.clone()
}
}
macro_rules! trait_impl_unsigned {
($($t:ty)*) => ($(
impl Factorize for $t {
#[inline]
fn factorize(&self, ps: &PrimeSet) -> Factors<$t> {
Factors { num: *self, iter: ps.iter() }
}
}
)*)
}
macro_rules! trait_impl_signed {
($($t:ty)*) => ($(
impl Factorize for $t {
#[inline]
fn factorize(&self, ps: &PrimeSet) -> Factors<$t> {
if *self < 0 {
Factors { num: -*self, iter: ps.iter() }
} else {
Factors { num: *self, iter: ps.iter() }
}
}
}
)*)
}
trait_impl_unsigned!(usize u8 u16 u32 u64);
trait_impl_signed!(isize i8 i16 i32 i64);
/// Factors iterator.
pub struct Factors<T> {
num: T,
iter: Nums,
}
impl<T: Integer + FromPrimitive + Clone> Iterator for Factors<T> {
type Item = Factor<T>;
#[inline]
fn next(&mut self) -> Option<Factor<T>> {
if self.num <= One::one() {
return None;
}
while let Some(p) = self.iter.next() {
let p: T = FromPrimitive::from_u64(p).unwrap();
if p.clone() * p.clone() > self.num {
let n = mem::replace(&mut self.num, One::one());
return Some((n, 1));
}
if self.num.is_multiple_of(&p) {
let mut exp = 1;
self.num = self.num.clone() / p.clone();
while self.num.is_multiple_of(&p) {
exp += 1;
self.num = self.num.clone() / p.clone();
}
return Some((p, exp));
}
}
unreachable!()
}
}
/// Factorized number providing multiple or divide operation without causing
/// overflow.
///
/// # Example
///
/// ```
/// use prime::{Factorized, PrimeSet};
/// use std::iter;
///
/// // Calculates 40C20
/// let ps = PrimeSet::new();
/// let mut fac = Factorized::<u64>::new(&ps);
/// for n in 21..41 {
/// fac.mul_assign(n);
/// }
/// for n in 1..21 {
/// fac.div_assign(n);
/// }
/// assert_eq!(137846528820, fac.into_integer());
/// ```
pub struct Factorized<'a, T> {
ps: &'a PrimeSet,
map: HashMap<T, i32>,
}
impl<'a, T: Factorize + Eq + Hash> Factorized<'a, T> {
/// Creates new empty factorized number.
///
/// The empty factorized number represents `1`.
pub fn new(ps: &PrimeSet) -> Factorized<'_, T> {
Factorized {
ps,
map: HashMap::new(),
}
}
/// Creates a factorized number from an integer type.
pub fn from_integer(ps: &PrimeSet, n: T) -> Factorized<'_, T> {
Factorized {
ps,
map: n.factorize(ps).collect(),
}
}
/// Converts the factorized number into an integer type.
pub fn into_integer(self) -> T {
self.map
.into_iter()
.fold::<T, _>(One::one(), |prod, (base, exp)| {
if exp > 0 {
prod * num_traits::pow(base, exp as usize)
} else {
prod / num_traits::pow(base, (-exp) as usize)
}
})
}
/// Takes LCM (lowest common multiple) with given number and the factorized
/// number.
pub fn lcm_with(&mut self, n: T) {
for (b, e) in n.factorize(self.ps) {
match self.map.entry(b) {
Vacant(entry) => {
let _ = entry.insert(e);
}
Occupied(entry) => {
let p = entry.into_mut();
*p = cmp::max(e, *p);
}
}
}
}
/// Multiples the factorized number and given number.
pub fn mul_assign(&mut self, n: T) {
for (b, e) in n.factorize(self.ps) {
match self.map.entry(b) {
Vacant(entry) => {
let _ = entry.insert(e);
}
Occupied(entry) => {
*entry.into_mut() += e;
}
}
}
}
/// Divides the factorized number by given number.
pub fn div_assign(&mut self, n: T) {
for (b, e) in n.factorize(self.ps) {
match self.map.entry(b) {
Vacant(entry) => {
let _ = entry.insert(-e);
}
Occupied(entry) => {
*entry.into_mut() -= e;
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::{Factor, Factorize, PrimeSet};
#[test]
fn iter() {
let p1 = PrimeSet::new_empty();
assert_eq!(
super::SMALL_PRIMES,
&p1.iter()
.take(super::SMALL_PRIMES.len())
.collect::<Vec<_>>()[..]
)
}
#[test]
fn contains() {
let ps = PrimeSet::new();
assert!(!ps.contains(0));
assert!(!ps.contains(1));
assert!(ps.contains(2));
assert!(ps.contains(3));
assert!(!ps.contains(4));
assert!(ps.contains(5));
assert!(!ps.contains(6));
assert!(ps.contains(7));
assert!(!ps.contains(100));
}
#[test]
fn multi_iter() {
let ps = PrimeSet::new();
for (p1, p2) in ps.iter().zip(ps.iter()).take(500) {
assert_eq!(p1, p2);
}
}
#[test]
fn clone_clones_data() {
let p1 = PrimeSet::new_empty();
let p2 = p1.clone();
let _ = p1.nth(5000);
let l1 = p1.data.borrow().data.len();
let l2 = p2.data.borrow().data.len();
assert_eq!(l1, l2);
}
#[test]
fn factorize() {
fn check(n: u32, fs: &[Factor<u32>]) {
let ps = PrimeSet::new();
assert_eq!(fs, &n.factorize(&ps).collect::<Vec<_>>()[..]);
}
check(0, &[]);
check(1, &[]);
check(2, &[(2, 1)]);
check(3, &[(3, 1)]);
check(4, &[(2, 2)]);
check(5, &[(5, 1)]);
check(6, &[(2, 1), (3, 1)]);
check(7, &[(7, 1)]);
check(8, &[(2, 3)]);
check(9, &[(3, 2)]);
check(10, &[(2, 1), (5, 1)]);
check(8 * 27, &[(2, 3), (3, 3)]);
check(97, &[(97, 1)]);
check(97 * 41, &[(41, 1), (97, 1)]);
}
#[test]
fn num_of_divisor() {
let pairs = &[
(0, 0),
(1, 1),
(2, 2),
(3, 2),
(4, 3),
(5, 2),
(6, 4),
(7, 2),
(8, 4),
(9, 3),
(10, 4),
(11, 2),
(12, 6), | (24, 8),
(36, 9), | random_line_split |
|
wavetable.js | PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
var kDefaultNumberOfResampleRanges = 11;
function | (name, context) {
this.name = name;
this.context = context;
this.sampleRate = context.sampleRate;
this.url = "wave-tables/" + this.name;
this.waveTableSize = 4096; // hard-coded for now
this.buffer = 0;
this.numberOfResampleRanges = kDefaultNumberOfResampleRanges;
}
WaveTable.prototype.getWaveDataForPitch = function(pitchFrequency) {
var nyquist = 0.5 * this.sampleRate;
var lowestNumPartials = this.getNumberOfPartialsForRange(0);
var lowestFundamental = nyquist / lowestNumPartials;
// Find out pitch range
var ratio = pitchFrequency / lowestFundamental;
var pitchRange = ratio == 0.0 ? 0 : Math.floor(Math.log(ratio) / Math.LN2);
if (pitchRange < 0)
pitchRange = 0;
// Too bad, we'll alias if pitch is greater than around 5KHz :)
if (pitchRange >= this.numberOfResampleRanges)
pitchRange = this.numberOfResampleRanges - 1;
return this.buffers[pitchRange];
}
WaveTable.prototype.getNumberOfPartialsForRange = function(j) {
// goes from 1024 -> 4 @ 44.1KHz (and do same for 48KHz)
// goes from 2048 -> 8 @ 96KHz
var npartials = Math.pow(2, 1 + this.numberOfResampleRanges - j);
if (this.getSampleRate() > 48000.0)
npartials *= 2; // high sample rate allows more harmonics at given fundamental
return npartials;
}
WaveTable.prototype.getWaveTableSize = function() {
return this.waveTableSize;
}
WaveTable.prototype.getSampleRate = function() {
return this.sampleRate;
}
WaveTable.prototype.getRateScale = function() {
return this.getWaveTableSize() / this.getSampleRate();
}
WaveTable.prototype.getNumberOfResampleRanges = function() {
this.numberOfResampleRanges;
}
WaveTable.prototype.getName = function() {
return this.name;
}
WaveTable.prototype.load = function(callback) {
var request = new XMLHttpRequest();
request.open("GET", this.url, true);
var wave = this;
request.onload = function() {
// Get the frequency-domain waveform data.
var f = eval('(' + request.responseText + ')');
// Copy into more efficient Float32Arrays.
var n = f.real.length;
frequencyData = { "real": new Float32Array(n), "imag": new Float32Array(n) };
wave.frequencyData = frequencyData;
for (var i = 0; i < n; ++i) {
frequencyData.real[i] = f.real[i];
frequencyData.imag[i] = f.imag[i];
}
wave.createBuffers();
if (callback)
callback(wave);
};
request.onerror = function() {
alert("error loading: " + wave.url);
};
request.send();
}
WaveTable.prototype.print = function() {
var f = this.frequencyData;
var info = document.getElementById("info");
var s = "";
for (var i = 0; i < 2048; ++i) {
s += "{" + f.real[i] + ", " + f.imag[i] + "}, <br>";
}
info.innerHTML = s;
}
WaveTable.prototype.printBuffer = function(buffer) {
var info = document.getElementById("info");
var s = "";
for (var i = 0; i < 4096; ++i) {
s += buffer[i] + "<br>";
}
info.innerHTML = s;
}
// WaveTable.prototype.createBuffers = function() {
// var f = this.frequencyData;
//
// var n = 4096;
//
// var fft = new FFT(n, 44100);
//
// // Copy from loaded frequency data and scale.
// for (var i = 0; i < n / 2; ++i) {
// fft.real[i] = 4096 * f.real[i];
// fft.imag[i] = 4096 * f.imag[i];
// }
//
// // Now do inverse FFT
// this.data = fft.inverse();
// var data = this.data;
//
// this.buffer = context.createBuffer(1, data.length, 44100);
//
// // Copy data to the buffer.
// var p = this.buffer.getChannelData(0);
// for (var i = 0; i < data.length; ++i) {
// p[i] = data[i];
// }
// }
// Convert into time-domain wave tables.
// We actually create several of them for non-aliasing playback at different playback rates.
WaveTable.prototype.createBuffers = function() {
// resample ranges
//
// let's divide up versions of our waves based on the maximum fundamental frequency we're
// resampling at. Let's use fundamental frequencies based on dividing Nyquist by powers of two.
// For example for 44.1KHz sample-rate we have:
//
// ranges
// ----------------------------------
// 21Hz, 43Hz, 86Hz, 172Hz, 344Hz, 689Hz, 1378Hz, 2756Hz, 5512Hz, 11025Hz, 22050Hz <-- 44.1KHz
// 23Hz, 47Hz, 94Hz, 187Hz, 375Hz, 750Hz, 1500Hz, 3000Hz, 6000Hz, 12000Hz, 24000Hz, 48000Hz <-- 96KHz
//
// and number of partials:
//
// 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1
// 2048, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1
//
// But it's probably OK if we skip the very highest fundamental frequencies and only
// go up to 5512Hz, so we have a total of 9 resample ranges
//
// 0 1 2 3 4 5 6 7 8
// The FFT size needs to be at least 2048 @ 44.1KHz and 4096 @ 96KHz
//
// So let's try to use FFT size of 4096 all the time and pull out the harmonics we want
//
this.buffers = new Array();
var finalScale = 1.0;
for (var j = 0; j < this.numberOfResampleRanges; ++j) {
var n = this.waveTableSize;
var frame = new FFT(n, this.sampleRate);
// Copy from loaded frequency data and scale.
var f = this.frequencyData;
var scale = n;
for (var i = 0; i < n / 2; ++i) {
frame.real[i] = scale * f.real[i];
frame.imag[i] = scale * f.imag[i];
}
var realP = frame.real;
var imagP = frame.imag;
// Find the starting bin where we should start clearing out
// (we need to clear out the highest frequencies to band-limit the waveform)
var fftSize = n;
var halfSize = fftSize / 2;
var npartials = this.getNumberOfPartialsForRange(j);
// Now, go through and cull out the aliasing harmonics...
for (var i = npartials + 1; i < halfSize; i++) {
realP[i] = 0.0 | WaveTable | identifier_name |
wavetable.js | PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
var kDefaultNumberOfResampleRanges = 11;
function WaveTable(name, context) |
WaveTable.prototype.getWaveDataForPitch = function(pitchFrequency) {
var nyquist = 0.5 * this.sampleRate;
var lowestNumPartials = this.getNumberOfPartialsForRange(0);
var lowestFundamental = nyquist / lowestNumPartials;
// Find out pitch range
var ratio = pitchFrequency / lowestFundamental;
var pitchRange = ratio == 0.0 ? 0 : Math.floor(Math.log(ratio) / Math.LN2);
if (pitchRange < 0)
pitchRange = 0;
// Too bad, we'll alias if pitch is greater than around 5KHz :)
if (pitchRange >= this.numberOfResampleRanges)
pitchRange = this.numberOfResampleRanges - 1;
return this.buffers[pitchRange];
}
WaveTable.prototype.getNumberOfPartialsForRange = function(j) {
// goes from 1024 -> 4 @ 44.1KHz (and do same for 48KHz)
// goes from 2048 -> 8 @ 96KHz
var npartials = Math.pow(2, 1 + this.numberOfResampleRanges - j);
if (this.getSampleRate() > 48000.0)
npartials *= 2; // high sample rate allows more harmonics at given fundamental
return npartials;
}
WaveTable.prototype.getWaveTableSize = function() {
return this.waveTableSize;
}
WaveTable.prototype.getSampleRate = function() {
return this.sampleRate;
}
WaveTable.prototype.getRateScale = function() {
return this.getWaveTableSize() / this.getSampleRate();
}
WaveTable.prototype.getNumberOfResampleRanges = function() {
this.numberOfResampleRanges;
}
WaveTable.prototype.getName = function() {
return this.name;
}
WaveTable.prototype.load = function(callback) {
var request = new XMLHttpRequest();
request.open("GET", this.url, true);
var wave = this;
request.onload = function() {
// Get the frequency-domain waveform data.
var f = eval('(' + request.responseText + ')');
// Copy into more efficient Float32Arrays.
var n = f.real.length;
frequencyData = { "real": new Float32Array(n), "imag": new Float32Array(n) };
wave.frequencyData = frequencyData;
for (var i = 0; i < n; ++i) {
frequencyData.real[i] = f.real[i];
frequencyData.imag[i] = f.imag[i];
}
wave.createBuffers();
if (callback)
callback(wave);
};
request.onerror = function() {
alert("error loading: " + wave.url);
};
request.send();
}
WaveTable.prototype.print = function() {
var f = this.frequencyData;
var info = document.getElementById("info");
var s = "";
for (var i = 0; i < 2048; ++i) {
s += "{" + f.real[i] + ", " + f.imag[i] + "}, <br>";
}
info.innerHTML = s;
}
WaveTable.prototype.printBuffer = function(buffer) {
var info = document.getElementById("info");
var s = "";
for (var i = 0; i < 4096; ++i) {
s += buffer[i] + "<br>";
}
info.innerHTML = s;
}
// WaveTable.prototype.createBuffers = function() {
// var f = this.frequencyData;
//
// var n = 4096;
//
// var fft = new FFT(n, 44100);
//
// // Copy from loaded frequency data and scale.
// for (var i = 0; i < n / 2; ++i) {
// fft.real[i] = 4096 * f.real[i];
// fft.imag[i] = 4096 * f.imag[i];
// }
//
// // Now do inverse FFT
// this.data = fft.inverse();
// var data = this.data;
//
// this.buffer = context.createBuffer(1, data.length, 44100);
//
// // Copy data to the buffer.
// var p = this.buffer.getChannelData(0);
// for (var i = 0; i < data.length; ++i) {
// p[i] = data[i];
// }
// }
// Convert into time-domain wave tables.
// We actually create several of them for non-aliasing playback at different playback rates.
WaveTable.prototype.createBuffers = function() {
// resample ranges
//
// let's divide up versions of our waves based on the maximum fundamental frequency we're
// resampling at. Let's use fundamental frequencies based on dividing Nyquist by powers of two.
// For example for 44.1KHz sample-rate we have:
//
// ranges
// ----------------------------------
// 21Hz, 43Hz, 86Hz, 172Hz, 344Hz, 689Hz, 1378Hz, 2756Hz, 5512Hz, 11025Hz, 22050Hz <-- 44.1KHz
// 23Hz, 47Hz, 94Hz, 187Hz, 375Hz, 750Hz, 1500Hz, 3000Hz, 6000Hz, 12000Hz, 24000Hz, 48000Hz <-- 96KHz
//
// and number of partials:
//
// 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1
// 2048, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1
//
// But it's probably OK if we skip the very highest fundamental frequencies and only
// go up to 5512Hz, so we have a total of 9 resample ranges
//
// 0 1 2 3 4 5 6 7 8
// The FFT size needs to be at least 2048 @ 44.1KHz and 4096 @ 96KHz
//
// So let's try to use FFT size of 4096 all the time and pull out the harmonics we want
//
this.buffers = new Array();
var finalScale = 1.0;
for (var j = 0; j < this.numberOfResampleRanges; ++j) {
var n = this.waveTableSize;
var frame = new FFT(n, this.sampleRate);
// Copy from loaded frequency data and scale.
var f = this.frequencyData;
var scale = n;
for (var i = 0; i < n / 2; ++i) {
frame.real[i] = scale * f.real[i];
frame.imag[i] = scale * f.imag[i];
}
var realP = frame.real;
var imagP = frame.imag;
// Find the starting bin where we should start clearing out
// (we need to clear out the highest frequencies to band-limit the waveform)
var fftSize = n;
var halfSize = fftSize / 2;
var npartials = this.getNumberOfPartialsForRange(j);
// Now, go through and cull out the aliasing harmonics...
for (var i = npartials + 1; i < halfSize; i++) {
realP[i] = 0. | {
this.name = name;
this.context = context;
this.sampleRate = context.sampleRate;
this.url = "wave-tables/" + this.name;
this.waveTableSize = 4096; // hard-coded for now
this.buffer = 0;
this.numberOfResampleRanges = kDefaultNumberOfResampleRanges;
} | identifier_body |
wavetable.js | PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
var kDefaultNumberOfResampleRanges = 11; | this.sampleRate = context.sampleRate;
this.url = "wave-tables/" + this.name;
this.waveTableSize = 4096; // hard-coded for now
this.buffer = 0;
this.numberOfResampleRanges = kDefaultNumberOfResampleRanges;
}
WaveTable.prototype.getWaveDataForPitch = function(pitchFrequency) {
var nyquist = 0.5 * this.sampleRate;
var lowestNumPartials = this.getNumberOfPartialsForRange(0);
var lowestFundamental = nyquist / lowestNumPartials;
// Find out pitch range
var ratio = pitchFrequency / lowestFundamental;
var pitchRange = ratio == 0.0 ? 0 : Math.floor(Math.log(ratio) / Math.LN2);
if (pitchRange < 0)
pitchRange = 0;
// Too bad, we'll alias if pitch is greater than around 5KHz :)
if (pitchRange >= this.numberOfResampleRanges)
pitchRange = this.numberOfResampleRanges - 1;
return this.buffers[pitchRange];
}
WaveTable.prototype.getNumberOfPartialsForRange = function(j) {
// goes from 1024 -> 4 @ 44.1KHz (and do same for 48KHz)
// goes from 2048 -> 8 @ 96KHz
var npartials = Math.pow(2, 1 + this.numberOfResampleRanges - j);
if (this.getSampleRate() > 48000.0)
npartials *= 2; // high sample rate allows more harmonics at given fundamental
return npartials;
}
WaveTable.prototype.getWaveTableSize = function() {
return this.waveTableSize;
}
WaveTable.prototype.getSampleRate = function() {
return this.sampleRate;
}
WaveTable.prototype.getRateScale = function() {
return this.getWaveTableSize() / this.getSampleRate();
}
WaveTable.prototype.getNumberOfResampleRanges = function() {
this.numberOfResampleRanges;
}
WaveTable.prototype.getName = function() {
return this.name;
}
WaveTable.prototype.load = function(callback) {
var request = new XMLHttpRequest();
request.open("GET", this.url, true);
var wave = this;
request.onload = function() {
// Get the frequency-domain waveform data.
var f = eval('(' + request.responseText + ')');
// Copy into more efficient Float32Arrays.
var n = f.real.length;
frequencyData = { "real": new Float32Array(n), "imag": new Float32Array(n) };
wave.frequencyData = frequencyData;
for (var i = 0; i < n; ++i) {
frequencyData.real[i] = f.real[i];
frequencyData.imag[i] = f.imag[i];
}
wave.createBuffers();
if (callback)
callback(wave);
};
request.onerror = function() {
alert("error loading: " + wave.url);
};
request.send();
}
WaveTable.prototype.print = function() {
var f = this.frequencyData;
var info = document.getElementById("info");
var s = "";
for (var i = 0; i < 2048; ++i) {
s += "{" + f.real[i] + ", " + f.imag[i] + "}, <br>";
}
info.innerHTML = s;
}
WaveTable.prototype.printBuffer = function(buffer) {
var info = document.getElementById("info");
var s = "";
for (var i = 0; i < 4096; ++i) {
s += buffer[i] + "<br>";
}
info.innerHTML = s;
}
// WaveTable.prototype.createBuffers = function() {
// var f = this.frequencyData;
//
// var n = 4096;
//
// var fft = new FFT(n, 44100);
//
// // Copy from loaded frequency data and scale.
// for (var i = 0; i < n / 2; ++i) {
// fft.real[i] = 4096 * f.real[i];
// fft.imag[i] = 4096 * f.imag[i];
// }
//
// // Now do inverse FFT
// this.data = fft.inverse();
// var data = this.data;
//
// this.buffer = context.createBuffer(1, data.length, 44100);
//
// // Copy data to the buffer.
// var p = this.buffer.getChannelData(0);
// for (var i = 0; i < data.length; ++i) {
// p[i] = data[i];
// }
// }
// Convert into time-domain wave tables.
// We actually create several of them for non-aliasing playback at different playback rates.
WaveTable.prototype.createBuffers = function() {
// resample ranges
//
// let's divide up versions of our waves based on the maximum fundamental frequency we're
// resampling at. Let's use fundamental frequencies based on dividing Nyquist by powers of two.
// For example for 44.1KHz sample-rate we have:
//
// ranges
// ----------------------------------
// 21Hz, 43Hz, 86Hz, 172Hz, 344Hz, 689Hz, 1378Hz, 2756Hz, 5512Hz, 11025Hz, 22050Hz <-- 44.1KHz
// 23Hz, 47Hz, 94Hz, 187Hz, 375Hz, 750Hz, 1500Hz, 3000Hz, 6000Hz, 12000Hz, 24000Hz, 48000Hz <-- 96KHz
//
// and number of partials:
//
// 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1
// 2048, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1
//
// But it's probably OK if we skip the very highest fundamental frequencies and only
// go up to 5512Hz, so we have a total of 9 resample ranges
//
// 0 1 2 3 4 5 6 7 8
// The FFT size needs to be at least 2048 @ 44.1KHz and 4096 @ 96KHz
//
// So let's try to use FFT size of 4096 all the time and pull out the harmonics we want
//
this.buffers = new Array();
var finalScale = 1.0;
for (var j = 0; j < this.numberOfResampleRanges; ++j) {
var n = this.waveTableSize;
var frame = new FFT(n, this.sampleRate);
// Copy from loaded frequency data and scale.
var f = this.frequencyData;
var scale = n;
for (var i = 0; i < n / 2; ++i) {
frame.real[i] = scale * f.real[i];
frame.imag[i] = scale * f.imag[i];
}
var realP = frame.real;
var imagP = frame.imag;
// Find the starting bin where we should start clearing out
// (we need to clear out the highest frequencies to band-limit the waveform)
var fftSize = n;
var halfSize = fftSize / 2;
var npartials = this.getNumberOfPartialsForRange(j);
// Now, go through and cull out the aliasing harmonics...
for (var i = npartials + 1; i < halfSize; i++) {
realP[i] = 0.0 |
function WaveTable(name, context) {
this.name = name;
this.context = context; | random_line_split |
grid.js | //TODO: encapsulate back to private function when card design is done
//typical sizes:
/*
portrait:
iPhone 4,5 375px
iPhone 6 320px
iPhone 6+ 414px
Galaxy S3 360px
landscape:
iPhone 4 480px
iPhone 5 568px
iPhone 6 667px (574px container)
iPhone 6+ 736px (574px container)
Galaxy S3 640px (574px container)
*/
const ContainerWidth = {
XS: 375,
SM: 574,
MD: 728,
LG: 938,
XL: 1148,
XXL: 1384
}
const Breakpoints = {
XS: 0, // Extra small screen / phone
SM: 544, // Small screen / phone
MD: 768, // Medium screen / tablet
LG: 992, // Large screen / desktop
XL: 1200, // Extra large screen / wide desktop
XXL: 1440, // Extra large screen / wide desktop
}
const COLUMNS = 14
//since media query defines smaller base font size in typography.scss we need to calculated gutters properly
const getGutter = containerOrBrowserWidth => (containerOrBrowserWidth > Breakpoints.SM ? 16 : 14)
export default {
ContainerWidth : ContainerWidth,
getFluidContainerWidth(browserWidth) | ,
getContainerWidth(browserWidth) {
//should match variables from bootstrap
if (browserWidth <= ContainerWidth.SM) {
return browserWidth //container becomes fluid for small size
} else if (browserWidth > ContainerWidth.SM && browserWidth < Breakpoints.MD) {
return ContainerWidth.SM
} else if (browserWidth >= Breakpoints.MD && browserWidth < Breakpoints.LG) {
return ContainerWidth.MD
} else if (browserWidth >= Breakpoints.LG && browserWidth < Breakpoints.XL) {
return ContainerWidth.LG
} else if (browserWidth >= Breakpoints.XL && browserWidth < Breakpoints.XXL) {
return ContainerWidth.XL
} else if (browserWidth >= Breakpoints.XXL) {
return ContainerWidth.XXL
}
},
init(containerWidth) {
return {
//returns width in px of Container's content area (width without paddings)
getColumnContentWidth({numberOfCols}) {
const oneColPercent = (100 / COLUMNS) / 100
const containerGutter = containerWidth >= Breakpoints.SM ? getGutter(containerWidth) : 0
return containerWidth * (oneColPercent * numberOfCols) - containerGutter
}
}
},
}
| {
return browserWidth - getGutter(browserWidth)
} | identifier_body |
grid.js | //TODO: encapsulate back to private function when card design is done
//typical sizes:
/*
portrait:
iPhone 4,5 375px
iPhone 6 320px
iPhone 6+ 414px
Galaxy S3 360px
landscape:
iPhone 4 480px
iPhone 5 568px
iPhone 6 667px (574px container)
iPhone 6+ 736px (574px container)
Galaxy S3 640px (574px container)
*/
const ContainerWidth = {
XS: 375,
SM: 574,
MD: 728,
LG: 938,
XL: 1148,
XXL: 1384
}
const Breakpoints = {
XS: 0, // Extra small screen / phone
SM: 544, // Small screen / phone
MD: 768, // Medium screen / tablet
LG: 992, // Large screen / desktop
XL: 1200, // Extra large screen / wide desktop
XXL: 1440, // Extra large screen / wide desktop
}
const COLUMNS = 14
//since media query defines smaller base font size in typography.scss we need to calculated gutters properly
const getGutter = containerOrBrowserWidth => (containerOrBrowserWidth > Breakpoints.SM ? 16 : 14)
export default {
ContainerWidth : ContainerWidth,
getFluidContainerWidth(browserWidth) {
return browserWidth - getGutter(browserWidth)
},
getContainerWidth(browserWidth) {
//should match variables from bootstrap
if (browserWidth <= ContainerWidth.SM) {
return browserWidth //container becomes fluid for small size
} else if (browserWidth > ContainerWidth.SM && browserWidth < Breakpoints.MD) {
return ContainerWidth.SM
} else if (browserWidth >= Breakpoints.MD && browserWidth < Breakpoints.LG) {
return ContainerWidth.MD
} else if (browserWidth >= Breakpoints.LG && browserWidth < Breakpoints.XL) {
return ContainerWidth.LG
} else if (browserWidth >= Breakpoints.XL && browserWidth < Breakpoints.XXL) {
return ContainerWidth.XL
} else if (browserWidth >= Breakpoints.XXL) {
return ContainerWidth.XXL
}
},
| (containerWidth) {
return {
//returns width in px of Container's content area (width without paddings)
getColumnContentWidth({numberOfCols}) {
const oneColPercent = (100 / COLUMNS) / 100
const containerGutter = containerWidth >= Breakpoints.SM ? getGutter(containerWidth) : 0
return containerWidth * (oneColPercent * numberOfCols) - containerGutter
}
}
},
}
| init | identifier_name |
grid.js | //TODO: encapsulate back to private function when card design is done
//typical sizes:
/*
portrait:
iPhone 4,5 375px
iPhone 6 320px
iPhone 6+ 414px
Galaxy S3 360px
landscape:
iPhone 4 480px
iPhone 5 568px
iPhone 6 667px (574px container)
iPhone 6+ 736px (574px container)
Galaxy S3 640px (574px container)
*/
const ContainerWidth = {
XS: 375,
SM: 574,
MD: 728,
LG: 938,
XL: 1148,
XXL: 1384
}
const Breakpoints = {
XS: 0, // Extra small screen / phone
SM: 544, // Small screen / phone
MD: 768, // Medium screen / tablet
LG: 992, // Large screen / desktop
XL: 1200, // Extra large screen / wide desktop
XXL: 1440, // Extra large screen / wide desktop
}
const COLUMNS = 14
//since media query defines smaller base font size in typography.scss we need to calculated gutters properly
const getGutter = containerOrBrowserWidth => (containerOrBrowserWidth > Breakpoints.SM ? 16 : 14)
export default {
ContainerWidth : ContainerWidth,
getFluidContainerWidth(browserWidth) {
return browserWidth - getGutter(browserWidth)
},
getContainerWidth(browserWidth) {
//should match variables from bootstrap
if (browserWidth <= ContainerWidth.SM) | else if (browserWidth > ContainerWidth.SM && browserWidth < Breakpoints.MD) {
return ContainerWidth.SM
} else if (browserWidth >= Breakpoints.MD && browserWidth < Breakpoints.LG) {
return ContainerWidth.MD
} else if (browserWidth >= Breakpoints.LG && browserWidth < Breakpoints.XL) {
return ContainerWidth.LG
} else if (browserWidth >= Breakpoints.XL && browserWidth < Breakpoints.XXL) {
return ContainerWidth.XL
} else if (browserWidth >= Breakpoints.XXL) {
return ContainerWidth.XXL
}
},
init(containerWidth) {
return {
//returns width in px of Container's content area (width without paddings)
getColumnContentWidth({numberOfCols}) {
const oneColPercent = (100 / COLUMNS) / 100
const containerGutter = containerWidth >= Breakpoints.SM ? getGutter(containerWidth) : 0
return containerWidth * (oneColPercent * numberOfCols) - containerGutter
}
}
},
}
| {
return browserWidth //container becomes fluid for small size
} | conditional_block |
grid.js | //TODO: encapsulate back to private function when card design is done
//typical sizes:
/*
portrait:
iPhone 4,5 375px
iPhone 6 320px
iPhone 6+ 414px
Galaxy S3 360px
landscape:
iPhone 4 480px
iPhone 5 568px
iPhone 6 667px (574px container)
iPhone 6+ 736px (574px container)
Galaxy S3 640px (574px container)
*/
const ContainerWidth = {
XS: 375,
SM: 574,
MD: 728,
LG: 938,
XL: 1148,
XXL: 1384
}
const Breakpoints = {
XS: 0, // Extra small screen / phone
SM: 544, // Small screen / phone
MD: 768, // Medium screen / tablet
LG: 992, // Large screen / desktop
XL: 1200, // Extra large screen / wide desktop
XXL: 1440, // Extra large screen / wide desktop
}
const COLUMNS = 14
//since media query defines smaller base font size in typography.scss we need to calculated gutters properly
const getGutter = containerOrBrowserWidth => (containerOrBrowserWidth > Breakpoints.SM ? 16 : 14)
export default {
ContainerWidth : ContainerWidth,
getFluidContainerWidth(browserWidth) {
return browserWidth - getGutter(browserWidth)
}, |
if (browserWidth <= ContainerWidth.SM) {
return browserWidth //container becomes fluid for small size
} else if (browserWidth > ContainerWidth.SM && browserWidth < Breakpoints.MD) {
return ContainerWidth.SM
} else if (browserWidth >= Breakpoints.MD && browserWidth < Breakpoints.LG) {
return ContainerWidth.MD
} else if (browserWidth >= Breakpoints.LG && browserWidth < Breakpoints.XL) {
return ContainerWidth.LG
} else if (browserWidth >= Breakpoints.XL && browserWidth < Breakpoints.XXL) {
return ContainerWidth.XL
} else if (browserWidth >= Breakpoints.XXL) {
return ContainerWidth.XXL
}
},
init(containerWidth) {
return {
//returns width in px of Container's content area (width without paddings)
getColumnContentWidth({numberOfCols}) {
const oneColPercent = (100 / COLUMNS) / 100
const containerGutter = containerWidth >= Breakpoints.SM ? getGutter(containerWidth) : 0
return containerWidth * (oneColPercent * numberOfCols) - containerGutter
}
}
},
} |
getContainerWidth(browserWidth) {
//should match variables from bootstrap
| random_line_split |
InfrastructureAugmenter.js | const fs = require('fs-promise');
const turf = require('turf');
const _ = require('underscore');
const complexify = require('geojson-tools').complexify;
class InfrastructureAugmenter {
constructor(callback) {
this.aggregatedData = null;
this.buildingsGeo = null;
this.landingsGeo = null;
this.landingsGeoById = null;
this.cablesGeo = null;
this.cablesGeoById = null;
this.oceanGeo = null;
this.loaded = false;
const p1 = fs.readFile('telegeography-data/aggregated-data.json', 'utf8');
p1.then(data => this.aggregatedData = JSON.parse(data));
const p2 = fs.readFile('telegeography-data/internetexchanges/buildings.geojson', 'utf8');
p2.then(data => this.buildingsGeo = JSON.parse(data));
const p3 = fs.readFile('maps/landingpoints.json', 'utf8');
p3.then(data => this.landingsGeo = JSON.parse(data));
const p4 = fs.readFile('maps/ocean.json', 'utf8');
p4.then(data => this.oceanGeo = JSON.parse(data));
const p5 = fs.readFile('maps/cable-data.json', 'utf8');
p5.then(data => this.cablesGeo = JSON.parse(data));
Promise.all([p1, p2, p3, p4, p5])
.then(() => {
this.loaded = true;
this.landingsGeoById = this._generateGeoById(this.landingsGeo, 'id');
this.cablesGeoById = this._generateGeoById(this.cablesGeo, 'cable_id');
callback(null);
}).catch(err => {
callback(err)
})
}
_generateGeoById(geoObj, propName) {
let geoById = {};
geoObj.features.forEach(feature => {
let prop = feature.properties[propName];
geoById[prop] = feature; // DANGER DANGER:
});
return geoById;
}
addInfrastructureData(hop, nextHop) {
var self = this; // gross
hop.infrastructure = {
exchanges: [],
landings: [],
cable: null
};
if (hop.geo && this.loaded && nextHop && nextHop.geo) {
let hopGeoJSON = {
type: "Feature",
properties: {},
geometry: {
type: "Point",
coordinates: [hop.geo.lon, hop.geo.lat]
}
};
// If there is an Autonymous System change
if (hop.geo.as !== nextHop.geo.as &&
hop.geo.as !== '*' && nextHop.geo.as !== '*') {
// console.log('AUTONYMOUS SYSTEM CHANGE')
// console.log(hop)
let radius = 25; // in kilometers
let nearby = [];
this.buildingsGeo.features.forEach(feature => {
let dist = turf.distance(hopGeoJSON, feature, 'kilometers');
/*if (dist <= radius)*/ nearby.push({
dist, feature, fromAsn: hop.geo.as, toAsn: nextHop.geo.as
});
});
if (nearby.length > 0) {
if (nearby.length > 1) nearby = _.sortBy(nearby, obj => obj.dist);
hop.infrastructure.exchanges.push(nearby[0]);
console.log("NEAREST EXCHANGE POINT IS " + nearby[0].dist + " MILES AWAY");
}
let asn = hop.geo.as.split(' ')[0].substring(2);
// console.log(`AS change detected for ${hop.ip}. ${hop.geo.as} -> ${nextHop.geo.as}`)
}
let nearby = [];
let points = [[hop.geo.lon, hop.geo.lat],[nextHop.geo.lon, nextHop.geo.lat]];
//console.log(`HOP: [${hop.geo.lat}, ${hop.geo.lon}] [${nextHop.geo.lat}, ${nextHop.geo.lon}]`)
if (this._crossesOcean(points)) {
let nextHopGeoJSON = {
type: "Feature",
properties: {},
geometry: {
type: "Point",
coordinates: [nextHop.geo.lon, nextHop.geo.lat]
}
};
let landingNearHop = [];
let landingNearNextHop = [];
this.landingsGeo.features.forEach((feature, i) => {
//console.log(feature);
//return;
landingNearHop.push({ dist: turf.distance(hopGeoJSON, feature, 'kilometers'),
feature: feature,
coords: feature.geometry.coordinates,
id: feature.properties.id,
cableId: feature.properties.cable_id});
landingNearNextHop.push({ dist: turf.distance(nextHopGeoJSON, feature, 'kilometers'),
feature: feature,
coords: feature.geometry.coordinates,
id: feature.properties.id,
cableId: feature.properties.cable_id});
});
landingNearHop = _.sortBy(landingNearHop, function(hop) { return hop.dist });
landingNearNextHop = _.sortBy(landingNearNextHop,function(hop) { return hop.dist });
let c = getCables()[0];
hop.infrastructure.landings.push(c.start); | hop.infrastructure.landings.push(c.end);
hop.infrastructure.cable = c.cable;
// console.log(`${c.cable.properties.name} START: ${c.distStart} END: ${c.distEnd} SUM: ${c.distSum}`);
// cables.forEach(c => {
// if (c) {
// console.log(`${c.cable.properties.name} START: ${c.distStart} END: ${c.distEnd} SUM: ${c.distSum}`);
// hop.infrastructure.landings.push(c.start);
// hop.infrastructure.landings.push(c.end);
// hop.infrastructure.cable = c.cable;
// } else {
// console.log('CABLE NOT FOUND');
// }
// });
function getCables() {
let cables = [];
// For each landing points near the hop
for (let i = 0; i < landingNearHop.length; i++) {
// get that landing point's id
let cableId = landingNearHop[i].feature.properties.cable_id;
// For each landing point that cable has
for (let k = 0; k < self.aggregatedData.cable[cableId].landing_points.length; k++) {
let landing = self.aggregatedData.cable[cableId].landing_points[k];
// For all landing points near the next hop
for (let l = 0; l < landingNearNextHop.length; l++) {
if (landingNearNextHop[l].feature.properties.id == landing.id &&
landingNearNextHop[l].feature.properties.id != landingNearHop[i].feature.properties.id) {
cables.push({
start: landingNearHop[i].feature,
end: landingNearNextHop[l].feature,
cable: self.cablesGeoById[cableId],
distSum: landingNearHop[i].dist + landingNearNextHop[l].dist,
distStart: landingNearHop[i].dist,
distEnd: landingNearNextHop[l].dist
});
}
}
}
}
return _.uniq(_.sortBy(cables, cable => cable.distSum), cable => cable.cable.properties.id);
}
function getCableIds(cables) {
let ids = [];
cables.forEach(({cable_id}) => ids.push(parseInt(cable_id)));
return ids;
}
}
}
}
_crossesOcean(points) {
let inside = false;
let numPointsMustBeInOcean = 2;
let numPointsInOcean = 0;
points = complexify(points, 150);
points.shift(); // first point is duplicated by complexify
points.shift(); // remove first point
points.pop(); // remove last point
if (points.length < numPointsMustBeInOcean) return false;
for (let i = 0; i < points.length; i++) {
//console.log(points[i]);
if (turf.inside(turf.point(points[i]), this.oceanGeo.features[0])) {
numPointsInOcean++;
if (numPointsInOcean == numPointsMustBeInOcean) {
inside = true;
break;
}
}
}
return inside;
}
}
module.exports = InfrastructureAugmenter; | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.