file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
lib.rs | pub result: String,
}
impl fmt::Display for LayerResult {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} | {}", self.layer, self.result)
}
}
/// A Transition pairs the LayerResult of running the command on the lower layer
/// with the LayerResult of running the command on the higher layer. No-op transitions are not recorded.
#[derive(Debug, Eq, Ord, PartialOrd, PartialEq)]
pub struct Transition {
pub before: Option<LayerResult>,
pub after: LayerResult,
}
impl fmt::Display for Transition {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self.before {
Some(be) => write!(f, "({} -> {})", be, self.after),
None => write!(f, "-> {}", self.after),
}
}
}
/// Starts the bisect operation. Calculates the highest and lowest layer results and, if their
/// outputs differ, starts a binary chop to figure out which layer(s) caused the change.
fn get_changes<T>(layers: Vec<Layer>, action: &T) -> Result<Vec<Transition>, Error>
where
T: ContainerAction + 'static,
{
let first_layer = layers.first().expect("no first layer");
let last_layer = layers.last().expect("no last layer");
let first_image_name: String = first_layer.image_name.clone();
let last_image_name = &last_layer.image_name;
let action_c = action.clone();
let left_handle = thread::spawn(move || action_c.try_container(&first_image_name));
let end = action.try_container(last_image_name);
let start = left_handle.join().expect("first layer execution error!");
if start == end {
return Ok(vec![Transition {
before: None,
after: LayerResult {
layer: last_layer.clone(),
result: start,
},
}]);
}
bisect(
Vec::from(&layers[1..layers.len() - 1]),
LayerResult {
layer: first_layer.clone(),
result: start,
},
LayerResult {
layer: last_layer.clone(),
result: end,
},
action,
)
}
fn bisect<T>(
history: Vec<Layer>,
start: LayerResult,
end: LayerResult,
action: &T,
) -> Result<Vec<Transition>, Error>
where
T: ContainerAction + 'static,
{
let size = history.len();
if size == 0 {
if start.result == end.result {
return Err(Error::new(std::io::ErrorKind::Other, ""));
}
return Ok(vec![Transition {
before: Some(start.clone()),
after: end.clone(),
}]);
}
let half = size / 2;
let mid_result = LayerResult {
layer: history[half].clone(),
result: action.try_container(&history[half].image_name),
};
if size == 1 {
let mut results = Vec::<Transition>::new();
if *start.result != mid_result.result {
results.push(Transition {
before: Some(start.clone()),
after: mid_result.clone(),
});
}
if mid_result.result != *end.result {
results.push(Transition {
before: Some(mid_result),
after: end.clone(),
});
}
return Ok(results);
}
if start.result == mid_result.result {
action.skip((mid_result.layer.height - start.layer.height) as u64);
return bisect(Vec::from(&history[half + 1..]), mid_result, end, action);
}
if mid_result.result == end.result {
action.skip((end.layer.height - mid_result.layer.height) as u64);
return bisect(Vec::from(&history[..half]), start, mid_result, action);
}
let clone_a = action.clone();
let clone_b = action.clone();
let mid_result_c = mid_result.clone();
let hist_a = Vec::from(&history[..half]);
let left_handle = thread::spawn(move || bisect(hist_a, start, mid_result, &clone_a));
let right_handle =
thread::spawn(move || bisect(Vec::from(&history[half + 1..]), mid_result_c, end, &clone_b));
let mut left_results: Vec<Transition> = left_handle
.join()
.expect("left")
.expect("left transition err");
let right_results: Vec<Transition> = right_handle
.join()
.expect("right")
.expect("right transition err");
left_results.extend(right_results); // These results are sorted later...
Ok(left_results)
}
trait ContainerAction: Clone + Send {
fn try_container(&self, container_id: &str) -> String;
fn skip(&self, count: u64) -> ();
}
#[derive(Clone)]
struct DockerContainer {
pb: Arc<ProgressBar>,
command_line: Vec<String>,
timeout_in_seconds: usize,
}
impl DockerContainer {
fn new(total: u64, command_line: Vec<String>, timeout_in_seconds: usize) -> DockerContainer {
let pb = Arc::new(ProgressBar::new(total));
DockerContainer {
pb,
command_line,
timeout_in_seconds,
}
}
}
struct Guard<'a> { buf: &'a mut Vec<u8>, len: usize }
impl<'a> Drop for Guard<'a> {
fn drop(&mut self) {
unsafe { self.buf.set_len(self.len); }
}
}
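// (Added note) The Guard above follows the pattern used by the standard library's read_to_end:
// `len` tracks how many bytes of `buf` have actually been filled, and the Drop impl shrinks the
// vector back to that length so capacity reserved for reading is never left visible as data.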
impl ContainerAction for DockerContainer {
fn try_container(&self, container_id: &str) -> String {
let docker: Docker = Docker::connect_with_defaults().expect("docker daemon running?");
let container_name: String = rand::thread_rng().gen_range(0., 1.3e4).to_string();
//Create container
let mut create = ContainerCreateOptions::new(&container_id);
let mut host_config = ContainerHostConfig::new();
host_config.auto_remove(false);
create.host_config(host_config);
let it = self.command_line.iter();
for command in it {
create.cmd(command.clone());
}
let container: CreateContainerResponse = docker
.create_container(Some(&container_name), &create)
.expect("couldn't create container");
let result = docker.start_container(&container.id);
if result.is_err() {
let err: dockworker::errors::Error = result.unwrap_err();
return format!("{}", err);
}
let log_options = ContainerLogOptions {
stdout: true,
stderr: true,
since: None,
timestamps: None,
tail: None,
follow: true,
};
let timeout = Duration::from_secs(self.timeout_in_seconds as u64);
let mut container_output = String::new();
let now = SystemTime::now();
let timeout_time = now + timeout;
let result = docker.log_container(&container_name, &log_options);
if let Ok(result) = result {
let mut r = result;
let reservation_size = 32;
let mut buf = Vec::<u8>::new();
{
let mut g = Guard { len: buf.len(), buf: &mut buf };
loop {
if g.len == g.buf.len() |
match r.read(&mut g.buf[g.len..]) {
Ok(0) => { break; }
Ok(n) => g.len += n,
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
Err(_e) => { break; }
}
if SystemTime::now() > timeout_time {
break;
}
}
}
container_output = String::from_utf8_lossy(&buf).to_string();
}
self.pb.inc(1);
let _stop_result = docker.stop_container(&container.id, timeout);
container_output
}
fn skip(&self, count: u64) -> () {
self.pb.inc(count);
}
}
/// Struct to hold parameters.
pub struct BisectOptions {
pub timeout_in_seconds: usize,
pub trunc_size: usize,
}
/// Create containers based on layers and run command_line against them.
/// Result is the differences in std out and std err.
pub fn try_bisect(
histories: &Vec<ImageLayer>,
command_line: Vec<String>,
options: BisectOptions,
) -> Result<Vec<Transition>, Error> {
println!(
"\n{}\n\n{:?}\n",
"Command to apply to layers:".bold(),
&command_line
);
let create_and_try_container = DockerContainer::new(
histories.len() as u64,
command_line,
options.timeout_in_seconds,
);
println!("{}", "Skipped missing layers:".bold());
println!();
let mut layers = Vec::new();
for (index, event) in histories.iter().rev().enumerate() {
let mut created = event.created_by.clone();
created = truncate(&created, options.trunc_size).to_string();
match event.id.clone() {
Some(layer_name) => layers.push(Layer {
height: index,
image_name: layer_name,
creation_command: event.created_by.clone(),
}),
None => println!("{:<3}: {}.", index, truncate(&created, options.trunc_size)),
}
}
println!();
println!(
"{}",
"Bisecting found layers (running command on the layers) ==>\ | {
g.buf.resize(g.len + reservation_size, 0);
} | conditional_block |
lib.rs | ");
let first_image_name: String = first_layer.image_name.clone();
let last_image_name = &last_layer.image_name;
let action_c = action.clone();
let left_handle = thread::spawn(move || action_c.try_container(&first_image_name));
let end = action.try_container(last_image_name);
let start = left_handle.join().expect("first layer execution error!");
if start == end {
return Ok(vec![Transition {
before: None,
after: LayerResult {
layer: last_layer.clone(),
result: start,
},
}]);
}
bisect(
Vec::from(&layers[1..layers.len() - 1]),
LayerResult {
layer: first_layer.clone(),
result: start,
},
LayerResult {
layer: last_layer.clone(),
result: end,
},
action,
)
}
fn bisect<T>(
history: Vec<Layer>,
start: LayerResult,
end: LayerResult,
action: &T,
) -> Result<Vec<Transition>, Error>
where
T: ContainerAction + 'static,
{
let size = history.len();
if size == 0 {
if start.result == end.result {
return Err(Error::new(std::io::ErrorKind::Other, ""));
}
return Ok(vec![Transition {
before: Some(start.clone()),
after: end.clone(),
}]);
}
let half = size / 2;
let mid_result = LayerResult {
layer: history[half].clone(),
result: action.try_container(&history[half].image_name),
};
if size == 1 {
let mut results = Vec::<Transition>::new();
if *start.result != mid_result.result {
results.push(Transition {
before: Some(start.clone()),
after: mid_result.clone(),
});
}
if mid_result.result != *end.result {
results.push(Transition {
before: Some(mid_result),
after: end.clone(),
});
}
return Ok(results);
}
if start.result == mid_result.result {
action.skip((mid_result.layer.height - start.layer.height) as u64);
return bisect(Vec::from(&history[half + 1..]), mid_result, end, action);
}
if mid_result.result == end.result {
action.skip((end.layer.height - mid_result.layer.height) as u64);
return bisect(Vec::from(&history[..half]), start, mid_result, action);
}
let clone_a = action.clone();
let clone_b = action.clone();
let mid_result_c = mid_result.clone();
let hist_a = Vec::from(&history[..half]);
let left_handle = thread::spawn(move || bisect(hist_a, start, mid_result, &clone_a));
let right_handle =
thread::spawn(move || bisect(Vec::from(&history[half + 1..]), mid_result_c, end, &clone_b));
let mut left_results: Vec<Transition> = left_handle
.join()
.expect("left")
.expect("left transition err");
let right_results: Vec<Transition> = right_handle
.join()
.expect("right")
.expect("right transition err");
left_results.extend(right_results); // These results are sorted later...
Ok(left_results)
}
trait ContainerAction: Clone + Send {
fn try_container(&self, container_id: &str) -> String;
fn skip(&self, count: u64) -> ();
}
#[derive(Clone)]
struct DockerContainer {
pb: Arc<ProgressBar>,
command_line: Vec<String>,
timeout_in_seconds: usize,
}
impl DockerContainer {
fn new(total: u64, command_line: Vec<String>, timeout_in_seconds: usize) -> DockerContainer {
let pb = Arc::new(ProgressBar::new(total));
DockerContainer {
pb,
command_line,
timeout_in_seconds,
}
}
}
struct Guard<'a> { buf: &'a mut Vec<u8>, len: usize }
impl<'a> Drop for Guard<'a> {
fn drop(&mut self) {
unsafe { self.buf.set_len(self.len); }
}
}
impl ContainerAction for DockerContainer {
fn try_container(&self, container_id: &str) -> String {
let docker: Docker = Docker::connect_with_defaults().expect("docker daemon running?");
let container_name: String = rand::thread_rng().gen_range(0., 1.3e4).to_string();
//Create container
let mut create = ContainerCreateOptions::new(&container_id);
let mut host_config = ContainerHostConfig::new();
host_config.auto_remove(false);
create.host_config(host_config);
let it = self.command_line.iter();
for command in it {
create.cmd(command.clone());
}
let container: CreateContainerResponse = docker
.create_container(Some(&container_name), &create)
.expect("couldn't create container");
let result = docker.start_container(&container.id);
if result.is_err() {
let err: dockworker::errors::Error = result.unwrap_err();
return format!("{}", err);
}
let log_options = ContainerLogOptions {
stdout: true,
stderr: true,
since: None,
timestamps: None,
tail: None,
follow: true,
};
let timeout = Duration::from_secs(self.timeout_in_seconds as u64);
let mut container_output = String::new();
let now = SystemTime::now();
let timeout_time = now + timeout;
let result = docker.log_container(&container_name, &log_options);
if let Ok(result) = result {
let mut r = result;
let reservation_size = 32;
let mut buf = Vec::<u8>::new();
{
let mut g = Guard { len: buf.len(), buf: &mut buf };
loop {
if g.len == g.buf.len() {
g.buf.resize(g.len + reservation_size, 0);
}
match r.read(&mut g.buf[g.len..]) {
Ok(0) => { break; }
Ok(n) => g.len += n,
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
Err(_e) => { break; }
}
if SystemTime::now() > timeout_time {
break;
}
}
}
container_output = String::from_utf8_lossy(&buf).to_string();
}
self.pb.inc(1);
let _stop_result = docker.stop_container(&container.id, timeout);
container_output
}
fn skip(&self, count: u64) -> () {
self.pb.inc(count);
}
}
/// Struct to hold parameters.
pub struct BisectOptions {
pub timeout_in_seconds: usize,
pub trunc_size: usize,
}
/// Create containers based on layers and run command_line against them.
/// Result is the differences in std out and std err.
pub fn try_bisect(
histories: &Vec<ImageLayer>,
command_line: Vec<String>,
options: BisectOptions,
) -> Result<Vec<Transition>, Error> {
println!(
"\n{}\n\n{:?}\n",
"Command to apply to layers:".bold(),
&command_line
);
let create_and_try_container = DockerContainer::new(
histories.len() as u64,
command_line,
options.timeout_in_seconds,
);
println!("{}", "Skipped missing layers:".bold());
println!();
let mut layers = Vec::new();
for (index, event) in histories.iter().rev().enumerate() {
let mut created = event.created_by.clone();
created = truncate(&created, options.trunc_size).to_string();
match event.id.clone() {
Some(layer_name) => layers.push(Layer {
height: index,
image_name: layer_name,
creation_command: event.created_by.clone(),
}),
None => println!("{:<3}: {}.", index, truncate(&created, options.trunc_size)),
}
}
println!();
println!(
"{}",
"Bisecting found layers (running command on the layers) ==>\n".bold()
);
if layers.len() < 2 {
println!();
eprintln!(
"{} layers found in cache - not enough layers to bisect.",
layers.len()
);
return Err(Error::new(
std::io::ErrorKind::Other,
"no cached layers found!",
));
}
let results = get_changes(layers, &create_and_try_container);
create_and_try_container.pb.finish_with_message("done");
results
}
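// Illustrative usage sketch (added for clarity; `image_histories` is a hypothetical
// Vec<ImageLayer> obtained elsewhere, e.g. from the Docker image history API):
//
//     let transitions = try_bisect(
//         &image_histories,
//         vec!["ls".to_string(), "/usr/local/bin".to_string()],
//         BisectOptions { timeout_in_seconds: 10, trunc_size: 60 },
//     )?;
//     for t in &transitions {
//         println!("{}", t);
//     }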
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
#[derive(Clone)]
struct MapAction {
map: HashMap<String, String>,
}
impl MapAction {
fn new(from: Vec<usize>, to: Vec<&str>) -> Self {
let mut object = MapAction {
map: HashMap::new(),
};
for (f, t) in from.iter().zip(to.iter()) {
object.map.insert(f.to_string(), t.to_string());
}
object
}
}
impl ContainerAction for MapAction {
fn try_container(&self, container_id: &str) -> String {
let none = String::new();
let result: &String = self.map.get(container_id).unwrap_or(&none);
result.clone()
}
fn skip(&self, _count: u64) -> () | {} | identifier_body |
|
meanS1.py | full_matrices=False)
snew = np.clip(s - lambdap/beta, 0, None)
thetay = np.reshape(np.dot(u, np.dot(np.diag(snew), v)), pnew, order = 'F')
rho = rhohalf - alpha * beta * (thetax - thetay)
diff = np.sum(np.abs(thetax - thetay))
itr = itr + 1
#if(diag == True):
# print(diff)
return thetay, pX, pY, rho
#cancpt is the candidate set of change points
#Kmax is the maximum number of segmentations
#U is the optimal information criterion value for each number of change points, without the penalty term
#taumat gives the locations of the change points
def dynProg(cancpt, Kmax, X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter):
Nr = Kmax - 2
npt = cancpt.shape[0]
V=np.zeros((npt, npt))
V[:]=np.nan
for j1 in range(npt):
for j2 in range((j1+1), npt):
if (j1==0) or (j1==(npt-1)):
start = cancpt[j1]
else:
start = cancpt[j1]+1
end = cancpt[j2]+1
pX1 = X[start:end, :, :]
pY1 = Y[start:end, :]
thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True)
error1 = pY1[:, 0] - np.matmul(pX1, thetal)
ssrl = np.matmul(error1.transpose(), error1)
V[j1, j2] = ssrl
Vt=V[0:(npt-1),1:npt]
nw=Vt.shape[0]
U = []#np.zeros(Kmax)
U.append(Vt[0,nw-1])
D = Vt[:,nw-1].copy()
Pos = np.zeros((nw, Nr))
Pos[:] = np.nan
Pos[nw-1,:] = nw
taumat = np.zeros((Nr, Nr))
taumat[:] = np.nan
for k in range(Nr):
for j in range(nw-1):
dist = Vt[j,j:(nw-1)] + D[(j+1):nw]
D[j] = np.min(dist)
Pos[j,0] = int(np.where(dist == np.min(dist))[0][0] + j) +1
if k > 0:
Pos[j,1:(k+1)] = Pos[int(Pos[j,0]),range(k)]
U.append(D[0])
taumat[k,range(k+1)] = Pos[0,range(k + 1)]
return taumat, U
def evaluate(Selected_Index,true_cpt_pos):
cpt_dist=np.zeros([len(true_cpt_pos)])
sel_dist=np.zeros([len(Selected_Index)])
for i in range(len(true_cpt_pos)):
dist=abs(Selected_Index-true_cpt_pos[i])
cpt_dist[i]=np.min(dist)
cpt_dist_max=np.max(cpt_dist)
for j in range(len(Selected_Index)):
dist=abs(Selected_Index[j]-true_cpt_pos)
sel_dist[j]=np.min(dist)
sel_dist_max=np.max(sel_dist)
return cpt_dist_max,sel_dist_max
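# Illustrative worked example for evaluate() (numbers are made up, not from the simulation below):
# with Selected_Index = np.array([98, 180]) and true_cpt_pos = np.array([99, 174, 324]),
# the nearest-selected distances for the true change points are [1, 6, 144] and the
# nearest-true distances for the selected points are [1, 6], so evaluate() returns (144, 6).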
###Simulation Start
Itermax=10
m=8
p=8
signal=0.5
R1=3
omega = np.zeros((p, p))
for j in range(0, p):
for i in range(0, p):
|
T1=100
T2=75
T3=150
T4=75
T5=100
nall=T1+T2+T3+T4+T5
true_cpt_pos=np.array([T1-1,T1+T2-1,T1+T2+T3-1,T1+T2+T3+T4-1])
cpt_num=np.zeros([Itermax])
d_under=np.zeros([Itermax])
d_over=np.zeros([Itermax])
MSIC=np.zeros([Itermax])
h_selected=np.zeros([Itermax])
np.random.seed(2019)
test_number=5
for Iter in range(Itermax):
print(Iter)
X1,Y1=simu(-1,m,p,R1,omega,T1,signal,0,1)
X2,Y2=simu(0,m,p,R1,omega,T2,signal,0,1)
X3,Y3=simu(1,m,p,R1,omega,T3,signal,0,1)
X4,Y4=simu(-1,m,p,R1,omega,T4,signal,0,1)
X5,Y5=simu(0,m,p,R1,omega,T5,signal,0,1)
Xall=np.vstack((X1, X2,X3,X4,X5))
Yall=np.vstack((Y1, Y2,Y3,Y4,Y5))
hlist=range(16*int((m/np.log(nall))**(0.5)),48*int((m/np.log(nall))**(0.5)),4*int((m/np.log(nall))**(0.5)))
hn=len(hlist)
MSE_test=np.zeros([hn])
test_sample=random.sample(range(0,nall),test_number)
for kk in range(test_number):
#X,X_test,Y,Y_test=train_test_split(Xall,Yall,test_size=0.002,random_state=0)
X=np.delete(Xall,test_sample[kk],axis=0)
Y=np.delete(Yall,test_sample[kk],axis=0)
X_test=Xall[test_sample[kk],:,:]
Y_test=Yall[test_sample[kk],:]
for jj in range(hn):
h=int(hlist[jj])
print(h)
pnew = X.shape[2]
n= X.shape[0]
S = np.zeros(n)
Xini = X.reshape(n * m, pnew, order = 'F')
Yini = Y.reshape(n * m, 1, order = 'F')
XXinv = np.matmul(Xini.transpose(), Xini)
XXinv = np.linalg.pinv(XXinv)
thetaini = np.matmul(XXinv, np.matmul(Xini.transpose(), Yini))[:, 0]
rhoini = np.zeros(pnew)
lambdap = np.sqrt((m + pnew/m)/h)* m**(-1) * 0.3
rhoini[:] = lambdap
maxiter = 1000
epstol = 1e-3
alpha = 0.9
beta = 1
for j in range(h+1, n - h):
pX1 = X[(j - h):(j + 1), :, :]
pY1 = Y[(j - h):(j + 1), :]
pX2 = X[(j + 1): (j + h + 1), :, :]
pY2 = Y[(j + 1): (j + h + 1), :]
pX = X[(j - h): (j + h + 1), :, :]
pY = Y[(j - h): (j + h + 1), :]
thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True)
thetar, pX2, pY2, rhor = prsm(pX2, pY2, pnew, pY2.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True)
theta, pX, pY, rho = prsm(pX, pY, pnew, pY.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True)
error1 = pY1[:, 0] - np.matmul(pX1, thetal)
error2 = pY2[:, 0] - np.matmul(pX2, thetar)
error = pY[:, 0] - np.matmul(pX, theta)
ssrl = np.matmul(error1.transpose(), error1)/(h * m)
ssrr = np.matmul(error2.transpose(), error2)/(h * m)
ssr = np.matmul(error.transpose(), error)/(h * m)
S[j] = ssr - ssrl - ssrr
# print(j)
cpt = np.array([0])
for j | omega[i, j] = 0.5**(np.abs(i - j)) | conditional_block |
meanS1.py |
def prsm(pX, pY, pnew, nh, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, diag):
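# (Added descriptive note) prsm appears to implement a Peaceman-Rachford-style proximal splitting
# for nuclear-norm penalised (reduced-rank) regression: the thetax update solves a ridge-type
# linear system, the thetay update soft-thresholds the singular values of the reshaped coefficient
# matrix by lambdap/beta, and rho is the dual variable, updated twice per iteration with relaxation alpha.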
pX = pX.reshape(nh * m, pnew, order = 'F')
pY = pY.reshape(nh * m, 1, order = 'F')
thetay = thetaini
rho = rhoini
itr = 0
diff = 1
while itr <= maxiter and diff > epstol:
Xinverse = np.linalg.inv((2 * np.matmul(pX.transpose(), pX)/(nh * m) + beta * np.eye(pnew)))
XY = beta * thetay + rho + (2 * np.matmul(pX.transpose(), pY)/(nh * m))[:, 0]
thetax = np.matmul(Xinverse, XY)
rhohalf = rho - alpha * beta * (thetax - thetay)
mtheta = (thetax - rhohalf).reshape(m, int(pnew/m), order = 'F')
u, s, v= np.linalg.svd(mtheta, full_matrices=False)
snew = np.clip(s - lambdap/beta, 0, None)
thetay = np.reshape(np.dot(u, np.dot(np.diag(snew), v)), pnew, order = 'F')
rho = rhohalf - alpha * beta * (thetax - thetay)
diff = np.sum(np.abs(thetax - thetay))
itr = itr + 1
#if(diag == True):
# print(diff)
return thetay, pX, pY, rho
#cancpt is the candidate set of change points
#Kmax is the maximum number of segmentations
#U is the optimal information criterion value for each number of change points, without the penalty term
#taumat gives the locations of the change points
def dynProg(cancpt, Kmax, X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter):
Nr = Kmax - 2
npt = cancpt.shape[0]
V=np.zeros((npt, npt))
V[:]=np.nan
for j1 in range(npt):
for j2 in range((j1+1), npt):
if (j1==0) or (j1==(npt-1)):
start = cancpt[j1]
else:
start = cancpt[j1]+1
end = cancpt[j2]+1
pX1 = X[start:end, :, :]
pY1 = Y[start:end, :]
thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True)
error1 = pY1[:, 0] - np.matmul(pX1, thetal)
ssrl = np.matmul(error1.transpose(), error1)
V[j1, j2] = ssrl
Vt=V[0:(npt-1),1:npt]
nw=Vt.shape[0]
U = []#np.zeros(Kmax)
U.append(Vt[0,nw-1])
D = Vt[:,nw-1].copy()
Pos = np.zeros((nw, Nr))
Pos[:] = np.nan
Pos[nw-1,:] = nw
taumat = np.zeros((Nr, Nr))
taumat[:] = np.nan
for k in range(Nr):
for j in range(nw-1):
dist = Vt[j,j:(nw-1)] + D[(j+1):nw]
D[j] = np.min(dist)
Pos[j,0] = int(np.where(dist == np.min(dist))[0][0] + j) +1
if k > 0:
Pos[j,1:(k+1)] = Pos[int(Pos[j,0]),range(k)]
U.append(D[0])
taumat[k,range(k+1)] = Pos[0,range(k + 1)]
return taumat, U
def evaluate(Selected_Index,true_cpt_pos):
cpt_dist=np.zeros([len(true_cpt_pos)])
sel_dist=np.zeros([len(Selected_Index)])
for i in range(len(true_cpt_pos)):
dist=abs(Selected_Index-true_cpt_pos[i])
cpt_dist[i]=np.min(dist)
cpt_dist_max=np.max(cpt_dist)
for j in range(len(Selected_Index)):
dist=abs(Selected_Index[j]-true_cpt_pos)
sel_dist[j]=np.min(dist)
sel_dist_max=np.max(sel_dist)
return cpt_dist_max,sel_dist_max
###Simulation Start
Itermax=10
m=8
p=8
signal=0.5
R1=3
omega = np.zeros((p, p))
for j in range(0, p):
for i in range(0, p):
omega[i, j] = 0.5**(np.abs(i - j))
T1=100
T2=75
T3=150
T4=75
T5=100
nall=T1+T2+T3+T4+T5
true_cpt_pos=np.array([T1-1,T1+T2-1,T1+T2+T3-1,T1+T2+T3+T4-1])
cpt_num=np.zeros([Itermax])
d_under=np.zeros([Itermax])
d_over=np.zeros([Itermax])
MSIC=np.zeros([Itermax])
h_selected=np.zeros([Itermax])
np.random.seed(2019)
test_number=5
for Iter in range(Itermax):
print(Iter)
X1,Y1=simu(-1,m,p,R1,omega,T1,signal,0,1)
X2,Y2=simu(0,m,p,R1,omega,T2,signal,0,1)
X3,Y3=simu(1,m,p,R1,omega,T3,signal,0,1)
X4,Y4=simu(-1,m,p,R1,omega,T4,signal,0,1)
X5,Y5=simu(0,m,p,R1,omega,T5,signal,0,1)
Xall=np.vstack((X1, X2,X3,X4,X5))
Yall=np.vstack((Y1, Y2,Y3,Y4,Y5))
hlist=range(16*int((m/np.log(nall))**(0.5)),48*int((m/np.log(nall))**(0.5)),4*int((m/np.log(nall))**(0.5)))
hn=len(hlist)
MSE_test=np.zeros([hn])
test_sample=random.sample(range(0,nall),test_number)
for kk in range(test_number):
#X,X_test,Y,Y_test=train_test_split(Xall,Yall,test_size=0.002,random_state=0)
X=np.delete(Xall,test_sample[kk],axis=0)
Y=np.delete(Yall,test_sample[kk],axis=0)
X_test=Xall[test_sample[kk],:,:]
Y_test=Yall[test_sample[kk],:]
for jj in range(hn):
h=int(hlist[jj])
print(h)
pnew = X.shape[2]
n= X.shape[0]
S = np.zeros(n)
Xini = X.reshape(n * m, pnew, order = 'F')
Yini = Y.reshape(n * m, 1, order = 'F')
XXinv = np.matmul(Xini.transpose(), Xini)
XXinv = np.linalg.pinv(XXinv)
thetaini = np.matmul(XXinv, np.matmul(Xini.transpose(), Yini))[:, 0]
rhoini = np.zeros(pnew)
lambdap = np.sqrt((m + pnew/m)/h)* | Theta = np.random.normal(mu1,sigma,size=(m,p))
u, s, v= np.linalg.svd(Theta, full_matrices = False)
s[R:] = 0
smat = np.diag(s)
Theta = np.dot(u, np.dot(smat, v))
X= np.random.multivariate_normal(np.zeros(p), omega, m*T)
#X= np.random.multivariate_normal(np.zeros(m * p), omega, T)
X= np.reshape(X,[T,m,p])
Xnew = np.zeros([T, m, m *p])
for j in range(T):
for i in range(m):
em = np.zeros(m)
em[i] = 1
Xnew[j, i, :] = np.kron(X[j,i,:], np.transpose(em))
# Xnew is n, m, m*p
E=np.random.normal(mu,sigma,size=(T,m))
Y=np.zeros([T,m])
for j in range(T):
Y[j,:]=np.sum(np.multiply(Theta,X[j,:,:]),axis=1)+signal*E[j,:]
return Xnew, Y | identifier_body |
|
meanS1.py | (mu1,m,p,R,omega,T,signal,mu,sigma):
Theta = np.random.normal(mu1,sigma,size=(m,p))
u, s, v= np.linalg.svd(Theta, full_matrices = False)
s[R:] = 0
smat = np.diag(s)
Theta = np.dot(u, np.dot(smat, v))
X= np.random.multivariate_normal(np.zeros(p), omega, m*T)
#X= np.random.multivariate_normal(np.zeros(m * p), omega, T)
X= np.reshape(X,[T,m,p])
Xnew = np.zeros([T, m, m *p])
for j in range(T):
for i in range(m):
em = np.zeros(m)
em[i] = 1
Xnew[j, i, :] = np.kron(X[j,i,:], np.transpose(em))
# Xnew is n, m, m*p
E=np.random.normal(mu,sigma,size=(T,m))
Y=np.zeros([T,m])
for j in range(T):
Y[j,:]=np.sum(np.multiply(Theta,X[j,:,:]),axis=1)+signal*E[j,:]
return Xnew, Y
def prsm(pX, pY, pnew, nh, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, diag):
pX = pX.reshape(nh * m, pnew, order = 'F')
pY = pY.reshape(nh * m, 1, order = 'F')
thetay = thetaini
rho = rhoini
itr = 0
diff = 1
while itr <= maxiter and diff > epstol:
Xinverse = np.linalg.inv((2 * np.matmul(pX.transpose(), pX)/(nh * m) + beta * np.eye(pnew)))
XY = beta * thetay + rho + (2 * np.matmul(pX.transpose(), pY)/(nh * m))[:, 0]
thetax = np.matmul(Xinverse, XY)
rhohalf = rho - alpha * beta * (thetax - thetay)
mtheta = (thetax - rhohalf).reshape(m, int(pnew/m), order = 'F')
u, s, v= np.linalg.svd(mtheta, full_matrices=False)
snew = np.clip(s - lambdap/beta, 0, None)
thetay = np.reshape(np.dot(u, np.dot(np.diag(snew), v)), pnew, order = 'F')
rho = rhohalf - alpha * beta * (thetax - thetay)
diff = np.sum(np.abs(thetax - thetay))
itr = itr + 1
#if(diag == True):
# print(diff)
return thetay, pX, pY, rho
#cancpt is the candidate set of change points
#Kmax is the maximum number of segmentations
#U is the optimal information criterion value for each number of change points, without the penalty term
#taumat gives the locations of the change points
def dynProg(cancpt, Kmax, X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter):
Nr = Kmax - 2
npt = cancpt.shape[0]
V=np.zeros((npt, npt))
V[:]=np.nan
for j1 in range(npt):
for j2 in range((j1+1), npt):
if (j1==0) or (j1==(npt-1)):
start = cancpt[j1]
else:
start = cancpt[j1]+1
end = cancpt[j2]+1
pX1 = X[start:end, :, :]
pY1 = Y[start:end, :]
thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True)
error1 = pY1[:, 0] - np.matmul(pX1, thetal)
ssrl = np.matmul(error1.transpose(), error1)
V[j1, j2] = ssrl
Vt=V[0:(npt-1),1:npt]
nw=Vt.shape[0]
U = []#np.zeros(Kmax)
U.append(Vt[0,nw-1])
D = Vt[:,nw-1].copy()
Pos = np.zeros((nw, Nr))
Pos[:] = np.nan
Pos[nw-1,:] = nw
taumat = np.zeros((Nr, Nr))
taumat[:] = np.nan
for k in range(Nr):
for j in range(nw-1):
dist = Vt[j,j:(nw-1)] + D[(j+1):nw]
D[j] = np.min(dist)
Pos[j,0] = int(np.where(dist == np.min(dist))[0][0] + j) +1
if k > 0:
Pos[j,1:(k+1)] = Pos[int(Pos[j,0]),range(k)]
U.append(D[0])
taumat[k,range(k+1)] = Pos[0,range(k + 1)]
return taumat, U
def evaluate(Selected_Index,true_cpt_pos):
cpt_dist=np.zeros([len(true_cpt_pos)])
sel_dist=np.zeros([len(Selected_Index)])
for i in range(len(true_cpt_pos)):
dist=abs(Selected_Index-true_cpt_pos[i])
cpt_dist[i]=np.min(dist)
cpt_dist_max=np.max(cpt_dist)
for j in range(len(Selected_Index)):
dist=abs(Selected_Index[j]-true_cpt_pos)
sel_dist[j]=np.min(dist)
sel_dist_max=np.max(sel_dist)
return cpt_dist_max,sel_dist_max
###Simulation Start
Itermax=10
m=8
p=8
signal=0.5
R1=3
omega = np.zeros((p, p))
for j in range(0, p):
for i in range(0, p):
omega[i, j] = 0.5**(np.abs(i - j))
T1=100
T2=75
T3=150
T4=75
T5=100
nall=T1+T2+T3+T4+T5
true_cpt_pos=np.array([T1-1,T1+T2-1,T1+T2+T3-1,T1+T2+T3+T4-1])
cpt_num=np.zeros([Itermax])
d_under=np.zeros([Itermax])
d_over=np.zeros([Itermax])
MSIC=np.zeros([Itermax])
h_selected=np.zeros([Itermax])
np.random.seed(2019)
test_number=5
for Iter in range(Itermax):
print(Iter)
X1,Y1=simu(-1,m,p,R1,omega,T1,signal,0,1)
X2,Y2=simu(0,m,p,R1,omega,T2,signal,0,1)
X3,Y3=simu(1,m,p,R1,omega,T3,signal,0,1)
X4,Y4=simu(-1,m,p,R1,omega,T4,signal,0,1)
X5,Y5=simu(0,m,p,R1,omega,T5,signal,0,1)
Xall=np.vstack((X1, X2,X3,X4,X5))
Yall=np.vstack((Y1, Y2,Y3,Y4,Y5))
hlist=range(16*int((m/np.log(nall))**(0.5)),48*int((m/np.log(nall))**(0.5)),4*int((m/np.log(nall))**(0.5)))
hn=len(hlist)
MSE_test=np.zeros([hn])
test_sample=random.sample(range(0,nall),test_number)
for kk in range(test_number):
#X,X_test,Y,Y_test=train_test_split(Xall,Yall,test_size=0.002,random_state=0)
X=np.delete(Xall,test_sample[kk],axis=0)
Y=np.delete(Yall,test_sample[kk],axis=0)
X_test=Xall[test_sample[kk],:,:]
Y_test=Yall[test_sample[kk],:]
for jj in range(hn):
h=int(hlist[jj])
print(h)
pnew = X.shape[2]
n= X.shape[0]
S = np.zeros(n)
Xini = X.reshape(n * m, pnew, order = 'F')
Yini = Y.reshape(n * m, 1, order = 'F')
XXinv = np.matmul(Xini.transpose(), Xini)
XXinv = np.linalg.pinv(XXinv)
thetaini = np.matmul(XXinv, np.matmul(Xini.transpose(), Yini))[:, 0]
rhoini = np.zeros(p | simu | identifier_name |
|
meanS1.py | theta, full_matrices=False)
snew = np.clip(s - lambdap/beta, 0, None)
thetay = np.reshape(np.dot(u, np.dot(np.diag(snew), v)), pnew, order = 'F')
rho = rhohalf - alpha * beta * (thetax - thetay)
diff = np.sum(np.abs(thetax - thetay))
itr = itr + 1
#if(diag == True):
# print(diff)
return thetay, pX, pY, rho
#cancpt is the candidate set of change points
#Kmax is the maximum number of segmentations
#U is the optimal information criterion value for each number of change points, without the penalty term
#taumat gives the locations of the change points
def dynProg(cancpt, Kmax, X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter):
Nr = Kmax - 2
npt = cancpt.shape[0]
V=np.zeros((npt, npt))
V[:]=np.nan
for j1 in range(npt):
for j2 in range((j1+1), npt):
if (j1==0) or (j1==(npt-1)):
start = cancpt[j1]
else:
start = cancpt[j1]+1
end = cancpt[j2]+1
pX1 = X[start:end, :, :]
pY1 = Y[start:end, :]
thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True)
error1 = pY1[:, 0] - np.matmul(pX1, thetal)
ssrl = np.matmul(error1.transpose(), error1)
V[j1, j2] = ssrl
Vt=V[0:(npt-1),1:npt]
nw=Vt.shape[0]
U = []#np.zeros(Kmax)
U.append(Vt[0,nw-1])
D = Vt[:,nw-1].copy()
Pos = np.zeros((nw, Nr))
Pos[:] = np.nan
Pos[nw-1,:] = nw
taumat = np.zeros((Nr, Nr)) | for k in range(Nr):
for j in range(nw-1):
dist = Vt[j,j:(nw-1)] + D[(j+1):nw]
D[j] = np.min(dist)
Pos[j,0] = int(np.where(dist == np.min(dist))[0][0] + j) +1
if k > 0:
Pos[j,1:(k+1)] = Pos[int(Pos[j,0]),range(k)]
U.append(D[0])
taumat[k,range(k+1)] = Pos[0,range(k + 1)]
return taumat, U
def evaluate(Selected_Index,true_cpt_pos):
cpt_dist=np.zeros([len(true_cpt_pos)])
sel_dist=np.zeros([len(Selected_Index)])
for i in range(len(true_cpt_pos)):
dist=abs(Selected_Index-true_cpt_pos[i])
cpt_dist[i]=np.min(dist)
cpt_dist_max=np.max(cpt_dist)
for j in range(len(Selected_Index)):
dist=abs(Selected_Index[j]-true_cpt_pos)
sel_dist[j]=np.min(dist)
sel_dist_max=np.max(sel_dist)
return cpt_dist_max,sel_dist_max
###Simulation Start
Itermax=10
m=8
p=8
signal=0.5
R1=3
omega = np.zeros((p, p))
for j in range(0, p):
for i in range(0, p):
omega[i, j] = 0.5**(np.abs(i - j))
T1=100
T2=75
T3=150
T4=75
T5=100
nall=T1+T2+T3+T4+T5
true_cpt_pos=np.array([T1-1,T1+T2-1,T1+T2+T3-1,T1+T2+T3+T4-1])
cpt_num=np.zeros([Itermax])
d_under=np.zeros([Itermax])
d_over=np.zeros([Itermax])
MSIC=np.zeros([Itermax])
h_selected=np.zeros([Itermax])
np.random.seed(2019)
test_number=5
for Iter in range(Itermax):
print(Iter)
X1,Y1=simu(-1,m,p,R1,omega,T1,signal,0,1)
X2,Y2=simu(0,m,p,R1,omega,T2,signal,0,1)
X3,Y3=simu(1,m,p,R1,omega,T3,signal,0,1)
X4,Y4=simu(-1,m,p,R1,omega,T4,signal,0,1)
X5,Y5=simu(0,m,p,R1,omega,T5,signal,0,1)
Xall=np.vstack((X1, X2,X3,X4,X5))
Yall=np.vstack((Y1, Y2,Y3,Y4,Y5))
hlist=range(16*int((m/np.log(nall))**(0.5)),48*int((m/np.log(nall))**(0.5)),4*int((m/np.log(nall))**(0.5)))
hn=len(hlist)
MSE_test=np.zeros([hn])
test_sample=random.sample(range(0,nall),test_number)
for kk in range(test_number):
#X,X_test,Y,Y_test=train_test_split(Xall,Yall,test_size=0.002,random_state=0)
X=np.delete(Xall,test_sample[kk],axis=0)
Y=np.delete(Yall,test_sample[kk],axis=0)
X_test=Xall[test_sample[kk],:,:]
Y_test=Yall[test_sample[kk],:]
for jj in range(hn):
h=int(hlist[jj])
print(h)
pnew = X.shape[2]
n= X.shape[0]
S = np.zeros(n)
Xini = X.reshape(n * m, pnew, order = 'F')
Yini = Y.reshape(n * m, 1, order = 'F')
XXinv = np.matmul(Xini.transpose(), Xini)
XXinv = np.linalg.pinv(XXinv)
thetaini = np.matmul(XXinv, np.matmul(Xini.transpose(), Yini))[:, 0]
rhoini = np.zeros(pnew)
lambdap = np.sqrt((m + pnew/m)/h)* m**(-1) * 0.3
rhoini[:] = lambdap
maxiter = 1000
epstol = 1e-3
alpha = 0.9
beta = 1
for j in range(h+1, n - h):
pX1 = X[(j - h):(j + 1), :, :]
pY1 = Y[(j - h):(j + 1), :]
pX2 = X[(j + 1): (j + h + 1), :, :]
pY2 = Y[(j + 1): (j + h + 1), :]
pX = X[(j - h): (j + h + 1), :, :]
pY = Y[(j - h): (j + h + 1), :]
thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True)
thetar, pX2, pY2, rhor = prsm(pX2, pY2, pnew, pY2.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True)
theta, pX, pY, rho = prsm(pX, pY, pnew, pY.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True)
error1 = pY1[:, 0] - np.matmul(pX1, thetal)
error2 = pY2[:, 0] - np.matmul(pX2, thetar)
error = pY[:, 0] - np.matmul(pX, theta)
ssrl = np.matmul(error1.transpose(), error1)/(h * m)
ssrr = np.matmul(error2.transpose(), error2)/(h * m)
ssr = np.matmul(error.transpose(), error)/(h * m)
S[j] = ssr - ssrl - ssrr
# print(j)
cpt = np.array([0])
for j | taumat[:] = np.nan | random_line_split |
lib.rs | new();
/// let input = r.create_input(111);
/// let compute: react::InputCellID = r.create_compute(&[react::CellID::Input(input)], |_| 222).unwrap();
/// ```
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ComputeCellID(usize);
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct CallbackID(usize);
pub type Callback<'reactor, T> = RefCell<Box<dyn 'reactor + FnMut(T)>>;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum CellID {
Input(InputCellID),
Compute(ComputeCellID),
}
#[derive(Debug, PartialEq)]
pub enum RemoveCallbackError {
NonexistentCell,
NonexistentCallback,
}
struct InputCell<T> {
clients: HashSet<ComputeCellID>,
value: T,
}
impl<T: Copy + Debug + PartialEq> InputCell<T> {
pub fn new(init: T) -> Self {
InputCell {
clients: HashSet::new(),
value: init,
}
}
}
struct ComputeCell<'r, T: Debug> {
fun: Box<dyn 'r + Fn(&[T]) -> T>,
deps: Vec<CellID>,
callbacks: HashMap<CallbackID, Callback<'r, T>>,
prev_val: Cell<Option<T>>,
next_cbid: usize, // increases monotonically; increments on adding a callback
clients: HashSet<ComputeCellID>,
}
impl<'r, T: Copy + Debug + PartialEq + 'r> ComputeCell<'r, T> {
pub fn new<F>(fun: F, deps: &[CellID]) -> Self
where
F: 'r + Fn(&[T]) -> T,
{
ComputeCell {
fun: Box::new(fun),
deps: deps.to_vec(),
callbacks: HashMap::new(),
prev_val: Cell::new(None),
next_cbid: 0,
clients: HashSet::new(),
}
}
pub fn call(&self, reactor: &Reactor<'r, T>) -> T {
let deps = self
.deps
.iter()
.map(|c| reactor.value(*c).unwrap())
.collect::<Vec<T>>();
let nv = (self.fun)(&deps);
let mut fire_callbacks = false;
if let Some(pv) = self.prev_val.get() {
if nv != pv {
self.prev_val.set(Some(nv));
fire_callbacks = true;
}
} else {
self.prev_val.set(Some(nv));
fire_callbacks = true;
}
if fire_callbacks {
for c in self.callbacks.values() {
(&mut *c.borrow_mut())(nv);
}
}
nv
}
}
#[derive(Default)]
pub struct Reactor<'r, T: Debug> {
input_cells: Vec<InputCell<T>>,
compute_cells: Vec<ComputeCell<'r, T>>,
}
// You are guaranteed that Reactor will only be tested against types that are Copy + PartialEq.
impl<'r, T: Copy + Debug + PartialEq + 'r> Reactor<'r, T> {
pub fn new() -> Self {
Reactor {
input_cells: Vec::new(),
compute_cells: Vec::new(),
}
}
// Creates an input cell with the specified initial value, returning its ID.
pub fn create_input(&mut self, initial: T) -> InputCellID {
let idx = self.input_cells.len();
let id = InputCellID(idx);
self.input_cells.push(InputCell::new(initial));
id
}
// Creates a compute cell with the specified dependencies and compute function.
// The compute function is expected to take in its arguments in the same order as specified in
// `dependencies`.
// You do not need to reject compute functions that expect more arguments than there are
// dependencies (how would you check for this, anyway?).
//
// If any dependency doesn't exist, returns an Err with that nonexistent dependency.
// (If multiple dependencies do not exist, exactly which one is returned is not defined and
// will not be tested)
//
// Notice that there is no way to *remove* a cell.
// This means that you may assume, without checking, that if the dependencies exist at creation
// time they will continue to exist as long as the Reactor exists.
pub fn create_compute<F>(
&mut self,
dependencies: &[CellID],
compute_func: F,
) -> Result<ComputeCellID, CellID>
where
F: 'r + Fn(&[T]) -> T,
{
let cidx = self.compute_cells.len();
let cid = ComputeCellID(cidx);
for id in dependencies.iter() {
match id {
CellID::Input(InputCellID(idx)) => {
if *idx >= self.input_cells.len() {
return Err(*id);
}
}
CellID::Compute(ComputeCellID(idx)) => {
if *idx >= self.compute_cells.len() {
return Err(*id);
}
}
}
}
// register as clients with all dependencies.
for id in dependencies.iter() {
match id {
CellID::Input(InputCellID(idx)) => {
let _ = self.input_cells[*idx].clients.insert(cid);
}
CellID::Compute(ComputeCellID(idx)) => {
let _ = self.compute_cells[*idx].clients.insert(cid);
}
}
}
let cell = ComputeCell::new(compute_func, dependencies);
cell.call(&self); // set the initial value
self.compute_cells.push(cell);
Ok(cid)
}
// Retrieves the current value of the cell, or None if the cell does not exist.
//
// You may wonder whether it is possible to implement `get(&self, id: CellID) -> Option<&Cell>`
// and have a `value(&self)` method on `Cell`.
//
// It turns out this introduces a significant amount of extra complexity to this exercise.
// We chose not to cover this here, since this exercise is probably enough work as-is.
pub fn value(&self, id: CellID) -> Option<T> {
match id {
CellID::Input(InputCellID(idx)) => self.input_cells.get(idx).map(|i| i.value),
CellID::Compute(ComputeCellID(idx)) => {
if let Some(cell) = self.compute_cells.get(idx) {
Some(cell.call(&self))
} else {
None
}
} | }
// Sets the value of the specified input cell.
//
// Returns false if the cell does not exist.
//
// Similarly, you may wonder about `get_mut(&mut self, id: CellID) -> Option<&mut Cell>`, with
// a `set_value(&mut self, new_value: T)` method on `Cell`.
//
// As before, that turned out to add too much extra complexity.
pub fn set_value(&mut self, id: InputCellID, new_value: T) -> bool {
let InputCellID(idx) = id;
if idx < self.input_cells.len() {
let old_value = self.input_cells[idx].value;
if old_value == new_value {
return true;
}
self.input_cells[idx].value = new_value;
let mut clients1 = self.input_cells[idx].clients.clone();
let mut clients2 = HashSet::new();
let mut done = false;
// Recursively iterate through all clients until we've converged on
// the stable set of them. Does at least N extra checks, where N is
// the number of ultimate clients.
while !done {
for client in clients1.iter() {
clients2.insert(client.clone());
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[*idx];
// first find all the clients that will be called without us
clients2.extend(cell.clients.iter());
}
for client in clients2.iter() {
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[*idx];
clients1.extend(cell.clients.iter());
}
done = clients1 == clients2;
}
// This has the potential to call more clients than needed, but ComputeCells
// cache their previous value and only invoke their callbacks on change,
// so client callbacks won't get invoked more than once.
//
// There's an implicit assumption here that each ComputeCell's function is
// cheap to run, which is probably not true in general. We could do a
// topological sort of the client graph to ensure we only call leaf nodes.
for client in clients1 {
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[idx];
cell.call(&self);
}
// we have set a new value and called all clients, return true
true
} else {
// the cell does not exist, return false
false
}
}
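// Illustrative usage sketch (added; not part of the original exercise code). With the API above,
// a change to an input propagates to its dependent compute cells:
//
//     let mut r = Reactor::new();
//     let a = r.create_input(1);
//     let double = r.create_compute(&[CellID::Input(a)], |v| v[0] * 2).unwrap();
//     r.set_value(a, 21);
//     assert_eq!(r.value(CellID::Compute(double)), Some(42));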
// Adds a callback to the specified compute cell.
//
// Returns the ID of the just-added callback, or None if the cell doesn't exist.
//
// Callbacks on input cells will not be tested.
//
// The semantics of callbacks (as will be tested):
// For a single set_value call, each compute cell's callbacks should each be called | } | random_line_split |
lib.rs | new();
/// let input = r.create_input(111);
/// let compute: react::InputCellID = r.create_compute(&[react::CellID::Input(input)], |_| 222).unwrap();
/// ```
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ComputeCellID(usize);
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct CallbackID(usize);
pub type Callback<'reactor, T> = RefCell<Box<dyn 'reactor + FnMut(T)>>;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum CellID {
Input(InputCellID),
Compute(ComputeCellID),
}
#[derive(Debug, PartialEq)]
pub enum RemoveCallbackError {
NonexistentCell,
NonexistentCallback,
}
struct InputCell<T> {
clients: HashSet<ComputeCellID>,
value: T,
}
impl<T: Copy + Debug + PartialEq> InputCell<T> {
pub fn new(init: T) -> Self |
}
struct ComputeCell<'r, T: Debug> {
fun: Box<dyn 'r + Fn(&[T]) -> T>,
deps: Vec<CellID>,
callbacks: HashMap<CallbackID, Callback<'r, T>>,
prev_val: Cell<Option<T>>,
next_cbid: usize, // increases monotonically; increments on adding a callback
clients: HashSet<ComputeCellID>,
}
impl<'r, T: Copy + Debug + PartialEq + 'r> ComputeCell<'r, T> {
pub fn new<F>(fun: F, deps: &[CellID]) -> Self
where
F: 'r + Fn(&[T]) -> T,
{
ComputeCell {
fun: Box::new(fun),
deps: deps.to_vec(),
callbacks: HashMap::new(),
prev_val: Cell::new(None),
next_cbid: 0,
clients: HashSet::new(),
}
}
pub fn call(&self, reactor: &Reactor<'r, T>) -> T {
let deps = self
.deps
.iter()
.map(|c| reactor.value(*c).unwrap())
.collect::<Vec<T>>();
let nv = (self.fun)(&deps);
let mut fire_callbacks = false;
if let Some(pv) = self.prev_val.get() {
if nv != pv {
self.prev_val.set(Some(nv));
fire_callbacks = true;
}
} else {
self.prev_val.set(Some(nv));
fire_callbacks = true;
}
if fire_callbacks {
for c in self.callbacks.values() {
(&mut *c.borrow_mut())(nv);
}
}
nv
}
}
#[derive(Default)]
pub struct Reactor<'r, T: Debug> {
input_cells: Vec<InputCell<T>>,
compute_cells: Vec<ComputeCell<'r, T>>,
}
// You are guaranteed that Reactor will only be tested against types that are Copy + PartialEq.
impl<'r, T: Copy + Debug + PartialEq + 'r> Reactor<'r, T> {
pub fn new() -> Self {
Reactor {
input_cells: Vec::new(),
compute_cells: Vec::new(),
}
}
// Creates an input cell with the specified initial value, returning its ID.
pub fn create_input(&mut self, initial: T) -> InputCellID {
let idx = self.input_cells.len();
let id = InputCellID(idx);
self.input_cells.push(InputCell::new(initial));
id
}
// Creates a compute cell with the specified dependencies and compute function.
// The compute function is expected to take in its arguments in the same order as specified in
// `dependencies`.
// You do not need to reject compute functions that expect more arguments than there are
// dependencies (how would you check for this, anyway?).
//
// If any dependency doesn't exist, returns an Err with that nonexistent dependency.
// (If multiple dependencies do not exist, exactly which one is returned is not defined and
// will not be tested)
//
// Notice that there is no way to *remove* a cell.
// This means that you may assume, without checking, that if the dependencies exist at creation
// time they will continue to exist as long as the Reactor exists.
pub fn create_compute<F>(
&mut self,
dependencies: &[CellID],
compute_func: F,
) -> Result<ComputeCellID, CellID>
where
F: 'r + Fn(&[T]) -> T,
{
let cidx = self.compute_cells.len();
let cid = ComputeCellID(cidx);
for id in dependencies.iter() {
match id {
CellID::Input(InputCellID(idx)) => {
if *idx >= self.input_cells.len() {
return Err(*id);
}
}
CellID::Compute(ComputeCellID(idx)) => {
if *idx >= self.compute_cells.len() {
return Err(*id);
}
}
}
}
// register as clients with all dependencies.
for id in dependencies.iter() {
match id {
CellID::Input(InputCellID(idx)) => {
let _ = self.input_cells[*idx].clients.insert(cid);
}
CellID::Compute(ComputeCellID(idx)) => {
let _ = self.compute_cells[*idx].clients.insert(cid);
}
}
}
let cell = ComputeCell::new(compute_func, dependencies);
cell.call(&self); // set the initial value
self.compute_cells.push(cell);
Ok(cid)
}
// Retrieves the current value of the cell, or None if the cell does not exist.
//
// You may wonder whether it is possible to implement `get(&self, id: CellID) -> Option<&Cell>`
// and have a `value(&self)` method on `Cell`.
//
// It turns out this introduces a significant amount of extra complexity to this exercise.
// We chose not to cover this here, since this exercise is probably enough work as-is.
pub fn value(&self, id: CellID) -> Option<T> {
match id {
CellID::Input(InputCellID(idx)) => self.input_cells.get(idx).map(|i| i.value),
CellID::Compute(ComputeCellID(idx)) => {
if let Some(cell) = self.compute_cells.get(idx) {
Some(cell.call(&self))
} else {
None
}
}
}
}
// Sets the value of the specified input cell.
//
// Returns false if the cell does not exist.
//
// Similarly, you may wonder about `get_mut(&mut self, id: CellID) -> Option<&mut Cell>`, with
// a `set_value(&mut self, new_value: T)` method on `Cell`.
//
// As before, that turned out to add too much extra complexity.
pub fn set_value(&mut self, id: InputCellID, new_value: T) -> bool {
let InputCellID(idx) = id;
if idx < self.input_cells.len() {
let old_value = self.input_cells[idx].value;
if old_value == new_value {
return true;
}
self.input_cells[idx].value = new_value;
let mut clients1 = self.input_cells[idx].clients.clone();
let mut clients2 = HashSet::new();
let mut done = false;
// Recursively iterate through all clients until we've converged on
// the stable set of them. Does at least N extra checks, where N is
// the number of ultimate clients.
while !done {
for client in clients1.iter() {
clients2.insert(client.clone());
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[*idx];
// first find all the clients that will be called without us
clients2.extend(cell.clients.iter());
}
for client in clients2.iter() {
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[*idx];
clients1.extend(cell.clients.iter());
}
done = clients1 == clients2;
}
// This has the potential to call more clients than needed, but ComputeCells
// cache their previous value and only invoke their callbacks on change,
// so client callbacks won't get invoked more than once.
//
// There's an implicit assumption here that each ComputeCell's function is
// cheap to run, which is probably not true in general. We could do a
// topological sort of the client graph to ensure we only call leaf nodes.
for client in clients1 {
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[idx];
cell.call(&self);
}
// we have set a new value and called all clients, return true
true
} else {
// the cell does not exist, return false
false
}
}
// Adds a callback to the specified compute cell.
//
// Returns the ID of the just-added callback, or None if the cell doesn't exist.
//
// Callbacks on input cells will not be tested.
//
// The semantics of callbacks (as will be tested):
// For a single set_value call, each compute cell's callbacks should each | {
InputCell {
clients: HashSet::new(),
value: init,
}
} | identifier_body |
lib.rs | new();
/// let input = r.create_input(111);
/// let compute: react::InputCellID = r.create_compute(&[react::CellID::Input(input)], |_| 222).unwrap();
/// ```
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ComputeCellID(usize);
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct CallbackID(usize);
pub type Callback<'reactor, T> = RefCell<Box<dyn 'reactor + FnMut(T)>>;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum CellID {
Input(InputCellID),
Compute(ComputeCellID),
}
#[derive(Debug, PartialEq)]
pub enum RemoveCallbackError {
NonexistentCell,
NonexistentCallback,
}
struct InputCell<T> {
clients: HashSet<ComputeCellID>,
value: T,
}
impl<T: Copy + Debug + PartialEq> InputCell<T> {
pub fn new(init: T) -> Self {
InputCell {
clients: HashSet::new(),
value: init,
}
}
}
struct ComputeCell<'r, T: Debug> {
fun: Box<dyn 'r + Fn(&[T]) -> T>,
deps: Vec<CellID>,
callbacks: HashMap<CallbackID, Callback<'r, T>>,
prev_val: Cell<Option<T>>,
next_cbid: usize, // increases monotonically; increments on adding a callback
clients: HashSet<ComputeCellID>,
}
impl<'r, T: Copy + Debug + PartialEq + 'r> ComputeCell<'r, T> {
pub fn new<F>(fun: F, deps: &[CellID]) -> Self
where
F: 'r + Fn(&[T]) -> T,
{
ComputeCell {
fun: Box::new(fun),
deps: deps.to_vec(),
callbacks: HashMap::new(),
prev_val: Cell::new(None),
next_cbid: 0,
clients: HashSet::new(),
}
}
pub fn call(&self, reactor: &Reactor<'r, T>) -> T {
let deps = self
.deps
.iter()
.map(|c| reactor.value(*c).unwrap())
.collect::<Vec<T>>();
let nv = (self.fun)(&deps);
let mut fire_callbacks = false;
if let Some(pv) = self.prev_val.get() {
if nv != pv {
self.prev_val.set(Some(nv));
fire_callbacks = true;
}
} else {
self.prev_val.set(Some(nv));
fire_callbacks = true;
}
if fire_callbacks {
for c in self.callbacks.values() {
(&mut *c.borrow_mut())(nv);
}
}
nv
}
}
#[derive(Default)]
pub struct Reactor<'r, T: Debug> {
input_cells: Vec<InputCell<T>>,
compute_cells: Vec<ComputeCell<'r, T>>,
}
// You are guaranteed that Reactor will only be tested against types that are Copy + PartialEq.
impl<'r, T: Copy + Debug + PartialEq + 'r> Reactor<'r, T> {
pub fn | () -> Self {
Reactor {
input_cells: Vec::new(),
compute_cells: Vec::new(),
}
}
// Creates an input cell with the specified initial value, returning its ID.
pub fn create_input(&mut self, initial: T) -> InputCellID {
let idx = self.input_cells.len();
let id = InputCellID(idx);
self.input_cells.push(InputCell::new(initial));
id
}
// Creates a compute cell with the specified dependencies and compute function.
// The compute function is expected to take in its arguments in the same order as specified in
// `dependencies`.
// You do not need to reject compute functions that expect more arguments than there are
// dependencies (how would you check for this, anyway?).
//
// If any dependency doesn't exist, returns an Err with that nonexistent dependency.
// (If multiple dependencies do not exist, exactly which one is returned is not defined and
// will not be tested)
//
// Notice that there is no way to *remove* a cell.
// This means that you may assume, without checking, that if the dependencies exist at creation
// time they will continue to exist as long as the Reactor exists.
pub fn create_compute<F>(
&mut self,
dependencies: &[CellID],
compute_func: F,
) -> Result<ComputeCellID, CellID>
where
F: 'r + Fn(&[T]) -> T,
{
let cidx = self.compute_cells.len();
let cid = ComputeCellID(cidx);
for id in dependencies.iter() {
match id {
CellID::Input(InputCellID(idx)) => {
if *idx >= self.input_cells.len() {
return Err(*id);
}
}
CellID::Compute(ComputeCellID(idx)) => {
if *idx >= self.compute_cells.len() {
return Err(*id);
}
}
}
}
// register as clients with all dependencies.
for id in dependencies.iter() {
match id {
CellID::Input(InputCellID(idx)) => {
let _ = self.input_cells[*idx].clients.insert(cid);
}
CellID::Compute(ComputeCellID(idx)) => {
let _ = self.compute_cells[*idx].clients.insert(cid);
}
}
}
let cell = ComputeCell::new(compute_func, dependencies);
cell.call(&self); // set the initial value
self.compute_cells.push(cell);
Ok(cid)
}
// Retrieves the current value of the cell, or None if the cell does not exist.
//
// You may wonder whether it is possible to implement `get(&self, id: CellID) -> Option<&Cell>`
// and have a `value(&self)` method on `Cell`.
//
// It turns out this introduces a significant amount of extra complexity to this exercise.
// We chose not to cover this here, since this exercise is probably enough work as-is.
pub fn value(&self, id: CellID) -> Option<T> {
match id {
CellID::Input(InputCellID(idx)) => self.input_cells.get(idx).map(|i| i.value),
CellID::Compute(ComputeCellID(idx)) => {
if let Some(cell) = self.compute_cells.get(idx) {
Some(cell.call(&self))
} else {
None
}
}
}
}
// Sets the value of the specified input cell.
//
// Returns false if the cell does not exist.
//
// Similarly, you may wonder about `get_mut(&mut self, id: CellID) -> Option<&mut Cell>`, with
// a `set_value(&mut self, new_value: T)` method on `Cell`.
//
// As before, that turned out to add too much extra complexity.
pub fn set_value(&mut self, id: InputCellID, new_value: T) -> bool {
let InputCellID(idx) = id;
if idx < self.input_cells.len() {
let old_value = self.input_cells[idx].value;
if old_value == new_value {
return true;
}
self.input_cells[idx].value = new_value;
let mut clients1 = self.input_cells[idx].clients.clone();
let mut clients2 = HashSet::new();
let mut done = false;
// Recursively iterate through all clients until we've converged on
// the stable set of them. Does at least N extra checks, where N is
// the number of ultimate clients.
while !done {
for client in clients1.iter() {
clients2.insert(client.clone());
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[*idx];
// first find all the clients that will be called without us
clients2.extend(cell.clients.iter());
}
for client in clients2.iter() {
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[*idx];
clients1.extend(cell.clients.iter());
}
done = clients1 == clients2;
}
// This has the potential to call more clients than needed, but ComputeCells
// cache their previous value and only invoke their callbacks on change,
// so client callbacks won't get invoked more than once.
//
// There's an implicit assumption here that each ComputeCell's function is
// cheap to run, which is probably not true in general. We could do a
// topological sort of the client graph to ensure we only call leaf nodes.
for client in clients1 {
let ComputeCellID(idx) = client;
let cell = &self.compute_cells[idx];
cell.call(&self);
}
// we have set a new value and called all clients, return true
true
} else {
// the cell does not exist, return false
false
}
}
// Adds a callback to the specified compute cell.
//
// Returns the ID of the just-added callback, or None if the cell doesn't exist.
//
// Callbacks on input cells will not be tested.
//
// The semantics of callbacks (as will be tested):
// For a single set_value call, each compute cell's callbacks should each be | new | identifier_name |
utils.py | , read = s()
new_note = notes_o(samples)
# a note that is too high is treated as noise
if new_note[0] != 0 and new_note[0] <= 120:
note_klass = Note(time=total_frames / float(samplerate), pitch=new_note[0], volume=new_note[1] - 20,
duration=new_note[2])
result.append(note_klass)
total_frames += read
if read < hop_s:
break
return result
def read_bpm_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):
print("====> reading bpm from sound file")
win_s, hop_s = 1024, 512
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
'''
'phase': phase-based onset detection function.
This function uses information both in frequency and in phase to determine
changes in the spectral content that might correspond to musical onsets. It
is best suited for complex signals such as polyphonic recordings.
Juan-Pablo Bello, Mike P. Davies, and Mark B. Sandler. Phase-based note
onset detection for music signals. In Proceedings of the IEEE International
Conference on Acoustics, Speech and Signal Processing, pages 441-444,
Hong Kong, 2003.
'''
o = tempo("phase", win_s, hop_s, samplerate)
beats = []
total_frames = 0
while True:
samples, read = s()
is_beat = o(samples)
if is_beat:
this_beat = o.get_last_s()
beats.append(this_beat)
# if o.get_confidence() > .2 and len(beats) > 2.:
# break
total_frames += read
if read < hop_s:
break
def beats_to_bpm(beats, path):
# if enough beats are found, convert to periods then to bpm
if len(beats) > 1:
if len(beats) < 4:
print("few beats found in {:s}".format(path))
bpms = 60. / numpy.diff(beats)
return numpy.median(bpms)
else:
print("not enough beats found in {:s}".format(path))
return 0
return beats_to_bpm(beats, filename)
def read_pitch_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):
"""
this method try to read pitches from a sound wave file with a list of dict of pitch and confidence
"""
if os.path.isfile(filename) is False:
raise Exception('File not found with filename = %s' % filename)
print("====> reading pitch from sound file")
win_s = 4096 // DOWN_SAMPLE # fft size
hop_s = 512 // DOWN_SAMPLE # hop size
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
tolerance = 0.8
pitch_o = pitch("yin", win_s, hop_s, samplerate)
pitch_o.set_unit("midi")
pitch_o.set_tolerance(tolerance)
result = []
# total number of frames read
total_frames = 0
while True:
samples, read = s()
# the pitch value is not rounded and many zeroes occur
that_pitch = pitch_o(samples)[0]
confidence = pitch_o.get_confidence()
result.append(dict(time=total_frames / float(samplerate), pitch=that_pitch, confidence=confidence))
total_frames += read
if read < hop_s:
break
group_result_with_log_density = compute_density_from_pitch_result(result)
density_level_list = compute_density_level(group_result_with_log_density, result[len(result) - 1]['time'])
print("====> density level list length %s" % len(density_level_list))
proportion_list = get_emphasis_start_times(group_result_with_log_density, result[len(result) - 1]['time'])
print("====> emphasis proportion list length = %d" % len(proportion_list))
return dict(pitch_result=result, emphasis_proportion_list=proportion_list, density_level_list=density_level_list)
def compute_density_level(group_result_with_log_density: List[dict], length: float):
"""
following result of function compute_density_from_pitch_result, this method will compute for each group,
a readable (from 0 to 9) density value for further usage
:param group_result_with_log_density:
:param length end time
:return:
"""
log_density_list = [group['log_density'] for group in group_result_with_log_density]
max_val = max(log_density_list)
min_val = min(log_density_list)
# split range with 10 and compute which to where
range_val = max_val - min_val
total_level = 9
gap = range_val / total_level
level_list = []
for i, log_density in enumerate(log_density_list):
level = 5
if gap != 0:
level = round((log_density - min_val) / gap)
level_list.append(dict(level=level, start_time=group_result_with_log_density[i]['pitches'][0]['time']))
for level_dict in level_list:
start = level_dict['start_time'] / length
level_dict['start_time'] = start
return level_list
def compute_density_from_pitch_result(pitch_result: List[dict]):
group_result = []
group = []
for i, pitch_dict in enumerate(pitch_result):
# current is not zero, but previous is zero
# should flush the group
if round(pitch_dict['pitch']) != 0 and i - 1 >= 0 and round(pitch_result[i - 1]['pitch']) == 0:
group_result.append(group)
group = []
group.append(pitch_dict)
# now for each group we have the elements which are essentially divided by time frame
# we just need to identify the average density and get the highest ones
density_list = [len(group) for group in group_result]
# average_density = sum(density_list) / len(density_list)
log_density_list = numpy.log10(density_list)
# only for those group with density > coefficient * log_max_density is qualified to be the emphasis one.
# but here we just give the density log result
group_result_with_log_density = []
for i, group in enumerate(group_result):
group_result_with_log_density.append(dict(log_density=log_density_list[i], pitches=group))
return group_result_with_log_density
def get_emphasis_start_times(group_result_with_log_density: List[dict], length: float, coefficient: int = 0.8,
threshold: int = 1):
"""
:param group_result_with_log_density compute_density_from_pitch_result function result
:param coefficient compares to the max log value, which should we consider emphasis
:param threshold means only pitch density more than threshold could use emphasis method
:param length is the length of sound in second unit
"""
log_density_list = [group['log_density'] for group in group_result_with_log_density]
max_log_density = max(log_density_list)
filter_value = coefficient * max_log_density
pitch_group_list = []
for group in group_result_with_log_density:
if group['log_density'] >= threshold and group['log_density'] >= filter_value:
pitch_group_list.append(group['pitches'])
# now we have pitch group, we can know where to start emphasis and where to end
range_time_list = []
for pitch_group in pitch_group_list:
start = pitch_group[0]['time']
end = pitch_group[len(pitch_group) - 1]['time']
range_time_list.append(dict(start=start, end=end))
# transform proportion value for further beats computing
proportion_list = []
for range_time in range_time_list:
start = range_time['start'] / length |
def drum_note_to_heart_beat_track(midi_instance: MIDIFile):
"""
@Deprecated
"""
# exporting bass drum notes
bass_drum_beats_in_ms = []
ms_per_tick = 60 * 1000 / (tempo * TICKSPERQUARTERNOTE)
for event in midi_instance.tracks[channel_map[CHANNEL_NAME_DRUM_KIT]].eventList:
if isinstance(event, NoteOn) and event.pitch == drum_map['BassDrum']:
bass_drum_beats_in_ms.append(ms_per_tick * event.tick)
single_heart_beat = AudioSegment.from_file('./single_heartbeat.mp3', format='mp3')
heartbeat_track = AudioSegment.empty()
for i, bass_drum_beat_note_on in enumerate(bass_drum_beats_in_ms):
if i == 0:
heartbeat_track += AudioSegment.silent(duration=bass_drum_beat_note_on)
elif i + 1 < len(bass_drum_beats_in_ms):
# if the next bass drum time is early than heartbeat track
if len(heartbeat_track) > bass_drum_beats_in_ms[i + 1]:
continue
# fill the gap till the next | end = range_time['end'] / length
proportion_list.append(dict(start=start, end=end))
return proportion_list
| random_line_split |
utils.py | s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
tolerance = 0.8
pitch_o = pitch("yin", win_s, hop_s, samplerate)
pitch_o.set_unit("midi")
pitch_o.set_tolerance(tolerance)
result = []
# total number of frames read
total_frames = 0
while True:
samples, read = s()
# the pitch value is not rounded and many zeroes occur
that_pitch = pitch_o(samples)[0]
confidence = pitch_o.get_confidence()
result.append(dict(time=total_frames / float(samplerate), pitch=that_pitch, confidence=confidence))
total_frames += read
if read < hop_s:
break
group_result_with_log_density = compute_density_from_pitch_result(result)
density_level_list = compute_density_level(group_result_with_log_density, result[len(result) - 1]['time'])
print("====> density level list length %s" % len(density_level_list))
proportion_list = get_emphasis_start_times(group_result_with_log_density, result[len(result) - 1]['time'])
print("====> emphasis proportion list length = %d" % len(proportion_list))
return dict(pitch_result=result, emphasis_proportion_list=proportion_list, density_level_list=density_level_list)
def compute_density_level(group_result_with_log_density: List[dict], length: float):
"""
following result of function compute_density_from_pitch_result, this method will compute for each group,
a readable (from 0 to 9) density value for further usage
:param group_result_with_log_density:
:param length end time
:return:
"""
log_density_list = [group['log_density'] for group in group_result_with_log_density]
max_val = max(log_density_list)
min_val = min(log_density_list)
# split range with 10 and compute which to where
range_val = max_val - min_val
total_level = 9
gap = range_val / total_level
level_list = []
for i, log_density in enumerate(log_density_list):
level = 5
if gap != 0:
level = round((log_density - min_val) / gap)
level_list.append(dict(level=level, start_time=group_result_with_log_density[i]['pitches'][0]['time']))
for level_dict in level_list:
start = level_dict['start_time'] / length
level_dict['start_time'] = start
return level_list
def compute_density_from_pitch_result(pitch_result: List[dict]):
group_result = []
group = []
for i, pitch_dict in enumerate(pitch_result):
# current is not zero, but previous is zero
# should flush the group
if round(pitch_dict['pitch']) != 0 and i - 1 >= 0 and round(pitch_result[i - 1]['pitch']) == 0:
group_result.append(group)
group = []
group.append(pitch_dict)
# now for each group we have the elements which are essentially divided by time frame
# we just need to identify the average density and get the highest ones
density_list = [len(group) for group in group_result]
# average_density = sum(density_list) / len(density_list)
log_density_list = numpy.log10(density_list)
# only for those group with density > coefficient * log_max_density is qualified to be the emphasis one.
# but here we just give the density log result
group_result_with_log_density = []
for i, group in enumerate(group_result):
group_result_with_log_density.append(dict(log_density=log_density_list[i], pitches=group))
return group_result_with_log_density
def get_emphasis_start_times(group_result_with_log_density: List[dict], length: float, coefficient: int = 0.8,
threshold: int = 1):
"""
:param group_result_with_log_density compute_density_from_pitch_result function result
:param coefficient compares to the max log value, which should we consider emphasis
:param threshold means only pitch density more than threshold could use emphasis method
:param length is the length of sound in second unit
"""
log_density_list = [group['log_density'] for group in group_result_with_log_density]
max_log_density = max(log_density_list)
filter_value = coefficient * max_log_density
pitch_group_list = []
for group in group_result_with_log_density:
if group['log_density'] >= threshold and group['log_density'] >= filter_value:
pitch_group_list.append(group['pitches'])
# now we have pitch group, we can know where to start emphasis and where to end
range_time_list = []
for pitch_group in pitch_group_list:
start = pitch_group[0]['time']
end = pitch_group[len(pitch_group) - 1]['time']
range_time_list.append(dict(start=start, end=end))
# transform proportion value for further beats computing
proportion_list = []
for range_time in range_time_list:
start = range_time['start'] / length
end = range_time['end'] / length
proportion_list.append(dict(start=start, end=end))
return proportion_list
def drum_note_to_heart_beat_track(midi_instance: MIDIFile):
"""
@Deprecated
"""
# exporting bass drum notes
bass_drum_beats_in_ms = []
ms_per_tick = 60 * 1000 / (tempo * TICKSPERQUARTERNOTE)
for event in midi_instance.tracks[channel_map[CHANNEL_NAME_DRUM_KIT]].eventList:
if isinstance(event, NoteOn) and event.pitch == drum_map['BassDrum']:
bass_drum_beats_in_ms.append(ms_per_tick * event.tick)
single_heart_beat = AudioSegment.from_file('./single_heartbeat.mp3', format='mp3')
heartbeat_track = AudioSegment.empty()
for i, bass_drum_beat_note_on in enumerate(bass_drum_beats_in_ms):
if i == 0:
heartbeat_track += AudioSegment.silent(duration=bass_drum_beat_note_on)
elif i + 1 < len(bass_drum_beats_in_ms):
# if the next bass drum time is early than heartbeat track
if len(heartbeat_track) > bass_drum_beats_in_ms[i + 1]:
continue
# fill the gap till the next heart beat
gap = bass_drum_beats_in_ms[i + 1] - len(heartbeat_track)
heartbeat_track += AudioSegment.silent(duration=gap)
elif i == len(bass_drum_beats_in_ms) - 1:
# ignore the last one
continue
heartbeat_track += single_heart_beat
heartbeat_track.export('heartbeat_track.mp3', format='mp3')
def get_one_bar_heart_beat(filename: str, bpm: int):
"""
given defined bpm, it generates a bar of heartbeat sound.
given the fact that the heart beat track has a certain length of each beat, the bpm cannot be too high,
which is undetermined yet.
:return:
"""
heart_beat_track = AudioSegment.from_file(file=filename, format='mp3')
heart_beat_1 = heart_beat_track[70:180]
heart_beat_2 = heart_beat_track[380:490]
# AudioSegment.export(part, 'single_heartbeat1.mp3')
tick_per_sec = 60 * 1000 / bpm
# make a sequential beats by a quarter notes which means a tick contains 2 heat beats
# and this is only applied for a half bar.
# in conclusion, one bar has two sets of heart beats
result_track = AudioSegment.empty()
# first set
result_track += heart_beat_1
gap = tick_per_sec / 2 - len(result_track)
result_track += AudioSegment.silent(gap)
result_track += heart_beat_2
# fill the gap
gap = tick_per_sec * 2 - len(result_track)
result_track += AudioSegment.silent(gap)
# # second set
result_track += heart_beat_1
gap = tick_per_sec * 2.5 - len(result_track)
result_track += AudioSegment.silent(gap)
result_track += heart_beat_2
# # fill the end gap
gap = tick_per_sec * 4 - len(result_track)
result_track += AudioSegment.silent(gap)
return result_track
def get_heart_beat_track(filename: str, bar_count: int, bpm: int):
result = AudioSegment.empty()
for i in range(bar_count):
result += get_one_bar_heart_beat(filename, bpm)
return result
def get_heart_beat_track_and_save(filename: str, dest_filename: str, bar_count: int, bpm: int):
r | esult = get_heart_beat_track(filename, bar_count, bpm)
# reduce 3dB of the result
result = result - 3
# tick_per_sec = 60 * 1000 / bpm
# fade_time = round(tick_per_sec * 4)
# result.fade_in(fade_time)
# result.fade_out(fade_time)
AudioSegment.export(result, dest_filename)
| identifier_body |
|
utils.py | read = s()
new_note = notes_o(samples)
# note too high considered as noise
if new_note[0] != 0 and new_note[0] <= 120:
note_klass = Note(time=total_frames / float(samplerate), pitch=new_note[0], volume=new_note[1] - 20,
duration=new_note[2])
result.append(note_klass)
total_frames += read
if read < hop_s:
break
return result
def read_bpm_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):
print("====> reading bpm from sound file")
win_s, hop_s = 1024, 512
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
'''
phase Phase based onset detection function
This function uses information both in frequency and in phase to determine
changes in the spectral content that might correspond to musical onsets. It
is best suited for complex signals such as polyphonic recordings.
Juan-Pablo Bello, Mike P. Davies, and Mark B. Sandler. Phase-based note
onset detection for music signals. In Proceedings of the IEEE International
Conference on Acoustics Speech and Signal Processing, pages 441444,
Hong-Kong, 2003.
'''
o = tempo("phase", win_s, hop_s, samplerate)
beats = []
total_frames = 0
while True:
samples, read = s()
is_beat = o(samples)
if is_beat:
this_beat = o.get_last_s()
beats.append(this_beat)
# if o.get_confidence() > .2 and len(beats) > 2.:
# break
total_frames += read
if read < hop_s:
break
def beats_to_bpm(beats, path):
# if enough beats are found, convert to periods then to bpm
if len(beats) > 1:
if len(beats) < 4:
print("few beats found in {:s}".format(path))
bpms = 60. / numpy.diff(beats)
return numpy.median(bpms)
else:
print("not enough beats found in {:s}".format(path))
return 0
return beats_to_bpm(beats, filename)
def read_pitch_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):
"""
this method try to read pitches from a sound wave file with a list of dict of pitch and confidence
"""
if os.path.isfile(filename) is False:
raise Exception('File not found with filename = %s' % filename)
print("====> reading pitch from sound file")
win_s = 4096 // DOWN_SAMPLE # fft size
hop_s = 512 // DOWN_SAMPLE # hop size
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
tolerance = 0.8
pitch_o = pitch("yin", win_s, hop_s, samplerate)
pitch_o.set_unit("midi")
pitch_o.set_tolerance(tolerance)
result = []
# total number of frames read
total_frames = 0
while True:
samples, read = s()
# the pitch value is not rounded and many zeroes occur
that_pitch = pitch_o(samples)[0]
confidence = pitch_o.get_confidence()
result.append(dict(time=total_frames / float(samplerate), pitch=that_pitch, confidence=confidence))
total_frames += read
if read < hop_s:
break
group_result_with_log_density = compute_density_from_pitch_result(result)
density_level_list = compute_density_level(group_result_with_log_density, result[len(result) - 1]['time'])
print("====> density level list length %s" % len(density_level_list))
proportion_list = get_emphasis_start_times(group_result_with_log_density, result[len(result) - 1]['time'])
print("====> emphasis proportion list length = %d" % len(proportion_list))
return dict(pitch_result=result, emphasis_proportion_list=proportion_list, density_level_list=density_level_list)
def compute_density_level(group_result_with_log_density: List[dict], length: float):
"""
following result of function compute_density_from_pitch_result, this method will compute for each group,
a readable (from 0 to 9) density value for further usage
:param group_result_with_log_density:
:param length end time
:return:
"""
log_density_list = [group['log_density'] for group in group_result_with_log_density]
max_val = max(log_density_list)
min_val = min(log_density_list)
# split range with 10 and compute which to where
range_val = max_val - min_val
total_level = 9
gap = range_val / total_level
level_list = []
for i, log_density in enumerate(log_density_list):
level = 5
if gap != 0:
level = round((log_density - min_val) / gap)
level_list.append(dict(level=level, start_time=group_result_with_log_density[i]['pitches'][0]['time']))
for level_dict in level_list:
start = level_dict['start_time'] / length
level_dict['start_time'] = start
return level_list
def c | pitch_result: List[dict]):
group_result = []
group = []
for i, pitch_dict in enumerate(pitch_result):
# current is not zero, but previous is zero
# should flush the group
if round(pitch_dict['pitch']) != 0 and i - 1 >= 0 and round(pitch_result[i - 1]['pitch']) == 0:
group_result.append(group)
group = []
group.append(pitch_dict)
# now for each group we have the elements which are essentially divided by time frame
# we just need to identify the average density and get the highest ones
density_list = [len(group) for group in group_result]
# average_density = sum(density_list) / len(density_list)
log_density_list = numpy.log10(density_list)
# only for those group with density > coefficient * log_max_density is qualified to be the emphasis one.
# but here we just give the density log result
group_result_with_log_density = []
for i, group in enumerate(group_result):
group_result_with_log_density.append(dict(log_density=log_density_list[i], pitches=group))
return group_result_with_log_density
def get_emphasis_start_times(group_result_with_log_density: List[dict], length: float, coefficient: int = 0.8,
threshold: int = 1):
"""
:param group_result_with_log_density compute_density_from_pitch_result function result
:param coefficient compares to the max log value, which should we consider emphasis
:param threshold means only pitch density more than threshold could use emphasis method
:param length is the length of sound in second unit
"""
log_density_list = [group['log_density'] for group in group_result_with_log_density]
max_log_density = max(log_density_list)
filter_value = coefficient * max_log_density
pitch_group_list = []
for group in group_result_with_log_density:
if group['log_density'] >= threshold and group['log_density'] >= filter_value:
pitch_group_list.append(group['pitches'])
# now we have pitch group, we can know where to start emphasis and where to end
range_time_list = []
for pitch_group in pitch_group_list:
start = pitch_group[0]['time']
end = pitch_group[len(pitch_group) - 1]['time']
range_time_list.append(dict(start=start, end=end))
# transform proportion value for further beats computing
proportion_list = []
for range_time in range_time_list:
start = range_time['start'] / length
end = range_time['end'] / length
proportion_list.append(dict(start=start, end=end))
return proportion_list
def drum_note_to_heart_beat_track(midi_instance: MIDIFile):
"""
@Deprecated
"""
# exporting bass drum notes
bass_drum_beats_in_ms = []
ms_per_tick = 60 * 1000 / (tempo * TICKSPERQUARTERNOTE)
for event in midi_instance.tracks[channel_map[CHANNEL_NAME_DRUM_KIT]].eventList:
if isinstance(event, NoteOn) and event.pitch == drum_map['BassDrum']:
bass_drum_beats_in_ms.append(ms_per_tick * event.tick)
single_heart_beat = AudioSegment.from_file('./single_heartbeat.mp3', format='mp3')
heartbeat_track = AudioSegment.empty()
for i, bass_drum_beat_note_on in enumerate(bass_drum_beats_in_ms):
if i == 0:
heartbeat_track += AudioSegment.silent(duration=bass_drum_beat_note_on)
elif i + 1 < len(bass_drum_beats_in_ms):
# if the next bass drum time is early than heartbeat track
if len(heartbeat_track) > bass_drum_beats_in_ms[i + 1]:
continue
# fill the gap till the | ompute_density_from_pitch_result( | identifier_name |
utils.py | read = s()
new_note = notes_o(samples)
# note too high considered as noise
if new_note[0] != 0 and new_note[0] <= 120:
note_klass = Note(time=total_frames / float(samplerate), pitch=new_note[0], volume=new_note[1] - 20,
duration=new_note[2])
result.append(note_klass)
total_frames += read
if read < hop_s:
break
return result
def read_bpm_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):
print("====> reading bpm from sound file")
win_s, hop_s = 1024, 512
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
'''
phase Phase based onset detection function
This function uses information both in frequency and in phase to determine
changes in the spectral content that might correspond to musical onsets. It
is best suited for complex signals such as polyphonic recordings.
Juan-Pablo Bello, Mike P. Davies, and Mark B. Sandler. Phase-based note
onset detection for music signals. In Proceedings of the IEEE International
Conference on Acoustics Speech and Signal Processing, pages 441444,
Hong-Kong, 2003.
'''
o = tempo("phase", win_s, hop_s, samplerate)
beats = []
total_frames = 0
while True:
samples, read = s()
is_beat = o(samples)
if is_beat:
this_beat = o.get_last_s()
beats.append(this_beat)
# if o.get_confidence() > .2 and len(beats) > 2.:
# break
total_frames += read
if read < hop_s:
break
def beats_to_bpm(beats, path):
# if enough beats are found, convert to periods then to bpm
if len(beats) > 1:
if len(beats) < 4:
print("few beats found in {:s}".format(path))
bpms = 60. / numpy.diff(beats)
return numpy.median(bpms)
else:
print("not enough beats found in {:s}".format(path))
return 0
return beats_to_bpm(beats, filename)
def read_pitch_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):
"""
this method try to read pitches from a sound wave file with a list of dict of pitch and confidence
"""
if os.path.isfile(filename) is False:
raise Exception('File not found with filename = %s' % filename)
print("====> reading pitch from sound file")
win_s = 4096 // DOWN_SAMPLE # fft size
hop_s = 512 // DOWN_SAMPLE # hop size
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
tolerance = 0.8
pitch_o = pitch("yin", win_s, hop_s, samplerate)
pitch_o.set_unit("midi")
pitch_o.set_tolerance(tolerance)
result = []
# total number of frames read
total_frames = 0
while True:
samples, read = s()
# the pitch value is not rounded and many zeroes occur
that_pitch = pitch_o(samples)[0]
confidence = pitch_o.get_confidence()
result.append(dict(time=total_frames / float(samplerate), pitch=that_pitch, confidence=confidence))
total_frames += read
if read < hop_s:
break
group_result_with_log_density = compute_density_from_pitch_result(result)
density_level_list = compute_density_level(group_result_with_log_density, result[len(result) - 1]['time'])
print("====> density level list length %s" % len(density_level_list))
proportion_list = get_emphasis_start_times(group_result_with_log_density, result[len(result) - 1]['time'])
print("====> emphasis proportion list length = %d" % len(proportion_list))
return dict(pitch_result=result, emphasis_proportion_list=proportion_list, density_level_list=density_level_list)
def compute_density_level(group_result_with_log_density: List[dict], length: float):
"""
following result of function compute_density_from_pitch_result, this method will compute for each group,
a readable (from 0 to 9) density value for further usage
:param group_result_with_log_density:
:param length end time
:return:
"""
log_density_list = [group['log_density'] for group in group_result_with_log_density]
max_val = max(log_density_list)
min_val = min(log_density_list)
# split range with 10 and compute which to where
range_val = max_val - min_val
total_level = 9
gap = range_val / total_level
level_list = []
for i, log_density in enumerate(log_density_list):
level = 5
if gap != 0:
level = round((log_density - min_val) / gap)
level_list.append(dict(level=level, start_time=group_result_with_log_density[i]['pitches'][0]['time']))
for level_dict in level_list:
start = level_dict['start_time'] / length
level_dict['start_time'] = start
return level_list
def compute_density_from_pitch_result(pitch_result: List[dict]):
group_result = []
group = []
for i, pitch_dict in enumerate(pitch_result):
# current is not zero, but previous is zero
# should flush the group
if round(pitch_dict['pitch']) != 0 and i - 1 >= 0 and round(pitch_result[i - 1]['pitch']) == 0:
group_result.append(group)
group = []
group.append(pitch_dict)
# now for each group we have the elements which are essentially divided by time frame
# we just need to identify the average density and get the highest ones
density_list = [len(group) for group in group_result]
# average_density = sum(density_list) / len(density_list)
log_density_list = numpy.log10(density_list)
# only for those group with density > coefficient * log_max_density is qualified to be the emphasis one.
# but here we just give the density log result
group_result_with_log_density = []
for i, group in enumerate(group_result):
group_result_with_log_density.append(dict(log_density=log_density_list[i], pitches=group))
return group_result_with_log_density
def get_emphasis_start_times(group_result_with_log_density: List[dict], length: float, coefficient: int = 0.8,
threshold: int = 1):
"""
:param group_result_with_log_density compute_density_from_pitch_result function result
:param coefficient compares to the max log value, which should we consider emphasis
:param threshold means only pitch density more than threshold could use emphasis method
:param length is the length of sound in second unit
"""
log_density_list = [group['log_density'] for group in group_result_with_log_density]
max_log_density = max(log_density_list)
filter_value = coefficient * max_log_density
pitch_group_list = []
for group in group_result_with_log_density:
if group['log_density'] >= threshold and group['log_density'] >= filter_value:
pitch_group_list.append(group['pitches'])
# now we have pitch group, we can know where to start emphasis and where to end
range_time_list = []
for pitch_group in pitch_group_list:
s |
# transform proportion value for further beats computing
proportion_list = []
for range_time in range_time_list:
start = range_time['start'] / length
end = range_time['end'] / length
proportion_list.append(dict(start=start, end=end))
return proportion_list
def drum_note_to_heart_beat_track(midi_instance: MIDIFile):
"""
@Deprecated
"""
# exporting bass drum notes
bass_drum_beats_in_ms = []
ms_per_tick = 60 * 1000 / (tempo * TICKSPERQUARTERNOTE)
for event in midi_instance.tracks[channel_map[CHANNEL_NAME_DRUM_KIT]].eventList:
if isinstance(event, NoteOn) and event.pitch == drum_map['BassDrum']:
bass_drum_beats_in_ms.append(ms_per_tick * event.tick)
single_heart_beat = AudioSegment.from_file('./single_heartbeat.mp3', format='mp3')
heartbeat_track = AudioSegment.empty()
for i, bass_drum_beat_note_on in enumerate(bass_drum_beats_in_ms):
if i == 0:
heartbeat_track += AudioSegment.silent(duration=bass_drum_beat_note_on)
elif i + 1 < len(bass_drum_beats_in_ms):
# if the next bass drum time is early than heartbeat track
if len(heartbeat_track) > bass_drum_beats_in_ms[i + 1]:
continue
# fill the gap till | tart = pitch_group[0]['time']
end = pitch_group[len(pitch_group) - 1]['time']
range_time_list.append(dict(start=start, end=end))
| conditional_block |
save_articles.go | имеет значение NULL.
func getArticleIds(limit int, showTiming bool) []string {
startTime := time.Now()
// db, err := sql.Open("sqlite3", dbFileName)
db, err := sqlx.Open("postgres", DSN)
checkErr(err)
ids := make([]string, 0)
err = db.Select(&ids, fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status IS NULL LIMIT %d", limit))
checkErr(err)
// закомментированный код работает тоже в том числе для sqllite3
// rows, err := db.Query(fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status = '%s' LIMIT %d", status, limit))
// checkErr(err)
// var id string
// for rows.Next() {
// err = rows.Scan(&id)
// checkErr(err)
// ids = append(ids, id)
// }
// rows.Close() //good habit to close
err = db.Close()
checkErr(err)
if showTiming {
fmt.Printf("Got %v ids in %v. \n", len(ids), time.Since(startTime))
}
return ids
}
// Делает последовательные запросы к API возвращая массив пар:
// [ [id, text], [id,text],...]
func getAPITexts(ids []string) [][]string {
// startTime := time.Now()
articles := make([][]string, 0)
for _, id := range ids {
articles = append(articles, getOneArticleFromAPI(id))
}
// duration := time.Since(startTime)
// fmt.Printf("Got %v articles in %v. \n", len(ids), duration)
return articles
}
// Делает параллельные запросы к API возвращая массив пар:
// [ [id, text], [id,text],...]
func getAPITextsParallel(ids []string, showTiming bool) [][]string {
startTime := time.Now()
articles := make([][]string, 0)
ch := make(chan []string)
for _, id := range ids {
go func(id string) {
ch <- getOneArticleFromAPI(id)
}(id)
}
for range ids {
v := <-ch
articles = append(articles, v)
}
close(ch)
if showTiming {
fmt.Printf("Got %v articles in %v. \n", len(ids), time.Since(startTime))
}
return articles
}
// Возвращает id материала и его текст в виде [id, text] из API
func getOneArticleFromAPI(id string) []string {
client := http.Client{
Timeout: time.Duration(requestTimeout) * time.Second,
}
req, err := http.NewRequest("GET", fmt.Sprintf(urlArticle, id), nil)
if err != nil {
fmt.Println(err)
}
req.Close = true
req.Header.Set("Connection", "close")
resp, err := client.Do(req)
// resp, err := http.Get(fmt.Sprintf(urlArticle, id))
if err != nil {
fmt.Println(err)
return []string{id, ""}
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println(err)
return []string{id, ""}
}
s := string(body)
return []string{id, s}
}
// Преобразует массив текстов в массив записей.
// Запись это отображение: имя_поля -> значение_поля
func textsToArticleRecords(texts [][]string) []map[string]interface{} {
records := make([]map[string]interface{}, 0)
for _, o := range texts {
id := o[0]
text := o[1]
// record := map[string]string{"obj_id": id}
var objmap map[string]interface{} //json.RawMessage
err := json.Unmarshal([]byte(text), &objmap)
if err != nil {
fmt.Println(err)
objmap = make(map[string]interface{})
objmap["obj_id"] = id
objmap["migration_status"] = "error"
} else {
objmap["migration_status"] = "success"
}
records = append(records, objmap)
}
return records
}
// Сохраняет массив записей в базу данных.
// Запись представляет собой map[string]interface{}.
func saveArticlesToDatabase(records []map[string]interface{}, showTiming bool) {
startTime := time.Now()
paramsArray := make([][]interface{}, 0)
for _, record := range records {
params := make([]interface{}, 0)
params = append(params, getMapVal(record, "announce"))
params = append(params, getMapVal(record, "authors"))
params = append(params, getMapVal(record, "date_modified"))
params = append(params, getMapVal(record, "full-text"))
params = append(params, getMapVal(record, "images"))
params = append(params, getMapVal(record, "index_priority"))
params = append(params, getMapVal(record, "is_active"))
params = append(params, getMapVal(record, "is_announce"))
params = append(params, getMapVal(record, "is_paid"))
params = append(params, getMapVal(record, "link_title"))
params = append(params, getMapVal(record, "links"))
params = append(params, getMapVal(record, "obj_kind"))
params = append(params, getMapVal(record, "projects"))
params = append(params, getMapVal(record, "release_date"))
params = append(params, getMapVal(record, "spiegel"))
params = append(params, getMapVal(record, "title"))
params = append(params, getMapVal(record, "uannounce"))
params = append(params, getMapVal(record, "url"))
params = append(params, getMapVal(record, "migration_status"))
params = append(params, getMapVal(record, "obj_id"))
paramsArray = append(paramsArray, params)
}
sqlUpdate := `
UPDATE articles
SET
announce = $1,
authors = $2,
date_modified = $3,
"full-text" = $4,
images = $5,
index_priority = $6,
is_active = $7,
is_announce = $8,
is_paid = $9,
link_title = $10,
links = $11,
obj_kind = $12,
projects = $13,
release_date = $14,
spiegel = $15,
title = $16,
uannounce = $17,
url = $18,
migration_status = $19
WHERE
obj_id = $20
`
execMany(sqlUpdate, paramsArray)
if showTiming {
fmt.Printf("Saved %v articles to database in %v. \n", len(records), time.Since(startTime))
}
}
// Получает значение поля из отображения.
// Возвращает NULL в случае отсутствия поля,
// и тестовое представление если поле содержит JSON.
func getMapVal(m map[string]interface{}, key string) interface{} {
v, ok := m[key]
if !ok {
return nil
}
s, ok := v.(string)
if ok {
return s
}
b, err := json.Marshal(v)
if err == nil {
return string(b)
}
return "something bad"
}
// Исполняет запрос к базе данных. For all kinds of databases.
func exec(sqlText string) {
// db, err := sql.Open("sqlite3", dbFileName)
db, err := sqlx.Open("postgres", DSN)
defer db.Close()
checkErr(err)
stmt, err := db.Prepare(sqlText)
defer stmt.Close()
checkErr(err)
_, err = stmt.Exec()
checkErr(err)
}
// Исполняет запрос к базе данных. Specific to postgresql.
func mustExec(sqlText string) {
db, err := sqlx.Open("postgres", DSN)
defer db.Close()
if err != nil {
log.Fatalln(err)
}
db.MustExec(sqlText)
}
// Исполняет несколько параметризованных запросов на обновление или вставку.
// Если запрос не прошел, печатает сообщение.
func execMany(sqlText string, paramsArray [][]interface{}) {
// db, err := sql.Open("sqlite3", dbFileName)
db, err := sqlx.Open("postgres", DSN)
// defer db.Close()
checkErr(err)
stmt, err := db.Prepare(sqlText)
checkErr(err)
for _, params := range paramsArray {
// fmt.Println("params length================", len(params))
res, err := stmt.Exec(params...)
checkErr(err)
// Если запрос не затронул ни одну запись, выводим сообщение.
affect, err := res.RowsAffected()
checkErr(err)
if affect == 0 {
fmt.Println("Affected->", affect)
}
}
err = stmt.Close()
checkErr(err)
err = db.Close()
checkErr(err)
}
// Печатаем сообщение об ошибке
func checkErr(err error) {
if err != nil {
fmt.Print(err)
}
}
| conditional_block |
||
save_articles.go | Показывать времена исполнения")
flag.Parse()
// flag.Usage()
if batchSize == 0 {
os.Exit(0)
}
return
}
// Порождает таблицу articles в базе данных
func createArticlesTable() {
sqlCreateArticles := `
CREATE TABLE IF NOT EXISTS articles (
obj_id text PRIMARY KEY,
announce text NULL,
authors text NULL,
date_modified text NULL,
"full-text" text NULL,
images text NULL,
index_priority text NULL,
is_active text NULL,
is_announce text NULL,
is_paid text NULL,
link_title text NULL,
links text NULL,
obj_kind text NULL,
projects text NULL,
release_date text NULL,
spiegel text NULL,
title text NULL,
uannounce text NULL,
url text NULL,
migration_status text NULL, -- DEFAULT ''::text,
process_status text NULL,
elastic_status text NULL,
lemmatized_text text NULL,
entities_text text NULL,
entities_grouped text NULL
);
CREATE INDEX IF NOT EXISTS articles_migration_status__idx ON articles (migration_status);
CREATE INDEX IF NOT EXISTS articles_process_status__idx ON articles (process_status);
CREATE INDEX IF NOT EXISTS articles_elastic_status__idx ON articles (elastic_status);
`
mustExec(sqlCreateArticles)
fmt.Println("Таблица articles создана. Вставка новых записей ...")
}
// Заполняет таблицу articles идентификаторами статей полученными
// из таблицы связей rubrics_objects
func fillArticlesWithIds() {
startTime := time.Now()
sqlFillArticlesWithIds := `
INSERT INTO articles(obj_id)
SELECT DISTINCT rubrics_objects.object_id
FROM rubrics_objects LEFT JOIN articles ON rubrics_objects.object_id = articles.obj_id
WHERE
articles.obj_id IS NULL
AND rubrics_objects.kind = 'article'
ON CONFLICT (obj_id) DO NOTHING
;
`
mustExec(sqlFillArticlesWithIds)
fmt.Printf("Новые записи вставлены в таблицу articles за %v \n", time.Since(startTime))
}
// Заполняет таблицу articles текстами из API.
// - n - полное количество новых записей. For info only.
// - batchSize - Количество одновременных з | тексты статей
articleTexts := getAPITextsParallel(ids, showTiming)
// преобразовываем тексты в записи - массивы полей материала
articleRecords := textsToArticleRecords(articleTexts)
// Сохраняем записи в базу данных
saveArticlesToDatabase(articleRecords, showTiming)
// Выводим сообщение
counter += len(ids)
duration := time.Since(startTime)
durationHours := float64(duration) / float64(time.Hour)
articlesPerHour := float64(counter) / durationHours
fmt.Printf("Таблица articles. Загружено %8d/%d статей за %14v. Средняя скорость %.0f статей/час. \n", counter, n, duration, articlesPerHour)
// отдыхаем
time.Sleep(sleepTime)
// Берем следующую порцию идентификаторов
ids = getArticleIds(batchSize, showTiming)
}
}
// Получает количество новых записей в таблице articles,
// где поле migration_status имеет значение NULL.
func getNewRecordsNumber() int {
db, err := sqlx.Open("postgres", DSN)
checkErr(err)
ids := make([]int, 0)
err = db.Select(&ids, "SELECT count(obj_id) FROM articles WHERE migration_status IS NULL")
checkErr(err)
err = db.Close()
checkErr(err)
return ids[0]
}
// Получает массив идентификаторов (размером не более limit) статей из базы данных,
// в которых поле migration_status имеет значение NULL.
func getArticleIds(limit int, showTiming bool) []string {
startTime := time.Now()
// db, err := sql.Open("sqlite3", dbFileName)
db, err := sqlx.Open("postgres", DSN)
checkErr(err)
ids := make([]string, 0)
err = db.Select(&ids, fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status IS NULL LIMIT %d", limit))
checkErr(err)
// закомментированный код работает тоже в том числе для sqllite3
// rows, err := db.Query(fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status = '%s' LIMIT %d", status, limit))
// checkErr(err)
// var id string
// for rows.Next() {
// err = rows.Scan(&id)
// checkErr(err)
// ids = append(ids, id)
// }
// rows.Close() //good habit to close
err = db.Close()
checkErr(err)
if showTiming {
fmt.Printf("Got %v ids in %v. \n", len(ids), time.Since(startTime))
}
return ids
}
// Делает последовательные запросы к API возвращая массив пар:
// [ [id, text], [id,text],...]
func getAPITexts(ids []string) [][]string {
// startTime := time.Now()
articles := make([][]string, 0)
for _, id := range ids {
articles = append(articles, getOneArticleFromAPI(id))
}
// duration := time.Since(startTime)
// fmt.Printf("Got %v articles in %v. \n", len(ids), duration)
return articles
}
// Делает параллельные запросы к API возвращая массив пар:
// [ [id, text], [id,text],...]
func getAPITextsParallel(ids []string, showTiming bool) [][]string {
startTime := time.Now()
articles := make([][]string, 0)
ch := make(chan []string)
for _, id := range ids {
go func(id string) {
ch <- getOneArticleFromAPI(id)
}(id)
}
for range ids {
v := <-ch
articles = append(articles, v)
}
close(ch)
if showTiming {
fmt.Printf("Got %v articles in %v. \n", len(ids), time.Since(startTime))
}
return articles
}
// Возвращает id материала и его текст в виде [id, text] из API
func getOneArticleFromAPI(id string) []string {
client := http.Client{
Timeout: time.Duration(requestTimeout) * time.Second,
}
req, err := http.NewRequest("GET", fmt.Sprintf(urlArticle, id), nil)
if err != nil {
fmt.Println(err)
}
req.Close = true
req.Header.Set("Connection", "close")
resp, err := client.Do(req)
// resp, err := http.Get(fmt.Sprintf(urlArticle, id))
if err != nil {
fmt.Println(err)
return []string{id, ""}
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println(err)
return []string{id, ""}
}
s := string(body)
return []string{id, s}
}
// Преобразует массив текстов в массив записей.
// Запись это отображение: имя_поля -> значение_поля
func textsToArticleRecords(texts [][]string) []map[string]interface{} {
records := make([]map[string]interface{}, 0)
for _, o := range texts {
id := o[0]
text := o[1]
// record := map[string]string{"obj_id": id}
var objmap map[string]interface{} //json.RawMessage
err := json.Unmarshal([]byte(text), &objmap)
if err != nil {
fmt.Println(err)
objmap = make(map[string]interface{})
objmap["obj_id"] = id
objmap["migration_status"] = "error"
} else {
objmap["migration_status"] = "success"
}
records = append(records, objmap)
}
return records
}
// Сохраняет массив записей в базу данных.
// Запись представляет собой map[string]interface{}.
func saveArticlesToDatabase(records []map[string]interface{}, showTiming bool) {
startTime := time.Now()
paramsArray := make([][]interface{}, 0)
for _, record := range records {
params := make([]interface{}, 0)
params = append | апросов к API.
// - showTiming - Показывать времена исполнения
func fillArticlesWithTexts(n, batchSize int, showTiming bool) {
// время отдыха между порциями запросов
var sleepTime = 50 * time.Millisecond
// Счетчик сделанных запросов
counter := 0
//Время начала процесса
startTime := time.Now()
//Берем первую порцию идентификаторов из таблицы articles
ids := getArticleIds(batchSize, showTiming)
// Пока в порции в порции есть идентификаторы
for len(ids) > 0 {
//Запрашиваем | identifier_body |
save_articles.go | Показывать времена исполнения")
flag.Parse()
// flag.Usage()
if batchSize == 0 {
os.Exit(0)
}
return
}
// Порождает таблицу articles в базе данных
func createArticlesTable() {
sqlCreateArticles := `
CREATE TABLE IF NOT EXISTS articles (
obj_id text PRIMARY KEY,
announce text NULL,
authors text NULL,
date_modified text NULL,
"full-text" text NULL,
images text NULL,
index_priority text NULL,
is_active text NULL,
is_announce text NULL,
is_paid text NULL,
link_title text NULL,
links text NULL,
obj_kind text NULL,
projects text NULL,
release_date text NULL,
spiegel text NULL,
title text NULL,
uannounce text NULL,
url text NULL,
migration_status text NULL, -- DEFAULT ''::text,
process_status text NULL,
elastic_status text NULL,
lemmatized_text text NULL,
entities_text text NULL,
entities_grouped text NULL
);
CREATE INDEX IF NOT EXISTS articles_migration_status__idx ON articles (migration_status);
CREATE INDEX IF NOT EXISTS articles_process_status__idx ON articles (process_status);
CREATE INDEX IF NOT EXISTS articles_elastic_status__idx ON articles (elastic_status);
`
mustExec(sqlCreateArticles)
fmt.Println("Таблица articles создана. Вставка новых записей ...")
}
// Заполняет таблицу articles идентификаторами статей полученными
// из таблицы связей rubrics_objects
func fillArticlesWithIds() {
startTime := time.Now()
sqlFillArticlesWithIds := `
INSERT INTO articles(obj_id)
SELECT DISTINCT rubrics_objects.object_id
FROM rubrics_objects LEFT JOIN articles ON rubrics_objects.object_id = articles.obj_id
WHERE
articles.obj_id IS NULL
AND rubrics_objects.kind = 'article'
ON CONFLICT (obj_id) DO NOTHING
;
`
mustExec(sqlFillArticlesWithIds)
fmt.Printf("Новые записи вставлены в таблицу articles за %v \n", time.Since(startTime))
}
// Заполняет таблицу articles текстами из API.
// - n - полное количество новых записей. For info only.
// - batchSize - Количество одновременных запросов к API.
// - showTiming - Показывать времена исполнения
func fillArticlesWithTexts(n, batchSize int, showTiming bool) {
// время отдыха между порциями запросов
var sleepTime = 50 * time.Millisecond
// Счетчик сделанных запросов
counter := 0
//Время начала процесса
startTime := time.Now()
//Берем первую порцию идентификаторов из таблицы articles
ids := getArticleIds(batchSize, showTiming)
// Пока в порции в порции есть идентификаторы
for len(ids) > 0 {
//Запрашиваем тексты статей
articleTexts := getAPITextsParallel(ids, showTiming)
// преобразовываем тексты в записи - массивы полей материала
articleRecords := textsToArticleRecords(articleTexts)
// Сохраняем записи в базу данных
saveArticlesToDatabase(articleRecords, showTiming)
// Выводим сообщение
counter += len(ids)
duration := time.Since(startTime)
durationHours := float64(duration) / float64(time.Hour)
articlesPerHour := float64(counter) / durationHours
fmt.Printf("Таблица articles. Загружено %8d/%d статей за %14v. Средняя скорость %.0f статей/час. \n", counter, n, duration, articlesPerHour)
// отдыхаем
time.Sleep(sleepTime)
// Берем следующую порцию идентификаторов
ids = getArticleIds(batchSize, showTiming)
}
}
// Получает количество новых записей в таблице articles,
// где поле migration_status имеет значение NULL.
func getNewRecordsNumber() int {
db, err := sqlx.Open("postgres", DSN)
checkErr(err)
ids := make([]int, 0)
err = db.Select(&ids, "SELECT count(obj_id) FROM articles WHERE migration_status IS NULL")
checkErr(err)
err = db.Close()
checkErr(err)
return ids[0]
}
// Получает массив идентификаторов (размером не более limit) статей из базы данных,
// в которых поле migration_status имеет значение NULL.
func getArticleIds(limit int, showTiming bool) []string {
startTime := time.Now()
// db, err := sql.Open("sqlite3", dbFileName)
db, err := sqlx.Open("postgres", DSN)
checkErr(err)
ids := make([]string, 0)
err = db.Select(&ids, fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status IS NULL LIMIT %d", limit))
checkErr(err)
// закомментированный код работает тоже в том числе для sqllite3
// rows, err := db.Query(fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status = '%s' LIMIT %d", status, limit))
// checkErr(err)
// var id string
// for rows.Next() {
// err = rows.Scan(&id)
// checkErr(err)
// ids = append(ids, id)
// }
// rows.Close() //good habit to close
err = db.Close()
checkErr(err)
if showTiming {
fmt.Printf("Got %v ids in %v. \n", len(ids), time.Since(startTime))
}
return ids
}
// Делает последовательные запросы к API возвращая массив пар:
// [ [id, text], [id,text],...]
func getAPITexts(ids []string) [][]string {
// startTime := time.Now()
articles := make([][]string, 0)
for _, id := range ids {
articles = append(articles, getOneArticleFromAPI(id))
}
// duration := time.Since(startTime)
// fmt.Printf("Got %v articles in %v. \n", len(ids), duration)
return articles
}
// Дел | ные запросы к API возвращая массив пар:
// [ [id, text], [id,text],...]
func getAPITextsParallel(ids []string, showTiming bool) [][]string {
startTime := time.Now()
articles := make([][]string, 0)
ch := make(chan []string)
for _, id := range ids {
go func(id string) {
ch <- getOneArticleFromAPI(id)
}(id)
}
for range ids {
v := <-ch
articles = append(articles, v)
}
close(ch)
if showTiming {
fmt.Printf("Got %v articles in %v. \n", len(ids), time.Since(startTime))
}
return articles
}
// Возвращает id материала и его текст в виде [id, text] из API
func getOneArticleFromAPI(id string) []string {
client := http.Client{
Timeout: time.Duration(requestTimeout) * time.Second,
}
req, err := http.NewRequest("GET", fmt.Sprintf(urlArticle, id), nil)
if err != nil {
fmt.Println(err)
}
req.Close = true
req.Header.Set("Connection", "close")
resp, err := client.Do(req)
// resp, err := http.Get(fmt.Sprintf(urlArticle, id))
if err != nil {
fmt.Println(err)
return []string{id, ""}
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println(err)
return []string{id, ""}
}
s := string(body)
return []string{id, s}
}
// Преобразует массив текстов в массив записей.
// Запись это отображение: имя_поля -> значение_поля
func textsToArticleRecords(texts [][]string) []map[string]interface{} {
records := make([]map[string]interface{}, 0)
for _, o := range texts {
id := o[0]
text := o[1]
// record := map[string]string{"obj_id": id}
var objmap map[string]interface{} //json.RawMessage
err := json.Unmarshal([]byte(text), &objmap)
if err != nil {
fmt.Println(err)
objmap = make(map[string]interface{})
objmap["obj_id"] = id
objmap["migration_status"] = "error"
} else {
objmap["migration_status"] = "success"
}
records = append(records, objmap)
}
return records
}
// Сохраняет массив записей в базу данных.
// Запись представляет собой map[string]interface{}.
func saveArticlesToDatabase(records []map[string]interface{}, showTiming bool) {
startTime := time.Now()
paramsArray := make([][]interface{}, 0)
for _, record := range records {
params := make([]interface{}, 0)
params = append(params, | ает параллель | identifier_name |
save_articles.go | Показывать времена исполнения")
flag.Parse()
// flag.Usage()
if batchSize == 0 {
os.Exit(0)
}
return
}
// Порождает таблицу articles в базе данных
func createArticlesTable() {
sqlCreateArticles := `
CREATE TABLE IF NOT EXISTS articles (
obj_id text PRIMARY KEY,
announce text NULL,
authors text NULL,
date_modified text NULL,
"full-text" text NULL,
images text NULL,
index_priority text NULL,
is_active text NULL,
is_announce text NULL,
is_paid text NULL,
link_title text NULL,
links text NULL,
obj_kind text NULL,
projects text NULL,
release_date text NULL,
spiegel text NULL,
title text NULL,
uannounce text NULL,
url text NULL,
migration_status text NULL, -- DEFAULT ''::text,
process_status text NULL,
elastic_status text NULL,
lemmatized_text text NULL,
entities_text text NULL,
entities_grouped text NULL
);
CREATE INDEX IF NOT EXISTS articles_migration_status__idx ON articles (migration_status);
CREATE INDEX IF NOT EXISTS articles_process_status__idx ON articles (process_status);
CREATE INDEX IF NOT EXISTS articles_elastic_status__idx ON articles (elastic_status);
`
mustExec(sqlCreateArticles)
fmt.Println("Таблица articles создана. Вставка новых записей ...")
}
// Заполняет таблицу articles идентификаторами статей полученными
// из таблицы связей rubrics_objects
func fillArticlesWithIds() {
startTime := time.Now()
sqlFillArticlesWithIds := `
INSERT INTO articles(obj_id)
SELECT DISTINCT rubrics_objects.object_id
FROM rubrics_objects LEFT JOIN articles ON rubrics_objects.object_id = articles.obj_id
WHERE
articles.obj_id IS NULL
AND rubrics_objects.kind = 'article'
ON CONFLICT (obj_id) DO NOTHING
;
`
mustExec(sqlFillArticlesWithIds)
fmt.Printf("Новые записи вставлены в таблицу articles за %v \n", time.Since(startTime))
}
// Заполняет таблицу articles текстами из API.
// - n - полное количество новых записей. For info only.
// - batchSize - Количество одновременных запросов к API.
// - showTiming - Показывать времена исполнения
func fillArticlesWithTexts(n, batchSize int, showTiming bool) {
// время отдыха между порциями запросов
var sleepTime = 50 * time.Millisecond
// Счетчик сделанных запросов
counter := 0
//Время начала процесса
startTime := time.Now()
//Берем первую порцию идентификаторов из таблицы articles
ids := getArticleIds(batchSize, showTiming)
// Пока в порции в порции есть идентификаторы
for len(ids) > 0 {
//Запрашиваем тексты статей
articleTexts := getAPITextsParallel(ids, showTiming)
// преобразовываем тексты в записи - массивы полей материала
articleRecords := textsToArticleRecords(articleTexts)
// Сохраняем записи в базу данных
saveArticlesToDatabase(articleRecords, showTiming)
// Выводим сообщение
counter += len(ids)
duration := time.Since(startTime)
durationHours := float64(duration) / float64(time.Hour)
articlesPerHour := float64(counter) / durationHours
fmt.Printf("Таблица articles. Загружено %8d/%d статей за %14v. Средняя скорость %.0f статей/час. \n", counter, n, duration, articlesPerHour)
|
}
// Получает количество новых записей в таблице articles,
// где поле migration_status имеет значение NULL.
func getNewRecordsNumber() int {
db, err := sqlx.Open("postgres", DSN)
checkErr(err)
ids := make([]int, 0)
err = db.Select(&ids, "SELECT count(obj_id) FROM articles WHERE migration_status IS NULL")
checkErr(err)
err = db.Close()
checkErr(err)
return ids[0]
}
// Получает массив идентификаторов (размером не более limit) статей из базы данных,
// в которых поле migration_status имеет значение NULL.
func getArticleIds(limit int, showTiming bool) []string {
startTime := time.Now()
// db, err := sql.Open("sqlite3", dbFileName)
db, err := sqlx.Open("postgres", DSN)
checkErr(err)
ids := make([]string, 0)
err = db.Select(&ids, fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status IS NULL LIMIT %d", limit))
checkErr(err)
// закомментированный код работает тоже в том числе для sqllite3
// rows, err := db.Query(fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status = '%s' LIMIT %d", status, limit))
// checkErr(err)
// var id string
// for rows.Next() {
// err = rows.Scan(&id)
// checkErr(err)
// ids = append(ids, id)
// }
// rows.Close() //good habit to close
err = db.Close()
checkErr(err)
if showTiming {
fmt.Printf("Got %v ids in %v. \n", len(ids), time.Since(startTime))
}
return ids
}
// Делает последовательные запросы к API возвращая массив пар:
// [ [id, text], [id,text],...]
func getAPITexts(ids []string) [][]string {
// startTime := time.Now()
articles := make([][]string, 0)
for _, id := range ids {
articles = append(articles, getOneArticleFromAPI(id))
}
// duration := time.Since(startTime)
// fmt.Printf("Got %v articles in %v. \n", len(ids), duration)
return articles
}
// Делает параллельные запросы к API возвращая массив пар:
// [ [id, text], [id,text],...]
func getAPITextsParallel(ids []string, showTiming bool) [][]string {
startTime := time.Now()
articles := make([][]string, 0)
ch := make(chan []string)
for _, id := range ids {
go func(id string) {
ch <- getOneArticleFromAPI(id)
}(id)
}
for range ids {
v := <-ch
articles = append(articles, v)
}
close(ch)
if showTiming {
fmt.Printf("Got %v articles in %v. \n", len(ids), time.Since(startTime))
}
return articles
}
// Возвращает id материала и его текст в виде [id, text] из API
func getOneArticleFromAPI(id string) []string {
client := http.Client{
Timeout: time.Duration(requestTimeout) * time.Second,
}
req, err := http.NewRequest("GET", fmt.Sprintf(urlArticle, id), nil)
if err != nil {
fmt.Println(err)
}
req.Close = true
req.Header.Set("Connection", "close")
resp, err := client.Do(req)
// resp, err := http.Get(fmt.Sprintf(urlArticle, id))
if err != nil {
fmt.Println(err)
return []string{id, ""}
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println(err)
return []string{id, ""}
}
s := string(body)
return []string{id, s}
}
// Преобразует массив текстов в массив записей.
// Запись это отображение: имя_поля -> значение_поля
func textsToArticleRecords(texts [][]string) []map[string]interface{} {
records := make([]map[string]interface{}, 0)
for _, o := range texts {
id := o[0]
text := o[1]
// record := map[string]string{"obj_id": id}
var objmap map[string]interface{} //json.RawMessage
err := json.Unmarshal([]byte(text), &objmap)
if err != nil {
fmt.Println(err)
objmap = make(map[string]interface{})
objmap["obj_id"] = id
objmap["migration_status"] = "error"
} else {
objmap["migration_status"] = "success"
}
records = append(records, objmap)
}
return records
}
// Сохраняет массив записей в базу данных.
// Запись представляет собой map[string]interface{}.
func saveArticlesToDatabase(records []map[string]interface{}, showTiming bool) {
startTime := time.Now()
paramsArray := make([][]interface{}, 0)
for _, record := range records {
params := make([]interface{}, 0)
params = append(params | // отдыхаем
time.Sleep(sleepTime)
// Берем следующую порцию идентификаторов
ids = getArticleIds(batchSize, showTiming)
} | random_line_split |
segment_all_data.py | 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
def filter_bad_values(events):
events2 = [item for item in events if item[key_value] > 0]
return events2
'''
take list of events, find gaps of time in events, and use those gaps
to split the events into segments of events where there is activity
'''
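# dict_of_lists maps an id to {'pill': (timestamps, values),
#                              'sense': (timestamps, temperatures, humidities, lights)}
# each returned segment is a dict with the same 'pill'/'sense' layout plus the id under key_id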
def segment(dict_of_lists):
segments = []
for key in dict_of_lists:
pilllists = dict_of_lists[key]['pill']
senselist = dict_of_lists[key]['sense']
timelist = pilllists[0]
valuelist = pilllists[1]
sensetimes = senselist[0]
temperatures = senselist[1]
humidities = senselist[2]
lights = senselist[3]
t1_list = []
t2_list = []
if len(timelist) == 0:
continue
seg_t1 = timelist[0]
t1 = seg_t1
last_t2 = t1
is_one_segment_found = False
for t in timelist:
t2 = t
dt = float(t2-t1)*k_conversion_factor
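            # dt: gap since the previous pill event, converted to minutes;
            # a gap larger than k_segment_split_duaration closes the current segment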
if dt > k_segment_split_duaration:
seg_t2 = last_t1
t1_list.append(seg_t1)
t2_list.append(seg_t2)
seg_t1 = t2
is_one_segment_found = True
last_t1 = t1
t1 = t2
if not is_one_segment_found:
seg_t2 = last_t1
dt = seg_t2 - seg_t1
t1_list.append(seg_t1)
t2_list.append(seg_t2)
seg_t1 = t2
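        # slice the pill and sense streams for each (t1, t2) window found above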
for i in range(len(t1_list)):
segment_dict = {}
t1 = t1_list[i]
t2 = t2_list[i]
i1 = bisect(timelist, t1)
i2 = bisect(timelist, t2) + 1
segment_dict['pill'] = (timelist[i1:i2], valuelist[i1:i2])
j1 = bisect(sensetimes, t1)
j2 = bisect(sensetimes, t2) + 1
segment_dict['sense'] = (sensetimes[j1:j2],temperatures[j1:j2],humidities[j1:j2],lights[j1:j2])
segment_dict[key_id] = key
segments.append(segment_dict)
return segments
'''
build series of observation data at fixed intervals for training HMMs
'''
def compute_log_variance(x, logbase = 2.0, offset=1.0):
return numpy.log(numpy.var(x) + offset) / numpy.log(logbase)
def compute_log_range(x, logbase = 2.0, maxval=10.):
imin = numpy.argmin(x)
imax = numpy.argmax(x)
min = x[imin]
max = x[imax]
#if the max happened later than the min, then this was an increase
#we only are looking at lights out
if imax > imin:
range = 0
else:
range = max - min
fracchange = range / (min + 20)
fracchange = fracchange - 0.25
if fracchange < 0:
fracchange = 0
val = numpy.ceil(numpy.log(fracchange + 1) / numpy.log(logbase))
if val > maxval:
val = maxval
return val
def summarize(segments, interval_in_minutes):
if segments is None or len(segments) == 0:
return None
summary = []
for segment in segments:
times = segment['pill'][0]
values = segment['pill'][1]
id = segment[key_id]
sensetimes = segment['sense'][0]
humidities = segment['sense'][1]
temperatures = segment['sense'][2]
lights = segment['sense'][3]
if times is None or len(times) == 0:
continue
t0 = times[0]
tf = times[-1]
#get time in minutes from first
times = [(t - t0) * k_conversion_factor for t in times ]
sensetimes = [(t - t0) * k_conversion_factor for t in sensetimes]
#get index of each time point
indices = [int(t / interval_in_minutes) for t in times]
indices2 = [int(t / interval_in_minutes) for t in sensetimes]
if len(indices2) == 0 or len(indices) == 0:
continue
if indices2[-1] > indices[-1]:
maxidx = indices2[-1]
else:
maxidx = indices[-1]
mycounts = []
myenergies = []
mylight = []
mytimeofday = []
#create counts and energies arrays
for i in xrange(maxidx+1):
mycounts.append(0)
myenergies.append(0)
mylight.append(0)
mytimeofday.append(0)
#SUMMARIZE PILL DATA
for i in xrange(len(indices)):
idx = indices[i]
mycounts[idx] = mycounts[idx] + 1
myenergies[idx] = myenergies[idx] + values[i]
for i in range(len(myenergies)):
#transform energy output to a quantized log value
logval = int(numpy.ceil(numpy.log10(myenergies[i] + 1.0) ))
myenergies[i] = logval
for i in range(len(mycounts)):
logval = int(numpy.ceil(numpy.log(mycounts[i] + 1.0)/numpy.log(2.0) ))
mycounts[i] = logval
for i in range(len(mytimeofday)):
tt = t0 + interval_in_minutes*i*60
mytimeofday[i] = k_hour_mode_lookup[datetime.datetime.fromtimestamp(tt).hour]
#SUMMARIZE SENSE DATA
for idx in xrange(maxidx+1):
indices = [i for i in xrange(len(indices2)) if indices2[i] == idx]
lightvals = numpy.array(map(lights.__getitem__, indices))
if len(lightvals) == 0:
lightvals = numpy.array([0])
y = int(compute_log_range(lightvals, 3, 1.))
mylight[idx] = y
|
'''
remove segments that are too long or too short
those that are in the acceptable range, pad with zeros to fill out
the max length
'''
def enforce_summary_limits(summary, min_length, max_length):
summary2 = []
if summary is None:
print 'got a nonexistent summary. wat?'
return None
for item in summary:
counts = item[key_counts]
#reject
if len(counts) < min_length:
#print "rejecting %d length item, which was less than %d counts" % (len(counts), min_length)
continue
#reject
if len(counts) > max_length:
#print "rejecting %d length item, which was greater than %d counts" % (len(counts), max_length)
continue
summary2.append(deepcopy(item))
return summary2
def prepend_zeros(summary, numzeros, numzeros2):
for item in summary:
for key in item:
if key in k_sensor_keys:
thisvector = item[key]
item[key] = numpy.concatenate((numpy.zeros((numzeros, )), thisvector, numpy.zeros((numzeros2, ))))
def vectorize_measurements(summary):
meas = []
info = []
for item in summary:
e = item[key_energies]
c = item[key_counts]
l = item[key_lightvar]
id = item[key_id]
interval = item[key_interval]
label = None
if item.has_key(key_label):
label = item[key_label]
if len(e) != len(c):
print ("somehow, energies and counts are not the same length.")
continue
arr = numpy.array([e, c, l])
meas.append(arr)
info.append((id,interval,label))
return meas, info
def get_labels(summary, dict_of_lists):
for id in dict_of_lists:
if not dict_of_lists[id].has_key(key_survey):
continue
survey = dict_of_lists[id][key_survey]
#assume it's all sorted
matching_summaries = [s for s in summary if s[key_id] == id]
if len(matching_sum |
summary.append({key_counts : mycounts, key_energies : myenergies, key_lightvar : mylight, key_id : id, key_interval : (t0, tf)})
return summary | random_line_split |
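As a side note on the segment() logic above: it relies on bisect over a sorted timestamp list to cut out the [t1, t2] window without a linear scan. A minimal standalone sketch of that windowing idea (using bisect_left/bisect_right explicitly; the helper name window() is illustrative, not from the original file):
from bisect import bisect_left, bisect_right

def window(times, values, t1, t2):
    # times must be sorted ascending; values is aligned with times
    i1 = bisect_left(times, t1)   # first index with times[i] >= t1
    i2 = bisect_right(times, t2)  # first index with times[i] > t2
    return times[i1:i2], values[i1:i2]

# Example: window([10, 20, 30, 40], ['a', 'b', 'c', 'd'], 15, 35) -> ([20, 30], ['b', 'c'])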
segment_all_data.py | 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
def filter_bad_values(events):
events2 = [item for item in events if item[key_value] > 0]
return events2
'''
take list of events, find gaps of time in events, and use those gaps
to split the events into segments of events where there is activity
'''
def segment(dict_of_lists):
segments = []
for key in dict_of_lists:
pilllists = dict_of_lists[key]['pill']
senselist = dict_of_lists[key]['sense']
timelist = pilllists[0]
valuelist = pilllists[1]
sensetimes = senselist[0]
temperatures = senselist[1]
humidities = senselist[2]
lights = senselist[3]
t1_list = []
t2_list = []
if len(timelist) == 0:
continue
seg_t1 = timelist[0]
t1 = seg_t1
last_t2 = t1
is_one_segment_found = False
for t in timelist:
t2 = t
dt = float(t2-t1)*k_conversion_factor
if dt > k_segment_split_duaration:
seg_t2 = last_t1
t1_list.append(seg_t1)
t2_list.append(seg_t2)
seg_t1 = t2
is_one_segment_found = True
last_t1 = t1
t1 = t2
if not is_one_segment_found:
seg_t2 = last_t1
dt = seg_t2 - seg_t1
t1_list.append(seg_t1)
t2_list.append(seg_t2)
seg_t1 = t2
for i in range(len(t1_list)):
segment_dict = {}
t1 = t1_list[i]
t2 = t2_list[i]
i1 = bisect(timelist, t1)
i2 = bisect(timelist, t2) + 1
segment_dict['pill'] = (timelist[i1:i2], valuelist[i1:i2])
j1 = bisect(sensetimes, t1)
j2 = bisect(sensetimes, t2) + 1
segment_dict['sense'] = (sensetimes[j1:j2],temperatures[j1:j2],humidities[j1:j2],lights[j1:j2])
segment_dict[key_id] = key
segments.append(segment_dict)
return segments
'''
build series of observation data at fixed intervals for training HMMs
'''
def compute_log_variance(x, logbase = 2.0, offset=1.0):
return numpy.log(numpy.var(x) + offset) / numpy.log(logbase)
def compute_log_range(x, logbase = 2.0, maxval=10.):
imin = numpy.argmin(x)
imax = numpy.argmax(x)
min = x[imin]
max = x[imax]
#if the max happened later than the min, then this was an increase
#we only are looking at lights out
if imax > imin:
range = 0
else:
range = max - min
fracchange = range / (min + 20)
fracchange = fracchange - 0.25
if fracchange < 0:
fracchange = 0
val = numpy.ceil(numpy.log(fracchange + 1) / numpy.log(logbase))
if val > maxval:
val = maxval
return val
def summarize(segments, interval_in_minutes):
if segments is None or len(segments) == 0:
return None
summary = []
for segment in segments:
times = segment['pill'][0]
values = segment['pill'][1]
id = segment[key_id]
sensetimes = segment['sense'][0]
humidities = segment['sense'][1]
temperatures = segment['sense'][2]
lights = segment['sense'][3]
if times is None or len(times) == 0:
continue
t0 = times[0]
tf = times[-1]
#get time in minutes from first
times = [(t - t0) * k_conversion_factor for t in times ]
sensetimes = [(t - t0) * k_conversion_factor for t in sensetimes]
#get index of each time point
indices = [int(t / interval_in_minutes) for t in times]
indices2 = [int(t / interval_in_minutes) for t in sensetimes]
if len(indices2) == 0 or len(indices) == 0:
continue
if indices2[-1] > indices[-1]:
maxidx = indices2[-1]
else:
maxidx = indices[-1]
mycounts = []
myenergies = []
mylight = []
mytimeofday = []
#create counts and energies arrays
for i in xrange(maxidx+1):
mycounts.append(0)
myenergies.append(0)
mylight.append(0)
mytimeofday.append(0)
#SUMMARIZE PILL DATA
for i in xrange(len(indices)):
idx = indices[i]
mycounts[idx] = mycounts[idx] + 1
myenergies[idx] = myenergies[idx] + values[i]
for i in range(len(myenergies)):
#transform energy output to a quantized log value
logval = int(numpy.ceil(numpy.log10(myenergies[i] + 1.0) ))
myenergies[i] = logval
for i in range(len(mycounts)):
logval = int(numpy.ceil(numpy.log(mycounts[i] + 1.0)/numpy.log(2.0) ))
mycounts[i] = logval
for i in range(len(mytimeofday)):
tt = t0 + interval_in_minutes*i*60
mytimeofday[i] = k_hour_mode_lookup[datetime.datetime.fromtimestamp(tt).hour]
#SUMMARIZE SENSE DATA
for idx in xrange(maxidx+1):
indices = [i for i in xrange(len(indices2)) if indices2[i] == idx]
lightvals = numpy.array(map(lights.__getitem__, indices))
if len(lightvals) == 0:
lightvals = numpy.array([0])
y = int(compute_log_range(lightvals, 3, 1.))
mylight[idx] = y
summary.append({key_counts : mycounts, key_energies : myenergies, key_lightvar : mylight, key_id : id, key_interval : (t0, tf)})
return summary
'''
remove segments that are too long or too short
those that are in the acceptable range, pad with zeros to fill out
the max length
'''
def enforce_summary_limits(summary, min_length, max_length):
summary2 = []
if summary is None:
print 'got a nonexistent summary. wat?'
return None
for item in summary:
counts = item[key_counts]
#reject
if len(counts) < min_length:
#print "rejecting %d length item, which was less than %d counts" % (len(counts), min_length)
continue
#reject
if len(counts) > max_length:
#print "rejecting %d length item, which was greater than %d counts" % (len(counts), max_length)
continue
summary2.append(deepcopy(item))
return summary2
def prepend_zeros(summary, numzeros, numzeros2):
|
def vectorize_measurements(summary):
meas = []
info = []
for item in summary:
e = item[key_energies]
c = item[key_counts]
l = item[key_lightvar]
id = item[key_id]
interval = item[key_interval]
label = None
if item.has_key(key_label):
label = item[key_label]
if len(e) != len(c):
print ("somehow, energies and counts are not the same length.")
continue
arr = numpy.array([e, c, l])
meas.append(arr)
info.append((id,interval,label))
return meas, info
def get_labels(summary, dict_of_lists):
for id in dict_of_lists:
if not dict_of_lists[id].has_key(key_survey):
continue
survey = dict_of_lists[id][key_survey]
#assume it's all sorted
matching_summaries = [s for s in summary if s[key_id] == id]
if len(matching_sum | for item in summary:
for key in item:
if key in k_sensor_keys:
thisvector = item[key]
item[key] = numpy.concatenate((numpy.zeros((numzeros, )), thisvector, numpy.zeros((numzeros2, )))) | identifier_body |
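The prepend_zeros body shown above pads each sensor vector with zeros on both ends via numpy.concatenate. A small sketch of that padding step in isolation (pad_vector is a hypothetical name):
import numpy

def pad_vector(vec, n_front, n_back):
    # Returns a float array: n_front zeros, the original values, n_back zeros.
    return numpy.concatenate((numpy.zeros((n_front,)), numpy.asarray(vec, dtype=float), numpy.zeros((n_back,))))

# Example: pad_vector([1, 2, 3], 2, 1) -> array([0., 0., 1., 2., 3., 0.])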
segment_all_data.py | 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
def filter_bad_values(events):
events2 = [item for item in events if item[key_value] > 0]
return events2
'''
take list of events, find gaps of time in events, and use those gaps
to split the events into segments of events where there is activity
'''
def segment(dict_of_lists):
segments = []
for key in dict_of_lists:
pilllists = dict_of_lists[key]['pill']
senselist = dict_of_lists[key]['sense']
timelist = pilllists[0]
valuelist = pilllists[1]
sensetimes = senselist[0]
temperatures = senselist[1]
humidities = senselist[2]
lights = senselist[3]
t1_list = []
t2_list = []
if len(timelist) == 0:
continue
seg_t1 = timelist[0]
t1 = seg_t1
last_t2 = t1
is_one_segment_found = False
for t in timelist:
t2 = t
dt = float(t2-t1)*k_conversion_factor
if dt > k_segment_split_duaration:
seg_t2 = last_t1
t1_list.append(seg_t1)
t2_list.append(seg_t2)
seg_t1 = t2
is_one_segment_found = True
last_t1 = t1
t1 = t2
if not is_one_segment_found:
seg_t2 = last_t1
dt = seg_t2 - seg_t1
t1_list.append(seg_t1)
t2_list.append(seg_t2)
seg_t1 = t2
for i in range(len(t1_list)):
segment_dict = {}
t1 = t1_list[i]
t2 = t2_list[i]
i1 = bisect(timelist, t1)
i2 = bisect(timelist, t2) + 1
segment_dict['pill'] = (timelist[i1:i2], valuelist[i1:i2])
j1 = bisect(sensetimes, t1)
j2 = bisect(sensetimes, t2) + 1
segment_dict['sense'] = (sensetimes[j1:j2],temperatures[j1:j2],humidities[j1:j2],lights[j1:j2])
segment_dict[key_id] = key
segments.append(segment_dict)
return segments
'''
build series of observation data at fixed intervals for training HMMs
'''
def | (x, logbase = 2.0, offset=1.0):
return numpy.log(numpy.var(x) + offset) / numpy.log(logbase)
def compute_log_range(x, logbase = 2.0, maxval=10.):
imin = numpy.argmin(x)
imax = numpy.argmax(x)
min = x[imin]
max = x[imax]
#if the max happened later than the min, then this was an increase
#we only are looking at lights out
if imax > imin:
range = 0
else:
range = max - min
fracchange = range / (min + 20)
fracchange = fracchange - 0.25
if fracchange < 0:
fracchange = 0
val = numpy.ceil(numpy.log(fracchange + 1) / numpy.log(logbase))
if val > maxval:
val = maxval
return val
def summarize(segments, interval_in_minutes):
if segments is None or len(segments) == 0:
return None
summary = []
for segment in segments:
times = segment['pill'][0]
values = segment['pill'][1]
id = segment[key_id]
sensetimes = segment['sense'][0]
humidities = segment['sense'][1]
temperatures = segment['sense'][2]
lights = segment['sense'][3]
if times is None or len(times) == 0:
continue
t0 = times[0]
tf = times[-1]
#get time in minutes from first
times = [(t - t0) * k_conversion_factor for t in times ]
sensetimes = [(t - t0) * k_conversion_factor for t in sensetimes]
#get index of each time point
indices = [int(t / interval_in_minutes) for t in times]
indices2 = [int(t / interval_in_minutes) for t in sensetimes]
if len(indices2) == 0 or len(indices) == 0:
continue
if indices2[-1] > indices[-1]:
maxidx = indices2[-1]
else:
maxidx = indices[-1]
mycounts = []
myenergies = []
mylight = []
mytimeofday = []
#create counts and energies arrays
for i in xrange(maxidx+1):
mycounts.append(0)
myenergies.append(0)
mylight.append(0)
mytimeofday.append(0)
#SUMMARIZE PILL DATA
for i in xrange(len(indices)):
idx = indices[i]
mycounts[idx] = mycounts[idx] + 1
myenergies[idx] = myenergies[idx] + values[i]
for i in range(len(myenergies)):
#transform energy output to a quantized log value
logval = int(numpy.ceil(numpy.log10(myenergies[i] + 1.0) ))
myenergies[i] = logval
for i in range(len(mycounts)):
logval = int(numpy.ceil(numpy.log(mycounts[i] + 1.0)/numpy.log(2.0) ))
mycounts[i] = logval
for i in range(len(mytimeofday)):
tt = t0 + interval_in_minutes*i*60
mytimeofday[i] = k_hour_mode_lookup[datetime.datetime.fromtimestamp(tt).hour]
#SUMMARIZE SENSE DATA
for idx in xrange(maxidx+1):
indices = [i for i in xrange(len(indices2)) if indices2[i] == idx]
lightvals = numpy.array(map(lights.__getitem__, indices))
if len(lightvals) == 0:
lightvals = numpy.array([0])
y = int(compute_log_range(lightvals, 3, 1.))
mylight[idx] = y
summary.append({key_counts : mycounts, key_energies : myenergies, key_lightvar : mylight, key_id : id, key_interval : (t0, tf)})
return summary
'''
remove segments that are too long or too short
those that are in the acceptable range, pad with zeros to fill out
the max length
'''
def enforce_summary_limits(summary, min_length, max_length):
summary2 = []
if summary is None:
print 'got a nonexistent summary. wat?'
return None
for item in summary:
counts = item[key_counts]
#reject
if len(counts) < min_length:
#print "rejecting %d length item, which was less than %d counts" % (len(counts), min_length)
continue
#reject
if len(counts) > max_length:
#print "rejecting %d length item, which was greater than %d counts" % (len(counts), max_length)
continue
summary2.append(deepcopy(item))
return summary2
def prepend_zeros(summary, numzeros, numzeros2):
for item in summary:
for key in item:
if key in k_sensor_keys:
thisvector = item[key]
item[key] = numpy.concatenate((numpy.zeros((numzeros, )), thisvector, numpy.zeros((numzeros2, ))))
def vectorize_measurements(summary):
meas = []
info = []
for item in summary:
e = item[key_energies]
c = item[key_counts]
l = item[key_lightvar]
id = item[key_id]
interval = item[key_interval]
label = None
if item.has_key(key_label):
label = item[key_label]
if len(e) != len(c):
print ("somehow, energies and counts are not the same length.")
continue
arr = numpy.array([e, c, l])
meas.append(arr)
info.append((id,interval,label))
return meas, info
def get_labels(summary, dict_of_lists):
for id in dict_of_lists:
if not dict_of_lists[id].has_key(key_survey):
continue
survey = dict_of_lists[id][key_survey]
#assume it's all sorted
matching_summaries = [s for s in summary if s[key_id] == id]
if len(matching_sum | compute_log_variance | identifier_name |
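summarize() above squashes per-interval counts and energies onto a coarse integer log scale (base 2 for counts, base 10 for energies). The bucketing step on its own, as a sketch with an illustrative helper name:
import numpy

def log_bucket(value, base):
    # The +1 keeps zero in bucket 0; ceil yields small integer buckets.
    return int(numpy.ceil(numpy.log(value + 1.0) / numpy.log(base)))

# Examples: log_bucket(0, 2) -> 0, log_bucket(3, 2) -> 2, log_bucket(99, 10) -> 2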
segment_all_data.py | 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
def filter_bad_values(events):
events2 = [item for item in events if item[key_value] > 0]
return events2
'''
take list of events, find gaps of time in events, and use those gaps
to split the events into segments of events where there is activity
'''
def segment(dict_of_lists):
segments = []
for key in dict_of_lists:
| last_t2 = t1
is_one_segment_found = False
for t in timelist:
t2 = t
dt = float(t2-t1)*k_conversion_factor
if dt > k_segment_split_duaration:
seg_t2 = last_t1
t1_list.append(seg_t1)
t2_list.append(seg_t2)
seg_t1 = t2
is_one_segment_found = True
last_t1 = t1
t1 = t2
if not is_one_segment_found:
seg_t2 = last_t1
dt = seg_t2 - seg_t1
t1_list.append(seg_t1)
t2_list.append(seg_t2)
seg_t1 = t2
for i in range(len(t1_list)):
segment_dict = {}
t1 = t1_list[i]
t2 = t2_list[i]
i1 = bisect(timelist, t1)
i2 = bisect(timelist, t2) + 1
segment_dict['pill'] = (timelist[i1:i2], valuelist[i1:i2])
j1 = bisect(sensetimes, t1)
j2 = bisect(sensetimes, t2) + 1
segment_dict['sense'] = (sensetimes[j1:j2],temperatures[j1:j2],humidities[j1:j2],lights[j1:j2])
segment_dict[key_id] = key
segments.append(segment_dict)
return segments
'''
build series of observation data at fixed intervals for training HMMs
'''
def compute_log_variance(x, logbase = 2.0, offset=1.0):
return numpy.log(numpy.var(x) + offset) / numpy.log(logbase)
def compute_log_range(x, logbase = 2.0, maxval=10.):
imin = numpy.argmin(x)
imax = numpy.argmax(x)
min = x[imin]
max = x[imax]
#if the max happened later than the min, then this was an increase
#we only are looking at lights out
if imax > imin:
range = 0
else:
range = max - min
fracchange = range / (min + 20)
fracchange = fracchange - 0.25
if fracchange < 0:
fracchange = 0
val = numpy.ceil(numpy.log(fracchange + 1) / numpy.log(logbase))
if val > maxval:
val = maxval
return val
def summarize(segments, interval_in_minutes):
if segments is None or len(segments) == 0:
return None
summary = []
for segment in segments:
times = segment['pill'][0]
values = segment['pill'][1]
id = segment[key_id]
sensetimes = segment['sense'][0]
humidities = segment['sense'][1]
temperatures = segment['sense'][2]
lights = segment['sense'][3]
if times is None or len(times) == 0:
continue
t0 = times[0]
tf = times[-1]
#get time in minutes from first
times = [(t - t0) * k_conversion_factor for t in times ]
sensetimes = [(t - t0) * k_conversion_factor for t in sensetimes]
#get index of each time point
indices = [int(t / interval_in_minutes) for t in times]
indices2 = [int(t / interval_in_minutes) for t in sensetimes]
if len(indices2) == 0 or len(indices) == 0:
continue
if indices2[-1] > indices[-1]:
maxidx = indices2[-1]
else:
maxidx = indices[-1]
mycounts = []
myenergies = []
mylight = []
mytimeofday = []
#create counts and energies arrays
for i in xrange(maxidx+1):
mycounts.append(0)
myenergies.append(0)
mylight.append(0)
mytimeofday.append(0)
#SUMMARIZE PILL DATA
for i in xrange(len(indices)):
idx = indices[i]
mycounts[idx] = mycounts[idx] + 1
myenergies[idx] = myenergies[idx] + values[i]
for i in range(len(myenergies)):
#transform energy output to a quantized log value
logval = int(numpy.ceil(numpy.log10(myenergies[i] + 1.0) ))
myenergies[i] = logval
for i in range(len(mycounts)):
logval = int(numpy.ceil(numpy.log(mycounts[i] + 1.0)/numpy.log(2.0) ))
mycounts[i] = logval
for i in range(len(mytimeofday)):
tt = t0 + interval_in_minutes*i*60
mytimeofday[i] = k_hour_mode_lookup[datetime.datetime.fromtimestamp(tt).hour]
#SUMMARIZE SENSE DATA
for idx in xrange(maxidx+1):
indices = [i for i in xrange(len(indices2)) if indices2[i] == idx]
lightvals = numpy.array(map(lights.__getitem__, indices))
if len(lightvals) == 0:
lightvals = numpy.array([0])
y = int(compute_log_range(lightvals, 3, 1.))
mylight[idx] = y
summary.append({key_counts : mycounts, key_energies : myenergies, key_lightvar : mylight, key_id : id, key_interval : (t0, tf)})
return summary
'''
remove segments that are too long or too short
those that are in the acceptable range, pad with zeros to fill out
the max length
'''
def enforce_summary_limits(summary, min_length, max_length):
summary2 = []
if summary is None:
print 'got a nonexistent summary. wat?'
return None
for item in summary:
counts = item[key_counts]
#reject
if len(counts) < min_length:
#print "rejecting %d length item, which was less than %d counts" % (len(counts), min_length)
continue
#reject
if len(counts) > max_length:
#print "rejecting %d length item, which was greater than %d counts" % (len(counts), max_length)
continue
summary2.append(deepcopy(item))
return summary2
def prepend_zeros(summary, numzeros, numzeros2):
for item in summary:
for key in item:
if key in k_sensor_keys:
thisvector = item[key]
item[key] = numpy.concatenate((numpy.zeros((numzeros, )), thisvector, numpy.zeros((numzeros2, ))))
def vectorize_measurements(summary):
meas = []
info = []
for item in summary:
e = item[key_energies]
c = item[key_counts]
l = item[key_lightvar]
id = item[key_id]
interval = item[key_interval]
label = None
if item.has_key(key_label):
label = item[key_label]
if len(e) != len(c):
print ("somehow, energies and counts are not the same length.")
continue
arr = numpy.array([e, c, l])
meas.append(arr)
info.append((id,interval,label))
return meas, info
def get_labels(summary, dict_of_lists):
for id in dict_of_lists:
if not dict_of_lists[id].has_key(key_survey):
continue
survey = dict_of_lists[id][key_survey]
#assume it's all sorted
matching_summaries = [s for s in summary if s[key_id] == id]
if len(matching_summaries | pilllists = dict_of_lists[key]['pill']
senselist = dict_of_lists[key]['sense']
timelist = pilllists[0]
valuelist = pilllists[1]
sensetimes = senselist[0]
temperatures = senselist[1]
humidities = senselist[2]
lights = senselist[3]
t1_list = []
t2_list = []
if len(timelist) == 0:
continue
seg_t1 = timelist[0]
t1 = seg_t1 | conditional_block |
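The docstring and loop in segment() above split the event stream wherever the gap between consecutive timestamps exceeds a threshold. A compact sketch of that gap-splitting idea on a plain sorted list (the threshold here is illustrative, not the original k_segment_split_duaration value):
def split_on_gaps(times, max_gap):
    # times must be sorted ascending; returns a list of contiguous segments.
    segments, current = [], []
    for t in times:
        if current and t - current[-1] > max_gap:
            segments.append(current)
            current = []
        current.append(t)
    if current:
        segments.append(current)
    return segments

# Example: split_on_gaps([1, 2, 3, 50, 51, 120], 10) -> [[1, 2, 3], [50, 51], [120]]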
listparser.py | а итогов голосования по мажоритарной системе выборов(Протокол №1)',
u'Сводная таблица результатов выборов',
u'Сводная таблица итогов голосования',
u'Сводный отчет об итогах голосования',
u'Сводная таблица о результатах выборов']
PATTERNS_TO_EXCLUDE = [u'одномандатн', u'мажоритарн']
nrec = 0
no_results_href = 0
class LoadRetryWithDifferentFormat(Exception):
pass
class LoadFailedDoNotRetry(Exception):
def __init__(self, url):
self.url = url
def __str__(self):
return repr(self.url)
class LoadFailedDifferentCandidates(Exception):
pass
class LoadErrorNoDataMarker(Exception):
def __init__(self, url):
self.url = url
def __str__(self):
return repr(self.url)
class LoadFailedEmptyCells(Exception):
def __init__(self, url):
self.url = url
def __str__(self):
return repr(self.url)
def check_len(rr_cc, target_len, error_message):
if len(rr_cc) != target_len:
print error_message
raise LoadRetryWithDifferentFormat
def check_lens(rr_cc, target_lens, error_message):
if len(rr_cc) not in target_lens:
print error_message
raise LoadRetryWithDifferentFormat
def check_text (r_c, target_texts, error_message):
if not any_text_is_there(target_texts, r_c.get_text()):
print error_message
print r_c.get_text().strip()
print repr(target_texts).decode('unicode-escape')
raise LoadRetryWithDifferentFormat
def check_not_empty(s, error_message):
if s == '':
print error_message
raise LoadRetryWithDifferentFormat
def make_link (href, title):
return "<a href=\'" + href + "\'>" + title + "</a>"
n_pages_got = 0
n_pages_exceptions = 0
n_pages_retried = 0
n_pages_got_after_retries = 0
def print_parser_stats():
print 'n_pages_got', n_pages_got
print 'n_pages_exceptions', n_pages_exceptions
print 'n_pages_retried', n_pages_retried
print 'n_pages_got_after_retries', n_pages_got_after_retries
return
def get_safe(link):
global n_pages_got
global n_pages_exceptions
global n_pages_retried
global n_pages_got_after_retries
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'
}
n_attempts = 0
done = False
max_attempts = 1000
while n_attempts < max_attempts and not done:
try:
bad_page = True
n_retries = 0
max_retries = 7
while n_retries < max_retries and bad_page:
bad_page = False
sleep(1)
response = requests.get(link, headers, timeout=5)
page = BeautifulSoup(response.text,'html.parser')
for table in page.find_all('table'):
if not bad_page:
for r in table.find_all('tr'):
if not bad_page:
for c in r.find_all('td'):
if not bad_page and c.get_text().strip() == u'Нет данных для построения отчета.':
bad_page = True
if n_retries == 0:
n_pages_retried += 1
if bad_page:
n_retries += 1
if n_retries != 0 and not bad_page:
n_pages_got_after_retries += 1
done = True
n_pages_got += 1
except:
print 'requests.get failed, attempt ' + str(n_attempts)
sleep(3)
n_pages_exceptions += 1
pass
n_attempts += 1
if n_attempts == max_attempts:
print 'ERRORERROR: did not manage to get the data from url ' + link[0]
exit(1)
return response
def any_text_is_there(patterns, text):
t = text.lower().replace(' ', '')
for ptrn in patterns:
p = ptrn.lower()
p = p.replace(' ', '')
if re.search(p, t) is not None:
return True
return False
rs = 0
class HTMLListParser:
def __init__(self):
return
def get_level(self, table):
ss = table.find_all('select')
level = None
for s in ss:
if s.attrs['name'] == 'urovproved':
options = s.find_all('option')
for opt in options:
if 'selected' in opt.attrs:
t = opt.get_text().lower()
if t in levels:
if level is None:
level = levels[t]
else:
print 'ERRORERROR: Cannot work with several elections levels simultaneously, please select one'
exit(1)
return level
def parse_elections_list_file(self, file):
f = codecs.open(file, encoding='windows-1251')
d = f.read()
soup = BeautifulSoup(d, 'html.parser')
f.close()
take_next = False
for table in soup.find_all('table'):
if re.search(u'Уровень выборов', table.get_text()) is not None:
if len(table.find_all('table')) == 0:
level = self. | level is None or level == '':
print('ERRORERROR: No level for elections list')
exit(1)
elections_list['level'] = level
return elections_list
# find the innermost table with this text
if re.search(u'Всего найдено записей:', table.get_text()) is not None:
if len(table.find_all('table')) == 0:
take_next = True
return None
def get_results_href(self, href):
response = get_safe(href)
soup = BeautifulSoup(response.text, 'html.parser')
global rs
rs += 1
if rs == 24:
pass
results_hrefs = []
for table in soup.find_all('table'):
if any_text_is_there(PATTERNS_LINKS_TO_RESULT_DATA, table.get_text()):
# Look for innermost table with ^^ header
if len(table.find_all('table')) == 0:
rr = table.find_all('tr')
for r in rr:
cc = r.find_all('td')
check_lens(cc, [1,2], 'Must be 1 or 2 columns here, exiting')
for c in cc:
if any_text_is_there(PATTERNS_LINKS_TO_RESULT_DATA, c.get_text()):
links = c.find_all('a', href=True)
check_len(links, 1, 'Must be 1 link here')
for l in links:
results_hrefs.append ({'href':l['href'], 'title':l.text})
# If empty - exception
if len(results_hrefs) == 0:
print 'Did not find results_href, ERRORERROR'
raise LoadFailedDoNotRetry(href)
# If one link - return it
elif len(results_hrefs) == 1:
return results_hrefs[0]['href']
# If there are several protocols (links), try to return one which does not contain patterns to exclude
# If did not manage, return the last one (it usually contains links to results we need)
else:
for r in results_hrefs:
if not any_text_is_there(PATTERNS_TO_EXCLUDE, r['title']):
return r['href']
return results_hrefs[len(results_hrefs) - 1]['href']
return None
def parse_elections_list_row(self, elections_list, rr, n_filtered_out, nr):
global nrec
global no_results_href
# date
cc = rr[nrec].find_all('td')
check_len(cc, 1, 'Must be 1 column here')
dt = cc[0].get_text().strip()
nrec += 1
cc = rr[nrec].find_all('td')
region = ''
while (nrec < nr) and (len(cc) == 2):
if cc[0].get_text().strip() != '':
region = cc[0].get_text().strip()
if region == '':
print 'ERRORERROR: Empty region, exiting'
exit(1)
links = cc[1].find_all('a', href=True)
href = links[0]['href']
title = links[0].text.strip()
print 'Region: ' + region + ' title: ' + title + ' date: ' + dt + ', row ' + str(nrec) + ' out of ' + str(nr)
try:
results_href = self.get_results_href(href)
rec = {'date': dt, 'generic_href': href | get_level(table)
if take_next:
elections_list = self.parse_elections_list_table(table)
if | conditional_block |
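The parser code above repeatedly applies one trick: to find the innermost <table> containing a given text, it scans every table and keeps only those with no nested tables. A condensed sketch of that selection idea, assuming BeautifulSoup is available (find_innermost_tables is a hypothetical helper name):
from bs4 import BeautifulSoup

def find_innermost_tables(html, needle):
    # Keep tables whose text contains `needle` and which contain no nested <table>.
    soup = BeautifulSoup(html, 'html.parser')
    return [t for t in soup.find_all('table')
            if needle in t.get_text() and len(t.find_all('table')) == 0]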
listparser.py | таблица итогов голосования по мажоритарной системе выборов(Протокол №1)',
u'Сводная таблица результатов выборов',
u'Сводная таблица итогов голосования',
u'Сводный отчет об итогах голосования',
u'Сводная таблица о результатах выборов']
PATTERNS_TO_EXCLUDE = [u'одномандатн', u'мажоритарн']
nrec = 0
no_results_href = 0
class LoadRetryWithDifferentFormat(Exception):
pass
class LoadFailedDoNotRetry(Exception):
def __init__(self, url):
self.url = url
def __str__(self):
return repr(self.url)
class LoadFailedDifferentCandidates(Exception):
pass
class LoadErrorNoDataMarker(Exception):
def __init__(self, url):
self.url = url
def __str__(self):
return repr(self.url)
class LoadFailedEmptyCells(Exception):
def __init__(self, url):
self.url = url
def __str__(self):
return repr(self.url)
def check_len(rr_cc, target_len, error_message):
if len(rr_cc) != target_len:
print error_message
raise LoadRetryWithDifferentFormat
def check_lens(rr_cc, target_lens, error_message):
if len(rr_cc) not in target_lens:
print error_message
raise LoadRetryWithDifferentFormat
def check_text (r_c, target_texts, error_message):
if not any_text_is_there(target_texts, r_c.get_text()):
print error_message
print r_c.get_text().strip()
print repr(target_texts).decode('unicode-escape')
raise LoadRetryWithDifferentFormat
def check_not_empty(s, error_message):
if s == '':
print error_message
raise LoadRetryWithDifferentFormat
def make_link (href, title):
return "<a href=\'" + href + "\'>" + title + "</a>"
n_pages_got = 0
n_pages_exceptions = 0
n_pages_retried = 0
n_pages_got_after_retries = 0
def print_parser_stats():
print 'n_pages_got', n_pages_got
print 'n_pages_exceptions', n_pages_exceptions
print 'n_pages_retried', n_pages_retried
print 'n_pages_got_after_retries', n_pages_got_after_retries
return
def get_safe(link):
global n_pages_got
global n_pages_exceptions
global n_pages_retried
global n_pages_got_after_retries
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'
}
n_attempts = 0
done = False
max_attempts = 1000
while n_attempts < max_attempts and not done:
try:
bad_page = True
n_retries = 0
max_retries = 7
while n_retries < max_retries and bad_page:
bad_page = False
sleep(1)
response = requests.get(link, headers, timeout=5)
page = BeautifulSoup(response.text,'html.parser')
for table in page.find_all('table'):
if not bad_page:
for r in table.find_all('tr'):
if not bad_page:
for c in r.find_all('td'):
if not bad_page and c.get_text().strip() == u'Нет данных для построения отчета.':
bad_page = True
if n_retries == 0:
n_pages_retried += 1
if bad_page:
n_retries += 1
if n_retries != 0 and not bad_page:
n_pages_got_after_retries += 1
done = True
n_pages_got += 1
except:
print 'requests.get failed, attempt ' + str(n_attempts)
sleep(3)
n_pages_exceptions += 1
pass
n_attempts += 1
if n_attempts == max_attempts:
print 'ERRORERROR: did not manage to get the data from url ' + link[0]
exit(1)
return response
def any_text_is_there(patterns, text):
t = text.lower().replace(' ', '')
for ptrn in patterns:
p = ptrn.lower()
p = p.replace(' ', '')
if re.search(p, t) is not None:
return True
return False
rs = 0
class HTMLListParser:
def __init__(self):
return
def get_level(self, table):
ss = table.find_all('select')
level = None
for s in ss:
if s.attrs['name'] == 'urovproved':
options = s.find_all('option')
for opt in options:
if 'selected' in opt.attrs:
t = opt.get_text().lower()
if t in levels:
if level is None:
level = levels[t]
else:
print 'ERRORERROR: Cannot work with several elections levels simultaneously, please select one'
exit(1)
return level
def parse_elections_list_file(self, file):
f = codecs.open(file, encoding='windows-1251')
d = f.read()
soup = BeautifulSoup(d, 'html.parser')
f.close()
take_next = False
for table in soup.find_all('table'):
if re.search(u'Уровень выборов', table.get_text()) is not None:
if len(table.find_all('table')) == 0:
level = self.get_level(table)
if take_next:
elections_list = self.parse_elections_list_table(table)
if level is None or level == '':
print('ERRORERROR: No level for elections list')
exit(1)
elections_list['level'] = level
return elections_list
# find the innermost table with this text
if re.search(u'Всего найдено записей:', table.get_text()) is not None:
if len(table.find_all('table')) == 0:
take_next = True
return None
def get_results_href(self, href):
response = get_safe(href)
soup = BeautifulSoup(response.text, 'html.parser')
global rs
rs += 1
if rs == 24:
pass
results_hrefs = []
for table in soup.find_all('table'):
if any_text_is_there(PATTERNS_LINKS_TO_RESULT_DATA, table.get_text()):
# Look for innermost table with ^^ header
if len(table.find_all('table')) == 0:
rr = table.find_all('tr')
for r in rr:
cc = r.find_all('td')
check_lens(cc, [1,2], 'Must be 1 or 2 columns here, exiting')
for c in cc:
if any_text_is_there(PATTERNS_LINKS_TO_RESULT_DATA, c.get_text()):
links = c.find_all('a', href=True)
check_len(links, 1, 'Must be 1 link here')
for l in links: | if len(results_hrefs) == 0:
print 'Did not find results_href, ERRORERROR'
raise LoadFailedDoNotRetry(href)
# If one link - return it
elif len(results_hrefs) == 1:
return results_hrefs[0]['href']
# If there are several protocols (links), try to return one which does not contain patterns to exclude
# If did not manage, return the last one (it usually contains links to results we need)
else:
for r in results_hrefs:
if not any_text_is_there(PATTERNS_TO_EXCLUDE, r['title']):
return r['href']
return results_hrefs[len(results_hrefs) - 1]['href']
return None
def parse_elections_list_row(self, elections_list, rr, n_filtered_out, nr):
global nrec
global no_results_href
# date
cc = rr[nrec].find_all('td')
check_len(cc, 1, 'Must be 1 column here')
dt = cc[0].get_text().strip()
nrec += 1
cc = rr[nrec].find_all('td')
region = ''
while (nrec < nr) and (len(cc) == 2):
if cc[0].get_text().strip() != '':
region = cc[0].get_text().strip()
if region == '':
print 'ERRORERROR: Empty region, exiting'
exit(1)
links = cc[1].find_all('a', href=True)
href = links[0]['href']
title = links[0].text.strip()
print 'Region: ' + region + ' title: ' + title + ' date: ' + dt + ', row ' + str(nrec) + ' out of ' + str(nr)
try:
results_href = self.get_results_href(href)
rec = {'date': dt, 'generic_href': href, ' | results_hrefs.append ({'href':l['href'], 'title':l.text})
# If empty - exception | random_line_split |
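any_text_is_there() above normalizes both the patterns and the page text (lowercase, spaces stripped) before running a regex search. A standalone sketch of that normalize-then-search idea (matches_any is an illustrative name):
import re

def matches_any(patterns, text):
    # Case- and whitespace-insensitive match of any pattern against the text.
    haystack = text.lower().replace(' ', '')
    return any(re.search(p.lower().replace(' ', ''), haystack) for p in patterns)

# Example: matches_any([u'Сводная таблица'], u'СВОДНАЯ  ТАБЛИЦА результатов') -> True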
listparser.py | а итогов голосования по мажоритарной системе выборов(Протокол №1)',
u'Сводная таблица результатов выборов',
u'Сводная таблица итогов голосования',
u'Сводный отчет об итогах голосования',
u'Сводная таблица о результатах выборов']
PATTERNS_TO_EXCLUDE = [u'одномандатн', u'мажоритарн']
nrec = 0
no_results_href = 0
class LoadRetryWithDifferentFormat(Exception):
pass
class LoadFailedDoNotRetry(Exception):
def __init__(self, url):
self.url = url
def __str__(self):
return repr(self.url)
class LoadFailedDifferentCandidates(Exception):
pass
class LoadErrorNoDataMarker(Exception):
def __init__(self, url):
self.url = url
def __str__(self):
return repr(self.url)
class LoadFailedEmptyCells(Exception):
def __init__(self, url):
self.url = url
def __str__(self):
return repr(self.url)
def check_len(rr_cc, target_len, error_message):
if len(rr_cc) != target_len:
print error_message
raise LoadRetryWithDifferentFormat
def check_lens(rr_cc, target_lens, error_message):
if len(rr_cc) not in target_lens:
print error_message
raise LoadRetryWithDifferentFormat
def check_text (r_c, target_texts, error_message):
if not any_text_is_there(target_texts, r_c.get_text()):
print error_message
print r_c.get_text().strip()
print repr(target_texts).decode('unicode-escape')
raise LoadRetryWithDifferentFormat
def check_not_empty(s, error_message):
if s == '':
print error_message
raise LoadRetryWithDifferentFormat
def make_link (href, title):
return "<a href=\'" + href + "\'>" + title + "</a>"
n_pages_got = 0
n_pages_exceptions = 0
n_pages_retried = 0
n_pages_got_after_retries = 0
def print_parser_stats():
print 'n_pages_got', n_pages_got
print 'n_pages_exceptions', n_pages_exceptions
print 'n_pages_retried', n_pages_retried
print 'n_pages_got_after_retries', n_pages_got_after_retries
return
def get_safe(link):
global n_pages_got
global n_pages_exceptions
global n_pages_retried
global n_pages_got_after_retries
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'
}
n_attempts = 0
done = False
max_attempts = 1000
while n_attempts < max_attempts and not done:
try:
bad_page = True
n_retries = 0
max_retries = 7
while n_retries < max_retries and bad_page:
bad_page = False
sleep(1)
response = requests.get(link, headers, timeout=5)
page = BeautifulSoup(response.text,'html.parser')
for table in page.find_all('table'):
if not bad_page:
for r in table.find_all('tr'):
if not bad_page:
for c in r.find_all('td'):
if not bad_page and c.get_text().strip() == u'Нет данных для построения отчета.':
bad_page = True
if n_retries == 0:
n_pages_retried += 1
if bad_page:
n_retries += 1
if n_retries != 0 and not bad_page:
n_pages_got_after_retries += 1
done = True
n_pages_got += 1
except:
print 'requests.get failed, attempt ' + str(n_attempts)
sleep(3)
n_pages_exceptions += 1
pass
n_attempts += 1
if n_attempts == max_attempts:
print 'ERRORERROR: did not manage to get the data from url ' + link[0]
exit(1)
return response
def any_text_is_there(patterns, text):
t = text.lower().replace(' ', '')
for ptrn in patterns:
p = ptrn.lower()
p = p.replace(' ', '')
if re.search(p, t) is not None:
return True
return False
rs = 0
class HTMLListParser:
def __init__(self):
return
def get_level(self, table):
ss = table.find_all('select')
level = None
for s in ss:
if s.attrs['name'] == 'urovproved':
options = s.find_all('option')
for opt in options:
if 'selected' in opt.attrs:
t = opt.get_text().lower()
if t in levels:
if level is None:
level = levels[t]
else:
print 'ERRORERROR: Cannot work with several elections levels simultaneously, | l == '':
print('ERRORERROR: No level for elections list')
exit(1)
elections_list['level'] = level
return elections_list
# find the innermost table with this text
if re.search(u'Всего найдено записей:', table.get_text()) is not None:
if len(table.find_all('table')) == 0:
take_next = True
return None
def get_results_href(self, href):
response = get_safe(href)
soup = BeautifulSoup(response.text, 'html.parser')
global rs
rs += 1
if rs == 24:
pass
results_hrefs = []
for table in soup.find_all('table'):
if any_text_is_there(PATTERNS_LINKS_TO_RESULT_DATA, table.get_text()):
# Look for innermost table with ^^ header
if len(table.find_all('table')) == 0:
rr = table.find_all('tr')
for r in rr:
cc = r.find_all('td')
check_lens(cc, [1,2], 'Must be 1 or 2 columns here, exiting')
for c in cc:
if any_text_is_there(PATTERNS_LINKS_TO_RESULT_DATA, c.get_text()):
links = c.find_all('a', href=True)
check_len(links, 1, 'Must be 1 link here')
for l in links:
results_hrefs.append ({'href':l['href'], 'title':l.text})
# If empty - exception
if len(results_hrefs) == 0:
print 'Did not find results_href, ERRORERROR'
raise LoadFailedDoNotRetry(href)
# If one link - return it
elif len(results_hrefs) == 1:
return results_hrefs[0]['href']
# If there are several protocols (links), try to return one which does not contain patterns to exclude
# If did not manage, return the last one (it usually contains links to results we need)
else:
for r in results_hrefs:
if not any_text_is_there(PATTERNS_TO_EXCLUDE, r['title']):
return r['href']
return results_hrefs[len(results_hrefs) - 1]['href']
return None
def parse_elections_list_row(self, elections_list, rr, n_filtered_out, nr):
global nrec
global no_results_href
# date
cc = rr[nrec].find_all('td')
check_len(cc, 1, 'Must be 1 column here')
dt = cc[0].get_text().strip()
nrec += 1
cc = rr[nrec].find_all('td')
region = ''
while (nrec < nr) and (len(cc) == 2):
if cc[0].get_text().strip() != '':
region = cc[0].get_text().strip()
if region == '':
print 'ERRORERROR: Empty region, exiting'
exit(1)
links = cc[1].find_all('a', href=True)
href = links[0]['href']
title = links[0].text.strip()
print 'Region: ' + region + ' title: ' + title + ' date: ' + dt + ', row ' + str(nrec) + ' out of ' + str(nr)
try:
results_href = self.get_results_href(href)
rec = {'date': dt, 'generic_href': href, | please select one'
exit(1)
return level
def parse_elections_list_file(self, file):
f = codecs.open(file, encoding='windows-1251')
d = f.read()
soup = BeautifulSoup(d, 'html.parser')
f.close()
take_next = False
for table in soup.find_all('table'):
if re.search(u'Уровень выборов', table.get_text()) is not None:
if len(table.find_all('table')) == 0:
level = self.get_level(table)
if take_next:
elections_list = self.parse_elections_list_table(table)
if level is None or leve | identifier_body |
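get_safe() above wraps requests.get in two loops: an outer loop that retries on exceptions and an inner loop that re-fetches while the page still carries the 'no data' placeholder marker. A trimmed sketch of that bounded-retry shape (the marker text, limits and delays below are illustrative, not the original values):
import time
import requests

def get_with_retries(url, marker, max_retries=5, delay_seconds=1):
    # Re-fetch while the body still contains the placeholder marker.
    response = None
    for _ in range(max_retries):
        response = requests.get(url, timeout=5)
        if marker not in response.text:
            break
        time.sleep(delay_seconds)
    return response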
listparser.py | а итогов голосования по мажоритарной системе выборов(Протокол №1)',
u'Сводная таблица результатов выборов',
u'Сводная таблица итогов голосования',
u'Сводный отчет об итогах голосования',
u'Сводная таблица о результатах выборов']
PATTERNS_TO_EXCLUDE = [u'одномандатн', u'мажоритарн']
nrec = 0
no_results_href = 0
class LoadRetryWithDifferentFormat(Exception):
pass
class LoadFailedDoNotRetry(Exception):
def __init__(self, url):
self.url = url
def __str__(self):
return repr(self.url)
class LoadFailedDifferentCandidates(Exception):
pass
class LoadErrorNoDataMarker(Exception):
def __init__(self, url):
self.url = url
def __str__(self):
return repr(self.url)
class LoadFailedEmptyCells(Exception):
def __init__(self, url):
self.url = url
def __str__(self):
return repr(self.url)
def check_len(rr_cc, target_len, error_message):
if len(rr_cc) != target_len:
print er | age
raise LoadRetryWithDifferentFormat
def check_lens(rr_cc, target_lens, error_message):
if len(rr_cc) not in target_lens:
print error_message
raise LoadRetryWithDifferentFormat
def check_text (r_c, target_texts, error_message):
if not any_text_is_there(target_texts, r_c.get_text()):
print error_message
print r_c.get_text().strip()
print repr(target_texts).decode('unicode-escape')
raise LoadRetryWithDifferentFormat
def check_not_empty(s, error_message):
if s == '':
print error_message
raise LoadRetryWithDifferentFormat
def make_link (href, title):
return "<a href=\'" + href + "\'>" + title + "</a>"
n_pages_got = 0
n_pages_exceptions = 0
n_pages_retried = 0
n_pages_got_after_retries = 0
def print_parser_stats():
print 'n_pages_got', n_pages_got
print 'n_pages_exceptions', n_pages_exceptions
print 'n_pages_retried', n_pages_retried
print 'n_pages_got_after_retries', n_pages_got_after_retries
return
def get_safe(link):
global n_pages_got
global n_pages_exceptions
global n_pages_retried
global n_pages_got_after_retries
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'
}
n_attempts = 0
done = False
max_attempts = 1000
while n_attempts < max_attempts and not done:
try:
bad_page = True
n_retries = 0
max_retries = 7
while n_retries < max_retries and bad_page:
bad_page = False
sleep(1)
response = requests.get(link, headers, timeout=5)
page = BeautifulSoup(response.text,'html.parser')
for table in page.find_all('table'):
if not bad_page:
for r in table.find_all('tr'):
if not bad_page:
for c in r.find_all('td'):
if not bad_page and c.get_text().strip() == u'Нет данных для построения отчета.':
bad_page = True
if n_retries == 0:
n_pages_retried += 1
if bad_page:
n_retries += 1
if n_retries != 0 and not bad_page:
n_pages_got_after_retries += 1
done = True
n_pages_got += 1
except:
print 'requests.get failed, attempt ' + str(n_attempts)
sleep(3)
n_pages_exceptions += 1
pass
n_attempts += 1
if n_attempts == max_attempts:
print 'ERRORERROR: did not manage to get the data from url ' + link[0]
exit(1)
return response
def any_text_is_there(patterns, text):
t = text.lower().replace(' ', '')
for ptrn in patterns:
p = ptrn.lower()
p = p.replace(' ', '')
if re.search(p, t) is not None:
return True
return False
rs = 0
class HTMLListParser:
def __init__(self):
return
def get_level(self, table):
ss = table.find_all('select')
level = None
for s in ss:
if s.attrs['name'] == 'urovproved':
options = s.find_all('option')
for opt in options:
if 'selected' in opt.attrs:
t = opt.get_text().lower()
if t in levels:
if level is None:
level = levels[t]
else:
print 'ERRORERROR: Cannot work with several elections levels simultaneously, please select one'
exit(1)
return level
def parse_elections_list_file(self, file):
f = codecs.open(file, encoding='windows-1251')
d = f.read()
soup = BeautifulSoup(d, 'html.parser')
f.close()
take_next = False
for table in soup.find_all('table'):
if re.search(u'Уровень выборов', table.get_text()) is not None:
if len(table.find_all('table')) == 0:
level = self.get_level(table)
if take_next:
elections_list = self.parse_elections_list_table(table)
if level is None or level == '':
print('ERRORERROR: No level for elections list')
exit(1)
elections_list['level'] = level
return elections_list
# find the innermost table with this text
if re.search(u'Всего найдено записей:', table.get_text()) is not None:
if len(table.find_all('table')) == 0:
take_next = True
return None
def get_results_href(self, href):
response = get_safe(href)
soup = BeautifulSoup(response.text, 'html.parser')
global rs
rs += 1
if rs == 24:
pass
results_hrefs = []
for table in soup.find_all('table'):
if any_text_is_there(PATTERNS_LINKS_TO_RESULT_DATA, table.get_text()):
# Look for innermost table with ^^ header
if len(table.find_all('table')) == 0:
rr = table.find_all('tr')
for r in rr:
cc = r.find_all('td')
check_lens(cc, [1,2], 'Must be 1 or 2 columns here, exiting')
for c in cc:
if any_text_is_there(PATTERNS_LINKS_TO_RESULT_DATA, c.get_text()):
links = c.find_all('a', href=True)
check_len(links, 1, 'Must be 1 link here')
for l in links:
results_hrefs.append ({'href':l['href'], 'title':l.text})
# If empty - exception
if len(results_hrefs) == 0:
print 'Did not find results_href, ERRORERROR'
raise LoadFailedDoNotRetry(href)
# If one link - return it
elif len(results_hrefs) == 1:
return results_hrefs[0]['href']
# If there are several protocols (links), try to return one which does not contain patterns to exclude
# If did not manage, return the last one (it usually contains links to results we need)
else:
for r in results_hrefs:
if not any_text_is_there(PATTERNS_TO_EXCLUDE, r['title']):
return r['href']
return results_hrefs[len(results_hrefs) - 1]['href']
return None
def parse_elections_list_row(self, elections_list, rr, n_filtered_out, nr):
global nrec
global no_results_href
# date
cc = rr[nrec].find_all('td')
check_len(cc, 1, 'Must be 1 column here')
dt = cc[0].get_text().strip()
nrec += 1
cc = rr[nrec].find_all('td')
region = ''
while (nrec < nr) and (len(cc) == 2):
if cc[0].get_text().strip() != '':
region = cc[0].get_text().strip()
if region == '':
print 'ERRORERROR: Empty region, exiting'
exit(1)
links = cc[1].find_all('a', href=True)
href = links[0]['href']
title = links[0].text.strip()
print 'Region: ' + region + ' title: ' + title + ' date: ' + dt + ', row ' + str(nrec) + ' out of ' + str(nr)
try:
results_href = self.get_results_href(href)
rec = {'date': dt, 'generic_href': | ror_mess | identifier_name |
wasitests.rs | #[derive(Debug, Clone, PartialEq, Eq)]
pub struct NativeOutput {
stdout: String,
stderr: String,
result: i64,
}
/// Compile and execute the test file as native code, saving the results to be
/// compared against later.
///
/// This function attempts to clean up its output after it executes it.
fn generate_native_output(
temp_dir: &Path,
file: &str,
normalized_name: &str,
args: &[String],
options: &WasiOptions,
) -> io::Result<NativeOutput> {
let executable_path = temp_dir.join(normalized_name);
println!(
"Compiling program {} to native at {}",
file,
executable_path.to_string_lossy()
);
let native_out = Command::new("rustc")
.arg(file)
.arg("-o")
.args(args)
.arg(&executable_path)
.output()
.expect("Failed to compile program to native code");
util::print_info_on_error(&native_out, "COMPILATION FAILED");
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mut perm = executable_path
.metadata()
.expect("native executable")
.permissions();
perm.set_mode(0o766);
println!(
"Setting execute permissions on {}",
executable_path.to_string_lossy()
);
fs::set_permissions(&executable_path, perm)?;
}
println!(
"Executing native program at {}",
executable_path.to_string_lossy()
);
// workspace root
const EXECUTE_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi");
let mut native_command = Command::new(&executable_path)
.current_dir(EXECUTE_DIR)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
if let Some(stdin_str) = &options.stdin {
write!(native_command.stdin.as_ref().unwrap(), "{}", stdin_str).unwrap();
}
let result = native_command
.wait()
.expect("Failed to execute native program");
let stdout_str = {
let mut stdout = native_command.stdout.unwrap();
let mut s = String::new();
stdout.read_to_string(&mut s).unwrap();
s
};
let stderr_str = {
let mut stderr = native_command.stderr.unwrap();
let mut s = String::new();
stderr.read_to_string(&mut s).unwrap();
s
};
if !result.success() {
println!("NATIVE PROGRAM FAILED");
println!("stdout:\n{}", stdout_str);
eprintln!("stderr:\n{}", stderr_str);
}
let result = result.code().unwrap() as i64;
Ok(NativeOutput {
stdout: stdout_str,
stderr: stderr_str,
result,
})
}
/// compile the Wasm file for the given version of WASI
///
/// returns the path of where the wasm file is
fn compile_wasm_for_version(
temp_dir: &Path,
file: &str,
out_dir: &Path,
rs_mod_name: &str,
version: WasiVersion,
) -> io::Result<PathBuf> {
//let out_dir = base_dir; //base_dir.join("..").join(version.get_directory_name());
if !out_dir.exists() {
fs::create_dir(out_dir)?;
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name);
wasm_out_name.set_extension("wasm");
wasm_out_name
};
println!("Reading contents from file `{}`", file);
let file_contents: String = {
let mut fc = String::new();
let mut f = fs::OpenOptions::new().read(true).open(file)?;
f.read_to_string(&mut fc)?;
fc
};
let temp_wasi_rs_file_name = temp_dir.join(format!("wasi_modified_version_{}.rs", rs_mod_name));
{
let mut actual_file = fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&temp_wasi_rs_file_name)
.unwrap();
actual_file.write_all(file_contents.as_bytes()).unwrap();
}
println!(
"Compiling wasm module `{}` with toolchain `{}`",
&wasm_out_name.to_string_lossy(),
version.get_compiler_toolchain()
);
let mut command = Command::new("rustc");
command
.arg(format!("+{}", version.get_compiler_toolchain()))
.arg("--target=wasm32-wasi")
.arg("-C")
.arg("opt-level=z")
.arg(&temp_wasi_rs_file_name)
.arg("-o")
.arg(&wasm_out_name);
println!("Command {:?}", command);
let wasm_compilation_out = command.output().expect("Failed to compile program to wasm");
util::print_info_on_error(&wasm_compilation_out, "WASM COMPILATION");
println!(
"Removing file `{}`",
&temp_wasi_rs_file_name.to_string_lossy()
);
// to prevent committing huge binary blobs forever
let wasm_strip_out = Command::new("wasm-strip")
.arg(&wasm_out_name)
.output()
.expect("Failed to strip compiled wasm module");
util::print_info_on_error(&wasm_strip_out, "STRIPPING WASM");
let wasm_opt_out = Command::new("wasm-opt")
.arg("-Oz")
.arg(&wasm_out_name)
.arg("-o")
.arg(&wasm_out_name)
.output()
.expect("Failed to optimize compiled wasm module with wasm-opt!");
util::print_info_on_error(&wasm_opt_out, "OPTIMIZING WASM");
Ok(wasm_out_name)
}
/// Returns a Vec of the test modules created
fn compile(temp_dir: &Path, file: &str, wasi_versions: &[WasiVersion]) {
let src_code: String = fs::read_to_string(file).unwrap();
let options: WasiOptions = extract_args_from_source_file(&src_code).unwrap_or_default();
assert!(file.ends_with(".rs"));
let rs_mod_name = {
Path::new(&file.to_lowercase())
.file_stem()
.unwrap()
.to_string_lossy()
.to_string()
};
let base_dir = Path::new(file).parent().unwrap();
let NativeOutput {
stdout,
stderr,
result,
} = generate_native_output(temp_dir, file, &rs_mod_name, &options.args, &options)
.expect("Generate native output");
let test = WasiTest {
wasm_prog_name: format!("{}.wasm", rs_mod_name),
stdout,
stderr,
result,
options,
};
let test_serialized = test.into_wasi_wast();
println!("Generated test output: {}", &test_serialized);
wasi_versions
.iter()
.map(|&version| {
let out_dir = base_dir.join("..").join(version.get_directory_name());
if !out_dir.exists() {
fs::create_dir(&out_dir).unwrap();
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name.clone());
wasm_out_name.set_extension("wast");
wasm_out_name
};
println!("Writing test output to {}", wasm_out_name.to_string_lossy());
fs::write(&wasm_out_name, test_serialized.clone()).unwrap();
println!("Compiling wasm version {:?}", version);
compile_wasm_for_version(temp_dir, file, &out_dir, &rs_mod_name, version)
.unwrap_or_else(|_| panic!("Could not compile Wasm to WASI version {:?}, perhaps you need to install the `{}` rust toolchain", version, version.get_compiler_toolchain()));
}).for_each(drop); // Do nothing with it, but let the iterator be consumed/iterated.
}
const WASI_TEST_SRC_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi/tests/*.rs");
pub fn build(wasi_versions: &[WasiVersion], specific_tests: &[&str]) {
let temp_dir = tempfile::TempDir::new().unwrap();
for entry in glob(WASI_TEST_SRC_DIR).unwrap() {
match entry {
Ok(path) => {
let test = path.to_str().unwrap();
if !specific_tests.is_empty() {
if let Some(filename) = path.file_stem().and_then(|f| f.to_str()) {
if specific_tests.contains(&filename) {
compile(temp_dir.path(), test, wasi_versions);
}
}
} else {
compile(temp_dir.path(), test, wasi_versions);
}
}
Err(e) => println!("{:?}", e),
}
}
println!("All modules generated.");
}
/// This is the structure of the `.wast` file
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct WasiTest {
/// The name of the wasm module to run
pub wasm_prog_name: String,
/// The program expected output on stdout
pub stdout: String,
/// The program expected output on stderr
pub stderr: String,
/// The program expected result
pub result: i64 |
use super::util;
use super::wasi_version::*;
| random_line_split |
|
wasitests.rs | executable_path.to_string_lossy()
);
fs::set_permissions(&executable_path, perm)?;
}
println!(
"Executing native program at {}",
executable_path.to_string_lossy()
);
// workspace root
const EXECUTE_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi");
let mut native_command = Command::new(&executable_path)
.current_dir(EXECUTE_DIR)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
if let Some(stdin_str) = &options.stdin {
write!(native_command.stdin.as_ref().unwrap(), "{}", stdin_str).unwrap();
}
let result = native_command
.wait()
.expect("Failed to execute native program");
let stdout_str = {
let mut stdout = native_command.stdout.unwrap();
let mut s = String::new();
stdout.read_to_string(&mut s).unwrap();
s
};
let stderr_str = {
let mut stderr = native_command.stderr.unwrap();
let mut s = String::new();
stderr.read_to_string(&mut s).unwrap();
s
};
if !result.success() {
println!("NATIVE PROGRAM FAILED");
println!("stdout:\n{}", stdout_str);
eprintln!("stderr:\n{}", stderr_str);
}
let result = result.code().unwrap() as i64;
Ok(NativeOutput {
stdout: stdout_str,
stderr: stderr_str,
result,
})
}
/// compile the Wasm file for the given version of WASI
///
/// returns the path of where the wasm file is
fn compile_wasm_for_version(
temp_dir: &Path,
file: &str,
out_dir: &Path,
rs_mod_name: &str,
version: WasiVersion,
) -> io::Result<PathBuf> {
//let out_dir = base_dir; //base_dir.join("..").join(version.get_directory_name());
if !out_dir.exists() {
fs::create_dir(out_dir)?;
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name);
wasm_out_name.set_extension("wasm");
wasm_out_name
};
println!("Reading contents from file `{}`", file);
let file_contents: String = {
let mut fc = String::new();
let mut f = fs::OpenOptions::new().read(true).open(file)?;
f.read_to_string(&mut fc)?;
fc
};
let temp_wasi_rs_file_name = temp_dir.join(format!("wasi_modified_version_{}.rs", rs_mod_name));
{
let mut actual_file = fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&temp_wasi_rs_file_name)
.unwrap();
actual_file.write_all(file_contents.as_bytes()).unwrap();
}
println!(
"Compiling wasm module `{}` with toolchain `{}`",
&wasm_out_name.to_string_lossy(),
version.get_compiler_toolchain()
);
let mut command = Command::new("rustc");
command
.arg(format!("+{}", version.get_compiler_toolchain()))
.arg("--target=wasm32-wasi")
.arg("-C")
.arg("opt-level=z")
.arg(&temp_wasi_rs_file_name)
.arg("-o")
.arg(&wasm_out_name);
println!("Command {:?}", command);
let wasm_compilation_out = command.output().expect("Failed to compile program to wasm");
util::print_info_on_error(&wasm_compilation_out, "WASM COMPILATION");
println!(
"Removing file `{}`",
&temp_wasi_rs_file_name.to_string_lossy()
);
// to prevent committing huge binary blobs forever
let wasm_strip_out = Command::new("wasm-strip")
.arg(&wasm_out_name)
.output()
.expect("Failed to strip compiled wasm module");
util::print_info_on_error(&wasm_strip_out, "STRIPPING WASM");
let wasm_opt_out = Command::new("wasm-opt")
.arg("-Oz")
.arg(&wasm_out_name)
.arg("-o")
.arg(&wasm_out_name)
.output()
.expect("Failed to optimize compiled wasm module with wasm-opt!");
util::print_info_on_error(&wasm_opt_out, "OPTIMIZING WASM");
Ok(wasm_out_name)
}
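// Roughly, the pipeline above amounts to running the following commands by
// hand (the file names and the toolchain placeholder are illustrative
// assumptions, not taken verbatim from this repository):
//
//   rustc +<wasi-toolchain> --target=wasm32-wasi -C opt-level=z wasi_modified_version_<mod>.rs -o <mod>.wasm
//   wasm-strip <mod>.wasm
//   wasm-opt -Oz <mod>.wasm -o <mod>.wasm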
/// Compiles the test file for each of the given WASI versions
fn compile(temp_dir: &Path, file: &str, wasi_versions: &[WasiVersion]) {
let src_code: String = fs::read_to_string(file).unwrap();
let options: WasiOptions = extract_args_from_source_file(&src_code).unwrap_or_default();
assert!(file.ends_with(".rs"));
let rs_mod_name = {
Path::new(&file.to_lowercase())
.file_stem()
.unwrap()
.to_string_lossy()
.to_string()
};
let base_dir = Path::new(file).parent().unwrap();
let NativeOutput {
stdout,
stderr,
result,
} = generate_native_output(temp_dir, file, &rs_mod_name, &options.args, &options)
.expect("Generate native output");
let test = WasiTest {
wasm_prog_name: format!("{}.wasm", rs_mod_name),
stdout,
stderr,
result,
options,
};
let test_serialized = test.into_wasi_wast();
println!("Generated test output: {}", &test_serialized);
wasi_versions
.iter()
.map(|&version| {
let out_dir = base_dir.join("..").join(version.get_directory_name());
if !out_dir.exists() {
fs::create_dir(&out_dir).unwrap();
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name.clone());
wasm_out_name.set_extension("wast");
wasm_out_name
};
println!("Writing test output to {}", wasm_out_name.to_string_lossy());
fs::write(&wasm_out_name, test_serialized.clone()).unwrap();
println!("Compiling wasm version {:?}", version);
compile_wasm_for_version(temp_dir, file, &out_dir, &rs_mod_name, version)
.unwrap_or_else(|_| panic!("Could not compile Wasm to WASI version {:?}, perhaps you need to install the `{}` rust toolchain", version, version.get_compiler_toolchain()));
}).for_each(drop); // Do nothing with it, but let the iterator be consumed/iterated.
}
const WASI_TEST_SRC_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi/tests/*.rs");
pub fn build(wasi_versions: &[WasiVersion], specific_tests: &[&str]) {
let temp_dir = tempfile::TempDir::new().unwrap();
for entry in glob(WASI_TEST_SRC_DIR).unwrap() {
match entry {
Ok(path) => {
let test = path.to_str().unwrap();
if !specific_tests.is_empty() {
if let Some(filename) = path.file_stem().and_then(|f| f.to_str()) {
if specific_tests.contains(&filename) {
compile(temp_dir.path(), test, wasi_versions);
}
}
} else |
}
Err(e) => println!("{:?}", e),
}
}
println!("All modules generated.");
}
/// This is the structure of the `.wast` file
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct WasiTest {
/// The name of the wasm module to run
pub wasm_prog_name: String,
/// The program's expected output on stdout
pub stdout: String,
/// The program's expected output on stderr
pub stderr: String,
/// The program's expected result (exit code)
pub result: i64,
/// The program options
pub options: WasiOptions,
}
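// A minimal sketch of what `into_wasi_wast` below emits, assuming a test with
// one env var and one arg (the values are illustrative, and the exact layout
// of the stdout/stderr/result assertions is not shown here):
//
//   ;; This file was generated by https://github.com/wasmerio/wasi-tests
//
//   (wasi_test "example.wasm"
//     (envs "DOG=1")
//     (args "--test")
//     ...)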
impl WasiTest {
fn into_wasi_wast(self) -> String {
use std::fmt::Write;
let mut out = format!(
";; This file was generated by https://github.com/wasmerio/wasi-tests\n
(wasi_test \"{}\"",
self.wasm_prog_name
);
if !self.options.env.is_empty() {
let envs = self
.options
.env
.iter()
.map(|(name, value)| format!("\"{}={}\"", name, value))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (envs {})", envs);
}
if !self.options.args.is_empty() {
let args = self
.options
.args
.iter()
.map(|v| format!("\"{}\"", v))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (args {})", args);
}
if !self.options.dir.is_empty() {
let preopens = self
.options
.dir
.iter()
.map(|v| format!("\"{}\"", v))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\ | {
compile(temp_dir.path(), test, wasi_versions);
} | conditional_block |
wasitests.rs | executable_path.to_string_lossy()
);
fs::set_permissions(&executable_path, perm)?;
}
println!(
"Executing native program at {}",
executable_path.to_string_lossy()
);
// workspace root
const EXECUTE_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi");
let mut native_command = Command::new(&executable_path)
.current_dir(EXECUTE_DIR)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
if let Some(stdin_str) = &options.stdin {
write!(native_command.stdin.as_ref().unwrap(), "{}", stdin_str).unwrap();
}
let result = native_command
.wait()
.expect("Failed to execute native program");
let stdout_str = {
let mut stdout = native_command.stdout.unwrap();
let mut s = String::new();
stdout.read_to_string(&mut s).unwrap();
s
};
let stderr_str = {
let mut stderr = native_command.stderr.unwrap();
let mut s = String::new();
stderr.read_to_string(&mut s).unwrap();
s
};
if !result.success() {
println!("NATIVE PROGRAM FAILED");
println!("stdout:\n{}", stdout_str);
eprintln!("stderr:\n{}", stderr_str);
}
let result = result.code().unwrap() as i64;
Ok(NativeOutput {
stdout: stdout_str,
stderr: stderr_str,
result,
})
}
/// Compile the Wasm file for the given version of WASI
///
/// Returns the path of the generated wasm file
fn compile_wasm_for_version(
temp_dir: &Path,
file: &str,
out_dir: &Path,
rs_mod_name: &str,
version: WasiVersion,
) -> io::Result<PathBuf> {
//let out_dir = base_dir; //base_dir.join("..").join(version.get_directory_name());
if !out_dir.exists() {
fs::create_dir(out_dir)?;
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name);
wasm_out_name.set_extension("wasm");
wasm_out_name
};
println!("Reading contents from file `{}`", file);
let file_contents: String = {
let mut fc = String::new();
let mut f = fs::OpenOptions::new().read(true).open(file)?;
f.read_to_string(&mut fc)?;
fc
};
let temp_wasi_rs_file_name = temp_dir.join(format!("wasi_modified_version_{}.rs", rs_mod_name));
{
let mut actual_file = fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&temp_wasi_rs_file_name)
.unwrap();
actual_file.write_all(file_contents.as_bytes()).unwrap();
}
println!(
"Compiling wasm module `{}` with toolchain `{}`",
&wasm_out_name.to_string_lossy(),
version.get_compiler_toolchain()
);
let mut command = Command::new("rustc");
command
.arg(format!("+{}", version.get_compiler_toolchain()))
.arg("--target=wasm32-wasi")
.arg("-C")
.arg("opt-level=z")
.arg(&temp_wasi_rs_file_name)
.arg("-o")
.arg(&wasm_out_name);
println!("Command {:?}", command);
let wasm_compilation_out = command.output().expect("Failed to compile program to wasm");
util::print_info_on_error(&wasm_compilation_out, "WASM COMPILATION");
println!(
"Removing file `{}`",
&temp_wasi_rs_file_name.to_string_lossy()
);
// to prevent committing huge binary blobs forever
let wasm_strip_out = Command::new("wasm-strip")
.arg(&wasm_out_name)
.output()
.expect("Failed to strip compiled wasm module");
util::print_info_on_error(&wasm_strip_out, "STRIPPING WASM");
let wasm_opt_out = Command::new("wasm-opt")
.arg("-Oz")
.arg(&wasm_out_name)
.arg("-o")
.arg(&wasm_out_name)
.output()
.expect("Failed to optimize compiled wasm module with wasm-opt!");
util::print_info_on_error(&wasm_opt_out, "OPTIMIZING WASM");
Ok(wasm_out_name)
}
/// Compiles the test file for each of the given WASI versions
fn | (temp_dir: &Path, file: &str, wasi_versions: &[WasiVersion]) {
let src_code: String = fs::read_to_string(file).unwrap();
let options: WasiOptions = extract_args_from_source_file(&src_code).unwrap_or_default();
assert!(file.ends_with(".rs"));
let rs_mod_name = {
Path::new(&file.to_lowercase())
.file_stem()
.unwrap()
.to_string_lossy()
.to_string()
};
let base_dir = Path::new(file).parent().unwrap();
let NativeOutput {
stdout,
stderr,
result,
} = generate_native_output(temp_dir, file, &rs_mod_name, &options.args, &options)
.expect("Generate native output");
let test = WasiTest {
wasm_prog_name: format!("{}.wasm", rs_mod_name),
stdout,
stderr,
result,
options,
};
let test_serialized = test.into_wasi_wast();
println!("Generated test output: {}", &test_serialized);
wasi_versions
.iter()
.map(|&version| {
let out_dir = base_dir.join("..").join(version.get_directory_name());
if !out_dir.exists() {
fs::create_dir(&out_dir).unwrap();
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name.clone());
wasm_out_name.set_extension("wast");
wasm_out_name
};
println!("Writing test output to {}", wasm_out_name.to_string_lossy());
fs::write(&wasm_out_name, test_serialized.clone()).unwrap();
println!("Compiling wasm version {:?}", version);
compile_wasm_for_version(temp_dir, file, &out_dir, &rs_mod_name, version)
.unwrap_or_else(|_| panic!("Could not compile Wasm to WASI version {:?}, perhaps you need to install the `{}` rust toolchain", version, version.get_compiler_toolchain()));
}).for_each(drop); // Do nothing with it, but let the iterator be consumed/iterated.
}
const WASI_TEST_SRC_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi/tests/*.rs");
pub fn build(wasi_versions: &[WasiVersion], specific_tests: &[&str]) {
let temp_dir = tempfile::TempDir::new().unwrap();
for entry in glob(WASI_TEST_SRC_DIR).unwrap() {
match entry {
Ok(path) => {
let test = path.to_str().unwrap();
if !specific_tests.is_empty() {
if let Some(filename) = path.file_stem().and_then(|f| f.to_str()) {
if specific_tests.contains(&filename) {
compile(temp_dir.path(), test, wasi_versions);
}
}
} else {
compile(temp_dir.path(), test, wasi_versions);
}
}
Err(e) => println!("{:?}", e),
}
}
println!("All modules generated.");
}
/// This is the structure of the `.wast` file
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct WasiTest {
/// The name of the wasm module to run
pub wasm_prog_name: String,
/// The program's expected output on stdout
pub stdout: String,
/// The program's expected output on stderr
pub stderr: String,
/// The program's expected result (exit code)
pub result: i64,
/// The program options
pub options: WasiOptions,
}
impl WasiTest {
fn into_wasi_wast(self) -> String {
use std::fmt::Write;
let mut out = format!(
";; This file was generated by https://github.com/wasmerio/wasi-tests\n
(wasi_test \"{}\"",
self.wasm_prog_name
);
if !self.options.env.is_empty() {
let envs = self
.options
.env
.iter()
.map(|(name, value)| format!("\"{}={}\"", name, value))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (envs {})", envs);
}
if !self.options.args.is_empty() {
let args = self
.options
.args
.iter()
.map(|v| format!("\"{}\"", v))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (args {})", args);
}
if !self.options.dir.is_empty() {
let preopens = self
.options
.dir
.iter()
.map(|v| format!("\"{}\"", v))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\ | compile | identifier_name |
wasitests.rs | executable_path.to_string_lossy()
);
fs::set_permissions(&executable_path, perm)?;
}
println!(
"Executing native program at {}",
executable_path.to_string_lossy()
);
// workspace root
const EXECUTE_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi");
let mut native_command = Command::new(&executable_path)
.current_dir(EXECUTE_DIR)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
if let Some(stdin_str) = &options.stdin {
write!(native_command.stdin.as_ref().unwrap(), "{}", stdin_str).unwrap();
}
let result = native_command
.wait()
.expect("Failed to execute native program");
let stdout_str = {
let mut stdout = native_command.stdout.unwrap();
let mut s = String::new();
stdout.read_to_string(&mut s).unwrap();
s
};
let stderr_str = {
let mut stderr = native_command.stderr.unwrap();
let mut s = String::new();
stderr.read_to_string(&mut s).unwrap();
s
};
if !result.success() {
println!("NATIVE PROGRAM FAILED");
println!("stdout:\n{}", stdout_str);
eprintln!("stderr:\n{}", stderr_str);
}
let result = result.code().unwrap() as i64;
Ok(NativeOutput {
stdout: stdout_str,
stderr: stderr_str,
result,
})
}
/// Compile the Wasm file for the given version of WASI
///
/// Returns the path of the generated wasm file
fn compile_wasm_for_version(
temp_dir: &Path,
file: &str,
out_dir: &Path,
rs_mod_name: &str,
version: WasiVersion,
) -> io::Result<PathBuf> {
//let out_dir = base_dir; //base_dir.join("..").join(version.get_directory_name());
if !out_dir.exists() {
fs::create_dir(out_dir)?;
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name);
wasm_out_name.set_extension("wasm");
wasm_out_name
};
println!("Reading contents from file `{}`", file);
let file_contents: String = {
let mut fc = String::new();
let mut f = fs::OpenOptions::new().read(true).open(file)?;
f.read_to_string(&mut fc)?;
fc
};
let temp_wasi_rs_file_name = temp_dir.join(format!("wasi_modified_version_{}.rs", rs_mod_name));
{
let mut actual_file = fs::OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(&temp_wasi_rs_file_name)
.unwrap();
actual_file.write_all(file_contents.as_bytes()).unwrap();
}
println!(
"Compiling wasm module `{}` with toolchain `{}`",
&wasm_out_name.to_string_lossy(),
version.get_compiler_toolchain()
);
let mut command = Command::new("rustc");
command
.arg(format!("+{}", version.get_compiler_toolchain()))
.arg("--target=wasm32-wasi")
.arg("-C")
.arg("opt-level=z")
.arg(&temp_wasi_rs_file_name)
.arg("-o")
.arg(&wasm_out_name);
println!("Command {:?}", command);
let wasm_compilation_out = command.output().expect("Failed to compile program to wasm");
util::print_info_on_error(&wasm_compilation_out, "WASM COMPILATION");
println!(
"Removing file `{}`",
&temp_wasi_rs_file_name.to_string_lossy()
);
// to prevent committing huge binary blobs forever
let wasm_strip_out = Command::new("wasm-strip")
.arg(&wasm_out_name)
.output()
.expect("Failed to strip compiled wasm module");
util::print_info_on_error(&wasm_strip_out, "STRIPPING WASM");
let wasm_opt_out = Command::new("wasm-opt")
.arg("-Oz")
.arg(&wasm_out_name)
.arg("-o")
.arg(&wasm_out_name)
.output()
.expect("Failed to optimize compiled wasm module with wasm-opt!");
util::print_info_on_error(&wasm_opt_out, "OPTIMIZING WASM");
Ok(wasm_out_name)
}
/// Compiles the test file for each of the given WASI versions
fn compile(temp_dir: &Path, file: &str, wasi_versions: &[WasiVersion]) {
let src_code: String = fs::read_to_string(file).unwrap();
let options: WasiOptions = extract_args_from_source_file(&src_code).unwrap_or_default();
assert!(file.ends_with(".rs"));
let rs_mod_name = {
Path::new(&file.to_lowercase())
.file_stem()
.unwrap()
.to_string_lossy()
.to_string()
};
let base_dir = Path::new(file).parent().unwrap();
let NativeOutput {
stdout,
stderr,
result,
} = generate_native_output(temp_dir, file, &rs_mod_name, &options.args, &options)
.expect("Generate native output");
let test = WasiTest {
wasm_prog_name: format!("{}.wasm", rs_mod_name),
stdout,
stderr,
result,
options,
};
let test_serialized = test.into_wasi_wast();
println!("Generated test output: {}", &test_serialized);
wasi_versions
.iter()
.map(|&version| {
let out_dir = base_dir.join("..").join(version.get_directory_name());
if !out_dir.exists() {
fs::create_dir(&out_dir).unwrap();
}
let wasm_out_name = {
let mut wasm_out_name = out_dir.join(rs_mod_name.clone());
wasm_out_name.set_extension("wast");
wasm_out_name
};
println!("Writing test output to {}", wasm_out_name.to_string_lossy());
fs::write(&wasm_out_name, test_serialized.clone()).unwrap();
println!("Compiling wasm version {:?}", version);
compile_wasm_for_version(temp_dir, file, &out_dir, &rs_mod_name, version)
.unwrap_or_else(|_| panic!("Could not compile Wasm to WASI version {:?}, perhaps you need to install the `{}` rust toolchain", version, version.get_compiler_toolchain()));
}).for_each(drop); // Do nothing with it, but let the iterator be consumed/iterated.
}
const WASI_TEST_SRC_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi/tests/*.rs");
pub fn build(wasi_versions: &[WasiVersion], specific_tests: &[&str]) {
let temp_dir = tempfile::TempDir::new().unwrap();
for entry in glob(WASI_TEST_SRC_DIR).unwrap() {
match entry {
Ok(path) => {
let test = path.to_str().unwrap();
if !specific_tests.is_empty() {
if let Some(filename) = path.file_stem().and_then(|f| f.to_str()) {
if specific_tests.contains(&filename) {
compile(temp_dir.path(), test, wasi_versions);
}
}
} else {
compile(temp_dir.path(), test, wasi_versions);
}
}
Err(e) => println!("{:?}", e),
}
}
println!("All modules generated.");
}
/// This is the structure of the `.wast` file
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct WasiTest {
/// The name of the wasm module to run
pub wasm_prog_name: String,
/// The program's expected output on stdout
pub stdout: String,
/// The program's expected output on stderr
pub stderr: String,
/// The program's expected result (exit code)
pub result: i64,
/// The program options
pub options: WasiOptions,
}
impl WasiTest {
fn into_wasi_wast(self) -> String | .options
.args
.iter()
.map(|v| format!("\"{}\"", v))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (args {})", args);
}
if !self.options.dir.is_empty() {
let preopens = self
.options
.dir
.iter()
.map(|v| format!("\"{}\"", v))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n | {
use std::fmt::Write;
let mut out = format!(
";; This file was generated by https://github.com/wasmerio/wasi-tests\n
(wasi_test \"{}\"",
self.wasm_prog_name
);
if !self.options.env.is_empty() {
let envs = self
.options
.env
.iter()
.map(|(name, value)| format!("\"{}={}\"", name, value))
.collect::<Vec<String>>()
.join(" ");
let _ = write!(out, "\n (envs {})", envs);
}
if !self.options.args.is_empty() {
let args = self | identifier_body |
biology.js | with five-part symmetry and an internal skeleton made from calcium carbonate.",
"Arthropoda: Phylum for segmented animals consisting of a head, thorax, and abdomen. Their bodies are covered with an exoskeleton.",
"Crustacea: Phylum for segmented animals with 18 to 20 segments, two pairs of antennae, and compound eyes that are usually on stalks."
],
[
"Magnoliophyta: Phylum for plants that produce flowers and seeds.",
"Pinophyta: Phylum for cone-bearing plants, mostly trees.",
"Lycopodiophyta: Phylum for evergreen plants that include club and spike mosses. These plants do not produce flowers.",
"Equisetophyta: Phylum for plants that have hollow, jointed stems with rough ribs."
]
],
[
[
"Ascidiaceae: Class for cold-blooded marine animals that have neither a brain nor a skull and live inside a sac.",
"Aves: Class for warm-blooded animals with beaks and light bones that are hollow in areas.",
"Mammalia: Class for warm-blooded animals covered with fur or skin that may grow hair. The females have mammary glands.",
"Reptilia: Class for cold-blooded animals with scaly skin and either short legs or no legs at all."
],
[
"Magnoliopsida: Class for plants that sprout with two leaves. Veins in their leaves have a branching structure (dicots).",
"Liliopsida: Class for plants that sprout with one leaf. Veins in their leaves are typically parallel to each other (monocots)."
],
[
"Asteroidea: Class for echinoderms that are often shaped like a star, though they can be nearly circular in shape.",
"Crinoidea: Class for echinoderms that can have the appearance of a simple bush with a stalk that attaches to the seafloor.",
"Echinoidea: Class for echinoderms with a hard shell covered by spines.",
"Holothuroidea: Class for echinoderms that are shaped like a cylinder and have a mouth at one end and an anus at the other end. Their outer surface is soft."
]
],
[
[
"Artiodactyla: Order for two or four toed mammals that are usually found in groups or herds.",
"Carnivora: Order for meat-eating mammals. Some supplement their diet with fruits, plants, and insects.",
"Diprotodonts: Order for mammals in which two of the four digits of their hind legs are fused together up to the base of their claws.",
"Primates: Order for mammals with opposable thumbs and hands that are able to grasp."
],
[
"Alismatales: Order for Liliopsida plants that live submerged or mostly submerged in freshwater and marine environments. The flowers are pollinated by wind or water.",
"Poales: Order for grass-like Liliopsida plants with green sepals and petals that are bract-like (look like leaves or scales).",
"Liliales: Order for Liliopsida plants with flowers that have three sepals and three petals that are so similar that they cannot be distinguished from one another.",
"Asparagales: Order for Liliopsida plants with flowers in which the sepals and petals are often distinguishable."
],
[
"Apodida: Order for worm-like sea cucumbers that lack tube feet and have a thin outer covering that is often transparent.",
"Aspidochirotida: Order for sea cucumbers with 15 to 30 short tentacles that are shaped like shields or mops.",
"Oendrochirotida: Order for sea cucumbers with 10 to 30 highly branched tentacles.",
"Molpadiida: Order for sea cucumbers with 15 short, stubby tentacles and no tube feet. Bodies taper toward the anus, forming a tail."
]
],
[
[
"Felidae: Family for carnivores that have retractable claws and can either purr or roar.",
"Mustelidae: Family for carnivores that typically have long tails. All of them have especially well-developed anal glands.",
"Procyonidae: Family for small to medium sized mammals with short to long tails. They are found only from Canada to Argentina.",
"Ursidae: Family for small-to-large mammals with large ears and short tails."
],
[
"Alliaceae: Family for Asparagales plants with an onion-like odor, small flowers, and six stamens.",
"Agavaceae: Family for Asparagales plants with large flowers and six stamens.",
"Orchidaceae: Family for Asparagales plants with small to large flowers and one stamen.",
"Iridaceae: Family for Asparagales plants with small to large flowers and three stamens."
],
[
"Cucumariidae: Family for sea cucumbers with 10 branching tentacles that are used to capture particles from the surrounding water.",
"Phyllophoridae: Family for sea cucumbers with more than 10 branching tentacles. The shorter tentacles are used for cleaning.",
"Psolidae: Family for sea cucumbers covered on the top side by plates made of calcium carbonate.",
"Sclerodactylidae: Family for sea cucumbers with 10 to 20 tentacles and scattered tube feet."
]
],
[
[
"Melursus: Genus for bears with long narrow snouts, which look similar to an anteater's snout. They have small teeth and no incisors.",
"Helarctos: Genus for small bears that stand only about 30 inches to the shoulder.",
"Ursus: Genus for bears whose fur is typically uniform in color.",
"Tremarctos: Genus for large bears with white fur circling or almost circling their eyes. They feed mostly on fruit."
],
[
"Goodyera: Genus for long-stemmed orchids with small flowers. Within each flower's lip is a single patch of 'hair' (papillae).",
"Platanthera: Genus for orchids with green, white, or yellow flowers that have a small lobed or fringed lip.",
"Liparis: Genus for an orchid with one to a few leaves at its base and very small flowers. Each flower has a wide lip.",
"Spathoglottis: Genus for flowers with a lip that has a callous-like growth near its base."
],
[
"Pentacta: Genus for sea cucumbers with a flat underside and three distinct rows of tube feet, a firm body wall, and low papillae on the dorsal side.",
"Pseudocolochirus: Genus for sea cucumbers with three rows of tube feet on the bottom and large obvious papillae on the top.",
"Stolus: Genus for small sea cucumbers with tube feet that are distributed throughout the body.",
"Thyone: Genus for sea cucumbers with tube feet scattered equally over the body."
]
],
[
[
"arctos: A large bear known for its brown coat. It eats mostly vegetation.",
"americanus: A medium to large bear known for its typically black or dark brown coat.",
"maritimus: A large, aquatic bear that has adapted to a cold climate.",
"ursinus: A small to medium bear with black fur, though sometimes with gray and brown fur mixed in."
],
[
"hawaiensis: A species of wide-lipped orchid with green flowers that grows only in Hawaii.",
"liliifolia: A species of wide-lipped orchid with brown flowers.",
"loeselii: A species of wide-lipped orchid with green or greenish-yellow flowers.",
"vexillifera: A species of wide-lipped orchid found in the Caribbean and Central and South America."
],
[
"anceps: A species of yellow and pink sea cucumbers, with stiff, fleshy skin, low papillae on the body, and prominent papillae near the anus.",
"australis: A species of sea cucumbers with a squarish body that are grey to orange in color.",
"crassa: A species of grey sea cucumbers with a pink underside that are typically found on mud.",
"quadrangularis: A species for grey sea cucumbers with prominent tapering papillae along the corners of their squarish bodies."
]
]
];
function setOptions(i, k) {
if(k === undefined)
k = 0;
if(i > 6)
throw "We use the six-kingdom system here";
$("#classifying").text(classifications[i]);
for(var j = 1; j < 6; j++) {
var option;
if(i < 1)
option = options[i][j-1];
else {
if(k === 2 && i === 1)
k = 0;
option = options[i][k][j-1];
}
if(option === undefined) | {
$("#option-" + j).hide();
} | conditional_block |
|
biology.js | ). They generally have multiple cells.",
"Bacteria: Kingdom for organisms with a single cell that have no nucleus.",
"Fungi: Kingdom for organisms that absorb nutrients for energy. They may have one or more cells.",
"Plantae: Kingdom for autotrophs - organisms that use photosynthesis to make their own food. They usually have multiple cells.",
"Protoctista: Kingdom for any organism that does not fit the other kingdoms."
],
[
[
"Chordata: Phylum for animals with a notochord (a rodlike structure) at some stage of development that sometimes develops into a backbone.",
"Echinodermata: Phylum for animals with five-part symmetry and an internal skeleton made from calcium carbonate.",
"Arthropoda: Phylum for segmented animals consisting of a head, thorax, and abdomen. Their bodies are covered with an exoskeleton.",
"Crustacea: Phylum for segmented animals with 18 to 20 segments, two pairs of antennae, and compound eyes that are usually on stalks."
],
[
"Magnoliophyta: Phylum for plants that produce flowers and seeds.",
"Pinophyta: Phylum for cone-bearing plants, mostly trees.",
"Lycopodiophyta: Phylum for evergreen plants that include club and spike mosses. These plants do not produce flowers.",
"Equisetophyta: Phylum for plants that have hollow, jointed stems with rough ribs."
]
],
[
[
"Ascidiaceae: Class for cold-blooded marine animals that have neither a brain nor a skull and live inside a sac.",
"Aves: Class for warm-blooded animals with beaks and light bones that are hollow in areas.",
"Mammalia: Class for warm-blooded animals covered with fur or skin that may grow hair. The females have mammary glands.",
"Reptilia: Class for cold-blooded animals with scaly skin and either short legs or no legs at all."
],
[
"Magnoliopsida: Class for plants that sprout with two leaves. Veins in their leaves have a branching structure (dicots).",
"Liliopsida: Class for plants that sprout with one leaf. Veins in their leaves are typically parallel to each other (monocots)."
],
[
"Asteroidea: Class for echinoderms that are often shaped like a star, though they can be nearly circular in shape.",
"Crinoidea: Class for echinoderms that can have the appearance of a simple bush with a stalk that attaches to the seafloor.",
"Echinoidea: Class for echinoderms with a hard shell covered by spines.",
"Holothuroidea: Class for echinoderms that are shaped like a cylinder and have a mouth at one end and an anus at the other end. Their outer surface is soft."
]
],
[
[
"Artiodactyla: Order for two or four toed mammals that are usually found in groups or herds.",
"Carnivora: Order for meat-eating mammals. Some supplement their diet with fruits, plants, and insects.",
"Diprotodonts: Order for mammals in which two of the four digits of their hind legs are fused together up to the base of their claws.",
"Primates: Order for mammals with opposable thumbs and hands that are able to grasp."
],
[
"Alismatales: Order for Liliopsida plants that live submerged or mostly submerged in freshwater and marine environments. The flowers are pollinated by wind or water.",
"Poales: Order for grass-like Liliopsida plants with green sepals and petals that are bract-like (look like leaves or scales).",
"Liliales: Order for Liliopsida plants with flowers that have three sepals and three petals that are so similar that they cannot be distinguished from one another.",
"Asparagales: Order for Liliopsida plants with flowers in which the sepals and petals are often distinguishable."
],
[
"Apodida: Order for worm-like sea cucumbers that lack tube feet and have a thin outer covering that is often transparent.",
"Aspidochirotida: Order for sea cucumbers with 15 to 30 short tentacles that are shaped like shields or mops.",
"Oendrochirotida: Order for sea cucumbers with 10 to 30 highly branched tentacles.",
"Molpadiida: Order for sea cucumbers with 15 short, stubby tentacles and no tube feet. Bodies taper toward the anus, forming a tail."
]
],
[
[
"Felidae: Family for carnivores that have retractable claws and can either purr or roar.",
"Mustelidae: Family for carnivores that typically have long tails. All of them have especially well-developed anal glands.",
"Procyonidae: Family for small to medium sized mammals with short to long tails. They are found only from Canada to Argentina.",
"Ursidae: Family for small-to-large mammals with large ears and short tails."
],
[
"Alliaceae: Family for Asparagales plants with an onion-like odor, small flowers, and six stamens.",
"Agavaceae: Family for Asparagales plants with large flowers and six stamens.",
"Orchidaceae: Family for Asparagales plants with small to large flowers and one stamen.",
"Iridaceae: Family for Asparagales plants with small to large flowers and three stamens."
],
[
"Cucumariidae: Family for sea cucumbers with 10 branching tentacles that are used to capture particles from the surrounding water.",
"Phyllophoridae: Family for sea cucumbers with more than 10 branching tentacles. The shorter tentacles are used for cleaning.",
"Psolidae: Family for sea cucumbers covered on the top side by plates made of calcium carbonate.",
"Sclerodactylidae: Family for sea cucumbers with 10 to 20 tentacles and scattered tube feet."
]
],
[
[
"Melursus: Genus for bears with long narrow snouts, which look similar to an anteater's snout. They have small teeth and no incisors.",
"Helarctos: Genus for small bears that stand only about 30 inches to the shoulder.",
"Ursus: Genus for bears whose fur is typically uniform in color.",
"Tremarctos: Genus for large bears with white fur circling or almost circling their eyes. They feed mostly on fruit."
],
[
"Goodyera: Genus for long-stemmed orchids with small flowers. Within each flower's lip is a single patch of 'hair' (papillae).",
"Platanthera: Genus for orchids with green, white, or yellow flowers that have a small lobed or fringed lip.",
"Liparis: Genus for an orchid with one to a few leaves at its base and very small flowers. Each flower has a wide lip.",
"Spathoglottis: Genus for flowers with a lip that has a callous-like growth near its base."
],
[
"Pentacta: Genus for sea cucumbers with a flat underside and three distinct rows of tube feet, a firm body wall, and low papillae on the dorsal side.",
"Pseudocolochirus: Genus for sea cucumbers with three rows of tube feet on the bottom and large obvious papillae on the top.",
"Stolus: Genus for small sea cucumbers with tube feet that are distributed throughout the body.",
"Thyone: Genus for sea cucumbers with tube feet scattered equally over the body."
]
],
[
[
"arctos: A large bear known for its brown coat. It eats mostly vegetation.",
"americanus: A medium to large bear known for its typically black or dark brown coat.",
"maritimus: A large, aquatic bear that has adapted to a cold climate.",
"ursinus: A small to medium bear with black fur, though sometimes with gray and brown fur mixed in."
],
[
"hawaiensis: A species of wide-lipped orchid with green flowers that grows only in Hawaii.",
"liliifolia: A species of wide-lipped orchid with brown flowers.",
"loeselii: A species of wide-lipped orchid with green or greenish-yellow flowers.",
"vexillifera: A species of wide-lipped orchid found in the Caribbean and Central and South America."
],
[
"anceps: A species of yellow and pink sea cucumbers, with stiff, fleshy skin, low papillae on the body, and prominent papillae near the anus.",
"australis: A species of sea cucumbers with a squarish body that are grey to orange in color.",
"crassa: A species of grey sea cucumbers with a pink underside that are typically found on mud.", | "quadrangularis: A species for grey sea cucumbers with prominent tapering papillae along the corners of their squarish bodies."
] | random_line_split |
|
biology.js | "Bacteria: Kingdom for organisms with a single cell that have no nucleus.",
"Fungi: Kingdom for organisms that absorb nutrients for energy. They may have one or more cells.",
"Plantae: Kingdom for autotrophs - organisms that use photosynthesis to make their own food. They usually have multiple cells.",
"Protoctista: Kingdom for any organism that does not fit the other kingdoms."
],
[
[
"Chordata: Phylum for animals with a notochord (a rodlike structure) at some stage of development that sometimes develops into a backbone.",
"Echinodermata: Phylum for animals with five-part symmetry and an internal skeleton made from calcium carbonate.",
"Arthropoda: Phylum for segmented animals consisting of a head, thorax, and abdomen. Their bodies are covered with an exoskeleton.",
"Crustacea: Phylum for segmented animals with 18 to 20 segments, two pairs of antennae, and compound eyes that are usually on stalks."
],
[
"Magnoliophyta: Phylum for plants that produce flowers and seeds.",
"Pinophyta: Phylum for cone-bearing plants, mostly trees.",
"Lycopodiophyta: Phylum for evergreen plants that include club and spike mosses. These plants do not produce flowers.",
"Equisetophyta: Phylum for plants that have hollow, jointed stems with rough ribs."
]
],
[
[
"Ascidiaceae: Class for cold-blooded marine animals that have neither a brain nor a skull and live inside a sac.",
"Aves: Class for warm-blooded animals with beaks and light bones that are hollow in areas.",
"Mammalia: Class for warm-blooded animals covered with fur or skin that may grow hair. The females have mammary glands.",
"Reptilia: Class for cold-blooded animals with scaly skin and either short legs or no legs at all."
],
[
"Magnoliopsida: Class for plants that sprout with two leaves. Veins in their leaves have a branching structure (dicots).",
"Liliopsida: Class for plants that sprout with one leaf. Veins in their leaves are typically parallel to each other (monocots)."
],
[
"Asteroidea: Class for echinoderms that are often shaped like a star, though they can be nearly circular in shape.",
"Crinoidea: Class for echinoderms that can have the appearance of a simple bush with a stalk that attaches to the seafloor.",
"Echinoidea: Class for echinoderms with a hard shell covered by spines.",
"Holothuroidea: Class for echinoderms that are shaped like a cylinder and have a mouth at one end and an anus at the other end. Their outer surface is soft."
]
],
[
[
"Artiodactyla: Order for two or four toed mammals that are usually found in groups or herds.",
"Carnivora: Order for meat-eating mammals. Some supplement their diet with fruits, plants, and insects.",
"Diprotodonts: Order for mammals in which two of the four digits of their hind legs are fused together up to the base of their claws.",
"Primates: Order for mammals with opposable thumbs and hands that are able to grasp."
],
[
"Alismatales: Order for Liliopsida plants that live submerged or mostly submerged in freshwater and marine environments. The flowers are pollinated by wind or water.",
"Poales: Order for grass-like Liliopsida plants with green sepals and petals that are bract-like (look like leaves or scales).",
"Liliales: Order for Liliopsida plants with flowers that have three sepals and three petals that are so similar that they cannot be distinguished from one another.",
"Asparagales: Order for Liliopsida plants with flowers in which the sepals and petals are often distinguishable."
],
[
"Apodida: Order for worm-like sea cucumbers that lack tube feet and have a thin outer covering that is often transparent.",
"Aspidochirotida: Order for sea cucumbers with 15 to 30 short tentacles that are shaped like shields or mops.",
"Oendrochirotida: Order for sea cucumbers with 10 to 30 highly branched tentacles.",
"Molpadiida: Order for sea cucumbers with 15 short, stubby tentacles and no tube feet. Bodies taper toward the anus, forming a tail."
]
],
[
[
"Felidae: Family for carnivores that have retractable claws and can either purr or roar.",
"Mustelidae: Family for carnivores that typically have long tails. All of them have especially well-developed anal glands.",
"Procyonidae: Family for small to medium sized mammals with short to long tails. They are found only from Canada to Argentina.",
"Ursidae: Family for small-to-large mammals with large ears and short tails."
],
[
"Alliaceae: Family for Asparagales plants with an onion-like odor, small flowers, and six stamens.",
"Agavaceae: Family for Asparagales plants with large flowers and six stamens.",
"Orchidaceae: Family for Asparagales plants with small to large flowers and one stamen.",
"Iridaceae: Family for Asparagales plants with small to large flowers and three stamens."
],
[
"Cucumariidae: Family for sea cucumbers with 10 branching tentacles that are used to capture particles from the surrounding water.",
"Phyllophoridae: Family for sea cucumbers with more than 10 branching tentacles. The shorter tentacles are used for cleaning.",
"Psolidae: Family for sea cucumbers covered on the top side by plates made of calcium carbonate.",
"Sclerodactylidae: Family for sea cucumbers with 10 to 20 tentacles and scattered tube feet."
]
],
[
[
"Melursus: Genus for bears with long narrow snouts, which look similar to an anteater's snout. They have small teeth and no incisors.",
"Helarctos: Genus for small bears that stand only about 30 inches to the shoulder.",
"Ursus: Genus for bears whose fur is typically uniform in color.",
"Tremarctos: Genus for large bears with white fur circling or almost circling their eyes. They feed mostly on fruit."
],
[
"Goodyera: Genus for long-stemmed orchids with small flowers. Within each flower's lip is a single patch of 'hair' (papillae).",
"Platanthera: Genus for orchids with green, white, or yellow flowers that have a small lobed or fringed lip.",
"Liparis: Genus for an orchid with one to a few leaves at its base and very small flowers. Each flower has a wide lip.",
"Spathoglottis: Genus for flowers with a lip that has a callous-like growth near its base."
],
[
"Pentacta: Genus for sea cucumbers with a flat underside and three distinct rows of tube feet, a firm body wall, and low papillae on the dorsal side.",
"Pseudocolochirus: Genus for sea cucumbers with three rows of tube feet on the bottom and large obvious papillae on the top.",
"Stolus: Genus for small sea cucumbers with tube feet that are distributed throughout the body.",
"Thyone: Genus for sea cucumbers with tube feet scattered equally over the body."
]
],
[
[
"arctos: A large bear known for its brown coat. It eats mostly vegetation.",
"americanus: A medium to large bear known for its typically black or dark brown coat.",
"maritimus: A large, aquatic bear that has adapted to a cold climate.",
"ursinus: A small to medium bear with black fur, though sometimes with gray and brown fur mixed in."
],
[
"hawaiensis: A species of wide-lipped orchid with green flowers that grows only in Hawaii.",
"liliifolia: A species of wide-lipped orchid with brown flowers.",
"loeselii: A species of wide-lipped orchid with green or greenish-yellow flowers.",
"vexillifera: A species of wide-lipped orchid found in the Caribbean and Central and South America."
],
[
"anceps: A species of yellow and pink sea cucumbers, with stiff, fleshy skin, low papillae on the body, and prominent papillae near the anus.",
"australis: A species of sea cucumbers with a squarish body that are grey to orange in color.",
"crassa: A species of grey sea cucumbers with a pink underside that are typically found on mud.",
"quadrangularis: A species for grey sea cucumbers with prominent tapering papillae along the corners of their squarish bodies."
]
]
];
function | setOptions | identifier_name |
|
biology.js | for segmented animals consisting of a head, thorax, and abdomen. Their bodies are covered with an exoskeleton.",
"Crustacea: Phylum for segmented animals with 18 to 20 segments, two pairs of antennae, and compound eyes that are usually on stalks."
],
[
"Magnoliophyta: Phylum for plants that produce flowers and seeds.",
"Pinophyta: Phylum for cone-bearing plants, mostly trees.",
"Lycopodiophyta: Phylum for evergreen plants that include club and spike mosses. These plants do not produce flowers.",
"Equisetophyta: Phylum for plants that have hollow, jointed stems with rough ribs."
]
],
[
[
"Ascidiaceae: Class for cold-blooded marine animals that have neither a brain nor a skull and live inside a sac.",
"Aves: Class for warm-blooded animals with beaks and light bones that are hollow in areas.",
"Mammalia: Class for warm-blooded animals covered with fur or skin that may grow hair. The females have mammary glands.",
"Reptilia: Class for cold-blooded animals with scaly skin and either short legs or no legs at all."
],
[
"Magnoliopsida: Class for plants that sprout with two leaves. Veins in their leaves have a branching structure (dicots).",
"Liliopsida: Class for plants that sprout with one leaf. Veins in their leaves are typically parallel to each other (monocots)."
],
[
"Asteroidea: Class for echinoderms that are often shaped like a star, though they can be nearly circular in shape.",
"Crinoidea: Class for echinoderms that can have the appearance of a simple bush with a stalk that attaches to the seafloor.",
"Echinoidea: Class for echinoderms with a hard shell covered by spines.",
"Holothuroidea: Class for echinoderms that are shaped like a cylinder and have a mouth at one end and an anus at the other end. Their outer surface is soft."
]
],
[
[
"Artiodactyla: Order for two or four toed mammals that are usually found in groups or herds.",
"Carnivora: Order for meat-eating mammals. Some supplement their diet with fruits, plants, and insects.",
"Diprotodonts: Order for mammals in which two of the four digits of their hind legs are fused together up to the base of their claws.",
"Primates: Order for mammals with opposable thumbs and hands that are able to grasp."
],
[
"Alismatales: Order for Liliopsida plants that live submerged or mostly submerged in freshwater and marine environments. The flowers are pollinated by wind or water.",
"Poales: Order for grass-like Liliopsida plants with green sepals and petals that are bract-like (look like leaves or scales).",
"Liliales: Order for Liliopsida plants with flowers that have three sepals and three petals that are so similar that they cannot be distinguished from one another.",
"Asparagales: Order for Liliopsida plants with flowers in which the sepals and petals are often distinguishable."
],
[
"Apodida: Order for worm-like sea cucumbers that lack tube feet and have a thin outer covering that is often transparent.",
"Aspidochirotida: Order for sea cucumbers with 15 to 30 short tentacles that are shaped like shields or mops.",
"Oendrochirotida: Order for sea cucumbers with 10 to 30 highly branched tentacles.",
"Molpadiida: Order for sea cucumbers with 15 short, stubby tentacles and no tube feet. Bodies taper toward the anus, forming a tail."
]
],
[
[
"Felidae: Family for carnivores that have retractable claws and can either purr or roar.",
"Mustelidae: Family for carnivores that typically have long tails. All of them have especially well-developed anal glands.",
"Procyonidae: Family for small to medium sized mammals with short to long tails. They are found only from Canada to Argentina.",
"Ursidae: Family for small-to-large mammals with large ears and short tails."
],
[
"Alliaceae: Family for Asparagales plants with an onion-like odor, small flowers, and six stamens.",
"Agavaceae: Family for Asparagales plants with large flowers and six stamens.",
"Orchidaceae: Family for Asparagales plants with small to large flowers and one stamen.",
"Iridaceae: Family for Asparagales plants with small to large flowers and three stamens."
],
[
"Cucumariidae: Family for sea cucumbers with 10 branching tentacles that are used to capture particles from the surrounding water.",
"Phyllophoridae: Family for sea cucumbers with more than 10 branching tentacles. The shorter tentacles are used for cleaning.",
"Psolidae: Family for sea cucumbers covered on the top side by plates made of calcium carbonate.",
"Sclerodactylidae: Family for sea cucumbers with 10 to 20 tentacles and scattered tube feet."
]
],
[
[
"Melursus: Genus for bears with long narrow snouts, which look similar to an anteater's snout. They have small teeth and no incisors.",
"Helarctos: Genus for small bears that stand only about 30 inches to the shoulder.",
"Ursus: Genus for bears whose fur is typically uniform in color.",
"Tremarctos: Genus for large bears with white fur circling or almost circling their eyes. They feed mostly on fruit."
],
[
"Goodyera: Genus for long-stemmed orchids with small flowers. Within each flower's lip is a single patch of 'hair' (papillae).",
"Platanthera: Genus for orchids with green, white, or yellow flowers that have a small lobed or fringed lip.",
"Liparis: Genus for an orchid with one to a few leaves at its base and very small flowers. Each flower has a wide lip.",
"Spathoglottis: Genus for flowers with a lip that has a callous-like growth near its base."
],
[
"Pentacta: Genus for sea cucumbers with a flat underside and three distinct rows of tube feet, a firm body wall, and low papillae on the dorsal side.",
"Pseudocolochirus: Genus for sea cucumbers with three rows of tube feet on the bottom and large obvious papillae on the top.",
"Stolus: Genus for small sea cucumbers with tube feet that are distributed throughout the body.",
"Thyone: Genus for sea cucumbers with tube feet scattered equally over the body."
]
],
[
[
"arctos: A large bear known for its brown coat. It eats mostly vegetation.",
"americanus: A medium to large bear known for its typically black or dark brown coat.",
"maritimus: A large, aquatic bear that has adapted to a cold climate.",
"ursinus: A small to medium bear with black fur, though sometimes with gray and brown fur mixed in."
],
[
"hawaiensis: A species of wide-lipped orchid with green flowers that grows only in Hawaii.",
"liliifolia: A species of wide-lipped orchid with brown flowers.",
"loeselii: A species of wide-lipped orchid with green or greenish-yellow flowers.",
"vexillifera: A species of wide-lipped orchid found in the Caribbean and Central and South America."
],
[
"anceps: A species of yellow and pink sea cucumbers, with stiff, fleshy skin, low papillae on the body, and prominent papillae near the anus.",
"australis: A species of sea cucumbers with a squarish body that are grey to orange in color.",
"crassa: A species of grey sea cucumbers with a pink underside that are typically found on mud.",
"quadrangularis: A species for grey sea cucumbers with prominent tapering papillae along the corners of their squarish bodies."
]
]
];
function setOptions(i, k) | {
if(k === undefined)
k = 0;
if(i > 6)
throw "We use the six-kingdom system here";
$("#classifying").text(classifications[i]);
for(var j = 1; j < 6; j++) {
var option;
if(i < 1)
option = options[i][j-1];
else {
if(k === 2 && i === 1)
k = 0;
option = options[i][k][j-1];
}
if(option === undefined) {
$("#option-" + j).hide();
} else {
$("#option-" + j).show();
$("#label-" + j).text(option); | identifier_body |
|
app.js | '},
{type: '250 miles'},
{type: '500 miles'}
],
arroBranches: [
{
"sName": "Air Force",
"bActiveFilter": false
},
{
"sName": "Army",
"bActiveFilter": false
},
{
"sName": "Navy",
"bActiveFilter": false
},
{
"sName": "Marine Corps",
"bActiveFilter": false
},
{
"sName": "Defense Logistics Agency",
"bActiveFilter": false
}
],
arroConus: [{
sName: 'CONUS',
bActiveFilter: false
},{
sName: 'OCONUS',
bActiveFilter: false
}],
arroViewBy: [
{type: 'Zip Code'},
{type: 'Installation'}
]
};
$http.defaults.cache = _oMiData;
// binds a series of uniquely named constants to a controller's scope
this.constantify = function(scope, arrsConstants) {
arrsConstants.forEach(function(sKeyName) {
if (typeof _context._constants[sKeyName] === 'object') {
scope[sKeyName] = JSON.parse(JSON.stringify(_context._constants[sKeyName]));
} else {
scope[sKeyName] = _context._constants[sKeyName];
}
});
}
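// Illustrative usage from a controller (the scope variable and key names are
// assumed for the example only):
//   MiData.constantify($scope, ['arroBranches', 'arroViewBy']);
//   // $scope.arroBranches is now a per-view deep copy, safe to mutate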
this.get = function(sDataKey) {
return new Promise(function(resolve, reject){
$http.get(sDataKey).then(function(response){
resolve(response.data);
}, function(reason){
resolve({
error: 'An error occurred!'
});
})
});
}
// a syntactic sugar for mock data
this.mock = function(sUriSubstring) {
return this.get('/data/get-' + sUriSubstring + '.json');
}
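// e.g. MiData.mock('programs') is shorthand for MiData.get('/data/get-programs.json');
// 'programs' is just an illustrative key here.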
this.init = function() {
$http.get('/data/get-installations.json');
$http.get('/data/get-program-cards.json');
$http.get('/data/get-programs.json');
$http.get('/data/states.json');
}
}])
// ref: https://stackoverflow.com/questions/22408790/angularjs-passing-data-between-pages
// description: this service allows passing data between views
// TODO: maybe all gets should be byval, eg $scope.arroBranches = JSON.parse(JSON.stringify(MI.Data._constants.arroBranches));
.service('MiState', function(){
var _oState = {};
function set(data) {
_oState = data;
}
function get() {
return _oState;
}
return {
set: set,
get: get
}
})
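// Illustrative usage (property names assumed): the view being left stores its
// state, and the next view's controller reads it back after the route change.
//   MiState.set({ sZip: '20301', sSearchRadius: '50 miles' });
//   // ...after navigating to the next view...
//   var oPreviousSearch = MiState.get();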
// description: creates a base controller
// TODO: a single, generic MI.get() which internally looks at State, Data, $templateCache, $http, then handle error or not found.
// rule of thumb: If three views need it, add it here.
.service('BaseController', ['$location',
'cssInjector',
'MiData',
'MiState',
'$templateCache',
'$compile',
'$timeout',
'$window',
'Overlay',
function($location, cssInjector, MiData, MiState, $templateCache, $compile, $timeout, $window, Overlay) {
var oContext = this,
oLazyLoad = {
miCard: function() {
myapp._compileProvider.directive('miCard', function() {
return {
templateUrl: '/views/miCard/miCard.html'
}
})
}
};
function getViewName() {
var sResolvedViewName = MiData.sResolvedViewName;
if (sResolvedViewName) {
MiData.sResolvedViewName = '';
return sResolvedViewName;
}
return $location.$$path.slice(1).split('/')[0] || 'main';
}
function getStyle(sViewName) {
cssInjector.add('/views/' + sViewName + '/' + sViewName + '.css');
}
// description: a utility method to quickly get a needed item from a list.
// finds the first member of arr with a given key-value pair.
// either returns a desired key's value or else the whole matched object.
// can support up to 2 levels deep using subkey; more complex objects can't use this approach.
function getFirstMatch(arr, sKey, sValToMatch, sValToReturn, sSubKey) {
var arrFiltered = arr.filter(function(el){
return sSubKey ? el[sKey][sSubKey] === sValToMatch : el[sKey] === sValToMatch;
}) || '';
if (arrFiltered && arrFiltered[0]) {
return sValToReturn ? arrFiltered[0][sValToReturn] : arrFiltered[0];
}
return '';
}
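// Illustrative usage (the array contents are assumed for the example only):
//   var arroPrograms = [{ sId: 'a1', sTitle: 'Program A' }, { sId: 'b2', sTitle: 'Program B' }];
//   getFirstMatch(arroPrograms, 'sId', 'b2', 'sTitle'); // -> 'Program B'
//   getFirstMatch(arroPrograms, 'sId', 'zz');           // -> '' (no match found)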
function init(scope, oOptions) {
var sViewName = getViewName();
scope.MI = this;
MiData.init();
MiData.get('/views/' + sViewName + '/' + sViewName + '.html').then(function(sHtml) {
$compile($(sHtml))(scope); // virtually add dynamic html to controller scope, but do not actually render to browser DOM
scope.sViewName = sViewName;
getStyle(sViewName);
if (oOptions) {
if (oOptions.constantify) MiData.constantify(scope, oOptions.constantify);
if (oOptions.lazilyLoad) fLazyLoad(oOptions.lazilyLoad, scope);
if (oOptions.fCallback) oOptions.fCallback(scope, oOptions); |
scope.$apply();
});
}
// bLazyLoadingDone is needed to trigger $compile because it creates a diff
// also, make sure your lazily loaded element has some content. Or again, there will be no diff and no render will happen.
// ref: https://stackoverflow.com/questions/38514918/when-lazy-loading-directive-its-never-run?noredirect=1&lq=1
function fLazyLoad(arrsfDirectives, scope) {
arrsfDirectives.forEach(function(sfDirective){
oLazyLoad[sfDirective]();
});
scope.bLazyLoadingDone = true;
}
return {
$location: $location,
Data: MiData,
getStyle: getStyle,
getFirstMatch: getFirstMatch,
init: init,
State: MiState,
$window: $window,
Overlay: Overlay
}
}])
// syntactic sugar. Maybe collapse into one service if you want, but this is modular.
.service('MI', ['BaseController', function(BaseController){
return BaseController;
}])
// don't lazy load these because they're needed on main/initial view
.directive('miSearch', ['MI', function(MI){
return {
templateUrl: 'looking-for',
link: function(scope, element, attrs) {
var oChangeOptions = scope.$parent.oLookingForSelectionChange;
if (oChangeOptions) {
scope.$watch('oLookingForSelection',
oChangeOptions.fFunction,
oChangeOptions.bDigest);
}
}
}
}])
// TODO: this directive needs its own scope and data
.directive('miTypeaheadPrograms', function(){
return {
templateUrl: 'typeahead-programs'
}
})
.directive('miTypeaheadInstallations', function(){
return {
templateUrl: 'typeahead-installations',
link: function(scope, element, attrs) {
// TODO: should MDInstallations be standard under MI.Data?
// TODO: should we include a reference or element to return on click as a standard here?
scope.fShowInstallationsOverlay = scope.$parent.fHandleInstallationTextClick || function () {
scope.arroInstallations = scope.MDInstallations;
scope.$overlay = scope.MI.Overlay.showByTemplateName(scope, 'installations-overlay');
}
}
}
})
.directive('miRegionAccordions', function(){
return {
restrict: 'E',
templateUrl: 'region-accordions',
scope: { arrdirectiveregions: '=' },
link: function(scope, element, attrs) {
//ref: https://stackoverflow.com/questions/17900201/how-to-access-parent-scope-from-within-a-custom-directive-with-own-scope-in-an
scope.fHandleInstallationTextClick = scope.$parent.fHandleInstallationTextClick;
// animated open indicator
scope.fIndicateOpen = function ($event) {
var $panelHeading = $($event.currentTarget).find('.panel-heading'),
$openIndicator = $panelHeading.prev();
if (!$openIndicator.length) { // create $openIndicator if it's not there
$openIndicator = $('<div class="open-indicator">');
$panelHeading.before($openIndicator);
}
if ($event.currentTarget.classList.contains('panel-open')) { // ensure indicator is expanded in this case
$openIndicator.animate({
width: 40
}, 300);
} else { // ensure indicator is collapsed in this case
$openIndicator.animate({
width: 0
}, 300);
}
}
}
}
})
// TODO: delete this block
// define other directives for lazy loading later on
// ref: https://stackoverflow.com/questions/12538665/how-can-directives-be-lazy | } | random_line_split |
app.js | '},
{type: '250 miles'},
{type: '500 miles'}
],
arroBranches: [
{
"sName": "Air Force",
"bActiveFilter": false
},
{
"sName": "Army",
"bActiveFilter": false
},
{
"sName": "Navy",
"bActiveFilter": false
},
{
"sName": "Marine Corps",
"bActiveFilter": false
},
{
"sName": "Defense Logistics Agency",
"bActiveFilter": false
}
],
arroConus: [{
sName: 'CONUS',
bActiveFilter: false
},{
sName: 'OCONUS',
bActiveFilter: false
}],
arroViewBy: [
{type: 'Zip Code'},
{type: 'Installation'}
]
};
$http.defaults.cache = _oMiData;
// binds a series of uniquely named constants to a controller's scope
this.constantify = function(scope, arrsConstants) {
arrsConstants.forEach(function(sKeyName) {
if (typeof _context._constants[sKeyName] === 'object') {
scope[sKeyName] = JSON.parse(JSON.stringify(_context._constants[sKeyName]));
} else {
scope[sKeyName] = _context._constants[sKeyName];
}
});
}
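// Illustrative usage (not part of the original source): copy two of the
// constants above onto a controller's scope, e.g.:
//   MiData.constantify($scope, ['arroBranches', 'arroViewBy']);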
this.get = function(sDataKey) {
return new Promise(function(resolve, reject){
$http.get(sDataKey).then(function(response){
resolve(response.data);
}, function(reason){
resolve({
error: 'An error occurred!'
});
})
});
}
// a syntactic sugar for mock data
this.mock = function(sUriSubstring) {
return this.get('/data/get-' + sUriSubstring + '.json');
}
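// Illustrative usage (not part of the original source):
//   MiData.mock('programs').then(function(oData) { /* ... */ });
// resolves with the contents of '/data/get-programs.json' (pre-warmed by init below).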
this.init = function() {
$http.get('/data/get-installations.json');
$http.get('/data/get-program-cards.json');
$http.get('/data/get-programs.json');
$http.get('/data/states.json');
}
}])
// ref: https://stackoverflow.com/questions/22408790/angularjs-passing-data-between-pages
// description: this service allows passing data between views
// TODO: maybe all gets should be byval, eg $scope.arroBranches = JSON.parse(JSON.stringify(MI.Data._constants.arroBranches));
.service('MiState', function(){
var _oState = {};
function set(data) {
_oState = data;
}
function get() {
return _oState;
}
return {
set: set,
get: get
}
})
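// Illustrative usage (not part of the original source; the payload shape is
// hypothetical): pass data from one view's controller to the next, e.g.:
//   MI.State.set({ sSelectedZip: '12345' });  // in the first controller
//   var oState = MI.State.get();              // in the next controller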
// description: creates a base controller
// TODO: a single, generic MI.get() which internally looks at State, Data, $templateCache, $http, then handle error or not found.
// rule of thumb: If three views need it, add it here.
.service('BaseController', ['$location',
'cssInjector',
'MiData',
'MiState',
'$templateCache',
'$compile',
'$timeout',
'$window',
'Overlay',
function($location, cssInjector, MiData, MiState, $templateCache, $compile, $timeout, $window, Overlay) {
var oContext = this,
oLazyLoad = {
miCard: function() {
myapp._compileProvider.directive('miCard', function() {
return {
templateUrl: '/views/miCard/miCard.html'
}
})
}
};
function getViewName() {
var sResolvedViewName = MiData.sResolvedViewName;
if (sResolvedViewName) {
MiData.sResolvedViewName = '';
return sResolvedViewName;
}
return $location.$$path.slice(1).split('/')[0] || 'main';
}
function getStyle(sViewName) {
cssInjector.add('/views/' + sViewName + '/' + sViewName + '.css');
}
// description: a utility method to quickly get a needed item from a list.
// finds the first member of arr with a given key-value pair.
// either returns a desired key's value or else the whole matched object.
// can support up to 2 levels deep using subkey; more complex objects can't use this approach.
function getFirstMatch(arr, sKey, sValToMatch, sValToReturn, sSubKey) {
var arrFiltered = arr.filter(function(el){
return sSubKey ? el[sKey][sSubKey] === sValToMatch : el[sKey] === sValToMatch;
}) || '';
if (arrFiltered && arrFiltered[0]) {
return sValToReturn ? arrFiltered[0][sValToReturn] : arrFiltered[0];
}
return '';
}
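// Illustrative usage (not part of the original source): read the Army entry's
// filter flag from the arroBranches constant, e.g.:
//   var bActive = getFirstMatch(arroBranches, 'sName', 'Army', 'bActiveFilter');
// Omitting the fourth argument returns the whole matched object instead.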
function init(scope, oOptions) {
var sViewName = getViewName();
scope.MI = this;
MiData.init();
MiData.get('/views/' + sViewName + '/' + sViewName + '.html').then(function(sHtml) {
$compile($(sHtml))(scope); // virtually add dynamic html to controller scope, but do not actually render to browser DOM
scope.sViewName = sViewName;
getStyle(sViewName);
if (oOptions) {
if (oOptions.constantify) MiData.constantify(scope, oOptions.constantify);
if (oOptions.lazilyLoad) fLazyLoad(oOptions.lazilyLoad, scope);
if (oOptions.fCallback) oOptions.fCallback(scope, oOptions);
}
scope.$apply();
});
}
// bLazyLoadingDone is needed to trigger $compile because it creates a diff
// also, make sure your lazily loaded element has some content. Or again, there will be no diff and no render will happen.
// ref: https://stackoverflow.com/questions/38514918/when-lazy-loading-directive-its-never-run?noredirect=1&lq=1
function fLazyLoad(arrsfDirectives, scope) |
return {
$location: $location,
Data: MiData,
getStyle: getStyle,
getFirstMatch: getFirstMatch,
init: init,
State: MiState,
$window: $window,
Overlay: Overlay
}
}])
// syntactic sugar. Maybe collapse into one service if you want, but this is modular.
.service('MI', ['BaseController', function(BaseController){
return BaseController;
}])
// don't lazy load these because they're needed on main/initial view
.directive('miSearch', ['MI', function(MI){
return {
templateUrl: 'looking-for',
link: function(scope, element, attrs) {
var oChangeOptions = scope.$parent.oLookingForSelectionChange;
if (oChangeOptions) {
scope.$watch('oLookingForSelection',
oChangeOptions.fFunction,
oChangeOptions.bDigest);
}
}
}
}])
// TODO: this directive needs its own scope and data
.directive('miTypeaheadPrograms', function(){
return {
templateUrl: 'typeahead-programs'
}
})
.directive('miTypeaheadInstallations', function(){
return {
templateUrl: 'typeahead-installations',
link: function(scope, element, attrs) {
// TODO: should MDInstallations be standard under MI.Data?
// TODO: should we include a reference or element to return on click as a standard here?
scope.fShowInstallationsOverlay = scope.$parent.fHandleInstallationTextClick || function () {
scope.arroInstallations = scope.MDInstallations;
scope.$overlay = scope.MI.Overlay.showByTemplateName(scope, 'installations-overlay');
}
}
}
})
.directive('miRegionAccordions', function(){
return {
restrict: 'E',
templateUrl: 'region-accordions',
scope: { arrdirectiveregions: '=' },
link: function(scope, element, attrs) {
//ref: https://stackoverflow.com/questions/17900201/how-to-access-parent-scope-from-within-a-custom-directive-with-own-scope-in-an
scope.fHandleInstallationTextClick = scope.$parent.fHandleInstallationTextClick;
// animated open indicator
scope.fIndicateOpen = function ($event) {
var $panelHeading = $($event.currentTarget).find('.panel-heading'),
$openIndicator = $panelHeading.prev();
if (!$openIndicator.length) { // create $openIndicator if it's not there
$openIndicator = $('<div class="open-indicator">');
$panelHeading.before($openIndicator);
}
if ($event.currentTarget.classList.contains('panel-open')) { // ensure indicator is expanded in this case
$openIndicator.animate({
width: 40
}, 300);
} else { // ensure indicator is collapsed in this case
$openIndicator.animate({
width: 0
}, 300);
}
}
}
}
})
// TODO: delete this block
// define other directives for lazy loading later on
// ref: https://stackoverflow.com/questions/12538665/how-can-directives-be-l | {
arrsfDirectives.forEach(function(sfDirective){
oLazyLoad[sfDirective]();
});
scope.bLazyLoadingDone = true;
} | identifier_body |
app.js | '},
{type: '250 miles'},
{type: '500 miles'}
],
arroBranches: [
{
"sName": "Air Force",
"bActiveFilter": false
},
{
"sName": "Army",
"bActiveFilter": false
},
{
"sName": "Navy",
"bActiveFilter": false
},
{
"sName": "Marine Corps",
"bActiveFilter": false
},
{
"sName": "Defense Logistics Agency",
"bActiveFilter": false
}
],
arroConus: [{
sName: 'CONUS',
bActiveFilter: false
},{
sName: 'OCONUS',
bActiveFilter: false
}],
arroViewBy: [
{type: 'Zip Code'},
{type: 'Installation'}
]
};
$http.defaults.cache = _oMiData;
// binds a series of uniquely named constants to a controller's scope
this.constantify = function(scope, arrsConstants) {
arrsConstants.forEach(function(sKeyName) {
if (typeof _context._constants[sKeyName] === 'object') {
scope[sKeyName] = JSON.parse(JSON.stringify(_context._constants[sKeyName]));
} else {
scope[sKeyName] = _context._constants[sKeyName];
}
});
}
this.get = function(sDataKey) {
return new Promise(function(resolve, reject){
$http.get(sDataKey).then(function(response){
resolve(response.data);
}, function(reason){
resolve({
error: 'An error occurred!'
});
})
});
}
// a syntactic sugar for mock data
this.mock = function(sUriSubstring) {
return this.get('/data/get-' + sUriSubstring + '.json');
}
this.init = function() {
$http.get('/data/get-installations.json');
$http.get('/data/get-program-cards.json');
$http.get('/data/get-programs.json');
$http.get('/data/states.json');
}
}])
// ref: https://stackoverflow.com/questions/22408790/angularjs-passing-data-between-pages
// description: this service allows passing data between views
// TODO: maybe all gets should be byval, eg $scope.arroBranches = JSON.parse(JSON.stringify(MI.Data._constants.arroBranches));
.service('MiState', function(){
var _oState = {};
function set(data) {
_oState = data;
}
function get() {
return _oState;
}
return {
set: set,
get: get
}
})
// description: creates a base controller
// TODO: a single, generic MI.get() which internally looks at State, Data, $templateCache, $http, then handle error or not found.
// rule of thumb: If three views need it, add it here.
.service('BaseController', ['$location',
'cssInjector',
'MiData',
'MiState',
'$templateCache',
'$compile',
'$timeout',
'$window',
'Overlay',
function($location, cssInjector, MiData, MiState, $templateCache, $compile, $timeout, $window, Overlay) {
var oContext = this,
oLazyLoad = {
miCard: function() {
myapp._compileProvider.directive('miCard', function() {
return {
templateUrl: '/views/miCard/miCard.html'
}
})
}
};
function getViewName() {
var sResolvedViewName = MiData.sResolvedViewName;
if (sResolvedViewName) {
MiData.sResolvedViewName = '';
return sResolvedViewName;
}
return $location.$$path.slice(1).split('/')[0] || 'main';
}
function getStyle(sViewName) {
cssInjector.add('/views/' + sViewName + '/' + sViewName + '.css');
}
// description: a utility method to quickly get a needed item from a list.
// finds the first member of arr with a given key-value pair.
// either returns a desired key's value or else the whole matched object.
// can support up to 2 levels deep using subkey; more complex objects can't use this approach.
function getFirstMatch(arr, sKey, sValToMatch, sValToReturn, sSubKey) {
var arrFiltered = arr.filter(function(el){
return sSubKey ? el[sKey][sSubKey] === sValToMatch : el[sKey] === sValToMatch;
}) || '';
if (arrFiltered && arrFiltered[0]) {
return sValToReturn ? arrFiltered[0][sValToReturn] : arrFiltered[0];
}
return '';
}
function init(scope, oOptions) {
var sViewName = getViewName();
scope.MI = this;
MiData.init();
MiData.get('/views/' + sViewName + '/' + sViewName + '.html').then(function(sHtml) {
$compile($(sHtml))(scope); // virtually add dynamic html to controller scope, but do not actually render to browser DOM
scope.sViewName = sViewName;
getStyle(sViewName);
if (oOptions) {
if (oOptions.constantify) MiData.constantify(scope, oOptions.constantify);
if (oOptions.lazilyLoad) fLazyLoad(oOptions.lazilyLoad, scope);
if (oOptions.fCallback) oOptions.fCallback(scope, oOptions);
}
scope.$apply();
});
}
// bLazyLoadingDone is needed to trigger $compile because it creates a diff
// also, make sure your lazily loaded element has some content. Or again, there will be no diff and no render will happen.
// ref: https://stackoverflow.com/questions/38514918/when-lazy-loading-directive-its-never-run?noredirect=1&lq=1
function | (arrsfDirectives, scope) {
arrsfDirectives.forEach(function(sfDirective){
oLazyLoad[sfDirective]();
});
scope.bLazyLoadingDone = true;
}
return {
$location: $location,
Data: MiData,
getStyle: getStyle,
getFirstMatch: getFirstMatch,
init: init,
State: MiState,
$window: $window,
Overlay: Overlay
}
}])
// syntactic sugar. Maybe collapse into one service if you want, but this is modular.
.service('MI', ['BaseController', function(BaseController){
return BaseController;
}])
// don't lazy load these because they're needed on main/initial view
.directive('miSearch', ['MI', function(MI){
return {
templateUrl: 'looking-for',
link: function(scope, element, attrs) {
var oChangeOptions = scope.$parent.oLookingForSelectionChange;
if (oChangeOptions) {
scope.$watch('oLookingForSelection',
oChangeOptions.fFunction,
oChangeOptions.bDigest);
}
}
}
}])
// TODO: this directive needs its own scope and data
.directive('miTypeaheadPrograms', function(){
return {
templateUrl: 'typeahead-programs'
}
})
.directive('miTypeaheadInstallations', function(){
return {
templateUrl: 'typeahead-installations',
link: function(scope, element, attrs) {
// TODO: should MDInstallations be standard under MI.Data?
// TODO: should we include a reference or element to return on click as a standard here?
scope.fShowInstallationsOverlay = scope.$parent.fHandleInstallationTextClick || function () {
scope.arroInstallations = scope.MDInstallations;
scope.$overlay = scope.MI.Overlay.showByTemplateName(scope, 'installations-overlay');
}
}
}
})
.directive('miRegionAccordions', function(){
return {
restrict: 'E',
templateUrl: 'region-accordions',
scope: { arrdirectiveregions: '=' },
link: function(scope, element, attrs) {
//ref: https://stackoverflow.com/questions/17900201/how-to-access-parent-scope-from-within-a-custom-directive-with-own-scope-in-an
scope.fHandleInstallationTextClick = scope.$parent.fHandleInstallationTextClick;
// animated open indicator
scope.fIndicateOpen = function ($event) {
var $panelHeading = $($event.currentTarget).find('.panel-heading'),
$openIndicator = $panelHeading.prev();
if (!$openIndicator.length) { // create $openIndicator if it's not there
$openIndicator = $('<div class="open-indicator">');
$panelHeading.before($openIndicator);
}
if ($event.currentTarget.classList.contains('panel-open')) { // ensure indicator is expanded in this case
$openIndicator.animate({
width: 40
}, 300);
} else { // ensure indicator is collapsed in this case
$openIndicator.animate({
width: 0
}, 300);
}
}
}
}
})
// TODO: delete this block
// define other directives for lazy loading later on
// ref: https://stackoverflow.com/questions/12538665/how-can-directives-be-lazy | fLazyLoad | identifier_name |
app.js | '},
{type: '250 miles'},
{type: '500 miles'}
],
arroBranches: [
{
"sName": "Air Force",
"bActiveFilter": false
},
{
"sName": "Army",
"bActiveFilter": false
},
{
"sName": "Navy",
"bActiveFilter": false
},
{
"sName": "Marine Corps",
"bActiveFilter": false
},
{
"sName": "Defense Logistics Agency",
"bActiveFilter": false
}
],
arroConus: [{
sName: 'CONUS',
bActiveFilter: false
},{
sName: 'OCONUS',
bActiveFilter: false
}],
arroViewBy: [
{type: 'Zip Code'},
{type: 'Installation'}
]
};
$http.defaults.cache = _oMiData;
// binds a series of uniquely named constants to a controller's scope
this.constantify = function(scope, arrsConstants) {
arrsConstants.forEach(function(sKeyName) {
if (typeof _context._constants[sKeyName] === 'object') {
scope[sKeyName] = JSON.parse(JSON.stringify(_context._constants[sKeyName]));
} else {
scope[sKeyName] = _context._constants[sKeyName];
}
});
}
this.get = function(sDataKey) {
return new Promise(function(resolve, reject){
$http.get(sDataKey).then(function(response){
resolve(response.data);
}, function(reason){
resolve({
error: 'An error occurred!'
});
})
});
}
// a syntactic sugar for mock data
this.mock = function(sUriSubstring) {
return this.get('/data/get-' + sUriSubstring + '.json');
}
this.init = function() {
$http.get('/data/get-installations.json');
$http.get('/data/get-program-cards.json');
$http.get('/data/get-programs.json');
$http.get('/data/states.json');
}
}])
// ref: https://stackoverflow.com/questions/22408790/angularjs-passing-data-between-pages
// description: this service allows passing data between views
// TODO: maybe all gets should be byval, eg $scope.arroBranches = JSON.parse(JSON.stringify(MI.Data._constants.arroBranches));
.service('MiState', function(){
var _oState = {};
function set(data) {
_oState = data;
}
function get() {
return _oState;
}
return {
set: set,
get: get
}
})
// description: creates a base controller
// TODO: a single, generic MI.get() which internally looks at State, Data, $templateCache, $http, then handle error or not found.
// rule of thumb: If three views need it, add it here.
.service('BaseController', ['$location',
'cssInjector',
'MiData',
'MiState',
'$templateCache',
'$compile',
'$timeout',
'$window',
'Overlay',
function($location, cssInjector, MiData, MiState, $templateCache, $compile, $timeout, $window, Overlay) {
var oContext = this,
oLazyLoad = {
miCard: function() {
myapp._compileProvider.directive('miCard', function() {
return {
templateUrl: '/views/miCard/miCard.html'
}
})
}
};
function getViewName() {
var sResolvedViewName = MiData.sResolvedViewName;
if (sResolvedViewName) {
MiData.sResolvedViewName = '';
return sResolvedViewName;
}
return $location.$$path.slice(1).split('/')[0] || 'main';
}
function getStyle(sViewName) {
cssInjector.add('/views/' + sViewName + '/' + sViewName + '.css');
}
// description: a utility method to quickly get a needed item from a list.
// finds the first member of arr with a given key-value pair.
// either returns a desired key's value or else the whole matched object.
// can support up to 2 levels deep using subkey; more complex objects can't use this approach.
function getFirstMatch(arr, sKey, sValToMatch, sValToReturn, sSubKey) {
var arrFiltered = arr.filter(function(el){
return sSubKey ? el[sKey][sSubKey] === sValToMatch : el[sKey] === sValToMatch;
}) || '';
if (arrFiltered && arrFiltered[0]) {
return sValToReturn ? arrFiltered[0][sValToReturn] : arrFiltered[0];
}
return '';
}
function init(scope, oOptions) {
var sViewName = getViewName();
scope.MI = this;
MiData.init();
MiData.get('/views/' + sViewName + '/' + sViewName + '.html').then(function(sHtml) {
$compile($(sHtml))(scope); // virtually add dynamic html to controller scope, but do not actually render to browser DOM
scope.sViewName = sViewName;
getStyle(sViewName);
if (oOptions) {
if (oOptions.constantify) MiData.constantify(scope, oOptions.constantify);
if (oOptions.lazilyLoad) fLazyLoad(oOptions.lazilyLoad, scope);
if (oOptions.fCallback) oOptions.fCallback(scope, oOptions);
}
scope.$apply();
});
}
// bLazyLoadingDone is needed to trigger $compile because it creates a diff
// also, make sure your lazily loaded element has some content. Or again, there will be no diff and no render will happen.
// ref: https://stackoverflow.com/questions/38514918/when-lazy-loading-directive-its-never-run?noredirect=1&lq=1
function fLazyLoad(arrsfDirectives, scope) {
arrsfDirectives.forEach(function(sfDirective){
oLazyLoad[sfDirective]();
});
scope.bLazyLoadingDone = true;
}
return {
$location: $location,
Data: MiData,
getStyle: getStyle,
getFirstMatch: getFirstMatch,
init: init,
State: MiState,
$window: $window,
Overlay: Overlay
}
}])
// syntactic sugar. Maybe collapse into one service if you want, but this is modular.
.service('MI', ['BaseController', function(BaseController){
return BaseController;
}])
// don't lazy load these because they're needed on main/initial view
.directive('miSearch', ['MI', function(MI){
return {
templateUrl: 'looking-for',
link: function(scope, element, attrs) {
var oChangeOptions = scope.$parent.oLookingForSelectionChange;
if (oChangeOptions) {
scope.$watch('oLookingForSelection',
oChangeOptions.fFunction,
oChangeOptions.bDigest);
}
}
}
}])
// TODO: this directive needs its own scope and data
.directive('miTypeaheadPrograms', function(){
return {
templateUrl: 'typeahead-programs'
}
})
.directive('miTypeaheadInstallations', function(){
return {
templateUrl: 'typeahead-installations',
link: function(scope, element, attrs) {
// TODO: should MDInstallations be standard under MI.Data?
// TODO: should we include a reference or element to return on click as a standard here?
scope.fShowInstallationsOverlay = scope.$parent.fHandleInstallationTextClick || function () {
scope.arroInstallations = scope.MDInstallations;
scope.$overlay = scope.MI.Overlay.showByTemplateName(scope, 'installations-overlay');
}
}
}
})
.directive('miRegionAccordions', function(){
return {
restrict: 'E',
templateUrl: 'region-accordions',
scope: { arrdirectiveregions: '=' },
link: function(scope, element, attrs) {
//ref: https://stackoverflow.com/questions/17900201/how-to-access-parent-scope-from-within-a-custom-directive-with-own-scope-in-an
scope.fHandleInstallationTextClick = scope.$parent.fHandleInstallationTextClick;
// animated open indicator
scope.fIndicateOpen = function ($event) {
var $panelHeading = $($event.currentTarget).find('.panel-heading'),
$openIndicator = $panelHeading.prev();
if (!$openIndicator.length) |
if ($event.currentTarget.classList.contains('panel-open')) { // ensure indicator is expanded in this case
$openIndicator.animate({
width: 40
}, 300);
} else { // ensure indicator is collapsed in this case
$openIndicator.animate({
width: 0
}, 300);
}
}
}
}
})
// TODO: delete this block
// define other directives for lazy loading later on
// ref: https://stackoverflow.com/questions/12538665/how-can-directives-be-l | { // create $openIndicator if it's not there
$openIndicator = $('<div class="open-indicator">');
$panelHeading.before($openIndicator);
} | conditional_block |
test.rs | () {
// Windows compatible.
let output = sh("echo hi").read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn test_start() {
let handle1 = cmd!(path_to_exe("echo"), "hi")
.stdout_capture()
.start()
.unwrap();
let handle2 = cmd!(path_to_exe("echo"), "lo")
.stdout_capture()
.start()
.unwrap();
let output1 = handle1.wait().unwrap();
let output2 = handle2.wait().unwrap();
assert_eq!("hi", str::from_utf8(&output1.stdout).unwrap().trim());
assert_eq!("lo", str::from_utf8(&output2.stdout).unwrap().trim());
}
#[test]
fn test_error() {
let result = false_cmd().run();
if let Err(err) = result {
assert_eq!(err.kind(), io::ErrorKind::Other);
} else {
panic!("Expected a status error.");
}
}
#[test]
fn test_unchecked() {
let unchecked_false = false_cmd().unchecked();
// Unchecked errors shouldn't cause `run` to return an error.
let output = unchecked_false
.pipe(cmd!(path_to_exe("echo"), "waa"))
.stdout_capture()
.run()
.unwrap();
// The value of the exit code is preserved.
assert_eq!(1, output.status.code().unwrap());
assert_eq!("waa", String::from_utf8_lossy(&output.stdout).trim());
}
#[test]
fn test_unchecked_in_pipe() {
let zero = cmd!(path_to_exe("status"), "0");
let one = cmd!(path_to_exe("status"), "1");
let two = cmd!(path_to_exe("status"), "2");
// Right takes precedence over left.
let output = one.pipe(two.clone()).unchecked().run().unwrap();
assert_eq!(2, output.status.code().unwrap());
// Except that checked on the left takes precedence over unchecked on
// the right.
let output = one.pipe(two.unchecked()).unchecked().run().unwrap();
assert_eq!(1, output.status.code().unwrap());
// Right takes precedence over the left again if they're both unchecked.
let output = one
.unchecked()
.pipe(two.unchecked())
.unchecked()
.run()
.unwrap();
assert_eq!(2, output.status.code().unwrap());
// Except that if the right is a success, the left takes precedence.
let output = one
.unchecked()
.pipe(zero.unchecked())
.unchecked()
.run()
.unwrap();
assert_eq!(1, output.status.code().unwrap());
// Even if the right is checked.
let output = one.unchecked().pipe(zero).unchecked().run().unwrap();
assert_eq!(1, output.status.code().unwrap());
}
#[test]
fn test_pipe() {
let output = sh("echo xxx")
.pipe(cmd!(path_to_exe("x_to_y")))
.read()
.unwrap();
assert_eq!("yyy", output);
// Check that errors on either side are propagated.
let result = true_cmd().pipe(false_cmd()).run();
assert!(result.is_err());
let result = false_cmd().pipe(true_cmd()).run();
assert!(result.is_err());
}
#[test]
fn test_pipe_with_kill() {
// Make sure both sides get killed.
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Note that we don't use unchecked() here. This tests that kill suppresses
// exit status errors.
let handle = sleep_cmd.pipe(sleep_cmd.clone()).start().unwrap();
handle.kill().unwrap();
// But calling wait again should be an error, because of the status.
handle.wait().unwrap_err();
}
#[test]
fn test_pipe_start() {
let nonexistent_cmd = cmd!(path_to_exe("nonexistent!!!"));
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Errors starting the left side of a pipe are returned immediately, and
// the right side is never started.
nonexistent_cmd.pipe(&sleep_cmd).start().unwrap_err();
// Errors starting the right side are also returned immediately, and the
// left side is killed first.
sleep_cmd.pipe(nonexistent_cmd).start().unwrap_err();
}
#[test]
fn test_multiple_threads() {
// Wait on the sleep command in a background thread, while the main thread
// kills it.
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
let handle = Arc::new(sleep_cmd.unchecked().start().unwrap());
let arc_clone = handle.clone();
let wait_thread = std::thread::spawn(move || {
arc_clone.wait().unwrap();
});
handle.kill().unwrap();
wait_thread.join().unwrap();
}
#[test]
fn test_nonblocking_waits() {
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Make sure pipelines handle try_wait correctly.
let handle = sleep_cmd.pipe(&sleep_cmd).unchecked().start().unwrap();
// Make sure try_wait doesn't block on it.
assert!(handle.try_wait().unwrap().is_none());
handle.kill().unwrap();
}
#[test]
fn test_input() {
let expr = cmd!(path_to_exe("x_to_y")).stdin_bytes("xxx");
let output = expr.read().unwrap();
assert_eq!("yyy", output);
}
#[test]
fn test_stderr() {
let (mut reader, writer) = ::os_pipe::pipe().unwrap();
sh("echo hi>&2").stderr_file(writer).run().unwrap();
let mut s = String::new();
reader.read_to_string(&mut s).unwrap();
assert_eq!(s.trim(), "hi");
}
#[test]
fn test_null() {
let expr = cmd!(path_to_exe("cat"))
.stdin_null()
.stdout_null()
.stderr_null();
let output = expr.read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_path() {
let dir = tempfile::tempdir().unwrap();
let input_file = dir.path().join("input_file");
let output_file = dir.path().join("output_file");
File::create(&input_file)
.unwrap()
.write_all(b"xxx")
.unwrap();
let expr = cmd!(path_to_exe("x_to_y"))
.stdin_path(&input_file)
.stdout_path(&output_file);
let output = expr.read().unwrap();
assert_eq!("", output);
let mut file_output = String::new();
File::open(&output_file)
.unwrap()
.read_to_string(&mut file_output)
.unwrap();
assert_eq!("yyy", file_output);
}
#[test]
fn test_swapping() {
let output = sh("echo hi")
.stdout_to_stderr()
.stderr_capture()
.run()
.unwrap();
let stderr = str::from_utf8(&output.stderr).unwrap().trim();
assert_eq!("hi", stderr);
// Windows compatible. (Requires no space before the ">".)
let output = sh("echo hi>&2").stderr_to_stdout().read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn test_file() {
let dir = tempfile::tempdir().unwrap();
let file = dir.path().join("file");
File::create(&file).unwrap().write_all(b"example").unwrap();
let expr = cmd!(path_to_exe("cat")).stdin_file(File::open(&file).unwrap());
let output = expr.read().unwrap();
assert_eq!(output, "example");
}
#[test]
fn test_ergonomics() {
let mystr = "owned string".to_owned();
let mypathbuf = Path::new("a/b/c").to_owned();
let myvec = vec![1, 2, 3];
// These are nonsense expressions. We just want to make sure they compile.
let _ = sh("true")
.stdin_path(&*mystr)
.stdin_bytes(&*myvec)
.stdout_path(&*mypathbuf);
let _ = sh("true")
.stdin_path(mystr)
.stdin_bytes(myvec)
.stdout_path(mypathbuf);
// Unfortunately, this one doesn't work with our Into<Vec<u8>> bound on input().
// TODO: Is it worth having these impls for &Vec in other cases?
// let _ = sh("true").stdin_path(&mystr).stdin_bytes(&myvec).stdout_path(&mypathbuf);
}
#[test]
fn test_capture_both() {
// Windows compatible, no space before ">", and we trim newlines at the end to avoid
// dealing with the different kinds.
let output = sh("echo hi && echo lo>&2")
.stdout_capture()
.stderr_capture()
.run()
.unwrap();
assert_eq!("hi", str::from_utf8(&output.stdout).unwrap().trim());
assert_eq!("lo", str::from_utf8(&output.stderr).unwrap().trim());
}
#[test]
fn test_dir() {
// This test checks the interaction of `dir` and relative exe paths.
// Make sure that's actually what we're testing.
let pwd_path = path_to_exe("pwd");
assert!(pwd | test_sh | identifier_name |
|
test.rs | ::thread::spawn(move || {
arc_clone.wait().unwrap();
});
handle.kill().unwrap();
wait_thread.join().unwrap();
}
#[test]
fn test_nonblocking_waits() {
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Make sure pipelines handle try_wait correctly.
let handle = sleep_cmd.pipe(&sleep_cmd).unchecked().start().unwrap();
// Make sure try_wait doesn't block on it.
assert!(handle.try_wait().unwrap().is_none());
handle.kill().unwrap();
}
#[test]
fn test_input() {
let expr = cmd!(path_to_exe("x_to_y")).stdin_bytes("xxx");
let output = expr.read().unwrap();
assert_eq!("yyy", output);
}
#[test]
fn test_stderr() {
let (mut reader, writer) = ::os_pipe::pipe().unwrap();
sh("echo hi>&2").stderr_file(writer).run().unwrap();
let mut s = String::new();
reader.read_to_string(&mut s).unwrap();
assert_eq!(s.trim(), "hi");
}
#[test]
fn test_null() {
let expr = cmd!(path_to_exe("cat"))
.stdin_null()
.stdout_null()
.stderr_null();
let output = expr.read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_path() {
let dir = tempfile::tempdir().unwrap();
let input_file = dir.path().join("input_file");
let output_file = dir.path().join("output_file");
File::create(&input_file)
.unwrap()
.write_all(b"xxx")
.unwrap();
let expr = cmd!(path_to_exe("x_to_y"))
.stdin_path(&input_file)
.stdout_path(&output_file);
let output = expr.read().unwrap();
assert_eq!("", output);
let mut file_output = String::new();
File::open(&output_file)
.unwrap()
.read_to_string(&mut file_output)
.unwrap();
assert_eq!("yyy", file_output);
}
#[test]
fn test_swapping() {
let output = sh("echo hi")
.stdout_to_stderr()
.stderr_capture()
.run()
.unwrap();
let stderr = str::from_utf8(&output.stderr).unwrap().trim();
assert_eq!("hi", stderr);
// Windows compatible. (Requires no space before the ">".)
let output = sh("echo hi>&2").stderr_to_stdout().read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn test_file() {
let dir = tempfile::tempdir().unwrap();
let file = dir.path().join("file");
File::create(&file).unwrap().write_all(b"example").unwrap();
let expr = cmd!(path_to_exe("cat")).stdin_file(File::open(&file).unwrap());
let output = expr.read().unwrap();
assert_eq!(output, "example");
}
#[test]
fn test_ergonomics() {
let mystr = "owned string".to_owned();
let mypathbuf = Path::new("a/b/c").to_owned();
let myvec = vec![1, 2, 3];
// These are nonsense expressions. We just want to make sure they compile.
let _ = sh("true")
.stdin_path(&*mystr)
.stdin_bytes(&*myvec)
.stdout_path(&*mypathbuf);
let _ = sh("true")
.stdin_path(mystr)
.stdin_bytes(myvec)
.stdout_path(mypathbuf);
// Unfortunately, this one doesn't work with our Into<Vec<u8>> bound on input().
// TODO: Is it worth having these impls for &Vec in other cases?
// let _ = sh("true").stdin_path(&mystr).stdin_bytes(&myvec).stdout_path(&mypathbuf);
}
#[test]
fn test_capture_both() {
// Windows compatible, no space before ">", and we trim newlines at the end to avoid
// dealing with the different kinds.
let output = sh("echo hi && echo lo>&2")
.stdout_capture()
.stderr_capture()
.run()
.unwrap();
assert_eq!("hi", str::from_utf8(&output.stdout).unwrap().trim());
assert_eq!("lo", str::from_utf8(&output.stderr).unwrap().trim());
}
#[test]
fn test_dir() {
// This test checks the interaction of `dir` and relative exe paths.
// Make sure that's actually what we're testing.
let pwd_path = path_to_exe("pwd");
assert!(pwd_path.is_relative());
let pwd = cmd!(pwd_path);
// First assert that ordinary commands happen in the parent's dir.
let pwd_output = pwd.read().unwrap();
let pwd_path = Path::new(&pwd_output);
assert_eq!(pwd_path, env::current_dir().unwrap());
// Now create a temp dir and make sure we can set dir to it. This
// also tests the interaction of `dir` and relative exe paths.
let dir = tempfile::tempdir().unwrap();
let pwd_output = pwd.dir(dir.path()).read().unwrap();
let pwd_path = Path::new(&pwd_output);
// pwd_path isn't totally canonical on Windows, because it
// doesn't have a prefix. Thus we have to canonicalize both
// sides. (This also handles symlinks in TMP_DIR.)
assert_eq!(
pwd_path.canonicalize().unwrap(),
dir.path().canonicalize().unwrap()
);
}
#[test]
fn test_env() {
let output = cmd!(path_to_exe("print_env"), "foo")
.env("foo", "bar")
.read()
.unwrap();
assert_eq!("bar", output);
}
#[test]
fn test_full_env() {
// Note that it's important that no other tests use this variable name,
// because the test runner is multithreaded.
let var_name = "TEST_FULL_ENV";
// Capture the parent env, and make sure it does *not* contain our variable.
let clean_env: HashMap<String, String> = env::vars().collect();
assert!(
!clean_env.contains_key(var_name),
"why is this variable set?"
);
// Run a child process with that map passed to full_env(). It should be guaranteed not to
// see our variable, regardless of any outer env() calls or changes in the parent.
let clean_child = cmd!(path_to_exe("print_env"), var_name).full_env(clean_env);
// Dirty the parent env. Should be suppressed.
env::set_var(var_name, "junk1");
// And make an outer env() call. Should also be suppressed.
let dirty_child = clean_child.env(var_name, "junk2");
// Check that neither of those have any effect.
let output = dirty_child.read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_env_remove() {
// Set an environment variable in the parent. Note that it's important that
// no other tests use this variable name, because the test runner is
// multithreaded.
let var_name = "TEST_ENV_REMOVE";
env::set_var(var_name, "junk2");
// Run a command that observes the variable.
let output1 = cmd!(path_to_exe("print_env"), var_name).read().unwrap();
assert_eq!("junk2", output1);
// Run the same command with that variable removed.
let output2 = cmd!(path_to_exe("print_env"), var_name)
.env_remove(var_name)
.read()
.unwrap();
assert_eq!("", output2);
}
#[test]
fn test_env_remove_case_sensitivity() {
// Env var deletion is particularly sensitive to the differences in
// case-sensitivity between Unix and Windows. The semantics of env_remove
// in duct must *match the platform*.
// Set an environment variable in the parent. Note that it's important that
// no other tests use this variable name, because the test runner is
// multithreaded.
let var_name = "TEST_ENV_REMOVE_CASE_SENSITIVITY";
env::set_var(var_name, "abc123");
// Run a command that tries to clear the same variable, but in lowercase.
let output1 = cmd!(path_to_exe("print_env"), var_name)
.env_remove(var_name.to_lowercase())
.read()
.unwrap();
// Now try to clear that variable from the parent environment, again using
// lowercase, and run the same command without `env_remove`.
env::remove_var(var_name.to_lowercase());
let output2 = cmd!(path_to_exe("print_env"), var_name).read().unwrap();
// On Unix, env vars are case sensitive, and we don't expect either removal
// to have any effect. On Windows, they're insensitive, and we expect both
// removals to work. The key thing is that both approaches to removal have
// the *same effect*.
assert_eq!(output1, output2, "failed to match platform behavior!!!");
// Go ahead and assert the exact expected output, just in case. If these
// assertions ever break, it might be this test's fault and not the code's.
if cfg!(windows) | {
assert_eq!(output1, "");
} | conditional_block |
|
test.rs | let handle2 = cmd!(path_to_exe("echo"), "lo")
.stdout_capture()
.start()
.unwrap();
let output1 = handle1.wait().unwrap();
let output2 = handle2.wait().unwrap();
assert_eq!("hi", str::from_utf8(&output1.stdout).unwrap().trim());
assert_eq!("lo", str::from_utf8(&output2.stdout).unwrap().trim());
}
#[test]
fn test_error() {
let result = false_cmd().run();
if let Err(err) = result {
assert_eq!(err.kind(), io::ErrorKind::Other);
} else {
panic!("Expected a status error.");
}
}
#[test]
fn test_unchecked() {
let unchecked_false = false_cmd().unchecked();
// Unchecked errors shouldn't cause `run` to return an error.
let output = unchecked_false
.pipe(cmd!(path_to_exe("echo"), "waa"))
.stdout_capture()
.run()
.unwrap();
// The value of the exit code is preserved.
assert_eq!(1, output.status.code().unwrap());
assert_eq!("waa", String::from_utf8_lossy(&output.stdout).trim());
}
#[test]
fn test_unchecked_in_pipe() {
let zero = cmd!(path_to_exe("status"), "0");
let one = cmd!(path_to_exe("status"), "1");
let two = cmd!(path_to_exe("status"), "2");
// Right takes precedence over left.
let output = one.pipe(two.clone()).unchecked().run().unwrap();
assert_eq!(2, output.status.code().unwrap());
// Except that checked on the left takes precedence over unchecked on
// the right.
let output = one.pipe(two.unchecked()).unchecked().run().unwrap();
assert_eq!(1, output.status.code().unwrap());
// Right takes precedence over the left again if they're both unchecked.
let output = one
.unchecked()
.pipe(two.unchecked())
.unchecked()
.run()
.unwrap();
assert_eq!(2, output.status.code().unwrap());
// Except that if the right is a success, the left takes precedence.
let output = one
.unchecked()
.pipe(zero.unchecked())
.unchecked()
.run()
.unwrap();
assert_eq!(1, output.status.code().unwrap());
// Even if the right is checked.
let output = one.unchecked().pipe(zero).unchecked().run().unwrap();
assert_eq!(1, output.status.code().unwrap());
}
#[test]
fn test_pipe() {
let output = sh("echo xxx")
.pipe(cmd!(path_to_exe("x_to_y")))
.read()
.unwrap();
assert_eq!("yyy", output);
// Check that errors on either side are propagated.
let result = true_cmd().pipe(false_cmd()).run();
assert!(result.is_err());
let result = false_cmd().pipe(true_cmd()).run();
assert!(result.is_err());
}
#[test]
fn test_pipe_with_kill() {
// Make sure both sides get killed.
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Note that we don't use unchecked() here. This tests that kill suppresses
// exit status errors.
let handle = sleep_cmd.pipe(sleep_cmd.clone()).start().unwrap();
handle.kill().unwrap();
// But calling wait again should be an error, because of the status.
handle.wait().unwrap_err();
}
#[test]
fn test_pipe_start() {
let nonexistent_cmd = cmd!(path_to_exe("nonexistent!!!"));
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Errors starting the left side of a pipe are returned immediately, and
// the right side is never started.
nonexistent_cmd.pipe(&sleep_cmd).start().unwrap_err();
// Errors starting the right side are also returned immediately, and the
// left side is killed first.
sleep_cmd.pipe(nonexistent_cmd).start().unwrap_err();
}
#[test]
fn test_multiple_threads() {
// Wait on the sleep command in a background thread, while the main thread
// kills it.
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
let handle = Arc::new(sleep_cmd.unchecked().start().unwrap());
let arc_clone = handle.clone();
let wait_thread = std::thread::spawn(move || {
arc_clone.wait().unwrap();
});
handle.kill().unwrap();
wait_thread.join().unwrap();
}
#[test]
fn test_nonblocking_waits() {
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Make sure pipelines handle try_wait correctly.
let handle = sleep_cmd.pipe(&sleep_cmd).unchecked().start().unwrap();
// Make sure try_wait doesn't block on it.
assert!(handle.try_wait().unwrap().is_none());
handle.kill().unwrap();
}
#[test]
fn test_input() {
let expr = cmd!(path_to_exe("x_to_y")).stdin_bytes("xxx");
let output = expr.read().unwrap();
assert_eq!("yyy", output);
}
#[test]
fn test_stderr() {
let (mut reader, writer) = ::os_pipe::pipe().unwrap();
sh("echo hi>&2").stderr_file(writer).run().unwrap();
let mut s = String::new();
reader.read_to_string(&mut s).unwrap();
assert_eq!(s.trim(), "hi");
}
#[test]
fn test_null() {
let expr = cmd!(path_to_exe("cat"))
.stdin_null()
.stdout_null()
.stderr_null();
let output = expr.read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_path() {
let dir = tempfile::tempdir().unwrap();
let input_file = dir.path().join("input_file");
let output_file = dir.path().join("output_file");
File::create(&input_file)
.unwrap()
.write_all(b"xxx")
.unwrap();
let expr = cmd!(path_to_exe("x_to_y"))
.stdin_path(&input_file)
.stdout_path(&output_file);
let output = expr.read().unwrap();
assert_eq!("", output);
let mut file_output = String::new();
File::open(&output_file)
.unwrap()
.read_to_string(&mut file_output)
.unwrap();
assert_eq!("yyy", file_output);
}
#[test]
fn test_swapping() {
let output = sh("echo hi")
.stdout_to_stderr()
.stderr_capture()
.run()
.unwrap();
let stderr = str::from_utf8(&output.stderr).unwrap().trim();
assert_eq!("hi", stderr);
// Windows compatible. (Requires no space before the ">".)
let output = sh("echo hi>&2").stderr_to_stdout().read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn test_file() {
let dir = tempfile::tempdir().unwrap();
let file = dir.path().join("file");
File::create(&file).unwrap().write_all(b"example").unwrap();
let expr = cmd!(path_to_exe("cat")).stdin_file(File::open(&file).unwrap());
let output = expr.read().unwrap();
assert_eq!(output, "example");
}
#[test]
fn test_ergonomics() {
let mystr = "owned string".to_owned();
let mypathbuf = Path::new("a/b/c").to_owned();
let myvec = vec![1, 2, 3];
// These are nonsense expressions. We just want to make sure they compile.
let _ = sh("true")
.stdin_path(&*mystr)
.stdin_bytes(&*myvec)
.stdout_path(&*mypathbuf);
let _ = sh("true")
.stdin_path(mystr)
.stdin_bytes(myvec)
.stdout_path(mypathbuf);
// Unfortunately, this one doesn't work with our Into<Vec<u8>> bound on input().
// TODO: Is it worth having these impls for &Vec in other cases?
// let _ = sh("true").stdin_path(&mystr).stdin_bytes(&myvec).stdout_path(&mypathbuf);
}
#[test]
fn test_capture_both() {
// Windows compatible, no space before ">", and we trim newlines at the end to avoid
// dealing with the different kinds.
let output = sh("echo hi && echo lo>&2")
.stdout_capture()
.stderr_capture()
.run()
.unwrap();
assert_eq!("hi", str::from_utf8(&output.stdout).unwrap().trim());
assert_eq!("lo", str::from_utf8(&output.stderr).unwrap().trim());
}
#[test]
fn test_dir() {
// This test checks the interaction of `dir` and relative exe paths.
// Make sure that's actually what we're testing.
let pwd_path = path_to_exe("pwd");
assert!(pwd_path.is_relative());
let pwd = cmd!(pwd_path);
// First assert that ordinary commands happen in the parent's dir.
let pwd_output = pwd.read().unwrap();
let pwd_path = Path::new(&pwd_output);
assert_eq!(pwd_path | .stdout_capture()
.start()
.unwrap(); | random_line_split |
|
test.rs | ("status"), "2");
// Right takes precedence over left.
let output = one.pipe(two.clone()).unchecked().run().unwrap();
assert_eq!(2, output.status.code().unwrap());
// Except that checked on the left takes precedence over unchecked on
// the right.
let output = one.pipe(two.unchecked()).unchecked().run().unwrap();
assert_eq!(1, output.status.code().unwrap());
// Right takes precedence over the left again if they're both unchecked.
let output = one
.unchecked()
.pipe(two.unchecked())
.unchecked()
.run()
.unwrap();
assert_eq!(2, output.status.code().unwrap());
// Except that if the right is a success, the left takes precedence.
let output = one
.unchecked()
.pipe(zero.unchecked())
.unchecked()
.run()
.unwrap();
assert_eq!(1, output.status.code().unwrap());
// Even if the right is checked.
let output = one.unchecked().pipe(zero).unchecked().run().unwrap();
assert_eq!(1, output.status.code().unwrap());
}
#[test]
fn test_pipe() {
let output = sh("echo xxx")
.pipe(cmd!(path_to_exe("x_to_y")))
.read()
.unwrap();
assert_eq!("yyy", output);
// Check that errors on either side are propagated.
let result = true_cmd().pipe(false_cmd()).run();
assert!(result.is_err());
let result = false_cmd().pipe(true_cmd()).run();
assert!(result.is_err());
}
#[test]
fn test_pipe_with_kill() |
#[test]
fn test_pipe_start() {
let nonexistent_cmd = cmd!(path_to_exe("nonexistent!!!"));
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Errors starting the left side of a pipe are returned immediately, and
// the right side is never started.
nonexistent_cmd.pipe(&sleep_cmd).start().unwrap_err();
// Errors starting the right side are also returned immediately, and the
// left side is killed first.
sleep_cmd.pipe(nonexistent_cmd).start().unwrap_err();
}
#[test]
fn test_multiple_threads() {
// Wait on the sleep command in a background thread, while the main thread
// kills it.
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
let handle = Arc::new(sleep_cmd.unchecked().start().unwrap());
let arc_clone = handle.clone();
let wait_thread = std::thread::spawn(move || {
arc_clone.wait().unwrap();
});
handle.kill().unwrap();
wait_thread.join().unwrap();
}
#[test]
fn test_nonblocking_waits() {
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Make sure pipelines handle try_wait correctly.
let handle = sleep_cmd.pipe(&sleep_cmd).unchecked().start().unwrap();
// Make sure try_wait doesn't block on it.
assert!(handle.try_wait().unwrap().is_none());
handle.kill().unwrap();
}
#[test]
fn test_input() {
let expr = cmd!(path_to_exe("x_to_y")).stdin_bytes("xxx");
let output = expr.read().unwrap();
assert_eq!("yyy", output);
}
#[test]
fn test_stderr() {
let (mut reader, writer) = ::os_pipe::pipe().unwrap();
sh("echo hi>&2").stderr_file(writer).run().unwrap();
let mut s = String::new();
reader.read_to_string(&mut s).unwrap();
assert_eq!(s.trim(), "hi");
}
#[test]
fn test_null() {
let expr = cmd!(path_to_exe("cat"))
.stdin_null()
.stdout_null()
.stderr_null();
let output = expr.read().unwrap();
assert_eq!("", output);
}
#[test]
fn test_path() {
let dir = tempfile::tempdir().unwrap();
let input_file = dir.path().join("input_file");
let output_file = dir.path().join("output_file");
File::create(&input_file)
.unwrap()
.write_all(b"xxx")
.unwrap();
let expr = cmd!(path_to_exe("x_to_y"))
.stdin_path(&input_file)
.stdout_path(&output_file);
let output = expr.read().unwrap();
assert_eq!("", output);
let mut file_output = String::new();
File::open(&output_file)
.unwrap()
.read_to_string(&mut file_output)
.unwrap();
assert_eq!("yyy", file_output);
}
#[test]
fn test_swapping() {
let output = sh("echo hi")
.stdout_to_stderr()
.stderr_capture()
.run()
.unwrap();
let stderr = str::from_utf8(&output.stderr).unwrap().trim();
assert_eq!("hi", stderr);
// Windows compatible. (Requires no space before the ">".)
let output = sh("echo hi>&2").stderr_to_stdout().read().unwrap();
assert_eq!("hi", output);
}
#[test]
fn test_file() {
let dir = tempfile::tempdir().unwrap();
let file = dir.path().join("file");
File::create(&file).unwrap().write_all(b"example").unwrap();
let expr = cmd!(path_to_exe("cat")).stdin_file(File::open(&file).unwrap());
let output = expr.read().unwrap();
assert_eq!(output, "example");
}
#[test]
fn test_ergonomics() {
let mystr = "owned string".to_owned();
let mypathbuf = Path::new("a/b/c").to_owned();
let myvec = vec![1, 2, 3];
// These are nonsense expressions. We just want to make sure they compile.
let _ = sh("true")
.stdin_path(&*mystr)
.stdin_bytes(&*myvec)
.stdout_path(&*mypathbuf);
let _ = sh("true")
.stdin_path(mystr)
.stdin_bytes(myvec)
.stdout_path(mypathbuf);
// Unfortunately, this one doesn't work with our Into<Vec<u8>> bound on input().
// TODO: Is it worth having these impls for &Vec in other cases?
// let _ = sh("true").stdin_path(&mystr).stdin_bytes(&myvec).stdout_path(&mypathbuf);
}
#[test]
fn test_capture_both() {
// Windows compatible, no space before ">", and we trim newlines at the end to avoid
// dealing with the different kinds.
let output = sh("echo hi && echo lo>&2")
.stdout_capture()
.stderr_capture()
.run()
.unwrap();
assert_eq!("hi", str::from_utf8(&output.stdout).unwrap().trim());
assert_eq!("lo", str::from_utf8(&output.stderr).unwrap().trim());
}
#[test]
fn test_dir() {
// This test checks the interaction of `dir` and relative exe paths.
// Make sure that's actually what we're testing.
let pwd_path = path_to_exe("pwd");
assert!(pwd_path.is_relative());
let pwd = cmd!(pwd_path);
// First assert that ordinary commands happen in the parent's dir.
let pwd_output = pwd.read().unwrap();
let pwd_path = Path::new(&pwd_output);
assert_eq!(pwd_path, env::current_dir().unwrap());
// Now create a temp dir and make sure we can set dir to it. This
// also tests the interaction of `dir` and relative exe paths.
let dir = tempfile::tempdir().unwrap();
let pwd_output = pwd.dir(dir.path()).read().unwrap();
let pwd_path = Path::new(&pwd_output);
// pwd_path isn't totally canonical on Windows, because it
// doesn't have a prefix. Thus we have to canonicalize both
// sides. (This also handles symlinks in TMP_DIR.)
assert_eq!(
pwd_path.canonicalize().unwrap(),
dir.path().canonicalize().unwrap()
);
}
#[test]
fn test_env() {
let output = cmd!(path_to_exe("print_env"), "foo")
.env("foo", "bar")
.read()
.unwrap();
assert_eq!("bar", output);
}
#[test]
fn test_full_env() {
// Note that it's important that no other tests use this variable name,
// because the test runner is multithreaded.
let var_name = "TEST_FULL_ENV";
// Capture the parent env, and make sure it does *not* contain our variable.
let clean_env: HashMap<String, String> = env::vars().collect();
assert!(
!clean_env.contains_key(var_name),
"why is this variable set?"
);
// Run a child process with that map passed to full_env(). It should be guaranteed | {
// Make sure both sides get killed.
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
// Note that we don't use unchecked() here. This tests that kill suppresses
// exit status errors.
let handle = sleep_cmd.pipe(sleep_cmd.clone()).start().unwrap();
handle.kill().unwrap();
// But calling wait again should be an error, because of the status.
handle.wait().unwrap_err();
} | identifier_body |
writebacker.go | Transactioner: transactionerConfig{
// Commit transaction once 20 write queries have been performed (as
// identified in checksum_write_back benchmark).
MaxTransactionSize: 20,
// Commit transaction after 2 minutes regardless of the number of
// queries performed.
MaxTransactionLifetime: 2 * time.Minute,
},
}
type WriteBacker struct {
Config *WriteBackerConfig
tomb *tomb.Tomb
batcher *batcher
endOfQueueSignal chan struct{}
workerPool *work.WorkerPool
workerPoolStopped chan struct{}
fieldLogger log.Interface
}
func NewWriteBacker(config *WriteBackerConfig) *WriteBacker {
return &WriteBacker{
Config: config,
}
}
func (w *WriteBacker) Start(ctx context.Context) {
w.tomb, _ = tomb.WithContext(ctx)
w.fieldLogger = w.Config.Logger.WithFields(log.Fields{
"run": w.Config.RunID,
"snapshot": w.Config.SnapshotName,
"filesystem": w.Config.FileSystemName,
"namespace": w.Config.Namespace,
"component": "workqueue.WriteBacker",
})
w.endOfQueueSignal = make(chan struct{})
w.workerPoolStopped = make(chan struct{})
w.batcher = w.createBatcher()
w.workerPool = w.createWorkerPool()
w.tomb.Go(func() error {
concurrency, err := w.Config.DB.ServerConcurrency()
if err != nil {
return pkgErrors.Wrap(err, "(*WriteBacker).Start")
}
w.batcher.Start(w.tomb.Context(nil))
for i := 0; i < concurrency; i++ {
w.tomb.Go(w.processor)
}
w.workerPool.Start()
w.tomb.Go(w.endOfQueueHandler)
w.tomb.Go(w.batcherManager)
w.tomb.Go(w.workerPoolManager)
return nil
})
}
func (w *WriteBacker) SignalEndOfQueue() {
close(w.endOfQueueSignal)
}
func (w *WriteBacker) SignalStop() {
w.tomb.Kill(lifecycle.ErrStopSignalled)
}
func (w *WriteBacker) Wait() error {
return w.tomb.Wait()
}
func (w *WriteBacker) | () <-chan struct{} {
return w.tomb.Dead()
}
func (w *WriteBacker) Err() error {
return w.tomb.Err()
}
func (w *WriteBacker) endOfQueueHandler() error {
select {
case <-w.endOfQueueSignal:
w.fieldLogger.Info("Received end-of-queue signal")
w.fieldLogger.Debug("Draining and stopping worker pool as end-of-queue is reached")
w.workerPool.Drain()
w.workerPool.Stop()
close(w.workerPoolStopped)
w.fieldLogger.Debug("Closing batcher as end-of-queue is reached")
err := w.batcher.Close(w.tomb.Context(nil))
if err != nil {
err = pkgErrors.Wrap(err, "(*WriteBacker).endOfQueueHandler")
w.fieldLogger.WithError(err).WithFields(log.Fields{
"action": "stopping",
}).Error("Encountered error while closing batcher")
return err
}
return nil
case <-w.tomb.Dying():
return tomb.ErrDying
}
}
func (w *WriteBacker) workerPoolManager() error {
select {
case <-w.workerPoolStopped:
// There is no way of receiving errors from the worker pool
return nil
case <-w.tomb.Dying():
w.fieldLogger.Debug("Stopping and waiting for worker pool as component is dying")
w.workerPool.Stop()
return tomb.ErrDying
}
}
func (w *WriteBacker) batcherManager() error {
select {
case <-w.batcher.Dead():
err := w.batcher.Err()
if err == lifecycle.ErrStopSignalled {
return nil
} else if err != nil {
err = pkgErrors.Wrap(err, "(*WriteBacker).batcherManager")
w.fieldLogger.WithError(err).WithFields(log.Fields{
"action": "stopping",
}).Error("Batcher died")
return err
}
return nil
case <-w.tomb.Dying():
w.fieldLogger.Debug("Waiting for batcher to finish as component is dying")
_ = w.batcher.Wait()
return tomb.ErrDying
}
}
func (w *WriteBacker) createBatcher() *batcher {
config := &batcherConfig{}
*config = w.Config.Batcher
return newBatcher(config)
}
func (w *WriteBacker) createWorkerPool() *work.WorkerPool {
workerPool := work.NewWorkerPool(writeBackerContext{}, 1, w.Config.Namespace, w.Config.Pool)
workerPool.Middleware(
func(wBCtx *writeBackerContext, job *work.Job, next work.NextMiddlewareFunc) error {
wBCtx.WriteBacker = w
return next()
},
)
jobName := workqueue.WriteBackJobName(w.Config.FileSystemName, w.Config.SnapshotName)
workerPool.Job(jobName, (*writeBackerContext).Process)
return workerPool
}
func (w *WriteBacker) createTransactioner() *transactioner {
config := &transactionerConfig{}
*config = w.Config.Transactioner
config.DB = w.Config.DB
return newTransactioner(w.tomb.Context(nil), config)
}
func (w *WriteBacker) processor() error {
ctx := w.tomb.Context(nil)
transactioner := w.createTransactioner()
batchChan := w.batcher.Out()
dying := w.tomb.Dying()
for {
select {
case batch, ok := <-batchChan:
if !ok {
transactioner.Commit()
return nil
}
err := w.processBatch(ctx, batch, transactioner)
batch.Return()
if err != nil {
err = pkgErrors.Wrap(err, "(*WriteBacker).processor")
w.fieldLogger.WithError(err).WithFields(log.Fields{
"action": "stopping",
}).Error("Encountered error while processing batch")
transactioner.Close()
return err
}
case <-dying:
transactioner.Close()
return tomb.ErrDying
}
}
}
func (w *WriteBacker) processBatch(ctx context.Context, batch *filesBatch, transactioner *transactioner) error {
w.fieldLogger.Debug("Starting processing of batch")
checksums, fileIDs := w.collectFilesInBatch(batch)
// TODO pool files
var files []meda.File
files, err := transactioner.AppendFilesByIDs(files, ctx, fileIDs)
if err != nil {
return pkgErrors.Wrap(err, "(*WriteBacker).processBatch: fetch files from database")
}
for i := range files {
// Pointer to file in files, don't copy
file := &files[i]
checksum, ok := checksums[file.ID]
if !ok {
return pkgErrors.Wrapf(ErrFetchedUnexpectedFile, "(*WriteBacker).processBatch: process file with id %d", file.ID)
} else if checksum == nil {
return pkgErrors.Wrapf(ErrFetchedDuplicateFile, "(*WriteBacker).processBatch: process file with id %d", file.ID)
}
checksums[file.ID] = nil
if file.Checksum != nil && file.ToBeCompared == 1 && !bytes.Equal(checksum, file.Checksum) {
// info logging is handled by issueChecksumWarning
err = w.issueChecksumWarning(ctx, file, checksum, transactioner)
if err != nil {
// error logging is handled by issueChecksumWarning (escalating)
return pkgErrors.Wrapf(err, "(*WriteBacker).processBatch: process file with id %d", file.ID)
}
}
file.Checksum = checksum
file.LastRead.Uint64, file.LastRead.Valid = w.Config.RunID, true
file.ToBeCompared = 0
// ToBeRead is set in a separate loop
}
// Check that all files in the batch have been processed
for _, checksum := range checksums {
if checksum != nil {
return pkgErrors.Wrap(ErrFetchedInsufficientFiles, "(*WriteBacker).processBatch: check all files processed")
}
}
// Set ToBeRead to 0 and drop files which have already been written
for i := 0; i < len(files); {
if files[i].ToBeRead != 1 {
files[i] = files[len(files)-1]
files = files[:len(files)-1]
} else {
files[i].ToBeRead = 0
i++
}
}
err = transactioner.UpdateFilesChecksums(ctx, files, w.Config.RunID)
if err != nil {
return pkgErrors.Wrap(err, "(*WriteBacker).processBatch: update files in database")
}
w.fieldLogger.Debug("Finished processing of batch")
return nil
}
func (w *WriteBacker) collectFilesInBatch(batch *filesBatch) (map[uint64][]byte, []uint64) {
checksums := make(map[uint64][]byte)
// TODO use pool for fileIDs | Dead | identifier_name |
writebacker.go | Transactioner: transactionerConfig{
// Commit transaction once 20 write queries have been performed (as
// identified in checksum_write_back benchmark).
MaxTransactionSize: 20,
// Commit transaction after 2 minutes regardless of the number of
// queries performed.
MaxTransactionLifetime: 2 * time.Minute,
},
}
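// Illustrative sketch, not part of the original file: a caller could tune the
// write-back transaction batching before starting the component, assuming the
// remaining WriteBackerConfig fields (DB, Logger, Pool, RunID, ...) are filled
// in elsewhere:
//
//   cfg := &WriteBackerConfig{ /* ... */ }
//   cfg.Transactioner.MaxTransactionSize = 50
//   cfg.Transactioner.MaxTransactionLifetime = 5 * time.Minute
//   writeBacker := NewWriteBacker(cfg)
//   writeBacker.Start(ctx)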
type WriteBacker struct {
Config *WriteBackerConfig
tomb *tomb.Tomb
batcher *batcher
endOfQueueSignal chan struct{}
workerPool *work.WorkerPool
workerPoolStopped chan struct{}
fieldLogger log.Interface
}
func NewWriteBacker(config *WriteBackerConfig) *WriteBacker {
return &WriteBacker{
Config: config,
}
}
func (w *WriteBacker) Start(ctx context.Context) {
w.tomb, _ = tomb.WithContext(ctx)
w.fieldLogger = w.Config.Logger.WithFields(log.Fields{
"run": w.Config.RunID,
"snapshot": w.Config.SnapshotName,
"filesystem": w.Config.FileSystemName,
"namespace": w.Config.Namespace,
"component": "workqueue.WriteBacker",
})
w.endOfQueueSignal = make(chan struct{})
w.workerPoolStopped = make(chan struct{})
w.batcher = w.createBatcher()
w.workerPool = w.createWorkerPool()
w.tomb.Go(func() error {
concurrency, err := w.Config.DB.ServerConcurrency()
if err != nil {
return pkgErrors.Wrap(err, "(*WriteBacker).Start")
}
w.batcher.Start(w.tomb.Context(nil))
for i := 0; i < concurrency; i++ {
w.tomb.Go(w.processor)
}
w.workerPool.Start()
w.tomb.Go(w.endOfQueueHandler)
w.tomb.Go(w.batcherManager)
w.tomb.Go(w.workerPoolManager)
return nil
})
}
func (w *WriteBacker) SignalEndOfQueue() {
close(w.endOfQueueSignal)
}
func (w *WriteBacker) SignalStop() {
w.tomb.Kill(lifecycle.ErrStopSignalled)
}
func (w *WriteBacker) Wait() error {
return w.tomb.Wait()
}
func (w *WriteBacker) Dead() <-chan struct{} {
return w.tomb.Dead()
}
func (w *WriteBacker) Err() error {
return w.tomb.Err()
}
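// The methods above form this component's public lifecycle. A minimal usage
// sketch (illustrative only; configuration construction and error handling
// are elided, and ctx/config are placeholders):
//
//	wb := NewWriteBacker(config)
//	wb.Start(ctx)
//	// ... write-back jobs are consumed from the work queue ...
//	wb.SignalEndOfQueue() // once no further jobs will arrive
//	err := wb.Wait()      // blocks until all internal goroutines settle
//
// SignalStop can be used instead to abort early, in which case Wait is
// expected to return lifecycle.ErrStopSignalled.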
func (w *WriteBacker) endOfQueueHandler() error {
select {
case <-w.endOfQueueSignal:
w.fieldLogger.Info("Received end-of-queue signal")
w.fieldLogger.Debug("Draining and stopping worker pool as end-of-queue is reached")
w.workerPool.Drain()
w.workerPool.Stop()
close(w.workerPoolStopped)
w.fieldLogger.Debug("Closing batcher as end-of-queue is reached")
err := w.batcher.Close(w.tomb.Context(nil))
if err != nil {
err = pkgErrors.Wrap(err, "(*WriteBacker).endOfQueueHandler")
w.fieldLogger.WithError(err).WithFields(log.Fields{
"action": "stopping",
}).Error("Encountered error while closing batcher")
return err
}
return nil
case <-w.tomb.Dying():
return tomb.ErrDying
}
}
func (w *WriteBacker) workerPoolManager() error |
func (w *WriteBacker) batcherManager() error {
select {
case <-w.batcher.Dead():
err := w.batcher.Err()
if err == lifecycle.ErrStopSignalled {
return nil
} else if err != nil {
err = pkgErrors.Wrap(err, "(*WriteBacker).batcherManager")
w.fieldLogger.WithError(err).WithFields(log.Fields{
"action": "stopping",
}).Error("Batcher died")
return err
}
return nil
case <-w.tomb.Dying():
w.fieldLogger.Debug("Waiting for batcher to finish as component is dying")
_ = w.batcher.Wait()
return tomb.ErrDying
}
}
func (w *WriteBacker) createBatcher() *batcher {
config := &batcherConfig{}
*config = w.Config.Batcher
return newBatcher(config)
}
func (w *WriteBacker) createWorkerPool() *work.WorkerPool {
workerPool := work.NewWorkerPool(writeBackerContext{}, 1, w.Config.Namespace, w.Config.Pool)
workerPool.Middleware(
func(wBCtx *writeBackerContext, job *work.Job, next work.NextMiddlewareFunc) error {
wBCtx.WriteBacker = w
return next()
},
)
jobName := workqueue.WriteBackJobName(w.Config.FileSystemName, w.Config.SnapshotName)
workerPool.Job(jobName, (*writeBackerContext).Process)
return workerPool
}
func (w *WriteBacker) createTransactioner() *transactioner {
config := &transactionerConfig{}
*config = w.Config.Transactioner
config.DB = w.Config.DB
return newTransactioner(w.tomb.Context(nil), config)
}
func (w *WriteBacker) processor() error {
ctx := w.tomb.Context(nil)
transactioner := w.createTransactioner()
batchChan := w.batcher.Out()
dying := w.tomb.Dying()
for {
select {
case batch, ok := <-batchChan:
if !ok {
transactioner.Commit()
return nil
}
err := w.processBatch(ctx, batch, transactioner)
batch.Return()
if err != nil {
err = pkgErrors.Wrap(err, "(*WriteBacker).processor")
w.fieldLogger.WithError(err).WithFields(log.Fields{
"action": "stopping",
}).Error("Encountered error while processing batch")
transactioner.Close()
return err
}
case <-dying:
transactioner.Close()
return tomb.ErrDying
}
}
}
func (w *WriteBacker) processBatch(ctx context.Context, batch *filesBatch, transactioner *transactioner) error {
w.fieldLogger.Debug("Starting processing of batch")
checksums, fileIDs := w.collectFilesInBatch(batch)
// TODO pool files
var files []meda.File
files, err := transactioner.AppendFilesByIDs(files, ctx, fileIDs)
if err != nil {
return pkgErrors.Wrap(err, "(*WriteBacker).processBatch: fetch files from database")
}
for i := range files {
// Pointer to file in files, don't copy
file := &files[i]
checksum, ok := checksums[file.ID]
if !ok {
return pkgErrors.Wrapf(ErrFetchedUnexpectedFile, "(*WriteBacker).processBatch: process file with id %d", file.ID)
} else if checksum == nil {
return pkgErrors.Wrapf(ErrFetchedDuplicateFile, "(*WriteBacker).processBatch: process file with id %d", file.ID)
}
checksums[file.ID] = nil
if file.Checksum != nil && file.ToBeCompared == 1 && !bytes.Equal(checksum, file.Checksum) {
// info logging is handled by issueChecksumWarning
err = w.issueChecksumWarning(ctx, file, checksum, transactioner)
if err != nil {
// error logging is handled by issueChecksumWarning (escalating)
return pkgErrors.Wrapf(err, "(*WriteBacker).processBatch: process file with id %d", file.ID)
}
}
file.Checksum = checksum
file.LastRead.Uint64, file.LastRead.Valid = w.Config.RunID, true
file.ToBeCompared = 0
// ToBeRead is set in a separate loop
}
// Check that all files in the batch have been processed
for _, checksum := range checksums {
if checksum != nil {
return pkgErrors.Wrap(ErrFetchedInsufficientFiles, "(*WriteBacker).processBatch: check all files processed")
}
}
// Set ToBeRead to 0 and drop files that have already been written
for i := 0; i < len(files); {
if files[i].ToBeRead != 1 {
files[i] = files[len(files)-1]
files = files[:len(files)-1]
} else {
files[i].ToBeRead = 0
i++
}
}
err = transactioner.UpdateFilesChecksums(ctx, files, w.Config.RunID)
if err != nil {
return pkgErrors.Wrap(err, "(*WriteBacker).processBatch: update files in database")
}
w.fieldLogger.Debug("Finished processing of batch")
return nil
}
func (w *WriteBacker) collectFilesInBatch(batch *filesBatch) (map[uint64][]byte, []uint64) {
checksums := make(map[uint64][]byte)
// TODO use pool for file | {
select {
case <-w.workerPoolStopped:
// There is no way of receiving errors from the worker pool
return nil
case <-w.tomb.Dying():
w.fieldLogger.Debug("Stopping and waiting for worker pool as component is dying")
w.workerPool.Stop()
return tomb.ErrDying
}
} | identifier_body |
writebacker.go | Transactioner: transactionerConfig{
// Commit transaction once 20 write queries have been performed (as
// identified in checksum_write_back benchmark).
MaxTransactionSize: 20,
// Commit transaction after 2 minutes regardless of the number of
// queries performed.
MaxTransactionLifetime: 2 * time.Minute,
},
}
type WriteBacker struct {
Config *WriteBackerConfig
tomb *tomb.Tomb
batcher *batcher
endOfQueueSignal chan struct{}
workerPool *work.WorkerPool
workerPoolStopped chan struct{}
fieldLogger log.Interface
}
func NewWriteBacker(config *WriteBackerConfig) *WriteBacker {
return &WriteBacker{
Config: config,
}
} | w.tomb, _ = tomb.WithContext(ctx)
w.fieldLogger = w.Config.Logger.WithFields(log.Fields{
"run": w.Config.RunID,
"snapshot": w.Config.SnapshotName,
"filesystem": w.Config.FileSystemName,
"namespace": w.Config.Namespace,
"component": "workqueue.WriteBacker",
})
w.endOfQueueSignal = make(chan struct{})
w.workerPoolStopped = make(chan struct{})
w.batcher = w.createBatcher()
w.workerPool = w.createWorkerPool()
w.tomb.Go(func() error {
concurrency, err := w.Config.DB.ServerConcurrency()
if err != nil {
return pkgErrors.Wrap(err, "(*WriteBacker).Start")
}
w.batcher.Start(w.tomb.Context(nil))
for i := 0; i < concurrency; i++ {
w.tomb.Go(w.processor)
}
w.workerPool.Start()
w.tomb.Go(w.endOfQueueHandler)
w.tomb.Go(w.batcherManager)
w.tomb.Go(w.workerPoolManager)
return nil
})
}
func (w *WriteBacker) SignalEndOfQueue() {
close(w.endOfQueueSignal)
}
func (w *WriteBacker) SignalStop() {
w.tomb.Kill(lifecycle.ErrStopSignalled)
}
func (w *WriteBacker) Wait() error {
return w.tomb.Wait()
}
func (w *WriteBacker) Dead() <-chan struct{} {
return w.tomb.Dead()
}
func (w *WriteBacker) Err() error {
return w.tomb.Err()
}
func (w *WriteBacker) endOfQueueHandler() error {
select {
case <-w.endOfQueueSignal:
w.fieldLogger.Info("Received end-of-queue signal")
w.fieldLogger.Debug("Draining and stopping worker pool as end-of-queue is reached")
w.workerPool.Drain()
w.workerPool.Stop()
close(w.workerPoolStopped)
w.fieldLogger.Debug("Closing batcher as end-of-queue is reached")
err := w.batcher.Close(w.tomb.Context(nil))
if err != nil {
err = pkgErrors.Wrap(err, "(*WriteBacker).endOfQueueHandler")
w.fieldLogger.WithError(err).WithFields(log.Fields{
"action": "stopping",
}).Error("Encountered error while closing batcher")
return err
}
return nil
case <-w.tomb.Dying():
return tomb.ErrDying
}
}
func (w *WriteBacker) workerPoolManager() error {
select {
case <-w.workerPoolStopped:
// There is no way of receiving errors from the worker pool
return nil
case <-w.tomb.Dying():
w.fieldLogger.Debug("Stopping and waiting for worker pool as component is dying")
w.workerPool.Stop()
return tomb.ErrDying
}
}
func (w *WriteBacker) batcherManager() error {
select {
case <-w.batcher.Dead():
err := w.batcher.Err()
if err == lifecycle.ErrStopSignalled {
return nil
} else if err != nil {
err = pkgErrors.Wrap(err, "(*WriteBacker).batcherManager")
w.fieldLogger.WithError(err).WithFields(log.Fields{
"action": "stopping",
}).Error("Batcher died")
return err
}
return nil
case <-w.tomb.Dying():
w.fieldLogger.Debug("Waiting for batcher to finish as component is dying")
_ = w.batcher.Wait()
return tomb.ErrDying
}
}
func (w *WriteBacker) createBatcher() *batcher {
config := &batcherConfig{}
*config = w.Config.Batcher
return newBatcher(config)
}
func (w *WriteBacker) createWorkerPool() *work.WorkerPool {
workerPool := work.NewWorkerPool(writeBackerContext{}, 1, w.Config.Namespace, w.Config.Pool)
workerPool.Middleware(
func(wBCtx *writeBackerContext, job *work.Job, next work.NextMiddlewareFunc) error {
wBCtx.WriteBacker = w
return next()
},
)
jobName := workqueue.WriteBackJobName(w.Config.FileSystemName, w.Config.SnapshotName)
workerPool.Job(jobName, (*writeBackerContext).Process)
return workerPool
}
func (w *WriteBacker) createTransactioner() *transactioner {
config := &transactionerConfig{}
*config = w.Config.Transactioner
config.DB = w.Config.DB
return newTransactioner(w.tomb.Context(nil), config)
}
func (w *WriteBacker) processor() error {
ctx := w.tomb.Context(nil)
transactioner := w.createTransactioner()
batchChan := w.batcher.Out()
dying := w.tomb.Dying()
for {
select {
case batch, ok := <-batchChan:
if !ok {
transactioner.Commit()
return nil
}
err := w.processBatch(ctx, batch, transactioner)
batch.Return()
if err != nil {
err = pkgErrors.Wrap(err, "(*WriteBacker).processor")
w.fieldLogger.WithError(err).WithFields(log.Fields{
"action": "stopping",
}).Error("Encountered error while processing batch")
transactioner.Close()
return err
}
case <-dying:
transactioner.Close()
return tomb.ErrDying
}
}
}
func (w *WriteBacker) processBatch(ctx context.Context, batch *filesBatch, transactioner *transactioner) error {
w.fieldLogger.Debug("Starting processing of batch")
checksums, fileIDs := w.collectFilesInBatch(batch)
// TODO pool files
var files []meda.File
files, err := transactioner.AppendFilesByIDs(files, ctx, fileIDs)
if err != nil {
return pkgErrors.Wrap(err, "(*WriteBacker).processBatch: fetch files from database")
}
for i := range files {
// Pointer to file in files, don't copy
file := &files[i]
checksum, ok := checksums[file.ID]
if !ok {
return pkgErrors.Wrapf(ErrFetchedUnexpectedFile, "(*WriteBacker).processBatch: process file with id %d", file.ID)
} else if checksum == nil {
return pkgErrors.Wrapf(ErrFetchedDuplicateFile, "(*WriteBacker).processBatch: process file with id %d", file.ID)
}
checksums[file.ID] = nil
if file.Checksum != nil && file.ToBeCompared == 1 && !bytes.Equal(checksum, file.Checksum) {
// info logging is handled by issueChecksumWarning
err = w.issueChecksumWarning(ctx, file, checksum, transactioner)
if err != nil {
// error logging is handled by issueChecksumWarning (escalating)
return pkgErrors.Wrapf(err, "(*WriteBacker).processBatch: process file with id %d", file.ID)
}
}
file.Checksum = checksum
file.LastRead.Uint64, file.LastRead.Valid = w.Config.RunID, true
file.ToBeCompared = 0
// ToBeRead is set in a separate loop
}
// Check that all files in the batch have been processed
for _, checksum := range checksums {
if checksum != nil {
return pkgErrors.Wrap(ErrFetchedInsufficientFiles, "(*WriteBacker).processBatch: check all files processed")
}
}
// Set ToBeRead to 0 and drop files that have already been written
for i := 0; i < len(files); {
if files[i].ToBeRead != 1 {
files[i] = files[len(files)-1]
files = files[:len(files)-1]
} else {
files[i].ToBeRead = 0
i++
}
}
err = transactioner.UpdateFilesChecksums(ctx, files, w.Config.RunID)
if err != nil {
return pkgErrors.Wrap(err, "(*WriteBacker).processBatch: update files in database")
}
w.fieldLogger.Debug("Finished processing of batch")
return nil
}
func (w *WriteBacker) collectFilesInBatch(batch *filesBatch) (map[uint64][]byte, []uint64) {
checksums := make(map[uint64][]byte)
// TODO use pool for fileIDs |
func (w *WriteBacker) Start(ctx context.Context) { | random_line_split |
writebacker.go | ")
w.fieldLogger.WithError(err).WithFields(log.Fields{
"action": "stopping",
}).Error("Encountered error while closing batcher")
return err
}
return nil
case <-w.tomb.Dying():
return tomb.ErrDying
}
}
func (w *WriteBacker) workerPoolManager() error {
select {
case <-w.workerPoolStopped:
// There is no way of receiving errors from the worker pool
return nil
case <-w.tomb.Dying():
w.fieldLogger.Debug("Stopping and waiting for worker pool as component is dying")
w.workerPool.Stop()
return tomb.ErrDying
}
}
func (w *WriteBacker) batcherManager() error {
select {
case <-w.batcher.Dead():
err := w.batcher.Err()
if err == lifecycle.ErrStopSignalled {
return nil
} else if err != nil {
err = pkgErrors.Wrap(err, "(*WriteBacker).batcherManager")
w.fieldLogger.WithError(err).WithFields(log.Fields{
"action": "stopping",
}).Error("Batcher died")
return err
}
return nil
case <-w.tomb.Dying():
w.fieldLogger.Debug("Waiting for batcher to finish as component is dying")
_ = w.batcher.Wait()
return tomb.ErrDying
}
}
func (w *WriteBacker) createBatcher() *batcher {
config := &batcherConfig{}
*config = w.Config.Batcher
return newBatcher(config)
}
func (w *WriteBacker) createWorkerPool() *work.WorkerPool {
workerPool := work.NewWorkerPool(writeBackerContext{}, 1, w.Config.Namespace, w.Config.Pool)
workerPool.Middleware(
func(wBCtx *writeBackerContext, job *work.Job, next work.NextMiddlewareFunc) error {
wBCtx.WriteBacker = w
return next()
},
)
jobName := workqueue.WriteBackJobName(w.Config.FileSystemName, w.Config.SnapshotName)
workerPool.Job(jobName, (*writeBackerContext).Process)
return workerPool
}
func (w *WriteBacker) createTransactioner() *transactioner {
config := &transactionerConfig{}
*config = w.Config.Transactioner
config.DB = w.Config.DB
return newTransactioner(w.tomb.Context(nil), config)
}
func (w *WriteBacker) processor() error {
ctx := w.tomb.Context(nil)
transactioner := w.createTransactioner()
batchChan := w.batcher.Out()
dying := w.tomb.Dying()
for {
select {
case batch, ok := <-batchChan:
if !ok {
transactioner.Commit()
return nil
}
err := w.processBatch(ctx, batch, transactioner)
batch.Return()
if err != nil {
err = pkgErrors.Wrap(err, "(*WriteBacker).processor")
w.fieldLogger.WithError(err).WithFields(log.Fields{
"action": "stopping",
}).Error("Encountered error while processing batch")
transactioner.Close()
return err
}
case <-dying:
transactioner.Close()
return tomb.ErrDying
}
}
}
func (w *WriteBacker) processBatch(ctx context.Context, batch *filesBatch, transactioner *transactioner) error {
w.fieldLogger.Debug("Starting processing of batch")
checksums, fileIDs := w.collectFilesInBatch(batch)
// TODO pool files
var files []meda.File
files, err := transactioner.AppendFilesByIDs(files, ctx, fileIDs)
if err != nil {
return pkgErrors.Wrap(err, "(*WriteBacker).processBatch: fetch files from database")
}
for i := range files {
// Pointer to file in files, don't copy
file := &files[i]
checksum, ok := checksums[file.ID]
if !ok {
return pkgErrors.Wrapf(ErrFetchedUnexpectedFile, "(*WriteBacker).processBatch: process file with id %d", file.ID)
} else if checksum == nil {
return pkgErrors.Wrapf(ErrFetchedDuplicateFile, "(*WriteBacker).processBatch: process file with id %d", file.ID)
}
checksums[file.ID] = nil
if file.Checksum != nil && file.ToBeCompared == 1 && !bytes.Equal(checksum, file.Checksum) {
// info logging is handled by issueChecksumWarning
err = w.issueChecksumWarning(ctx, file, checksum, transactioner)
if err != nil {
// error logging is handled by issueChecksumWarning (escalating)
return pkgErrors.Wrapf(err, "(*WriteBacker).processBatch: process file with id %d", file.ID)
}
}
file.Checksum = checksum
file.LastRead.Uint64, file.LastRead.Valid = w.Config.RunID, true
file.ToBeCompared = 0
// ToBeRead is set in a separate loop
}
// Check that all files in the batch have been processed
for _, checksum := range checksums {
if checksum != nil {
return pkgErrors.Wrap(ErrFetchedInsufficientFiles, "(*WriteBacker).processBatch: check all files processed")
}
}
// Set ToBeRead to 0 and drop files that have already been written
for i := 0; i < len(files); {
if files[i].ToBeRead != 1 {
files[i] = files[len(files)-1]
files = files[:len(files)-1]
} else {
files[i].ToBeRead = 0
i++
}
}
err = transactioner.UpdateFilesChecksums(ctx, files, w.Config.RunID)
if err != nil {
return pkgErrors.Wrap(err, "(*WriteBacker).processBatch: update files in database")
}
w.fieldLogger.Debug("Finished processing of batch")
return nil
}
func (w *WriteBacker) collectFilesInBatch(batch *filesBatch) (map[uint64][]byte, []uint64) {
checksums := make(map[uint64][]byte)
// TODO use pool for fileIDs
fileIDs := make([]uint64, 0, len(batch.Files))
for i := range batch.Files {
file := &batch.Files[i]
if checksum, ok := checksums[file.ID]; ok {
w.fieldLogger.WithFields(log.Fields{
"action": "skipping",
"file_id": file.ID,
"first_checksum": checksum,
"subsequent_checksum": file.Checksum,
}).Warn("Received same file multiple times in job batch, dropping all but first encounter")
continue
}
checksums[file.ID] = file.Checksum
fileIDs = append(fileIDs, file.ID)
}
return checksums, fileIDs
}
func (w *WriteBacker) issueChecksumWarning(ctx context.Context, file *meda.File, checksum []byte, transactioner *transactioner) error {
fieldLogger := w.fieldLogger.WithFields(log.Fields{
"file_id": file.ID,
"file_path": file.Path,
"file_modification_time": file.ModificationTime,
"file_size": file.FileSize,
"expected_checksum": hex.EncodeToString(file.Checksum),
"actual_checksum": hex.EncodeToString(checksum),
"file_last_read": file.LastRead.Uint64,
})
fieldLogger.Info("Discovered checksum mismatch, writing checksum warning")
checksumWarning := meda.ChecksumWarning{
FileID: file.ID,
Path: file.Path,
ModificationTime: file.ModificationTime,
FileSize: file.FileSize,
ExpectedChecksum: file.Checksum,
ActualChecksum: checksum,
Discovered: w.Config.RunID,
LastRead: file.LastRead.Uint64,
Created: meda.Time(time.Now()),
}
err := transactioner.InsertChecksumWarning(ctx, &checksumWarning)
if err != nil {
err = pkgErrors.Wrap(err, "(*WriteBacker).issueChecksumWarning")
fieldLogger.WithError(err).WithFields(log.Fields{
"action": "escalating",
}).Error("Encountered error while issuing checksum warning")
}
return err
}
type writeBackerContext struct {
WriteBacker *WriteBacker
}
func (w *writeBackerContext) Process(job *work.Job) error {
ctx := w.WriteBacker.tomb.Context(nil)
// TODO pool
writeBackPack := workqueue.WriteBackPack{}
err := writeBackPack.FromJobArgs(job.Args)
if err != nil | {
err = pkgErrors.Wrap(err, "(*writeBackerContext).Process: unmarshal WriteBackPack from job")
w.WriteBacker.fieldLogger.WithError(err).WithFields(log.Fields{
"action": "stopping",
"args": job.Args,
"job_name": job.Name,
}).Error("Encountered error while unmarshaling WriteBackPack from job")
w.WriteBacker.tomb.Kill(err)
// return nil as to not re-queue the job
return nil
} | conditional_block |
|
find_panics.py | iscv64-unknown-elf-objdump"
# TODO: For all functions below the initial batch, it would likely be preferable to
# automatically populate the list with additional functions in the core library using
# debug info. For now, however, I do this manually.
panic_functions = [
"expect_failed",
"unwrap_failed",
"panic_bounds_check",
"slice_index_order_fail",
"slice_end_index_len_fail",
"slice_start_index_len_fail",
"slice17len_mismatch_fail",
"str16slice_error_fail",
"copy_from_slice17len_mismatch_fail",
"copy_from_slice17",
"panicking5panic",
# below are functions I have manually traced up from the above, more "core" panics, on a riscv binary with a low inline threshold
"6unwrap17",
"6expect17",
"11copy_within17",
"core..fmt..builders..PadAdapter", # calls slice_error_fail
"11copy_within17", # calls panicking::panic
"write_char", # calls PadAdapter one above
"write_str", # calls write_char
"printable5check", # calls slice_index_order_fail
"char$u20$as$u20$core..fmt..Debug", # calls printable5check
"GenericRadix7fmt_int", # calls slice_start_index_len_fail
# below are functions I manually traced on an arm binary,
# with a somewhat higher inline threshold.
"10unwrap_err17h6",
"13is_whitespace17",
"$u20$core..slice..index..SliceIndex$LT",
"core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter",
"_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE",
"_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE",
]
# Pre-compiled regex lookups
dw_at_file_re = re.compile(r""".*(?:DW_AT_call_file|DW_AT_decl_file).*""")
dw_at_line_re = re.compile(r""".*(?:DW_AT_call_line|DW_AT_decl_line).*""")
line_info_re = re.compile(r""".*Line info.*""")
abstract_origin_re = re.compile(r""".*DW_AT_abstract_origin.*""")
dw_at_linkage_name_re = re.compile(r""".*DW_AT_linkage_name.*""")
dw_at_name_re = re.compile(r""".*DW_AT_name.*""")
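# For reference, the dwarfdump lines these regexes target look roughly like the
# following (exact formatting can vary between llvm-dwarfdump versions), which
# is why the helpers below split on '"' and on '(' / ')':
#   DW_AT_call_file ("/path/to/some/source.rs")
#   DW_AT_call_line (123)
#   DW_AT_abstract_origin (0x0000abcd "_ZN...mangled_function_name...E")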
def matches_panic_funcs(name):
"""If the passed name contains one of the known panic_functions,
return the match
"""
for func in panic_functions:
if func in name:
return func
return ""
def linkage_or_origin_all_parents(elf, addr, linkage=False):
|
def any_origin_matches_panic_func(elf, addr):
"""returns name if any origin for the passed addr matches one
of the functions in the panic_functions array
"""
origins = linkage_or_origin_all_parents(elf, addr)
for origin in origins:
name = matches_panic_funcs(origin)
if name:
return name
return ""
def any_linkage_matches_panic_func(elf, addr):
"""returns True + name if any linkage for the passed addr matches one
of the functions in the panic_functions array
"""
linkages = linkage_or_origin_all_parents(elf, addr, True)
for linkage in linkages:
name = matches_panic_funcs(linkage)
if name:
return name
return ""
def check_for_source_in_parent(elf, addr):
"""Takes in a dwarfdump lookup including parents of the source DWARF
location, returns the first parent with a call file not in
the core library. If found, this often indicates the source of the panic
in the Tock source code.
"""
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True
)
dwarfdump = result.stdout
matches = re.findall(dw_at_file_re, dwarfdump)
def getFile(line):
return line.strip().split('"')[1]
source_files = list(map(getFile, matches))
for (i, f) in enumerate(source_files[::-1]):
if "/core/" not in f:
line_matches = re.findall(dw_at_line_re, dwarfdump)
def getLine(line):
return line.strip().split("(")[1].split(")")[0]
source_lines = list(map(getLine, line_matches))
source_line = source_lines[::-1][i]
return (f, source_line)
return ("", "")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("ELF", help="ELF file for analysis")
parser.add_argument(
"--verbose",
"-v",
action="store_true",
help="Output additional DWARF info for each panic location in the binary",
)
parser.add_argument("--riscv", action="store_true", help="Use risc-v based objdump")
return parser.parse_args()
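# Example invocations (illustrative; the ELF paths are placeholders):
#   python3 find_panics.py target/thumbv7em-none-eabi/release/kernel.elf -v
#   python3 find_panics.py target/riscv32imc-unknown-none-elf/release/kernel.elf --riscv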
# Find all addresses that panic, and get basic dwarf info on those addresses
def find_all_panics(objdump, elf, is_riscv):
panic_list = []
within_core_panic_list = []
no_info_panic_list = []
result = subprocess.run((objdump, "-d", elf), capture_output=True, text=True)
objdump_out = result.stdout
for function in panic_functions:
function_re = re.compile(".*:.*#.*" + function + ".*")
if not is_riscv:
# Arm-none-eabi-objdump uses ';' for comments instead of '#'
function_re = re.compile(".*:.*<.*" + function + ".*")
# TODO: arm elfs include loads of offsets from symbols in such a way that these lines
# are matched by this regex. In general, these loads occur within the instruction stream
# associated with the symbol at hand, and will usually be excluded by logic later in
# this function. This leads to `within_core_panic_list` and `no_info_panic_list`
# containing more "panics" than when analyzing a risc-v binary. We could fix this
# by matching *only* on functions with instructions that actually jump to a new symbol,
# but this would require a list of such instructions for each architecture. However
# as written it actually lets us identify panics which are jumped to via addresses
# stored in registers, which may actually catch additional valid panics.
matches = re.findall(function_re, objdump_out)
def getAddr(line):
return line.strip().split(":")[0]
addrs = list(map(getAddr, matches))
for addr in addrs:
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, elf), capture_output=True, text=True
)
dwarfdump = result.stdout
dw_at_file = re.search(dw_at_file_re, dwarfdump)
dw_at_line = re.search(dw_at_line_re, dwarfdump)
line_info = re.search(line_info_re, dwarfdump)
abstract_origin = re.search(abstract_origin_re, dwarfdump)
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
file_string = ""
line_string = ""
line_info_string = ""
abstract_origin_string = ""
linkage_name_string = ""
if dw_at_file:
file_string = dw_at_file.group(0).strip()
line_string = dw_at_line.group(0).strip()
panicinfo = {}
panicinfo["addr"] = addr
panicinfo["function"] = function
if line_info:
line_info_string = line_info.group(0).strip()
panicinfo["line_info"] = line_info_string
if abstract_origin:
abstract_origin_string = abstract_origin.group(0).strip()
if linkage_name:
linkage_name_string = linkage_name.group(0).strip()
if "DW_AT_call_file" in file_string and "DW_AT_decl_file" in file_string:
| """Returns a list of the abstract origin or linkage of all parents of the dwarf
location for the passed address
"""
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True
)
dwarfdump = result.stdout
regex = abstract_origin_re
if linkage:
regex = dw_at_linkage_name_re
matches = re.findall(regex, dwarfdump)
def getFunction(line):
return line.strip().split('"')[1]
origins = list(map(getFunction, matches))
return origins | identifier_body |
find_panics.py | u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE",
]
# Pre-compiled regex lookups
dw_at_file_re = re.compile(r""".*(?:DW_AT_call_file|DW_AT_decl_file).*""")
dw_at_line_re = re.compile(r""".*(?:DW_AT_call_line|DW_AT_decl_line).*""")
line_info_re = re.compile(r""".*Line info.*""")
abstract_origin_re = re.compile(r""".*DW_AT_abstract_origin.*""")
dw_at_linkage_name_re = re.compile(r""".*DW_AT_linkage_name.*""")
dw_at_name_re = re.compile(r""".*DW_AT_name.*""")
def matches_panic_funcs(name):
"""If the passed name contains one of the known panic_functions,
return the match
"""
for func in panic_functions:
if func in name:
return func
return ""
def linkage_or_origin_all_parents(elf, addr, linkage=False):
"""Returns a list of the abstract origin or linkage of all parents of the dwarf
location for the passed address
"""
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True
)
dwarfdump = result.stdout
regex = abstract_origin_re
if linkage:
regex = dw_at_linkage_name_re
matches = re.findall(regex, dwarfdump)
def getFunction(line):
return line.strip().split('"')[1]
origins = list(map(getFunction, matches))
return origins
def any_origin_matches_panic_func(elf, addr):
"""returns name if any origin for the passed addr matches one
of the functions in the panic_functions array
"""
origins = linkage_or_origin_all_parents(elf, addr)
for origin in origins:
name = matches_panic_funcs(origin)
if name:
return name
return ""
def any_linkage_matches_panic_func(elf, addr):
"""returns True + name if any linkage for the passed addr matches one
of the functions in the panic_functions array
"""
linkages = linkage_or_origin_all_parents(elf, addr, True)
for linkage in linkages:
name = matches_panic_funcs(linkage)
if name:
return name
return ""
def check_for_source_in_parent(elf, addr):
"""Takes in a dwarfdump lookup including parents of the source DWARF
location, returns the first parent with a call file not in
the core library. If found, this often indicates the source of the panic
in the Tock source code.
"""
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True
)
dwarfdump = result.stdout
matches = re.findall(dw_at_file_re, dwarfdump)
def getFile(line):
return line.strip().split('"')[1]
source_files = list(map(getFile, matches))
for (i, f) in enumerate(source_files[::-1]):
if "/core/" not in f:
line_matches = re.findall(dw_at_line_re, dwarfdump)
def getLine(line):
return line.strip().split("(")[1].split(")")[0]
source_lines = list(map(getLine, line_matches))
source_line = source_lines[::-1][i]
return (f, source_line)
return ("", "")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("ELF", help="ELF file for analysis")
parser.add_argument(
"--verbose",
"-v",
action="store_true",
help="Output additional DWARF info for each panic location in the binary",
)
parser.add_argument("--riscv", action="store_true", help="Use risc-v based objdump")
return parser.parse_args()
# Find all addresses that panic, and get basic dwarf info on those addresses
def find_all_panics(objdump, elf, is_riscv):
panic_list = []
within_core_panic_list = []
no_info_panic_list = []
result = subprocess.run((objdump, "-d", elf), capture_output=True, text=True)
objdump_out = result.stdout
for function in panic_functions:
function_re = re.compile(".*:.*#.*" + function + ".*")
if not is_riscv:
# Arm-none-eabi-objdump uses ';' for comments instead of '#'
function_re = re.compile(".*:.*<.*" + function + ".*")
# TODO: arm elfs include loads of offsets from symbols in such a way that these lines
# are matched by this regex. In general, these loads occur within the instruction stream
# associated with the symbol at hand, and will usually be excluded by logic later in
# this function. This leads to `within_core_panic_list` and `no_info_panic_list`
# containing more "panics" than when analyzing a risc-v binary. We could fix this
# by matching *only* on functions with instructions that actually jump to a new symbol,
# but this would require a list of such instructions for each architecture. However
# as written it actually lets us identify panics which are jumped to via addresses
# stored in registers, which may actually catch additional valid panics.
matches = re.findall(function_re, objdump_out)
def getAddr(line):
return line.strip().split(":")[0]
addrs = list(map(getAddr, matches))
for addr in addrs:
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, elf), capture_output=True, text=True
)
dwarfdump = result.stdout
dw_at_file = re.search(dw_at_file_re, dwarfdump)
dw_at_line = re.search(dw_at_line_re, dwarfdump)
line_info = re.search(line_info_re, dwarfdump)
abstract_origin = re.search(abstract_origin_re, dwarfdump)
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
file_string = ""
line_string = ""
line_info_string = ""
abstract_origin_string = ""
linkage_name_string = ""
if dw_at_file:
file_string = dw_at_file.group(0).strip()
line_string = dw_at_line.group(0).strip()
panicinfo = {}
panicinfo["addr"] = addr
panicinfo["function"] = function
if line_info:
line_info_string = line_info.group(0).strip()
panicinfo["line_info"] = line_info_string
if abstract_origin:
abstract_origin_string = abstract_origin.group(0).strip()
if linkage_name:
linkage_name_string = linkage_name.group(0).strip()
if "DW_AT_call_file" in file_string and "DW_AT_decl_file" in file_string:
raise RuntimeError("I misunderstand DWARF")
if "DW_AT_call_file" in file_string or "DW_AT_decl_file" in file_string:
filename = file_string.split('"')[1]
line_num = line_string.split("(")[1].split(")")[0]
if "DW_AT_call_file" in file_string:
panicinfo["call_file"] = filename
panicinfo["call_line"] = line_num
if "DW_AT_decl_file" in file_string:
panicinfo["decl_file"] = filename
panicinfo["decl_line"] = line_num
if not "/core/" in filename:
if not "closure" in abstract_origin_string:
panicinfo["best_guess_source"] = "call/decl"
else:
panicinfo["best_guess_source"] = "call-closure-line-info"
panic_list.append(panicinfo)
continue
else: # 'core' in filename
(parent_file, parent_line) = check_for_source_in_parent(elf, addr)
if parent_file:
panicinfo["parent_call_file"] = parent_file
panicinfo["parent_call_line"] = parent_line
panicinfo["best_guess_source"] = "parent"
panic_list.append(panicinfo)
continue
elif not abstract_origin and not linkage_name:
no_info_panic_list.append(panicinfo)
continue
elif abstract_origin:
if "core" in abstract_origin_string:
name = matches_panic_funcs(abstract_origin_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
name2 = any_origin_matches_panic_func(elf, addr)
name3 = any_linkage_matches_panic_func(elf, addr)
if name2:
within_core_panic_list.append(panicinfo)
continue
elif name3:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
continue
elif "closure" in abstract_origin_string:
# not in core, in closure, line info is probably sufficient
panicinfo["best_guess_source"] = "lineinfo"
panic_list.append(panicinfo)
continue
else:
# i have not seen this happen -- core in file, not closure, origin not core
| raise RuntimeError("Unhandled") | conditional_block |
|
find_panics.py | iscv64-unknown-elf-objdump"
# TODO: For all functions below the initial batch, it would likely be preferable to
# automatically populate the list with additional functions in the core library using
# debug info. For now, however, I do this manually.
panic_functions = [
"expect_failed",
"unwrap_failed",
"panic_bounds_check",
"slice_index_order_fail",
"slice_end_index_len_fail",
"slice_start_index_len_fail",
"slice17len_mismatch_fail",
"str16slice_error_fail",
"copy_from_slice17len_mismatch_fail",
"copy_from_slice17",
"panicking5panic",
# below are functions I have manually traced up from the above, more "core" panics, on a riscv binary with a low inline threshold
"6unwrap17",
"6expect17",
"11copy_within17",
"core..fmt..builders..PadAdapter", # calls slice_error_fail
"11copy_within17", # calls panicking::panic
"write_char", # calls PadAdapter one above
"write_str", # calls write_char
"printable5check", # calls slice_index_order_fail
"char$u20$as$u20$core..fmt..Debug", # calls printable5check
"GenericRadix7fmt_int", # calls slice_start_index_len_fail
# below are functions I manually traced on an arm binary,
# with a somewhat higher inline threshold.
"10unwrap_err17h6",
"13is_whitespace17",
"$u20$core..slice..index..SliceIndex$LT",
"core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter",
"_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE",
"_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE",
]
# Pre-compiled regex lookups
dw_at_file_re = re.compile(r""".*(?:DW_AT_call_file|DW_AT_decl_file).*""")
dw_at_line_re = re.compile(r""".*(?:DW_AT_call_line|DW_AT_decl_line).*""")
line_info_re = re.compile(r""".*Line info.*""")
abstract_origin_re = re.compile(r""".*DW_AT_abstract_origin.*""")
dw_at_linkage_name_re = re.compile(r""".*DW_AT_linkage_name.*""")
dw_at_name_re = re.compile(r""".*DW_AT_name.*""")
def matches_panic_funcs(name):
"""If the passed name contains one of the known panic_functions,
return the match
"""
for func in panic_functions:
if func in name:
return func
return ""
def | (elf, addr, linkage=False):
"""Returns a list of the abstract origin or linkage of all parents of the dwarf
location for the passed address
"""
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True
)
dwarfdump = result.stdout
regex = abstract_origin_re
if linkage:
regex = dw_at_linkage_name_re
matches = re.findall(regex, dwarfdump)
def getFunction(line):
return line.strip().split('"')[1]
origins = list(map(getFunction, matches))
return origins
def any_origin_matches_panic_func(elf, addr):
"""returns name if any origin for the passed addr matches one
of the functions in the panic_functions array
"""
origins = linkage_or_origin_all_parents(elf, addr)
for origin in origins:
name = matches_panic_funcs(origin)
if name:
return name
return ""
def any_linkage_matches_panic_func(elf, addr):
"""returns True + name if any linkage for the passed addr matches one
of the functions in the panic_functions array
"""
linkages = linkage_or_origin_all_parents(elf, addr, True)
for linkage in linkages:
name = matches_panic_funcs(linkage)
if name:
return name
return ""
def check_for_source_in_parent(elf, addr):
"""Takes in a dwarfdump lookup including parents of the source DWARF
location, returns the first parent with a call file not in
the core library. If found, this often indicates the source of the panic
in the Tock source code.
"""
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True
)
dwarfdump = result.stdout
matches = re.findall(dw_at_file_re, dwarfdump)
def getFile(line):
return line.strip().split('"')[1]
source_files = list(map(getFile, matches))
for (i, f) in enumerate(source_files[::-1]):
if "/core/" not in f:
line_matches = re.findall(dw_at_line_re, dwarfdump)
def getLine(line):
return line.strip().split("(")[1].split(")")[0]
source_lines = list(map(getLine, line_matches))
source_line = source_lines[::-1][i]
return (f, source_line)
return ("", "")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("ELF", help="ELF file for analysis")
parser.add_argument(
"--verbose",
"-v",
action="store_true",
help="Output additional DWARF info for each panic location in the binary",
)
parser.add_argument("--riscv", action="store_true", help="Use risc-v based objdump")
return parser.parse_args()
# Find all addresses that panic, and get basic dwarf info on those addresses
def find_all_panics(objdump, elf, is_riscv):
panic_list = []
within_core_panic_list = []
no_info_panic_list = []
result = subprocess.run((objdump, "-d", elf), capture_output=True, text=True)
objdump_out = result.stdout
for function in panic_functions:
function_re = re.compile(".*:.*#.*" + function + ".*")
if not is_riscv:
# Arm-none-eabi-objdump uses ';' for comments instead of '#'
function_re = re.compile(".*:.*<.*" + function + ".*")
# TODO: arm elfs include loads of offsets from symbols in such a way that these lines
# are matched by this regex. In general, these loads occur within the instruction stream
# associated with the symbol at hand, and will usually be excluded by logic later in
# this function. This leads to `within_core_panic_list` and `no_info_panic_list`
# containing more "panics" than when analyzing a risc-v binary. We could fix this
# by matching *only* on functions with instructions that actually jump to a new symbol,
# but this would require a list of such instructions for each architecture. However
# as written it actually lets us identify panics which are jumped to via addresses
# stored in registers, which may actually catch additional valid panics.
matches = re.findall(function_re, objdump_out)
def getAddr(line):
return line.strip().split(":")[0]
addrs = list(map(getAddr, matches))
for addr in addrs:
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, elf), capture_output=True, text=True
)
dwarfdump = result.stdout
dw_at_file = re.search(dw_at_file_re, dwarfdump)
dw_at_line = re.search(dw_at_line_re, dwarfdump)
line_info = re.search(line_info_re, dwarfdump)
abstract_origin = re.search(abstract_origin_re, dwarfdump)
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
file_string = ""
line_string = ""
line_info_string = ""
abstract_origin_string = ""
linkage_name_string = ""
if dw_at_file:
file_string = dw_at_file.group(0).strip()
line_string = dw_at_line.group(0).strip()
panicinfo = {}
panicinfo["addr"] = addr
panicinfo["function"] = function
if line_info:
line_info_string = line_info.group(0).strip()
panicinfo["line_info"] = line_info_string
if abstract_origin:
abstract_origin_string = abstract_origin.group(0).strip()
if linkage_name:
linkage_name_string = linkage_name.group(0).strip()
if "DW_AT_call_file" in file_string and "DW_AT_decl_file" in file_string | linkage_or_origin_all_parents | identifier_name |
find_panics.py | import re
import subprocess
import sys
if platform.system() == 'Darwin':
DWARFDUMP = "dwarfdump"
elif platform.system() == 'Linux':
DWARFDUMP = "llvm-dwarfdump"
else:
raise NotImplementedError("Unknown platform")
# Note: In practice, GCC objdumps are better at symbol resolution than LLVM objdump
ARM_OBJDUMP = "arm-none-eabi-objdump"
RISCV_OBJDUMP = "riscv64-unknown-elf-objdump"
# TODO: For all functions below the initial batch, it would likely be preferable to
# automatically populate the list with additional functions in the core library using
# debug info. For now, however, I do this manually.
panic_functions = [
"expect_failed",
"unwrap_failed",
"panic_bounds_check",
"slice_index_order_fail",
"slice_end_index_len_fail",
"slice_start_index_len_fail",
"slice17len_mismatch_fail",
"str16slice_error_fail",
"copy_from_slice17len_mismatch_fail",
"copy_from_slice17",
"panicking5panic",
# below are functions I have manually traced up from the above, more "core" panics, on a riscv binary with a low inline threshold
"6unwrap17",
"6expect17",
"11copy_within17",
"core..fmt..builders..PadAdapter", # calls slice_error_fail
"11copy_within17", # calls panicking::panic
"write_char", # calls PadAdapter one above
"write_str", # calls write_char
"printable5check", # calls slice_index_order_fail
"char$u20$as$u20$core..fmt..Debug", # calls printable5check
"GenericRadix7fmt_int", # calls slice_start_index_len_fail
# below are functions I manually traced on an arm binary,
# with a somewhat higher inline threshold.
"10unwrap_err17h6",
"13is_whitespace17",
"$u20$core..slice..index..SliceIndex$LT",
"core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter",
"_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE",
"_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE",
]
# Pre-compiled regex lookups
dw_at_file_re = re.compile(r""".*(?:DW_AT_call_file|DW_AT_decl_file).*""")
dw_at_line_re = re.compile(r""".*(?:DW_AT_call_line|DW_AT_decl_line).*""")
line_info_re = re.compile(r""".*Line info.*""")
abstract_origin_re = re.compile(r""".*DW_AT_abstract_origin.*""")
dw_at_linkage_name_re = re.compile(r""".*DW_AT_linkage_name.*""")
dw_at_name_re = re.compile(r""".*DW_AT_name.*""")
def matches_panic_funcs(name):
"""If the passed name contains one of the known panic_functions,
return the match
"""
for func in panic_functions:
if func in name:
return func
return ""
def linkage_or_origin_all_parents(elf, addr, linkage=False):
"""Returns a list of the abstract origin or linkage of all parents of the dwarf
location for the passed address
"""
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True
)
dwarfdump = result.stdout
regex = abstract_origin_re
if linkage:
regex = dw_at_linkage_name_re
matches = re.findall(regex, dwarfdump)
def getFunction(line):
return line.strip().split('"')[1]
origins = list(map(getFunction, matches))
return origins
def any_origin_matches_panic_func(elf, addr):
"""returns name if any origin for the passed addr matches one
of the functions in the panic_functions array
"""
origins = linkage_or_origin_all_parents(elf, addr)
for origin in origins:
name = matches_panic_funcs(origin)
if name:
return name
return ""
def any_linkage_matches_panic_func(elf, addr):
"""returns True + name if any linkage for the passed addr matches one
of the functions in the panic_functions array
"""
linkages = linkage_or_origin_all_parents(elf, addr, True)
for linkage in linkages:
name = matches_panic_funcs(linkage)
if name:
return name
return ""
def check_for_source_in_parent(elf, addr):
"""Takes in a dwarfdump lookup including parents of the source DWARF
location, returns the first parent with a call file not in
the core library. If found, this often indicates the source of the panic
in the Tock source code.
"""
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True
)
dwarfdump = result.stdout
matches = re.findall(dw_at_file_re, dwarfdump)
def getFile(line):
return line.strip().split('"')[1]
source_files = list(map(getFile, matches))
for (i, f) in enumerate(source_files[::-1]):
if "/core/" not in f:
line_matches = re.findall(dw_at_line_re, dwarfdump)
def getLine(line):
return line.strip().split("(")[1].split(")")[0]
source_lines = list(map(getLine, line_matches))
source_line = source_lines[::-1][i]
return (f, source_line)
return ("", "")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("ELF", help="ELF file for analysis")
parser.add_argument(
"--verbose",
"-v",
action="store_true",
help="Output additional DWARF info for each panic location in the binary",
)
parser.add_argument("--riscv", action="store_true", help="Use risc-v based objdump")
return parser.parse_args()
# Find all addresses that panic, and get basic dwarf info on those addresses
def find_all_panics(objdump, elf, is_riscv):
panic_list = []
within_core_panic_list = []
no_info_panic_list = []
result = subprocess.run((objdump, "-d", elf), capture_output=True, text=True)
objdump_out = result.stdout
for function in panic_functions:
function_re = re.compile(".*:.*#.*" + function + ".*")
if not is_riscv:
# Arm-none-eabi-objdump uses ';' for comments instead of '#'
function_re = re.compile(".*:.*<.*" + function + ".*")
# TODO: arm elfs include loads of offsets from symbols in such a way that these lines
# are matched by this regex. In general, these loads occur within the instruction stream
# associated with the symbol at hand, and will usually be excluded by logic later in
# this function. This leads to `within_core_panic_list` and `no_info_panic_list`
# containing more "panics" than when analyzing a risc-v binary. We could fix this
# by matching *only* on functions with instructions that actually jump to a new symbol,
# but this would require a list of such instructions for each architecture. However
# as written it actually lets us identify panics which are jumped to via addresses
# stored in registers, which may actually catch additional valid panics.
matches = re.findall(function_re, objdump_out)
def getAddr(line):
return line.strip().split(":")[0]
addrs = list(map(getAddr, matches))
for addr in addrs:
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, elf), capture_output=True, text=True
)
dwarfdump = result.stdout
dw_at_file = re.search(dw_at_file_re, dwarfdump)
dw_at_line = re.search(dw_at_line_re, dwarfdump)
line_info = re.search(line_info_re, dwarfdump)
abstract_origin = re.search(abstract_origin_re, dwarfdump)
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
file_string = ""
line_string = ""
line_info_string = ""
abstract_origin_string = ""
linkage_name_string = ""
if dw_at_file:
file_string = dw_at_file.group(0).strip()
line_string = dw_at_line.group(0).strip()
panicinfo = | import argparse
import platform | random_line_split |
|
topics_and_partitions.go | itionsData) loadTopic(t string) *topicPartitionsData |
// A helper type mapping topics to their partitions that can be updated
// atomically.
type topicsPartitions struct {
v atomic.Value // topicsPartitionsData (map[string]*topicPartitions)
}
func (t *topicsPartitions) load() topicsPartitionsData {
if t == nil {
return nil
}
return t.v.Load().(topicsPartitionsData)
}
func (t *topicsPartitions) storeData(d topicsPartitionsData) { t.v.Store(d) }
func (t *topicsPartitions) storeTopics(topics []string) { t.v.Store(t.ensureTopics(topics)) }
func (t *topicsPartitions) clone() topicsPartitionsData {
current := t.load()
clone := make(map[string]*topicPartitions, len(current))
for k, v := range current {
clone[k] = v
}
return clone
}
// Ensures that the topics exist in the returned map, but does not store the
// update. This can be used to update the data and store later, rather than
// storing immediately.
func (t *topicsPartitions) ensureTopics(topics []string) topicsPartitionsData {
var cloned bool
current := t.load()
for _, topic := range topics {
if _, exists := current[topic]; !exists {
if !cloned {
current = t.clone()
cloned = true
}
current[topic] = newTopicPartitions()
}
}
return current
}
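// As a usage sketch (illustrative; the surrounding client plumbing is elided
// and the topic names are placeholders), the clone-on-miss behavior above is
// meant to be paired with a later store:
//
//	parts := t.ensureTopics([]string{"foo", "bar"}) // clones only if a topic is new
//	// ... fill in or inspect parts["foo"], parts["bar"] ...
//	t.storeData(parts) // publish the updated map atomically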
// Updates the topic partitions data atomic value.
//
// If this is the first time seeing partitions, we do processing of unknown
// partitions that may be buffered for producing.
func (cl *Client) storePartitionsUpdate(topic string, l *topicPartitions, lv *topicPartitionsData, hadPartitions bool) {
// If the topic already had partitions, then there would be no
// unknown topic waiting and we do not need to notify anything.
if hadPartitions {
l.v.Store(lv)
return
}
p := &cl.producer
p.unknownTopicsMu.Lock()
defer p.unknownTopicsMu.Unlock()
// If the topic did not have partitions, then we need to store the
// partition update BEFORE unlocking the mutex to guard against this
// sequence of events:
//
// - unlock waiters
// - delete waiter
// - new produce recreates waiter
// - we store update
// - we never notify the recreated waiter
//
// By storing before releasing the locks, we ensure that later
// partition loads for this topic under the mu will see our update.
defer l.v.Store(lv)
// If there are no unknown topics or this topic is not unknown, then we
// have nothing to do.
if len(p.unknownTopics) == 0 {
return
}
unknown, exists := p.unknownTopics[topic]
if !exists {
return
}
// If we loaded no partitions because of a retriable error, we signal
// the waiting goroutine that a try happened. It is possible the
// goroutine is quitting and will not be draining unknownWait, so we do
// not require the send.
if len(lv.partitions) == 0 && kerr.IsRetriable(lv.loadErr) {
select {
case unknown.wait <- lv.loadErr:
default:
}
return
}
// Either we have a fatal error or we can successfully partition.
//
// Even with a fatal error, if we loaded any partitions, we partition.
// If we only had a fatal error, we can finish promises in a goroutine.
// If we are partitioning, we have to do it under the unknownMu to
// ensure prior buffered records are produced in order before we
// release the mu.
delete(p.unknownTopics, topic)
close(unknown.wait) // allow waiting goroutine to quit
if len(lv.partitions) == 0 {
cl.failUnknownTopicRecords(topic, unknown, lv.loadErr)
} else {
for _, pr := range unknown.buffered {
cl.doPartitionRecord(l, lv, pr)
}
}
}
// If a metadata request fails after retrying (internally retrying, so only a
// few times), or the metadata request does not return topics that we requested
// (which may also happen when additionally consuming via regex), then we need to
// bump errors for topics that were previously loaded, and bump errors for
// topics awaiting load.
//
// This has two modes of operation:
//
// 1) if no topics were missing, then the metadata request failed outright,
// and we need to bump errors on all stored topics and unknown topics.
//
// 2) if topics were missing, then the metadata request was successful but
// had missing data, and we need to bump errors on only what was missing.
//
func (cl *Client) bumpMetadataFailForTopics(requested map[string]*topicPartitions, err error, missingTopics ...string) {
p := &cl.producer
// mode 1
if len(missingTopics) == 0 {
for _, topic := range requested {
for _, topicPartition := range topic.load().partitions {
topicPartition.records.bumpRepeatedLoadErr(err)
}
}
}
// mode 2
var missing map[string]bool
for _, failTopic := range missingTopics {
if missing == nil {
missing = make(map[string]bool, len(missingTopics))
}
missing[failTopic] = true
if topic, exists := requested[failTopic]; exists {
for _, topicPartition := range topic.load().partitions {
topicPartition.records.bumpRepeatedLoadErr(err)
}
}
}
p.unknownTopicsMu.Lock()
defer p.unknownTopicsMu.Unlock()
for topic, unknown := range p.unknownTopics {
// if nil, mode 1, else mode 2
if missing != nil && !missing[topic] {
continue
}
select {
case unknown.wait <- err:
default:
}
}
}
// topicPartitionsData is the data behind a topicPartitions' v.
//
// We keep this in an atomic because it is expected to be extremely read heavy,
// and if it were behind a lock, the lock would need to be held for a while.
type topicPartitionsData struct {
// NOTE if adding anything to this struct, be sure to fix meta merge.
loadErr error // could be auth, unknown, leader not avail, or creation err
isInternal bool
partitions []*topicPartition // partition num => partition
writablePartitions []*topicPartition // subset of above
}
// topicPartition contains all information from Kafka for a topic's partition,
// as well as what a client is producing to it or info about consuming from it.
type topicPartition struct {
// If we have a load error (leader/listener/replica not available), we
// keep the old topicPartition data and the new error.
loadErr error
// If we do not have a load error, we determine if the new
// topicPartition is the same or different from the old based on
// whether the data changed (leader or leader epoch, etc.).
topicPartitionData
// If we do not have a load error, we copy the records and cursor
// pointers from the old after updating any necessary fields in them
// (see migrate functions below).
//
// Only one of records or cursor is non-nil.
records *recBuf
cursor *cursor
}
// Contains stuff that changes on metadata update that we copy into a cursor or
// recBuf.
type topicPartitionData struct {
// Our leader; if metadata sees this change, the metadata update
// migrates the cursor to a different source with the session stopped,
// and the recBuf to a different sink under a tight mutex.
leader int32
// What we believe to be the epoch of the leader for this partition.
//
// For cursors, for KIP-320, if a broker receives a fetch request where
// the current leader epoch does not match the brokers, either the
// broker is behind and returns UnknownLeaderEpoch, or we are behind
// and the broker returns FencedLeaderEpoch. For the former, we back
// off and retry. For the latter, we update our metadata.
leaderEpoch int32
}
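// Illustrative sketch of the KIP-320 handling described above (the real
// handling lives in the fetch path, not in this struct; the kerr error names
// are from the kerr package already used in this file):
//
//	switch err {
//	case kerr.UnknownLeaderEpoch: // broker is behind: back off and retry the fetch
//	case kerr.FencedLeaderEpoch: // we are behind: trigger a metadata update first
//	}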
// migrateProductionTo is called on metadata update if a topic partition's sink
// has changed. This moves record production from one sink to the other; this
// must be done such that records produced during migration follow those
// already buffered.
func (old *topicPartition) migrateProductionTo(new *topicPartition) {
// First, remove our record buffer from the old sink.
old.records.sink.removeRecBuf(old.records)
// Before this next lock, record producing will buffer to the
// in-migration-progress records and may trigger draining to
// the old sink. That is fine, the old sink no longer consumes
// from these records. We just have wasted drain triggers.
old.records.mu.Lock() // guard setting sink and topic partition data
old.records.sink = new.records.sink
old.records.topicPartitionData = new.topicPartitionData
old | {
tp, exists := d[t]
if !exists {
return nil
}
return tp.load()
} | identifier_body |
topics_and_partitions.go | to their partitions that can be updated
// atomically.
type topicsPartitions struct {
v atomic.Value // topicsPartitionsData (map[string]*topicPartitions)
}
func (t *topicsPartitions) load() topicsPartitionsData {
if t == nil {
return nil
}
return t.v.Load().(topicsPartitionsData)
}
func (t *topicsPartitions) storeData(d topicsPartitionsData) { t.v.Store(d) }
func (t *topicsPartitions) storeTopics(topics []string) { t.v.Store(t.ensureTopics(topics)) }
func (t *topicsPartitions) clone() topicsPartitionsData {
current := t.load()
clone := make(map[string]*topicPartitions, len(current))
for k, v := range current {
clone[k] = v
}
return clone
}
// Ensures that the topics exist in the returned map, but does not store the
// update. This can be used to update the data and store later, rather than
// storing immediately.
func (t *topicsPartitions) ensureTopics(topics []string) topicsPartitionsData {
var cloned bool
current := t.load()
for _, topic := range topics {
if _, exists := current[topic]; !exists {
if !cloned {
current = t.clone()
cloned = true
}
current[topic] = newTopicPartitions()
}
}
return current
}
// Updates the topic partitions data atomic value.
//
// If this is the first time seeing partitions, we do processing of unknown
// partitions that may be buffered for producing.
func (cl *Client) storePartitionsUpdate(topic string, l *topicPartitions, lv *topicPartitionsData, hadPartitions bool) {
// If the topic already had partitions, then there would be no
// unknown topic waiting and we do not need to notify anything.
if hadPartitions {
l.v.Store(lv)
return
}
p := &cl.producer
p.unknownTopicsMu.Lock()
defer p.unknownTopicsMu.Unlock()
// If the topic did not have partitions, then we need to store the
// partition update BEFORE unlocking the mutex to guard against this
// sequence of events:
//
// - unlock waiters
// - delete waiter
// - new produce recreates waiter
// - we store update
// - we never notify the recreated waiter
//
// By storing before releasing the locks, we ensure that later
// partition loads for this topic under the mu will see our update.
defer l.v.Store(lv)
// If there are no unknown topics or this topic is not unknown, then we
// have nothing to do.
if len(p.unknownTopics) == 0 {
return
}
unknown, exists := p.unknownTopics[topic]
if !exists {
return
}
// If we loaded no partitions because of a retriable error, we signal
// the waiting goroutine that a try happened. It is possible the
// goroutine is quitting and will not be draining unknownWait, so we do
// not require the send.
if len(lv.partitions) == 0 && kerr.IsRetriable(lv.loadErr) {
select {
case unknown.wait <- lv.loadErr:
default:
}
return
}
// Either we have a fatal error or we can successfully partition.
//
// Even with a fatal error, if we loaded any partitions, we partition.
// If we only had a fatal error, we can finish promises in a goroutine.
// If we are partitioning, we have to do it under the unknownTopicsMu to
// ensure prior buffered records are produced in order before we
// release the mu.
delete(p.unknownTopics, topic)
close(unknown.wait) // allow waiting goroutine to quit
if len(lv.partitions) == 0 {
cl.failUnknownTopicRecords(topic, unknown, lv.loadErr)
} else {
for _, pr := range unknown.buffered {
cl.doPartitionRecord(l, lv, pr)
}
}
}
// If a metadata request fails after retrying (internally retrying, so only a
// few times), or the metadata request does not return topics that we requested
// (which may also happen when additionally consuming via regex), then we need to
// bump errors for topics that were previously loaded, and bump errors for
// topics awaiting load.
//
// This has two modes of operation:
//
// 1) if no topics were missing, then the metadata request failed outright,
// and we need to bump errors on all stored topics and unknown topics.
//
// 2) if topics were missing, then the metadata request was successful but
// had missing data, and we need to bump errors on only what was missing.
//
func (cl *Client) bumpMetadataFailForTopics(requested map[string]*topicPartitions, err error, missingTopics ...string) {
p := &cl.producer
// mode 1
if len(missingTopics) == 0 {
for _, topic := range requested {
for _, topicPartition := range topic.load().partitions {
topicPartition.records.bumpRepeatedLoadErr(err)
}
}
}
// mode 2
var missing map[string]bool
for _, failTopic := range missingTopics {
if missing == nil {
missing = make(map[string]bool, len(missingTopics))
}
missing[failTopic] = true
if topic, exists := requested[failTopic]; exists {
for _, topicPartition := range topic.load().partitions {
topicPartition.records.bumpRepeatedLoadErr(err)
}
}
}
p.unknownTopicsMu.Lock()
defer p.unknownTopicsMu.Unlock()
for topic, unknown := range p.unknownTopics {
// if nil, mode 1, else mode 2
if missing != nil && !missing[topic] {
continue
}
select {
case unknown.wait <- err:
default:
}
}
}
// topicPartitionsData is the data behind a topicPartitions' v.
//
// We keep this in an atomic because it is expected to be extremely read heavy,
// and if it were behind a lock, the lock would need to be held for a while.
type topicPartitionsData struct {
// NOTE if adding anything to this struct, be sure to fix meta merge.
loadErr error // could be auth, unknown, leader not avail, or creation err
isInternal bool
partitions []*topicPartition // partition num => partition
writablePartitions []*topicPartition // subset of above
}
// topicPartition contains all information from Kafka for a topic's partition,
// as well as what a client is producing to it or info about consuming from it.
type topicPartition struct {
// If we have a load error (leader/listener/replica not available), we
// keep the old topicPartition data and the new error.
loadErr error
// If we do not have a load error, we determine if the new
// topicPartition is the same or different from the old based on
// whether the data changed (leader or leader epoch, etc.).
topicPartitionData
// If we do not have a load error, we copy the records and cursor
// pointers from the old after updating any necessary fields in them
// (see migrate functions below).
//
// Only one of records or cursor is non-nil.
records *recBuf
cursor *cursor
}
// Contains stuff that changes on metadata update that we copy into a cursor or
// recBuf.
type topicPartitionData struct {
// Our leader; if metadata sees this change, the metadata update
// migrates the cursor to a different source with the session stopped,
// and the recBuf to a different sink under a tight mutex.
leader int32
// What we believe to be the epoch of the leader for this partition.
//
// For cursors, for KIP-320, if a broker receives a fetch request where
// the current leader epoch does not match the brokers, either the
// broker is behind and returns UnknownLeaderEpoch, or we are behind
// and the broker returns FencedLeaderEpoch. For the former, we back
// off and retry. For the latter, we update our metadata.
leaderEpoch int32
}
// migrateProductionTo is called on metadata update if a topic partition's sink
// has changed. This moves record production from one sink to the other; this
// must be done such that records produced during migration follow those
// already buffered.
func (old *topicPartition) migrateProductionTo(new *topicPartition) {
// First, remove our record buffer from the old sink.
old.records.sink.removeRecBuf(old.records)
// Before this next lock, record producing will buffer to the
// in-migration-progress records and may trigger draining to
// the old sink. That is fine, the old sink no longer consumes
// from these records. We just have wasted drain triggers.
old.records.mu.Lock() // guard setting sink and topic partition data
old.records.sink = new.records.sink
old.records.topicPartitionData = new.topicPartitionData
old.records.mu.Unlock()
| // After the unlock above, record buffering can trigger drains
// on the new sink, which is not yet consuming from these
// records. Again, just more wasted drain triggers. | random_line_split |
|
topics_and_partitions.go | itionsData) loadTopic(t string) *topicPartitionsData {
tp, exists := d[t]
if !exists {
return nil
}
return tp.load()
}
// A helper type mapping topics to their partitions that can be updated
// atomically.
type topicsPartitions struct {
v atomic.Value // topicsPartitionsData (map[string]*topicPartitions)
}
func (t *topicsPartitions) load() topicsPartitionsData {
if t == nil {
return nil
}
return t.v.Load().(topicsPartitionsData)
}
func (t *topicsPartitions) storeData(d topicsPartitionsData) { t.v.Store(d) }
func (t *topicsPartitions) storeTopics(topics []string) { t.v.Store(t.ensureTopics(topics)) }
func (t *topicsPartitions) clone() topicsPartitionsData {
current := t.load()
clone := make(map[string]*topicPartitions, len(current))
for k, v := range current {
clone[k] = v
}
return clone
}
// Ensures that the topics exist in the returned map, but does not store the
// update. This can be used to update the data and store later, rather than
// storing immediately.
func (t *topicsPartitions) ensureTopics(topics []string) topicsPartitionsData {
var cloned bool
current := t.load()
for _, topic := range topics {
if _, exists := current[topic]; !exists {
if !cloned {
current = t.clone()
cloned = true
}
current[topic] = newTopicPartitions()
}
}
return current
}
// Updates the topic partitions data atomic value.
//
// If this is the first time seeing partitions, we do processing of unknown
// partitions that may be buffered for producing.
func (cl *Client) storePartitionsUpdate(topic string, l *topicPartitions, lv *topicPartitionsData, hadPartitions bool) {
// If the topic already had partitions, then there would be no
// unknown topic waiting and we do not need to notify anything.
if hadPartitions {
l.v.Store(lv)
return
}
p := &cl.producer
p.unknownTopicsMu.Lock()
defer p.unknownTopicsMu.Unlock()
// If the topic did not have partitions, then we need to store the
// partition update BEFORE unlocking the mutex to guard against this
// sequence of events:
//
// - unlock waiters
// - delete waiter
// - new produce recreates waiter
// - we store update
// - we never notify the recreated waiter
//
// By storing before releasing the locks, we ensure that later
// partition loads for this topic under the mu will see our update.
defer l.v.Store(lv)
// If there are no unknown topics or this topic is not unknown, then we
// have nothing to do.
if len(p.unknownTopics) == 0 {
return
}
unknown, exists := p.unknownTopics[topic]
if !exists {
return
}
// If we loaded no partitions because of a retriable error, we signal
// the waiting goroutine that a try happened. It is possible the
// goroutine is quitting and will not be draining unknownWait, so we do
// not require the send.
if len(lv.partitions) == 0 && kerr.IsRetriable(lv.loadErr) {
select {
case unknown.wait <- lv.loadErr:
default:
}
return
}
// Either we have a fatal error or we can successfully partition.
//
// Even with a fatal error, if we loaded any partitions, we partition.
// If we only had a fatal error, we can finish promises in a goroutine.
// If we are partitioning, we have to do it under the unknownTopicsMu to
// ensure prior buffered records are produced in order before we
// release the mu.
delete(p.unknownTopics, topic)
close(unknown.wait) // allow waiting goroutine to quit
if len(lv.partitions) == 0 {
cl.failUnknownTopicRecords(topic, unknown, lv.loadErr)
} else {
for _, pr := range unknown.buffered {
cl.doPartitionRecord(l, lv, pr)
}
}
}
// If a metadata request fails after retrying (internally retrying, so only a
// few times), or the metadata request does not return topics that we requested
// (which may also happen when additionally consuming via regex), then we need to
// bump errors for topics that were previously loaded, and bump errors for
// topics awaiting load.
//
// This has two modes of operation:
//
// 1) if no topics were missing, then the metadata request failed outright,
// and we need to bump errors on all stored topics and unknown topics.
//
// 2) if topics were missing, then the metadata request was successful but
// had missing data, and we need to bump errors on only what was missing.
//
func (cl *Client) bumpMetadataFailForTopics(requested map[string]*topicPartitions, err error, missingTopics ...string) {
p := &cl.producer
// mode 1
if len(missingTopics) == 0 {
for _, topic := range requested {
for _, topicPartition := range topic.load().partitions {
topicPartition.records.bumpRepeatedLoadErr(err)
}
}
}
// mode 2
var missing map[string]bool
for _, failTopic := range missingTopics {
if missing == nil {
missing = make(map[string]bool, len(missingTopics))
}
missing[failTopic] = true
if topic, exists := requested[failTopic]; exists {
for _, topicPartition := range topic.load().partitions {
topicPartition.records.bumpRepeatedLoadErr(err)
}
}
}
p.unknownTopicsMu.Lock()
defer p.unknownTopicsMu.Unlock()
for topic, unknown := range p.unknownTopics {
// if nil, mode 1, else mode 2
if missing != nil && !missing[topic] {
continue
}
select {
case unknown.wait <- err:
default:
}
}
}
// topicPartitionsData is the data behind a topicPartitions' v.
//
// We keep this in an atomic because it is expected to be extremely read heavy,
// and if it were behind a lock, the lock would need to be held for a while.
type topicPartitionsData struct {
// NOTE if adding anything to this struct, be sure to fix meta merge.
loadErr error // could be auth, unknown, leader not avail, or creation err
isInternal bool
partitions []*topicPartition // partition num => partition
writablePartitions []*topicPartition // subset of above
}
// topicPartition contains all information from Kafka for a topic's partition,
// as well as what a client is producing to it or info about consuming from it.
type topicPartition struct {
// If we have a load error (leader/listener/replica not available), we
// keep the old topicPartition data and the new error.
loadErr error
// If we do not have a load error, we determine if the new
// topicPartition is the same or different from the old based on
// whether the data changed (leader or leader epoch, etc.).
topicPartitionData
// If we do not have a load error, we copy the records and cursor
// pointers from the old after updating any necessary fields in them
// (see migrate functions below).
//
// Only one of records or cursor is non-nil.
records *recBuf
cursor *cursor
}
// Contains stuff that changes on metadata update that we copy into a cursor or
// recBuf.
type topicPartitionData struct {
// Our leader; if metadata sees this change, the metadata update
// migrates the cursor to a different source with the session stopped,
// and the recBuf to a different sink under a tight mutex.
leader int32
// What we believe to be the epoch of the leader for this partition.
//
// For cursors, for KIP-320, if a broker receives a fetch request where
// the current leader epoch does not match the brokers, either the
// broker is behind and returns UnknownLeaderEpoch, or we are behind
// and the broker returns FencedLeaderEpoch. For the former, we back
// off and retry. For the latter, we update our metadata.
leaderEpoch int32
}
// migrateProductionTo is called on metadata update if a topic partition's sink
// has changed. This moves record production from one sink to the other; this
// must be done such that records produced during migration follow those
// already buffered.
func (old *topicPartition) | (new *topicPartition) {
// First, remove our record buffer from the old sink.
old.records.sink.removeRecBuf(old.records)
// Before this next lock, record producing will buffer to the
// in-migration-progress records and may trigger draining to
// the old sink. That is fine, the old sink no longer consumes
// from these records. We just have wasted drain triggers.
old.records.mu.Lock() // guard setting sink and topic partition data
old.records.sink = new.records.sink
old.records.topicPartitionData = new.topicPartitionData
old | migrateProductionTo | identifier_name |
topics_and_partitions.go | itionsData) loadTopic(t string) *topicPartitionsData {
tp, exists := d[t]
if !exists {
return nil
}
return tp.load()
}
// A helper type mapping topics to their partitions that can be updated
// atomically.
type topicsPartitions struct {
v atomic.Value // topicsPartitionsData (map[string]*topicPartitions)
}
func (t *topicsPartitions) load() topicsPartitionsData {
if t == nil {
return nil
}
return t.v.Load().(topicsPartitionsData)
}
func (t *topicsPartitions) storeData(d topicsPartitionsData) { t.v.Store(d) }
func (t *topicsPartitions) storeTopics(topics []string) { t.v.Store(t.ensureTopics(topics)) }
func (t *topicsPartitions) clone() topicsPartitionsData {
current := t.load()
clone := make(map[string]*topicPartitions, len(current))
for k, v := range current {
clone[k] = v
}
return clone
}
// Ensures that the topics exist in the returned map, but does not store the
// update. This can be used to update the data and store later, rather than
// storing immediately.
func (t *topicsPartitions) ensureTopics(topics []string) topicsPartitionsData {
var cloned bool
current := t.load()
for _, topic := range topics {
if _, exists := current[topic]; !exists {
if !cloned {
current = t.clone()
cloned = true
}
current[topic] = newTopicPartitions()
}
}
return current
}
// Updates the topic partitions data atomic value.
//
// If this is the first time seeing partitions, we do processing of unknown
// partitions that may be buffered for producing.
func (cl *Client) storePartitionsUpdate(topic string, l *topicPartitions, lv *topicPartitionsData, hadPartitions bool) {
// If the topic already had partitions, then there would be no
// unknown topic waiting and we do not need to notify anything.
if hadPartitions |
p := &cl.producer
p.unknownTopicsMu.Lock()
defer p.unknownTopicsMu.Unlock()
// If the topic did not have partitions, then we need to store the
// partition update BEFORE unlocking the mutex to guard against this
// sequence of events:
//
// - unlock waiters
// - delete waiter
// - new produce recreates waiter
// - we store update
// - we never notify the recreated waiter
//
// By storing before releasing the locks, we ensure that later
// partition loads for this topic under the mu will see our update.
defer l.v.Store(lv)
// If there are no unknown topics or this topic is not unknown, then we
// have nothing to do.
if len(p.unknownTopics) == 0 {
return
}
unknown, exists := p.unknownTopics[topic]
if !exists {
return
}
// If we loaded no partitions because of a retriable error, we signal
// the waiting goroutine that a try happened. It is possible the
// goroutine is quitting and will not be draining unknownWait, so we do
// not require the send.
if len(lv.partitions) == 0 && kerr.IsRetriable(lv.loadErr) {
select {
case unknown.wait <- lv.loadErr:
default:
}
return
}
// Either we have a fatal error or we can successfully partition.
//
// Even with a fatal error, if we loaded any partitions, we partition.
// If we only had a fatal error, we can finish promises in a goroutine.
// If we are partitioning, we have to do it under the unknownTopicsMu to
// ensure prior buffered records are produced in order before we
// release the mu.
delete(p.unknownTopics, topic)
close(unknown.wait) // allow waiting goroutine to quit
if len(lv.partitions) == 0 {
cl.failUnknownTopicRecords(topic, unknown, lv.loadErr)
} else {
for _, pr := range unknown.buffered {
cl.doPartitionRecord(l, lv, pr)
}
}
}
// If a metadata request fails after retrying (internally retrying, so only a
// few times), or the metadata request does not return topics that we requested
// (which may also happen when additionally consuming via regex), then we need to
// bump errors for topics that were previously loaded, and bump errors for
// topics awaiting load.
//
// This has two modes of operation:
//
// 1) if no topics were missing, then the metadata request failed outright,
// and we need to bump errors on all stored topics and unknown topics.
//
// 2) if topics were missing, then the metadata request was successful but
// had missing data, and we need to bump errors on only what was missing.
//
func (cl *Client) bumpMetadataFailForTopics(requested map[string]*topicPartitions, err error, missingTopics ...string) {
p := &cl.producer
// mode 1
if len(missingTopics) == 0 {
for _, topic := range requested {
for _, topicPartition := range topic.load().partitions {
topicPartition.records.bumpRepeatedLoadErr(err)
}
}
}
// mode 2
var missing map[string]bool
for _, failTopic := range missingTopics {
if missing == nil {
missing = make(map[string]bool, len(missingTopics))
}
missing[failTopic] = true
if topic, exists := requested[failTopic]; exists {
for _, topicPartition := range topic.load().partitions {
topicPartition.records.bumpRepeatedLoadErr(err)
}
}
}
p.unknownTopicsMu.Lock()
defer p.unknownTopicsMu.Unlock()
for topic, unknown := range p.unknownTopics {
// if nil, mode 1, else mode 2
if missing != nil && !missing[topic] {
continue
}
select {
case unknown.wait <- err:
default:
}
}
}
// topicPartitionsData is the data behind a topicPartitions' v.
//
// We keep this in an atomic because it is expected to be extremely read heavy,
// and if it were behind a lock, the lock would need to be held for a while.
type topicPartitionsData struct {
// NOTE if adding anything to this struct, be sure to fix meta merge.
loadErr error // could be auth, unknown, leader not avail, or creation err
isInternal bool
partitions []*topicPartition // partition num => partition
writablePartitions []*topicPartition // subset of above
}
// topicPartition contains all information from Kafka for a topic's partition,
// as well as what a client is producing to it or info about consuming from it.
type topicPartition struct {
// If we have a load error (leader/listener/replica not available), we
// keep the old topicPartition data and the new error.
loadErr error
// If we do not have a load error, we determine if the new
// topicPartition is the same or different from the old based on
// whether the data changed (leader or leader epoch, etc.).
topicPartitionData
// If we do not have a load error, we copy the records and cursor
// pointers from the old after updating any necessary fields in them
// (see migrate functions below).
//
// Only one of records or cursor is non-nil.
records *recBuf
cursor *cursor
}
// Contains stuff that changes on metadata update that we copy into a cursor or
// recBuf.
type topicPartitionData struct {
// Our leader; if metadata sees this change, the metadata update
// migrates the cursor to a different source with the session stopped,
// and the recBuf to a different sink under a tight mutex.
leader int32
// What we believe to be the epoch of the leader for this partition.
//
// For cursors, for KIP-320, if a broker receives a fetch request where
// the current leader epoch does not match the brokers, either the
// broker is behind and returns UnknownLeaderEpoch, or we are behind
// and the broker returns FencedLeaderEpoch. For the former, we back
// off and retry. For the latter, we update our metadata.
leaderEpoch int32
}
// migrateProductionTo is called on metadata update if a topic partition's sink
// has changed. This moves record production from one sink to the other; this
// must be done such that records produced during migration follow those
// already buffered.
func (old *topicPartition) migrateProductionTo(new *topicPartition) {
// First, remove our record buffer from the old sink.
old.records.sink.removeRecBuf(old.records)
// Before this next lock, record producing will buffer to the
// in-migration-progress records and may trigger draining to
// the old sink. That is fine, the old sink no longer consumes
// from these records. We just have wasted drain triggers.
old.records.mu.Lock() // guard setting sink and topic partition data
old.records.sink = new.records.sink
old.records.topicPartitionData = new.topicPartitionData
old | {
l.v.Store(lv)
return
} | conditional_block |
lib.rs | ::new(),
skip: Default::default(),
}
}
}
/// A pretty-printable value from Javascript.
pub struct Prettified {
/// The current value we're visiting.
value: JsValue,
/// We just use a JS WeakSet here to avoid relying on wasm-bindgen's unstable
/// ABI.
seen: WeakSet,
/// Properties we don't want serialized.
skip: Rc<HashSet<String>>,
}
impl Prettified {
/// Skip printing the property with `name` if it exists on any object
/// visited (transitively).
pub fn skip_property(&mut self, name: &str) -> &mut Self {
let mut with_name = HashSet::to_owned(&self.skip);
with_name.insert(name.to_owned());
self.skip = Rc::new(with_name);
self
}
fn child(&self, v: &JsValue) -> Self {
Self { seen: self.seen.clone(), skip: self.skip.clone(), value: v.as_ref().clone() }
}
// TODO get a serde_json::Value from this too
}
impl Debug for Prettified {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
// detect and break cycles before trying to figure out Object subclass
// keeps a single path here rather than separately in each branch below
let mut _reset = None;
if let Some(obj) = self.value.dyn_ref::<Object>() {
if self.seen.has(obj) {
return write!(f, "[Cycle]");
}
self.seen.add(obj);
_reset = Some(scopeguard::guard(obj.to_owned(), |obj| {
self.seen.delete(&obj);
}));
}
if self.value.is_null() {
write!(f, "null")
} else if self.value.is_undefined() {
write!(f, "undefined")
} else if self.value.dyn_ref::<Function>().is_some() {
JsFunction.fmt(f)
} else if self.value.dyn_ref::<Promise>().is_some() {
write!(f, "[Promise]")
} else if self.value.dyn_ref::<Document>().is_some() {
write!(f, "[Document]")
} else if self.value.dyn_ref::<Window>().is_some() {
write!(f, "[Window]")
} else if let Some(s) = self.value.dyn_ref::<JsString>() {
write!(f, "{:?}", s.as_string().unwrap())
} else if let Some(n) = self.value.as_f64() {
write!(f, "{}", n)
} else if let Some(b) = self.value.as_bool() {
write!(f, "{:?}", b)
} else if let Some(d) = self.value.dyn_ref::<Date>() {
write!(f, "{}", d.to_iso_string().as_string().unwrap())
} else if let Some(d) = self.value.dyn_ref::<Element>() {
let name = d.tag_name().to_ascii_lowercase();
let (mut class, mut id) = (d.class_name(), d.id());
if !class.is_empty() {
class.insert_str(0, " .");
}
if !id.is_empty() {
id.insert_str(0, " #");
}
write!(f, "<{}{}{}/>", name, id, class)
} else if let Some(e) = self.value.dyn_ref::<Error>() {
write!(f, "Error: {}", e.to_string().as_string().unwrap())
} else if let Some(r) = self.value.dyn_ref::<RegExp>() {
write!(f, "/{}/", r.to_string().as_string().unwrap())
} else if let Some(s) = self.value.dyn_ref::<Symbol>() {
write!(f, "{}", s.to_string().as_string().unwrap())
} else if let Some(a) = self.value.dyn_ref::<Array>() {
let mut f = f.debug_list();
for val in a.iter() {
f.entry(&self.child(&val));
}
f.finish()
} else if let Some(s) = self.value.dyn_ref::<Set>() {
let mut f = f.debug_set();
let entries = s.entries();
while let Ok(next) = entries.next() {
if next.done() {
break;
}
f.entry(&self.child(&next.value()));
}
f.finish()
} else if let Some(m) = self.value.dyn_ref::<Map>() {
let mut f = f.debug_map();
let keys = m.keys();
while let Ok(next) = keys.next() {
if next.done() {
break;
}
let key = next.value();
let value = m.get(&key);
f.entry(&self.child(&key), &self.child(&value));
}
f.finish()
} else if let Some(obj) = self.value.dyn_ref::<Object>() {
let mut proto = obj.clone();
let mut props_seen = HashSet::new();
let name = obj.constructor().name().as_string().unwrap();
let mut f = f.debug_struct(&name);
loop {
let mut functions = BTreeSet::new();
let mut props = BTreeMap::new();
for raw_key in Object::get_own_property_names(&proto).iter() {
let key = raw_key.as_string().expect("object keys are always strings");
if (key.starts_with("__") && key.ends_with("__"))
|| props_seen.contains(&key)
|| functions.contains(&key)
|| self.skip.contains(&key)
{
continue;
}
if let Ok(value) = Reflect::get(obj, &raw_key) {
props_seen.insert(key.clone());
if value.is_function() {
functions.insert(key);
} else {
props.insert(key, self.child(&value));
}
}
}
for (key, value) in props {
f.field(&key, &value);
}
for key in functions {
f.field(&key, &JsFunction);
}
proto = Object::get_prototype_of(proto.as_ref());
if proto.is_falsy() || proto.constructor().name().as_string().unwrap() == "Object" {
// we've reached the end of the prototype chain
break;
}
}
f.finish()
} else {
write!(f, "unknown ({:?})", &self.value)
}
}
}
impl Display for Prettified {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
write!(f, "{:#?}", self)
}
}
struct | ;
impl Debug for JsFunction {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
write!(f, "[Function]")
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::channel::oneshot::channel;
use wasm_bindgen::closure::Closure;
use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure};
use web_sys::{Event, EventTarget};
wasm_bindgen_test_configure!(run_in_browser);
#[wasm_bindgen_test]
fn cycle_is_broken() {
let with_cycles = js_sys::Function::new_no_args(
r#"
let root = { child: { nested: [] } };
root.child.nested.push(root);
return root;
"#,
)
.call0(&JsValue::null())
.unwrap();
assert_eq!(
with_cycles.pretty().to_string(),
r#"Object {
child: Object {
nested: [
[Cycle],
],
},
}"#
);
}
#[wasm_bindgen_test]
fn repeated_siblings_are_not_cycles() {
let with_siblings = js_sys::Function::new_no_args(
r#"
let root = { child: { nested: [] } };
let repeated_child = { foo: "bar" };
root.child.nested.push(repeated_child);
root.child.nested.push(repeated_child);
return root;
"#,
)
.call0(&JsValue::null())
.unwrap();
assert_eq!(
with_siblings.pretty().to_string(),
r#"Object {
child: Object {
nested: [
Object {
foo: "bar",
},
Object {
foo: "bar",
},
],
},
}"#
);
}
#[wasm_bindgen_test]
async fn live_keyboard_event() {
// create an input element and bind it to the document
let window = web_sys::window().unwrap();
let document = window.document().unwrap();
let input = document.create_element("input").unwrap();
// input.set_attribute("type", "text").unwrap();
document.body().unwrap().append_child(input.as_ref()).unwrap();
// create & add an event listener that will send the event back to the test
let (send, recv) = channel();
let callback = Closure::once_into_js(move |ev: Event| {
send.send(ev).unwrap();
});
let target: &EventTarget = input.as_ref();
let event_type = "keydown";
target.add_event_listener_with_callback(event_type, callback.dyn_ref().unwrap()).unwrap();
// create & dispatch an event to the input element
let sent_event = web_sys::KeyboardEvent::new_with_keyboard_event_init_dict(
event_type,
web_sys::KeyboardEventInit::new()
.char_code(b'F' as u3 | JsFunction | identifier_name |
lib.rs | where
T: AsRef<JsValue>,
{
fn pretty(&self) -> Prettified {
Prettified {
value: self.as_ref().to_owned(),
seen: WeakSet::new(),
skip: Default::default(),
}
}
}
/// A pretty-printable value from Javascript.
pub struct Prettified {
/// The current value we're visiting.
value: JsValue,
/// We just use a JS WeakSet here to avoid relying on wasm-bindgen's unstable
/// ABI.
seen: WeakSet,
/// Properties we don't want serialized.
skip: Rc<HashSet<String>>,
}
impl Prettified {
/// Skip printing the property with `name` if it exists on any object
/// visited (transitively).
pub fn skip_property(&mut self, name: &str) -> &mut Self {
let mut with_name = HashSet::to_owned(&self.skip);
with_name.insert(name.to_owned());
self.skip = Rc::new(with_name);
self
}
fn child(&self, v: &JsValue) -> Self {
Self { seen: self.seen.clone(), skip: self.skip.clone(), value: v.as_ref().clone() }
}
// TODO get a serde_json::Value from this too
}
impl Debug for Prettified {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
// detect and break cycles before trying to figure out Object subclass
// keeps a single path here rather than separately in each branch below
let mut _reset = None;
if let Some(obj) = self.value.dyn_ref::<Object>() {
if self.seen.has(obj) {
return write!(f, "[Cycle]");
}
self.seen.add(obj);
_reset = Some(scopeguard::guard(obj.to_owned(), |obj| {
self.seen.delete(&obj);
}));
}
if self.value.is_null() {
write!(f, "null")
} else if self.value.is_undefined() {
write!(f, "undefined")
} else if self.value.dyn_ref::<Function>().is_some() {
JsFunction.fmt(f)
} else if self.value.dyn_ref::<Promise>().is_some() {
write!(f, "[Promise]")
} else if self.value.dyn_ref::<Document>().is_some() {
write!(f, "[Document]")
} else if self.value.dyn_ref::<Window>().is_some() {
write!(f, "[Window]")
} else if let Some(s) = self.value.dyn_ref::<JsString>() {
write!(f, "{:?}", s.as_string().unwrap())
} else if let Some(n) = self.value.as_f64() {
write!(f, "{}", n)
} else if let Some(b) = self.value.as_bool() {
write!(f, "{:?}", b)
} else if let Some(d) = self.value.dyn_ref::<Date>() {
write!(f, "{}", d.to_iso_string().as_string().unwrap())
} else if let Some(d) = self.value.dyn_ref::<Element>() {
let name = d.tag_name().to_ascii_lowercase();
let (mut class, mut id) = (d.class_name(), d.id());
if !class.is_empty() {
class.insert_str(0, " .");
}
if !id.is_empty() {
id.insert_str(0, " #");
}
write!(f, "<{}{}{}/>", name, id, class)
} else if let Some(e) = self.value.dyn_ref::<Error>() {
write!(f, "Error: {}", e.to_string().as_string().unwrap())
} else if let Some(r) = self.value.dyn_ref::<RegExp>() {
write!(f, "/{}/", r.to_string().as_string().unwrap())
} else if let Some(s) = self.value.dyn_ref::<Symbol>() {
write!(f, "{}", s.to_string().as_string().unwrap())
} else if let Some(a) = self.value.dyn_ref::<Array>() {
let mut f = f.debug_list();
for val in a.iter() {
f.entry(&self.child(&val));
}
f.finish()
} else if let Some(s) = self.value.dyn_ref::<Set>() {
let mut f = f.debug_set();
let entries = s.entries();
while let Ok(next) = entries.next() {
if next.done() {
break;
}
f.entry(&self.child(&next.value()));
}
f.finish()
} else if let Some(m) = self.value.dyn_ref::<Map>() {
let mut f = f.debug_map();
let keys = m.keys();
while let Ok(next) = keys.next() {
if next.done() {
break;
}
let key = next.value();
let value = m.get(&key);
f.entry(&self.child(&key), &self.child(&value));
}
f.finish()
} else if let Some(obj) = self.value.dyn_ref::<Object>() {
let mut proto = obj.clone();
let mut props_seen = HashSet::new();
let name = obj.constructor().name().as_string().unwrap();
let mut f = f.debug_struct(&name);
loop {
let mut functions = BTreeSet::new();
let mut props = BTreeMap::new();
for raw_key in Object::get_own_property_names(&proto).iter() {
let key = raw_key.as_string().expect("object keys are always strings");
if (key.starts_with("__") && key.ends_with("__"))
|| props_seen.contains(&key)
|| functions.contains(&key)
|| self.skip.contains(&key)
{
continue;
}
if let Ok(value) = Reflect::get(obj, &raw_key) {
props_seen.insert(key.clone());
if value.is_function() {
functions.insert(key);
} else {
props.insert(key, self.child(&value));
}
}
}
for (key, value) in props {
f.field(&key, &value);
}
for key in functions {
f.field(&key, &JsFunction);
}
proto = Object::get_prototype_of(proto.as_ref());
if proto.is_falsy() || proto.constructor().name().as_string().unwrap() == "Object" {
// we've reached the end of the prototype chain
break;
}
}
f.finish()
} else {
write!(f, "unknown ({:?})", &self.value)
}
}
}
impl Display for Prettified {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
write!(f, "{:#?}", self)
}
}
struct JsFunction;
impl Debug for JsFunction {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
write!(f, "[Function]")
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::channel::oneshot::channel;
use wasm_bindgen::closure::Closure;
use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure};
use web_sys::{Event, EventTarget};
wasm_bindgen_test_configure!(run_in_browser);
#[wasm_bindgen_test]
fn cycle_is_broken() {
let with_cycles = js_sys::Function::new_no_args(
r#"
let root = { child: { nested: [] } };
root.child.nested.push(root);
return root;
"#,
)
.call0(&JsValue::null())
.unwrap();
assert_eq!(
with_cycles.pretty().to_string(),
r#"Object {
child: Object {
nested: [
[Cycle],
],
},
}"#
);
}
#[wasm_bindgen_test]
fn repeated_siblings_are_not_cycles() {
let with_siblings = js_sys::Function::new_no_args(
r#"
let root = { child: { nested: [] } };
let repeated_child = { foo: "bar" };
root.child.nested.push(repeated_child);
root.child.nested.push(repeated_child);
return root;
"#,
)
.call0(&JsValue::null())
.unwrap();
assert_eq!(
with_siblings.pretty().to_string(),
r#"Object {
child: Object {
nested: [
Object {
foo: "bar",
},
Object {
foo: "bar",
},
],
},
}"#
);
}
#[wasm_bindgen_test]
async fn live_keyboard_event() {
// create an input element and bind it to the document
let window = web_sys::window().unwrap();
let document = window.document().unwrap();
let input = document.create_element("input").unwrap();
// input.set_attribute("type", "text").unwrap();
document.body().unwrap().append_child(input.as_ref()).unwrap();
// create & add an event listener that will send the event back to the test
let (send, recv) = channel();
let callback = Closure::once_into_js(move |ev: Event| {
send.send(ev).unwrap();
});
let target: &EventTarget = input.as_ref();
let event_type = "keydown";
target.add_event_listener_with_callback(event_type, callback.dyn_ref().unwrap()).unwrap();
// create & dispatch an event | impl<T> Pretty for T | random_line_split |
|
lib.rs | ::new(),
skip: Default::default(),
}
}
}
/// A pretty-printable value from Javascript.
pub struct Prettified {
/// The current value we're visiting.
value: JsValue,
/// We just use a JS WeakSet here to avoid relying on wasm-bindgen's unstable
/// ABI.
seen: WeakSet,
/// Properties we don't want serialized.
skip: Rc<HashSet<String>>,
}
impl Prettified {
/// Skip printing the property with `name` if it exists on any object
/// visited (transitively).
pub fn skip_property(&mut self, name: &str) -> &mut Self {
let mut with_name = HashSet::to_owned(&self.skip);
with_name.insert(name.to_owned());
self.skip = Rc::new(with_name);
self
}
fn child(&self, v: &JsValue) -> Self {
Self { seen: self.seen.clone(), skip: self.skip.clone(), value: v.as_ref().clone() }
}
// TODO get a serde_json::Value from this too
}
impl Debug for Prettified {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
// detect and break cycles before trying to figure out Object subclass
// keeps a single path here rather than separately in each branch below
let mut _reset = None;
if let Some(obj) = self.value.dyn_ref::<Object>() {
if self.seen.has(obj) {
return write!(f, "[Cycle]");
}
self.seen.add(obj);
_reset = Some(scopeguard::guard(obj.to_owned(), |obj| {
self.seen.delete(&obj);
}));
}
if self.value.is_null() {
write!(f, "null")
} else if self.value.is_undefined() {
write!(f, "undefined")
} else if self.value.dyn_ref::<Function>().is_some() {
JsFunction.fmt(f)
} else if self.value.dyn_ref::<Promise>().is_some() {
write!(f, "[Promise]")
} else if self.value.dyn_ref::<Document>().is_some() {
write!(f, "[Document]")
} else if self.value.dyn_ref::<Window>().is_some() {
write!(f, "[Window]")
} else if let Some(s) = self.value.dyn_ref::<JsString>() {
write!(f, "{:?}", s.as_string().unwrap())
} else if let Some(n) = self.value.as_f64() {
write!(f, "{}", n)
} else if let Some(b) = self.value.as_bool() {
write!(f, "{:?}", b)
} else if let Some(d) = self.value.dyn_ref::<Date>() {
write!(f, "{}", d.to_iso_string().as_string().unwrap())
} else if let Some(d) = self.value.dyn_ref::<Element>() {
let name = d.tag_name().to_ascii_lowercase();
let (mut class, mut id) = (d.class_name(), d.id());
if !class.is_empty() {
class.insert_str(0, " .");
}
if !id.is_empty() {
id.insert_str(0, " #");
}
write!(f, "<{}{}{}/>", name, id, class)
} else if let Some(e) = self.value.dyn_ref::<Error>() {
write!(f, "Error: {}", e.to_string().as_string().unwrap())
} else if let Some(r) = self.value.dyn_ref::<RegExp>() {
write!(f, "/{}/", r.to_string().as_string().unwrap())
} else if let Some(s) = self.value.dyn_ref::<Symbol>() {
write!(f, "{}", s.to_string().as_string().unwrap())
} else if let Some(a) = self.value.dyn_ref::<Array>() {
let mut f = f.debug_list();
for val in a.iter() {
f.entry(&self.child(&val));
}
f.finish()
} else if let Some(s) = self.value.dyn_ref::<Set>() {
let mut f = f.debug_set();
let entries = s.entries();
while let Ok(next) = entries.next() {
if next.done() {
break;
}
f.entry(&self.child(&next.value()));
}
f.finish()
} else if let Some(m) = self.value.dyn_ref::<Map>() {
let mut f = f.debug_map();
let keys = m.keys();
while let Ok(next) = keys.next() {
if next.done() {
break;
}
let key = next.value();
let value = m.get(&key);
f.entry(&self.child(&key), &self.child(&value));
}
f.finish()
} else if let Some(obj) = self.value.dyn_ref::<Object>() {
let mut proto = obj.clone();
let mut props_seen = HashSet::new();
let name = obj.constructor().name().as_string().unwrap();
let mut f = f.debug_struct(&name);
loop {
let mut functions = BTreeSet::new();
let mut props = BTreeMap::new();
for raw_key in Object::get_own_property_names(&proto).iter() {
let key = raw_key.as_string().expect("object keys are always strings");
if (key.starts_with("__") && key.ends_with("__"))
|| props_seen.contains(&key)
|| functions.contains(&key)
|| self.skip.contains(&key)
{
continue;
}
if let Ok(value) = Reflect::get(obj, &raw_key) {
props_seen.insert(key.clone());
if value.is_function() {
functions.insert(key);
} else {
props.insert(key, self.child(&value));
}
}
}
for (key, value) in props {
f.field(&key, &value);
}
for key in functions {
f.field(&key, &JsFunction);
}
proto = Object::get_prototype_of(proto.as_ref());
if proto.is_falsy() || proto.constructor().name().as_string().unwrap() == "Object" {
// we've reached the end of the prototype chain
break;
}
}
f.finish()
} else {
write!(f, "unknown ({:?})", &self.value)
}
}
}
impl Display for Prettified {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
write!(f, "{:#?}", self)
}
}
struct JsFunction;
impl Debug for JsFunction {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
write!(f, "[Function]")
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::channel::oneshot::channel;
use wasm_bindgen::closure::Closure;
use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure};
use web_sys::{Event, EventTarget};
wasm_bindgen_test_configure!(run_in_browser);
#[wasm_bindgen_test]
fn cycle_is_broken() | );
}
#[wasm_bindgen_test]
fn repeated_siblings_are_not_cycles() {
let with_siblings = js_sys::Function::new_no_args(
r#"
let root = { child: { nested: [] } };
let repeated_child = { foo: "bar" };
root.child.nested.push(repeated_child);
root.child.nested.push(repeated_child);
return root;
"#,
)
.call0(&JsValue::null())
.unwrap();
assert_eq!(
with_siblings.pretty().to_string(),
r#"Object {
child: Object {
nested: [
Object {
foo: "bar",
},
Object {
foo: "bar",
},
],
},
}"#
);
}
#[wasm_bindgen_test]
async fn live_keyboard_event() {
// create an input element and bind it to the document
let window = web_sys::window().unwrap();
let document = window.document().unwrap();
let input = document.create_element("input").unwrap();
// input.set_attribute("type", "text").unwrap();
document.body().unwrap().append_child(input.as_ref()).unwrap();
// create & add an event listener that will send the event back to the test
let (send, recv) = channel();
let callback = Closure::once_into_js(move |ev: Event| {
send.send(ev).unwrap();
});
let target: &EventTarget = input.as_ref();
let event_type = "keydown";
target.add_event_listener_with_callback(event_type, callback.dyn_ref().unwrap()).unwrap();
// create & dispatch an event to the input element
let sent_event = web_sys::KeyboardEvent::new_with_keyboard_event_init_dict(
event_type,
web_sys::KeyboardEventInit::new()
.char_code(b'F' as u3 | {
let with_cycles = js_sys::Function::new_no_args(
r#"
let root = { child: { nested: [] } };
root.child.nested.push(root);
return root;
"#,
)
.call0(&JsValue::null())
.unwrap();
assert_eq!(
with_cycles.pretty().to_string(),
r#"Object {
child: Object {
nested: [
[Cycle],
],
},
}"# | identifier_body |
readfile.py | map.josm.data.Preferences as Preferences;
import org.openstreetmap.josm.data.osm.OsmPrimitive as OsmPrimitive;
import org.openstreetmap.josm.data.projection.Projection;
import org.openstreetmap.josm.Main as Main;
import org.openstreetmap.josm.gui.layer.OsmDataLayer;
import org.openstreetmap.josm.gui.progress.NullProgressMonitor as NullProgressMonitor;
import org.openstreetmap.josm.gui.progress.ProgressMonitor;
import org.openstreetmap.josm.io.IllegalDataException;
import org.openstreetmap.josm.io.OsmImporter as OsmImporter;
import org.openstreetmap.josm.io.OsmImporter.OsmImporterData;
import org.openstreetmap.josm.gui.preferences.projection.ProjectionChoice;
import org.openstreetmap.josm.gui.preferences.projection.ProjectionPreference as ProjectionPreference;
from java.awt.event import KeyEvent
from javax.swing import ImageIcon
from javax.swing import JMenu
from javax.swing import JMenuBar
from javax.swing import JMenuItem
from java.awt.event import MouseListener
from java.awt.event import KeyListener
class ObjectTableModel(AbstractTableModel):
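# Adapts a list of tagged JOSM primitives (anything exposing .get(key)) to Swing's
# AbstractTableModel; each column is declared as a (header, tag-key) pair and the
# cell value is looked up on the row's primitive with get(tag-key).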
__columns__ = ()
def __init__(self, delegate, columns):
AbstractTableModel.__init__(self)
self.__columns__ = columns
self.delegate = delegate
self._getters = [None] * len(self.__columns__)
for index, column in enumerate(self.__columns__):
self.__columns__[index] = self._validateColumn(column, index)
def _fireItemsChanged(self, start, end):
self.fireTableRowsUpdated(start, end)
def _fireItemsAdded(self, start, end):
self.fireTableRowsInserted(start, end)
def _fireItemsRemoved(self, start, end):
self.fireTableRowsDeleted(start, end)
def setDelegate(self, value):
self._delegate = value
self.fireTableDataChanged()
def getColumnCount(self):
return len(self.__columns__)
def getRowCount(self):
n= len(self.delegate)
# print "row count %d " % n
return n
def getColumnClass(self, columnIndex):
return basestring
# return self.__columns__[columnIndex][1]
def getColumnName(self, columnIndex):
return self.__columns__[columnIndex][0]
def setValueAt(self, aValue, rowIndex, columnIndex):
self[rowIndex][columnIndex] = aValue
def refresh(self):
if len(self) > 0:
self.fireTableRowsUpdated(0, len(self) - 1)
def _validateColumn(self, column, index):
#column = DelegateTableModel._validateColumn(self, column, index)
self._getters[index] = lambda row: row.get(column[2])
return column
def getValueAt(self, rowIndex, columnIndex):
print "getValueAt " + str(rowIndex) + ":"+ str(columnIndex)
#line = self.delegate[rowIndex]
return self.delegate[rowIndex].get(self.__columns__[columnIndex][1])
#return self._getters[columnIndex](line)
def setValueAt(self, aValue, rowIndex, columnIndex):
attrname = self.__columns__[columnIndex][2]
setattr(self[rowIndex], attrname, aValue)
self.fireTableCellUpdated(rowIndex, columnIndex)
def getObjectIndex(self, obj):
for i, row in enumerate(self):
if row == obj:
return i
return - 1
def getSelectedObject(self, table):
assert table.model is self
if table.selectedRow >= 0:
modelRow = table.convertRowIndexToModel(table.selectedRow)
return self[modelRow]
def getSelectedObjects(self, table):
assert table.model is self
selected = []
for viewRow in table.selectedRows:
modelRow = table.convertRowIndexToModel(viewRow)
selected.append(self[modelRow])
return selected
def getVisibleObjects(self, table):
assert table.model is self
visible = []
for viewRow in xrange(table.rowCount):
modelRow = table.convertRowIndexToModel(viewRow)
visible.append(self[modelRow])
return visible
#def EventListener():
class MyListener(MouseListener, KeyListener):
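# Combined mouse/keyboard listener attached to the street table: pressing 'l'
# prints the column-0 value of each selected row (the real lookup dialog is still
# commented out), and mouseClicked only reports which button was pressed.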
def __init__(self, table):
self.table=table
# def mouseReleased(self, e):
# print("Mouse released; # of clicks: " + str(e.getClickCount()))
# print("button: " + str(e.getButton()))
def keyPressed( self,e) :
print("key pressed; " + str(e.getKeyChar()))
# print self.table
# r = self.table.getSelectedRow ()
# print r
rs= self.table.getSelectedRows()
if (e.getKeyChar() == 'l') :
print("lookup; ")
for r in rs :
print r
obj=self.table.getValueAt(r,0)
print obj
#d = LookupDialog()
# def mouseReleased(self,e) :
# print("Mouse released; # of clicks: " + str(e.getClickCount()))
def mouseClicked(self, e):
# print("Mouse clicked; # of clicks: " + str(e.getClickCount()))
# print("button: " + str(e.getButton()))
if (e.getButton() ==3) :
print "Mouse3 clicked; # of clicks: " + str(e.getClickCount())
else :
if (e.getButton() ==2):
print "Mouse2 clicked; # of clicks: " + str(e.getClickCount())
else:
if (e.getButton() ==1):
print "Mouse1 clicked; # of clicks: " + str(e.getClickCount())
class MyFrame(JFrame):
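# JFrame subclass; LookupEvent is wired up below as the action handler for the
# Edit -> Lookup menu item and currently just prints the frame and the event.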
def __init__(self,name):
super(MyFrame, self).__init__(name)
def LookupEvent(self, event) :
print self
print event
def DisplayTable (collection):
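# Shows the given primitives in a simple two-column table
# (addr:street / addr:housenumber) inside its own frame.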
columns=list(
(
("Street","addr:street"),
("Num","addr:housenumber")
)
)
tm= ObjectTableModel(collection,columns)
frame = MyFrame("street")
frame.setSize(800, 1200)
frame.setLayout(BorderLayout())
table = JTable(tm)
table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS)
header = table.getTableHeader()
header.setUpdateTableInRealTime(True)
header.setReorderingAllowed(True);
scrollPane = JScrollPane()
scrollPane.getViewport().setView((table))
frame.add(scrollPane)
frame.pack();
frame.setSize(frame.getPreferredSize());
frame.show()
def DisplayStreetTable (collection):
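# Like DisplayTable, but with a single "name" column, an Edit -> Lookup menu bound
# to MyFrame.LookupEvent, and the MyListener mouse/key handler attached to the table.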
columns=list(
(
("Name","name"),
)
)
tm= ObjectTableModel(collection,columns)
frame = MyFrame("Street Table")
frame.setSize(800, 1200)
frame.setLayout(BorderLayout())
table = JTable(tm)
table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS)
header = table.getTableHeader()
header.setUpdateTableInRealTime(True)
header.setReorderingAllowed(True);
scrollPane = JScrollPane()
scrollPane.getViewport().setView((table))
# copyButton = JButton('Merge') #,actionPerformed=self.noAction
# frame.add(copyButton)
listener=MyListener(table)
table.addMouseListener(listener)
table.addKeyListener(listener)
menubar = JMenuBar()
file = JMenu("Edit")
file.setMnemonic(KeyEvent.VK_E)
lookup = JMenuItem("Lookup",actionPerformed=frame.LookupEvent)
lookup.setMnemonic(KeyEvent.VK_L)
file.add(lookup)
menubar.add(file)
frame.setJMenuBar(menubar)
frame.add(scrollPane)
frame.pack();
frame.setSize(frame.getPreferredSize());
frame.show()
def isBuilding(p):
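# A primitive counts as a building if it carries a building=* tag other than "no" or "entrance".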
v = p.get("building");
if v is not None and v != "no" and v != "entrance":
return True
else:
return False
class BuildingInBuilding :
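# Port of JOSM's "Building inside building" validator test: building nodes and closed
# building ways are collected in primitivesToCheck, and ways additionally go into a
# QuadBuckets spatial index so containment can be evaluated later.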
BUILDING_INSIDE_BUILDING = 2001;
def __init__ (self):
self.primitivesToCheck = LinkedList();
self.index = QuadBuckets();
print 'building in building'
#super(tr("Building inside building"), tr("Checks for building areas inside of buildings."));
def visitn(self,n) :
# print "visitn:"
# print n
if (n.isUsable() and isBuilding(n)) :
if not self.primitivesToCheck.contains(n):
# print "adding :" n
self.primitivesToCheck.add(n);
else:
print "duplicate p :"
# print n
def visitw(self,w) :
|
def isInPolygon(n, polygon) :
return Geometry.nodeInsidePolygon(n, polygon);
def sameLayers( w1, w2) :
if w1.get("layer") is not None :
l1 = w1.get("layer")
else :
l1 = "0";
if w2.get("layer") is not None :
l2 = w2.get("layer")
else :
l2 ="0";
return l1.equals(l2);
def evaluate | print "visitw:"
# print w
if (w.isUsable() and w.isClosed() and isBuilding(w)) :
self.primitivesToCheck.add(w)
self.index.add(w)
print "added" | identifier_body |
readfile.py | map.josm.data.Preferences as Preferences;
import org.openstreetmap.josm.data.osm.OsmPrimitive as OsmPrimitive;
import org.openstreetmap.josm.data.projection.Projection;
import org.openstreetmap.josm.Main as Main;
import org.openstreetmap.josm.gui.layer.OsmDataLayer;
import org.openstreetmap.josm.gui.progress.NullProgressMonitor as NullProgressMonitor;
import org.openstreetmap.josm.gui.progress.ProgressMonitor;
import org.openstreetmap.josm.io.IllegalDataException;
import org.openstreetmap.josm.io.OsmImporter as OsmImporter;
import org.openstreetmap.josm.io.OsmImporter.OsmImporterData;
import org.openstreetmap.josm.gui.preferences.projection.ProjectionChoice;
import org.openstreetmap.josm.gui.preferences.projection.ProjectionPreference as ProjectionPreference;
from java.awt.event import KeyEvent
from javax.swing import ImageIcon
from javax.swing import JMenu
from javax.swing import JMenuBar
from javax.swing import JMenuItem
from java.awt.event import MouseListener
from java.awt.event import KeyListener
class ObjectTableModel(AbstractTableModel):
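# Adapts a list of tagged JOSM primitives (anything exposing .get(key)) to Swing's
# AbstractTableModel; columns are (header, tag-key) pairs read via get(tag-key).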
__columns__ = ()
def __init__(self, delegate, columns):
AbstractTableModel.__init__(self)
self.__columns__ = columns
self.delegate = delegate
self._getters = [None] * len(self.__columns__)
for index, column in enumerate(self.__columns__):
self.__columns__[index] = self._validateColumn(column, index)
def _fireItemsChanged(self, start, end):
self.fireTableRowsUpdated(start, end)
def _fireItemsAdded(self, start, end):
self.fireTableRowsInserted(start, end)
def _fireItemsRemoved(self, start, end):
self.fireTableRowsDeleted(start, end)
def setDelegate(self, value):
self._delegate = value
self.fireTableDataChanged()
def getColumnCount(self):
return len(self.__columns__)
def getRowCount(self):
n= len(self.delegate)
# print "row count %d " % n
return n
def getColumnClass(self, columnIndex):
return basestring
# return self.__columns__[columnIndex][1]
def getColumnName(self, columnIndex):
return self.__columns__[columnIndex][0]
def setValueAt(self, aValue, rowIndex, columnIndex):
self[rowIndex][columnIndex] = aValue
def refresh(self):
if len(self) > 0:
self.fireTableRowsUpdated(0, len(self) - 1)
def _validateColumn(self, column, index):
#column = DelegateTableModel._validateColumn(self, column, index)
self._getters[index] = lambda row: row.get(column[2])
return column
def getValueAt(self, rowIndex, columnIndex):
print "getValueAt " + str(rowIndex) + ":"+ str(columnIndex)
#line = self.delegate[rowIndex]
return self.delegate[rowIndex].get(self.__columns__[columnIndex][1])
#return self._getters[columnIndex](line)
def setValueAt(self, aValue, rowIndex, columnIndex):
attrname = self.__columns__[columnIndex][2]
setattr(self[rowIndex], attrname, aValue)
self.fireTableCellUpdated(rowIndex, columnIndex)
def getObjectIndex(self, obj):
for i, row in enumerate(self):
if row == obj:
return i
return - 1
def getSelectedObject(self, table):
assert table.model is self
if table.selectedRow >= 0:
modelRow = table.convertRowIndexToModel(table.selectedRow)
return self[modelRow]
def getSelectedObjects(self, table):
assert table.model is self
selected = []
for viewRow in table.selectedRows:
modelRow = table.convertRowIndexToModel(viewRow)
selected.append(self[modelRow])
return selected
def getVisibleObjects(self, table):
assert table.model is self
visible = []
for viewRow in xrange(table.rowCount):
modelRow = table.convertRowIndexToModel(viewRow)
visible.append(self[modelRow])
return visible
#def EventListener():
class MyListener(MouseListener, KeyListener):
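# Combined mouse/keyboard listener for the table: 'l' prints the column-0 value of
# each selected row, mouseClicked only reports the button that was pressed.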
def __init__(self, table):
self.table=table
# def mouseReleased(self, e):
# print("Mouse released; # of clicks: " + str(e.getClickCount()))
# print("button: " + str(e.getButton()))
def keyPressed( self,e) :
print("key pressed; " + str(e.getKeyChar()))
# print self.table
# r = self.table.getSelectedRow ()
# print r
rs= self.table.getSelectedRows()
if (e.getKeyChar() == 'l') :
print("lookup; ")
for r in rs :
print r
obj=self.table.getValueAt(r,0)
print obj
#d = LookupDialog()
# def mouseReleased(self,e) :
# print("Mouse released; # of clicks: " + str(e.getClickCount()))
def mouseClicked(self, e):
# print("Mouse clicked; # of clicks: " + str(e.getClickCount()))
# print("button: " + str(e.getButton()))
if (e.getButton() ==3) :
print "Mouse3 clicked; # of clicks: " + str(e.getClickCount())
else :
if (e.getButton() ==2):
print "Mouse2 clicked; # of clicks: " + str(e.getClickCount())
else:
if (e.getButton() ==1):
print "Mouse1 clicked; # of clicks: " + str(e.getClickCount())
class MyFrame(JFrame):
def __init__(self,name):
super(MyFrame, self).__init__(name)
def LookupEvent(self, event) :
print self
print event
def DisplayTable (collection):
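# Shows the given primitives in a two-column table (addr:street / addr:housenumber).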
columns=list(
(
("Street","addr:street"),
("Num","addr:housenumber")
)
)
tm= ObjectTableModel(collection,columns)
frame = MyFrame("street")
frame.setSize(800, 1200)
frame.setLayout(BorderLayout())
table = JTable(tm)
table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS)
header = table.getTableHeader()
header.setUpdateTableInRealTime(True)
header.setReorderingAllowed(True);
scrollPane = JScrollPane()
scrollPane.getViewport().setView((table))
frame.add(scrollPane)
frame.pack();
frame.setSize(frame.getPreferredSize());
frame.show()
def DisplayStreetTable (collection):
columns=list(
(
("Name","name"),
)
)
tm= ObjectTableModel(collection,columns)
frame = MyFrame("Street Table")
frame.setSize(800, 1200)
frame.setLayout(BorderLayout())
table = JTable(tm)
table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS)
header = table.getTableHeader()
header.setUpdateTableInRealTime(True)
header.setReorderingAllowed(True);
scrollPane = JScrollPane()
scrollPane.getViewport().setView((table))
# copyButton = JButton('Merge') #,actionPerformed=self.noAction
# frame.add(copyButton)
listener=MyListener(table)
table.addMouseListener(listener)
table.addKeyListener(listener)
menubar = JMenuBar()
file = JMenu("Edit")
file.setMnemonic(KeyEvent.VK_E)
lookup = JMenuItem("Lookup",actionPerformed=frame.LookupEvent)
lookup.setMnemonic(KeyEvent.VK_L)
file.add(lookup)
menubar.add(file)
frame.setJMenuBar(menubar)
frame.add(scrollPane)
frame.pack();
frame.setSize(frame.getPreferredSize());
frame.show()
def isBuilding(p):
v = p.get("building");
if v is not None and v != "no" and v != "entrance":
return True
else:
return False
class BuildingInBuilding :
BUILDING_INSIDE_BUILDING = 2001;
def __init__ (self):
self.primitivesToCheck = LinkedList();
self.index = QuadBuckets();
print 'building in building'
#super(tr("Building inside building"), tr("Checks for building areas inside of buildings."));
def visitn(self,n) :
# print "visitn:"
# print n
if (n.isUsable() and isBuilding(n)) :
if not self.primitivesToCheck.contains(n):
# print "adding :" n
self.primitivesToCheck.add(n);
else:
print "duplicate p :"
# print n
def visitw(self,w) :
print "visitw:"
# print w
if (w.isUsable() and w.isClosed() and isBuilding(w)) :
self.primitivesToCheck.add(w)
self.index.add(w)
print "added"
def isInPolygon(n, polygon) :
return Geometry.nodeInsidePolygon(n, polygon);
def | ( w1, w2) :
if w1.get("layer") is not None :
l1 = w1.get("layer")
else :
l1 = "0";
if w2.get("layer") is not None :
l2 = w2.get("layer")
else :
l2 ="0";
return l1.equals(l2);
def evaluate | sameLayers | identifier_name |
readfile.py | ][columnIndex] = aValue
def refresh(self):
if len(self) > 0:
self.fireTableRowsUpdated(0, len(self) - 1)
def _validateColumn(self, column, index):
#column = DelegateTableModel._validateColumn(self, column, index)
self._getters[index] = lambda row: row.get(column[2])
return column
def getValueAt(self, rowIndex, columnIndex):
print "getValueAt " + str(rowIndex) + ":"+ str(columnIndex)
#line = self.delegate[rowIndex]
return self.delegate[rowIndex].get(self.__columns__[columnIndex][1])
#return self._getters[columnIndex](line)
def setValueAt(self, aValue, rowIndex, columnIndex):
attrname = self.__columns__[columnIndex][2]
setattr(self[rowIndex], attrname, aValue)
self.fireTableCellUpdated(rowIndex, columnIndex)
def getObjectIndex(self, obj):
for i, row in enumerate(self):
if row == obj:
return i
return - 1
def getSelectedObject(self, table):
assert table.model is self
if table.selectedRow >= 0:
modelRow = table.convertRowIndexToModel(table.selectedRow)
return self[modelRow]
def getSelectedObjects(self, table):
assert table.model is self
selected = []
for viewRow in table.selectedRows:
modelRow = table.convertRowIndexToModel(viewRow)
selected.append(self[modelRow])
return selected
def getVisibleObjects(self, table):
assert table.model is self
visible = []
for viewRow in xrange(table.rowCount):
modelRow = table.convertRowIndexToModel(viewRow)
visible.append(self[modelRow])
return visible
#def EventListener():
class MyListener (MouseListener,KeyListener ) :
def __init__(self, table):
self.table=table
# def mouseReleased(self, e):
# print("Mouse released; # of clicks: " + str(e.getClickCount()))
# print("button: " + str(e.getButton()))
def keyPressed( self,e) :
print("key pressed; " + str(e.getKeyChar()))
# print self.table
# r = self.table.getSelectedRow ()
# print r
rs= self.table.getSelectedRows()
if (e.getKeyChar() == 'l') :
print("lookup; ")
for r in rs :
print r
obj=self.table.getValueAt(r,0)
print obj
#d = LookupDialog()
# def mouseReleased(self,e) :
# print("Mouse released; # of clicks: " + str(e.getClickCount()))
def mouseClicked(self, e):
# print("Mouse clicked; # of clicks: " + str(e.getClickCount()))
# print("button: " + str(e.getButton()))
if (e.getButton() ==3) :
print "Mouse3 clicked; # of clicks: " + str(e.getClickCount())
else :
if (e.getButton() ==2):
print "Mouse2 clicked; # of clicks: " + str(e.getClickCount())
else:
if (e.getButton() ==1):
print "Mouse1 clicked; # of clicks: " + str(e.getClickCount())
class MyFrame (JFrame ) :
def __init__(self,name):
super(MyFrame, self).__init__(name)
def LookupEvent(self, event) :
print self
print event
def DisplayTable (collection):
columns=list(
(
("Street","addr:street"),
("Num","addr:housenumber")
)
)
tm= ObjectTableModel(collection,columns)
frame = MyFrame("street")
frame.setSize(800, 1200)
frame.setLayout(BorderLayout())
table = JTable(tm)
table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS)
header = table.getTableHeader()
header.setUpdateTableInRealTime(True)
header.setReorderingAllowed(True);
scrollPane = JScrollPane()
scrollPane.getViewport().setView((table))
frame.add(scrollPane)
frame.pack();
frame.setSize(frame.getPreferredSize());
frame.show()
def DisplayStreetTable (collection):
columns=list(
(
("Name","name"),
)
)
tm= ObjectTableModel(collection,columns)
frame = MyFrame("Street Table")
frame.setSize(800, 1200)
frame.setLayout(BorderLayout())
table = JTable(tm)
table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS)
header = table.getTableHeader()
header.setUpdateTableInRealTime(True)
header.setReorderingAllowed(True);
scrollPane = JScrollPane()
scrollPane.getViewport().setView((table))
# copyButton = JButton('Merge') #,actionPerformed=self.noAction
# frame.add(copyButton)
listener=MyListener(table)
table.addMouseListener(listener)
table.addKeyListener(listener)
menubar = JMenuBar()
file = JMenu("Edit")
file.setMnemonic(KeyEvent.VK_E)
lookup = JMenuItem("Lookup",actionPerformed=frame.LookupEvent)
lookup.setMnemonic(KeyEvent.VK_L)
file.add(lookup)
menubar.add(file)
frame.setJMenuBar(menubar)
frame.add(scrollPane)
frame.pack();
frame.setSize(frame.getPreferredSize());
frame.show()
def isBuilding(p):
v = p.get("building");
if v is not None and v != "no" and v != "entrance":
return True
else:
return False
class BuildingInBuilding :
BUILDING_INSIDE_BUILDING = 2001;
def __init__ (self):
self.primitivesToCheck = LinkedList();
self.index = QuadBuckets();
print 'building in building'
#super(tr("Building inside building"), tr("Checks for building areas inside of buildings."));
def visitn(self,n) :
# print "visitn:"
# print n
if (n.isUsable() and isBuilding(n)) :
if not self.primitivesToCheck.contains(n):
# print "adding :" n
self.primitivesToCheck.add(n);
else:
print "duplicate p :"
# print n
def visitw(self,w) :
print "visitw:"
# print w
if (w.isUsable() and w.isClosed() and isBuilding(w)) :
self.primitivesToCheck.add(w)
self.index.add(w)
print "added"
def isInPolygon(n, polygon) :
return Geometry.nodeInsidePolygon(n, polygon);
def sameLayers( w1, w2) :
if w1.get("layer") is not None :
l1 = w1.get("layer")
else :
l1 = "0";
if w2.get("layer") is not None :
l2 = w2.get("layer")
else :
l2 ="0";
return l1.equals(l2);
def evaluateNode(self,p,obj):
print "te"
# print p
# print obj
def endTest2(self):
for p in self.primitivesToCheck :
collection = self.index.search(p.getBBox())
for object in collection:
if (not p.equals(object)):
if (isinstance(p,Node)):
self.evaluateNode(p, object)
else :
print p
# else if (p instanceof Way)
# return evaluateWay((Way) p, object);
# else if (p instanceof Relation)
# return evaluateRelation((Relation) p, object);
# return false;
def endTest(self) :
print "end"
# bbox = BBox(-180,-90,180,90)
bbox = BBox(-1000,-900,1800,900)
print self.index
collection = self.index.search(bbox)
# print collection
def projection() :
print "projection"
pc = ProjectionPreference.mercator
id = pc.getId()
pref = None
Main.pref.putCollection("projection.sub."+id, pref)
pc.setPreferences(pref)
proj = pc.getProjection()
Main.setProjection(proj)
def prefs() :
print "prefs"
Main.pref = Preferences()
Main.pref.put("tags.reversed_direction", "false")
class JythonWay():
def __init__(self,x):
self.way=x
self.subobjects=[]
pass
# todo:
def lookup(self) :
# lookup this street name on the internet
print "ToDO"
# todo:
def merge(self) :
# merge these two streets, fix the names
print "ToDO fix the streets"
def get (self,k):
return self.way.get(k)
def name(self):
return self.way.get('name')
def addsubobject(self, other):
return self.subobjects.append(other)
import re
pattern = re.compile(r'\s+')
def streetlist(objs) :
objs2 = []
streets = {}
for p in objs:
if (not isinstance(p,Way)):
continue
s=p.get('name')
hw=p.get('highway')
if (s is None):
continue
if (hw is None):
| continue | conditional_block |
|
readfile.py | .osm.OsmPrimitive as OsmPrimitive;
import org.openstreetmap.josm.data.projection.Projection;
import org.openstreetmap.josm.Main as Main;
import org.openstreetmap.josm.gui.layer.OsmDataLayer;
import org.openstreetmap.josm.gui.progress.NullProgressMonitor as NullProgressMonitor;
import org.openstreetmap.josm.gui.progress.ProgressMonitor;
import org.openstreetmap.josm.io.IllegalDataException;
import org.openstreetmap.josm.io.OsmImporter as OsmImporter;
import org.openstreetmap.josm.io.OsmImporter.OsmImporterData;
import org.openstreetmap.josm.gui.preferences.projection.ProjectionChoice;
import org.openstreetmap.josm.gui.preferences.projection.ProjectionPreference as ProjectionPreference;
from java.awt.event import KeyEvent
from javax.swing import ImageIcon
from javax.swing import JMenu
from javax.swing import JMenuBar
from javax.swing import JMenuItem
from java.awt.event import MouseListener
from java.awt.event import KeyListener
class ObjectTableModel(AbstractTableModel):
__columns__ = ()
def __init__(self, delegate, columns):
AbstractTableModel.__init__(self)
self.__columns__ = columns
self.delegate= delegate
self._getters = [None] * len(self.__columns__)
for index, column in enumerate(self.__columns__):
self.__columns__[index] = self._validateColumn(column, index)
def _fireItemsChanged(self, start, end):
self.fireTableRowsUpdated(start, end)
def _fireItemsAdded(self, start, end):
self.fireTableRowsInserted(start, end)
def _fireItemsRemoved(self, start, end):
self.fireTableRowsDeleted(start, end)
def setDelegate(self, value):
self._delegate = value
self.fireTableDataChanged()
def getColumnCount(self):
return len(self.__columns__)
def getRowCount(self):
n= len(self.delegate)
# print "row count %d " % n
return n
def getColumnClass(self, columnIndex):
return basestring
# return self.__columns__[columnIndex][1]
def getColumnName(self, columnIndex):
return self.__columns__[columnIndex][0]
def setValueAt(self, aValue, rowIndex, columnIndex):
self[rowIndex][columnIndex] = aValue
def refresh(self):
if len(self) > 0:
self.fireTableRowsUpdated(0, len(self) - 1)
def _validateColumn(self, column, index):
#column = DelegateTableModel._validateColumn(self, column, index)
self._getters[index] = lambda row: row.get(column[2])
return column
def getValueAt(self, rowIndex, columnIndex):
print "getValueAt " + str(rowIndex) + ":"+ str(columnIndex)
#line = self.delegate[rowIndex]
return self.delegate[rowIndex].get(self.__columns__[columnIndex][1])
#return self._getters[columnIndex](line)
def setValueAt(self, aValue, rowIndex, columnIndex):
attrname = self.__columns__[columnIndex][2]
setattr(self[rowIndex], attrname, aValue)
self.fireTableCellUpdated(rowIndex, columnIndex)
def getObjectIndex(self, obj):
for i, row in enumerate(self):
if row == obj:
return i
return - 1
def getSelectedObject(self, table):
assert table.model is self
if table.selectedRow >= 0:
modelRow = table.convertRowIndexToModel(table.selectedRow)
return self[modelRow]
def getSelectedObjects(self, table):
assert table.model is self
selected = []
for viewRow in table.selectedRows:
modelRow = table.convertRowIndexToModel(viewRow)
selected.append(self[modelRow])
return selected
def getVisibleObjects(self, table):
assert table.model is self
visible = []
for viewRow in xrange(table.rowCount):
modelRow = table.convertRowIndexToModel(viewRow)
visible.append(self[modelRow])
return visible
#def EventListener():
class MyListener (MouseListener,KeyListener ) :
def __init__(self, table):
self.table=table
# def mouseReleased(self, e):
# print("Mouse released; # of clicks: " + str(e.getClickCount()))
# print("button: " + str(e.getButton()))
def keyPressed( self,e) :
print("key pressed; " + str(e.getKeyChar()))
# print self.table
# r = self.table.getSelectedRow ()
# print r
rs= self.table.getSelectedRows()
if (e.getKeyChar() == 'l') :
print("lookup; ")
for r in rs :
print r
obj=self.table.getValueAt(r,0)
print obj
#d = LookupDialog()
# def mouseReleased(self,e) :
# print("Mouse released; # of clicks: " + str(e.getClickCount()))
def mouseClicked(self, e):
# print("Mouse clicked; # of clicks: " + str(e.getClickCount()))
# print("button: " + str(e.getButton()))
if (e.getButton() ==3) :
print "Mouse3 clicked; # of clicks: " + str(e.getClickCount())
else :
if (e.getButton() ==2):
print "Mouse2 clicked; # of clicks: " + str(e.getClickCount())
else:
if (e.getButton() ==1):
print "Mouse1 clicked; # of clicks: " + str(e.getClickCount())
class MyFrame (JFrame ) :
def __init__(self,name):
super(MyFrame, self).__init__(name)
def LookupEvent(self, event) :
print self
print event
def DisplayTable (collection):
columns=list(
(
("Street","addr:street"),
("Num","addr:housenumber")
)
)
tm= ObjectTableModel(collection,columns)
frame = MyFrame("street")
frame.setSize(800, 1200)
frame.setLayout(BorderLayout())
table = JTable(tm)
table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS)
header = table.getTableHeader()
header.setUpdateTableInRealTime(True)
header.setReorderingAllowed(True);
scrollPane = JScrollPane()
scrollPane.getViewport().setView((table))
frame.add(scrollPane)
frame.pack();
frame.setSize(frame.getPreferredSize());
frame.show()
def DisplayStreetTable (collection):
columns=list(
(
("Name","name"),
)
)
tm= ObjectTableModel(collection,columns)
frame = MyFrame("Street Table")
frame.setSize(800, 1200)
frame.setLayout(BorderLayout())
table = JTable(tm)
table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS)
header = table.getTableHeader()
header.setUpdateTableInRealTime(True)
header.setReorderingAllowed(True);
scrollPane = JScrollPane()
scrollPane.getViewport().setView((table))
# copyButton = JButton('Merge') #,actionPerformed=self.noAction
# frame.add(copyButton)
listener=MyListener(table)
table.addMouseListener(listener)
table.addKeyListener(listener)
menubar = JMenuBar()
file = JMenu("Edit")
file.setMnemonic(KeyEvent.VK_E)
lookup = JMenuItem("Lookup",actionPerformed=frame.LookupEvent)
lookup.setMnemonic(KeyEvent.VK_L)
file.add(lookup)
menubar.add(file)
frame.setJMenuBar(menubar)
frame.add(scrollPane)
frame.pack();
frame.setSize(frame.getPreferredSize());
frame.show()
def isBuilding(p):
v = p.get("building");
if v is not None and v != "no" and v != "entrance":
return True
else:
return False
class BuildingInBuilding :
BUILDING_INSIDE_BUILDING = 2001;
def __init__ (self):
self.primitivesToCheck = LinkedList();
self.index = QuadBuckets();
print 'building in building'
#super(tr("Building inside building"), tr("Checks for building areas inside of buildings."));
def visitn(self,n) :
# print "visitn:"
# print n
if (n.isUsable() and isBuilding(n)) :
if not self.primitivesToCheck.contains(n):
# print "adding :" n
self.primitivesToCheck.add(n);
else:
print "duplicate p :"
# print n
def visitw(self,w) :
print "visitw:"
# print w
if (w.isUsable() and w.isClosed() and isBuilding(w)) :
self.primitivesToCheck.add(w)
self.index.add(w)
print "added"
def isInPolygon(n, polygon) :
return Geometry.nodeInsidePolygon(n, polygon);
def sameLayers( w1, w2) :
if w1.get("layer") is not None :
l1 = w1.get("layer")
else :
l1 = "0";
if w2.get("layer") is not None :
l2 = w2.get("layer")
else :
l2 ="0";
return l1.equals(l2);
| def evaluateNode(self,p,obj):
print "te"
# print p
# print obj
| random_line_split |
|
common.js | = new $.fn.Global();
// ajax json call - 비동기 방식(기본:로딩 화면 있음)
JsonCall = function (url, params, reCall, showLoading) {
//params = "paramList=" + JSON.stringify(params);
if (showLoading == undefined) {
showLoading = true;
}
try {
if (showLoading)
global.showLoading(true);
$.ajax({
type: "post",
//async:false,
url: url + "?nocashe=" + String(Math.random()),
//dataType: "json",
data: params,
contentType: "application/x-www-form-urlencoded; charset=UTF-8",
beforeSend: function (xhr) {
xhr.setRequestHeader("Content-Encoding", "gzip");
},
fail: function (data) {
if (showLoading)
global.showLoading(false);
},
complete: function (data) {
if (showLoading)
global.showLoading(false);
reCall(data);
}
});
}
catch (e) {
alert("JSON Error: " + e.message);
}
};
//ajax json call - 동기 방식(로딩 화면 없음)
JsonCallSync = function (url, params, reCall) {
//params = "paramList=" + JSON.stringify(params);
try {
$.ajax({
type: "post",
async: false,
url: url + "?nocashe=" + String(Math.random()),
//dataType: "json",
data: params,
contentType: "application/x-www-form-urlencoded; charset=UTF-8",
beforeSend: function (xhr) {
xhr.setRequestHeader("Content-Encoding", "gzip");
},
fail: function (data) {
},
complete: function (data) {
reCall(data);
}
});
}
catch (e) {
alert("JSON Error: " + e.message);
}
};
//ajax json call - 동기 방식(데이터 바로 받음)
JsonReturnDataSync = function (url, params) {
//params = "paramList=" + JSON.stringify(params);
var jsonVal = "";
try {
$.ajax({
type: "post",
async: false,
url: url + "?nocashe=" + String(Math.random()),
//dataType: "json",
data: params,
contentType: "application/x-www-form-urlencoded; charset=UTF-8",
beforeSend: function (xhr) {
xhr.setRequestHeader("Content-Encoding", "gzip");
},
fail: function (data) {
},
complete: function (data) {
jsonVal = data;
}
});
return jsonVal;
}
catch (e) {
alert("JSON Error: " + e.message);
}
};
/****************************************
* 타입에 따라 링크 이동 방법이 바뀐다.
* url : 링크 주소
* type : 3 :return url, 2:window.open(), 그외:windows.location
****************************************/
goUrl = function (url, type) {
// type 기본값 설정
type = (type == undefined) ? '1' : type;
if (type == "") {
type = "1"; //1:링크
}
if (type == "3") {
return url;
} else if (type == "2") {
if (url == "") {
alert(' 개발 중');
} else {
window.open(url, '_blank');//새창링크
}
} else {
if (url == "") {
alert(' 개발 중');
} else {
location.href = url;//링크
}
}
}
/**************************************
* 예외 처리
**************************************/
var _debug; //디버깅 모드 설정용, 각 페이지에서 true로 설정하면, 오류를 alert 창으로 표시한다.
var _notRedirection; //페이지 이동 금지 설정용, 각 페이지에서 true로 설정하면, 페이지 이동하지 않는다.
//오류발생시 처리
//params>
// e: Exception 객체
// headerText: 오류 구분용 Header Text
// menu: 메뉴 구분자, 페이지 이동 등의 메뉴별 오류 처리용 구분자, null || "" 일 때 이동하지 않는다.
//return>
checkException = function (e, headerText, menu) {
//디버깅 모드 이면 메시지 창으로 오류 표시
if (_debug) {
var sb = new StringBuilder();
sb.AppendFormat("* Error [ {0} ]\n\n* Message: {1}\n\n* Stack:\n{2}", headerText, e.message, e.stack);
alert(sb.ToString());
} else {
//디버딩 모드가 아닐 때 오류 공통 처리
}
//화면 이동 금지 상태 이면 return
if (_notRedirection) {
return;
} else {
//메뉴별 화면 이동 처리
switch (menu) {
case "main":
break;
}
}
}
// 파라미터 받아오기
// 사용법 : var param = getParameters();
function getParameters() {
var searchString = window.location.search.substring(1),
params = searchString.split("&"),
hash = {};
if (searchString == "") return {};
for (var i = 0; i < params.length; i++) {
var val = params[i].split("=");
hash[unescape(val[0])] = unescape(val[1]);
}
return hash;
}
//휴대폰 번호 Array 형식으로 변환
getMemberInfoHandPhoneArray = function (phoneNum) {
var hpArray = [];
if (phoneNum == undefined || phoneNum == '')
return hpArray;
var hpLen = phoneNum.length;
var hp1 = '';
var hp2 = '';
var hp3 = '';
if (hpLen == 13) {// 13자리 번호 일때 : 010-2222-3333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(4, 8);
hp3 = phoneNum.substring(9);
} else if (hpLen == 12) | 때 : 010-222-3333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(4, 7);
hp3 = phoneNum.substring(8);
} else if (hpLen == 11) {// 11자리 번호 일때 : 01022223333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(3, 7);
hp3 = phoneNum.substring(7);
} else if (hpLen == 10) {// 10자리 번호 일때 : 0102223333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(3, 6);
hp3 = phoneNum.substring(6);
} else {
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(3, 7);
hp3 = phoneNum.substring(7);
}
hpArray.push(hp1);
hpArray.push(hp2);
hpArray.push(hp3);
return hpArray;
}
//메인 화면으로 이동 한다.
//params>
//return>
goToMain = function () {
$(location).attr("href", "/com/main.do");
}
//팝업창을 띠운다
//sUrl - 띠울 URL
//sFrame - 띠울이름
//sFeature - 창 속성
openDialog = function (sUrl, sFrame, sFeature) {
var op = window.open(sUrl, sFrame, sFeature);
return op;
}
var ctrlDown = false;
//숫자만 입력 Input Key Event
//params>
//return>
//$("#userName").attr("onkeydown", "onlyNumberInputEvent(event);");
onlyNumberInputEvent = function (event) {
try {
var ctrlKey = 17, vKey = 86, cKey = 67;
var key = event.which || event.keyCode;
// backspace:8
// tab:9
// delete:46
if (key == 8 || key == 9 || key == 46) {
// 키 통과
} else {
if (ctrlDown && (key == 86 || key == 67)) {
}
else if (key >= 48 && key <= 57) {
// 숫자 확인
}
else if (key >= 96 && key <= 105) {
// 숫자 확인
} else {
//이벤트 해제
if (event.preventDefault) {
event.preventDefault();
} else {
event.returnValue = false;
}
}
}
} catch (e) {
}
}
$(function () {
$.fn.ClientInfo = function () {
this.channelType = "HO";
this.browser | {// 12자리 번호 일 | identifier_name |
common.js | = new $.fn.Global();
// ajax json call - 비동기 방식(기본:로딩 화면 있음)
JsonCall = function (url, params, reCall, showLoading) {
//params = "paramList=" + JSON.stringify(params);
if (showLoading == undefined) {
showLoading = true;
}
try {
if (showLoading)
global.showLoading(true);
$.ajax({
type: "post",
//async:false,
url: url + "?nocashe=" + String(Math.random()),
//dataType: "json",
data: params,
contentType: "application/x-www-form-urlencoded; charset=UTF-8",
beforeSend: function (xhr) {
xhr.setRequestHeader("Content-Encoding", "gzip");
},
fail: function (data) {
if (showLoading)
global.showLoading(false);
},
complete: function (data) {
if (showLoading)
global.showLoading(false);
reCall(data);
}
});
}
catch (e) {
alert("JSON Error: " + e.message);
} | //ajax json call - 동기 방식(로딩 화면 없음)
JsonCallSync = function (url, params, reCall) {
//params = "paramList=" + JSON.stringify(params);
try {
$.ajax({
type: "post",
async: false,
url: url + "?nocashe=" + String(Math.random()),
//dataType: "json",
data: params,
contentType: "application/x-www-form-urlencoded; charset=UTF-8",
beforeSend: function (xhr) {
xhr.setRequestHeader("Content-Encoding", "gzip");
},
fail: function (data) {
},
complete: function (data) {
reCall(data);
}
});
}
catch (e) {
alert("JSON Error: " + e.message);
}
};
//ajax json call - 동기 방식(데이터 바로 받음)
JsonReturnDataSync = function (url, params) {
//params = "paramList=" + JSON.stringify(params);
var jsonVal = "";
try {
$.ajax({
type: "post",
async: false,
url: url + "?nocashe=" + String(Math.random()),
//dataType: "json",
data: params,
contentType: "application/x-www-form-urlencoded; charset=UTF-8",
beforeSend: function (xhr) {
xhr.setRequestHeader("Content-Encoding", "gzip");
},
fail: function (data) {
},
complete: function (data) {
jsonVal = data;
}
});
return jsonVal;
}
catch (e) {
alert("JSON Error: " + e.message);
}
};
/****************************************
* 타입에 따라 링크 이동 방법이 바뀐다.
* url : 링크 주소
* type : 3 :return url, 2:window.open(), 그외:windows.location
****************************************/
goUrl = function (url, type) {
// type 기본값 설정
type = (type == undefined) ? '1' : type;
if (type == "") {
type = "1"; //1:링크
}
if (type == "3") {
return url;
} else if (type == "2") {
if (url == "") {
alert(' 개발 중');
} else {
window.open(url, '_blank');//새창링크
}
} else {
if (url == "") {
alert(' 개발 중');
} else {
location.href = url;//링크
}
}
}
/**************************************
* 예외 처리
**************************************/
var _debug; //디버깅 모드 설정용, 각 페이지에서 true로 설정하면, 오류를 alert 창으로 표시한다.
var _notRedirection; //페이지 이동 금지 설정용, 각 페이지에서 true로 설정하면, 페이지 이동하지 않는다.
//오류발생시 처리
//params>
// e: Exception 객체
// headerText: 오류 구분용 Header Text
// menu: 메뉴 구분자, 페이지 이동 등의 메뉴별 오류 처리용 구분자, null || "" 일 때 이동하지 않는다.
//return>
checkException = function (e, headerText, menu) {
//디버깅 모드 이면 메시지 창으로 오류 표시
if (_debug) {
var sb = new StringBuilder();
sb.AppendFormat("* Error [ {0} ]\n\n* Message: {1}\n\n* Stack:\n{2}", headerText, e.message, e.stack);
alert(sb.ToString());
} else {
//디버딩 모드가 아닐 때 오류 공통 처리
}
//화면 이동 금지 상태 이면 return
if (_notRedirection) {
return;
} else {
//메뉴별 화면 이동 처리
switch (menu) {
case "main":
break;
}
}
}
// 파라미터 받아오기
// 사용법 : var param = getParameters();
function getParameters() {
var searchString = window.location.search.substring(1),
params = searchString.split("&"),
hash = {};
if (searchString == "") return {};
for (var i = 0; i < params.length; i++) {
var val = params[i].split("=");
hash[unescape(val[0])] = unescape(val[1]);
}
return hash;
}
//휴대폰 번호 Array 형식으로 변환
getMemberInfoHandPhoneArray = function (phoneNum) {
var hpArray = [];
if (phoneNum == undefined || phoneNum == '')
return hpArray;
var hpLen = phoneNum.length;
var hp1 = '';
var hp2 = '';
var hp3 = '';
if (hpLen == 13) {// 13자리 번호 일때 : 010-2222-3333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(4, 8);
hp3 = phoneNum.substring(9);
} else if (hpLen == 12) {// 12자리 번호 일때 : 010-222-3333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(4, 7);
hp3 = phoneNum.substring(8);
} else if (hpLen == 11) {// 11자리 번호 일때 : 01022223333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(3, 7);
hp3 = phoneNum.substring(7);
} else if (hpLen == 10) {// 10자리 번호 일때 : 0102223333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(3, 6);
hp3 = phoneNum.substring(6);
} else {
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(3, 7);
hp3 = phoneNum.substring(7);
}
hpArray.push(hp1);
hpArray.push(hp2);
hpArray.push(hp3);
return hpArray;
}
//메인 화면으로 이동 한다.
//params>
//return>
goToMain = function () {
$(location).attr("href", "/com/main.do");
}
//팝업창을 띠운다
//sUrl - 띠울 URL
//sFrame - 띠울이름
//sFeature - 창 속성
openDialog = function (sUrl, sFrame, sFeature) {
var op = window.open(sUrl, sFrame, sFeature);
return op;
}
var ctrlDown = false;
//숫자만 입력 Input Key Event
//params>
//return>
//$("#userName").attr("onkeydown", "onlyNumberInputEvent(event);");
onlyNumberInputEvent = function (event) {
try {
var ctrlKey = 17, vKey = 86, cKey = 67;
var key = event.which || event.keyCode;
// backspace:8
// tab:9
// delete:46
if (key == 8 || key == 9 || key == 46) {
// 키 통과
} else {
if (ctrlDown && (key == 86 || key == 67)) {
}
else if (key >= 48 && key <= 57) {
// 숫자 확인
}
else if (key >= 96 && key <= 105) {
// 숫자 확인
} else {
//이벤트 해제
if (event.preventDefault) {
event.preventDefault();
} else {
event.returnValue = false;
}
}
}
} catch (e) {
}
}
$(function () {
$.fn.ClientInfo = function () {
this.channelType = "HO";
this.browserAgent | };
| random_line_split |
common.js | = new $.fn.Global();
// ajax json call - 비동기 방식(기본:로딩 화면 있음)
JsonCall = function (url, params, reCall, showLoading) {
//params = "paramList=" + JSON.stringify(params);
if (showLoading == undefined) {
showLoading = true;
}
try {
if (showLoading)
global.showLoading(true);
$.ajax({
type: "post",
//async:false,
url: url + "?nocashe=" + String(Math.random()),
//dataType: "json",
data: params,
contentType: "application/x-www-form-urlencoded; charset=UTF-8",
beforeSend: function (xhr) {
xhr.setRequestHeader("Content-Encoding", "gzip");
},
fail: function (data) {
if (showLoading)
global.showLoading(false);
},
complete: function (data) {
if (showLoading)
global.showLoading(false);
reCall(data);
}
});
}
catch (e) {
alert("JSON Error: " + e.message);
}
};
//ajax json call - 동기 방식(로딩 화면 없음)
JsonCallSync = function (url, params, reCall) {
//params = "paramList=" + JSON.stringify(params);
try {
$.ajax({
type: "post",
async: false,
url: url + "?nocashe=" + String(Math.random()),
//dataType: "json",
data: params,
contentType: "application/x-www-form-urlencoded; charset=UTF-8",
beforeSend: function (xhr) {
xhr.setRequestHeader("Content-Encoding", "gzip");
},
fail: function (data) {
},
complete: function (data) {
reCall(data);
}
});
}
catch (e) {
alert("JSON Error: " + e.message);
}
};
//ajax json call - 동기 방식(데이터 바로 받음)
JsonReturnDataSync = function (url, params) {
//params = "paramList=" + JSON.stringify(params);
var jsonVal = "";
try {
$.ajax({
type: "post",
async: false,
url: url + "?nocashe=" + String(Math.random()),
//dataType: "json",
data: params,
contentType: "application/x-www-form-urlencoded; charset=UTF-8",
beforeSend: function (xhr) {
xhr.setRequestHeader("Content-Encoding", "gzip");
},
fail: function (data) {
},
complete: function (data) {
jsonVal = data;
}
});
return jsonVal;
}
catch (e) {
alert("JSON Error: " + e.message);
}
};
/****************************************
* 타입에 따라 링크 이동 방법이 바뀐다.
* url : 링크 주소
* type : 3 :return url, 2:window.open(), 그외:windows.location
****************************************/
goUrl = function (url, type) {
// type 기본값 설정
type = (type == undefined) ? '1' : type;
if (type == "") {
type = "1"; //1:링크
}
if (type == "3") {
return url;
} else if (type == "2") {
if (url == "") {
alert(' 개발 중');
} else {
window.open(url, '_blank');//새창링크
}
} else {
if (url == "") {
alert(' 개발 중');
} else {
location.href = url;//링크
}
}
}
/**************************************
* 예외 처리
**************************************/
var _debug; //디버깅 모드 설정용, 각 페이지에서 true로 설정하면, 오류를 alert 창으로 표시한다.
var _notRedirection; //페이지 이동 금지 설정용, 각 페이지에서 true로 설정하면, 페이지 이동하지 않는다.
//오류발생시 처리
//params>
// e: Exception 객체
// headerText: 오류 구분용 Header Text
// menu: 메뉴 구분자, 페이지 이동 등의 메뉴별 오류 처리용 구분자, null || "" 일 때 이동하지 않는다.
//return>
checkException = function (e, headerText, menu) {
//디버깅 모드 이면 메시지 창으로 오류 표시
if (_debug) {
var sb = new StringBuilder();
sb.AppendFormat("* Error [ {0} ]\n\n* Message: {1}\n\n* Stack:\n{2}", headerText, e.message, e.stack);
alert(sb.ToString());
} else {
//디버딩 모드가 아닐 때 오류 공통 처리
}
//화면 이동 금지 상태 이면 return
if (_notRedirection) {
return;
} else {
//메뉴별 화면 이동 처리
switch (menu) {
case "main":
break;
}
}
}
// 파라미터 받아오기
// 사용법 : var param = getParameters();
function getParameters() {
var searchString = window.location.search.substring(1),
params = searchString.split("&"),
hash = {};
if (searchString == "") return {};
for (var i = 0; i < params.length; i++) {
var val = params[i].split("=");
hash[unescape(val[0])] = unescape(val[1]);
}
return hash;
}
//휴대폰 번호 Array 형식으로 변환
getMemberInfoHandPhoneArray = function (phoneNum) {
var hpArray = [];
if (phoneNum == undefined || phoneNum == '')
return hpArray;
var hpLen = phoneNum.length;
var hp1 = '';
var hp2 = '';
var hp3 = '';
if (hpLen == 13) {// 13자리 번호 일때 : 010-2222-3333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(4, 8);
hp3 = phoneNum.substring(9);
} else if (hpLen == 12) {// 12자리 번호 일때 : | pLen == 10) {// 10자리 번호 일때 : 0102223333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(3, 6);
hp3 = phoneNum.substring(6);
} else {
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(3, 7);
hp3 = phoneNum.substring(7);
}
hpArray.push(hp1);
hpArray.push(hp2);
hpArray.push(hp3);
return hpArray;
}
//메인 화면으로 이동 한다.
//params>
//return>
goToMain = function () {
$(location).attr("href", "/com/main.do");
}
//팝업창을 띠운다
//sUrl - 띠울 URL
//sFrame - 띠울이름
//sFeature - 창 속성
openDialog = function (sUrl, sFrame, sFeature) {
var op = window.open(sUrl, sFrame, sFeature);
return op;
}
var ctrlDown = false;
//숫자만 입력 Input Key Event
//params>
//return>
//$("#userName").attr("onkeydown", "onlyNumberInputEvent(event);");
onlyNumberInputEvent = function (event) {
try {
var ctrlKey = 17, vKey = 86, cKey = 67;
var key = event.which || event.keyCode;
// backspace:8
// tab:9
// delete:46
if (key == 8 || key == 9 || key == 46) {
// 키 통과
} else {
if (ctrlDown && (key == 86 || key == 67)) {
}
else if (key >= 48 && key <= 57) {
// 숫자 확인
}
else if (key >= 96 && key <= 105) {
// 숫자 확인
} else {
//이벤트 해제
if (event.preventDefault) {
event.preventDefault();
} else {
event.returnValue = false;
}
}
}
} catch (e) {
}
}
$(function () {
$.fn.ClientInfo = function () {
this.channelType = "HO";
this.browser | 010-222-3333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(4, 7);
hp3 = phoneNum.substring(8);
} else if (hpLen == 11) {// 11자리 번호 일때 : 01022223333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(3, 7);
hp3 = phoneNum.substring(7);
} else if (h | identifier_body |
common.js | = new $.fn.Global();
// ajax json call - 비동기 방식(기본:로딩 화면 있음)
JsonCall = function (url, params, reCall, showLoading) {
//params = "paramList=" + JSON.stringify(params);
if (showLoading == undefined) {
showLoading = true;
}
try {
if (showLoading)
global.showLoading(true);
$.ajax({
type: "post",
//async:false,
url: url + "?nocashe=" + String(Math.random()),
//dataType: "json",
data: params,
contentType: "application/x-www-form-urlencoded; charset=UTF-8",
beforeSend: function (xhr) {
xhr.setRequestHeader("Content-Encoding", "gzip");
},
fail: function (data) {
if (showLoading)
global.showLoading(false);
},
complete: function (data) {
if (showLoading)
global.showLoading(false);
reCall(data);
}
});
}
catch (e) {
alert("JSON Error: " + e.message);
}
};
//ajax json call - 동기 방식(로딩 화면 없음)
JsonCallSync = function (url, params, reCall) {
//params = "paramList=" + JSON.stringify(params);
try {
$.ajax({
type: "post",
async: false,
url: url + "?nocashe=" + String(Math.random()),
//dataType: "json",
data: params,
contentType: "application/x-www-form-urlencoded; charset=UTF-8",
beforeSend: function (xhr) {
xhr.setRequestHeader("Content-Encoding", "gzip");
},
fail: function (data) {
},
complete: function (data) {
reCall(data);
}
});
}
catch (e) {
alert("JSON Error: " + e.message);
}
};
//ajax json call - 동기 방식(데이터 바로 받음)
JsonReturnDataSync = function (url, params) {
//params = "paramList=" + JSON.stringify(params);
var jsonVal = "";
try {
$.ajax({
type: "post",
async: false,
url: url + "?nocashe=" + String(Math.random()),
//dataType: "json",
data: params,
contentType: "application/x-www-form-urlencoded; charset=UTF-8",
beforeSend: function (xhr) {
xhr.setRequestHeader("Content-Encoding", "gzip");
},
fail: function (data) {
},
complete: function (data) {
jsonVal = data;
}
});
return jsonVal;
}
catch (e) {
alert("JSON Error: " + e.message);
}
};
/****************************************
* 타입에 따라 링크 이동 방법이 바뀐다.
* url : 링크 주소
* type : 3 :return url, 2:window.open(), 그외:windows.location
****************************************/
goUrl = function (url, type) {
// type 기본값 설정
type = (type == undefined) ? '1' : type;
if (type == "") {
type = "1"; //1:링크
}
if (type == "3") {
return url;
} else if (type == "2") {
if (url == "") {
alert(' 개발 중');
} else {
window.open(url, '_blank');//새창링크
}
} else {
if (url == "") {
alert(' 개발 중');
} else {
location.href = url;//링크
}
}
}
/**************************************
* 예외 처리
**************************************/
var _debug; //디버깅 모드 설정용, 각 페이지에서 true로 설정하면, 오류를 alert 창으로 표시한다.
var _notRedirection; //페이지 이동 금지 설정용, 각 페이지에서 true로 설정하면, 페이지 이동하지 않는다.
//오류발생시 처리
//params>
// e: Exception 객체
// headerText: 오류 구분용 Header Text
// menu: 메뉴 구분자, 페이지 이동 등의 메뉴별 오류 처리용 구분자, null || "" 일 때 이동하지 않는다.
//return>
checkException = function (e, headerText, menu) {
//디버깅 모드 이면 메시지 창으로 오류 표시
if (_debug) {
var sb = new StringBuilder();
sb.AppendFormat("* Error [ {0} ]\n\n* Message: {1}\n\n* Stack:\n{2}", headerText, e.message, e.stack);
alert(sb.ToString());
} else {
//디버딩 모드가 아닐 때 오류 공통 처리
}
//화면 이동 금지 상태 이면 return
if (_notRedirection) {
return;
} else {
//메뉴별 화면 이동 처리
switch (menu) {
case "main":
break;
}
}
}
// 파라미터 받아오기
// 사용법 : var param = getParameters();
function getParameters() {
var searchString = window.location.search.substring(1),
params = searchString.split("&"),
hash = {};
if (searchString == "") return {};
for (var i = 0; i < params.length; i++) {
var val = params[i].split("=");
hash[unescape(val[0])] = unescape(val[1]);
}
return hash;
}
//휴대폰 번호 Array 형식으로 변환
getMemberInfoHandPhoneArray = function (phoneNum) {
var hpArray = [];
if (phoneNum == undefined || phoneNum == '')
return hpArray;
var hpLen = phoneNum.length;
var hp1 = '';
var hp2 = '';
var hp3 = '';
if (hpLen == 13) {// 13자리 번호 일때 : 010-2222-3333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(4, 8);
hp3 = phoneNum.substring(9);
} else if (hpLen == 12) {// 12자리 번호 일때 : 010-222-3333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(4, 7);
hp3 = phoneNum.substring(8);
} else if (hpLen == 11) {// 11자리 번호 일때 : 01022223333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(3, 7);
hp3 = phoneNum.substring(7);
} else if (hpLen == 10) {// 10자리 번호 일때 : 0102223333
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(3, 6);
hp3 = phoneNum.substring(6);
} else {
hp1 = phoneNum.substring(0, 3);
hp2 = phoneNum.substring(3, 7);
hp3 = phoneNum.substring(7);
}
hpArray | "href", "/com/main.do");
}
//팝업창을 띠운다
//sUrl - 띠울 URL
//sFrame - 띠울이름
//sFeature - 창 속성
openDialog = function (sUrl, sFrame, sFeature) {
var op = window.open(sUrl, sFrame, sFeature);
return op;
}
var ctrlDown = false;
//숫자만 입력 Input Key Event
//params>
//return>
//$("#userName").attr("onkeydown", "onlyNumberInputEvent(event);");
onlyNumberInputEvent = function (event) {
try {
var ctrlKey = 17, vKey = 86, cKey = 67;
var key = event.which || event.keyCode;
// backspace:8
// tab:9
// delete:46
if (key == 8 || key == 9 || key == 46) {
// 키 통과
} else {
if (ctrlDown && (key == 86 || key == 67)) {
}
else if (key >= 48 && key <= 57) {
// 숫자 확인
}
else if (key >= 96 && key <= 105) {
// 숫자 확인
} else {
//이벤트 해제
if (event.preventDefault) {
event.preventDefault();
} else {
event.returnValue = false;
}
}
}
} catch (e) {
}
}
$(function () {
$.fn.ClientInfo = function () {
this.channelType = "HO";
this.browser | .push(hp1);
hpArray.push(hp2);
hpArray.push(hp3);
return hpArray;
}
//메인 화면으로 이동 한다.
//params>
//return>
goToMain = function () {
$(location).attr( | conditional_block |
renderer.rs | 32);
let number_of_cameras = self.scene.cameras.len();
self.renderer_output_pixels.push(RendererOutputPixel::new(y, x));
let last_pos = self.renderer_output_pixels.len()-1;
let mut pcg: Pcg32 = Pcg32::from_entropy();
let mut colors: Vec<[f64; 3]> = Vec::new();
let mut converged = false;
// Loop over this pixel until we estimate the error to be small enough.
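// Each pass traces spp_per_iteration camera paths; sampling stops once the batch-means error estimate drops below maximum_error or the total sample count reaches maximum_spp.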
let mut iterations = 0;
while !converged {
iterations += 1;
for _ in 0..self.spp_per_iteration {
// Create a hitpoint path starting from the camera. The first hitpoint will be the hitpoint of the ray coming from the retina, through the pinhole of the camera - that is, it is a point somewhere on a renderer shape.
let mut hitpoint_path_from_camera: Vec<Hitpoint> = Vec::new();
self.renderer_output_pixels[last_pos].number_of_rays += 1.0;
let index = (pcg.gen_range(0, random_exclusive_max_lights)%(number_of_cameras as u32)) as usize;
let mut ray = self.scene.cameras[index].create_ray(x, y, self.width, self.height, &mut pcg);
let mut color = self.scene.cameras[index].color;
self.create_hitpoint_path(&mut ray, &mut color, &mut hitpoint_path_from_camera, &mut pcg);
let mut total_color = [0.0, 0.0, 0.0];
if hitpoint_path_from_camera.is_empty() {
colors.push(total_color);
continue;
}
let direct_light_sampling = false;
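// With direct light sampling, every hitpoint on the camera path is connected to a point sampled on a light (next-event estimation); otherwise the path only collects emission from surfaces it happens to hit, accumulated in reverse order below.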
if direct_light_sampling {
// Connect the camera path hitpoints with points on lights.
for hitpoint_in_camera_path in &hitpoint_path_from_camera {
let color = self.connect_and_compute_color(hitpoint_in_camera_path, &mut pcg);
total_color = add(total_color, color);
}
self.store(last_pos, total_color);
colors.push(total_color);
} else {
for hitpoint in hitpoint_path_from_camera.iter().rev() {
total_color = elementwise_mul(total_color, hitpoint.material.color);
total_color = add(total_color, hitpoint.material.emission);
}
self.store(last_pos, total_color);
colors.push(total_color);
}
}
if colors.is_empty() {
break;
}
// Estimate the error. If it's too large, create more rays for this pixel.
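// The samples are split into 20 batches; the spread of the gamma-corrected batch means (standard deviation or maximum per-channel distance) serves as the error estimate.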
let number_of_batches: usize = 20;
let number_of_rays = colors.len();
let batch_size = number_of_rays/number_of_batches;
let mut averages = self.averages(&colors, number_of_batches, batch_size);
self.gamma_correct_averages(&mut averages);
let use_standard_deviation = true;
let error = if use_standard_deviation {
self.standard_deviation(&averages)
} else {
self.maximum_distance(&averages)
};
if error < self.maximum_error || iterations*self.spp_per_iteration >= self.maximum_spp {
converged = true;
} else if iterations%10 == 0 {
println!("Currently: {} iterations at ({}, {}) with an error of {}. Iterating until it is less than {}.", iterations, x, y, error, self.maximum_error);
}
}
}
// @TODO: Fix so that it works even if colors.len()%batch_size != 0.
fn | (&self, colors: &[[f64; 3]], number_of_batches: usize, batch_size: usize) -> Vec<[f64; 3]> {
let mut averages: Vec<[f64; 3]> = vec![[0.0, 0.0, 0.0]; number_of_batches];
for i in 0..colors.len() {
averages[i/batch_size] = add(averages[i/batch_size], colors[i]);
}
for average in &mut averages {
*average = mul(1.0/(batch_size as f64), *average);
}
averages
}
fn gamma_correct_averages(&self, averages: &mut Vec<[f64; 3]>) {
for average in averages {
*average = intensity_to_color(*average);
}
}
fn standard_deviation(&self, averages: &[[f64; 3]]) -> f64 {
let length = averages.len();
let mut r = 0.0;
let mut g = 0.0;
let mut b = 0.0;
let mut r_squared = 0.0;
let mut g_squared = 0.0;
let mut b_squared = 0.0;
for average in averages {
r += average[0];
g += average[1];
b += average[2];
r_squared += average[0]*average[0];
g_squared += average[1]*average[1];
b_squared += average[2]*average[2];
}
let variance = (r_squared + g_squared + b_squared - (r*r+g*g+b*b)/(length as f64))/(length as f64-1.0);
// Due to rounding errors, the computed variance could in rare cases be slightly lower than 0.0.
if variance < 0.0 {
0.0
} else {
variance.sqrt()
}
}
fn maximum_distance(&self, averages: &[[f64; 3]]) -> f64 {
let mut smallest = [f64::MAX, f64::MAX, f64::MAX];
let mut largest = [f64::MIN, f64::MIN, f64::MIN];
for average in averages {
for j in 0..3 {
if average[j] < smallest[j] {
smallest[j] = average[j];
}
if average[j] > largest[j] {
largest[j] = average[j];
}
}
}
let max_r_distance = (largest[0]-smallest[0]).abs();
let max_g_distance = (largest[1]-smallest[1]).abs();
let max_b_distance = (largest[2]-smallest[2]).abs();
max_r_distance+max_g_distance+max_b_distance
}
fn create_hitpoint_path(&mut self, mut ray: &mut Ray, color: &mut [f64; 3], hitpoint_path: &mut Vec<Hitpoint>, pcg: &mut Pcg32) {
let bullet_probability = 0.0;
let survival_boost_factor = 1.0/(1.0-bullet_probability);
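// Russian roulette: a path is terminated with probability bullet_probability and survivors are scaled by 1/(1 - p) to keep the estimator unbiased. With p = 0.0 every path survives until it leaves the scene.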
loop {
let r = pcg.gen::<f64>();
if r < bullet_probability {
return;
} else {
*color = mul(survival_boost_factor, *color);
}
let hitpoint = self.closest_renderer_shape(&mut ray);
if let Some(mut hitpoint) = hitpoint {
let ingoing_direction = mul(-1.0, ray.direction);
let (refractive_index_1, refractive_index_2) = if hitpoint.hit_from_outside {
(1.0, hitpoint.material.refractive_index)
} else {
(hitpoint.material.refractive_index, 1.0)
};
let normal = if dot(ingoing_direction, hitpoint.normal) > 0.0 {
hitpoint.normal
} else {
mul(-1.0, hitpoint.normal)
};
let (direction, _) = random_from_brdf(ingoing_direction, normal, hitpoint.material, refractive_index_1, refractive_index_2, pcg);
ray.position = hitpoint.position;
ray.direction = direction;
*color = elementwise_mul(*color, hitpoint.material.color);
hitpoint.accumulated_color = *color;
hitpoint_path.push(hitpoint);
} else {
return;
}
}
}
// @TODO: Implement support of triangular light sources.
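// Next-event estimation: a shadow ray is cast from a sampled light position towards the hitpoint; if nothing occludes it, the light's contribution is weighted by the BRDF and attenuated by 1/distance^2.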
fn connect_and_compute_color(&self, hitpoint_in_camera_path: &Hitpoint, mut pcg: &mut Pcg32) -> [f64; 3] {
let number_of_light_spheres = self.scene.light_spheres.len();
let number_of_cameras = self.scene.cameras.len();
let random_exclusive_max_cameras: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_cameras as u32);
let index = (pcg.gen_range(0, random_exclusive_max_cameras)%(number_of_light_spheres as u32)) as usize;
let light_position = self.scene.light_spheres[index].get_position(&mut pcg);
// @TODO: Should it not be .emission rather than .color?
let light_color = self.scene.light_spheres[index].color;
let direction = sub(hitpoint_in_camera_path.position, light_position);
let distance | averages | identifier_name |
renderer.rs | 32);
let number_of_cameras = self.scene.cameras.len();
self.renderer_output_pixels.push(RendererOutputPixel::new(y, x));
let last_pos = self.renderer_output_pixels.len()-1;
let mut pcg: Pcg32 = Pcg32::from_entropy();
let mut colors: Vec<[f64; 3]> = Vec::new();
let mut converged = false;
// Loop over this pixel until we estimate the error to be small enough.
let mut iterations = 0;
while !converged {
iterations += 1;
for _ in 0..self.spp_per_iteration {
// Create a hitpoint path starting from the camera. The first hitpoint will be the hitpoint of the ray coming from the retina, through the pinhole of the camera - that is, it is a point somewhere on a renderer shape.
let mut hitpoint_path_from_camera: Vec<Hitpoint> = Vec::new();
self.renderer_output_pixels[last_pos].number_of_rays += 1.0;
let index = (pcg.gen_range(0, random_exclusive_max_lights)%(number_of_cameras as u32)) as usize;
let mut ray = self.scene.cameras[index].create_ray(x, y, self.width, self.height, &mut pcg);
let mut color = self.scene.cameras[index].color;
self.create_hitpoint_path(&mut ray, &mut color, &mut hitpoint_path_from_camera, &mut pcg);
let mut total_color = [0.0, 0.0, 0.0];
if hitpoint_path_from_camera.is_empty() {
colors.push(total_color);
continue;
}
let direct_light_sampling = false;
if direct_light_sampling {
// Connect the camera path hitpoints with points on lights.
for hitpoint_in_camera_path in &hitpoint_path_from_camera {
let color = self.connect_and_compute_color(hitpoint_in_camera_path, &mut pcg);
total_color = add(total_color, color);
}
self.store(last_pos, total_color);
colors.push(total_color);
} else {
for hitpoint in hitpoint_path_from_camera.iter().rev() {
total_color = elementwise_mul(total_color, hitpoint.material.color);
total_color = add(total_color, hitpoint.material.emission);
}
self.store(last_pos, total_color);
colors.push(total_color);
}
}
if colors.is_empty() {
break;
}
// Estimate the error. If it's too large, create more rays for this pixel.
let number_of_batches: usize = 20;
let number_of_rays = colors.len();
let batch_size = number_of_rays/number_of_batches;
let mut averages = self.averages(&colors, number_of_batches, batch_size);
self.gamma_correct_averages(&mut averages);
let use_standard_deviation = true;
let error = if use_standard_deviation {
self.standard_deviation(&averages)
} else {
self.maximum_distance(&averages)
};
if error < self.maximum_error || iterations*self.spp_per_iteration >= self.maximum_spp {
converged = true;
} else if iterations%10 == 0 {
println!("Currently: {} iterations at ({}, {}) with an error of {}. Iterating until it is less than {}.", iterations, x, y, error, self.maximum_error);
}
}
}
// @TODO: Fix so that it works even if colors.len()%batch_size != 0.
fn averages(&self, colors: &[[f64; 3]], number_of_batches: usize, batch_size: usize) -> Vec<[f64; 3]> {
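// Note: this assumes colors.len() is an exact multiple of number_of_batches; otherwise i/batch_size can index past the last batch (or divide by zero when there are fewer samples than batches), which is what the @TODO above refers to.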
let mut averages: Vec<[f64; 3]> = vec![[0.0, 0.0, 0.0]; number_of_batches];
for i in 0..colors.len() {
averages[i/batch_size] = add(averages[i/batch_size], colors[i]);
}
for average in &mut averages {
*average = mul(1.0/(batch_size as f64), *average);
}
averages
}
fn gamma_correct_averages(&self, averages: &mut Vec<[f64; 3]>) {
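// The convergence test operates on gamma-corrected values, so maximum_error is expressed in display space rather than in linear radiance.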
for average in averages {
*average = intensity_to_color(*average);
}
}
fn standard_deviation(&self, averages: &[[f64; 3]]) -> f64 {
let length = averages.len();
let mut r = 0.0;
let mut g = 0.0;
let mut b = 0.0;
let mut r_squared = 0.0;
let mut g_squared = 0.0;
let mut b_squared = 0.0;
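// Accumulate per-channel sums and sums of squares; the variance below uses the shortcut formula (sum(x^2) - (sum(x))^2/n) / (n - 1), summed over the R, G and B channels.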
for average in averages {
r += average[0];
g += average[1];
b += average[2];
r_squared += average[0]*average[0];
g_squared += average[1]*average[1];
b_squared += average[2]*average[2];
}
let variance = (r_squared + g_squared + b_squared - (r*r+g*g+b*b)/(length as f64))/(length as f64-1.0);
// Due to rounding errors, the computed variance could in rare cases be slightly lower than 0.0.
if variance < 0.0 {
0.0
} else {
variance.sqrt()
}
}
fn maximum_distance(&self, averages: &[[f64; 3]]) -> f64 {
let mut smallest = [f64::MAX, f64::MAX, f64::MAX];
let mut largest = [f64::MIN, f64::MIN, f64::MIN];
for average in averages {
for j in 0..3 {
if average[j] < smallest[j] {
smallest[j] = average[j];
}
if average[j] > largest[j] |
}
}
let max_r_distance = (largest[0]-smallest[0]).abs();
let max_g_distance = (largest[1]-smallest[1]).abs();
let max_b_distance = (largest[2]-smallest[2]).abs();
max_r_distance+max_g_distance+max_b_distance
}
fn create_hitpoint_path(&mut self, mut ray: &mut Ray, color: &mut [f64; 3], hitpoint_path: &mut Vec<Hitpoint>, pcg: &mut Pcg32) {
let bullet_probability = 0.0;
let survival_boost_factor = 1.0/(1.0-bullet_probability);
loop {
let r = pcg.gen::<f64>();
if r < bullet_probability {
return;
} else {
*color = mul(survival_boost_factor, *color);
}
let hitpoint = self.closest_renderer_shape(&mut ray);
if let Some(mut hitpoint) = hitpoint {
let ingoing_direction = mul(-1.0, ray.direction);
let (refractive_index_1, refractive_index_2) = if hitpoint.hit_from_outside {
(1.0, hitpoint.material.refractive_index)
} else {
(hitpoint.material.refractive_index, 1.0)
};
let normal = if dot(ingoing_direction, hitpoint.normal) > 0.0 {
hitpoint.normal
} else {
mul(-1.0, hitpoint.normal)
};
let (direction, _) = random_from_brdf(ingoing_direction, normal, hitpoint.material, refractive_index_1, refractive_index_2, pcg);
ray.position = hitpoint.position;
ray.direction = direction;
*color = elementwise_mul(*color, hitpoint.material.color);
hitpoint.accumulated_color = *color;
hitpoint_path.push(hitpoint);
} else {
return;
}
}
}
// @TODO: Implement support of triangular light sources.
fn connect_and_compute_color(&self, hitpoint_in_camera_path: &Hitpoint, mut pcg: &mut Pcg32) -> [f64; 3] {
let number_of_light_spheres = self.scene.light_spheres.len();
let number_of_cameras = self.scene.cameras.len();
let random_exclusive_max_cameras: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_cameras as u32);
let index = (pcg.gen_range(0, random_exclusive_max_cameras)%(number_of_light_spheres as u32)) as usize;
let light_position = self.scene.light_spheres[index].get_position(&mut pcg);
// @TODO: Should it not be .emission rather than .color?
let light_color = self.scene.light_spheres[index].color;
let direction = sub(hitpoint_in_camera_path.position, light_position);
let distance | {
largest[j] = average[j];
} | conditional_block |
renderer.rs | y, error, self.maximum_error);
}
}
}
// @TODO: Fix so that it works even if colors.len()%batch_size != 0.
fn averages(&self, colors: &[[f64; 3]], number_of_batches: usize, batch_size: usize) -> Vec<[f64; 3]> {
let mut averages: Vec<[f64; 3]> = vec![[0.0, 0.0, 0.0]; number_of_batches];
for i in 0..colors.len() {
averages[i/batch_size] = add(averages[i/batch_size], colors[i]);
}
for average in &mut averages {
*average = mul(1.0/(batch_size as f64), *average);
}
averages
}
fn gamma_correct_averages(&self, averages: &mut Vec<[f64; 3]>) {
for average in averages {
*average = intensity_to_color(*average);
}
}
fn standard_deviation(&self, averages: &[[f64; 3]]) -> f64 {
let length = averages.len();
let mut r = 0.0;
let mut g = 0.0;
let mut b = 0.0;
let mut r_squared = 0.0;
let mut g_squared = 0.0;
let mut b_squared = 0.0;
for average in averages {
r += average[0];
g += average[1];
b += average[2];
r_squared += average[0]*average[0];
g_squared += average[1]*average[1];
b_squared += average[2]*average[2];
}
let variance = (r_squared + g_squared + b_squared - (r*r+g*g+b*b)/(length as f64))/(length as f64-1.0);
// Due to rounding errors, the computed variance could in rare cases be slightly lower than 0.0.
if variance < 0.0 {
0.0
} else {
variance.sqrt()
}
}
fn maximum_distance(&self, averages: &[[f64; 3]]) -> f64 {
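// Alternative error measure: the per-channel range (max - min) of the batch means, summed over R, G and B.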
let mut smallest = [f64::MAX, f64::MAX, f64::MAX];
let mut largest = [f64::MIN, f64::MIN, f64::MIN];
for average in averages {
for j in 0..3 {
if average[j] < smallest[j] {
smallest[j] = average[j];
}
if average[j] > largest[j] {
largest[j] = average[j];
}
}
}
let max_r_distance = (largest[0]-smallest[0]).abs();
let max_g_distance = (largest[1]-smallest[1]).abs();
let max_b_distance = (largest[2]-smallest[2]).abs();
max_r_distance+max_g_distance+max_b_distance
}
fn create_hitpoint_path(&mut self, mut ray: &mut Ray, color: &mut [f64; 3], hitpoint_path: &mut Vec<Hitpoint>, pcg: &mut Pcg32) {
let bullet_probability = 0.0;
let survival_boost_factor = 1.0/(1.0-bullet_probability);
loop {
let r = pcg.gen::<f64>();
if r < bullet_probability {
return;
} else {
*color = mul(survival_boost_factor, *color);
}
let hitpoint = self.closest_renderer_shape(&mut ray);
if let Some(mut hitpoint) = hitpoint {
let ingoing_direction = mul(-1.0, ray.direction);
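// Pick the refractive indices for the interface depending on whether the surface was hit from outside or inside, and flip the normal, if needed, so it faces the incoming ray before the BRDF is sampled.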
let (refractive_index_1, refractive_index_2) = if hitpoint.hit_from_outside {
(1.0, hitpoint.material.refractive_index)
} else {
(hitpoint.material.refractive_index, 1.0)
};
let normal = if dot(ingoing_direction, hitpoint.normal) > 0.0 {
hitpoint.normal
} else {
mul(-1.0, hitpoint.normal)
};
let (direction, _) = random_from_brdf(ingoing_direction, normal, hitpoint.material, refractive_index_1, refractive_index_2, pcg);
ray.position = hitpoint.position;
ray.direction = direction;
*color = elementwise_mul(*color, hitpoint.material.color);
hitpoint.accumulated_color = *color;
hitpoint_path.push(hitpoint);
} else {
return;
}
}
}
// @TODO: Implement support of triangular light sources.
fn connect_and_compute_color(&self, hitpoint_in_camera_path: &Hitpoint, mut pcg: &mut Pcg32) -> [f64; 3] {
let number_of_light_spheres = self.scene.light_spheres.len();
let number_of_cameras = self.scene.cameras.len();
let random_exclusive_max_cameras: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_cameras as u32);
let index = (pcg.gen_range(0, random_exclusive_max_cameras)%(number_of_light_spheres as u32)) as usize;
let light_position = self.scene.light_spheres[index].get_position(&mut pcg);
// @TODO: Should it not be .emission rather than .color?
let light_color = self.scene.light_spheres[index].color;
let direction = sub(hitpoint_in_camera_path.position, light_position);
let distance = norm(direction);
let direction_normalised = mul(1.0/distance, direction);
if dot(hitpoint_in_camera_path.normal, direction_normalised) > 0.0 {
return [0.0, 0.0, 0.0];
}
let closest_hitpoint = self.closest_renderer_shape(&mut Ray::new(light_position, direction_normalised));
if let Some(closest_hitpoint) = closest_hitpoint {
// @TODO Check if this is sane.
if distance-closest_hitpoint.distance > 1.0e-9 {
return [0.0, 0.0, 0.0];
}
}
let ingoing_direction = mul(-1.0, hitpoint_in_camera_path.incoming_direction);
let outgoing_direction = mul(-1.0, direction_normalised);
let normal = if dot(ingoing_direction, hitpoint_in_camera_path.normal) > 0.0 {
hitpoint_in_camera_path.normal
} else {
mul(-1.0, hitpoint_in_camera_path.normal)
};
let (refractive_index_1, refractive_index_2) = if hitpoint_in_camera_path.hit_from_outside {
(1.0, hitpoint_in_camera_path.material.refractive_index)
} else {
(hitpoint_in_camera_path.material.refractive_index, 1.0)
};
// @TODO: Get rid of the upper limit of the brdf.
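// Clamping the BRDF to maximum_brdf_value trades a small amount of bias for fewer fireflies (isolated, very bright pixels) from low-probability light connections.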
let brdf = min(brdf(ingoing_direction, outgoing_direction, normal, refractive_index_1, refractive_index_2, hitpoint_in_camera_path.material), self.maximum_brdf_value);
mul(brdf/(distance*distance), elementwise_mul(hitpoint_in_camera_path.accumulated_color, light_color))
}
fn store(&mut self, last_pos: usize, color: [f64; 3]) {
self.renderer_output_pixels[last_pos].pixel.color = add(self.renderer_output_pixels[last_pos].pixel.color, color);
self.renderer_output_pixels[last_pos].color = add(self.renderer_output_pixels[last_pos].color, color);
self.renderer_output_pixels[last_pos].number_of_bin_elements += 1;
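// For post-processing, each gamma-corrected channel value is additionally histogrammed into NUMBER_OF_BINS bins per pixel.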
if self.perform_post_process {
let color = intensity_to_color(color);
self.renderer_output_pixels[last_pos].pixel.bins[(color[0]/255.0*(NUMBER_OF_BINS as f64)) as usize][0] += 1;
self.renderer_output_pixels[last_pos].pixel.bins[(color[1]/255.0*(NUMBER_OF_BINS as f64)) as usize][1] += 1;
self.renderer_output_pixels[last_pos].pixel.bins[(color[2]/255.0*(NUMBER_OF_BINS as f64)) as usize][2] += 1;
}
}
// Find the closest hitpoint.
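// Brute-force nearest-hit search: every active shape is intersected and the smallest distance wins. Only the cylinder loop is visible here; the other shape types are presumably handled the same way below.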
fn closest_renderer_shape(&self, ray: &mut Ray) -> Option<Hitpoint> {
let mut min_distance = f64::MAX;
let mut closest_renderer_shape_index: Option<usize> = None;
let mut renderer_type = RendererType::Cylinder;
for (i, cylinder) in self.scene.renderer_cylinders.iter().enumerate() {
if !cylinder.active {
continue; | }
let distance = cylinder.distance(&ray);
if distance < min_distance {
min_distance = distance;
closest_renderer_shape_index = Some(i); | random_line_split |
|
renderer.rs | _of_batches, batch_size);
self.gamma_correct_averages(&mut averages);
let use_standard_deviation = true;
let error = if use_standard_deviation {
self.standard_deviation(&averages)
} else {
self.maximum_distance(&averages)
};
if error < self.maximum_error || iterations*self.spp_per_iteration >= self.maximum_spp {
converged = true;
} else if iterations%10 == 0 {
println!("Currently: {} iterations at ({}, {}) with an error of {}. Iterating until it is less than {}.", iterations, x, y, error, self.maximum_error);
}
}
}
// @TODO: Fix so that it works even if colors.len()%batch_size != 0.
fn averages(&self, colors: &[[f64; 3]], number_of_batches: usize, batch_size: usize) -> Vec<[f64; 3]> {
let mut averages: Vec<[f64; 3]> = vec![[0.0, 0.0, 0.0]; number_of_batches];
for i in 0..colors.len() {
averages[i/batch_size] = add(averages[i/batch_size], colors[i]);
}
for average in &mut averages {
*average = mul(1.0/(batch_size as f64), *average);
}
averages
}
fn gamma_correct_averages(&self, averages: &mut Vec<[f64; 3]>) {
for average in averages {
*average = intensity_to_color(*average);
}
}
fn standard_deviation(&self, averages: &[[f64; 3]]) -> f64 {
let length = averages.len();
let mut r = 0.0;
let mut g = 0.0;
let mut b = 0.0;
let mut r_squared = 0.0;
let mut g_squared = 0.0;
let mut b_squared = 0.0;
for average in averages {
r += average[0];
g += average[1];
b += average[2];
r_squared += average[0]*average[0];
g_squared += average[1]*average[1];
b_squared += average[2]*average[2];
}
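// The expression below is the per-channel sample variance of the batch means,
// (sum(x^2) - (sum(x))^2 / n) / (n - 1) with n = averages.len(), summed over R, G and B;
// the function returns its square root as a single scalar error estimate.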
let variance = (r_squared + g_squared + b_squared - (r*r+g*g+b*b)/(length as f64))/(length as f64-1.0);
// Due to rounding errors, the computed variance could in rare cases be slightly lower than 0.0.
if variance < 0.0 {
0.0
} else {
variance.sqrt()
}
}
fn maximum_distance(&self, averages: &[[f64; 3]]) -> f64 {
let mut smallest = [f64::MAX, f64::MAX, f64::MAX];
let mut largest = [f64::MIN, f64::MIN, f64::MIN];
for average in averages {
for j in 0..3 {
if average[j] < smallest[j] {
smallest[j] = average[j];
}
if average[j] > largest[j] {
largest[j] = average[j];
}
}
}
let max_r_distance = (largest[0]-smallest[0]).abs();
let max_g_distance = (largest[1]-smallest[1]).abs();
let max_b_distance = (largest[2]-smallest[2]).abs();
max_r_distance+max_g_distance+max_b_distance
}
fn create_hitpoint_path(&mut self, mut ray: &mut Ray, color: &mut [f64; 3], hitpoint_path: &mut Vec<Hitpoint>, pcg: &mut Pcg32) {
let bullet_probability = 0.0;
let survival_boost_factor = 1.0/(1.0-bullet_probability);
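// Russian roulette: a path is terminated with probability bullet_probability and the
// surviving paths are scaled by 1/(1 - p) so the estimate stays unbiased. With p = 0.0 this
// is currently a no-op, presumably kept so the termination probability can be raised later.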
loop {
let r = pcg.gen::<f64>();
if r < bullet_probability {
return;
} else {
*color = mul(survival_boost_factor, *color);
}
let hitpoint = self.closest_renderer_shape(&mut ray);
if let Some(mut hitpoint) = hitpoint {
let ingoing_direction = mul(-1.0, ray.direction);
let (refractive_index_1, refractive_index_2) = if hitpoint.hit_from_outside {
(1.0, hitpoint.material.refractive_index)
} else {
(hitpoint.material.refractive_index, 1.0)
};
let normal = if dot(ingoing_direction, hitpoint.normal) > 0.0 {
hitpoint.normal
} else {
mul(-1.0, hitpoint.normal)
};
let (direction, _) = random_from_brdf(ingoing_direction, normal, hitpoint.material, refractive_index_1, refractive_index_2, pcg);
ray.position = hitpoint.position;
ray.direction = direction;
*color = elementwise_mul(*color, hitpoint.material.color);
hitpoint.accumulated_color = *color;
hitpoint_path.push(hitpoint);
} else {
return;
}
}
}
// @TODO: Implement support of triangular lightsources.
fn connect_and_compute_color(&self, hitpoint_in_camera_path: &Hitpoint, mut pcg: &mut Pcg32) -> [f64; 3] {
let number_of_light_spheres = self.scene.light_spheres.len();
let number_of_cameras = self.scene.cameras.len();
let random_exclusive_max_cameras: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_cameras as u32);
let index = (pcg.gen_range(0, random_exclusive_max_cameras)%(number_of_light_spheres as u32)) as usize;
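// Note: gen_range(0, n) is already uniform, and the bound above is derived from the number of
// cameras while the modulo is taken over the number of light spheres, so the result is only
// free of modulo bias when those counts happen to match. This may be an oversight; it is left
// unchanged here.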
let light_position = self.scene.light_spheres[index].get_position(&mut pcg);
// @TODO: Should it not be .emission rather than .color?
let light_color = self.scene.light_spheres[index].color;
let direction = sub(hitpoint_in_camera_path.position, light_position);
let distance = norm(direction);
let direction_normalised = mul(1.0/distance, direction);
if dot(hitpoint_in_camera_path.normal, direction_normalised) > 0.0 {
return [0.0, 0.0, 0.0];
}
let closest_hitpoint = self.closest_renderer_shape(&mut Ray::new(light_position, direction_normalised));
if let Some(closest_hitpoint) = closest_hitpoint {
// @TODO Check if this is sane.
if distance-closest_hitpoint.distance > 1.0e-9 {
return [0.0, 0.0, 0.0];
}
}
let ingoing_direction = mul(-1.0, hitpoint_in_camera_path.incoming_direction);
let outgoing_direction = mul(-1.0, direction_normalised);
let normal = if dot(ingoing_direction, hitpoint_in_camera_path.normal) > 0.0 {
hitpoint_in_camera_path.normal
} else {
mul(-1.0, hitpoint_in_camera_path.normal)
};
let (refractive_index_1, refractive_index_2) = if hitpoint_in_camera_path.hit_from_outside {
(1.0, hitpoint_in_camera_path.material.refractive_index)
} else {
(hitpoint_in_camera_path.material.refractive_index, 1.0)
};
// @TODO: Get rid of the upper limit of the brdf.
let brdf = min(brdf(ingoing_direction, outgoing_direction, normal, refractive_index_1, refractive_index_2, hitpoint_in_camera_path.material), self.maximum_brdf_value);
mul(brdf/(distance*distance), elementwise_mul(hitpoint_in_camera_path.accumulated_color, light_color))
}
fn store(&mut self, last_pos: usize, color: [f64; 3]) | {
self.renderer_output_pixels[last_pos].pixel.color = add(self.renderer_output_pixels[last_pos].pixel.color, color);
self.renderer_output_pixels[last_pos].color = add(self.renderer_output_pixels[last_pos].color, color);
self.renderer_output_pixels[last_pos].number_of_bin_elements += 1;
if self.perform_post_process {
let color = intensity_to_color(color);
self.renderer_output_pixels[last_pos].pixel.bins[(color[0]/255.0*(NUMBER_OF_BINS as f64)) as usize][0] += 1;
self.renderer_output_pixels[last_pos].pixel.bins[(color[1]/255.0*(NUMBER_OF_BINS as f64)) as usize][1] += 1;
self.renderer_output_pixels[last_pos].pixel.bins[(color[2]/255.0*(NUMBER_OF_BINS as f64)) as usize][2] += 1;
}
} | identifier_body |
|
de.rs | (&self) -> &str {
"aa"
//self.msg.as_str()
}
}
pub fn from_bytes<'de, T>(s: &'de [u8]) -> Result<T>
where
T: Deserialize<'de>,
{
let mut de: Deserializer = Deserializer::new(s);
T::deserialize(&mut de)
}
pub fn from_bytes_with_hash<'de, T>(s: &'de [u8]) -> Result<(T, Vec<u8>)>
where
T: Deserialize<'de>,
{
let mut de: Deserializer = Deserializer::new(s);
let res = T::deserialize(&mut de)?;
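// The BitTorrent info-hash is the SHA-1 of the raw bencoded bytes of the "info" value, not of
// any re-serialized form, so the deserializer records raw pointers to the start and end of
// that span while parsing and the digest below is taken over the original input slice.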
let info_hash = if !de.start_info.is_null() && de.end_info > de.start_info {
let len = de.end_info as usize - de.start_info as usize;
let slice = unsafe { std::slice::from_raw_parts(de.start_info, len) };
sha1::Sha1::from(&slice[..]).digest().bytes().to_vec()
} else {
//eprintln!("START={:?} END={:?}", de.start_info, de.end_info);
return Err(DeserializeError::InfoHashMissing);
};
Ok((res, info_hash))
}
use crate::metadata::{InfoFile, MetaTorrent, Torrent};
pub fn read_meta(s: &[u8]) -> Result<Torrent> {
let (meta, info_hash): (MetaTorrent, Vec<u8>) = from_bytes_with_hash(s)?;
if meta.info.pieces.len() % 20 != 0 {
return Err(DeserializeError::UnalignedPieces);
}
match &meta.info.files {
InfoFile::Multiple { files, .. } => {
if files.is_empty() {
return Err(DeserializeError::NoFile);
}
}
InfoFile::Single { length, .. } => {
if *length == 0 {
return Err(DeserializeError::EmptyFile);
}
}
}
Ok(Torrent {
meta,
info_hash: info_hash.into(),
})
}
// 4b3ea6a5b1e62537dceb67230248ff092a723e4d
// 4b3ea6a5b1e62537dceb67230248ff092a723e4d
#[doc(hidden)]
pub struct Deserializer<'de> {
input: &'de [u8],
start_info: *const u8,
end_info: *const u8,
info_depth: i64,
// Fix v2_deep_recursion.torrent
depth: Cell<u16>,
}
#[doc(hidden)]
impl<'de> Deserializer<'de> {
fn new(input: &'de [u8]) -> Self {
Deserializer {
input,
start_info: std::ptr::null(),
end_info: std::ptr::null(),
info_depth: 0,
depth: Cell::new(0),
}
}
fn peek(&self) -> Option<u8> {
self.input.get(0).copied()
}
fn next(&mut self) -> Result<u8> {
if let Some(c) = self.peek() {
let _ = self.consume();
return Ok(c);
}
Err(DeserializeError::UnexpectedEOF)
}
fn consume(&mut self) -> Result<()> {
self.input = &self.input.get(1..).ok_or(DeserializeError::UnexpectedEOF)?;
Ok(())
}
fn skip(&mut self, n: i64) -> Result<()> {
self.input = &self
.input
.get(n as usize..)
.ok_or(DeserializeError::UnexpectedEOF)?;
Ok(())
}
fn read_integer(&mut self, stop: u8) -> Result<i64> {
let mut n: i64 = 0;
loop {
match self.next()? {
c @ b'0'..=b'9' => n = (n * 10) + (c - b'0') as i64,
c if c == stop => break,
c => return Err(DeserializeError::WrongCharacter(c)),
}
}
Ok(n)
}
fn read_number(&mut self) -> Result<i64> {
self.consume()?; // 'i'
let negative = match self.peek() {
Some(b'-') => {
self.consume()?;
true
}
_ => false,
};
let n = self.read_integer(b'e')?;
Ok(if negative { -n } else { n })
}
fn read_string(&mut self) -> Result<&'de [u8]> {
let len = self.read_integer(b':')?;
let s = self
.input
.get(..len as usize)
.ok_or(DeserializeError::UnexpectedEOF)?;
self.skip(len)?;
if s == b"info" {
//println!("INFO FOUND: {:?}", String::from_utf8(s.to_vec()));
self.start_info = self.input.as_ptr();
self.info_depth = 1;
}
//println!("STRING={:?}", String::from_utf8((&s[0..std::cmp::min(100, s.len())]).to_vec()));
Ok(s)
}
}
#[doc(hidden)]
impl<'a, 'de> serde::de::Deserializer<'de> for &'a mut Deserializer<'de> {
type Error = DeserializeError;
fn deserialize_any<V>(self, visitor: V) -> Result<V::Value>
where
V: Visitor<'de>,
{
// println!("NEXT: {:?}", self.peek());
match self.peek().ok_or(DeserializeError::UnexpectedEOF)? {
b'i' => {
// println!("FOUND NUMBER", );
visitor.visit_i64(self.read_number()?)
}
b'l' => {
self.consume()?;
// println!("FOUND LIST {:?}", &self.input[..10]);
visitor.visit_seq(BencAccess::new(self))
}
b'd' => {
let depth = self.depth.get();
if depth > 100 {
return Err(DeserializeError::TooDeep);
}
self.depth.set(depth + 1);
// println!("FOUND DICT {}", self.depth.get());
self.consume()?;
visitor.visit_map(BencAccess::new(self))
}
_n @ b'0'..=b'9' => {
// println!("FOUND STRING", );
visitor.visit_borrowed_bytes(self.read_string()?)
}
c => Err(DeserializeError::WrongCharacter(c)),
}
}
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value>
where
V: Visitor<'de>,
{
visitor.visit_some(self)
}
forward_to_deserialize_any! {
bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string
unit unit_struct seq tuple tuple_struct map struct identifier
newtype_struct ignored_any enum bytes byte_buf
}
}
struct BencAccess<'a, 'de> {
de: &'a mut Deserializer<'de>,
}
impl<'a, 'de> BencAccess<'a, 'de> {
fn new(de: &'a mut Deserializer<'de>) -> BencAccess<'a, 'de> {
if de.info_depth >= 1 {
de.info_depth += 1;
let _s = de.input;
//println!("DEPTH[NEW]={:?} {:?}", de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec()));
}
BencAccess { de }
}
}
impl<'a, 'de> MapAccess<'de> for BencAccess<'a, 'de> {
type Error = DeserializeError;
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>>
where
K: DeserializeSeed<'de>,
{
if self.de.peek() == Some(b'e') {
let _ = self.de.consume();
self.de.info_depth -= 1;
if self.de.info_depth == 1 {
//println!("FOUND END !");
self.de.end_info = self.de.input.as_ptr();
}
let _s = self.de.input;
//println!("DEPTH[END_DICT]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec()));
return Ok(None);
}
seed.deserialize(&mut *self.de).map(Some)
}
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value>
where
V: DeserializeSeed<'de>,
{
seed.deserialize(&mut *self.de)
}
}
impl<'a, 'de> SeqAccess<'de> for BencAccess<'a, 'de> {
type Error = DeserializeError;
fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>>
where
T: DeserializeSeed<'de>,
{
// println!("LAAA {:?}", &self.de.input[..5]);
if self.de.peek() == Some(b'e') {
let _ = self.de.consume();
if self.de.info_depth >= 1 {
self | description | identifier_name |
|
de.rs | &[u8]) -> Result<Torrent> {
let (meta, info_hash): (MetaTorrent, Vec<u8>) = from_bytes_with_hash(s)?;
if meta.info.pieces.len() % 20 != 0 {
return Err(DeserializeError::UnalignedPieces);
}
match &meta.info.files {
InfoFile::Multiple { files, .. } => {
if files.is_empty() {
return Err(DeserializeError::NoFile);
}
}
InfoFile::Single { length, .. } => {
if *length == 0 {
return Err(DeserializeError::EmptyFile);
}
}
}
Ok(Torrent {
meta,
info_hash: info_hash.into(),
})
}
// 4b3ea6a5b1e62537dceb67230248ff092a723e4d
// 4b3ea6a5b1e62537dceb67230248ff092a723e4d
#[doc(hidden)]
pub struct Deserializer<'de> {
input: &'de [u8],
start_info: *const u8,
end_info: *const u8,
info_depth: i64,
// Fix v2_deep_recursion.torrent
depth: Cell<u16>,
}
#[doc(hidden)]
impl<'de> Deserializer<'de> {
fn new(input: &'de [u8]) -> Self {
Deserializer {
input,
start_info: std::ptr::null(),
end_info: std::ptr::null(),
info_depth: 0,
depth: Cell::new(0),
}
}
fn peek(&self) -> Option<u8> {
self.input.get(0).copied()
}
fn next(&mut self) -> Result<u8> {
if let Some(c) = self.peek() {
let _ = self.consume();
return Ok(c);
}
Err(DeserializeError::UnexpectedEOF)
}
fn consume(&mut self) -> Result<()> {
self.input = &self.input.get(1..).ok_or(DeserializeError::UnexpectedEOF)?;
Ok(())
}
fn skip(&mut self, n: i64) -> Result<()> {
self.input = &self
.input
.get(n as usize..)
.ok_or(DeserializeError::UnexpectedEOF)?;
Ok(())
}
fn read_integer(&mut self, stop: u8) -> Result<i64> {
let mut n: i64 = 0;
loop {
match self.next()? {
c @ b'0'..=b'9' => n = (n * 10) + (c - b'0') as i64,
c if c == stop => break,
c => return Err(DeserializeError::WrongCharacter(c)),
}
}
Ok(n)
}
fn read_number(&mut self) -> Result<i64> {
self.consume()?; // 'i'
let negative = match self.peek() {
Some(b'-') => {
self.consume()?;
true
}
_ => false,
};
let n = self.read_integer(b'e')?;
Ok(if negative { -n } else { n })
}
fn read_string(&mut self) -> Result<&'de [u8]> {
let len = self.read_integer(b':')?;
let s = self
.input
.get(..len as usize)
.ok_or(DeserializeError::UnexpectedEOF)?;
self.skip(len)?;
if s == b"info" {
//println!("INFO FOUND: {:?}", String::from_utf8(s.to_vec()));
self.start_info = self.input.as_ptr();
self.info_depth = 1;
}
//println!("STRING={:?}", String::from_utf8((&s[0..std::cmp::min(100, s.len())]).to_vec()));
Ok(s)
}
}
#[doc(hidden)]
impl<'a, 'de> serde::de::Deserializer<'de> for &'a mut Deserializer<'de> {
type Error = DeserializeError;
fn deserialize_any<V>(self, visitor: V) -> Result<V::Value>
where
V: Visitor<'de>,
{
// println!("NEXT: {:?}", self.peek());
match self.peek().ok_or(DeserializeError::UnexpectedEOF)? {
b'i' => {
// println!("FOUND NUMBER", );
visitor.visit_i64(self.read_number()?)
}
b'l' => {
self.consume()?;
// println!("FOUND LIST {:?}", &self.input[..10]);
visitor.visit_seq(BencAccess::new(self))
}
b'd' => {
let depth = self.depth.get();
if depth > 100 {
return Err(DeserializeError::TooDeep);
}
self.depth.set(depth + 1);
// println!("FOUND DICT {}", self.depth.get());
self.consume()?;
visitor.visit_map(BencAccess::new(self))
}
_n @ b'0'..=b'9' => {
// println!("FOUND STRING", );
visitor.visit_borrowed_bytes(self.read_string()?)
}
c => Err(DeserializeError::WrongCharacter(c)),
}
}
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value>
where
V: Visitor<'de>,
{
visitor.visit_some(self)
}
forward_to_deserialize_any! {
bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string
unit unit_struct seq tuple tuple_struct map struct identifier
newtype_struct ignored_any enum bytes byte_buf
}
}
struct BencAccess<'a, 'de> {
de: &'a mut Deserializer<'de>,
}
impl<'a, 'de> BencAccess<'a, 'de> {
fn new(de: &'a mut Deserializer<'de>) -> BencAccess<'a, 'de> {
if de.info_depth >= 1 {
de.info_depth += 1;
let _s = de.input;
//println!("DEPTH[NEW]={:?} {:?}", de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec()));
}
BencAccess { de }
}
}
impl<'a, 'de> MapAccess<'de> for BencAccess<'a, 'de> {
type Error = DeserializeError;
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>>
where
K: DeserializeSeed<'de>,
{
if self.de.peek() == Some(b'e') {
let _ = self.de.consume();
self.de.info_depth -= 1;
if self.de.info_depth == 1 {
//println!("FOUND END !");
self.de.end_info = self.de.input.as_ptr();
}
let _s = self.de.input;
//println!("DEPTH[END_DICT]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec()));
return Ok(None);
}
seed.deserialize(&mut *self.de).map(Some)
}
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value>
where
V: DeserializeSeed<'de>,
{
seed.deserialize(&mut *self.de)
}
}
impl<'a, 'de> SeqAccess<'de> for BencAccess<'a, 'de> {
type Error = DeserializeError;
fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>>
where
T: DeserializeSeed<'de>,
{
// println!("LAAA {:?}", &self.de.input[..5]);
if self.de.peek() == Some(b'e') {
let _ = self.de.consume();
if self.de.info_depth >= 1 {
self.de.info_depth -= 1;
}
let _s = self.de.input;
//println!("DEPTH[END_LIST]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec()));
// println!("DEPTH={}", self.de.info_depth);
return Ok(None);
}
seed.deserialize(&mut *self.de).map(Some)
}
}
#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
use super::{from_bytes, DeserializeError, Result};
use serde::Deserialize;
#[test]
fn test_dict() | {
#[derive(Deserialize, PartialEq, Debug)]
struct Dict<'b> {
a: i64,
b: &'b str,
c: &'b str,
X: &'b str,
}
let bc: Dict = from_bytes(b"d1:ai12453e1:b3:aaa1:c3:bbb1:X10:0123456789e").unwrap();
assert_eq!(
bc,
Dict {
a: 12453,
b: "aaa",
c: "bbb",
X: "0123456789",
}
); | identifier_body |
|
de.rs | description(&self) -> &str {
"aa"
//self.msg.as_str()
}
}
pub fn from_bytes<'de, T>(s: &'de [u8]) -> Result<T>
where
T: Deserialize<'de>,
{
let mut de: Deserializer = Deserializer::new(s);
T::deserialize(&mut de)
}
pub fn from_bytes_with_hash<'de, T>(s: &'de [u8]) -> Result<(T, Vec<u8>)>
where
T: Deserialize<'de>,
{
let mut de: Deserializer = Deserializer::new(s);
let res = T::deserialize(&mut de)?;
let info_hash = if !de.start_info.is_null() && de.end_info > de.start_info {
let len = de.end_info as usize - de.start_info as usize;
let slice = unsafe { std::slice::from_raw_parts(de.start_info, len) };
sha1::Sha1::from(&slice[..]).digest().bytes().to_vec()
} else {
//eprintln!("START={:?} END={:?}", de.start_info, de.end_info);
return Err(DeserializeError::InfoHashMissing);
};
Ok((res, info_hash))
}
use crate::metadata::{InfoFile, MetaTorrent, Torrent};
pub fn read_meta(s: &[u8]) -> Result<Torrent> {
let (meta, info_hash): (MetaTorrent, Vec<u8>) = from_bytes_with_hash(s)?;
if meta.info.pieces.len() % 20 != 0 {
return Err(DeserializeError::UnalignedPieces);
}
match &meta.info.files {
InfoFile::Multiple { files, .. } => {
if files.is_empty() {
return Err(DeserializeError::NoFile);
}
}
InfoFile::Single { length, .. } => {
if *length == 0 {
return Err(DeserializeError::EmptyFile);
}
}
}
Ok(Torrent {
meta,
info_hash: info_hash.into(),
})
}
// 4b3ea6a5b1e62537dceb67230248ff092a723e4d
// 4b3ea6a5b1e62537dceb67230248ff092a723e4d
#[doc(hidden)]
pub struct Deserializer<'de> {
input: &'de [u8],
start_info: *const u8,
end_info: *const u8,
info_depth: i64,
// Fix v2_deep_recursion.torrent
depth: Cell<u16>,
}
#[doc(hidden)]
impl<'de> Deserializer<'de> {
fn new(input: &'de [u8]) -> Self {
Deserializer {
input,
start_info: std::ptr::null(),
end_info: std::ptr::null(),
info_depth: 0,
depth: Cell::new(0),
}
}
fn peek(&self) -> Option<u8> {
self.input.get(0).copied()
}
fn next(&mut self) -> Result<u8> {
if let Some(c) = self.peek() {
let _ = self.consume();
return Ok(c);
}
Err(DeserializeError::UnexpectedEOF)
}
fn consume(&mut self) -> Result<()> {
self.input = &self.input.get(1..).ok_or(DeserializeError::UnexpectedEOF)?;
Ok(())
}
fn skip(&mut self, n: i64) -> Result<()> {
self.input = &self
.input
.get(n as usize..)
.ok_or(DeserializeError::UnexpectedEOF)?;
Ok(())
}
fn read_integer(&mut self, stop: u8) -> Result<i64> {
let mut n: i64 = 0;
loop {
match self.next()? {
c @ b'0'..=b'9' => n = (n * 10) + (c - b'0') as i64,
c if c == stop => break,
c => return Err(DeserializeError::WrongCharacter(c)),
}
}
Ok(n)
}
fn read_number(&mut self) -> Result<i64> {
self.consume()?; // 'i'
let negative = match self.peek() {
Some(b'-') => {
self.consume()?;
true
}
_ => false,
};
let n = self.read_integer(b'e')?;
Ok(if negative { -n } else { n })
}
fn read_string(&mut self) -> Result<&'de [u8]> {
let len = self.read_integer(b':')?;
let s = self
.input
.get(..len as usize)
.ok_or(DeserializeError::UnexpectedEOF)?;
self.skip(len)?;
if s == b"info" {
//println!("INFO FOUND: {:?}", String::from_utf8(s.to_vec()));
self.start_info = self.input.as_ptr();
self.info_depth = 1;
}
//println!("STRING={:?}", String::from_utf8((&s[0..std::cmp::min(100, s.len())]).to_vec()));
Ok(s)
}
} |
fn deserialize_any<V>(self, visitor: V) -> Result<V::Value>
where
V: Visitor<'de>,
{
// println!("NEXT: {:?}", self.peek());
match self.peek().ok_or(DeserializeError::UnexpectedEOF)? {
b'i' => {
// println!("FOUND NUMBER", );
visitor.visit_i64(self.read_number()?)
}
b'l' => {
self.consume()?;
// println!("FOUND LIST {:?}", &self.input[..10]);
visitor.visit_seq(BencAccess::new(self))
}
b'd' => {
let depth = self.depth.get();
if depth > 100 {
return Err(DeserializeError::TooDeep);
}
self.depth.set(depth + 1);
// println!("FOUND DICT {}", self.depth.get());
self.consume()?;
visitor.visit_map(BencAccess::new(self))
}
_n @ b'0'..=b'9' => {
// println!("FOUND STRING", );
visitor.visit_borrowed_bytes(self.read_string()?)
}
c => Err(DeserializeError::WrongCharacter(c)),
}
}
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value>
where
V: Visitor<'de>,
{
visitor.visit_some(self)
}
forward_to_deserialize_any! {
bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string
unit unit_struct seq tuple tuple_struct map struct identifier
newtype_struct ignored_any enum bytes byte_buf
}
}
struct BencAccess<'a, 'de> {
de: &'a mut Deserializer<'de>,
}
impl<'a, 'de> BencAccess<'a, 'de> {
fn new(de: &'a mut Deserializer<'de>) -> BencAccess<'a, 'de> {
if de.info_depth >= 1 {
de.info_depth += 1;
let _s = de.input;
//println!("DEPTH[NEW]={:?} {:?}", de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec()));
}
BencAccess { de }
}
}
impl<'a, 'de> MapAccess<'de> for BencAccess<'a, 'de> {
type Error = DeserializeError;
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>>
where
K: DeserializeSeed<'de>,
{
if self.de.peek() == Some(b'e') {
let _ = self.de.consume();
self.de.info_depth -= 1;
if self.de.info_depth == 1 {
//println!("FOUND END !");
self.de.end_info = self.de.input.as_ptr();
}
let _s = self.de.input;
//println!("DEPTH[END_DICT]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec()));
return Ok(None);
}
seed.deserialize(&mut *self.de).map(Some)
}
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value>
where
V: DeserializeSeed<'de>,
{
seed.deserialize(&mut *self.de)
}
}
impl<'a, 'de> SeqAccess<'de> for BencAccess<'a, 'de> {
type Error = DeserializeError;
fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>>
where
T: DeserializeSeed<'de>,
{
// println!("LAAA {:?}", &self.de.input[..5]);
if self.de.peek() == Some(b'e') {
let _ = self.de.consume();
if self.de.info_depth >= 1 {
|
#[doc(hidden)]
impl<'a, 'de> serde::de::Deserializer<'de> for &'a mut Deserializer<'de> {
type Error = DeserializeError; | random_line_split |
main.rs |
let device_extensions = DeviceExtensions {
khr_swapchain: true,
..DeviceExtensions::empty()
};
let (physical_device, queue_family_index) = instance
.enumerate_physical_devices()
.unwrap()
.filter(|p| p.supported_extensions().contains(&device_extensions))
.filter_map(|p| {
p.queue_family_properties()
.iter()
.enumerate()
.position(|(i, q)| {
q.queue_flags.intersects(QueueFlags::GRAPHICS)
&& p.surface_support(i as u32, &surface).unwrap_or(false)
})
.map(|i| (p, i as u32))
})
.min_by_key(|(p, _)| match p.properties().device_type {
PhysicalDeviceType::DiscreteGpu => 0,
PhysicalDeviceType::IntegratedGpu => 1,
PhysicalDeviceType::VirtualGpu => 2,
PhysicalDeviceType::Cpu => 3,
PhysicalDeviceType::Other => 4,
_ => 5,
})
.unwrap();
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
physical_device,
DeviceCreateInfo {
enabled_extensions: device_extensions,
queue_create_infos: vec![QueueCreateInfo {
queue_family_index,
..Default::default()
}],
..Default::default()
},
)
.unwrap();
let queue = queues.next().unwrap();
let (mut swapchain, images) = {
let surface_capabilities = device
.physical_device()
.surface_capabilities(&surface, Default::default())
.unwrap();
let image_format = device
.physical_device()
.surface_formats(&surface, Default::default())
.unwrap()[0]
.0;
Swapchain::new(
device.clone(),
surface,
SwapchainCreateInfo {
min_image_count: surface_capabilities.min_image_count.max(2),
image_format,
image_extent: window.inner_size().into(),
image_usage: ImageUsage::COLOR_ATTACHMENT,
composite_alpha: surface_capabilities
.supported_composite_alpha
.into_iter()
.next()
.unwrap(),
..Default::default()
},
)
.unwrap()
};
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
POSITIONS,
)
.unwrap();
let normals_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
NORMALS,
)
.unwrap();
let index_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::INDEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
INDICES,
)
.unwrap();
let uniform_buffer = SubbufferAllocator::new(
memory_allocator.clone(),
SubbufferAllocatorCreateInfo {
buffer_usage: BufferUsage::UNIFORM_BUFFER,
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
);
let render_pass = vulkano::single_pass_renderpass!(
device.clone(),
attachments: {
color: {
format: swapchain.image_format(),
samples: 1,
load_op: Clear,
store_op: Store,
},
depth_stencil: {
format: Format::D16_UNORM,
samples: 1,
load_op: Clear,
store_op: DontCare,
},
},
pass: {
color: [color],
depth_stencil: {depth_stencil},
},
)
.unwrap();
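// A single subpass: one color attachment in the swapchain's format, cleared and stored every
// frame, plus a D16_UNORM depth attachment that is cleared but never stored (DontCare) since
// its contents are only needed while rasterizing the current frame.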
let vs = vs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let fs = fs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let (mut pipeline, mut framebuffers) = window_size_dependent_setup(
&memory_allocator,
vs.clone(),
fs.clone(),
&images,
render_pass.clone(),
);
let mut recreate_swapchain = false;
let mut previous_frame_end = Some(sync::now(device.clone()).boxed());
let rotation_start = Instant::now();
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
*control_flow = ControlFlow::Exit;
}
Event::WindowEvent {
event: WindowEvent::Resized(_),
..
} => {
recreate_swapchain = true;
}
Event::RedrawEventsCleared => {
let image_extent: [u32; 2] = window.inner_size().into();
if image_extent.contains(&0) {
return;
}
previous_frame_end.as_mut().unwrap().cleanup_finished();
if recreate_swapchain {
let (new_swapchain, new_images) = swapchain
.recreate(SwapchainCreateInfo {
image_extent,
..swapchain.create_info()
})
.expect("failed to recreate swapchain");
swapchain = new_swapchain;
let (new_pipeline, new_framebuffers) = window_size_dependent_setup(
&memory_allocator,
vs.clone(),
fs.clone(),
&new_images,
render_pass.clone(),
);
pipeline = new_pipeline;
framebuffers = new_framebuffers;
recreate_swapchain = false;
}
let uniform_buffer_subbuffer = {
let elapsed = rotation_start.elapsed();
let rotation =
elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0;
let rotation = Matrix3::from_angle_y(Rad(rotation as f32));
// note: this teapot was meant for OpenGL where the origin is at the lower left
// instead the origin is at the upper left in Vulkan, so we reverse the Y axis
let aspect_ratio =
swapchain.image_extent()[0] as f32 / swapchain.image_extent()[1] as f32;
let proj = cgmath::perspective(
Rad(std::f32::consts::FRAC_PI_2),
aspect_ratio,
0.01,
100.0,
);
let view = Matrix4::look_at_rh(
Point3::new(0.3, 0.3, 1.0),
Point3::new(0.0, 0.0, 0.0),
Vector3::new(0.0, -1.0, 0.0),
);
let scale = Matrix4::from_scale(0.01);
let uniform_data = vs::Data {
world: Matrix4::from(rotation).into(),
view: (view * scale).into(),
proj: proj.into(),
};
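// These three matrices fill the vertex shader's Data uniform block (world/view/proj). The
// 0.01 scale presumably compensates for the teapot model being authored in much larger units
// than the view volume used here.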
let subbuffer = uniform_buffer.allocate_sized().unwrap();
*subbuffer.write().unwrap() = uniform_data;
subbuffer
};
let layout = pipeline.layout().set_layouts().get(0).unwrap();
let set = PersistentDescriptorSet::new(
&descriptor_set_allocator,
layout.clone(),
[WriteDescriptorSet::buffer(0, uniform_buffer_subbuffer)],
[],
)
.unwrap();
let (image_index, suboptimal, acquire_future) =
match acquire_next_image(swapchain.clone(), None).map_err(Validated::unwrap) {
Ok(r) => r,
Err(VulkanError::OutOfDate) => {
recreate_swapchain = true;
return;
}
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
recreate | {
// The start of this example is exactly the same as `triangle`. You should read the `triangle`
// example if you haven't done so yet.
let event_loop = EventLoop::new();
let library = VulkanLibrary::new().unwrap();
let required_extensions = Surface::required_extensions(&event_loop);
let instance = Instance::new(
library,
InstanceCreateInfo {
flags: InstanceCreateFlags::ENUMERATE_PORTABILITY,
enabled_extensions: required_extensions,
..Default::default()
},
)
.unwrap();
let window = Arc::new(WindowBuilder::new().build(&event_loop).unwrap());
let surface = Surface::from_window(instance.clone(), window.clone()).unwrap(); | identifier_body |
|
main.rs | () {
// The start of this example is exactly the same as `triangle`. You should read the `triangle`
// example if you haven't done so yet.
let event_loop = EventLoop::new();
let library = VulkanLibrary::new().unwrap();
let required_extensions = Surface::required_extensions(&event_loop);
let instance = Instance::new(
library,
InstanceCreateInfo {
flags: InstanceCreateFlags::ENUMERATE_PORTABILITY,
enabled_extensions: required_extensions,
..Default::default()
},
)
.unwrap();
let window = Arc::new(WindowBuilder::new().build(&event_loop).unwrap());
let surface = Surface::from_window(instance.clone(), window.clone()).unwrap();
let device_extensions = DeviceExtensions {
khr_swapchain: true,
..DeviceExtensions::empty()
};
let (physical_device, queue_family_index) = instance
.enumerate_physical_devices()
.unwrap()
.filter(|p| p.supported_extensions().contains(&device_extensions))
.filter_map(|p| {
p.queue_family_properties()
.iter()
.enumerate()
.position(|(i, q)| {
q.queue_flags.intersects(QueueFlags::GRAPHICS)
&& p.surface_support(i as u32, &surface).unwrap_or(false)
})
.map(|i| (p, i as u32))
})
.min_by_key(|(p, _)| match p.properties().device_type {
PhysicalDeviceType::DiscreteGpu => 0,
PhysicalDeviceType::IntegratedGpu => 1,
PhysicalDeviceType::VirtualGpu => 2,
PhysicalDeviceType::Cpu => 3,
PhysicalDeviceType::Other => 4,
_ => 5,
})
.unwrap();
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
physical_device,
DeviceCreateInfo {
enabled_extensions: device_extensions,
queue_create_infos: vec![QueueCreateInfo {
queue_family_index,
..Default::default()
}],
..Default::default()
},
)
.unwrap();
let queue = queues.next().unwrap();
let (mut swapchain, images) = {
let surface_capabilities = device
.physical_device()
.surface_capabilities(&surface, Default::default())
.unwrap();
let image_format = device
.physical_device()
.surface_formats(&surface, Default::default())
.unwrap()[0]
.0;
Swapchain::new(
device.clone(),
surface,
SwapchainCreateInfo {
min_image_count: surface_capabilities.min_image_count.max(2),
image_format,
image_extent: window.inner_size().into(),
image_usage: ImageUsage::COLOR_ATTACHMENT,
composite_alpha: surface_capabilities
.supported_composite_alpha
.into_iter()
.next()
.unwrap(),
..Default::default()
},
)
.unwrap()
};
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
POSITIONS,
)
.unwrap();
let normals_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
NORMALS,
)
.unwrap();
let index_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::INDEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
INDICES,
)
.unwrap();
let uniform_buffer = SubbufferAllocator::new(
memory_allocator.clone(),
SubbufferAllocatorCreateInfo {
buffer_usage: BufferUsage::UNIFORM_BUFFER,
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
);
let render_pass = vulkano::single_pass_renderpass!(
device.clone(),
attachments: {
color: {
format: swapchain.image_format(),
samples: 1,
load_op: Clear,
store_op: Store,
},
depth_stencil: {
format: Format::D16_UNORM,
samples: 1,
load_op: Clear,
store_op: DontCare,
},
},
pass: {
color: [color],
depth_stencil: {depth_stencil},
},
)
.unwrap();
let vs = vs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let fs = fs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let (mut pipeline, mut framebuffers) = window_size_dependent_setup(
&memory_allocator,
vs.clone(),
fs.clone(),
&images,
render_pass.clone(),
);
let mut recreate_swapchain = false;
let mut previous_frame_end = Some(sync::now(device.clone()).boxed());
let rotation_start = Instant::now();
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
*control_flow = ControlFlow::Exit;
}
Event::WindowEvent {
event: WindowEvent::Resized(_),
..
} => {
recreate_swapchain = true;
}
Event::RedrawEventsCleared => {
let image_extent: [u32; 2] = window.inner_size().into();
if image_extent.contains(&0) {
return;
}
previous_frame_end.as_mut().unwrap().cleanup_finished();
if recreate_swapchain {
let (new_swapchain, new_images) = swapchain
.recreate(SwapchainCreateInfo {
image_extent,
..swapchain.create_info()
})
.expect("failed to recreate swapchain");
swapchain = new_swapchain;
let (new_pipeline, new_framebuffers) = window_size_dependent_setup(
&memory_allocator,
vs.clone(),
fs.clone(),
&new_images,
render_pass.clone(),
);
pipeline = new_pipeline;
framebuffers = new_framebuffers;
recreate_swapchain = false;
}
let uniform_buffer_subbuffer = {
let elapsed = rotation_start.elapsed();
let rotation =
elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0;
let rotation = Matrix3::from_angle_y(Rad(rotation as f32));
// note: this teapot was meant for OpenGL where the origin is at the lower left
// instead the origin is at the upper left in Vulkan, so we reverse the Y axis
let aspect_ratio =
swapchain.image_extent()[0] as f32 / swapchain.image_extent()[1] as f32;
let proj = cgmath::perspective(
Rad(std::f32::consts::FRAC_PI_2),
aspect_ratio,
0.01,
100.0,
);
let view = Matrix4::look_at_rh(
Point3::new(0.3, 0.3, 1.0),
Point3::new(0.0, 0.0, 0.0),
Vector3::new(0.0, -1.0, 0.0),
);
let scale = Matrix4::from_scale(0.01);
let uniform_data = vs::Data {
world: Matrix4::from(rotation).into(),
view: (view * scale).into(),
proj: proj.into(),
};
let subbuffer = uniform_buffer.allocate_sized().unwrap();
*subbuffer.write().unwrap() = uniform_data;
subbuffer
};
let layout = pipeline.layout().set_layouts().get(0).unwrap();
let set = PersistentDescriptorSet::new(
&descriptor_set_allocator,
layout.clone(),
[WriteDescriptorSet::buffer(0, uniform_buffer_subbuffer)],
[],
)
.unwrap();
let (image_index, suboptimal, acquire_future) =
match acquire_next_image(swapchain.clone(), None).map_err(Validated::unwrap) {
Ok(r) => r,
Err(VulkanError::OutOfDate) => {
recreate_swapchain = true;
return;
}
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
| main | identifier_name |
|
main.rs | as `triangle`. You should read the `triangle`
// example if you haven't done so yet.
let event_loop = EventLoop::new();
let library = VulkanLibrary::new().unwrap();
let required_extensions = Surface::required_extensions(&event_loop);
let instance = Instance::new(
library,
InstanceCreateInfo {
flags: InstanceCreateFlags::ENUMERATE_PORTABILITY,
enabled_extensions: required_extensions,
..Default::default()
},
)
.unwrap();
let window = Arc::new(WindowBuilder::new().build(&event_loop).unwrap());
let surface = Surface::from_window(instance.clone(), window.clone()).unwrap();
let device_extensions = DeviceExtensions {
khr_swapchain: true,
..DeviceExtensions::empty()
};
let (physical_device, queue_family_index) = instance
.enumerate_physical_devices()
.unwrap()
.filter(|p| p.supported_extensions().contains(&device_extensions))
.filter_map(|p| {
p.queue_family_properties()
.iter()
.enumerate()
.position(|(i, q)| {
q.queue_flags.intersects(QueueFlags::GRAPHICS)
&& p.surface_support(i as u32, &surface).unwrap_or(false)
})
.map(|i| (p, i as u32))
})
.min_by_key(|(p, _)| match p.properties().device_type {
PhysicalDeviceType::DiscreteGpu => 0,
PhysicalDeviceType::IntegratedGpu => 1,
PhysicalDeviceType::VirtualGpu => 2,
PhysicalDeviceType::Cpu => 3,
PhysicalDeviceType::Other => 4,
_ => 5,
})
.unwrap();
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
physical_device,
DeviceCreateInfo {
enabled_extensions: device_extensions,
queue_create_infos: vec![QueueCreateInfo {
queue_family_index,
..Default::default()
}],
..Default::default()
},
)
.unwrap();
let queue = queues.next().unwrap();
let (mut swapchain, images) = {
let surface_capabilities = device
.physical_device()
.surface_capabilities(&surface, Default::default())
.unwrap();
let image_format = device
.physical_device()
.surface_formats(&surface, Default::default())
.unwrap()[0]
.0;
Swapchain::new(
device.clone(),
surface,
SwapchainCreateInfo {
min_image_count: surface_capabilities.min_image_count.max(2),
image_format,
image_extent: window.inner_size().into(),
image_usage: ImageUsage::COLOR_ATTACHMENT,
composite_alpha: surface_capabilities
.supported_composite_alpha
.into_iter()
.next()
.unwrap(),
..Default::default()
},
)
.unwrap()
};
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
POSITIONS,
)
.unwrap();
let normals_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
NORMALS,
)
.unwrap();
let index_buffer = Buffer::from_iter(
&memory_allocator,
BufferCreateInfo {
usage: BufferUsage::INDEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
INDICES,
)
.unwrap();
let uniform_buffer = SubbufferAllocator::new(
memory_allocator.clone(),
SubbufferAllocatorCreateInfo {
buffer_usage: BufferUsage::UNIFORM_BUFFER,
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
);
let render_pass = vulkano::single_pass_renderpass!(
device.clone(),
attachments: {
color: {
format: swapchain.image_format(),
samples: 1,
load_op: Clear,
store_op: Store,
},
depth_stencil: {
format: Format::D16_UNORM,
samples: 1,
load_op: Clear,
store_op: DontCare,
},
},
pass: {
color: [color],
depth_stencil: {depth_stencil},
},
)
.unwrap();
let vs = vs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let fs = fs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let (mut pipeline, mut framebuffers) = window_size_dependent_setup(
&memory_allocator,
vs.clone(),
fs.clone(),
&images,
render_pass.clone(),
);
let mut recreate_swapchain = false;
let mut previous_frame_end = Some(sync::now(device.clone()).boxed());
let rotation_start = Instant::now(); |
event_loop.run(move |event, _, control_flow| {
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
*control_flow = ControlFlow::Exit;
}
Event::WindowEvent {
event: WindowEvent::Resized(_),
..
} => {
recreate_swapchain = true;
}
Event::RedrawEventsCleared => {
let image_extent: [u32; 2] = window.inner_size().into();
if image_extent.contains(&0) {
return;
}
previous_frame_end.as_mut().unwrap().cleanup_finished();
if recreate_swapchain {
let (new_swapchain, new_images) = swapchain
.recreate(SwapchainCreateInfo {
image_extent,
..swapchain.create_info()
})
.expect("failed to recreate swapchain");
swapchain = new_swapchain;
let (new_pipeline, new_framebuffers) = window_size_dependent_setup(
&memory_allocator,
vs.clone(),
fs.clone(),
&new_images,
render_pass.clone(),
);
pipeline = new_pipeline;
framebuffers = new_framebuffers;
recreate_swapchain = false;
}
let uniform_buffer_subbuffer = {
let elapsed = rotation_start.elapsed();
let rotation =
elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0;
let rotation = Matrix3::from_angle_y(Rad(rotation as f32));
// note: this teapot was meant for OpenGL where the origin is at the lower left
// instead the origin is at the upper left in Vulkan, so we reverse the Y axis
let aspect_ratio =
swapchain.image_extent()[0] as f32 / swapchain.image_extent()[1] as f32;
let proj = cgmath::perspective(
Rad(std::f32::consts::FRAC_PI_2),
aspect_ratio,
0.01,
100.0,
);
let view = Matrix4::look_at_rh(
Point3::new(0.3, 0.3, 1.0),
Point3::new(0.0, 0.0, 0.0),
Vector3::new(0.0, -1.0, 0.0),
);
let scale = Matrix4::from_scale(0.01);
let uniform_data = vs::Data {
world: Matrix4::from(rotation).into(),
view: (view * scale).into(),
proj: proj.into(),
};
let subbuffer = uniform_buffer.allocate_sized().unwrap();
*subbuffer.write().unwrap() = uniform_data;
subbuffer
};
let layout = pipeline.layout().set_layouts().get(0).unwrap();
let set = PersistentDescriptorSet::new(
&descriptor_set_allocator,
layout.clone(),
[WriteDescriptorSet::buffer(0, uniform_buffer_subbuffer)],
[],
)
.unwrap();
let (image_index, suboptimal, acquire_future) =
match acquire_next_image(swapchain.clone(), None).map_err(Validated::unwrap) {
Ok(r) => r,
Err(VulkanError::OutOfDate) => {
recreate_swapchain = true;
return;
}
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
recreate_swapchain = true;
}
let mut builder |
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default()); | random_line_split |
c8.go | else {
cmd = Next{}
}
case 0x5:
log.Println("5xy0 - SE")
if vx == vy {
cmd = Skip{}
} else {
cmd = Next{}
}
case 0x6:
log.Println("6xkk - LD")
cpu.v[x] = kk
cmd = Next{}
case 0x7:
log.Println("7xkk - ADD")
cpu.v[x] += kk
cmd = Next{}
case 0x8:
switch o4 {
case 0x0:
log.Println("8xk0 - LD Vx, Vy")
cpu.v[x] = cpu.v[y]
case 0x1:
log.Println("8xk1 - OR Vx, Vy")
cpu.v[x] |= cpu.v[y]
case 0x2:
log.Println("8xk2 - AND Vx, Vy")
cpu.v[x] &= cpu.v[y]
case 0x3:
log.Println("8xk3 - XOR Vx, Vy")
cpu.v[x] ^= cpu.v[y]
case 0x4:
log.Println("8xk4 - ADD Vx, Vy")
if xy > 0xFF {
cpu.v[0xF] = 1
} else {
cpu.v[0xF] = 0
}
cpu.v[x] = uint8(xy & 0xFF)
case 0x5:
log.Println("8xk5 - SUB Vx, Vy")
if vx > vy {
cpu.v[0xF] = 1
} else {
cpu.v[0xF] = 0
}
cpu.v[x] = uint8(vx - vy)
case 0x6:
log.Println("8xk6 - SHR Vx, Vy")
cpu.v[0xF] = uint8(vx & 0x1)
cpu.v[x] /= 2
case 0x7:
log.Println("8xk7 - SUBN Vx, Vy")
if vy > vx {
cpu.v[0xF] = 1
} else {
cpu.v[0xF] = 0
}
cpu.v[x] = uint8(vy - vx)
case 0xE:
log.Println("8xkE - SHL Vx, Vy")
cpu.v[0xF] = cpu.v[x] >> 7
cpu.v[x] *= 2
}
cmd = Next{}
case 0x9:
log.Println("9xy0 - SNE")
if vx != vy {
cmd = Skip{}
} else {
cmd = Next{}
}
case 0xA:
log.Println("Annn - LD I")
cpu.i = nnn
cmd = Next{}
case 0xB:
log.Println("Bnnn - JP")
cmd = Jump{nnn + uint16(cpu.v[0])}
case 0xC:
log.Println("Cxkk - RND")
cpu.v[x] = cpu.rand() & kk
cmd = Next{}
case 0xD:
log.Println("DRW - Vx, Vy, nibble")
n := o4
bytes := mem.buf[cpu.i : cpu.i+uint16(n)]
cpu.v[0xF] = vme.draw(vx, vy, bytes)
cmd = Next{}
case 0xE:
switch o3 {
case 0x9:
log.Println("Ex9E - SKP")
pressed := false
for {
key := kb.Pop()
if key == nil {
break
}
if vx == *key {
pressed = true
}
}
if pressed {
cmd = Skip{}
} else {
cmd = Next{}
}
case 0xA:
log.Println("ExA1 - SKNP")
pressed := false
for {
key := kb.Pop()
if key == nil {
break
}
if vx == *key {
pressed = true
}
}
if !pressed {
cmd = Skip{}
} else {
cmd = Next{}
}
}
case 0xF:
switch o3 {
case 0x0:
switch o4 {
case 0x7:
log.Println("Fx07 - LD Vx, DT")
cpu.v[x] = uint8(cpu.dt)
cmd = Next{}
case 0xA:
log.Println("Fx0A - LD Vx, K")
key := kb.Pop()
if key != nil {
cpu.v[x] = uint8(*key)
cmd = Next{}
} else {
// Do nothing.
}
}
case 0x1:
switch o4 {
case 0x5:
log.Println("Fx15 - LD DT")
cpu.dt = vx
cpu.lastd = time.Now()
cmd = Next{}
case 0x8:
log.Println("Fx18 - LD ST")
cpu.st = vx
cpu.lasts = time.Now()
cmd = Next{}
case 0xE:
log.Println("Fx1E - ADD I Vx")
cpu.i += vx
cmd = Next{}
}
case 0x2:
log.Println("Fx29 - LD F")
cpu.i = vx * 5
cmd = Next{}
case 0x3:
log.Println("Fx33 - LD B")
mem.buf[cpu.i] = (uint8(vx) / 100) % 10
mem.buf[cpu.i+1] = (uint8(vx) / 10) % 10
mem.buf[cpu.i+2] = uint8(vx) % 10
cmd = Next{}
case 0x5:
log.Println("Fx55 - LD [I]")
for n := 0; n <= int(x); n++ {
mem.buf[cpu.i+uint16(n)] = cpu.v[n]
}
cmd = Next{}
case 0x6:
log.Println("Fx65 - LD")
for n := 0; n <= int(x); n++ {
cpu.v[n] = mem.buf[cpu.i+uint16(n)]
}
cmd = Next{}
}
}
if cmd != nil {
cmd.exec(cpu)
}
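// The delay and sound timers tick down at 60 Hz independently of instruction execution:
// lastd/lasts record when each timer last decremented, and the beep sample is replayed for as
// long as the sound timer stays non-zero.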
now := time.Now()
elapsed := now.Sub(cpu.lastd)
if elapsed.Seconds() > 1.0/60 && cpu.dt > 0 {
cpu.dt -= 1
cpu.lastd = now
}
elapsed = now.Sub(cpu.lasts)
if elapsed.Seconds() > 1.0/60 && cpu.st > 0 {
audio.Play()
audio.Rewind()
cpu.st -= 1
cpu.lasts = now
}
return nil
}
type Command interface {
exec(cpu *Cpu)
}
type Next struct{}
func (c Next) exec(cpu *Cpu) {
cpu.pc += 2
}
type Jump struct {
addr uint16
}
func (c Jump) exec(cpu *Cpu) {
cpu.pc = c.addr
}
type Skip struct{}
func (c Skip) exec(cpu *Cpu) {
cpu.pc += 4
}
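// Each decoded opcode yields one of these commands: Next advances the program counter past
// the current 2-byte instruction, Skip also jumps over the following instruction (pc += 4),
// and Jump sets the program counter directly.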
type Memory struct {
buf [0xFFF]byte // Chip-8 has 4 KiB of RAM (addresses 0x000-0xFFF).
}
func (m *Memory) Load(path string) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
n, err := f.Read(m.buf[0x200:])
if err != nil {
return err
}
log.Printf("%d bytes read from \"%s\".", n, path)
return nil
}
func NewMemory() *Memory {
m := new(Memory)
// Load fontsets.
m.buf = [0xFFF]byte{0xF0, 0x90, 0x90, 0x90, 0xF0, 0x20, 0x60, 0x20, 0x20, 0x70, 0xF0, 0x10, 0xF0, 0x80, 0xF0, 0xF0, 0x10, 0xF0, 0x10, 0xF0, 0x90, 0x90, 0xF0, 0x10, 0x10, 0xF0, 0x80, 0xF0, 0x10, 0xF0, 0xF0, 0x80, 0xF0, 0x90, 0xF0, 0xF0, 0x10, 0x20, 0x40, 0x40, 0xF0, 0x90, 0xF0, 0x90, 0xF0, 0xF0, 0x90, 0xF0, 0x | {
cmd = Skip{}
} | conditional_block |
|
c8.go |
func keytohex(key ebiten.Key) uint16 {
if key >= 43 && key <= 52 {
return uint16(key) - 43
} else {
return uint16(key) + 0x10
}
}
type Cpu struct {
v [64]uint8
i uint16
stack [16]uint16
sp uint16
pc uint16
dt uint16
st uint16
rnd *rand.Rand
lastd time.Time
lasts time.Time
}
func NewCpu() *Cpu {
cpu := new(Cpu)
cpu.pc = 0x200
cpu.rnd = rand.New(rand.NewSource(time.Now().UnixNano()))
cpu.lastd = time.Now()
cpu.lasts = time.Now()
return cpu
}
func (cpu *Cpu) rand() uint8 {
return uint8(cpu.rnd.Intn(256))
}
func (cpu *Cpu) Tick(mem *Memory, vme *VideoMemory, audio *audio.Player, kb *Keyboard) error {
o1 := mem.buf[cpu.pc] >> 4
o2 := mem.buf[cpu.pc] & 0x0F
o3 := mem.buf[cpu.pc+1] >> 4
o4 := mem.buf[cpu.pc+1] & 0x0F
opcode := fmt.Sprintf("%02X%02X%02X%02X", o1, o2, o3, o4)
log.Printf("Tick sp=%d pc=%d dt=%d st=%d opcode=%s", cpu.sp, cpu.pc, cpu.dt, cpu.st, opcode)
nnn := (uint16(o2) << 8) + (uint16(o3) << 4) + uint16(o4)
kk := (uint8(o3) << 4) + uint8(o4)
x := o2
y := o3
vx := uint16(cpu.v[o2])
vy := uint16(cpu.v[o3])
xy := vx + vy
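// Decoded fields: o1..o4 are the four nibbles of the 2-byte opcode, nnn is the low 12 bits
// (an address), kk the low byte (an immediate), x and y are register indices, and vx/vy hold
// the current values of those registers.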
var cmd Command
switch o1 {
case 0x0:
switch o2 {
case 0x0:
switch o3 {
case 0xE:
switch o4 {
case 0x0:
log.Println("CLS")
vme.clear()
cmd = Next{}
case 0xE:
log.Println("00EE RET")
pc := cpu.stack[cpu.sp-1]
cpu.sp -= 1
cmd = Jump{pc + 2}
}
}
default:
log.Println("SYS addr")
cmd = Jump{nnn}
}
case 0x1:
log.Println("1nnn JP")
cmd = Jump{nnn}
case 0x2:
log.Println("2nnn CALL")
cpu.stack[cpu.sp] = cpu.pc
cpu.sp += 1
cmd = Jump{nnn}
case 0x3:
log.Println("3xkk SE")
if vx == uint16(kk) {
cmd = Skip{}
} else {
cmd = Next{}
}
case 0x4:
log.Println("4xkk SNE")
if vx != uint16(kk) {
cmd = Skip{}
} else {
cmd = Next{}
}
case 0x5:
log.Println("5xy0 - SE")
if vx == vy {
cmd = Skip{}
} else {
cmd = Next{}
}
case 0x6:
log.Println("6xkk - LD")
cpu.v[x] = kk
cmd = Next{}
case 0x7:
log.Println("7xkk - ADD")
cpu.v[x] += kk
cmd = Next{}
case 0x8:
switch o4 {
case 0x0:
log.Println("8xk0 - LD Vx, Vy")
cpu.v[x] = cpu.v[y]
case 0x1:
log.Println("8xk1 - OR Vx, Vy")
cpu.v[x] |= cpu.v[y]
case 0x2:
log.Println("8xk2 - AND Vx, Vy")
cpu.v[x] &= cpu.v[y]
case 0x3:
log.Println("8xk3 - XOR Vx, Vy")
cpu.v[x] ^= cpu.v[y]
case 0x4:
log.Println("8xk4 - ADD Vx, Vy")
if xy > 0xFF {
cpu.v[0xF] = 1
} else {
cpu.v[0xF] = 0
}
cpu.v[x] = uint8(xy & 0xFF)
case 0x5:
log.Println("8xk5 - SUB Vx, Vy")
if vx > vy {
cpu.v[0xF] = 1
} else {
cpu.v[0xF] = 0
}
cpu.v[x] = uint8(vx - vy)
case 0x6:
log.Println("8xk6 - SHR Vx, Vy")
cpu.v[0xF] = uint8(vx & 0x1)
cpu.v[x] /= 2
case 0x7:
log.Println("8xk7 - SUBN Vx, Vy")
if vy > vx {
cpu.v[0xF] = 1
} else {
cpu.v[0xF] = 0
}
cpu.v[x] = uint8(vy - vx)
case 0xE:
log.Println("8xkE - SHL Vx, Vy")
cpu.v[0xF] = cpu.v[x] >> 7
cpu.v[x] *= 2
}
cmd = Next{}
case 0x9:
log.Println("9xy0 - SNE")
if vx != vy {
cmd = Skip{}
} else {
cmd = Next{}
}
case 0xA:
log.Println("Annn - LD I")
cpu.i = nnn
cmd = Next{}
case 0xB:
log.Println("Bnnn - JP")
cmd = Jump{nnn + uint16(cpu.v[0])}
case 0xC:
log.Println("Cxkk - RND")
cpu.v[x] = cpu.rand() & kk
cmd = Next{}
case 0xD:
log.Println("DRW - Vx, Vy, nibble")
n := o4
bytes := mem.buf[cpu.i : cpu.i+uint16(n)]
cpu.v[0xF] = vme.draw(vx, vy, bytes)
cmd = Next{}
case 0xE:
switch o3 {
case 0x9:
log.Println("Ex9E - SKP")
pressed := false
for {
key := kb.Pop()
if key == nil {
break
}
if vx == *key {
pressed = true
}
}
if pressed {
cmd = Skip{}
} else {
cmd = Next{}
}
case 0xA:
log.Println("ExA1 - SKNP")
pressed := false
for {
key := kb.Pop()
if key == nil {
break
}
if vx == *key {
pressed = true
}
}
if !pressed {
cmd = Skip{}
} else {
cmd = Next{}
}
}
case 0xF:
switch o3 {
case 0x0:
switch o4 {
case 0x7:
log.Println("Fx07 - LD Vx, DT")
cpu.v[x] = uint8(cpu.dt)
cmd = Next{}
case 0xA:
log.Println("Fx0A - LD Vx, K")
key := kb.Pop()
if key != nil {
cpu.v[x] = uint8(*key)
cmd = Next{}
} else {
// Do nothing.
}
}
case 0x1:
switch o4 {
case 0x5:
log.Println("Fx15 - LD DT")
cpu.dt = vx
cpu.lastd = time.Now()
cmd = Next{}
case 0x8:
log.Println("Fx18 - LD ST")
cpu.st = vx
cpu.lasts = time.Now()
cmd = Next{}
case 0xE:
log.Println("Fx1E - ADD I Vx")
cpu.i += vx
cmd = Next{}
}
case 0x2:
log.Println("Fx29 - LD F")
cpu.i = vx * 5
cmd = Next{}
case 0x3:
log.Println("Fx33 - LD B")
mem.buf[cpu.i] = (uint8(vx) / 100) % 10
mem.buf[cpu.i+1] = (uint8(vx) / 10 | {
kb.queue = []uint16{}
} | identifier_body |
|
c8.go | log.Println("Fx29 - LD F")
cpu.i = vx * 5
cmd = Next{}
case 0x3:
log.Println("Fx33 - LD B")
mem.buf[cpu.i] = (uint8(vx) / 100) % 10
mem.buf[cpu.i+1] = (uint8(vx) / 10) % 10
mem.buf[cpu.i+2] = uint8(vx) % 10
cmd = Next{}
case 0x5:
log.Println("Fx55 - LD [I]")
for n := 0; n <= int(x); n++ {
mem.buf[cpu.i+uint16(n)] = cpu.v[n]
}
cmd = Next{}
case 0x6:
log.Println("Fx65 - LD")
for n := 0; n <= int(x); n++ {
cpu.v[n] = mem.buf[cpu.i+uint16(n)]
}
cmd = Next{}
}
}
if cmd != nil {
cmd.exec(cpu)
}
now := time.Now()
elapsed := now.Sub(cpu.lastd)
if elapsed.Seconds() > 1.0/60 && cpu.dt > 0 {
cpu.dt -= 1
cpu.lastd = now
}
elapsed = now.Sub(cpu.lasts)
if elapsed.Seconds() > 1.0/60 && cpu.st > 0 {
audio.Play()
audio.Rewind()
cpu.st -= 1
cpu.lasts = now
}
return nil
}
type Command interface {
exec(cpu *Cpu)
}
type Next struct{}
func (c Next) exec(cpu *Cpu) {
cpu.pc += 2
}
type Jump struct {
addr uint16
}
func (c Jump) exec(cpu *Cpu) {
cpu.pc = c.addr
}
type Skip struct{}
func (c Skip) exec(cpu *Cpu) {
cpu.pc += 4
}
type Memory struct {
buf [0xFFF]byte // Chip-8 has 4 KiB of RAM (addresses 0x000-0xFFF).
}
func (m *Memory) Load(path string) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
n, err := f.Read(m.buf[0x200:])
if err != nil {
return err
}
log.Printf("%d bytes read from \"%s\".", n, path)
return nil
}
func NewMemory() *Memory {
m := new(Memory)
// Load fontsets.
m.buf = [0xFFF]byte{0xF0, 0x90, 0x90, 0x90, 0xF0, 0x20, 0x60, 0x20, 0x20, 0x70, 0xF0, 0x10, 0xF0, 0x80, 0xF0, 0xF0, 0x10, 0xF0, 0x10, 0xF0, 0x90, 0x90, 0xF0, 0x10, 0x10, 0xF0, 0x80, 0xF0, 0x10, 0xF0, 0xF0, 0x80, 0xF0, 0x90, 0xF0, 0xF0, 0x10, 0x20, 0x40, 0x40, 0xF0, 0x90, 0xF0, 0x90, 0xF0, 0xF0, 0x90, 0xF0, 0x10, 0xF0, 0xF0, 0x90, 0xF0, 0x90, 0x90, 0xE0, 0x90, 0xE0, 0x90, 0xE0, 0xF0, 0x80, 0x80, 0x80, 0xF0, 0xE0, 0x90, 0x90, 0x90, 0xE0, 0xF0, 0x80, 0xF0, 0x80, 0xF0, 0xF0, 0x80, 0xF0, 0x80, 0x80}
return m
}
// VideoMemory implements double buffer.
type VideoMemory struct {
buf [H_PIXELS][V_PIXELS]byte
mem [H_PIXELS][V_PIXELS]byte
}
func NewVideoMemory() *VideoMemory {
return new(VideoMemory)
}
func (vme *VideoMemory) clear() {
for x := 0; x < H_PIXELS; x++ {
for y := 0; y < V_PIXELS; y++ {
vme.buf[x][y] = 0
}
}
}
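// draw XORs an 8-pixel-wide sprite (one byte per row, most significant bit leftmost) onto the
// framebuffer starting at (x, y) and returns 1 if any lit pixel was erased, which Chip-8
// reports as a collision via register VF.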
func (vme *VideoMemory) draw(x uint16, y uint16, buf []byte) uint8 {
vf := uint16(0)
for i, byte := range buf {
i := uint16(i)
vf += vme.draw_pixcel(x, y+i, (byte>>7)&0x1)
vf += vme.draw_pixcel(x+1, y+i, (byte>>6)&0x1)
vf += vme.draw_pixcel(x+2, y+i, (byte>>5)&0x1)
vf += vme.draw_pixcel(x+3, y+i, (byte>>4)&0x1)
vf += vme.draw_pixcel(x+4, y+i, (byte>>3)&0x1)
vf += vme.draw_pixcel(x+5, y+i, (byte>>2)&0x1)
vf += vme.draw_pixcel(x+6, y+i, (byte>>1)&0x1)
vf += vme.draw_pixcel(x+7, y+i, (byte>>0)&0x1)
}
if vf > 0 {
return 1
} else {
return 0
}
}
func (vme *VideoMemory) draw_pixcel(x uint16, y uint16, new byte) uint16 {
var vf uint16
// Check collision.
if vme.buf[x][y] == 1 && new == 1 {
vf = 1
} else {
vf = 0
}
vme.buf[x][y] ^= new
return vf
}
type Button struct {
text string
img *ebiten.Image
x int
y int
onclicked func(*Button)
font *font.Face
rom Rom
}
func NewButton(text string, font *font.Face, x, y int, rom Rom, onclicked func(*Button)) *Button {
btn := new(Button)
img := ebiten.NewImage(BUTTON_WIDTH-1, BUTTON_HIGHT-1)
img.Fill(color.White)
btn.img = img
btn.text = text
btn.font = font
btn.x = x
btn.y = y
btn.rom = rom
btn.onclicked = onclicked
return btn
}
func (btn *Button) Draw(screen *ebiten.Image) {
opts := &ebiten.DrawImageOptions{}
opts.GeoM.Translate(float64(btn.x*BUTTON_WIDTH), float64(btn.y*BUTTON_HIGHT)+float64(SELECT_HIGHT))
screen.DrawImage(btn.img, opts)
text.Draw(screen, btn.text, *btn.font, btn.x*BUTTON_WIDTH+10, btn.y*BUTTON_HIGHT+17+SELECT_HIGHT, color.Black)
}
type UI struct {
btns []*Button
oncompleted func(rom Rom)
font *font.Face
}
func (ui *UI) Draw(screen *ebiten.Image) {
text.Draw(screen, "SELECT A GAME", *ui.font, 160, 36, color.White)
for _, btn := range ui.btns {
btn.Draw(screen)
}
}
func (ui *UI) Update() {
clicked := ebiten.IsMouseButtonPressed(ebiten.MouseButtonLeft)
x, y := ebiten.CursorPosition()
if clicked {
log.Printf("Clicked: %v on (%d, %d)", clicked, x, y)
for _, btn := range ui.btns {
minx := btn.img.Bounds().Min.X + btn.x*BUTTON_WIDTH
maxx := btn.img.Bounds().Max.X + btn.x*BUTTON_WIDTH
miny := btn.img.Bounds().Min.Y + btn.y*BUTTON_HIGHT + SELECT_HIGHT
maxy := btn.img.Bounds().Max.Y + btn.y*BUTTON_HIGHT + SELECT_HIGHT
log.Printf("x=%d y=%d minx=%d maxx=%d miny=%d maxy=%d", x, y, minx, maxx, miny, maxy)
if x >= minx && x <= maxx && y >= miny && y <= maxy {
btn.onclicked(btn)
}
}
}
}
type Rom struct {
name string
path string
}
func | NewUI | identifier_name |
|
c8.go | = Skip{}
} else {
cmd = Next{}
}
case 0xA:
log.Println("Annn - LD I")
cpu.i = nnn
cmd = Next{}
case 0xB:
log.Println("Bnnn - JP")
cmd = Jump{nnn + uint16(cpu.v[0])}
case 0xC:
log.Println("Cxkk - RND")
cpu.v[x] = cpu.rand() & kk
cmd = Next{}
case 0xD:
log.Println("DRW - Vx, Vy, nibble")
n := o4
bytes := mem.buf[cpu.i : cpu.i+uint16(n)]
cpu.v[0xF] = vme.draw(vx, vy, bytes)
cmd = Next{}
case 0xE:
switch o3 {
case 0x9:
log.Println("Ex9E - SKP")
pressed := false
for {
key := kb.Pop()
if key == nil {
break
}
if vx == *key {
pressed = true
}
}
if pressed {
cmd = Skip{}
} else {
cmd = Next{}
}
case 0xA:
log.Println("ExA1 - SKNP")
pressed := false
for {
key := kb.Pop()
if key == nil {
break
}
if vx == *key {
pressed = true
}
}
if !pressed {
cmd = Skip{}
} else {
cmd = Next{}
}
}
case 0xF:
switch o3 {
case 0x0:
switch o4 {
case 0x7:
log.Println("Fx07 - LD Vx, DT")
cpu.v[x] = uint8(cpu.dt)
cmd = Next{}
case 0xA:
log.Println("Fx0A - LD Vx, K")
key := kb.Pop()
if key != nil {
cpu.v[x] = uint8(*key)
cmd = Next{}
} else {
// Do nothing.
}
}
case 0x1:
switch o4 {
case 0x5:
log.Println("Fx15 - LD DT")
cpu.dt = vx
cpu.lastd = time.Now()
cmd = Next{}
case 0x8:
log.Println("Fx18 - LD ST")
cpu.st = vx
cpu.lasts = time.Now()
cmd = Next{}
case 0xE:
log.Println("Fx1E - ADD I Vx")
cpu.i += vx
cmd = Next{}
}
case 0x2:
log.Println("Fx29 - LD F")
cpu.i = vx * 5
cmd = Next{}
case 0x3:
log.Println("Fx33 - LD B")
mem.buf[cpu.i] = (uint8(vx) / 100) % 10
mem.buf[cpu.i+1] = (uint8(vx) / 10) % 10
mem.buf[cpu.i+2] = uint8(vx) % 10
cmd = Next{}
case 0x5:
log.Println("Fx55 - LD [I]")
for n := 0; n <= int(x); n++ {
mem.buf[cpu.i+uint16(n)] = cpu.v[n]
}
cmd = Next{}
case 0x6:
log.Println("Fx65 - LD")
for n := 0; n <= int(x); n++ {
cpu.v[n] = mem.buf[cpu.i+uint16(n)]
}
cmd = Next{}
}
}
if cmd != nil {
cmd.exec(cpu)
}
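// Tick the delay and sound timers down at roughly 60 Hz using wall-clock elapsed time; the sound timer also drives the beep.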
now := time.Now()
elapsed := now.Sub(cpu.lastd)
if elapsed.Seconds() > 1.0/60 && cpu.dt > 0 {
cpu.dt -= 1
cpu.lastd = now
}
elapsed = now.Sub(cpu.lasts)
if elapsed.Seconds() > 1.0/60 && cpu.st > 0 {
audio.Play()
audio.Rewind()
cpu.st -= 1
cpu.lasts = now
}
return nil
}
type Command interface {
exec(cpu *Cpu)
}
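// Next advances the PC by one 2-byte opcode, Skip by two, and Jump sets the PC to an absolute address.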
type Next struct{}
func (c Next) exec(cpu *Cpu) {
cpu.pc += 2
}
type Jump struct {
addr uint16
}
func (c Jump) exec(cpu *Cpu) {
cpu.pc = c.addr
}
type Skip struct{}
func (c Skip) exec(cpu *Cpu) {
cpu.pc += 4
}
type Memory struct {
buf [0xFFF]byte // Chip-8 has 4096 (0x1000) bytes of RAM.
}
func (m *Memory) Load(path string) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
n, err := f.Read(m.buf[0x200:])
log.Printf("%d bytes read from \"%s\".", n, path)
return nil
}
func NewMemory() *Memory {
m := new(Memory)
// Load fontsets.
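// 16 glyphs (0-F), 5 bytes each: the 80-byte fontset sits at the start of RAM (0x000-0x04F), which is what Fx29 (LD F) indexes into.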
m.buf = [0xFFF]byte{0xF0, 0x90, 0x90, 0x90, 0xF0, 0x20, 0x60, 0x20, 0x20, 0x70, 0xF0, 0x10, 0xF0, 0x80, 0xF0, 0xF0, 0x10, 0xF0, 0x10, 0xF0, 0x90, 0x90, 0xF0, 0x10, 0x10, 0xF0, 0x80, 0xF0, 0x10, 0xF0, 0xF0, 0x80, 0xF0, 0x90, 0xF0, 0xF0, 0x10, 0x20, 0x40, 0x40, 0xF0, 0x90, 0xF0, 0x90, 0xF0, 0xF0, 0x90, 0xF0, 0x10, 0xF0, 0xF0, 0x90, 0xF0, 0x90, 0x90, 0xE0, 0x90, 0xE0, 0x90, 0xE0, 0xF0, 0x80, 0x80, 0x80, 0xF0, 0xE0, 0x90, 0x90, 0x90, 0xE0, 0xF0, 0x80, 0xF0, 0x80, 0xF0, 0xF0, 0x80, 0xF0, 0x80, 0x80}
return m
}
// VideoMemory implements double buffer.
type VideoMemory struct {
buf [H_PIXELS][V_PIXELS]byte
mem [H_PIXELS][V_PIXELS]byte
}
func NewVideoMemory() *VideoMemory {
return new(VideoMemory)
}
func (vme *VideoMemory) clear() {
for x := 0; x < H_PIXELS; x++ {
for y := 0; y < V_PIXELS; y++ {
vme.buf[x][y] = 0
}
}
}
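// draw XORs an 8-pixel-wide sprite onto the buffer, one byte per row with the most significant bit leftmost; it returns 1 if any lit pixel was erased (the CHIP-8 collision flag VF).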
func (vme *VideoMemory) draw(x uint16, y uint16, buf []byte) uint8 {
vf := uint16(0)
for i, b := range buf {
i := uint16(i)
vf += vme.draw_pixcel(x, y+i, (b>>7)&0x1)
vf += vme.draw_pixcel(x+1, y+i, (b>>6)&0x1)
vf += vme.draw_pixcel(x+2, y+i, (b>>5)&0x1)
vf += vme.draw_pixcel(x+3, y+i, (b>>4)&0x1)
vf += vme.draw_pixcel(x+4, y+i, (b>>3)&0x1)
vf += vme.draw_pixcel(x+5, y+i, (b>>2)&0x1)
vf += vme.draw_pixcel(x+6, y+i, (b>>1)&0x1)
vf += vme.draw_pixcel(x+7, y+i, (b>>0)&0x1)
}
if vf > 0 {
return 1
} else {
return 0
}
}
func (vme *VideoMemory) draw_pixcel(x uint16, y uint16, new byte) uint16 {
var vf uint16 |
// Check collision. | random_line_split |
|
fmlrc2.rs | () {
//initialize logging for our benefit later
env_logger::from_env(env_logger::Env::default().default_filter_or("info")).init();
//non-cli parameters
const JOB_SLOTS: u64 = 10000;
const UPDATE_INTERVAL: u64 = 10000;
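// JOB_SLOTS caps how many correction jobs may be in flight ahead of the writer; UPDATE_INTERVAL controls how often progress is logged.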
//this is the CLI block, params that get populated appear before
let bwt_fn: String;
let long_read_fn: String;
let corrected_read_fn: String;
let mut kmer_sizes: Vec<usize> = vec![21, 59];
let mut threads: usize = 1;
let mut begin_id: u64 = 0;
let mut end_id: u64 = 0xFFFFFFFFFFFFFFFF;
let mut min_count: u64 = 5;
let mut min_frac: f64 = 0.1;
let mut branch_factor: f64 = 4.0;
let mut cache_size: usize = 8;
let verbose_mode: bool;
let matches = App::new("FMLRC2")
.version(VERSION.unwrap_or("?"))
.author("J. Matthew Holt <[email protected]>")
.about("FM-index Long Read Corrector - Rust implementation")
.arg(Arg::with_name("verbose_mode")
.short("v")
.long("verbose")
.help("enable verbose output"))
.arg(Arg::with_name("kmer_sizes")
.short("k")
.long("K")
.multiple(true)
.takes_value(true)
.help("k-mer sizes for correction, can be specified multiple times (default: \"-k 21 59\")"))
.arg(Arg::with_name("threads")
.short("t")
.long("threads")
.takes_value(true)
.help("number of correction threads (default: 1)"))
.arg(Arg::with_name("begin_id")
.short("b")
.long("begin_index")
.takes_value(true)
.help("index of read to start with (default: 0)"))
.arg(Arg::with_name("end_id")
.short("e")
.long("end_index")
.takes_value(true)
.help("index of read to end with (default: end of file)"))
.arg(Arg::with_name("min_count")
.short("m")
.long("min_count")
.takes_value(true)
.help("absolute minimum k-mer count to consisder a path (default: 5)"))
.arg(Arg::with_name("min_frac")
.short("f")
.long("min_dynamic_count")
.takes_value(true)
.help("dynamic minimum k-mer count fraction of median to consider a path (default: 0.1)"))
.arg(Arg::with_name("branch_factor")
.short("B")
.long("branch_factor")
.takes_value(true)
.help("branching factor for correction, scaled by k (default: 4.0)"))
.arg(Arg::with_name("cache_size")
.short("C")
.long("cache_size")
.takes_value(true)
.help("the length of k-mer to precompute in cache (default: 8)"))
.arg(Arg::with_name("COMP_MSBWT.NPY")
.help("The compressed BWT file with high accuracy reads")
.required(true)
.index(1))
.arg(Arg::with_name("LONG_READS.FA")
.help("The FASTX file with uncorrected reads")
.required(true)
.index(2))
.arg(Arg::with_name("CORRECTED_READS.FA")
.help("The FASTA file to write corrected reads to")
.required(true)
.index(3))
.get_matches();
//pull out required values
bwt_fn = matches.value_of("COMP_MSBWT.NPY").unwrap().to_string();
long_read_fn = matches.value_of("LONG_READS.FA").unwrap().to_string();
corrected_read_fn = matches.value_of("CORRECTED_READS.FA").unwrap().to_string();
//now check options
verbose_mode = matches.is_present("verbose_mode");
kmer_sizes = values_t!(matches.values_of("kmer_sizes"), usize).unwrap_or(kmer_sizes);
threads = value_t!(matches.value_of("threads"), usize).unwrap_or(threads);
begin_id = value_t!(matches.value_of("begin_id"), u64).unwrap_or(begin_id);
end_id = value_t!(matches.value_of("end_id"), u64).unwrap_or(end_id);
min_count = value_t!(matches.value_of("min_count"), u64).unwrap_or(min_count);
min_frac = value_t!(matches.value_of("min_frac"), f64).unwrap_or(min_frac);
branch_factor = value_t!(matches.value_of("branch_factor"), f64).unwrap_or(branch_factor);
cache_size = value_t!(matches.value_of("cache_size"), usize).unwrap_or(cache_size);
info!("Input parameters (required):");
info!("\tBWT: \"{}\"", bwt_fn);
match File::open(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open BWT file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tInput reads: \"{}\"", long_read_fn);
match File::open(&long_read_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open input reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tOutput corrected reads: \"{}\"", corrected_read_fn);
let write_file: File = match File::create(&corrected_read_fn) {
Ok(file) => file,
Err(e) => {
error!("Failed to create output corrected reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
let mut fasta_writer = OrderedFastaWriter::new(&write_file);
info!("Execution Parameters:");
info!("\tverbose: {}", verbose_mode);
info!("\tthreads: {}", threads);
info!("\tcache size: {}", cache_size);
info!("Correction Parameters:");
info!("\treads to correct: [{}, {})", begin_id, end_id);
if begin_id > end_id {
error!("--begin_index set to value larger than --end_index");
std::process::exit(exitcode::DATAERR);
}
kmer_sizes.sort_unstable();
info!("\tk-mer sizes: {:?}", kmer_sizes);
info!("\tabs. mininimum count: {}", min_count);
info!("\tdyn. minimimum fraction: {}", min_frac);
if !(0.0..=1.0).contains(&min_frac) {
error!("--min_dynamic_count must be within the range [0, 1]");
std::process::exit(exitcode::DATAERR);
}
info!("\tbranching factor: {}", branch_factor);
if branch_factor <= 0.0 {
error!("--branch_factor must be greater than 0.0");
std::process::exit(exitcode::DATAERR);
}
//TODO make some of these hard-coded values into params?
let my_params: CorrectionParameters = CorrectionParameters {
kmer_sizes,
min_count,
max_branch_attempt_length: 10000,
branch_limit_factor: branch_factor,
branch_buffer_factor: 1.3,
//TODO: make this 0.4 a CLI param? we did a prelim test with 0.3 and prec/recall went up but total indels/subs went up *shrug*
midpoint_ed_factor: 0.4,
tail_buffer_factor: 1.05,
frac: min_frac,
verbose: verbose_mode
};
let arc_params: Arc<CorrectionParameters> = Arc::new(my_params);
//first load the BWT into memory
let mut bwt: BitVectorBWT = BitVectorBWT::with_cache_size(cache_size);
match bwt.load_numpy_file(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to load BWT file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
let arc_bwt: Arc<BitVectorBWT> = Arc::new(bwt);
//we need to set up the multiprocessing components now
let pool = ThreadPool::new(threads);
let (tx, rx) = mpsc::channel();
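// Workers send each read's CorrectionResults back over this channel; the main thread drains it and writes reads out through the OrderedFastaWriter.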
//now needletail open the reads to correct
let mut read_index: u64 = 0;
let mut jobs_queued: u64 = 0;
let mut results_received: u64 = 0;
info!("Starting read correction processes...");
match parse_fastx_file(&long_read_fn) {
Ok(mut fastx_reader) => {
while let Some(raw_record) = fastx_reader.next() {
let record = match raw_record {
Ok(record) => { record },
Err(e) => {
error!("Invalid record while parsing long read file: {:?}", e);
std::process::exit | main | identifier_name |
|
fmlrc2.rs | = 10000;
//this is the CLI block, params that get populated appear before
let bwt_fn: String;
let long_read_fn: String;
let corrected_read_fn: String;
let mut kmer_sizes: Vec<usize> = vec![21, 59];
let mut threads: usize = 1;
let mut begin_id: u64 = 0;
let mut end_id: u64 = 0xFFFFFFFFFFFFFFFF;
let mut min_count: u64 = 5;
let mut min_frac: f64 = 0.1;
let mut branch_factor: f64 = 4.0;
let mut cache_size: usize = 8;
let verbose_mode: bool;
let matches = App::new("FMLRC2")
.version(VERSION.unwrap_or("?"))
.author("J. Matthew Holt <[email protected]>")
.about("FM-index Long Read Corrector - Rust implementation")
.arg(Arg::with_name("verbose_mode")
.short("v")
.long("verbose")
.help("enable verbose output"))
.arg(Arg::with_name("kmer_sizes")
.short("k")
.long("K")
.multiple(true)
.takes_value(true)
.help("k-mer sizes for correction, can be specified multiple times (default: \"-k 21 59\")"))
.arg(Arg::with_name("threads")
.short("t")
.long("threads")
.takes_value(true)
.help("number of correction threads (default: 1)"))
.arg(Arg::with_name("begin_id")
.short("b")
.long("begin_index")
.takes_value(true)
.help("index of read to start with (default: 0)"))
.arg(Arg::with_name("end_id")
.short("e")
.long("end_index")
.takes_value(true)
.help("index of read to end with (default: end of file)"))
.arg(Arg::with_name("min_count")
.short("m")
.long("min_count")
.takes_value(true)
.help("absolute minimum k-mer count to consisder a path (default: 5)"))
.arg(Arg::with_name("min_frac")
.short("f")
.long("min_dynamic_count")
.takes_value(true)
.help("dynamic minimum k-mer count fraction of median to consider a path (default: 0.1)"))
.arg(Arg::with_name("branch_factor")
.short("B")
.long("branch_factor")
.takes_value(true)
.help("branching factor for correction, scaled by k (default: 4.0)"))
.arg(Arg::with_name("cache_size")
.short("C")
.long("cache_size")
.takes_value(true)
.help("the length of k-mer to precompute in cache (default: 8)"))
.arg(Arg::with_name("COMP_MSBWT.NPY")
.help("The compressed BWT file with high accuracy reads")
.required(true)
.index(1))
.arg(Arg::with_name("LONG_READS.FA")
.help("The FASTX file with uncorrected reads")
.required(true) | .help("The FASTA file to write corrected reads to")
.required(true)
.index(3))
.get_matches();
//pull out required values
bwt_fn = matches.value_of("COMP_MSBWT.NPY").unwrap().to_string();
long_read_fn = matches.value_of("LONG_READS.FA").unwrap().to_string();
corrected_read_fn = matches.value_of("CORRECTED_READS.FA").unwrap().to_string();
//now check options
verbose_mode = matches.is_present("verbose_mode");
kmer_sizes = values_t!(matches.values_of("kmer_sizes"), usize).unwrap_or(kmer_sizes);
threads = value_t!(matches.value_of("threads"), usize).unwrap_or(threads);
begin_id = value_t!(matches.value_of("begin_id"), u64).unwrap_or(begin_id);
end_id = value_t!(matches.value_of("end_id"), u64).unwrap_or(end_id);
min_count = value_t!(matches.value_of("min_count"), u64).unwrap_or(min_count);
min_frac = value_t!(matches.value_of("min_frac"), f64).unwrap_or(min_frac);
branch_factor = value_t!(matches.value_of("branch_factor"), f64).unwrap_or(branch_factor);
cache_size = value_t!(matches.value_of("cache_size"), usize).unwrap_or(cache_size);
info!("Input parameters (required):");
info!("\tBWT: \"{}\"", bwt_fn);
match File::open(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open BWT file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tInput reads: \"{}\"", long_read_fn);
match File::open(&long_read_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open input reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tOutput corrected reads: \"{}\"", corrected_read_fn);
let write_file: File = match File::create(&corrected_read_fn) {
Ok(file) => file,
Err(e) => {
error!("Failed to create output corrected reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
let mut fasta_writer = OrderedFastaWriter::new(&write_file);
info!("Execution Parameters:");
info!("\tverbose: {}", verbose_mode);
info!("\tthreads: {}", threads);
info!("\tcache size: {}", cache_size);
info!("Correction Parameters:");
info!("\treads to correct: [{}, {})", begin_id, end_id);
if begin_id > end_id {
error!("--begin_index set to value larger than --end_index");
std::process::exit(exitcode::DATAERR);
}
kmer_sizes.sort_unstable();
info!("\tk-mer sizes: {:?}", kmer_sizes);
info!("\tabs. mininimum count: {}", min_count);
info!("\tdyn. minimimum fraction: {}", min_frac);
if !(0.0..=1.0).contains(&min_frac) {
error!("--min_dynamic_count must be within the range [0, 1]");
std::process::exit(exitcode::DATAERR);
}
info!("\tbranching factor: {}", branch_factor);
if branch_factor <= 0.0 {
error!("--branch_factor must be greater than 0.0");
std::process::exit(exitcode::DATAERR);
}
//TODO make some of these hard-coded values into params?
let my_params: CorrectionParameters = CorrectionParameters {
kmer_sizes,
min_count,
max_branch_attempt_length: 10000,
branch_limit_factor: branch_factor,
branch_buffer_factor: 1.3,
//TODO: make this 0.4 a CLI param? we did a prelim test with 0.3 and prec/recall went up but total indels/subs went up *shrug*
midpoint_ed_factor: 0.4,
tail_buffer_factor: 1.05,
frac: min_frac,
verbose: verbose_mode
};
let arc_params: Arc<CorrectionParameters> = Arc::new(my_params);
//first load the BWT into memory
let mut bwt: BitVectorBWT = BitVectorBWT::with_cache_size(cache_size);
match bwt.load_numpy_file(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to load BWT file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
let arc_bwt: Arc<BitVectorBWT> = Arc::new(bwt);
//we need to set up the multiprocessing components now
let pool = ThreadPool::new(threads);
let (tx, rx) = mpsc::channel();
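// Workers send each read's CorrectionResults back over this channel; the main thread drains it and writes reads out through the OrderedFastaWriter.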
//now needletail open the reads to correct
let mut read_index: u64 = 0;
let mut jobs_queued: u64 = 0;
let mut results_received: u64 = 0;
info!("Starting read correction processes...");
match parse_fastx_file(&long_read_fn) {
Ok(mut fastx_reader) => {
while let Some(raw_record) = fastx_reader.next() {
let record = match raw_record {
Ok(record) => { record },
Err(e) => {
error!("Invalid record while parsing long read file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
if read_index >= begin_id && read_index < end_id {
//if we've filled our queue, then we should wait until we get some results back
if jobs_queued - results_received >= JOB_SLOTS {
let rx_value: CorrectionResults | .index(2))
.arg(Arg::with_name("CORRECTED_READS.FA") | random_line_split |
fmlrc2.rs | f64 = 0.1;
let mut branch_factor: f64 = 4.0;
let mut cache_size: usize = 8;
let verbose_mode: bool;
let matches = App::new("FMLRC2")
.version(VERSION.unwrap_or("?"))
.author("J. Matthew Holt <[email protected]>")
.about("FM-index Long Read Corrector - Rust implementation")
.arg(Arg::with_name("verbose_mode")
.short("v")
.long("verbose")
.help("enable verbose output"))
.arg(Arg::with_name("kmer_sizes")
.short("k")
.long("K")
.multiple(true)
.takes_value(true)
.help("k-mer sizes for correction, can be specified multiple times (default: \"-k 21 59\")"))
.arg(Arg::with_name("threads")
.short("t")
.long("threads")
.takes_value(true)
.help("number of correction threads (default: 1)"))
.arg(Arg::with_name("begin_id")
.short("b")
.long("begin_index")
.takes_value(true)
.help("index of read to start with (default: 0)"))
.arg(Arg::with_name("end_id")
.short("e")
.long("end_index")
.takes_value(true)
.help("index of read to end with (default: end of file)"))
.arg(Arg::with_name("min_count")
.short("m")
.long("min_count")
.takes_value(true)
.help("absolute minimum k-mer count to consisder a path (default: 5)"))
.arg(Arg::with_name("min_frac")
.short("f")
.long("min_dynamic_count")
.takes_value(true)
.help("dynamic minimum k-mer count fraction of median to consider a path (default: 0.1)"))
.arg(Arg::with_name("branch_factor")
.short("B")
.long("branch_factor")
.takes_value(true)
.help("branching factor for correction, scaled by k (default: 4.0)"))
.arg(Arg::with_name("cache_size")
.short("C")
.long("cache_size")
.takes_value(true)
.help("the length of k-mer to precompute in cache (default: 8)"))
.arg(Arg::with_name("COMP_MSBWT.NPY")
.help("The compressed BWT file with high accuracy reads")
.required(true)
.index(1))
.arg(Arg::with_name("LONG_READS.FA")
.help("The FASTX file with uncorrected reads")
.required(true)
.index(2))
.arg(Arg::with_name("CORRECTED_READS.FA")
.help("The FASTA file to write corrected reads to")
.required(true)
.index(3))
.get_matches();
//pull out required values
bwt_fn = matches.value_of("COMP_MSBWT.NPY").unwrap().to_string();
long_read_fn = matches.value_of("LONG_READS.FA").unwrap().to_string();
corrected_read_fn = matches.value_of("CORRECTED_READS.FA").unwrap().to_string();
//now check options
verbose_mode = matches.is_present("verbose_mode");
kmer_sizes = values_t!(matches.values_of("kmer_sizes"), usize).unwrap_or(kmer_sizes);
threads = value_t!(matches.value_of("threads"), usize).unwrap_or(threads);
begin_id = value_t!(matches.value_of("begin_id"), u64).unwrap_or(begin_id);
end_id = value_t!(matches.value_of("end_id"), u64).unwrap_or(end_id);
min_count = value_t!(matches.value_of("min_count"), u64).unwrap_or(min_count);
min_frac = value_t!(matches.value_of("min_frac"), f64).unwrap_or(min_frac);
branch_factor = value_t!(matches.value_of("branch_factor"), f64).unwrap_or(branch_factor);
cache_size = value_t!(matches.value_of("cache_size"), usize).unwrap_or(cache_size);
info!("Input parameters (required):");
info!("\tBWT: \"{}\"", bwt_fn);
match File::open(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open BWT file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tInput reads: \"{}\"", long_read_fn);
match File::open(&long_read_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open input reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tOutput corrected reads: \"{}\"", corrected_read_fn);
let write_file: File = match File::create(&corrected_read_fn) {
Ok(file) => file,
Err(e) => {
error!("Failed to create output corrected reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
let mut fasta_writer = OrderedFastaWriter::new(&write_file);
info!("Execution Parameters:");
info!("\tverbose: {}", verbose_mode);
info!("\tthreads: {}", threads);
info!("\tcache size: {}", cache_size);
info!("Correction Parameters:");
info!("\treads to correct: [{}, {})", begin_id, end_id);
if begin_id > end_id {
error!("--begin_index set to value larger than --end_index");
std::process::exit(exitcode::DATAERR);
}
kmer_sizes.sort_unstable();
info!("\tk-mer sizes: {:?}", kmer_sizes);
info!("\tabs. mininimum count: {}", min_count);
info!("\tdyn. minimimum fraction: {}", min_frac);
if !(0.0..=1.0).contains(&min_frac) {
error!("--min_dynamic_count must be within the range [0, 1]");
std::process::exit(exitcode::DATAERR);
}
info!("\tbranching factor: {}", branch_factor);
if branch_factor <= 0.0 {
error!("--branch_factor must be greater than 0.0");
std::process::exit(exitcode::DATAERR);
}
//TODO make some of these hard-coded values into params?
let my_params: CorrectionParameters = CorrectionParameters {
kmer_sizes,
min_count,
max_branch_attempt_length: 10000,
branch_limit_factor: branch_factor,
branch_buffer_factor: 1.3,
//TODO: make this 0.4 a CLI param? we did a prelim test with 0.3 and prec/recall went up but total indels/subs went up *shrug*
midpoint_ed_factor: 0.4,
tail_buffer_factor: 1.05,
frac: min_frac,
verbose: verbose_mode
};
let arc_params: Arc<CorrectionParameters> = Arc::new(my_params);
//first load the BWT into memory
let mut bwt: BitVectorBWT = BitVectorBWT::with_cache_size(cache_size);
match bwt.load_numpy_file(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to load BWT file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
let arc_bwt: Arc<BitVectorBWT> = Arc::new(bwt);
//we need to set up the multiprocessing components now
let pool = ThreadPool::new(threads);
let (tx, rx) = mpsc::channel();
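// Workers send each read's CorrectionResults back over this channel; the main thread drains it and writes reads out through the OrderedFastaWriter.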
//now needletail open the reads to correct
let mut read_index: u64 = 0;
let mut jobs_queued: u64 = 0;
let mut results_received: u64 = 0;
info!("Starting read correction processes...");
match parse_fastx_file(&long_read_fn) {
Ok(mut fastx_reader) => {
while let Some(raw_record) = fastx_reader.next() {
let record = match raw_record {
Ok(record) => { record },
Err(e) => {
error!("Invalid record while parsing long read file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
if read_index >= begin_id && read_index < end_id {
//if we've filled our queue, then we should wait until we get some results back
if jobs_queued - results_received >= JOB_SLOTS | {
let rx_value: CorrectionResults = rx.recv().unwrap();
if verbose_mode {
info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after);
}
match fasta_writer.write_correction(rx_value) {
Ok(()) => {},
Err(e) => {
error!("Failed while writing read correction: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
results_received += 1;
if results_received % UPDATE_INTERVAL == 0 {
info!("Processed {} reads...", results_received);
}
} | conditional_block |
|
fmlrc2.rs | let verbose_mode: bool;
let matches = App::new("FMLRC2")
.version(VERSION.unwrap_or("?"))
.author("J. Matthew Holt <[email protected]>")
.about("FM-index Long Read Corrector - Rust implementation")
.arg(Arg::with_name("verbose_mode")
.short("v")
.long("verbose")
.help("enable verbose output"))
.arg(Arg::with_name("kmer_sizes")
.short("k")
.long("K")
.multiple(true)
.takes_value(true)
.help("k-mer sizes for correction, can be specified multiple times (default: \"-k 21 59\")"))
.arg(Arg::with_name("threads")
.short("t")
.long("threads")
.takes_value(true)
.help("number of correction threads (default: 1)"))
.arg(Arg::with_name("begin_id")
.short("b")
.long("begin_index")
.takes_value(true)
.help("index of read to start with (default: 0)"))
.arg(Arg::with_name("end_id")
.short("e")
.long("end_index")
.takes_value(true)
.help("index of read to end with (default: end of file)"))
.arg(Arg::with_name("min_count")
.short("m")
.long("min_count")
.takes_value(true)
.help("absolute minimum k-mer count to consisder a path (default: 5)"))
.arg(Arg::with_name("min_frac")
.short("f")
.long("min_dynamic_count")
.takes_value(true)
.help("dynamic minimum k-mer count fraction of median to consider a path (default: 0.1)"))
.arg(Arg::with_name("branch_factor")
.short("B")
.long("branch_factor")
.takes_value(true)
.help("branching factor for correction, scaled by k (default: 4.0)"))
.arg(Arg::with_name("cache_size")
.short("C")
.long("cache_size")
.takes_value(true)
.help("the length of k-mer to precompute in cache (default: 8)"))
.arg(Arg::with_name("COMP_MSBWT.NPY")
.help("The compressed BWT file with high accuracy reads")
.required(true)
.index(1))
.arg(Arg::with_name("LONG_READS.FA")
.help("The FASTX file with uncorrected reads")
.required(true)
.index(2))
.arg(Arg::with_name("CORRECTED_READS.FA")
.help("The FASTA file to write corrected reads to")
.required(true)
.index(3))
.get_matches();
//pull out required values
bwt_fn = matches.value_of("COMP_MSBWT.NPY").unwrap().to_string();
long_read_fn = matches.value_of("LONG_READS.FA").unwrap().to_string();
corrected_read_fn = matches.value_of("CORRECTED_READS.FA").unwrap().to_string();
//now check options
verbose_mode = matches.is_present("verbose_mode");
kmer_sizes = values_t!(matches.values_of("kmer_sizes"), usize).unwrap_or(kmer_sizes);
threads = value_t!(matches.value_of("threads"), usize).unwrap_or(threads);
begin_id = value_t!(matches.value_of("begin_id"), u64).unwrap_or(begin_id);
end_id = value_t!(matches.value_of("end_id"), u64).unwrap_or(end_id);
min_count = value_t!(matches.value_of("min_count"), u64).unwrap_or(min_count);
min_frac = value_t!(matches.value_of("min_frac"), f64).unwrap_or(min_frac);
branch_factor = value_t!(matches.value_of("branch_factor"), f64).unwrap_or(branch_factor);
cache_size = value_t!(matches.value_of("cache_size"), usize).unwrap_or(cache_size);
info!("Input parameters (required):");
info!("\tBWT: \"{}\"", bwt_fn);
match File::open(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open BWT file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tInput reads: \"{}\"", long_read_fn);
match File::open(&long_read_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to open input reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
info!("\tOutput corrected reads: \"{}\"", corrected_read_fn);
let write_file: File = match File::create(&corrected_read_fn) {
Ok(file) => file,
Err(e) => {
error!("Failed to create output corrected reads file: {:?}", e);
std::process::exit(exitcode::NOINPUT);
}
};
let mut fasta_writer = OrderedFastaWriter::new(&write_file);
info!("Execution Parameters:");
info!("\tverbose: {}", verbose_mode);
info!("\tthreads: {}", threads);
info!("\tcache size: {}", cache_size);
info!("Correction Parameters:");
info!("\treads to correct: [{}, {})", begin_id, end_id);
if begin_id > end_id {
error!("--begin_index set to value larger than --end_index");
std::process::exit(exitcode::DATAERR);
}
kmer_sizes.sort_unstable();
info!("\tk-mer sizes: {:?}", kmer_sizes);
info!("\tabs. mininimum count: {}", min_count);
info!("\tdyn. minimimum fraction: {}", min_frac);
if !(0.0..=1.0).contains(&min_frac) {
error!("--min_dynamic_count must be within the range [0, 1]");
std::process::exit(exitcode::DATAERR);
}
info!("\tbranching factor: {}", branch_factor);
if branch_factor <= 0.0 {
error!("--branch_factor must be greater than 0.0");
std::process::exit(exitcode::DATAERR);
}
//TODO make some of these hard-coded values into params?
let my_params: CorrectionParameters = CorrectionParameters {
kmer_sizes,
min_count,
max_branch_attempt_length: 10000,
branch_limit_factor: branch_factor,
branch_buffer_factor: 1.3,
//TODO: make this 0.4 a CLI param? we did a prelim test with 0.3 and prec/recall went up but total indels/subs went up *shrug*
midpoint_ed_factor: 0.4,
tail_buffer_factor: 1.05,
frac: min_frac,
verbose: verbose_mode
};
let arc_params: Arc<CorrectionParameters> = Arc::new(my_params);
//first load the BWT into memory
let mut bwt: BitVectorBWT = BitVectorBWT::with_cache_size(cache_size);
match bwt.load_numpy_file(&bwt_fn) {
Ok(_) => {},
Err(e) => {
error!("Failed to load BWT file: {:?}", e);
std::process::exit(exitcode::IOERR);
}
};
let arc_bwt: Arc<BitVectorBWT> = Arc::new(bwt);
//we need to set up the multiprocessing components now
let pool = ThreadPool::new(threads);
let (tx, rx) = mpsc::channel();
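// Workers send each read's CorrectionResults back over this channel; the main thread drains it and writes reads out through the OrderedFastaWriter.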
//now needletail open the reads to correct
let mut read_index: u64 = 0;
let mut jobs_queued: u64 = 0;
let mut results_received: u64 = 0;
info!("Starting read correction processes...");
match parse_fastx_file(&long_read_fn) {
Ok(mut fastx_reader) => {
while let Some(raw_record) = fastx_reader.next() {
let record = match raw_record {
Ok(record) => { record },
Err(e) => {
error!("Invalid record while parsing long read file: {:?}", e);
std::process::exit(exitcode | {
//initialize logging for our benefit later
env_logger::from_env(env_logger::Env::default().default_filter_or("info")).init();
//non-cli parameters
const JOB_SLOTS: u64 = 10000;
const UPDATE_INTERVAL: u64 = 10000;
//this is the CLI block, params that get populated appear before
let bwt_fn: String;
let long_read_fn: String;
let corrected_read_fn: String;
let mut kmer_sizes: Vec<usize> = vec![21, 59];
let mut threads: usize = 1;
let mut begin_id: u64 = 0;
let mut end_id: u64 = 0xFFFFFFFFFFFFFFFF;
let mut min_count: u64 = 5;
let mut min_frac: f64 = 0.1;
let mut branch_factor: f64 = 4.0;
let mut cache_size: usize = 8; | identifier_body |
|
flask_main.py | not in flask.session:
init_session_values()
return render_template('index.html')
@app.route("/choose")
def choose():
## We'll need authorization to list calendars
## I wanted to put what follows into a function, but had
## to pull it back here because the redirect has to be a
## 'return'
app.logger.debug("Checking credentials for Google calendar access")
credentials = valid_credentials()
if not credentials:
app.logger.debug("Redirecting to authorization")
return flask.redirect(flask.url_for('oauth2callback'))
gcal_service = get_gcal_service(credentials)
app.logger.debug("Returned from get_gcal_service")
flask.g.calendars = list_calendars(gcal_service)
return render_template('index.html')
####
#
# Google calendar authorization:
# Returns us to the main /choose screen after inserting
# the calendar_service object in the session state. May
# redirect to OAuth server first, and may take multiple
# trips through the oauth2 callback function.
#
# Protocol for use ON EACH REQUEST:
# First, check for valid credentials
# If we don't have valid credentials
# Get credentials (jump to the oauth2 protocol)
# (redirects back to /choose, this time with credentials)
# If we do have valid credentials
# Get the service object
#
# The final result of successful authorization is a 'service'
# object. We use a 'service' object to actually retrieve data
# from the Google services. Service objects are NOT serializable ---
# we can't stash one in a cookie. Instead, on each request we
# get a fresh service object from our credentials, which are
# serializable.
#
# Note that after authorization we always redirect to /choose;
# If this is unsatisfactory, we'll need a session variable to use
# as a 'continuation' or 'return address' to use instead.
#
####
def valid_credentials():
"""
Returns OAuth2 credentials if we have valid
credentials in the session. This is a 'truthy' value.
Return None if we don't have credentials, or if they
have expired or are otherwise invalid. This is a 'falsy' value.
"""
if 'credentials' not in flask.session:
return None
credentials = client.OAuth2Credentials.from_json(
flask.session['credentials'])
if (credentials.invalid or
credentials.access_token_expired):
|
return credentials
def get_gcal_service(credentials):
"""
We need a Google calendar 'service' object to obtain
list of calendars, busy times, etc. This requires
authorization. If authorization is already in effect,
we'll just return with the authorization. Otherwise,
control flow will be interrupted by authorization, and we'll
end up redirected back to /choose *without a service object*.
Then the second call will succeed without additional authorization.
"""
app.logger.debug("Entering get_gcal_service")
http_auth = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http_auth)
app.logger.debug("Returning service")
return service
@app.route('/oauth2callback')
def oauth2callback():
"""
The 'flow' has this one place to call back to. We'll enter here
more than once as steps in the flow are completed, and need to keep
track of how far we've gotten. The first time we'll do the first
step, the second time we'll skip the first step and do the second,
and so on.
"""
app.logger.debug("Entering oauth2callback")
flow = client.flow_from_clientsecrets(
CLIENT_SECRET_FILE,
scope= SCOPES,
redirect_uri=flask.url_for('oauth2callback', _external=True))
## Note we are *not* redirecting above. We are noting *where*
## we will redirect to, which is this function.
## The *second* time we enter here, it's a callback
## with 'code' set in the URL parameter. If we don't
## see that, it must be the first time through, so we
## need to do step 1.
app.logger.debug("Got flow")
if 'code' not in flask.request.args:
app.logger.debug("Code not in flask.request.args")
auth_uri = flow.step1_get_authorize_url()
return flask.redirect(auth_uri)
## This will redirect back here, but the second time through
## we'll have the 'code' parameter set
else:
## It's the second time through ... we can tell because
## we got the 'code' argument in the URL.
app.logger.debug("Code was in flask.request.args")
auth_code = flask.request.args.get('code')
credentials = flow.step2_exchange(auth_code)
flask.session['credentials'] = credentials.to_json()
## Now I can build the service and execute the query,
## but for the moment I'll just log it and go back to
## the main screen
app.logger.debug("Got credentials")
return flask.redirect(flask.url_for('choose'))
#####
#
# Option setting: Buttons or forms that add some
# information into session state. Don't do the
# computation here; use of the information might
# depend on what other information we have.
# Setting an option sends us back to the main display
# page, where we may put the new information to use.
#
#####
@app.route('/setrange', methods=['POST'])
def setrange():
"""
User chose a date range with the bootstrap daterange
widget.
"""
app.logger.debug("Entering setrange")
flask.flash("Setrange gave us '{}'".format(
request.form.get('daterange')))
daterange = request.form.get('daterange')
flask.session['daterange'] = daterange
daterange_parts = daterange.split()
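# The widget submits "MM/DD/YYYY - MM/DD/YYYY", so parts[0] and parts[2] are the begin and end dates (parts[1] is the dash).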
flask.session['begin_date'] = interpret_date(daterange_parts[0])
flask.session['end_date'] = interpret_date(daterange_parts[2])
app.logger.debug("Setrange parsed {} - {} dates as {} - {}".format(
daterange_parts[0], daterange_parts[1],
flask.session['begin_date'], flask.session['end_date']))
return flask.redirect(flask.url_for("choose"))
####
#
# Initialize session variables
#
####
def init_session_values():
"""
Start with some reasonable defaults for date and time ranges.
Note this must be run in app context ... can't call from main.
"""
# Default date span = tomorrow to 1 week from now
now = arrow.now('local') # We really should be using tz from browser
tomorrow = now.replace(days=+1)
nextweek = now.replace(days=+7)
flask.session["begin_date"] = tomorrow.floor('day').isoformat()
flask.session["end_date"] = nextweek.ceil('day').isoformat()
flask.session["daterange"] = "{} - {}".format(
tomorrow.format("MM/DD/YYYY"),
nextweek.format("MM/DD/YYYY"))
# Default time span each day, 9am to 5pm
flask.session["begin_time"] = interpret_time("9am")
flask.session["end_time"] = interpret_time("5pm")
def interpret_time( text ):
"""
Read time in a human-compatible format and
interpret as ISO format with local timezone.
May throw exception if time can't be interpreted. In that
case it will also flash a message explaining accepted formats.
"""
app.logger.debug("Decoding time '{}'".format(text))
time_formats = ["ha", "h:mma", "h:mm a", "H:mm"]
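# Accepted forms, e.g. "9am", "1:30pm", "1:30 pm", or 24-hour "13:30".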
try:
as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())
as_arrow = as_arrow.replace(year=2016) #HACK see below
app.logger.debug("Succeeded interpreting time")
except:
app.logger.debug("Failed to interpret time")
flask.flash("Time '{}' didn't match accepted formats 13:30 or 1:30pm"
.format(text))
raise
return as_arrow.isoformat()
#HACK #Workaround
# isoformat() on raspberry Pi does not work for some dates
# far from now. It will fail with an overflow from time stamp out
# of range while checking for daylight savings time. Workaround is
# to force the date-time combination into the year 2016, which seems to
# get the timestamp into a reasonable range. This workaround should be
# removed when Arrow or Dateutil.tz is fixed.
# FIXME: Remove the workaround when arrow is fixed (but only after testing
# on raspberry Pi --- failure is likely due to 32-bit integers on that platform)
def interpret_date( text ):
"""
Convert text of date to ISO format used internally,
with the local time zone.
"""
try:
as_arrow = arrow.get(text, "MM/DD/YYYY").replace(
tzinfo=tz.tzlocal())
except:
flask.flash("Date '{}' didn't fit expected format 12/31/2001")
raise
return as_arrow | return None | conditional_block |
flask_main.py | not in flask.session:
init_session_values()
return render_template('index.html')
@app.route("/choose")
def choose():
## We'll need authorization to list calendars
## I wanted to put what follows into a function, but had
## to pull it back here because the redirect has to be a
## 'return'
app.logger.debug("Checking credentials for Google calendar access")
credentials = valid_credentials()
if not credentials:
app.logger.debug("Redirecting to authorization")
return flask.redirect(flask.url_for('oauth2callback'))
gcal_service = get_gcal_service(credentials)
app.logger.debug("Returned from get_gcal_service")
flask.g.calendars = list_calendars(gcal_service)
return render_template('index.html')
####
#
# Google calendar authorization:
# Returns us to the main /choose screen after inserting
# the calendar_service object in the session state. May
# redirect to OAuth server first, and may take multiple
# trips through the oauth2 callback function.
#
# Protocol for use ON EACH REQUEST:
# First, check for valid credentials
# If we don't have valid credentials
# Get credentials (jump to the oauth2 protocol)
# (redirects back to /choose, this time with credentials)
# If we do have valid credentials
# Get the service object
#
# The final result of successful authorization is a 'service'
# object. We use a 'service' object to actually retrieve data
# from the Google services. Service objects are NOT serializable ---
# we can't stash one in a cookie. Instead, on each request we
# get a fresh service object from our credentials, which are
# serializable.
#
# Note that after authorization we always redirect to /choose;
# If this is unsatisfactory, we'll need a session variable to use
# as a 'continuation' or 'return address' to use instead.
#
####
def valid_credentials():
"""
Returns OAuth2 credentials if we have valid
credentials in the session. This is a 'truthy' value.
Return None if we don't have credentials, or if they
have expired or are otherwise invalid. This is a 'falsy' value.
"""
if 'credentials' not in flask.session:
return None
credentials = client.OAuth2Credentials.from_json(
flask.session['credentials'])
if (credentials.invalid or
credentials.access_token_expired):
return None
return credentials
def get_gcal_service(credentials):
"""
We need a Google calendar 'service' object to obtain
list of calendars, busy times, etc. This requires
authorization. If authorization is already in effect,
we'll just return with the authorization. Otherwise,
control flow will be interrupted by authorization, and we'll
end up redirected back to /choose *without a service object*.
Then the second call will succeed without additional authorization.
"""
app.logger.debug("Entering get_gcal_service")
http_auth = credentials.authorize(httplib2.Http()) | service = discovery.build('calendar', 'v3', http=http_auth)
app.logger.debug("Returning service")
return service
@app.route('/oauth2callback')
def oauth2callback():
"""
The 'flow' has this one place to call back to. We'll enter here
more than once as steps in the flow are completed, and need to keep
track of how far we've gotten. The first time we'll do the first
step, the second time we'll skip the first step and do the second,
and so on.
"""
app.logger.debug("Entering oauth2callback")
flow = client.flow_from_clientsecrets(
CLIENT_SECRET_FILE,
scope= SCOPES,
redirect_uri=flask.url_for('oauth2callback', _external=True))
## Note we are *not* redirecting above. We are noting *where*
## we will redirect to, which is this function.
## The *second* time we enter here, it's a callback
## with 'code' set in the URL parameter. If we don't
## see that, it must be the first time through, so we
## need to do step 1.
app.logger.debug("Got flow")
if 'code' not in flask.request.args:
app.logger.debug("Code not in flask.request.args")
auth_uri = flow.step1_get_authorize_url()
return flask.redirect(auth_uri)
## This will redirect back here, but the second time through
## we'll have the 'code' parameter set
else:
## It's the second time through ... we can tell because
## we got the 'code' argument in the URL.
app.logger.debug("Code was in flask.request.args")
auth_code = flask.request.args.get('code')
credentials = flow.step2_exchange(auth_code)
flask.session['credentials'] = credentials.to_json()
## Now I can build the service and execute the query,
## but for the moment I'll just log it and go back to
## the main screen
app.logger.debug("Got credentials")
return flask.redirect(flask.url_for('choose'))
#####
#
# Option setting: Buttons or forms that add some
# information into session state. Don't do the
# computation here; use of the information might
# depend on what other information we have.
# Setting an option sends us back to the main display
# page, where we may put the new information to use.
#
#####
@app.route('/setrange', methods=['POST'])
def setrange():
"""
User chose a date range with the bootstrap daterange
widget.
"""
app.logger.debug("Entering setrange")
flask.flash("Setrange gave us '{}'".format(
request.form.get('daterange')))
daterange = request.form.get('daterange')
flask.session['daterange'] = daterange
daterange_parts = daterange.split()
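# The widget submits "MM/DD/YYYY - MM/DD/YYYY", so parts[0] and parts[2] are the begin and end dates (parts[1] is the dash).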
flask.session['begin_date'] = interpret_date(daterange_parts[0])
flask.session['end_date'] = interpret_date(daterange_parts[2])
app.logger.debug("Setrange parsed {} - {} dates as {} - {}".format(
daterange_parts[0], daterange_parts[1],
flask.session['begin_date'], flask.session['end_date']))
return flask.redirect(flask.url_for("choose"))
####
#
# Initialize session variables
#
####
def init_session_values():
"""
Start with some reasonable defaults for date and time ranges.
Note this must be run in app context ... can't call from main.
"""
# Default date span = tomorrow to 1 week from now
now = arrow.now('local') # We really should be using tz from browser
tomorrow = now.replace(days=+1)
nextweek = now.replace(days=+7)
flask.session["begin_date"] = tomorrow.floor('day').isoformat()
flask.session["end_date"] = nextweek.ceil('day').isoformat()
flask.session["daterange"] = "{} - {}".format(
tomorrow.format("MM/DD/YYYY"),
nextweek.format("MM/DD/YYYY"))
# Default time span each day, 9am to 5pm
flask.session["begin_time"] = interpret_time("9am")
flask.session["end_time"] = interpret_time("5pm")
def interpret_time( text ):
"""
Read time in a human-compatible format and
interpret as ISO format with local timezone.
May throw exception if time can't be interpreted. In that
case it will also flash a message explaining accepted formats.
"""
app.logger.debug("Decoding time '{}'".format(text))
time_formats = ["ha", "h:mma", "h:mm a", "H:mm"]
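# Accepted forms, e.g. "9am", "1:30pm", "1:30 pm", or 24-hour "13:30".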
try:
as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())
as_arrow = as_arrow.replace(year=2016) #HACK see below
app.logger.debug("Succeeded interpreting time")
except:
app.logger.debug("Failed to interpret time")
flask.flash("Time '{}' didn't match accepted formats 13:30 or 1:30pm"
.format(text))
raise
return as_arrow.isoformat()
#HACK #Workaround
# isoformat() on raspberry Pi does not work for some dates
# far from now. It will fail with an overflow from time stamp out
# of range while checking for daylight savings time. Workaround is
# to force the date-time combination into the year 2016, which seems to
# get the timestamp into a reasonable range. This workaround should be
# removed when Arrow or Dateutil.tz is fixed.
# FIXME: Remove the workaround when arrow is fixed (but only after testing
# on raspberry Pi --- failure is likely due to 32-bit integers on that platform)
def interpret_date( text ):
"""
Convert text of date to ISO format used internally,
with the local time zone.
"""
try:
as_arrow = arrow.get(text, "MM/DD/YYYY").replace(
tzinfo=tz.tzlocal())
except:
flask.flash("Date '{}' didn't fit expected format 12/31/2001")
raise
return as_arrow | random_line_split |
|
flask_main.py | not in flask.session:
init_session_values()
return render_template('index.html')
@app.route("/choose")
def choose():
## We'll need authorization to list calendars
## I wanted to put what follows into a function, but had
## to pull it back here because the redirect has to be a
## 'return'
app.logger.debug("Checking credentials for Google calendar access")
credentials = valid_credentials()
if not credentials:
app.logger.debug("Redirecting to authorization")
return flask.redirect(flask.url_for('oauth2callback'))
gcal_service = get_gcal_service(credentials)
app.logger.debug("Returned from get_gcal_service")
flask.g.calendars = list_calendars(gcal_service)
return render_template('index.html')
####
#
# Google calendar authorization:
# Returns us to the main /choose screen after inserting
# the calendar_service object in the session state. May
# redirect to OAuth server first, and may take multiple
# trips through the oauth2 callback function.
#
# Protocol for use ON EACH REQUEST:
# First, check for valid credentials
# If we don't have valid credentials
# Get credentials (jump to the oauth2 protocol)
# (redirects back to /choose, this time with credentials)
# If we do have valid credentials
# Get the service object
#
# The final result of successful authorization is a 'service'
# object. We use a 'service' object to actually retrieve data
# from the Google services. Service objects are NOT serializable ---
# we can't stash one in a cookie. Instead, on each request we
# get a fresh service object from our credentials, which are
# serializable.
#
# Note that after authorization we always redirect to /choose;
# If this is unsatisfactory, we'll need a session variable to use
# as a 'continuation' or 'return address' to use instead.
#
####
def valid_credentials():
"""
Returns OAuth2 credentials if we have valid
credentials in the session. This is a 'truthy' value.
Return None if we don't have credentials, or if they
have expired or are otherwise invalid. This is a 'falsy' value.
"""
if 'credentials' not in flask.session:
return None
credentials = client.OAuth2Credentials.from_json(
flask.session['credentials'])
if (credentials.invalid or
credentials.access_token_expired):
return None
return credentials
def get_gcal_service(credentials):
"""
We need a Google calendar 'service' object to obtain
list of calendars, busy times, etc. This requires
authorization. If authorization is already in effect,
we'll just return with the authorization. Otherwise,
control flow will be interrupted by authorization, and we'll
end up redirected back to /choose *without a service object*.
Then the second call will succeed without additional authorization.
"""
app.logger.debug("Entering get_gcal_service")
http_auth = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http_auth)
app.logger.debug("Returning service")
return service
@app.route('/oauth2callback')
def oauth2callback():
"""
The 'flow' has this one place to call back to. We'll enter here
more than once as steps in the flow are completed, and need to keep
track of how far we've gotten. The first time we'll do the first
step, the second time we'll skip the first step and do the second,
and so on.
"""
app.logger.debug("Entering oauth2callback")
flow = client.flow_from_clientsecrets(
CLIENT_SECRET_FILE,
scope= SCOPES,
redirect_uri=flask.url_for('oauth2callback', _external=True))
## Note we are *not* redirecting above. We are noting *where*
## we will redirect to, which is this function.
## The *second* time we enter here, it's a callback
## with 'code' set in the URL parameter. If we don't
## see that, it must be the first time through, so we
## need to do step 1.
app.logger.debug("Got flow")
if 'code' not in flask.request.args:
app.logger.debug("Code not in flask.request.args")
auth_uri = flow.step1_get_authorize_url()
return flask.redirect(auth_uri)
## This will redirect back here, but the second time through
## we'll have the 'code' parameter set
else:
## It's the second time through ... we can tell because
## we got the 'code' argument in the URL.
app.logger.debug("Code was in flask.request.args")
auth_code = flask.request.args.get('code')
credentials = flow.step2_exchange(auth_code)
flask.session['credentials'] = credentials.to_json()
## Now I can build the service and execute the query,
## but for the moment I'll just log it and go back to
## the main screen
app.logger.debug("Got credentials")
return flask.redirect(flask.url_for('choose'))
#####
#
# Option setting: Buttons or forms that add some
# information into session state. Don't do the
# computation here; use of the information might
# depend on what other information we have.
# Setting an option sends us back to the main display
# page, where we may put the new information to use.
#
#####
@app.route('/setrange', methods=['POST'])
def setrange():
"""
User chose a date range with the bootstrap daterange
widget.
"""
app.logger.debug("Entering setrange")
flask.flash("Setrange gave us '{}'".format(
request.form.get('daterange')))
daterange = request.form.get('daterange')
flask.session['daterange'] = daterange
daterange_parts = daterange.split()
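# The widget submits "MM/DD/YYYY - MM/DD/YYYY", so parts[0] and parts[2] are the begin and end dates (parts[1] is the dash).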
flask.session['begin_date'] = interpret_date(daterange_parts[0])
flask.session['end_date'] = interpret_date(daterange_parts[2])
app.logger.debug("Setrange parsed {} - {} dates as {} - {}".format(
daterange_parts[0], daterange_parts[1],
flask.session['begin_date'], flask.session['end_date']))
return flask.redirect(flask.url_for("choose"))
####
#
# Initialize session variables
#
####
def init_session_values():
"""
Start with some reasonable defaults for date and time ranges.
Note this must be run in app context ... can't call from main.
"""
# Default date span = tomorrow to 1 week from now
now = arrow.now('local') # We really should be using tz from browser
tomorrow = now.replace(days=+1)
nextweek = now.replace(days=+7)
flask.session["begin_date"] = tomorrow.floor('day').isoformat()
flask.session["end_date"] = nextweek.ceil('day').isoformat()
flask.session["daterange"] = "{} - {}".format(
tomorrow.format("MM/DD/YYYY"),
nextweek.format("MM/DD/YYYY"))
# Default time span each day, 9am to 5pm
flask.session["begin_time"] = interpret_time("9am")
flask.session["end_time"] = interpret_time("5pm")
def | ( text ):
"""
Read time in a human-compatible format and
interpret as ISO format with local timezone.
May throw exception if time can't be interpreted. In that
case it will also flash a message explaining accepted formats.
"""
app.logger.debug("Decoding time '{}'".format(text))
time_formats = ["ha", "h:mma", "h:mm a", "H:mm"]
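# Accepted forms, e.g. "9am", "1:30pm", "1:30 pm", or 24-hour "13:30".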
try:
as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())
as_arrow = as_arrow.replace(year=2016) #HACK see below
app.logger.debug("Succeeded interpreting time")
except:
app.logger.debug("Failed to interpret time")
flask.flash("Time '{}' didn't match accepted formats 13:30 or 1:30pm"
.format(text))
raise
return as_arrow.isoformat()
#HACK #Workaround
# isoformat() on raspberry Pi does not work for some dates
# far from now. It will fail with an overflow from time stamp out
# of range while checking for daylight savings time. Workaround is
# to force the date-time combination into the year 2016, which seems to
# get the timestamp into a reasonable range. This workaround should be
# removed when Arrow or Dateutil.tz is fixed.
# FIXME: Remove the workaround when arrow is fixed (but only after testing
# on raspberry Pi --- failure is likely due to 32-bit integers on that platform)
def interpret_date( text ):
"""
Convert text of date to ISO format used internally,
with the local time zone.
"""
try:
as_arrow = arrow.get(text, "MM/DD/YYYY").replace(
tzinfo=tz.tzlocal())
except:
flask.flash("Date '{}' didn't fit expected format 12/31/2001")
raise
return as | interpret_time | identifier_name |
flask_main.py | .session:
init_session_values()
return render_template('index.html')
@app.route("/choose")
def choose():
## We'll need authorization to list calendars
## I wanted to put what follows into a function, but had
## to pull it back here because the redirect has to be a
## 'return'
app.logger.debug("Checking credentials for Google calendar access")
credentials = valid_credentials()
if not credentials:
app.logger.debug("Redirecting to authorization")
return flask.redirect(flask.url_for('oauth2callback'))
gcal_service = get_gcal_service(credentials)
app.logger.debug("Returned from get_gcal_service")
flask.g.calendars = list_calendars(gcal_service)
return render_template('index.html')
####
#
# Google calendar authorization:
# Returns us to the main /choose screen after inserting
# the calendar_service object in the session state. May
# redirect to OAuth server first, and may take multiple
# trips through the oauth2 callback function.
#
# Protocol for use ON EACH REQUEST:
# First, check for valid credentials
# If we don't have valid credentials
# Get credentials (jump to the oauth2 protocol)
# (redirects back to /choose, this time with credentials)
# If we do have valid credentials
# Get the service object
#
# The final result of successful authorization is a 'service'
# object. We use a 'service' object to actually retrieve data
# from the Google services. Service objects are NOT serializable ---
# we can't stash one in a cookie. Instead, on each request we
# get a fresh service object from our credentials, which are
# serializable.
#
# Note that after authorization we always redirect to /choose;
# If this is unsatisfactory, we'll need a session variable to use
# as a 'continuation' or 'return address' to use instead.
#
####
def valid_credentials():
"""
Returns OAuth2 credentials if we have valid
credentials in the session. This is a 'truthy' value.
Return None if we don't have credentials, or if they
have expired or are otherwise invalid. This is a 'falsy' value.
"""
if 'credentials' not in flask.session:
return None
credentials = client.OAuth2Credentials.from_json(
flask.session['credentials'])
if (credentials.invalid or
credentials.access_token_expired):
return None
return credentials
def get_gcal_service(credentials):
"""
We need a Google calendar 'service' object to obtain
list of calendars, busy times, etc. This requires
authorization. If authorization is already in effect,
we'll just return with the authorization. Otherwise,
control flow will be interrupted by authorization, and we'll
end up redirected back to /choose *without a service object*.
Then the second call will succeed without additional authorization.
"""
app.logger.debug("Entering get_gcal_service")
http_auth = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http_auth)
app.logger.debug("Returning service")
return service
@app.route('/oauth2callback')
def oauth2callback():
"""
The 'flow' has this one place to call back to. We'll enter here
more than once as steps in the flow are completed, and need to keep
track of how far we've gotten. The first time we'll do the first
step, the second time we'll skip the first step and do the second,
and so on.
"""
app.logger.debug("Entering oauth2callback")
flow = client.flow_from_clientsecrets(
CLIENT_SECRET_FILE,
scope= SCOPES,
redirect_uri=flask.url_for('oauth2callback', _external=True))
## Note we are *not* redirecting above. We are noting *where*
## we will redirect to, which is this function.
## The *second* time we enter here, it's a callback
## with 'code' set in the URL parameter. If we don't
## see that, it must be the first time through, so we
## need to do step 1.
app.logger.debug("Got flow")
if 'code' not in flask.request.args:
app.logger.debug("Code not in flask.request.args")
auth_uri = flow.step1_get_authorize_url()
return flask.redirect(auth_uri)
## This will redirect back here, but the second time through
## we'll have the 'code' parameter set
else:
## It's the second time through ... we can tell because
## we got the 'code' argument in the URL.
app.logger.debug("Code was in flask.request.args")
auth_code = flask.request.args.get('code')
credentials = flow.step2_exchange(auth_code)
flask.session['credentials'] = credentials.to_json()
## Now I can build the service and execute the query,
## but for the moment I'll just log it and go back to
## the main screen
app.logger.debug("Got credentials")
return flask.redirect(flask.url_for('choose'))
#####
#
# Option setting: Buttons or forms that add some
# information into session state. Don't do the
# computation here; use of the information might
# depend on what other information we have.
# Setting an option sends us back to the main display
# page, where we may put the new information to use.
#
#####
@app.route('/setrange', methods=['POST'])
def setrange():
"""
User chose a date range with the bootstrap daterange
widget.
"""
app.logger.debug("Entering setrange")
flask.flash("Setrange gave us '{}'".format(
request.form.get('daterange')))
daterange = request.form.get('daterange')
flask.session['daterange'] = daterange
daterange_parts = daterange.split()
flask.session['begin_date'] = interpret_date(daterange_parts[0])
flask.session['end_date'] = interpret_date(daterange_parts[2])
app.logger.debug("Setrange parsed {} - {} dates as {} - {}".format(
daterange_parts[0], daterange_parts[2],
flask.session['begin_date'], flask.session['end_date']))
return flask.redirect(flask.url_for("choose"))
####
#
# Initialize session variables
#
####
def init_session_values():
"""
Start with some reasonable defaults for date and time ranges.
Note this must be run in app context ... can't call from main.
"""
# Default date span = tomorrow to 1 week from now
now = arrow.now('local') # We really should be using tz from browser
tomorrow = now.replace(days=+1)
nextweek = now.replace(days=+7)
flask.session["begin_date"] = tomorrow.floor('day').isoformat()
flask.session["end_date"] = nextweek.ceil('day').isoformat()
flask.session["daterange"] = "{} - {}".format(
tomorrow.format("MM/DD/YYYY"),
nextweek.format("MM/DD/YYYY"))
# Default time span each day, 9 to 5
flask.session["begin_time"] = interpret_time("9am")
flask.session["end_time"] = interpret_time("5pm")
def interpret_time( text ):
"""
Read time in a human-compatible format and
interpret as ISO format with local timezone.
May throw exception if time can't be interpreted. In that
case it will also flash a message explaining accepted formats.
"""
app.logger.debug("Decoding time '{}'".format(text))
time_formats = ["ha", "h:mma", "h:mm a", "H:mm"]
try:
as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())
as_arrow = as_arrow.replace(year=2016) #HACK see below
app.logger.debug("Succeeded interpreting time")
except:
app.logger.debug("Failed to interpret time")
flask.flash("Time '{}' didn't match accepted formats 13:30 or 1:30pm"
.format(text))
raise
return as_arrow.isoformat()
#HACK #Workaround
# isoformat() on the Raspberry Pi does not work for some dates
# far from now. It fails with an out-of-range timestamp overflow
# while checking for daylight saving time. The workaround is to
# force the date-time combination into the year 2016, which seems to
# keep the timestamp in a reasonable range. This workaround should be
# removed when Arrow or dateutil.tz is fixed.
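# Illustrative example of why the year is forced (values assumed): parsing a
# bare time leaves arrow at its default date near year 1, e.g.
#   arrow.get("9am", "ha")   # roughly 0001-01-01T09:00:00+00:00
# and DST lookups that far in the past can overflow a 32-bit timestamp, so
# .replace(year=2016) keeps the value in a safe range; only the time of day
# is used here anyway.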
# FIXME: Remove the workaround when arrow is fixed (but only after testing
# on raspberry Pi --- failure is likely due to 32-bit integers on that platform)
def interpret_date( text ):
| """
Convert text of date to ISO format used internally,
with the local time zone.
"""
try:
as_arrow = arrow.get(text, "MM/DD/YYYY").replace(
tzinfo=tz.tzlocal())
except:
flask.flash("Date '{}' didn't fit expected format 12/31/2001")
raise
return as_arrow.isoformat() | identifier_body |
|
root.go | // Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if err := RootCmd.Execute(); err != nil {
fatal(err.Error(), 1)
}
}
var varApiKey = "CRONITOR_API_KEY"
var varHostname = "CRONITOR_HOSTNAME"
var varLog = "CRONITOR_LOG"
var varPingApiKey = "CRONITOR_PING_API_KEY"
var varExcludeText = "CRONITOR_EXCLUDE_TEXT"
var varConfig = "CRONITOR_CONFIG"
func init() {
userAgent = fmt.Sprintf("CronitorCLI/%s", Version)
cobra.OnInitialize(initConfig)
// Here you will define your flags and configuration settings.
// Cobra supports persistent flags, which, if defined here,
// will be global for your application.
RootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", cfgFile, "Config file")
RootCmd.PersistentFlags().StringVarP(&apiKey, "api-key", "k", apiKey, "Cronitor API Key")
RootCmd.PersistentFlags().StringVarP(&pingApiKey, "ping-api-key", "p", pingApiKey, "Ping API Key")
RootCmd.PersistentFlags().StringVarP(&hostname, "hostname", "n", hostname, "A unique identifier for this host (default: system hostname)")
RootCmd.PersistentFlags().StringVarP(&debugLog, "log", "l", debugLog, "Write debug logs to supplied file")
RootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", verbose, "Verbose output")
RootCmd.PersistentFlags().BoolVar(&dev, "use-dev", dev, "Dev mode")
RootCmd.PersistentFlags().MarkHidden("use-dev")
viper.BindPFlag(varApiKey, RootCmd.PersistentFlags().Lookup("api-key"))
viper.BindPFlag(varHostname, RootCmd.PersistentFlags().Lookup("hostname"))
viper.BindPFlag(varLog, RootCmd.PersistentFlags().Lookup("log"))
viper.BindPFlag(varPingApiKey, RootCmd.PersistentFlags().Lookup("ping-api-key"))
viper.BindPFlag(varConfig, RootCmd.PersistentFlags().Lookup("config"))
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
viper.AutomaticEnv() // read in environment variables that match
// If a custom config file is specified by flag or env var, use it. Otherwise use default file.
if len(viper.GetString(varConfig)) > 0 {
viper.SetConfigFile(viper.GetString(varConfig))
} else {
viper.AddConfigPath(defaultConfigFileDirectory())
viper.SetConfigName("cronitor")
}
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
log("Reading config from " + viper.ConfigFileUsed())
}
}
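// Illustrative sketch (not part of the CLI itself): with the flag/env bindings
// above, a value set any of these ways -- the --api-key flag, the
// CRONITOR_API_KEY environment variable, or a "cronitor" config file in
// defaultConfigFileDirectory() -- is read back the same way:
//
//	apiKey := viper.GetString(varApiKey) // e.g. "abc123" if CRONITOR_API_KEY=abc123 was exported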
func sendPing(endpoint string, uniqueIdentifier string, message string, series string, timestamp float64, duration *float64, exitCode *int, group *sync.WaitGroup) {
defer group.Done()
Client := &http.Client{
Timeout: time.Second * 10,
}
hostname := effectiveHostname()
pingApiAuthKey := viper.GetString(varPingApiKey)
pingApiHost := ""
formattedStamp := ""
formattedDuration := ""
formattedStatusCode := ""
if timestamp > 0 {
formattedStamp = fmt.Sprintf("&stamp=%s", formatStamp(timestamp))
}
if len(message) > 0 {
message = fmt.Sprintf("&msg=%s", url.QueryEscape(truncateString(message, 1000)))
}
if len(pingApiAuthKey) > 0 {
pingApiAuthKey = fmt.Sprintf("&auth_key=%s", truncateString(pingApiAuthKey, 50))
}
if len(hostname) > 0 {
hostname = fmt.Sprintf("&host=%s", url.QueryEscape(truncateString(hostname, 50)))
}
// By passing duration up, we save the computation on the server side
if duration != nil {
formattedDuration = fmt.Sprintf("&duration=%s", formatStamp(*duration))
}
// We aren't using exit code at time of writing, but we have the field available for healthcheck monitors.
if exitCode != nil {
formattedStatusCode = fmt.Sprintf("&status_code=%d", *exitCode)
}
// The `series` data is used to match run events with complete or fail. Useful if multiple instances of a job are running.
if len(series) > 0 {
series = fmt.Sprintf("&series=%s", series)
}
pingSent := false
uri := ""
for i := 1; i <= 6; i++ {
if dev {
pingApiHost = "http://dev.cronitor.io"
} else if i > 2 && pingApiHost == "https://cronitor.link" {
pingApiHost = "https://cronitor.io"
} else {
pingApiHost = "https://cronitor.link"
}
// After 2 failed attempts, take a brief random break before trying again
if i > 2 {
time.Sleep(time.Second * time.Duration(float32(i)*1.5*rand.Float32()))
}
uri = fmt.Sprintf("%s/%s/%s?try=%d%s%s%s%s%s%s%s", pingApiHost, uniqueIdentifier, endpoint, i, formattedStamp, message, pingApiAuthKey, hostname, formattedDuration, series, formattedStatusCode)
log("Sending ping " + uri)
request, _ := http.NewRequest("GET", uri, nil)
request.Header.Add("User-Agent", userAgent)
response, err := Client.Do(request)
if err != nil {
log(err.Error())
continue
}
_, err = ioutil.ReadAll(response.Body)
response.Body.Close()
// Any 2xx is considered a successful response
if response.StatusCode >= 200 && response.StatusCode < 300 {
pingSent = true
break
}
// Give up (without retrying) on any 4xx response, e.g. 429 Too Many Requests
if response.StatusCode >= 400 && response.StatusCode < 500 {
pingSent = true
break
}
}
if !pingSent {
raven.CaptureErrorAndWait(errors.New("Ping failure; retries exhausted: "+uri), nil)
}
}
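// Illustrative usage sketch (monitor code and message are made up): sendPing is
// run behind a WaitGroup so the caller can fire the ping and keep working.
//
//	var wg sync.WaitGroup
//	wg.Add(1)
//	go sendPing("run", "d3x0c1", "starting nightly backup", "", 0, nil, nil, &wg)
//	// ... do the monitored work ...
//	wg.Wait()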
func effectiveHostname() string {
if len(viper.GetString(varHostname)) > 0 {
return viper.GetString(varHostname)
}
hostname, _ := os.Hostname()
return hostname
}
func effectiveTimezoneLocationName() lib.TimezoneLocationName {
// First, check if a TZ or CRON_TZ environment variable is set -- different distros use different variables
if locale, isSetFlag := os.LookupEnv("TZ"); isSetFlag {
return lib.TimezoneLocationName{locale}
}
if locale, isSetFlag := os.LookupEnv("CRON_TZ"); isSetFlag {
return lib.TimezoneLocationName{locale}
}
// Attempt to parse timedatectl (should work on FreeBSD, many linux distros)
if output, err := exec.Command("timedatectl").Output(); err == nil {
outputString := strings.Replace(string(output), "Time zone", "Timezone", -1)
r := regexp.MustCompile(`(?m:Timezone:\s+(\S+).+$)`)
if ret := r.FindStringSubmatch(outputString); ret != nil && len(ret) > 1 {
return lib.TimezoneLocationName{ret[1]}
}
}
// If /etc/localtime is a symlink, check what it is linking to
if localtimeFile, err := os.Lstat("/etc/localtime"); err == nil && localtimeFile.Mode()&os.ModeSymlink == os.ModeSymlink {
if symlink, _ := os.Readlink("/etc/localtime"); len(symlink) > 0 {
if strings.Contains(symlink, "UTC") {
return lib.TimezoneLocationName{"UTC"}
}
symlinkParts := strings.Split(symlink, "/")
return lib.TimezoneLocationName{strings.Join(symlinkParts[len(symlinkParts)-2:], "/")}
}
}
// If an /etc/timezone file exists, read it; there is no guarantee it is actually in use
if locale, err := ioutil.ReadFile("/etc/timezone"); err == nil {
return lib.TimezoneLocationName{string(locale)}
}
return lib.TimezoneLocationName{""}
}
func | () string {
if runtime.GOOS == "windows" {
return fmt.Sprintf("%s\\ProgramData\\Cronitor", os.Getenv("SYSTEMDRIVE"))
}
return "/etc/cronitor"
}
func truncateString(s string, length int) string {
if len(s) <= length {
return s
}
return s[:length]
}
func printSuccessText(message string, indent bool) {
if isAutoDiscover || isSilent {
log(message)
} else {
color := color.New(color.FgHiGreen)
if indent {
color.Println(fmt.Sprintf(" |--► %s", message))
} else {
color.Println(fmt.Sprintf("----► %s", message))
}
}
}
func printDoneText(message string, indent bool) {
if isAuto | defaultConfigFileDirectory | identifier_name |
root.go | // Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if err := RootCmd.Execute(); err != nil {
fatal(err.Error(), 1)
}
}
var varApiKey = "CRONITOR_API_KEY"
var varHostname = "CRONITOR_HOSTNAME"
var varLog = "CRONITOR_LOG"
var varPingApiKey = "CRONITOR_PING_API_KEY"
var varExcludeText = "CRONITOR_EXCLUDE_TEXT"
var varConfig = "CRONITOR_CONFIG"
func init() {
userAgent = fmt.Sprintf("CronitorCLI/%s", Version)
cobra.OnInitialize(initConfig)
// Here you will define your flags and configuration settings.
// Cobra supports persistent flags, which, if defined here,
// will be global for your application.
RootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", cfgFile, "Config file")
RootCmd.PersistentFlags().StringVarP(&apiKey, "api-key", "k", apiKey, "Cronitor API Key")
RootCmd.PersistentFlags().StringVarP(&pingApiKey, "ping-api-key", "p", pingApiKey, "Ping API Key")
RootCmd.PersistentFlags().StringVarP(&hostname, "hostname", "n", hostname, "A unique identifier for this host (default: system hostname)")
RootCmd.PersistentFlags().StringVarP(&debugLog, "log", "l", debugLog, "Write debug logs to supplied file")
RootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", verbose, "Verbose output")
RootCmd.PersistentFlags().BoolVar(&dev, "use-dev", dev, "Dev mode")
RootCmd.PersistentFlags().MarkHidden("use-dev")
viper.BindPFlag(varApiKey, RootCmd.PersistentFlags().Lookup("api-key"))
viper.BindPFlag(varHostname, RootCmd.PersistentFlags().Lookup("hostname"))
viper.BindPFlag(varLog, RootCmd.PersistentFlags().Lookup("log"))
viper.BindPFlag(varPingApiKey, RootCmd.PersistentFlags().Lookup("ping-api-key"))
viper.BindPFlag(varConfig, RootCmd.PersistentFlags().Lookup("config"))
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
viper.AutomaticEnv() // read in environment variables that match
// If a custom config file is specified by flag or env var, use it. Otherwise use default file.
if len(viper.GetString(varConfig)) > 0 {
viper.SetConfigFile(viper.GetString(varConfig))
} else {
viper.AddConfigPath(defaultConfigFileDirectory())
viper.SetConfigName("cronitor")
}
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
log("Reading config from " + viper.ConfigFileUsed())
}
}
func sendPing(endpoint string, uniqueIdentifier string, message string, series string, timestamp float64, duration *float64, exitCode *int, group *sync.WaitGroup) {
defer group.Done()
Client := &http.Client{
Timeout: time.Second * 10,
}
hostname := effectiveHostname()
pingApiAuthKey := viper.GetString(varPingApiKey)
pingApiHost := ""
formattedStamp := ""
formattedDuration := ""
formattedStatusCode := ""
if timestamp > 0 {
formattedStamp = fmt.Sprintf("&stamp=%s", formatStamp(timestamp))
}
if len(message) > 0 {
message = fmt.Sprintf("&msg=%s", url.QueryEscape(truncateString(message, 1000)))
}
if len(pingApiAuthKey) > 0 {
pingApiAuthKey = fmt.Sprintf("&auth_key=%s", truncateString(pingApiAuthKey, 50))
}
if len(hostname) > 0 {
hostname = fmt.Sprintf("&host=%s", url.QueryEscape(truncateString(hostname, 50)))
}
// By passing duration up, we save the computation on the server side
if duration != nil {
formattedDuration = fmt.Sprintf("&duration=%s", formatStamp(*duration))
}
// We aren't using exit code at time of writing, but we have the field available for healthcheck monitors.
if exitCode != nil {
formattedStatusCode = fmt.Sprintf("&status_code=%d", *exitCode)
}
// The `series` data is used to match run events with complete or fail. Useful if multiple instances of a job are running.
if len(series) > 0 {
series = fmt.Sprintf("&series=%s", series)
}
pingSent := false
uri := ""
for i := 1; i <= 6; i++ {
if dev {
pingApiHost = "http://dev.cronitor.io"
} else if i > 2 && pingApiHost == "https://cronitor.link" {
pingApiHost = "https://cronitor.io"
} else {
pingApiHost = "https://cronitor.link"
}
// After 2 failed attempts, take a brief random break before trying again
if i > 2 {
time.Sleep(time.Second * time.Duration(float32(i)*1.5*rand.Float32()))
}
uri = fmt.Sprintf("%s/%s/%s?try=%d%s%s%s%s%s%s%s", pingApiHost, uniqueIdentifier, endpoint, i, formattedStamp, message, pingApiAuthKey, hostname, formattedDuration, series, formattedStatusCode)
log("Sending ping " + uri)
request, _ := http.NewRequest("GET", uri, nil)
request.Header.Add("User-Agent", userAgent)
response, err := Client.Do(request)
if err != nil {
log(err.Error())
continue
}
_, err = ioutil.ReadAll(response.Body)
response.Body.Close()
// Any 2xx is considered a successful response
if response.StatusCode >= 200 && response.StatusCode < 300 {
pingSent = true
break
}
// Give up (without retrying) on any 4xx response, e.g. 429 Too Many Requests
if response.StatusCode >= 400 && response.StatusCode < 500 {
pingSent = true
break
}
}
if !pingSent {
raven.CaptureErrorAndWait(errors.New("Ping failure; retries exhausted: "+uri), nil)
}
}
func effectiveHostname() string {
if len(viper.GetString(varHostname)) > 0 {
return viper.GetString(varHostname)
}
hostname, _ := os.Hostname()
return hostname
}
func effectiveTimezoneLocationName() lib.TimezoneLocationName {
// First, check if a TZ or CRON_TZ environment variable is set -- different distros use different variables
if locale, isSetFlag := os.LookupEnv("TZ"); isSetFlag {
return lib.TimezoneLocationName{locale}
}
if locale, isSetFlag := os.LookupEnv("CRON_TZ"); isSetFlag {
return lib.TimezoneLocationName{locale}
}
// Attempt to parse timedatectl (should work on FreeBSD, many linux distros)
if output, err := exec.Command("timedatectl").Output(); err == nil {
outputString := strings.Replace(string(output), "Time zone", "Timezone", -1)
r := regexp.MustCompile(`(?m:Timezone:\s+(\S+).+$)`)
if ret := r.FindStringSubmatch(outputString); ret != nil && len(ret) > 1 {
return lib.TimezoneLocationName{ret[1]}
}
}
// If /etc/localtime is a symlink, check what it is linking to
if localtimeFile, err := os.Lstat("/etc/localtime"); err == nil && localtimeFile.Mode()&os.ModeSymlink == os.ModeSymlink {
if symlink, _ := os.Readlink("/etc/localtime"); len(symlink) > 0 {
if strings.Contains(symlink, "UTC") {
return lib.TimezoneLocationName{"UTC"}
}
symlinkParts := strings.Split(symlink, "/")
return lib.TimezoneLocationName{strings.Join(symlinkParts[len(symlinkParts)-2:], "/")}
}
}
// If an /etc/timezone file exists, read it; there is no guarantee it is actually in use
if locale, err := ioutil.ReadFile("/etc/timezone"); err == nil {
return lib.TimezoneLocationName{string(locale)}
}
return lib.TimezoneLocationName{""}
}
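// Illustrative summary of the fallback order implemented above (example values):
//
//	TZ=America/Chicago                                    -> TimezoneLocationName{"America/Chicago"}
//	timedatectl: "Time zone: Europe/Berlin (CEST, +0200)" -> TimezoneLocationName{"Europe/Berlin"}
//	/etc/localtime -> /usr/share/zoneinfo/Asia/Tokyo      -> TimezoneLocationName{"Asia/Tokyo"}
//	/etc/timezone containing "UTC"                        -> TimezoneLocationName{"UTC"}
//	nothing found                                         -> TimezoneLocationName{""}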
func defaultConfigFileDirectory() string {
if runtime.GOOS == "windows" {
return fmt.Sprintf("%s\\ProgramData\\Cronitor", os.Getenv("SYSTEMDRIVE"))
}
return "/etc/cronitor"
}
func truncateString(s string, length int) string {
if len(s) <= length {
return s
}
return s[:length]
} | color := color.New(color.FgHiGreen)
if indent {
color.Println(fmt.Sprintf(" |--► %s", message))
} else {
color.Println(fmt.Sprintf("----► %s", message))
}
}
}
func printDoneText(message string, indent bool) {
if isAuto |
func printSuccessText(message string, indent bool) {
if isAutoDiscover || isSilent {
log(message)
} else { | random_line_split |
root.go | CRONITOR_LOG"
var varPingApiKey = "CRONITOR_PING_API_KEY"
var varExcludeText = "CRONITOR_EXCLUDE_TEXT"
var varConfig = "CRONITOR_CONFIG"
func init() {
userAgent = fmt.Sprintf("CronitorCLI/%s", Version)
cobra.OnInitialize(initConfig)
// Here you will define your flags and configuration settings.
// Cobra supports persistent flags, which, if defined here,
// will be global for your application.
RootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", cfgFile, "Config file")
RootCmd.PersistentFlags().StringVarP(&apiKey, "api-key", "k", apiKey, "Cronitor API Key")
RootCmd.PersistentFlags().StringVarP(&pingApiKey, "ping-api-key", "p", pingApiKey, "Ping API Key")
RootCmd.PersistentFlags().StringVarP(&hostname, "hostname", "n", hostname, "A unique identifier for this host (default: system hostname)")
RootCmd.PersistentFlags().StringVarP(&debugLog, "log", "l", debugLog, "Write debug logs to supplied file")
RootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", verbose, "Verbose output")
RootCmd.PersistentFlags().BoolVar(&dev, "use-dev", dev, "Dev mode")
RootCmd.PersistentFlags().MarkHidden("use-dev")
viper.BindPFlag(varApiKey, RootCmd.PersistentFlags().Lookup("api-key"))
viper.BindPFlag(varHostname, RootCmd.PersistentFlags().Lookup("hostname"))
viper.BindPFlag(varLog, RootCmd.PersistentFlags().Lookup("log"))
viper.BindPFlag(varPingApiKey, RootCmd.PersistentFlags().Lookup("ping-api-key"))
viper.BindPFlag(varConfig, RootCmd.PersistentFlags().Lookup("config"))
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
viper.AutomaticEnv() // read in environment variables that match
// If a custom config file is specified by flag or env var, use it. Otherwise use default file.
if len(viper.GetString(varConfig)) > 0 {
viper.SetConfigFile(viper.GetString(varConfig))
} else {
viper.AddConfigPath(defaultConfigFileDirectory())
viper.SetConfigName("cronitor")
}
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
log("Reading config from " + viper.ConfigFileUsed())
}
}
func sendPing(endpoint string, uniqueIdentifier string, message string, series string, timestamp float64, duration *float64, exitCode *int, group *sync.WaitGroup) {
defer group.Done()
Client := &http.Client{
Timeout: time.Second * 10,
}
hostname := effectiveHostname()
pingApiAuthKey := viper.GetString(varPingApiKey)
pingApiHost := ""
formattedStamp := ""
formattedDuration := ""
formattedStatusCode := ""
if timestamp > 0 {
formattedStamp = fmt.Sprintf("&stamp=%s", formatStamp(timestamp))
}
if len(message) > 0 {
message = fmt.Sprintf("&msg=%s", url.QueryEscape(truncateString(message, 1000)))
}
if len(pingApiAuthKey) > 0 {
pingApiAuthKey = fmt.Sprintf("&auth_key=%s", truncateString(pingApiAuthKey, 50))
}
if len(hostname) > 0 {
hostname = fmt.Sprintf("&host=%s", url.QueryEscape(truncateString(hostname, 50)))
}
// By passing duration up, we save the computation on the server side
if duration != nil {
formattedDuration = fmt.Sprintf("&duration=%s", formatStamp(*duration))
}
// We aren't using exit code at time of writing, but we have the field available for healthcheck monitors.
if exitCode != nil {
formattedStatusCode = fmt.Sprintf("&status_code=%d", *exitCode)
}
// The `series` data is used to match run events with complete or fail. Useful if multiple instances of a job are running.
if len(series) > 0 {
series = fmt.Sprintf("&series=%s", series)
}
pingSent := false
uri := ""
for i := 1; i <= 6; i++ {
if dev {
pingApiHost = "http://dev.cronitor.io"
} else if i > 2 && pingApiHost == "https://cronitor.link" {
pingApiHost = "https://cronitor.io"
} else {
pingApiHost = "https://cronitor.link"
}
// After 2 failed attempts, take a brief random break before trying again
if i > 2 {
time.Sleep(time.Second * time.Duration(float32(i)*1.5*rand.Float32()))
}
uri = fmt.Sprintf("%s/%s/%s?try=%d%s%s%s%s%s%s%s", pingApiHost, uniqueIdentifier, endpoint, i, formattedStamp, message, pingApiAuthKey, hostname, formattedDuration, series, formattedStatusCode)
log("Sending ping " + uri)
request, _ := http.NewRequest("GET", uri, nil)
request.Header.Add("User-Agent", userAgent)
response, err := Client.Do(request)
if err != nil {
log(err.Error())
continue
}
_, err = ioutil.ReadAll(response.Body)
response.Body.Close()
// Any 2xx is considered a successful response
if response.StatusCode >= 200 && response.StatusCode < 300 {
pingSent = true
break
}
// Give up (without retrying) on any 4xx response, e.g. 429 Too Many Requests
if response.StatusCode >= 400 && response.StatusCode < 500 {
pingSent = true
break
}
}
if !pingSent {
raven.CaptureErrorAndWait(errors.New("Ping failure; retries exhausted: "+uri), nil)
}
}
func effectiveHostname() string {
if len(viper.GetString(varHostname)) > 0 {
return viper.GetString(varHostname)
}
hostname, _ := os.Hostname()
return hostname
}
func effectiveTimezoneLocationName() lib.TimezoneLocationName {
// First, check if a TZ or CRON_TZ environment variable is set -- different distros use different variables
if locale, isSetFlag := os.LookupEnv("TZ"); isSetFlag {
return lib.TimezoneLocationName{locale}
}
if locale, isSetFlag := os.LookupEnv("CRON_TZ"); isSetFlag {
return lib.TimezoneLocationName{locale}
}
// Attempt to parse timedatectl (should work on FreeBSD, many linux distros)
if output, err := exec.Command("timedatectl").Output(); err == nil {
outputString := strings.Replace(string(output), "Time zone", "Timezone", -1)
r := regexp.MustCompile(`(?m:Timezone:\s+(\S+).+$)`)
if ret := r.FindStringSubmatch(outputString); ret != nil && len(ret) > 1 {
return lib.TimezoneLocationName{ret[1]}
}
}
// If /etc/localtime is a symlink, check what it is linking to
if localtimeFile, err := os.Lstat("/etc/localtime"); err == nil && localtimeFile.Mode()&os.ModeSymlink == os.ModeSymlink {
if symlink, _ := os.Readlink("/etc/localtime"); len(symlink) > 0 {
if strings.Contains(symlink, "UTC") {
return lib.TimezoneLocationName{"UTC"}
}
symlinkParts := strings.Split(symlink, "/")
return lib.TimezoneLocationName{strings.Join(symlinkParts[len(symlinkParts)-2:], "/")}
}
}
// If an /etc/timezone file exists, read it; there is no guarantee it is actually in use
if locale, err := ioutil.ReadFile("/etc/timezone"); err == nil {
return lib.TimezoneLocationName{string(locale)}
}
return lib.TimezoneLocationName{""}
}
func defaultConfigFileDirectory() string {
if runtime.GOOS == "windows" {
return fmt.Sprintf("%s\\ProgramData\\Cronitor", os.Getenv("SYSTEMDRIVE"))
}
return "/etc/cronitor"
}
func truncateString(s string, length int) string {
if len(s) <= length {
return s
}
return s[:length]
}
func printSuccessText(message string, indent bool) {
if isAutoDiscover || isSilent {
log(message)
} else {
color := color.New(color.FgHiGreen)
if indent {
color.Println(fmt.Sprintf(" |--► %s", message))
} else {
color.Println(fmt.Sprintf("----► %s", message))
}
}
}
func printDoneText(message string, indent bool) {
if isAutoDiscover || isSilent {
log(message)
} else {
printSuccessText(message+" ✔", indent)
}
}
func printWarningText(message string, indent bool) {
if isAutoDiscover || isSilent {
log(message)
} else {
color := color.New(color.FgHiYellow)
if indent {
c | olor.Println(fmt.Sprintf(" |--► %s", message))
} else {
| conditional_block |
|
root.go | // Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if err := RootCmd.Execute(); err != nil {
fatal(err.Error(), 1)
}
}
var varApiKey = "CRONITOR_API_KEY"
var varHostname = "CRONITOR_HOSTNAME"
var varLog = "CRONITOR_LOG"
var varPingApiKey = "CRONITOR_PING_API_KEY"
var varExcludeText = "CRONITOR_EXCLUDE_TEXT"
var varConfig = "CRONITOR_CONFIG"
func init() {
userAgent = fmt.Sprintf("CronitorCLI/%s", Version)
cobra.OnInitialize(initConfig)
// Here you will define your flags and configuration settings.
// Cobra supports persistent flags, which, if defined here,
// will be global for your application.
RootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", cfgFile, "Config file")
RootCmd.PersistentFlags().StringVarP(&apiKey, "api-key", "k", apiKey, "Cronitor API Key")
RootCmd.PersistentFlags().StringVarP(&pingApiKey, "ping-api-key", "p", pingApiKey, "Ping API Key")
RootCmd.PersistentFlags().StringVarP(&hostname, "hostname", "n", hostname, "A unique identifier for this host (default: system hostname)")
RootCmd.PersistentFlags().StringVarP(&debugLog, "log", "l", debugLog, "Write debug logs to supplied file")
RootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", verbose, "Verbose output")
RootCmd.PersistentFlags().BoolVar(&dev, "use-dev", dev, "Dev mode")
RootCmd.PersistentFlags().MarkHidden("use-dev")
viper.BindPFlag(varApiKey, RootCmd.PersistentFlags().Lookup("api-key"))
viper.BindPFlag(varHostname, RootCmd.PersistentFlags().Lookup("hostname"))
viper.BindPFlag(varLog, RootCmd.PersistentFlags().Lookup("log"))
viper.BindPFlag(varPingApiKey, RootCmd.PersistentFlags().Lookup("ping-api-key"))
viper.BindPFlag(varConfig, RootCmd.PersistentFlags().Lookup("config"))
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
viper.AutomaticEnv() // read in environment variables that match
// If a custom config file is specified by flag or env var, use it. Otherwise use default file.
if len(viper.GetString(varConfig)) > 0 {
viper.SetConfigFile(viper.GetString(varConfig))
} else {
viper.AddConfigPath(defaultConfigFileDirectory())
viper.SetConfigName("cronitor")
}
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
log("Reading config from " + viper.ConfigFileUsed())
}
}
func sendPing(endpoint string, uniqueIdentifier string, message string, series string, timestamp float64, duration *float64, exitCode *int, group *sync.WaitGroup) {
defer group.Done()
Client := &http.Client{
Timeout: time.Second * 10,
}
hostname := effectiveHostname()
pingApiAuthKey := viper.GetString(varPingApiKey)
pingApiHost := ""
formattedStamp := ""
formattedDuration := ""
formattedStatusCode := ""
if timestamp > 0 {
formattedStamp = fmt.Sprintf("&stamp=%s", formatStamp(timestamp))
}
if len(message) > 0 {
message = fmt.Sprintf("&msg=%s", url.QueryEscape(truncateString(message, 1000)))
}
if len(pingApiAuthKey) > 0 {
pingApiAuthKey = fmt.Sprintf("&auth_key=%s", truncateString(pingApiAuthKey, 50))
}
if len(hostname) > 0 {
hostname = fmt.Sprintf("&host=%s", url.QueryEscape(truncateString(hostname, 50)))
}
// By passing duration up, we save the computation on the server side
if duration != nil {
formattedDuration = fmt.Sprintf("&duration=%s", formatStamp(*duration))
}
// We aren't using exit code at time of writing, but we have the field available for healthcheck monitors.
if exitCode != nil {
formattedStatusCode = fmt.Sprintf("&status_code=%d", *exitCode)
}
// The `series` data is used to match run events with complete or fail. Useful if multiple instances of a job are running.
if len(series) > 0 {
series = fmt.Sprintf("&series=%s", series)
}
pingSent := false
uri := ""
for i := 1; i <= 6; i++ {
if dev {
pingApiHost = "http://dev.cronitor.io"
} else if i > 2 && pingApiHost == "https://cronitor.link" {
pingApiHost = "https://cronitor.io"
} else {
pingApiHost = "https://cronitor.link"
}
// After 2 failed attempts, take a brief random break before trying again
if i > 2 {
time.Sleep(time.Second * time.Duration(float32(i)*1.5*rand.Float32()))
}
uri = fmt.Sprintf("%s/%s/%s?try=%d%s%s%s%s%s%s%s", pingApiHost, uniqueIdentifier, endpoint, i, formattedStamp, message, pingApiAuthKey, hostname, formattedDuration, series, formattedStatusCode)
log("Sending ping " + uri)
request, _ := http.NewRequest("GET", uri, nil)
request.Header.Add("User-Agent", userAgent)
response, err := Client.Do(request)
if err != nil {
log(err.Error())
continue
}
_, err = ioutil.ReadAll(response.Body)
response.Body.Close()
// Any 2xx is considered a successful response
if response.StatusCode >= 200 && response.StatusCode < 300 {
pingSent = true
break
}
// Give up (without retrying) on any 4xx response, e.g. 429 Too Many Requests
if response.StatusCode >= 400 && response.StatusCode < 500 {
pingSent = true
break
}
}
if !pingSent {
raven.CaptureErrorAndWait(errors.New("Ping failure; retries exhausted: "+uri), nil)
}
}
func effectiveHostname() string {
if len(viper.GetString(varHostname)) > 0 {
return viper.GetString(varHostname)
}
hostname, _ := os.Hostname()
return hostname
}
func effectiveTimezoneLocationName() lib.TimezoneLocationName {
// First, check if a TZ or CRON_TZ environemnt variable is set -- Diff var used by diff distros
if locale, isSetFlag := os.LookupEnv("TZ"); isSetFlag {
return lib.TimezoneLocationName{locale}
}
if locale, isSetFlag := os.LookupEnv("CRON_TZ"); isSetFlag {
return lib.TimezoneLocationName{locale}
}
// Attempt to parse timedatectl (should work on FreeBSD, many linux distros)
if output, err := exec.Command("timedatectl").Output(); err == nil {
outputString := strings.Replace(string(output), "Time zone", "Timezone", -1)
r := regexp.MustCompile(`(?m:Timezone:\s+(\S+).+$)`)
if ret := r.FindStringSubmatch(outputString); ret != nil && len(ret) > 1 {
return lib.TimezoneLocationName{ret[1]}
}
}
// If /etc/localtime is a symlink, check what it is linking to
if localtimeFile, err := os.Lstat("/etc/localtime"); err == nil && localtimeFile.Mode()&os.ModeSymlink == os.ModeSymlink {
if symlink, _ := os.Readlink("/etc/localtime"); len(symlink) > 0 {
if strings.Contains(symlink, "UTC") {
return lib.TimezoneLocationName{"UTC"}
}
symlinkParts := strings.Split(symlink, "/")
return lib.TimezoneLocationName{strings.Join(symlinkParts[len(symlinkParts)-2:], "/")}
}
}
// If we happen to have an /etc/timezone, no guarantee it's used, but read that
if locale, err := ioutil.ReadFile("/etc/timezone"); err == nil {
return lib.TimezoneLocationName{string(locale)}
}
return lib.TimezoneLocationName{""}
}
func defaultConfigFileDirectory() string {
if runtime.GOOS == "windows" {
return fmt.Sprintf("%s\\ProgramData\\Cronitor", os.Getenv("SYSTEMDRIVE"))
}
return "/etc/cronitor"
}
func truncateString(s string, length int) string |
func printSuccessText(message string, indent bool) {
if isAutoDiscover || isSilent {
log(message)
} else {
color := color.New(color.FgHiGreen)
if indent {
color.Println(fmt.Sprintf(" |--► %s", message))
} else {
color.Println(fmt.Sprintf("----► %s", message))
}
}
}
func printDoneText(message string, indent bool) {
if is | {
if len(s) <= length {
return s
}
return s[:length]
} | identifier_body |
browser.rs | page
pub async fn new_page(&self, params: impl Into<CreateTargetParams>) -> Result<Page> {
let (tx, rx) = oneshot_channel();
let mut params = params.into();
if let Some(id) = self.browser_context.id() {
if params.browser_context_id.is_none() {
params.browser_context_id = Some(id.clone());
}
}
self.sender
.clone()
.send(HandlerMessage::CreatePage(params, tx))
.await?;
rx.await?
}
/// Version information about the browser
pub async fn version(&self) -> Result<GetVersionReturns> {
Ok(self.execute(GetVersionParams::default()).await?.result)
}
/// Returns the user agent of the browser
pub async fn user_agent(&self) -> Result<String> {
Ok(self.version().await?.user_agent)
}
/// Call a browser method.
pub async fn execute<T: Command>(&self, cmd: T) -> Result<CommandResponse<T::Response>> {
let (tx, rx) = oneshot_channel();
let method = cmd.identifier();
let msg = CommandMessage::new(cmd, tx)?;
self.sender
.clone()
.send(HandlerMessage::Command(msg))
.await?;
let resp = rx.await??;
to_command_response::<T>(resp, method)
}
/// Return all of the pages of the browser
pub async fn pages(&self) -> Result<Vec<Page>> {
let (tx, rx) = oneshot_channel();
self.sender
.clone()
.send(HandlerMessage::GetPages(tx))
.await?;
Ok(rx.await?)
}
/// Return page of given target_id
pub async fn get_page(&self, target_id: TargetId) -> Result<Page> {
let (tx, rx) = oneshot_channel();
self.sender
.clone()
.send(HandlerMessage::GetPage(target_id, tx))
.await?;
rx.await?.ok_or(CdpError::NotFound)
}
/// Set up a listener for browser events
pub async fn event_listener<T: IntoEventKind>(&self) -> Result<EventStream<T>> {
let (tx, rx) = unbounded();
self.sender
.clone()
.send(HandlerMessage::AddEventListener(
EventListenerRequest::new::<T>(tx),
))
.await?;
Ok(EventStream::new(rx))
}
}
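// Illustrative sketch of listening for an event (the event type is one assumed
// example from the generated CDP bindings; adjust the import path to wherever
// they are re-exported in your build):
//
//   use futures::StreamExt;
//   let mut created = browser
//       .event_listener::<cdp::browser_protocol::target::EventTargetCreated>()
//       .await?;
//   while let Some(event) = created.next().await {
//       println!("new target: {}", event.target_info.url);
//   }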
impl Drop for Browser {
fn drop(&mut self) {
if let Some(child) = self.child.as_mut() {
child.kill().expect("!kill");
}
}
}
async fn ws_url_from_output(child_process: &mut Child) -> String {
let stdout = child_process.stderr.take().expect("no stderr");
fn read_debug_url(stdout: std::process::ChildStderr) -> String {
let mut buf = BufReader::new(stdout);
let mut line = String::new();
loop {
if buf.read_line(&mut line).is_ok() {
// check for ws in line
if let Some(ws) = line.rsplit("listening on ").next() {
if ws.starts_with("ws") && ws.contains("devtools/browser") {
return ws.trim().to_string();
}
}
} else {
line = String::new();
}
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "async-std-runtime")] {
async_std::task::spawn_blocking(|| read_debug_url(stdout)).await
} else if #[cfg(feature = "tokio-runtime")] {
tokio::task::spawn_blocking(move || read_debug_url(stdout)).await.expect("Failed to read debug url from process output")
}
}
}
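// Example of the stderr line the loop above waits for (port and id illustrative):
//
//   DevTools listening on ws://127.0.0.1:9222/devtools/browser/4a6b2c...
//
// Only the part after "listening on " is kept, and it is accepted only if it
// starts with "ws" and contains "devtools/browser".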
#[derive(Debug, Clone)]
pub struct BrowserConfig {
/// Determines whether to run headless version of the browser. Defaults to
/// true.
headless: bool,
/// Determines whether to run the browser with a sandbox.
sandbox: bool,
/// Launch the browser with a specific window width and height.
window_size: Option<(u32, u32)>,
/// Launch the browser with a specific debugging port.
port: u16,
/// Path for Chrome or Chromium.
///
/// If unspecified, the crate will try to automatically detect a suitable
/// binary.
executable: std::path::PathBuf,
/// A list of Chrome extensions to load.
///
/// An extension should be a path to a folder containing the extension code.
/// CRX files cannot be used directly and must be first extracted.
///
/// Note that Chrome does not support loading extensions in headless-mode.
/// See https://bugs.chromium.org/p/chromium/issues/detail?id=706008#c5
extensions: Vec<String>,
/// Environment variables to set for the Chromium process.
/// Passes value through to std::process::Command::envs.
pub process_envs: Option<HashMap<String, String>>,
/// Data dir for user data
pub user_data_dir: Option<PathBuf>,
/// Whether to launch the `Browser` in incognito mode
incognito: bool,
/// Ignore https errors, default is true
ignore_https_errors: bool,
viewport: Viewport,
/// The duration after a request with no response should time out
request_timeout: Duration,
/// Additional command line arguments to pass to the browser instance.
args: Vec<String>,
}
#[derive(Debug, Clone)]
pub struct BrowserConfigBuilder {
headless: bool,
sandbox: bool,
window_size: Option<(u32, u32)>,
port: u16,
executable: Option<PathBuf>,
extensions: Vec<String>,
process_envs: Option<HashMap<String, String>>,
user_data_dir: Option<PathBuf>,
incognito: bool,
ignore_https_errors: bool,
viewport: Viewport,
request_timeout: Duration,
args: Vec<String>,
}
impl BrowserConfig {
pub fn builder() -> BrowserConfigBuilder {
BrowserConfigBuilder::default()
}
pub fn with_executable(path: impl AsRef<Path>) -> Self {
Self::builder().chrome_executable(path).build().unwrap()
}
}
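// Illustrative sketch of assembling a config with the builder (all values made up):
//
//   let config = BrowserConfig::builder()
//       .with_head()                              // show a window instead of headless
//       .window_size(1280, 800)
//       .request_timeout(Duration::from_secs(30))
//       .arg("--disable-gpu")
//       .build()
//       .expect("valid browser config");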
impl Default for BrowserConfigBuilder {
fn default() -> Self {
Self {
headless: true,
sandbox: true,
window_size: None,
port: 0,
executable: None,
extensions: Vec::new(),
process_envs: None,
user_data_dir: None,
incognito: false,
ignore_https_errors: true,
viewport: Default::default(),
request_timeout: Duration::from_millis(REQUEST_TIMEOUT),
args: Vec::new(),
}
}
}
impl BrowserConfigBuilder {
pub fn window_size(mut self, width: u32, height: u32) -> Self {
self.window_size = Some((width, height));
self
}
pub fn no_sandbox(mut self) -> Self {
self.sandbox = false;
self
}
pub fn with_head(mut self) -> Self {
self.headless = false;
self
}
pub fn incognito(mut self) -> Self {
self.incognito = true;
self
}
pub fn respect_https_errors(mut self) -> Self {
self.ignore_https_errors = false;
self
}
pub fn request_timeout(mut self, timeout: Duration) -> Self {
self.request_timeout = timeout;
self
}
pub fn viewport(mut self, viewport: Viewport) -> Self {
self.viewport = viewport;
self
}
pub fn user_data_dir(mut self, data_dir: impl AsRef<Path>) -> Self {
self.user_data_dir = Some(data_dir.as_ref().to_path_buf());
self
}
pub fn chrome_executable(mut self, path: impl AsRef<Path>) -> Self {
self.executable = Some(path.as_ref().to_path_buf());
self
}
pub fn extension(mut self, extension: impl Into<String>) -> Self {
self.extensions.push(extension.into());
self
}
pub fn extensions<I, S>(mut self, extensions: I) -> Self
where
I: IntoIterator<Item = S>,
S: Into<String>,
{
for ext in extensions {
self.extensions.push(ext.into());
}
self
}
pub fn env(mut self, key: impl Into<String>, val: impl Into<String>) -> Self {
self.process_envs
.get_or_insert(HashMap::new())
.insert(key.into(), val.into());
self
}
pub fn envs<I, K, V>(mut self, envs: I) -> Self
where
I: IntoIterator<Item = (K, V)>,
K: Into<String>,
V: Into<String>,
{
self.process_envs
.get_or_insert(HashMap::new())
.extend(envs.into_iter().map(|(k, v)| (k.into(), v.into())));
self
}
pub fn arg(mut self, arg: impl Into<String>) -> Self {
self.args.push(arg.into());
self
}
pub fn args<I, S>(mut self, args: I) -> Self
where
I: IntoIterator<Item = S>,
S: Into<String>,
{
for arg in args { | self.args.push(arg.into()); | random_line_split |
|
browser.rs | _timeout: config.request_timeout,
};
let fut = Handler::new(conn, rx, handler_config);
let browser_context = fut.default_browser_context().clone();
let browser = Self {
sender: tx,
config: Some(config),
child: Some(child),
debug_ws_url,
browser_context,
};
Ok((browser, fut))
}
/// If not launched as incognito this creates a new incognito browser
/// context. After that this browser exists within the incognito session.
/// New pages created while being in incognito mode will also run in the
/// incognito context. Incognito contexts won't share cookies/cache with
/// other browser contexts.
pub async fn start_incognito_context(&mut self) -> Result<&mut Self> {
if !self.is_incognito_configured() {
let resp = self
.execute(CreateBrowserContextParams::default())
.await?
.result;
self.browser_context = BrowserContext::from(resp.browser_context_id);
self.sender
.clone()
.send(HandlerMessage::InsertContext(self.browser_context.clone()))
.await?;
}
Ok(self)
}
/// If an incognito session was created with
/// `Browser::start_incognito_context` this disposes this context.
///
/// # Note: This will also dispose of all pages that were running within the
/// incognito context.
pub async fn quit_incognito_context(&mut self) -> Result<&mut Self> {
if let Some(id) = self.browser_context.take() {
self.execute(DisposeBrowserContextParams::new(id.clone()))
.await?;
self.sender
.clone()
.send(HandlerMessage::DisposeContext(BrowserContext::from(id)))
.await?;
}
Ok(self)
}
/// Whether incognito mode was configured from the start
fn is_incognito_configured(&self) -> bool {
self.config
.as_ref()
.map(|c| c.incognito)
.unwrap_or_default()
}
/// Returns the address of the websocket this browser is attached to
pub fn websocket_address(&self) -> &String {
&self.debug_ws_url
}
/// Whether the BrowserContext is incognito.
pub fn is_incognito(&self) -> bool {
self.is_incognito_configured() || self.browser_context.is_incognito()
}
/// The config of the spawned chromium instance if any.
pub fn config(&self) -> Option<&BrowserConfig> {
self.config.as_ref()
}
/// Create a new browser page
pub async fn new_page(&self, params: impl Into<CreateTargetParams>) -> Result<Page> {
let (tx, rx) = oneshot_channel();
let mut params = params.into();
if let Some(id) = self.browser_context.id() {
if params.browser_context_id.is_none() {
params.browser_context_id = Some(id.clone());
}
}
self.sender
.clone()
.send(HandlerMessage::CreatePage(params, tx))
.await?;
rx.await?
}
/// Version information about the browser
pub async fn version(&self) -> Result<GetVersionReturns> {
Ok(self.execute(GetVersionParams::default()).await?.result)
}
/// Returns the user agent of the browser
pub async fn user_agent(&self) -> Result<String> {
Ok(self.version().await?.user_agent)
}
/// Call a browser method.
pub async fn execute<T: Command>(&self, cmd: T) -> Result<CommandResponse<T::Response>> {
let (tx, rx) = oneshot_channel();
let method = cmd.identifier();
let msg = CommandMessage::new(cmd, tx)?;
self.sender
.clone()
.send(HandlerMessage::Command(msg))
.await?;
let resp = rx.await??;
to_command_response::<T>(resp, method)
}
/// Return all of the pages of the browser
pub async fn pages(&self) -> Result<Vec<Page>> {
let (tx, rx) = oneshot_channel();
self.sender
.clone()
.send(HandlerMessage::GetPages(tx))
.await?;
Ok(rx.await?)
}
/// Return page of given target_id
pub async fn get_page(&self, target_id: TargetId) -> Result<Page> {
let (tx, rx) = oneshot_channel();
self.sender
.clone()
.send(HandlerMessage::GetPage(target_id, tx))
.await?;
rx.await?.ok_or(CdpError::NotFound)
}
/// Set up a listener for browser events
pub async fn event_listener<T: IntoEventKind>(&self) -> Result<EventStream<T>> {
let (tx, rx) = unbounded();
self.sender
.clone()
.send(HandlerMessage::AddEventListener(
EventListenerRequest::new::<T>(tx),
))
.await?;
Ok(EventStream::new(rx))
}
}
impl Drop for Browser {
fn drop(&mut self) {
if let Some(child) = self.child.as_mut() {
child.kill().expect("!kill");
}
}
}
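// Illustrative usage sketch (URL made up; assumes a runtime feature is enabled
// and `futures::StreamExt` is in scope so the Handler can be polled):
//
//   let (browser, mut handler) = Browser::launch(BrowserConfig::builder().build()?).await?;
//   let _drive = async_std::task::spawn(async move {
//       while let Some(event) = handler.next().await { let _ = event; }
//   });
//   let page = browser.new_page("https://example.com").await?;
//   let _html = page.content().await?;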
async fn ws_url_from_output(child_process: &mut Child) -> String {
let stdout = child_process.stderr.take().expect("no stderr");
fn read_debug_url(stdout: std::process::ChildStderr) -> String {
let mut buf = BufReader::new(stdout);
let mut line = String::new();
loop {
if buf.read_line(&mut line).is_ok() {
// check for ws in line
if let Some(ws) = line.rsplit("listening on ").next() {
if ws.starts_with("ws") && ws.contains("devtools/browser") |
}
} else {
line = String::new();
}
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "async-std-runtime")] {
async_std::task::spawn_blocking(|| read_debug_url(stdout)).await
} else if #[cfg(feature = "tokio-runtime")] {
tokio::task::spawn_blocking(move || read_debug_url(stdout)).await.expect("Failed to read debug url from process output")
}
}
}
#[derive(Debug, Clone)]
pub struct BrowserConfig {
/// Determines whether to run headless version of the browser. Defaults to
/// true.
headless: bool,
/// Determines whether to run the browser with a sandbox.
sandbox: bool,
/// Launch the browser with a specific window width and height.
window_size: Option<(u32, u32)>,
/// Launch the browser with a specific debugging port.
port: u16,
/// Path for Chrome or Chromium.
///
/// If unspecified, the crate will try to automatically detect a suitable
/// binary.
executable: std::path::PathBuf,
/// A list of Chrome extensions to load.
///
/// An extension should be a path to a folder containing the extension code.
/// CRX files cannot be used directly and must be first extracted.
///
/// Note that Chrome does not support loading extensions in headless-mode.
/// See https://bugs.chromium.org/p/chromium/issues/detail?id=706008#c5
extensions: Vec<String>,
/// Environment variables to set for the Chromium process.
/// Passes value through to std::process::Command::envs.
pub process_envs: Option<HashMap<String, String>>,
/// Data dir for user data
pub user_data_dir: Option<PathBuf>,
/// Whether to launch the `Browser` in incognito mode
incognito: bool,
/// Ignore https errors, default is true
ignore_https_errors: bool,
viewport: Viewport,
/// The duration after a request with no response should time out
request_timeout: Duration,
/// Additional command line arguments to pass to the browser instance.
args: Vec<String>,
}
#[derive(Debug, Clone)]
pub struct BrowserConfigBuilder {
headless: bool,
sandbox: bool,
window_size: Option<(u32, u32)>,
port: u16,
executable: Option<PathBuf>,
extensions: Vec<String>,
process_envs: Option<HashMap<String, String>>,
user_data_dir: Option<PathBuf>,
incognito: bool,
ignore_https_errors: bool,
viewport: Viewport,
request_timeout: Duration,
args: Vec<String>,
}
impl BrowserConfig {
pub fn builder() -> BrowserConfigBuilder {
BrowserConfigBuilder::default()
}
pub fn with_executable(path: impl AsRef<Path>) -> Self {
Self::builder().chrome_executable(path).build().unwrap()
}
}
impl Default for BrowserConfigBuilder {
fn default() -> Self {
Self {
headless: true,
sandbox: true,
window_size: None,
port: 0,
executable: None,
extensions: Vec::new(),
process_envs: None,
user_data_dir: None,
incognito: false,
ignore_https_errors: true,
viewport: Default::default(),
request_timeout: Duration::from_millis(REQUEST_TIMEOUT),
args: Vec::new(),
}
}
}
impl BrowserConfigBuilder {
pub fn window_size(mut self, width: u32, height: u32) -> Self {
| {
return ws.trim().to_string();
} | conditional_block |
browser.rs | height.
window_size: Option<(u32, u32)>,
/// Launch the browser with a specific debugging port.
port: u16,
/// Path for Chrome or Chromium.
///
/// If unspecified, the crate will try to automatically detect a suitable
/// binary.
executable: std::path::PathBuf,
/// A list of Chrome extensions to load.
///
/// An extension should be a path to a folder containing the extension code.
/// CRX files cannot be used directly and must be first extracted.
///
/// Note that Chrome does not support loading extensions in headless-mode.
/// See https://bugs.chromium.org/p/chromium/issues/detail?id=706008#c5
extensions: Vec<String>,
/// Environment variables to set for the Chromium process.
/// Passes value through to std::process::Command::envs.
pub process_envs: Option<HashMap<String, String>>,
/// Data dir for user data
pub user_data_dir: Option<PathBuf>,
/// Whether to launch the `Browser` in incognito mode
incognito: bool,
/// Ignore https errors, default is true
ignore_https_errors: bool,
viewport: Viewport,
/// The duration after a request with no response should time out
request_timeout: Duration,
/// Additional command line arguments to pass to the browser instance.
args: Vec<String>,
}
#[derive(Debug, Clone)]
pub struct BrowserConfigBuilder {
headless: bool,
sandbox: bool,
window_size: Option<(u32, u32)>,
port: u16,
executable: Option<PathBuf>,
extensions: Vec<String>,
process_envs: Option<HashMap<String, String>>,
user_data_dir: Option<PathBuf>,
incognito: bool,
ignore_https_errors: bool,
viewport: Viewport,
request_timeout: Duration,
args: Vec<String>,
}
impl BrowserConfig {
pub fn builder() -> BrowserConfigBuilder {
BrowserConfigBuilder::default()
}
pub fn with_executable(path: impl AsRef<Path>) -> Self {
Self::builder().chrome_executable(path).build().unwrap()
}
}
impl Default for BrowserConfigBuilder {
fn default() -> Self {
Self {
headless: true,
sandbox: true,
window_size: None,
port: 0,
executable: None,
extensions: Vec::new(),
process_envs: None,
user_data_dir: None,
incognito: false,
ignore_https_errors: true,
viewport: Default::default(),
request_timeout: Duration::from_millis(REQUEST_TIMEOUT),
args: Vec::new(),
}
}
}
impl BrowserConfigBuilder {
pub fn window_size(mut self, width: u32, height: u32) -> Self {
self.window_size = Some((width, height));
self
}
pub fn no_sandbox(mut self) -> Self {
self.sandbox = false;
self
}
pub fn with_head(mut self) -> Self {
self.headless = false;
self
}
pub fn incognito(mut self) -> Self {
self.incognito = true;
self
}
pub fn respect_https_errors(mut self) -> Self {
self.ignore_https_errors = false;
self
}
pub fn request_timeout(mut self, timeout: Duration) -> Self {
self.request_timeout = timeout;
self
}
pub fn viewport(mut self, viewport: Viewport) -> Self {
self.viewport = viewport;
self
}
pub fn user_data_dir(mut self, data_dir: impl AsRef<Path>) -> Self {
self.user_data_dir = Some(data_dir.as_ref().to_path_buf());
self
}
pub fn chrome_executable(mut self, path: impl AsRef<Path>) -> Self {
self.executable = Some(path.as_ref().to_path_buf());
self
}
pub fn extension(mut self, extension: impl Into<String>) -> Self {
self.extensions.push(extension.into());
self
}
pub fn extensions<I, S>(mut self, extensions: I) -> Self
where
I: IntoIterator<Item = S>,
S: Into<String>,
{
for ext in extensions {
self.extensions.push(ext.into());
}
self
}
pub fn env(mut self, key: impl Into<String>, val: impl Into<String>) -> Self {
self.process_envs
.get_or_insert(HashMap::new())
.insert(key.into(), val.into());
self
}
pub fn envs<I, K, V>(mut self, envs: I) -> Self
where
I: IntoIterator<Item = (K, V)>,
K: Into<String>,
V: Into<String>,
{
self.process_envs
.get_or_insert(HashMap::new())
.extend(envs.into_iter().map(|(k, v)| (k.into(), v.into())));
self
}
pub fn arg(mut self, arg: impl Into<String>) -> Self {
self.args.push(arg.into());
self
}
pub fn args<I, S>(mut self, args: I) -> Self
where
I: IntoIterator<Item = S>,
S: Into<String>,
{
for arg in args {
self.args.push(arg.into());
}
self
}
pub fn build(self) -> std::result::Result<BrowserConfig, String> {
let executable = if let Some(e) = self.executable {
e
} else {
default_executable()?
};
Ok(BrowserConfig {
headless: self.headless,
sandbox: self.sandbox,
window_size: self.window_size,
port: self.port,
executable,
extensions: self.extensions,
process_envs: self.process_envs,
user_data_dir: self.user_data_dir,
incognito: self.incognito,
ignore_https_errors: self.ignore_https_errors,
viewport: self.viewport,
request_timeout: self.request_timeout,
args: self.args,
})
}
}
impl BrowserConfig {
pub fn launch(&self) -> io::Result<Child> {
let dbg_port = format!("--remote-debugging-port={}", self.port);
let args = [
dbg_port.as_str(),
"--disable-background-networking",
"--enable-features=NetworkService,NetworkServiceInProcess",
"--disable-background-timer-throttling",
"--disable-backgrounding-occluded-windows",
"--disable-breakpad",
"--disable-client-side-phishing-detection",
"--disable-component-extensions-with-background-pages",
"--disable-default-apps",
"--disable-dev-shm-usage",
"--disable-extensions",
"--disable-features=TranslateUI",
"--disable-hang-monitor",
"--disable-ipc-flooding-protection",
"--disable-popup-blocking",
"--disable-prompt-on-repost",
"--disable-renderer-backgrounding",
"--disable-sync",
"--force-color-profile=srgb",
"--metrics-recording-only",
"--no-first-run",
"--enable-automation",
"--password-store=basic",
"--use-mock-keychain",
"--enable-blink-features=IdleDetection",
];
let mut cmd = process::Command::new(&self.executable);
cmd.args(&args).args(&DEFAULT_ARGS).args(&self.args).args(
self.extensions
.iter()
.map(|e| format!("--load-extension={}", e)),
);
if let Some(ref user_data) = self.user_data_dir {
cmd.arg(format!("--user-data-dir={}", user_data.display()));
}
if let Some((width, height)) = self.window_size {
cmd.arg(format!("--window-size={},{}", width, height));
}
if !self.sandbox {
cmd.args(&["--no-sandbox", "--disable-setuid-sandbox"]);
}
if self.headless {
cmd.args(&["--headless", "--hide-scrollbars", "--mute-audio"]);
}
if self.incognito {
cmd.arg("--incognito");
}
if let Some(ref envs) = self.process_envs {
cmd.envs(envs);
}
cmd.stderr(Stdio::piped()).spawn()
}
}
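// Illustrative sketch (normally done for you inside `Browser::launch`): spawn the
// configured binary and read the DevTools websocket URL from its piped stderr.
//
//   let config = BrowserConfig::builder().build().expect("valid config");
//   let mut child = config.launch().expect("failed to spawn chromium");
//   // stderr will eventually print a line like:
//   //   DevTools listening on ws://127.0.0.1:9222/devtools/browser/<uuid>
//   child.kill().ok();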
/// Returns the path to Chrome's executable.
///
/// If the `CHROME` environment variable is set, `default_executable` will
/// use it as the default path. Otherwise, the filenames `google-chrome-stable`
/// `chromium`, `chromium-browser`, `chrome` and `chrome-browser` are
/// searched for in standard places. If that fails,
/// `/Applications/Google Chrome.app/...` (on MacOS) or the registry (on
/// Windows) is consulted. If all of the above fail, an error is returned.
pub fn default_executable() -> Result<std::path::PathBuf, String> | {
if let Ok(path) = std::env::var("CHROME") {
if std::path::Path::new(&path).exists() {
return Ok(path.into());
}
}
for app in &[
"google-chrome-stable",
"chromium",
"chromium-browser",
"chrome",
"chrome-browser",
] {
if let Ok(path) = which::which(app) {
return Ok(path);
}
}
#[cfg(target_os = "macos")] | identifier_body |
|
browser.rs | (config: BrowserConfig) -> Result<(Self, Handler)> {
// launch a new chromium instance
let mut child = config.launch()?;
// extract the ws:
let get_ws_url = ws_url_from_output(&mut child);
let dur = Duration::from_secs(20);
cfg_if::cfg_if! {
if #[cfg(feature = "async-std-runtime")] {
let debug_ws_url = async_std::future::timeout(dur, get_ws_url)
.await
.map_err(|_| CdpError::Timeout)?;
} else if #[cfg(feature = "tokio-runtime")] {
let debug_ws_url = tokio::time::timeout(dur, get_ws_url).await
.map_err(|_| CdpError::Timeout)?;
}
}
let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?;
let (tx, rx) = channel(1);
let handler_config = HandlerConfig {
ignore_https_errors: config.ignore_https_errors,
viewport: Some(config.viewport.clone()),
context_ids: Vec::new(),
request_timeout: config.request_timeout,
};
let fut = Handler::new(conn, rx, handler_config);
let browser_context = fut.default_browser_context().clone();
let browser = Self {
sender: tx,
config: Some(config),
child: Some(child),
debug_ws_url,
browser_context,
};
Ok((browser, fut))
}
/// If not launched as incognito this creates a new incognito browser
/// context. After that this browser exists within the incognito session.
/// New pages created while being in incognito mode will also run in the
/// incognito context. Incognito contexts won't share cookies/cache with
/// other browser contexts.
pub async fn start_incognito_context(&mut self) -> Result<&mut Self> {
if !self.is_incognito_configured() {
let resp = self
.execute(CreateBrowserContextParams::default())
.await?
.result;
self.browser_context = BrowserContext::from(resp.browser_context_id);
self.sender
.clone()
.send(HandlerMessage::InsertContext(self.browser_context.clone()))
.await?;
}
Ok(self)
}
/// If an incognito session was created with
/// `Browser::start_incognito_context` this disposes this context.
///
/// # Note
///
/// This will also dispose all pages that were running within the
/// incognito context.
pub async fn quit_incognito_context(&mut self) -> Result<&mut Self> {
if let Some(id) = self.browser_context.take() {
self.execute(DisposeBrowserContextParams::new(id.clone()))
.await?;
self.sender
.clone()
.send(HandlerMessage::DisposeContext(BrowserContext::from(id)))
.await?;
}
Ok(self)
}
/// Whether incognito mode was configured from the start
fn is_incognito_configured(&self) -> bool {
self.config
.as_ref()
.map(|c| c.incognito)
.unwrap_or_default()
}
/// Returns the address of the websocket this browser is attached to
pub fn websocket_address(&self) -> &String {
&self.debug_ws_url
}
/// Whether the BrowserContext is incognito.
pub fn is_incognito(&self) -> bool {
self.is_incognito_configured() || self.browser_context.is_incognito()
}
/// The config of the spawned chromium instance if any.
pub fn config(&self) -> Option<&BrowserConfig> {
self.config.as_ref()
}
/// Create a new browser page
pub async fn new_page(&self, params: impl Into<CreateTargetParams>) -> Result<Page> {
let (tx, rx) = oneshot_channel();
let mut params = params.into();
if let Some(id) = self.browser_context.id() {
if params.browser_context_id.is_none() {
params.browser_context_id = Some(id.clone());
}
}
self.sender
.clone()
.send(HandlerMessage::CreatePage(params, tx))
.await?;
rx.await?
}
/// Version information about the browser
pub async fn version(&self) -> Result<GetVersionReturns> {
Ok(self.execute(GetVersionParams::default()).await?.result)
}
/// Returns the user agent of the browser
pub async fn user_agent(&self) -> Result<String> {
Ok(self.version().await?.user_agent)
}
/// Call a browser method.
pub async fn execute<T: Command>(&self, cmd: T) -> Result<CommandResponse<T::Response>> {
let (tx, rx) = oneshot_channel();
let method = cmd.identifier();
let msg = CommandMessage::new(cmd, tx)?;
self.sender
.clone()
.send(HandlerMessage::Command(msg))
.await?;
let resp = rx.await??;
to_command_response::<T>(resp, method)
}
/// Return all of the pages of the browser
pub async fn pages(&self) -> Result<Vec<Page>> {
let (tx, rx) = oneshot_channel();
self.sender
.clone()
.send(HandlerMessage::GetPages(tx))
.await?;
Ok(rx.await?)
}
/// Return page of given target_id
pub async fn get_page(&self, target_id: TargetId) -> Result<Page> {
let (tx, rx) = oneshot_channel();
self.sender
.clone()
.send(HandlerMessage::GetPage(target_id, tx))
.await?;
rx.await?.ok_or(CdpError::NotFound)
}
/// Set listener for browser events
pub async fn event_listener<T: IntoEventKind>(&self) -> Result<EventStream<T>> {
let (tx, rx) = unbounded();
self.sender
.clone()
.send(HandlerMessage::AddEventListener(
EventListenerRequest::new::<T>(tx),
))
.await?;
Ok(EventStream::new(rx))
}
}
impl Drop for Browser {
fn drop(&mut self) {
if let Some(child) = self.child.as_mut() {
child.kill().expect("!kill");
}
}
}
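/// Scans the child process's stderr for a "listening on " line containing a ws:// devtools/browser URL and returns it.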
async fn ws_url_from_output(child_process: &mut Child) -> String {
let stderr = child_process.stderr.take().expect("no stderr");
fn read_debug_url(stderr: std::process::ChildStderr) -> String {
let mut buf = BufReader::new(stderr);
let mut line = String::new();
loop {
if buf.read_line(&mut line).is_ok() {
// check for ws in line
if let Some(ws) = line.rsplit("listening on ").next() {
if ws.starts_with("ws") && ws.contains("devtools/browser") {
return ws.trim().to_string();
}
}
} else {
line = String::new();
}
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "async-std-runtime")] {
async_std::task::spawn_blocking(|| read_debug_url(stderr)).await
} else if #[cfg(feature = "tokio-runtime")] {
tokio::task::spawn_blocking(move || read_debug_url(stderr)).await.expect("Failed to read debug url from process output")
}
}
}
#[derive(Debug, Clone)]
pub struct BrowserConfig {
/// Determines whether to run headless version of the browser. Defaults to
/// true.
headless: bool,
/// Determines whether to run the browser with a sandbox.
sandbox: bool,
/// Launch the browser with a specific window width and height.
window_size: Option<(u32, u32)>,
/// Launch the browser with a specific debugging port.
port: u16,
/// Path for Chrome or Chromium.
///
/// If unspecified, the crate will try to automatically detect a suitable
/// binary.
executable: std::path::PathBuf,
/// A list of Chrome extensions to load.
///
/// An extension should be a path to a folder containing the extension code.
/// CRX files cannot be used directly and must be first extracted.
///
/// Note that Chrome does not support loading extensions in headless-mode.
/// See https://bugs.chromium.org/p/chromium/issues/detail?id=706008#c5
extensions: Vec<String>,
/// Environment variables to set for the Chromium process.
/// Passes value through to std::process::Command::envs.
pub process_envs: Option<HashMap<String, String>>,
/// Data dir for user data
pub user_data_dir: Option<PathBuf>,
/// Whether to launch the `Browser` in incognito mode
incognito: bool,
/// Ignore https errors, default is true
ignore_https_errors: bool,
viewport: Viewport,
/// The duration after a request with no response should time out
request_timeout: Duration,
/// Additional command line arguments to pass to the browser instance.
args: Vec<String>,
}
#[derive(Debug, Clone)]
pub struct BrowserConfigBuilder {
headless: bool,
sandbox: bool,
window_size: Option<(u32, u32)>,
port: u16,
executable: Option<PathBuf>,
extensions: Vec<String>,
process_envs: Option<HashMap<String, String>>,
user | launch | identifier_name |
|
pybatch_mast.py | None,
]:
# NOTE n_genes is always included as a covariate
if bys is None:
if min_perc is not None:
adata = adata.copy()
if on_total:
total_cells = adata.shape[0]
else:
total_cells = adata.obs[group].value_counts().min()
min_cells = max(total_cells * min_perc, min_cells_limit)
print(
f'Filtering genes detected in fewer than {min_cells} cells'
)
sc.pp.filter_genes(adata, min_cells=min_cells)
enough_genes = adata.shape[1] > 0
job_collection = {}
if enough_genes:
job_collection = self._mast(
job_collection, adata, covs, group, keys, jobs=jobs,
)
else:
print('Not enough genes, computation skipped')
try:
de, top = self.mast_prep_output(job_collection, lfc, fdr)
except ClientError as e:
raise MASTCollectionError(e, job_collection) from e
except Exception as e:
raise MASTCollectionError(str(e), job_collection) from e
yield de, top, None
else:
for by, groups in bys:
job_collection = {}
for b in groups:
adata_b = adata[adata.obs[by] == b].copy()
if min_perc is not None:
if on_total:
total_cells = adata_b.shape[0]
else:
total_cells = adata_b.obs[group].value_counts(
).min()
min_cells = max(
total_cells * min_perc[b], min_cells_limit
)
print(
'Filtering genes detected in fewer '
f'than {min_cells} cells'
)
sc.pp.filter_genes(
adata_b, min_cells=min_cells,
)
enough_groups = (
adata_b.obs[group].value_counts() >= 3
).sum() > 1
enough_genes = adata_b.shape[1] > 0
if enough_groups and enough_genes:
job_collection = self._mast(
job_collection, adata_b, covs, group,
keys, by=by, b=b, jobs=jobs,
)
else:
print(f'Computation for {b} skipped')
try:
de, top = self.mast_prep_output(job_collection, lfc, fdr)
except ClientError as e:
raise MASTCollectionError(e, job_collection) from e
except Exception as e:
raise MASTCollectionError(str(e), job_collection) from e
yield de, top, by
def _mast(
self,
job_collection: Dict[str, Dict[str, str]],
adata: AnnData,
covs: str,
group: str,
keys: Sequence[str],
by: Optional[str] = None,
b: Optional[str] = None,
jobs: int = 1,
) -> Dict[str, Dict[str, str]]:
if by is None:
b = 'Sheet0'
new_covs = BatchMAST._clean_covs(adata, covs, group, by=by)
remote_dir, job_id, job_name, content = self.mast_compute(
adata, keys, group=group, covs=new_covs, block=False, jobs=jobs,
)
job_collection[job_id] = {'group': b, 'remote_dir': remote_dir}
return job_collection
def mast_prep_output(
self,
job_collection: Dict[str, Dict[str, str]],
lfc: float,
fdr: float,
wait: float = 30,
) -> Tuple[DataFrame, Dict[str, Dict[str, List[str]]]]:
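"""Collect finished MAST batch jobs, keep the output of groups that succeeded, and derive per-group top gene lists via mast_filter."""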
de = {}
top = {}
for job_id, status, metadata, content in self.mast_collect(
job_collection, wait=wait,
):
if status == 'SUCCEEDED':
b = metadata['group']
de[b] = content
elif status == 'FAILED':
print(f'Job Failed: group {metadata["group"]}')
else:
raise NotImplementedError(f'Status {status} not managed')
top = BatchMAST.mast_filter(de, lfc, fdr)
return de, top
@staticmethod
def mast_filter(
de: Dict[str, DataFrame],
lfc: float,
fdr: float,
) -> Dict[str, Dict[str, List[str]]]:
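"""For each group and model coefficient, list genes with FDR below `fdr` and coefficient above `lfc`, sorted by FDR (ascending) then coefficient (descending)."""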
top = {}
for b in de.keys():
cols = [
'_'.join(c.split('_')[:-1])
for c in de[b].columns[de[b].columns.str.endswith('_coef')]
]
top[b] = {}
for c in cols:
top[b][c] = de[b][
(de[b][f'{c}_fdr'] < fdr) & (de[b][f'{c}_coef'] > lfc)
].sort_values([
f'{c}_fdr', f'{c}_coef'
], ascending=[True, False]).index.tolist()
return top
@staticmethod
def _clean_covs(
adata: AnnData,
covs: str,
group: str,
by: Optional[str] = None,
) -> str:
covs_s = covs.split('+')[1:]
new_covs = ''
for c in covs_s:
# Including only covariates with more than 1 level
# Otherwise, MAST will stop execution with error:
# contrasts can be applied only to factors with 2 or more levels
if c not in (group, by) and adata.obs[c].nunique() > 1:
new_covs += f'+{c}'
return new_covs
def | (
self,
adata: AnnData,
remote_dir: str,
keys: Sequence[str],
group: str,
covs: str = '',
ready: Optional[Sequence[str]] = None,
jobs: int = 1,
) -> str:
s3 = bt.resource('s3')
if ready is None:
ready = []
with tempfile.TemporaryDirectory() as td:
if 'mat' not in ready:
local_mat = os.path.join(td, 'mat.fth')
adata = adata.copy()
adata.X = adata.layers[self.layer]
sc.pp.normalize_total(adata, target_sum=1e6)
sc.pp.log1p(adata, base=2)
adata.to_df().reset_index().to_feather(
local_mat, compression='uncompressed',
)
remote_mat = os.path.join(remote_dir, 'mat.fth')
print(f'Uploading matrix ({adata.shape}) to s3...')
s3.meta.client.upload_file(local_mat, self.bucket, remote_mat)
if 'cdat' not in ready:
local_cdat = os.path.join(td, 'cdat.csv')
adata.obs[keys].to_csv(local_cdat)
remote_cdat = os.path.join(remote_dir, 'cdat.csv')
print('Uploading metadata to s3...')
s3.meta.client.upload_file(
local_cdat, self.bucket, remote_cdat)
remote = os.path.join(self.bucket, remote_dir)
manifest = '\n'.join([
f'WORKSPACE={remote}', 'BATCH_INDEX_OFFSET=0', 'CDAT=cdat.csv',
'MAT=mat.fth', f'GROUP={group}', 'OUT_NAME=out.csv',
f'MODEL=\'~group+n_genes{covs}\'', f'JOBS={jobs}',
])
local_manifest = os.path.join(td, 'manifest.txt')
with open(local_manifest, 'w') as m:
m.write(manifest + '\n')
remote_manifest = os.path.join(remote_dir, 'manifest.txt')
print('Uploading manifest to s3...')
s3.meta.client.upload_file(
local_manifest, self.bucket, remote_manifest,
)
return remote_manifest
def _mast_submit(
self,
manifest: str,
block: bool = False,
job_name: str = 'mast',
) -> str:
batch = bt.client('batch')
job_manifest = f's3://{os.path.join(self.bucket, manifest)}'
job_id = None
try:
print(
f'Submitting job {job_name} to the job queue {self.job_queue}'
)
submit_job_response = batch.submit_job(
jobName=job_name, jobQueue=self.job_queue,
jobDefinition=self.job_def,
containerOverrides={'command': [job_manifest]}
)
job_id = submit_job_response['jobId']
print(
f'Submitted job {job_name} {job_id} to the job queue'
f' {self.job_queue}'
)
except Exception as err:
print(f'error: {str(err)}')
return job_id
def mast_compute(
self,
adata: AnnData,
keys: Sequence[str],
group: str,
covs: str = '',
block: bool = False,
remote_dir: Optional[str] = None,
jobs: int = 1,
) -> Tuple[str, str, str, Optional[DataFrame]]:
content = None
if remote_dir is None:
remote_dir = os.path.join('mast', | _mast_prep | identifier_name |
pybatch_mast.py | s, group, keys, jobs=jobs,
)
else:
print('Not enough genes, computation skipped')
try:
de, top = self.mast_prep_output(job_collection, lfc, fdr)
except ClientError as e:
raise MASTCollectionError(e, job_collection) from e
except Exception as e:
raise MASTCollectionError(str(e), job_collection) from e
yield de, top, None
else:
for by, groups in bys:
job_collection = {}
for b in groups:
adata_b = adata[adata.obs[by] == b].copy()
if min_perc is not None:
if on_total:
total_cells = adata_b.shape[0]
else:
total_cells = adata_b.obs[group].value_counts(
).min()
min_cells = max(
total_cells * min_perc[b], min_cells_limit
)
print(
'Filtering genes detected in fewer '
f'than {min_cells} cells'
)
sc.pp.filter_genes(
adata_b, min_cells=min_cells,
)
enough_groups = (
adata_b.obs[group].value_counts() >= 3
).sum() > 1
enough_genes = adata_b.shape[1] > 0
if enough_groups and enough_genes:
job_collection = self._mast(
job_collection, adata_b, covs, group,
keys, by=by, b=b, jobs=jobs,
)
else:
print(f'Computation for {b} skipped')
try:
de, top = self.mast_prep_output(job_collection, lfc, fdr)
except ClientError as e:
raise MASTCollectionError(e, job_collection) from e
except Exception as e:
raise MASTCollectionError(str(e), job_collection) from e
yield de, top, by
def _mast(
self,
job_collection: Dict[str, Dict[str, str]],
adata: AnnData,
covs: str,
group: str,
keys: Sequence[str],
by: Optional[str] = None,
b: Optional[str] = None,
jobs: int = 1,
) -> Dict[str, Dict[str, str]]:
if by is None:
b = 'Sheet0'
new_covs = BatchMAST._clean_covs(adata, covs, group, by=by)
remote_dir, job_id, job_name, content = self.mast_compute(
adata, keys, group=group, covs=new_covs, block=False, jobs=jobs,
)
job_collection[job_id] = {'group': b, 'remote_dir': remote_dir}
return job_collection
def mast_prep_output(
self,
job_collection: Dict[str, Dict[str, str]],
lfc: float,
fdr: float,
wait: float = 30,
) -> Tuple[DataFrame, Dict[str, Dict[str, List[str]]]]:
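"""Collect finished MAST batch jobs, keep the output of groups that succeeded, and derive per-group top gene lists via mast_filter."""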
de = {}
top = {}
for job_id, status, metadata, content in self.mast_collect(
job_collection, wait=wait,
):
if status == 'SUCCEEDED':
b = metadata['group']
de[b] = content
elif status == 'FAILED':
print(f'Job Failed: group {metadata["group"]}')
else:
raise NotImplementedError(f'Status {status} not managed')
top = BatchMAST.mast_filter(de, lfc, fdr)
return de, top
@staticmethod
def mast_filter(
de: Dict[str, DataFrame],
lfc: float,
fdr: float,
) -> Dict[str, Dict[str, List[str]]]:
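"""For each group and model coefficient, list genes with FDR below `fdr` and coefficient above `lfc`, sorted by FDR (ascending) then coefficient (descending)."""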
top = {}
for b in de.keys():
cols = [
'_'.join(c.split('_')[:-1])
for c in de[b].columns[de[b].columns.str.endswith('_coef')]
]
top[b] = {}
for c in cols:
top[b][c] = de[b][
(de[b][f'{c}_fdr'] < fdr) & (de[b][f'{c}_coef'] > lfc)
].sort_values([
f'{c}_fdr', f'{c}_coef'
], ascending=[True, False]).index.tolist()
return top
@staticmethod
def _clean_covs(
adata: AnnData,
covs: str,
group: str,
by: Optional[str] = None,
) -> str:
covs_s = covs.split('+')[1:]
new_covs = ''
for c in covs_s:
# Including only covariates with more than 1 level
# Otherwise, MAST will stop execution with error:
# contrasts can be applied only to factors with 2 or more levels
if c not in (group, by) and adata.obs[c].nunique() > 1:
new_covs += f'+{c}'
return new_covs
def _mast_prep(
self,
adata: AnnData,
remote_dir: str,
keys: Sequence[str],
group: str,
covs: str = '',
ready: Optional[Sequence[str]] = None,
jobs: int = 1,
) -> str:
s3 = bt.resource('s3')
if ready is None:
ready = []
with tempfile.TemporaryDirectory() as td:
if 'mat' not in ready:
local_mat = os.path.join(td, 'mat.fth')
adata = adata.copy()
adata.X = adata.layers[self.layer]
sc.pp.normalize_total(adata, target_sum=1e6)
sc.pp.log1p(adata, base=2)
adata.to_df().reset_index().to_feather(
local_mat, compression='uncompressed',
)
remote_mat = os.path.join(remote_dir, 'mat.fth')
print(f'Uploading matrix ({adata.shape}) to s3...')
s3.meta.client.upload_file(local_mat, self.bucket, remote_mat)
if 'cdat' not in ready:
local_cdat = os.path.join(td, 'cdat.csv')
adata.obs[keys].to_csv(local_cdat)
remote_cdat = os.path.join(remote_dir, 'cdat.csv')
print('Uploading metadata to s3...')
s3.meta.client.upload_file(
local_cdat, self.bucket, remote_cdat)
remote = os.path.join(self.bucket, remote_dir)
manifest = '\n'.join([
f'WORKSPACE={remote}', 'BATCH_INDEX_OFFSET=0', 'CDAT=cdat.csv',
'MAT=mat.fth', f'GROUP={group}', 'OUT_NAME=out.csv',
f'MODEL=\'~group+n_genes{covs}\'', f'JOBS={jobs}',
])
local_manifest = os.path.join(td, 'manifest.txt')
with open(local_manifest, 'w') as m:
m.write(manifest + '\n')
remote_manifest = os.path.join(remote_dir, 'manifest.txt')
print('Uploading manifest to s3...')
s3.meta.client.upload_file(
local_manifest, self.bucket, remote_manifest,
)
return remote_manifest
def _mast_submit(
self,
manifest: str,
block: bool = False,
job_name: str = 'mast',
) -> str:
batch = bt.client('batch')
job_manifest = f's3://{os.path.join(self.bucket, manifest)}'
job_id = None
try:
print(
f'Submitting job {job_name} to the job queue {self.job_queue}'
)
submit_job_response = batch.submit_job(
jobName=job_name, jobQueue=self.job_queue,
jobDefinition=self.job_def,
containerOverrides={'command': [job_manifest]}
)
job_id = submit_job_response['jobId']
print(
f'Submitted job {job_name} {job_id} to the job queue'
f' {self.job_queue}'
)
except Exception as err:
print(f'error: {str(err)}')
return job_id
def mast_compute(
self,
adata: AnnData,
keys: Sequence[str],
group: str,
covs: str = '',
block: bool = False,
remote_dir: Optional[str] = None,
jobs: int = 1,
) -> Tuple[str, str, str, Optional[DataFrame]]:
| content = None
if remote_dir is None:
remote_dir = os.path.join('mast', str(uuid.uuid4()))
ready = []
else:
ready = ['mat', 'cdat']
manifest = self._mast_prep(
adata, remote_dir, keys, group, covs=covs, ready=ready, jobs=jobs,
)
job_name = f'mast-{"".join(filter(str.isalnum, group))}-{"".join(filter(str.isalnum, covs))}'
job_id = self._mast_submit(
manifest, block=block, job_name=job_name,
)
if block:
status = BatchMAST._batch_job_status(job_id, wait=60)
if status == 'SUCCEEDED':
content = self._mast_results(remote_dir)
return remote_dir, job_id, job_name, content | identifier_body |
|
pybatch_mast.py | None,
]:
# NOTE n_genes is always included as a covariate
if bys is None:
if min_perc is not None:
adata = adata.copy()
if on_total:
total_cells = adata.shape[0]
else:
total_cells = adata.obs[group].value_counts().min()
min_cells = max(total_cells * min_perc, min_cells_limit)
print(
f'Filtering genes detected in fewer than {min_cells} cells'
)
sc.pp.filter_genes(adata, min_cells=min_cells)
enough_genes = adata.shape[1] > 0
job_collection = {}
if enough_genes:
job_collection = self._mast(
job_collection, adata, covs, group, keys, jobs=jobs,
)
else:
print('Not enough genes, computation skipped')
try:
de, top = self.mast_prep_output(job_collection, lfc, fdr)
except ClientError as e:
raise MASTCollectionError(e, job_collection) from e
except Exception as e:
raise MASTCollectionError(str(e), job_collection) from e
yield de, top, None
else:
for by, groups in bys:
job_collection = {}
for b in groups:
adata_b = adata[adata.obs[by] == b].copy()
if min_perc is not None:
if on_total:
total_cells = adata_b.shape[0]
else:
total_cells = adata_b.obs[group].value_counts(
).min()
min_cells = max(
total_cells * min_perc[b], min_cells_limit
)
print(
'Filtering genes detected in fewer '
f'than {min_cells} cells'
)
sc.pp.filter_genes(
adata_b, min_cells=min_cells,
)
enough_groups = (
adata_b.obs[group].value_counts() >= 3
).sum() > 1
enough_genes = adata_b.shape[1] > 0
if enough_groups and enough_genes:
job_collection = self._mast(
job_collection, adata_b, covs, group,
keys, by=by, b=b, jobs=jobs,
)
else:
print(f'Computation for {b} skipped')
try:
de, top = self.mast_prep_output(job_collection, lfc, fdr)
except ClientError as e:
raise MASTCollectionError(e, job_collection) from e
except Exception as e:
raise MASTCollectionError(str(e), job_collection) from e
yield de, top, by
def _mast(
self,
job_collection: Dict[str, Dict[str, str]],
adata: AnnData,
covs: str,
group: str,
keys: Sequence[str],
by: Optional[str] = None,
b: Optional[str] = None,
jobs: int = 1,
) -> Dict[str, Dict[str, str]]:
if by is None:
b = 'Sheet0'
new_covs = BatchMAST._clean_covs(adata, covs, group, by=by)
remote_dir, job_id, job_name, content = self.mast_compute(
adata, keys, group=group, covs=new_covs, block=False, jobs=jobs,
)
job_collection[job_id] = {'group': b, 'remote_dir': remote_dir}
return job_collection
def mast_prep_output(
self,
job_collection: Dict[str, Dict[str, str]],
lfc: float,
fdr: float,
wait: float = 30,
) -> Tuple[DataFrame, Dict[str, Dict[str, List[str]]]]:
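"""Collect finished MAST batch jobs, keep the output of groups that succeeded, and derive per-group top gene lists via mast_filter."""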
de = {}
top = {}
for job_id, status, metadata, content in self.mast_collect(
job_collection, wait=wait,
):
if status == 'SUCCEEDED':
b = metadata['group']
de[b] = content
elif status == 'FAILED':
print(f'Job Failed: group {metadata["group"]}')
else:
raise NotImplementedError(f'Status {status} not managed')
top = BatchMAST.mast_filter(de, lfc, fdr)
return de, top
@staticmethod
def mast_filter(
de: Dict[str, DataFrame],
lfc: float,
fdr: float,
) -> Dict[str, Dict[str, List[str]]]:
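"""For each group and model coefficient, list genes with FDR below `fdr` and coefficient above `lfc`, sorted by FDR (ascending) then coefficient (descending)."""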
top = {}
for b in de.keys():
cols = [
'_'.join(c.split('_')[:-1])
for c in de[b].columns[de[b].columns.str.endswith('_coef')]
]
top[b] = {}
for c in cols:
top[b][c] = de[b][
(de[b][f'{c}_fdr'] < fdr) & (de[b][f'{c}_coef'] > lfc)
].sort_values([
f'{c}_fdr', f'{c}_coef'
], ascending=[True, False]).index.tolist()
return top
@staticmethod
def _clean_covs(
adata: AnnData,
covs: str,
group: str,
by: Optional[str] = None,
) -> str:
covs_s = covs.split('+')[1:]
new_covs = ''
for c in covs_s:
# Including only covariates with more than 1 level
# Otherwise, MAST will stop execution with error:
# contrasts can be applied only to factors with 2 or more levels
if c not in (group, by) and adata.obs[c].nunique() > 1:
new_covs += f'+{c}'
return new_covs
def _mast_prep(
self,
adata: AnnData,
remote_dir: str,
keys: Sequence[str],
group: str,
covs: str = '',
ready: Optional[Sequence[str]] = None,
jobs: int = 1,
) -> str:
s3 = bt.resource('s3')
if ready is None:
ready = []
with tempfile.TemporaryDirectory() as td:
if 'mat' not in ready:
local_mat = os.path.join(td, 'mat.fth')
adata = adata.copy()
adata.X = adata.layers[self.layer]
sc.pp.normalize_total(adata, target_sum=1e6)
sc.pp.log1p(adata, base=2)
adata.to_df().reset_index().to_feather(
local_mat, compression='uncompressed',
)
remote_mat = os.path.join(remote_dir, 'mat.fth')
print(f'Uploading matrix ({adata.shape}) to s3...')
s3.meta.client.upload_file(local_mat, self.bucket, remote_mat)
if 'cdat' not in ready:
local_cdat = os.path.join(td, 'cdat.csv')
adata.obs[keys].to_csv(local_cdat)
remote_cdat = os.path.join(remote_dir, 'cdat.csv')
print('Uploading metadata to s3...')
s3.meta.client.upload_file(
local_cdat, self.bucket, remote_cdat)
remote = os.path.join(self.bucket, remote_dir)
manifest = '\n'.join([
f'WORKSPACE={remote}', 'BATCH_INDEX_OFFSET=0', 'CDAT=cdat.csv',
'MAT=mat.fth', f'GROUP={group}', 'OUT_NAME=out.csv',
f'MODEL=\'~group+n_genes{covs}\'', f'JOBS={jobs}',
])
local_manifest = os.path.join(td, 'manifest.txt')
with open(local_manifest, 'w') as m:
m.write(manifest + '\n')
remote_manifest = os.path.join(remote_dir, 'manifest.txt') | local_manifest, self.bucket, remote_manifest,
)
return remote_manifest
def _mast_submit(
self,
manifest: str,
block: bool = False,
job_name: str = 'mast',
) -> str:
batch = bt.client('batch')
job_manifest = f's3://{os.path.join(self.bucket, manifest)}'
job_id = None
try:
print(
f'Submitting job {job_name} to the job queue {self.job_queue}'
)
submit_job_response = batch.submit_job(
jobName=job_name, jobQueue=self.job_queue,
jobDefinition=self.job_def,
containerOverrides={'command': [job_manifest]}
)
job_id = submit_job_response['jobId']
print(
f'Submitted job {job_name} {job_id} to the job queue'
f' {self.job_queue}'
)
except Exception as err:
print(f'error: {str(err)}')
return job_id
def mast_compute(
self,
adata: AnnData,
keys: Sequence[str],
group: str,
covs: str = '',
block: bool = False,
remote_dir: Optional[str] = None,
jobs: int = 1,
) -> Tuple[str, str, str, Optional[DataFrame]]:
content = None
if remote_dir is None:
remote_dir = os.path.join('mast', str | print('Uploading manifest to s3...')
s3.meta.client.upload_file( | random_line_split |
pybatch_mast.py | None,
]:
# NOTE n_genes is always included as a covariate
if bys is None:
if min_perc is not None:
adata = adata.copy()
if on_total:
total_cells = adata.shape[0]
else:
total_cells = adata.obs[group].value_counts().min()
min_cells = max(total_cells * min_perc, min_cells_limit)
print(
f'Filtering genes detected in fewer than {min_cells} cells'
)
sc.pp.filter_genes(adata, min_cells=min_cells)
enough_genes = adata.shape[1] > 0
job_collection = {}
if enough_genes:
job_collection = self._mast(
job_collection, adata, covs, group, keys, jobs=jobs,
)
else:
|
try:
de, top = self.mast_prep_output(job_collection, lfc, fdr)
except ClientError as e:
raise MASTCollectionError(e, job_collection) from e
except Exception as e:
raise MASTCollectionError(str(e), job_collection) from e
yield de, top, None
else:
for by, groups in bys:
job_collection = {}
for b in groups:
adata_b = adata[adata.obs[by] == b].copy()
if min_perc is not None:
if on_total:
total_cells = adata_b.shape[0]
else:
total_cells = adata_b.obs[group].value_counts(
).min()
min_cells = max(
total_cells * min_perc[b], min_cells_limit
)
print(
'Filtering genes detected in fewer '
f'than {min_cells} cells'
)
sc.pp.filter_genes(
adata_b, min_cells=min_cells,
)
enough_groups = (
adata_b.obs[group].value_counts() >= 3
).sum() > 1
enough_genes = adata_b.shape[1] > 0
if enough_groups and enough_genes:
job_collection = self._mast(
job_collection, adata_b, covs, group,
keys, by=by, b=b, jobs=jobs,
)
else:
print(f'Computation for {b} skipped')
try:
de, top = self.mast_prep_output(job_collection, lfc, fdr)
except ClientError as e:
raise MASTCollectionError(e, job_collection) from e
except Exception as e:
raise MASTCollectionError(str(e), job_collection) from e
yield de, top, by
def _mast(
self,
job_collection: Dict[str, Dict[str, str]],
adata: AnnData,
covs: str,
group: str,
keys: Sequence[str],
by: Optional[str] = None,
b: Optional[str] = None,
jobs: int = 1,
) -> Dict[str, Dict[str, str]]:
if by is None:
b = 'Sheet0'
new_covs = BatchMAST._clean_covs(adata, covs, group, by=by)
remote_dir, job_id, job_name, content = self.mast_compute(
adata, keys, group=group, covs=new_covs, block=False, jobs=jobs,
)
job_collection[job_id] = {'group': b, 'remote_dir': remote_dir}
return job_collection
def mast_prep_output(
self,
job_collection: Dict[str, Dict[str, str]],
lfc: float,
fdr: float,
wait: float = 30,
) -> Tuple[DataFrame, Dict[str, Dict[str, List[str]]]]:
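"""Collect finished MAST batch jobs, keep the output of groups that succeeded, and derive per-group top gene lists via mast_filter."""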
de = {}
top = {}
for job_id, status, metadata, content in self.mast_collect(
job_collection, wait=wait,
):
if status == 'SUCCEEDED':
b = metadata['group']
de[b] = content
elif status == 'FAILED':
print(f'Job Failed: group {metadata["group"]}')
else:
raise NotImplementedError(f'Status {status} not managed')
top = BatchMAST.mast_filter(de, lfc, fdr)
return de, top
@staticmethod
def mast_filter(
de: Dict[str, DataFrame],
lfc: float,
fdr: float,
) -> Dict[str, Dict[str, List[str]]]:
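"""For each group and model coefficient, list genes with FDR below `fdr` and coefficient above `lfc`, sorted by FDR (ascending) then coefficient (descending)."""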
top = {}
for b in de.keys():
cols = [
'_'.join(c.split('_')[:-1])
for c in de[b].columns[de[b].columns.str.endswith('_coef')]
]
top[b] = {}
for c in cols:
top[b][c] = de[b][
(de[b][f'{c}_fdr'] < fdr) & (de[b][f'{c}_coef'] > lfc)
].sort_values([
f'{c}_fdr', f'{c}_coef'
], ascending=[True, False]).index.tolist()
return top
@staticmethod
def _clean_covs(
adata: AnnData,
covs: str,
group: str,
by: Optional[str] = None,
) -> str:
covs_s = covs.split('+')[1:]
new_covs = ''
for c in covs_s:
# Including only covariates with more than 1 level
# Otherwise, MAST will stop execution with error:
# contrasts can be applied only to factors with 2 or more levels
if c not in (group, by) and adata.obs[c].nunique() > 1:
new_covs += f'+{c}'
return new_covs
def _mast_prep(
self,
adata: AnnData,
remote_dir: str,
keys: Sequence[str],
group: str,
covs: str = '',
ready: Optional[Sequence[str]] = None,
jobs: int = 1,
) -> str:
s3 = bt.resource('s3')
if ready is None:
ready = []
with tempfile.TemporaryDirectory() as td:
if 'mat' not in ready:
local_mat = os.path.join(td, 'mat.fth')
adata = adata.copy()
adata.X = adata.layers[self.layer]
sc.pp.normalize_total(adata, target_sum=1e6)
sc.pp.log1p(adata, base=2)
adata.to_df().reset_index().to_feather(
local_mat, compression='uncompressed',
)
remote_mat = os.path.join(remote_dir, 'mat.fth')
print(f'Uploading matrix ({adata.shape}) to s3...')
s3.meta.client.upload_file(local_mat, self.bucket, remote_mat)
if 'cdat' not in ready:
local_cdat = os.path.join(td, 'cdat.csv')
adata.obs[keys].to_csv(local_cdat)
remote_cdat = os.path.join(remote_dir, 'cdat.csv')
print('Uploading metadata to s3...')
s3.meta.client.upload_file(
local_cdat, self.bucket, remote_cdat)
remote = os.path.join(self.bucket, remote_dir)
manifest = '\n'.join([
f'WORKSPACE={remote}', 'BATCH_INDEX_OFFSET=0', 'CDAT=cdat.csv',
'MAT=mat.fth', f'GROUP={group}', 'OUT_NAME=out.csv',
f'MODEL=\'~group+n_genes{covs}\'', f'JOBS={jobs}',
])
local_manifest = os.path.join(td, 'manifest.txt')
with open(local_manifest, 'w') as m:
m.write(manifest + '\n')
remote_manifest = os.path.join(remote_dir, 'manifest.txt')
print('Uploading manifest to s3...')
s3.meta.client.upload_file(
local_manifest, self.bucket, remote_manifest,
)
return remote_manifest
def _mast_submit(
self,
manifest: str,
block: bool = False,
job_name: str = 'mast',
) -> str:
batch = bt.client('batch')
job_manifest = f's3://{os.path.join(self.bucket, manifest)}'
job_id = None
try:
print(
f'Submitting job {job_name} to the job queue {self.job_queue}'
)
submit_job_response = batch.submit_job(
jobName=job_name, jobQueue=self.job_queue,
jobDefinition=self.job_def,
containerOverrides={'command': [job_manifest]}
)
job_id = submit_job_response['jobId']
print(
f'Submitted job {job_name} {job_id} to the job queue'
f' {self.job_queue}'
)
except Exception as err:
print(f'error: {str(err)}')
return job_id
def mast_compute(
self,
adata: AnnData,
keys: Sequence[str],
group: str,
covs: str = '',
block: bool = False,
remote_dir: Optional[str] = None,
jobs: int = 1,
) -> Tuple[str, str, str, Optional[DataFrame]]:
content = None
if remote_dir is None:
remote_dir = os.path.join('mast', | print('Not enough genes, computation skipped') | conditional_block |
imap-client.js | : function(){
this.parser = null;
},
onEvent: function(event, message, data, request){
log("onEvent: " + event);
log(data);
this.block(false);
switch(event){
case "onLogin":
this.onLogin(true);
//this.select();
break;
case "onLoginFailed":
this.onLogin(false);
break;
case "onStatus":
this.onUnseen(data[0].matches[0]);
break;
case "onSelect":
var total = {
exists: 0,
recent: 0
};
for(var i = 0; i < data.length; i++){
for(var k in total){
if(data[i].name == k)
total[k] = data[i].matches;
}
}
Each(this.mailboxes, function(m, k){
if(request.params == m)
this.mailbox = k;
}, this);
this.onMailboxTotal(this.mailbox, total.exists, total.recent);
break;
case "noMailbox":
this.createMailbox(request.params);
break;
case "onCreate":
this.select(request.params);
break;
case "onFetch":
var list = [];
var mail = {};
for(var i = 0; i < data.length; i++){
if(data[i].name == "list"){
var mail = this.parseEnvelope(data[i].matches);
//var headers = this.parseHeaders(data[i].data);
list.push(mail);
} else if(data[i].name == "contact"){
var item = {
id: data[i].matches[0]
};
var contact = this.parseContacts(data[i].data);
contact.Mail = this.cutFreemailAddress(contact.Long);
log(contact);
list.push(Apply(item, contact));
} else if(data[i].name == "mail"){
var n = data[i].data.indexOf("\r\n\r\n");
mail.headers = this.parseHeaders( data[i].data.substring(0, n) );
var body = data[i].data.substring(n + 4);
if(mail.headers["Content-Type"] && mail.headers["Content-Type"].match(/^Multipart\/Mixed/)){
var boundary, m;
if(m = /boundary="(.*?)"/.exec(mail.headers["Content-Type"])){
boundary = m[1];
var parts = this.parseAttachments(boundary, body);
mail.body = parts[1].body;
mail.attachments = parts.slice(2);
}
} else {
mail.body = body;
}
}
}
if(request.type == "mail")
this.onFetch(Apply(mail, {mailbox: this.mailbox}));
else
this.onMailboxData(list, request.mailbox);
break;
/*
case "onStore":
this.expunge();
break;
*/
}
if(request.callback)
request.callback();
if(!this.blocked)
this.releaseQueue();
},
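// Split a multipart body on the MIME boundary and parse each part into {headers, body}.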
parseAttachments: function(boundary, body){
var parts = body.split("--" + boundary);
for(var k in parts){
var n = parts[k].indexOf("\r\n\r\n");
var headers = this.parseHeaders( parts[k].substring(0, n) );
var body = parts[k].substring(n + 4);
parts[k] = {
body: body,
headers: headers
};
}
return parts;
},
parseHeaders: function(headers){
// From: Dave Baker <[email protected]>\r\nSubject: Welcome to Freemail!\r\n\r\n
var fields = ["To", "From", "Subject", "Date", "Content-Type"];
var result = {};
var m;
for(var k in fields){
var re = new RegExp("^" + fields[k] + ":\\s?(.*)", "m");
if(m = re.exec(headers))
result[fields[k]] = m[1]; |
parseEnvelope: function(matches){
var fields = qw("id flags date subject from to");
var data = {};
Every(fields, function(k, i){
if(k == "from" || k == "to"){
var m = /(NIL|\".*?\")\s+(NIL|\".*?\")\s+(NIL|\".*?\")\s+(NIL|\".*?\")/
.exec(matches[i]).slice(1);
Every(m, function(v, i){
m[i] = v == "NIL" ? null : v.substring(0, v.length - 1).substring(1);
});
data[k] = {
name: m[0],
mail: this.cutFreemailAddress(m[2] + "@" + m[3]),
};
} else if(k == "date" || k == "subject"){
if(matches[i] != "NIL")
data[k] = matches[i].substring(0, matches[i].length - 1).substring(1);
} else if(k == "flags"){
var f = matches[i].split(/\s+/);
data[k] = {};
Every(f, function(v){
if(v)
data[k][v.substring(1)] = true;
});
} else {
data[k] = matches[i];
}
}, this);
return data;
},
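// Parse a contact entry: known header-style fields first, free-form Notes after the blank line.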
parseContacts: function(data){
var result = {};
var m;
var n = data.indexOf("\r\n\r\n");
result.Notes = data.substring(n + 4);
data = data.substring(0, n);
for(var k in this.contact_fields){
var re = new RegExp("^" + this.contact_fields[k] + ":\\s(.*)", "m");
if(m = re.exec(data))
result[this.contact_fields[k]] = m[1];
}
return result;
},
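// Shorten long freemail addresses (user@<long key>.freemail) to an abbreviated display form.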
cutFreemailAddress: function(address){
if(address && (m = /([\w\d\_]+\@)([\w\d\_]{30,154})(\.freemail)/.exec(address))){
var key = m[2];
//var cutted = m[1] + key.substring(0, 3) + "..." + key.substring(key.length - 3, key.length) + m[3];
var cutted = m[1] + key.substring(0, 6) + ".." + m[3];
address = address.replace(m[0], cutted);
}
return address;
},
// ****** Handlers *******
onConnect: function(){
//log("onConnect");
this.onStatus("connected");
},
onConnectFailed: function(){
//log("onConnectFailed");
this.onStatus("failed");
},
onLogin: function(logged){
//log(logged ? "Logged in" : "Login failed");
this.status();
this.onStatus("logged", logged);
},
onMailboxTotal: function(mailbox, exists, recent){
//log("exists/recent = " + exists + "/" + recent);
this.onStatus("total", {
exists: exists,
recent: recent,
mailbox: mailbox
});
},
onMailboxData: function(list, mailbox){
//log(list);
this.onStatus("data", {
list: list,
mailbox: mailbox
});
},
onFetch: function(mail){
//log(mail);
this.onStatus("mail", mail);
},
onConnectEstablished: function(){
this.onStatus("ready");
if(this.auth)
this.login(this.auth.login, this.auth.password);
},
onUnseen: function(count){
this.onStatus("unseen", count);
},
onDisconnect: function(){
this.block(false);
this.parser = null;
},
onStatus: function(status, data){}, // OVERRIDE
// ******* PUBLIC ********
login: function(login, password){
this.auth = {
login: login,
password: password
};
if(this.connected){
log("Logging with session " + this.session + "... ");
this.exec({
command: "LOGIN",
params: login + " " + password
});
} else {
/*
this.onConnectEstablished = F(this, function(){
this.onStatus("ready");
this.login();
});
*/
this.connect();
}
},
logout: function(){
this.auth = null;
this.exec({
command: "LOGOUT"
});
this.disconnect();
},
// mailbox = inbox | inbox.Sent | inbox.Contacts
select: function(mailbox, callback){
//this.mailbox = mailbox || this.mailbox;
this.exec({
command: "SELECT",
params: mailbox || this.mailbox,
callback: F(this, this.expunge, [callback])
| }
return result;
}, | random_line_split |
kubernetes.go | // Get services
p.log.Debugf("fetching kubernetes services")
services, servicesErr := p.client.ListServices("", nil)
if nodesErr != nil || servicesErr != nil {
if nodesErr != nil {
p.log.Warningf("Failed to fetch kubernetes nodes: %#v (using previous ones)", nodesErr)
}
if servicesErr != nil {
p.log.Warningf("Failed to fetch kubernetes services: %#v (using previous ones)", servicesErr)
}
p.recentErrors++
if p.recentErrors > maxRecentErrors {
p.log.Warningf("Too many recent kubernetes errors, restarting")
os.Exit(1)
}
return p.lastUpdate, nil
} else {
p.recentErrors = 0
update := &k8sUpdate{
log: p.log,
nodeExporterPort: p.nodeExporterPort,
nodes: nodes.Items,
services: services.Items,
etcdTLSConfig: p.ETCDTLSConfig,
kubeletTLSConfig: p.KubeletTLSConfig,
}
p.lastUpdate = update
return update, nil
}
}
// Extract data from Kubernetes nodes to create node_exporter targets
func (p *k8sUpdate) CreateNodes() ([]service.ScrapeConfig, error) {
// Build scrape config list
scNode := service.StaticConfig{}
scNode.Label("source", "node")
scEtcd := service.StaticConfig{}
scEtcd.Label("source", "etcd")
for _, node := range p.nodes {
for _, addr := range node.Status.Addresses {
if addr.Type == "InternalIP" {
ip := addr.Address
p.log.Debugf("found kubernetes node %s", ip)
scNode.Targets = append(scNode.Targets, fmt.Sprintf("%s:%d", ip, p.nodeExporterPort))
if node.Labels["core"] == "true" {
scEtcd.Targets = append(scEtcd.Targets, fmt.Sprintf("%s:2379", ip))
}
}
}
}
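// Relabel each target's __address__ into separate "instance" (host) and "port" labels.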
scrapeConfigNode := service.ScrapeConfig{
JobName: "node",
StaticConfigs: []service.StaticConfig{scNode},
RelabelConfigs: []service.RelabelConfig{
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "instance",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$1",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "port",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$2",
},
},
}
scrapeConfigETCD := service.ScrapeConfig{
JobName: "etcd",
StaticConfigs: []service.StaticConfig{scEtcd},
RelabelConfigs: []service.RelabelConfig{
service.RelabelConfig{
Action: "labeldrop",
Regex: "etcd_debugging.*",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "instance",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$1",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "port",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$2",
},
},
}
if p.etcdTLSConfig.IsConfigured() {
scrapeConfigETCD.Scheme = "https"
scrapeConfigETCD.TLSConfig = &service.TLSConfig{
CAFile: p.etcdTLSConfig.CAFile,
CertFile: p.etcdTLSConfig.CertFile,
KeyFile: p.etcdTLSConfig.KeyFile,
InsecureSkipVerify: true,
}
}
scrapeConfigK8sNodes := service.ScrapeConfig{
JobName: "kubernetes-nodes",
ScrapeInterval: "5m",
KubernetesConfigs: []service.KubernetesSDConfig{
service.KubernetesSDConfig{
Role: "node",
},
},
RelabelConfigs: []service.RelabelConfig{
service.RelabelConfig{
Action: "labelmap",
Regex: "__meta_kubernetes_node_label_(.+)",
},
},
}
if p.kubeletTLSConfig.IsConfigured() {
scrapeConfigK8sNodes.Scheme = "https"
scrapeConfigK8sNodes.TLSConfig = &service.TLSConfig{
CAFile: p.kubeletTLSConfig.CAFile,
CertFile: p.kubeletTLSConfig.CertFile,
KeyFile: p.kubeletTLSConfig.KeyFile,
InsecureSkipVerify: true,
}
}
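// Discover endpoints via Kubernetes SD; keep only services annotated with prometheus.io/scrape=true and honor the scheme/path/port annotations.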
scrapeConfigK8sEndpoints := service.ScrapeConfig{
JobName: "kubernetes-endpoints",
KubernetesConfigs: []service.KubernetesSDConfig{
service.KubernetesSDConfig{
Role: "endpoints",
},
},
RelabelConfigs: []service.RelabelConfig{
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_scrape"},
Action: "keep",
Regex: "true",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_scheme"},
Action: "replace",
TargetLabel: "__scheme__",
Regex: "(https?)",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_path"},
Action: "replace",
TargetLabel: "__metrics_path__",
Regex: "(.+)",
},
service.RelabelConfig{
SourceLabels: []string{"__address__", "__meta_kubernetes_service_annotation_prometheus_io_port"},
Action: "replace",
TargetLabel: "__address__",
Regex: `(.+)(?::\d+);(\d+)`,
Replacement: "$1:$2",
},
service.RelabelConfig{
Action: "labelmap",
Regex: "__meta_kubernetes_service_label_(.+)",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_namespace"},
Action: "replace",
TargetLabel: "kubernetes_namespace",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_pod_name"},
Action: "replace",
TargetLabel: "kubernetes_pod_name",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "instance",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$1",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "port",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$2",
},
service.RelabelConfig{
SourceLabels: []string{"j2_job_name"},
Action: "replace",
TargetLabel: "job",
},
service.RelabelConfig{
SourceLabels: []string{"j2_taskgroup_name"},
Action: "replace",
TargetLabel: "taskgroup",
},
},
}
return []service.ScrapeConfig{scrapeConfigNode, scrapeConfigETCD, scrapeConfigK8sNodes, scrapeConfigK8sEndpoints}, nil
}
// CreateRules creates all rules this plugin is aware of.
// The returned string list should contain the content of the various rules.
func (p *k8sUpdate) CreateRules() ([]string, error) {
// Build URL list
var urls []string
for _, svc := range p.services {
ann, ok := svc.Annotations[metricsAnnotation]
if !ok || ann == "" {
continue
}
var metricsRecords []api.MetricsServiceRecord
if err := json.Unmarshal([]byte(ann), &metricsRecords); err != nil {
p.log.Errorf("Failed to unmarshal metrics annotation in service '%s.%s': %#v", svc.Namespace, svc.Name, err)
continue
}
// Get service IP
if svc.Spec.Type != k8s.ServiceTypeClusterIP {
p.log.Errorf("Cannot put metrics rules in services of type other than ClusterIP ('%s.%s')", svc.Namespace, svc.Name)
continue
}
clusterIP := svc.Spec.ClusterIP
// Collect URLs
for _, m := range metricsRecords {
if m.RulesPath == "" | {
continue
} | conditional_block |