file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
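Each row below is one fill-in-the-middle (FIM) example: `middle` is the span cut out of the original file, `prefix` and `suffix` are the code surrounding it, and `fim_type` labels how the span was chosen. A rough sketch of how such a row could be assembled into a prompt/target pair (the sentinel strings are illustrative assumptions, not part of the dataset):

```python
# Minimal sketch: turn one dataset row into a FIM prompt/target pair.
# "<fim_prefix>" etc. are assumed placeholder sentinels; real FIM training
# uses model-specific special tokens.
def make_fim_example(row: dict) -> tuple[str, str]:
    prompt = (
        "<fim_prefix>" + row["prefix"]
        + "<fim_suffix>" + row["suffix"]
        + "<fim_middle>"
    )
    return prompt, row["middle"]

row = {"prefix": "def add(a, b):\n    return ", "suffix": "\n", "middle": "a + b"}
prompt, target = make_fim_example(row)
```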
graph.rs | use crate::{CommandEncoder, CommandEncoderOutput};
use generational_arena::Arena;
use moonwave_resources::{BindGroup, Buffer, ResourceRc, SampledTexture, TextureView};
use multimap::MultiMap;
use parking_lot::{RwLock, RwLockReadGuard};
use rayon::{prelude::*, ThreadPool};
use std::{
collections::HashMap,
fmt::{Debug, Formatter},
sync::Arc,
};
pub use generational_arena::Index;
pub trait FrameGraphNode: Send + Sync + 'static {
fn execute(
&self,
_inputs: &[Option<FrameNodeValue>],
_outputs: &mut [Option<FrameNodeValue>],
_encoder: &mut CommandEncoder,
) {
}
fn execute_raw(
&self,
inputs: &[Option<FrameNodeValue>],
outputs: &mut [Option<FrameNodeValue>],
device: &wgpu::Device,
_queue: &wgpu::Queue,
_sc_frame: &wgpu::SwapChainFrame,
) -> CommandEncoderOutput {
let mut encoder = CommandEncoder::new(device, "NodeGraphEncoder");
self.execute(inputs, outputs, &mut encoder);
encoder.finish()
}
}
const MAX_LAYERS: usize = 8;
const MAX_NODES_PER_LAYER: usize = 8;
const MAX_INPUT_OUTPUTS_PER_NODE: usize = 16;
struct ConnectedNode {
name: String,
node: Arc<dyn FrameGraphNode>,
inputs: [Option<Index>; MAX_INPUT_OUTPUTS_PER_NODE],
}
struct ConnectedEdges {
owner_node_index: Index,
output_index: usize,
}
pub struct FrameGraph {
node_arena: RwLock<Arena<ConnectedNode>>,
edges_arena: RwLock<Arena<ConnectedEdges>>,
end_node: Index,
output_map: Vec<Vec<Option<FrameNodeValue>>>,
levels_map: MultiMap<usize, TraversedGraphNode>,
traversed_node_cache: HashMap<Index, usize>,
}
impl FrameGraph {
/// Creates a new empty graph.
pub fn new<T: FrameGraphNode>(end_node: T) -> Self {
let mut node_arena = Arena::with_capacity(MAX_LAYERS * MAX_NODES_PER_LAYER);
let end_node = node_arena.insert(ConnectedNode {
name: "EndNode".to_string(),
node: Arc::new(end_node),
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
});
Self {
node_arena: RwLock::new(node_arena),
edges_arena: RwLock::new(Arena::with_capacity(
MAX_LAYERS * MAX_INPUT_OUTPUTS_PER_NODE * MAX_NODES_PER_LAYER,
)),
output_map: vec![vec![None; MAX_NODES_PER_LAYER * MAX_INPUT_OUTPUTS_PER_NODE]; MAX_LAYERS],
levels_map: MultiMap::with_capacity(MAX_LAYERS),
traversed_node_cache: HashMap::with_capacity(
MAX_LAYERS * MAX_INPUT_OUTPUTS_PER_NODE * MAX_NODES_PER_LAYER,
),
end_node,
}
}
/// Returns the end node.
pub fn get_end_node(&self) -> Index {
self.end_node
}
/// Resets the frame graph by removing all nodes and setting up a new end node.
pub fn reset(&mut self) {
let mut nodes = self.node_arena.write();
let end_node_impl = nodes.get(self.end_node).unwrap().node.clone();
nodes.clear();
self.traversed_node_cache.clear();
self.edges_arena.write().clear();
self.end_node = nodes.insert(ConnectedNode {
name: "EndNode".to_string(),
node: end_node_impl,
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
});
}
/// Adds a new node to the graph.
pub fn add_node<T: FrameGraphNode>(&self, node: T, name: &str) -> Index {
self.node_arena.write().insert(ConnectedNode {
name: name.to_string(),
node: Arc::new(node),
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
})
}
/// Connects one node's output to another node's input.
pub fn connect(
&self,
source: Index,
source_output: usize,
destination: Index,
destination_input: usize,
) -> Result<(), GraphConnectError> {
// Validate connection parameters.
if destination_input >= MAX_INPUT_OUTPUTS_PER_NODE {
return Err(GraphConnectError::MaximumInputsReached);
};
if source_output >= MAX_INPUT_OUTPUTS_PER_NODE {
return Err(GraphConnectError::MaximumOutputsReached);
};
let mut edges = self.edges_arena.write();
let mut nodes = self.node_arena.write();
let destination_node = nodes
.get_mut(destination)
.ok_or(GraphConnectError::InvalidDestination)?;
// Target input is already connected.
if destination_node.inputs[destination_input].is_some() {
return Err(GraphConnectError::AlreadyConnected);
}
// Target input is empty so simply create the connection.
let edge = edges.insert(ConnectedEdges {
owner_node_index: source,
output_index: source_output,
});
destination_node.inputs[destination_input] = Some(edge);
Ok(())
}
fn traverse_node(
cache: &mut HashMap<Index, usize>,
levels_map: &mut MultiMap<usize, TraversedGraphNode>,
nodes: &RwLockReadGuard<Arena<ConnectedNode>>,
edges: &RwLockReadGuard<Arena<ConnectedEdges>>,
node_index: Index,
level: usize,
) {
// Build traversed node with input/output mapping info.
let mut traversed_node = TraversedGraphNode {
index: node_index,
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
};
// Remove previously traversed copies of this node from all lower levels.
let mut has_retained = false;
for l in 0..level {
// Remove previous traversed node from level.
let vec = levels_map.get_vec_mut(&l).unwrap();
let before_len = vec.len();
vec.retain(|x| x.index != node_index);
if before_len != vec.len() {
has_retained = true;
}
}
// Update all inputs that still reference kicked out node.
if has_retained {
for l in 0..level {
let vec = levels_map.get_vec_mut(&l).unwrap();
for node in vec {
for input in &mut node.inputs {
if let Some((nlevel, _, index)) = input {
if index == &node_index {
*nlevel = level;
}
}
}
}
}
}
// Loop through all inputs
let next_level = level + 1;
let node = nodes.get(node_index).unwrap();
for (input_index, input) in node.inputs.iter().enumerate() {
if let Some(input) = input {
let edge = edges.get(*input).unwrap();
let inner_node = edge.owner_node_index;
traversed_node.inputs[input_index] = Some((next_level, edge.output_index, inner_node));
Self::traverse_node(cache, levels_map, nodes, edges, inner_node, next_level);
}
}
// Store traversed node at level.
//let traversed_index = levels_map.get_vec(&level).map(|x| x.len()).unwrap_or(0);
//cache.insert(node_index, traversed_index);
// TODO: Due to retaining this index breaks currently :'(
levels_map.insert(level, traversed_node);
}
/// Executes the graph using the given scheduler.
pub fn execute<T: DeviceHost>(
&mut self,
sc_frame: Arc<wgpu::SwapChainFrame>,
device_host: &'static T,
pool: &ThreadPool,
) {
{
{
optick::event!("FrameGraph::traverse");
// Gain read access to nodes and connections.
let nodes = self.node_arena.read();
let edges = self.edges_arena.read();
// Start traversing from end.
self.levels_map.clear();
Self::traverse_node(
&mut self.traversed_node_cache,
&mut self.levels_map,
&nodes,
&edges,
self.end_node,
0,
);
}
let cache = &mut self.traversed_node_cache;
// Create async executor.
let mut local_pool = futures::executor::LocalPool::new();
let local_spawner = local_pool.spawner();
// Execute levels in order, deepest first.
let mut all_levels = self.levels_map.keys().cloned().collect::<Vec<_>>();
all_levels.sort_unstable();
let max_levels = all_levels.len();
for level in all_levels.into_iter().rev() {
optick::event!("FrameGraph::execute_level");
optick::tag!("level", level as u32);
// Get rid of duplicated nodes.
let mut nodes_in_level = self.levels_map.get_vec_mut(&level).unwrap().clone();
nodes_in_level.sort_unstable_by_key(|x| x.index);
nodes_in_level.dedup_by_key(|x| x.index);
// Build cache for this level
for (index, node) in nodes_in_level.iter().enumerate() {
cache.insert(node.index, index);
}
// Get chunks
let nodes = self.node_arena.read();
let read_nodes = nodes_in_level
.iter()
.map(|node| (nodes.get(node.index).unwrap(), node.inputs))
.collect::<Vec<_>>();
let mut empty = [Vec::with_capacity(0)];
#[allow(clippy::type_complexity)]
let (outputs, previous_outputs): (
&mut [Vec<Option<FrameNodeValue>>],
&mut [Vec<Option<FrameNodeValue>>],
) = if level == (max_levels - 1) {
(&mut self.output_map, &mut empty)
} else {
self.output_map.split_at_mut(level + 1)
};
let outputs_per_node = outputs[outputs.len() - 1]
.chunks_mut(MAX_INPUT_OUTPUTS_PER_NODE)
.enumerate()
.collect::<Vec<_>>();
// Execute
let encoder_outputs = pool.install(|| {
read_nodes
.par_iter()
.zip(outputs_per_node)
.enumerate()
.map(|(_i, ((node, inputs), (_oi, outputs)))| {
optick::event!("FrameGraph::node");
// Prepare node execution
optick::tag!("name", node.name);
let node_trait = node.node.clone();
let label = format!("NodeCommandEncoder_{}", node.name);
// Map outputs -> inputs.
/*
for (idx, input) in inputs.iter().enumerate() {
if let Some((target_level, output_index, node_index)) = input {
let i = cache.get(&node_index).unwrap();
println!(
"Mapping input #{} to level = {} ({}) and index = {} ({}, {})",
idx,
target_level,
previous_outputs.len() - (target_level - level),
i * MAX_INPUT_OUTPUTS_PER_NODE + output_index,
i,
output_index
);
} else {
println!("Mapping input #{} to None", i);
}
}
*/
let inputs = inputs
.iter()
.map(|input| {
input.map(|(target_level, output_index, node_index)| {
let i = cache.get(&node_index).unwrap();
&previous_outputs[previous_outputs.len() - (target_level - level)]
[i * MAX_INPUT_OUTPUTS_PER_NODE + output_index]
})
})
.map(|input| match input {
Some(Some(rf)) => Some(rf.clone()),
_ => None,
})
.collect::<Vec<_>>();
let sc_cloned = sc_frame.clone();
let out = {
optick::event!("FrameGraph::record_commands");
optick::tag!("name", label);
// Execute the node asynchronously.
node_trait.execute_raw(
&inputs,
outputs,
device_host.get_device(),
device_host.get_queue(),
&*sc_cloned,
)
};
out
})
.collect::<Vec<_>>()
});
{
optick::event!("FrameGraph::submit_level");
optick::tag!("level", level as u32);
let mut buffers = Vec::with_capacity(encoder_outputs.len());
for out in encoder_outputs {
if let Some(buffer) = out.command_buffer |
}
device_host.get_queue().submit(buffers);
}
}
}
// Reset
optick::event!("FrameGraph::reset");
self.reset();
}
}
#[derive(Clone)]
pub enum FrameNodeValue {
Buffer(ResourceRc<Buffer>),
BindGroup(ResourceRc<BindGroup>),
TextureView(ResourceRc<TextureView>),
SampledTexture(SampledTexture),
}
impl std::fmt::Debug for FrameNodeValue {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Self::Buffer(_) => f.write_str("Buffer"),
Self::BindGroup(_) => f.write_str("BindGroup"),
Self::TextureView(_) => f.write_str("Texture"),
Self::SampledTexture(_) => f.write_str("SampledTexture"),
}
}
}
use thiserror::Error;
#[derive(Error, Debug)]
pub enum GraphConnectError {
#[error("The target node has reached its input limit")]
MaximumInputsReached,
#[error("The source node has reached its outputs limit")]
MaximumOutputsReached,
#[error("The target node does not exist")]
InvalidDestination,
#[error("The target nodes input is already connected")]
AlreadyConnected,
}
#[derive(Clone)]
struct TraversedGraphNode {
index: Index,
inputs: [Option<(usize, usize, Index)>; MAX_INPUT_OUTPUTS_PER_NODE],
}
pub trait DeviceHost: Send + Sync + 'static {
fn get_device(&self) -> &wgpu::Device;
fn get_queue(&self) -> &wgpu::Queue;
}
macro_rules! impl_get_node_specific {
($getter:ident, $ty:ident, $rty:ty) => {
impl FrameNodeValue {
pub fn $getter(&self) -> &$rty {
match self {
FrameNodeValue::$ty(group) => group,
_ => panic!(
"Unexpected frame node value, expected '{}' but received '{:?}'",
stringify!($ty),
self
),
}
}
}
};
}
impl_get_node_specific!(get_bind_group, BindGroup, ResourceRc<BindGroup>);
impl_get_node_specific!(get_texture_view, TextureView, ResourceRc<TextureView>);
impl_get_node_specific!(get_sampled_texture, SampledTexture, SampledTexture);
| {
buffers.push(buffer);
} | conditional_block |
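The `graph.rs` example above executes the frame graph by walking backwards from the end node, tagging each producer with a level (its distance from the end), then running levels from deepest to shallowest so every node's inputs are ready before it runs. A simplified sketch of that traversal idea, written in Python and ignoring the arena, caching, and retention details of the Rust code:

```python
from collections import defaultdict

def assign_levels(end_node, inputs_of):
    """Walk backwards from the end node, tagging every producer with its depth."""
    levels = defaultdict(list)

    def visit(node, level):
        levels[level].append(node)
        for source in inputs_of.get(node, ()):
            visit(source, level + 1)

    visit(end_node, 0)
    return levels

# "a" feeds both "b" and the end node; "b" feeds the end node.
inputs_of = {"end": ["a", "b"], "b": ["a"]}
levels = assign_levels("end", inputs_of)

# Run the deepest level first so producers finish before their consumers.
# (The Rust code also drops shallower duplicates, e.g. "a" at level 1,
# so each node runs exactly once at its deepest level.)
for level in sorted(levels, reverse=True):
    for node in set(levels[level]):
        pass  # execute the node here; its outputs feed lower levels
```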
signature_format.py | #!/usr/bin/python3
#-*- encoding: Utf-8 -*-
from typing import Dict, List, Set, Sequence, Union, Any
from base64 import b64decode, b64encode
from math import log, exp, sqrt
from binascii import crc32
from enum import IntEnum
from io import BytesIO
from ctypes import *
DATA_URI_PREFIX = 'data:audio/vnd.shazam.sig;base64,'
class SampleRate(IntEnum): # Enum keys are sample rates in Hz
_8000 = 1
_11025 = 2
_16000 = 3
_32000 = 4
_44100 = 5
_48000 = 6
class FrequencyBand(IntEnum): # Enum keys are frequency ranges in Hz
_0_250 = -1 # Nothing above 250 Hz is actually stored
_250_520 = 0
_520_1450 = 1
_1450_3500 = 2
_3500_5500 = 3 # This one (3.5 KHz - 5.5 KHz) should not be used in legacy mode
class RawSignatureHeader(LittleEndianStructure):
_pack = True
_fields_ = [
('magic1', c_uint32), # Fixed 0xcafe2580 - 80 25 fe ca
('crc32', c_uint32), # CRC-32 for all of the following (so excluding these first 8 bytes)
('size_minus_header', c_uint32), # Total size of the message, minus the size of the current header (which is 48 bytes)
('magic2', c_uint32), # Fixed 0x94119c00 - 00 9c 11 94
('void1', c_uint32 * 3), # Void
('shifted_sample_rate_id', c_uint32), # A member of SampleRate (usually 3 for 16000 Hz), left-shifted by 27 (usually giving 0x18000000 - 00 00 00 18)
('void2', c_uint32 * 2), # Void, or maybe used only in "rolling window" mode?
('number_samples_plus_divided_sample_rate', c_uint32), # int(number_of_samples + sample_rate * 0.24) - As the sample rate is known thanks to the field above, it can be inferred and subtracted so that we obtain the number of samples, and from the number of samples and sample rate we can obtain the length of the recording
('fixed_value', c_uint32) # Calculated as ((15 << 19) + 0x40000) - 0x7c0000 or 00 00 7c 00 - seems pretty constant, may be different in the "SigType.STREAMING" mode
]
class FrequencyPeak:
fft_pass_number : int = None
peak_magnitude : int = None
corrected_peak_frequency_bin : int = None
sample_rate_hz : int = None
def __init__(self, fft_pass_number : int, peak_magnitude : int, corrected_peak_frequency_bin : int, sample_rate_hz : int):
self.fft_pass_number = fft_pass_number
self.peak_magnitude = peak_magnitude
self.corrected_peak_frequency_bin = corrected_peak_frequency_bin
self.sample_rate_hz = sample_rate_hz
def get_frequency_hz(self) -> float:
return self.corrected_peak_frequency_bin * (self.sample_rate_hz / 2 / 1024 / 64)
# ^ Convert back a FFT bin to a frequency, given a 16 KHz sample
# rate, 1024 useful bins and the multiplication by 64 made before
# storing the information
def get_amplitude_pcm(self) -> float:
return sqrt(exp((self.peak_magnitude - 6144) / 1477.3) * (1 << 17) / 2) / 1024
# ^ Not sure about this calculation but gives small enough numbers
def get_seconds(self) -> float:
return (self.fft_pass_number * 128) / self.sample_rate_hz
# ^ Assume that new FFT bins are emitted every 128 samples, on a
# standard 16 KHz sample rate basis.
class DecodedMessage:
sample_rate_hz : int = None
number_samples : int = None
frequency_band_to_sound_peaks : Dict[FrequencyBand, List[FrequencyPeak]] = None
@classmethod
def decode_from_binary(cls, data : bytes):
self = cls()
buf = BytesIO(data)
buf.seek(8)
checksummable_data = buf.read()
buf.seek(0)
# Read and check the header
header = RawSignatureHeader()
buf.readinto(header)
assert header.magic1 == 0xcafe2580
assert header.size_minus_header == len(data) - 48
assert crc32(checksummable_data) & 0xffffffff == header.crc32
assert header.magic2 == 0x94119c00
self.sample_rate_hz = int(SampleRate(header.shifted_sample_rate_id >> 27).name.strip('_'))
self.number_samples = int(header.number_samples_plus_divided_sample_rate - self.sample_rate_hz * 0.24)
# Read the type-length-value sequence that follows the header
# The first chunk is fixed and has no value, but instead just repeats
# the length of the message size minus the header:
assert int.from_bytes(buf.read(4), 'little') == 0x40000000
assert int.from_bytes(buf.read(4), 'little') == len(data) - 48
# Then, lists of frequency peaks for respective bands follow
self.frequency_band_to_sound_peaks = {}
while True:
tlv_header = buf.read(8)
if not tlv_header:
break
frequency_band_id = int.from_bytes(tlv_header[:4], 'little')
frequency_peaks_size = int.from_bytes(tlv_header[4:], 'little')
frequency_peaks_padding = -frequency_peaks_size % 4
frequency_peaks_buf = BytesIO(buf.read(frequency_peaks_size))
buf.read(frequency_peaks_padding)
# Decode frequency peaks
frequency_band = FrequencyBand(frequency_band_id - 0x60030040)
fft_pass_number = 0
self.frequency_band_to_sound_peaks[frequency_band] = []
while True:
raw_fft_pass : bytes = frequency_peaks_buf.read(1)
if not raw_fft_pass:
break
fft_pass_offset : int = raw_fft_pass[0]
if fft_pass_offset == 0xff:
fft_pass_number = int.from_bytes(frequency_peaks_buf.read(4), 'little')
continue
else:
fft_pass_number += fft_pass_offset
peak_magnitude = int.from_bytes(frequency_peaks_buf.read(2), 'little')
corrected_peak_frequency_bin = int.from_bytes(frequency_peaks_buf.read(2), 'little')
self.frequency_band_to_sound_peaks[frequency_band].append(
FrequencyPeak(fft_pass_number, peak_magnitude, corrected_peak_frequency_bin, self.sample_rate_hz)
)
return self
@classmethod
def decode_from_uri(cls, uri : str):
assert uri.startswith(DATA_URI_PREFIX)
return cls.decode_from_binary(b64decode(uri.replace(DATA_URI_PREFIX, '', 1)))
"""
Encode the current object to a readable JSON format, for debugging
purposes.
"""
def encode_to_json(self) -> dict:
return {
"sample_rate_hz": self.sample_rate_hz,
"number_samples": self.number_samples,
"_seconds": self.number_samples / self.sample_rate_hz,
"frequency_band_to_peaks": {
frequency_band.name.strip('_'): [
{ | "_seconds": frequency_peak.get_seconds()
}
for frequency_peak in frequency_peaks
]
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items())
}
}
def encode_to_binary(self) -> bytes:
header = RawSignatureHeader()
header.magic1 = 0xcafe2580
header.magic2 = 0x94119c00
header.shifted_sample_rate_id = int(getattr(SampleRate, '_%s' % self.sample_rate_hz)) << 27
header.fixed_value = ((15 << 19) + 0x40000)
header.number_samples_plus_divided_sample_rate = int(self.number_samples + self.sample_rate_hz * 0.24)
contents_buf = BytesIO()
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items()):
peaks_buf = BytesIO()
fft_pass_number = 0
# NOTE: Correctly filtering and sorting the peaks within the members
# of "self.frequency_band_to_sound_peaks" is the responsability of the
# caller
for frequency_peak in frequency_peaks:
assert frequency_peak.fft_pass_number >= fft_pass_number
if frequency_peak.fft_pass_number - fft_pass_number >= 255:
peaks_buf.write(b'\xff')
peaks_buf.write((frequency_peak.fft_pass_number).to_bytes(4, 'little'))
fft_pass_number = frequency_peak.fft_pass_number
peaks_buf.write(bytes([frequency_peak.fft_pass_number - fft_pass_number]))
peaks_buf.write((frequency_peak.peak_magnitude).to_bytes(2, 'little'))
peaks_buf.write((frequency_peak.corrected_peak_frequency_bin).to_bytes(2, 'little'))
fft_pass_number = frequency_peak.fft_pass_number
contents_buf.write((0x60030040 + int(frequency_band)).to_bytes(4, 'little'))
contents_buf.write(len(peaks_buf.getvalue()).to_bytes(4, 'little'))
contents_buf.write(peaks_buf.getvalue())
contents_buf.write(b'\x00' * (-len(peaks_buf.getvalue()) % 4))
# Below, write the full message as a binary stream
header.size_minus_header = len(contents_buf.getvalue()) + 8
buf = BytesIO()
buf.write(bytes(header)) # We will rewrite it just after in order to include the final CRC-32
buf.write((0x40000000).to_bytes(4, 'little'))
buf.write((len(contents_buf.getvalue()) + 8).to_bytes(4, 'little'))
buf.write(contents_buf.getvalue())
buf.seek(8)
header.crc32 = crc32(buf.read()) & 0xffffffff
buf.seek(0)
buf.write(bytes(header))
return buf.getvalue()
def encode_to_uri(self) -> str:
return DATA_URI_PREFIX + b64encode(self.encode_to_binary()).decode('ascii') | "fft_pass_number": frequency_peak.fft_pass_number,
"peak_magnitude": frequency_peak.peak_magnitude,
"corrected_peak_frequency_bin": frequency_peak.corrected_peak_frequency_bin,
"_frequency_hz": frequency_peak.get_frequency_hz(),
"_amplitude_pcm": frequency_peak.get_amplitude_pcm(), | random_line_split |
signature_format.py | #!/usr/bin/python3
#-*- encoding: Utf-8 -*-
from typing import Dict, List, Set, Sequence, Union, Any
from base64 import b64decode, b64encode
from math import log, exp, sqrt
from binascii import crc32
from enum import IntEnum
from io import BytesIO
from ctypes import *
DATA_URI_PREFIX = 'data:audio/vnd.shazam.sig;base64,'
class SampleRate(IntEnum): # Enum keys are sample rates in Hz
_8000 = 1
_11025 = 2
_16000 = 3
_32000 = 4
_44100 = 5
_48000 = 6
class FrequencyBand(IntEnum): # Enum keys are frequency ranges in Hz
_0_250 = -1 # Nothing above 250 Hz is actually stored
_250_520 = 0
_520_1450 = 1
_1450_3500 = 2
_3500_5500 = 3 # This one (3.5 KHz - 5.5 KHz) should not be used in legacy mode
class RawSignatureHeader(LittleEndianStructure):
_pack = True
_fields_ = [
('magic1', c_uint32), # Fixed 0xcafe2580 - 80 25 fe ca
('crc32', c_uint32), # CRC-32 for all of the following (so excluding these first 8 bytes)
('size_minus_header', c_uint32), # Total size of the message, minus the size of the current header (which is 48 bytes)
('magic2', c_uint32), # Fixed 0x94119c00 - 00 9c 11 94
('void1', c_uint32 * 3), # Void
('shifted_sample_rate_id', c_uint32), # A member of SampleRate (usually 3 for 16000 Hz), left-shifted by 27 (usually giving 0x18000000 - 00 00 00 18)
('void2', c_uint32 * 2), # Void, or maybe used only in "rolling window" mode?
('number_samples_plus_divided_sample_rate', c_uint32), # int(number_of_samples + sample_rate * 0.24) - As the sample rate is known thanks to the field above, it can be inferred and subtracted so that we obtain the number of samples, and from the number of samples and sample rate we can obtain the length of the recording
('fixed_value', c_uint32) # Calculated as ((15 << 19) + 0x40000) - 0x7c0000 or 00 00 7c 00 - seems pretty constant, may be different in the "SigType.STREAMING" mode
]
class FrequencyPeak:
fft_pass_number : int = None
peak_magnitude : int = None
corrected_peak_frequency_bin : int = None
sample_rate_hz : int = None
def __init__(self, fft_pass_number : int, peak_magnitude : int, corrected_peak_frequency_bin : int, sample_rate_hz : int):
self.fft_pass_number = fft_pass_number
self.peak_magnitude = peak_magnitude
self.corrected_peak_frequency_bin = corrected_peak_frequency_bin
self.sample_rate_hz = sample_rate_hz
def get_frequency_hz(self) -> float:
return self.corrected_peak_frequency_bin * (self.sample_rate_hz / 2 / 1024 / 64)
# ^ Convert back a FFT bin to a frequency, given a 16 KHz sample
# rate, 1024 useful bins and the multiplication by 64 made before
# storing the information
def get_amplitude_pcm(self) -> float:
return sqrt(exp((self.peak_magnitude - 6144) / 1477.3) * (1 << 17) / 2) / 1024
# ^ Not sure about this calculation but gives small enough numbers
def get_seconds(self) -> float:
return (self.fft_pass_number * 128) / self.sample_rate_hz
# ^ Assume that new FFT bins are emitted every 128 samples, on a
# standard 16 KHz sample rate basis.
class DecodedMessage:
sample_rate_hz : int = None
number_samples : int = None
frequency_band_to_sound_peaks : Dict[FrequencyBand, List[FrequencyPeak]] = None
@classmethod
def decode_from_binary(cls, data : bytes):
|
@classmethod
def decode_from_uri(cls, uri : str):
assert uri.startswith(DATA_URI_PREFIX)
return cls.decode_from_binary(b64decode(uri.replace(DATA_URI_PREFIX, '', 1)))
"""
Encode the current object to a readable JSON format, for debugging
purposes.
"""
def encode_to_json(self) -> dict:
return {
"sample_rate_hz": self.sample_rate_hz,
"number_samples": self.number_samples,
"_seconds": self.number_samples / self.sample_rate_hz,
"frequency_band_to_peaks": {
frequency_band.name.strip('_'): [
{
"fft_pass_number": frequency_peak.fft_pass_number,
"peak_magnitude": frequency_peak.peak_magnitude,
"corrected_peak_frequency_bin": frequency_peak.corrected_peak_frequency_bin,
"_frequency_hz": frequency_peak.get_frequency_hz(),
"_amplitude_pcm": frequency_peak.get_amplitude_pcm(),
"_seconds": frequency_peak.get_seconds()
}
for frequency_peak in frequency_peaks
]
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items())
}
}
def encode_to_binary(self) -> bytes:
header = RawSignatureHeader()
header.magic1 = 0xcafe2580
header.magic2 = 0x94119c00
header.shifted_sample_rate_id = int(getattr(SampleRate, '_%s' % self.sample_rate_hz)) << 27
header.fixed_value = ((15 << 19) + 0x40000)
header.number_samples_plus_divided_sample_rate = int(self.number_samples + self.sample_rate_hz * 0.24)
contents_buf = BytesIO()
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items()):
peaks_buf = BytesIO()
fft_pass_number = 0
# NOTE: Correctly filtering and sorting the peaks within the members
# of "self.frequency_band_to_sound_peaks" is the responsability of the
# caller
for frequency_peak in frequency_peaks:
assert frequency_peak.fft_pass_number >= fft_pass_number
if frequency_peak.fft_pass_number - fft_pass_number >= 255:
peaks_buf.write(b'\xff')
peaks_buf.write((frequency_peak.fft_pass_number).to_bytes(4, 'little'))
fft_pass_number = frequency_peak.fft_pass_number
peaks_buf.write(bytes([frequency_peak.fft_pass_number - fft_pass_number]))
peaks_buf.write((frequency_peak.peak_magnitude).to_bytes(2, 'little'))
peaks_buf.write((frequency_peak.corrected_peak_frequency_bin).to_bytes(2, 'little'))
fft_pass_number = frequency_peak.fft_pass_number
contents_buf.write((0x60030040 + int(frequency_band)).to_bytes(4, 'little'))
contents_buf.write(len(peaks_buf.getvalue()).to_bytes(4, 'little'))
contents_buf.write(peaks_buf.getvalue())
contents_buf.write(b'\x00' * (-len(peaks_buf.getvalue()) % 4))
# Below, write the full message as a binary stream
header.size_minus_header = len(contents_buf.getvalue()) + 8
buf = BytesIO()
buf.write(bytes(header)) # We will rewrite it just after in order to include the final CRC-32
buf.write((0x40000000).to_bytes(4, 'little'))
buf.write((len(contents_buf.getvalue()) + 8).to_bytes(4, 'little'))
buf.write(contents_buf.getvalue())
buf.seek(8)
header.crc32 = crc32(buf.read()) & 0xffffffff
buf.seek(0)
buf.write(bytes(header))
return buf.getvalue()
def encode_to_uri(self) -> str:
return DATA_URI_PREFIX + b64encode(self.encode_to_binary()).decode('ascii')
| self = cls()
buf = BytesIO(data)
buf.seek(8)
checksummable_data = buf.read()
buf.seek(0)
# Read and check the header
header = RawSignatureHeader()
buf.readinto(header)
assert header.magic1 == 0xcafe2580
assert header.size_minus_header == len(data) - 48
assert crc32(checksummable_data) & 0xffffffff == header.crc32
assert header.magic2 == 0x94119c00
self.sample_rate_hz = int(SampleRate(header.shifted_sample_rate_id >> 27).name.strip('_'))
self.number_samples = int(header.number_samples_plus_divided_sample_rate - self.sample_rate_hz * 0.24)
# Read the type-length-value sequence that follows the header
# The first chunk is fixed and has no value, but instead just repeats
# the length of the message size minus the header:
assert int.from_bytes(buf.read(4), 'little') == 0x40000000
assert int.from_bytes(buf.read(4), 'little') == len(data) - 48
# Then, lists of frequency peaks for respective bands follow
self.frequency_band_to_sound_peaks = {}
while True:
tlv_header = buf.read(8)
if not tlv_header:
break
frequency_band_id = int.from_bytes(tlv_header[:4], 'little')
frequency_peaks_size = int.from_bytes(tlv_header[4:], 'little')
frequency_peaks_padding = -frequency_peaks_size % 4
frequency_peaks_buf = BytesIO(buf.read(frequency_peaks_size))
buf.read(frequency_peaks_padding)
# Decode frequency peaks
frequency_band = FrequencyBand(frequency_band_id - 0x60030040)
fft_pass_number = 0
self.frequency_band_to_sound_peaks[frequency_band] = []
while True:
raw_fft_pass : bytes = frequency_peaks_buf.read(1)
if not raw_fft_pass:
break
fft_pass_offset : int = raw_fft_pass[0]
if fft_pass_offset == 0xff:
fft_pass_number = int.from_bytes(frequency_peaks_buf.read(4), 'little')
continue
else:
fft_pass_number += fft_pass_offset
peak_magnitude = int.from_bytes(frequency_peaks_buf.read(2), 'little')
corrected_peak_frequency_bin = int.from_bytes(frequency_peaks_buf.read(2), 'little')
self.frequency_band_to_sound_peaks[frequency_band].append(
FrequencyPeak(fft_pass_number, peak_magnitude, corrected_peak_frequency_bin, self.sample_rate_hz)
)
return self | identifier_body |
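Each type-length-value chunk after the header is keyed by `0x60030040` plus the `FrequencyBand` value, and `decode_from_binary` above recovers the band by subtracting that base. A standalone check of the mapping (the enum is re-declared so the snippet runs on its own):

```python
from enum import IntEnum

class FrequencyBand(IntEnum):   # mirrors the enum in the file above
    _0_250 = -1
    _250_520 = 0
    _520_1450 = 1
    _1450_3500 = 2
    _3500_5500 = 3

BAND_ID_BASE = 0x60030040

def band_from_chunk_id(chunk_id: int) -> FrequencyBand:
    return FrequencyBand(chunk_id - BAND_ID_BASE)

assert band_from_chunk_id(0x60030041) is FrequencyBand._520_1450
assert BAND_ID_BASE + int(FrequencyBand._3500_5500) == 0x60030043
```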
signature_format.py | #!/usr/bin/python3
#-*- encoding: Utf-8 -*-
from typing import Dict, List, Set, Sequence, Union, Any
from base64 import b64decode, b64encode
from math import log, exp, sqrt
from binascii import crc32
from enum import IntEnum
from io import BytesIO
from ctypes import *
DATA_URI_PREFIX = 'data:audio/vnd.shazam.sig;base64,'
class SampleRate(IntEnum): # Enum keys are sample rates in Hz
_8000 = 1
_11025 = 2
_16000 = 3
_32000 = 4
_44100 = 5
_48000 = 6
class FrequencyBand(IntEnum): # Enum keys are frequency ranges in Hz
_0_250 = -1 # Nothing above 250 Hz is actually stored
_250_520 = 0
_520_1450 = 1
_1450_3500 = 2
_3500_5500 = 3 # This one (3.5 KHz - 5.5 KHz) should not be used in legacy mode
class RawSignatureHeader(LittleEndianStructure):
_pack = True
_fields_ = [
('magic1', c_uint32), # Fixed 0xcafe2580 - 80 25 fe ca
('crc32', c_uint32), # CRC-32 for all of the following (so excluding these first 8 bytes)
('size_minus_header', c_uint32), # Total size of the message, minus the size of the current header (which is 48 bytes)
('magic2', c_uint32), # Fixed 0x94119c00 - 00 9c 11 94
('void1', c_uint32 * 3), # Void
('shifted_sample_rate_id', c_uint32), # A member of SampleRate (usually 3 for 16000 Hz), left-shifted by 27 (usually giving 0x18000000 - 00 00 00 18)
('void2', c_uint32 * 2), # Void, or maybe used only in "rolling window" mode?
('number_samples_plus_divided_sample_rate', c_uint32), # int(number_of_samples + sample_rate * 0.24) - As the sample rate is known thanks to the field above, it can be inferred and subtracted so that we obtain the number of samples, and from the number of samples and sample rate we can obtain the length of the recording
('fixed_value', c_uint32) # Calculated as ((15 << 19) + 0x40000) - 0x7c0000 or 00 00 7c 00 - seems pretty constant, may be different in the "SigType.STREAMING" mode
]
class FrequencyPeak:
fft_pass_number : int = None
peak_magnitude : int = None
corrected_peak_frequency_bin : int = None
sample_rate_hz : int = None
def __init__(self, fft_pass_number : int, peak_magnitude : int, corrected_peak_frequency_bin : int, sample_rate_hz : int):
self.fft_pass_number = fft_pass_number
self.peak_magnitude = peak_magnitude
self.corrected_peak_frequency_bin = corrected_peak_frequency_bin
self.sample_rate_hz = sample_rate_hz
def get_frequency_hz(self) -> float:
return self.corrected_peak_frequency_bin * (self.sample_rate_hz / 2 / 1024 / 64)
# ^ Convert back a FFT bin to a frequency, given a 16 KHz sample
# rate, 1024 useful bins and the multiplication by 64 made before
# storing the information
def get_amplitude_pcm(self) -> float:
return sqrt(exp((self.peak_magnitude - 6144) / 1477.3) * (1 << 17) / 2) / 1024
# ^ Not sure about this calculation but gives small enough numbers
def | (self) -> float:
return (self.fft_pass_number * 128) / self.sample_rate_hz
# ^ Assume that new FFT bins are emitted every 128 samples, on a
# standard 16 KHz sample rate basis.
class DecodedMessage:
sample_rate_hz : int = None
number_samples : int = None
frequency_band_to_sound_peaks : Dict[FrequencyBand, List[FrequencyPeak]] = None
@classmethod
def decode_from_binary(cls, data : bytes):
self = cls()
buf = BytesIO(data)
buf.seek(8)
checksummable_data = buf.read()
buf.seek(0)
# Read and check the header
header = RawSignatureHeader()
buf.readinto(header)
assert header.magic1 == 0xcafe2580
assert header.size_minus_header == len(data) - 48
assert crc32(checksummable_data) & 0xffffffff == header.crc32
assert header.magic2 == 0x94119c00
self.sample_rate_hz = int(SampleRate(header.shifted_sample_rate_id >> 27).name.strip('_'))
self.number_samples = int(header.number_samples_plus_divided_sample_rate - self.sample_rate_hz * 0.24)
# Read the type-length-value sequence that follows the header
# The first chunk is fixed and has no value, but instead just repeats
# the length of the message size minus the header:
assert int.from_bytes(buf.read(4), 'little') == 0x40000000
assert int.from_bytes(buf.read(4), 'little') == len(data) - 48
# Then, lists of frequency peaks for respective bands follow
self.frequency_band_to_sound_peaks = {}
while True:
tlv_header = buf.read(8)
if not tlv_header:
break
frequency_band_id = int.from_bytes(tlv_header[:4], 'little')
frequency_peaks_size = int.from_bytes(tlv_header[4:], 'little')
frequency_peaks_padding = -frequency_peaks_size % 4
frequency_peaks_buf = BytesIO(buf.read(frequency_peaks_size))
buf.read(frequency_peaks_padding)
# Decode frequency peaks
frequency_band = FrequencyBand(frequency_band_id - 0x60030040)
fft_pass_number = 0
self.frequency_band_to_sound_peaks[frequency_band] = []
while True:
raw_fft_pass : bytes = frequency_peaks_buf.read(1)
if not raw_fft_pass:
break
fft_pass_offset : int = raw_fft_pass[0]
if fft_pass_offset == 0xff:
fft_pass_number = int.from_bytes(frequency_peaks_buf.read(4), 'little')
continue
else:
fft_pass_number += fft_pass_offset
peak_magnitude = int.from_bytes(frequency_peaks_buf.read(2), 'little')
corrected_peak_frequency_bin = int.from_bytes(frequency_peaks_buf.read(2), 'little')
self.frequency_band_to_sound_peaks[frequency_band].append(
FrequencyPeak(fft_pass_number, peak_magnitude, corrected_peak_frequency_bin, self.sample_rate_hz)
)
return self
@classmethod
def decode_from_uri(cls, uri : str):
assert uri.startswith(DATA_URI_PREFIX)
return cls.decode_from_binary(b64decode(uri.replace(DATA_URI_PREFIX, '', 1)))
"""
Encode the current object to a readable JSON format, for debugging
purposes.
"""
def encode_to_json(self) -> dict:
return {
"sample_rate_hz": self.sample_rate_hz,
"number_samples": self.number_samples,
"_seconds": self.number_samples / self.sample_rate_hz,
"frequency_band_to_peaks": {
frequency_band.name.strip('_'): [
{
"fft_pass_number": frequency_peak.fft_pass_number,
"peak_magnitude": frequency_peak.peak_magnitude,
"corrected_peak_frequency_bin": frequency_peak.corrected_peak_frequency_bin,
"_frequency_hz": frequency_peak.get_frequency_hz(),
"_amplitude_pcm": frequency_peak.get_amplitude_pcm(),
"_seconds": frequency_peak.get_seconds()
}
for frequency_peak in frequency_peaks
]
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items())
}
}
def encode_to_binary(self) -> bytes:
header = RawSignatureHeader()
header.magic1 = 0xcafe2580
header.magic2 = 0x94119c00
header.shifted_sample_rate_id = int(getattr(SampleRate, '_%s' % self.sample_rate_hz)) << 27
header.fixed_value = ((15 << 19) + 0x40000)
header.number_samples_plus_divided_sample_rate = int(self.number_samples + self.sample_rate_hz * 0.24)
contents_buf = BytesIO()
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items()):
peaks_buf = BytesIO()
fft_pass_number = 0
# NOTE: Correctly filtering and sorting the peaks within the members
# of "self.frequency_band_to_sound_peaks" is the responsability of the
# caller
for frequency_peak in frequency_peaks:
assert frequency_peak.fft_pass_number >= fft_pass_number
if frequency_peak.fft_pass_number - fft_pass_number >= 255:
peaks_buf.write(b'\xff')
peaks_buf.write((frequency_peak.fft_pass_number).to_bytes(4, 'little'))
fft_pass_number = frequency_peak.fft_pass_number
peaks_buf.write(bytes([frequency_peak.fft_pass_number - fft_pass_number]))
peaks_buf.write((frequency_peak.peak_magnitude).to_bytes(2, 'little'))
peaks_buf.write((frequency_peak.corrected_peak_frequency_bin).to_bytes(2, 'little'))
fft_pass_number = frequency_peak.fft_pass_number
contents_buf.write((0x60030040 + int(frequency_band)).to_bytes(4, 'little'))
contents_buf.write(len(peaks_buf.getvalue()).to_bytes(4, 'little'))
contents_buf.write(peaks_buf.getvalue())
contents_buf.write(b'\x00' * (-len(peaks_buf.getvalue()) % 4))
# Below, write the full message as a binary stream
header.size_minus_header = len(contents_buf.getvalue()) + 8
buf = BytesIO()
buf.write(bytes(header)) # We will rewrite it just after in order to include the final CRC-32
buf.write((0x40000000).to_bytes(4, 'little'))
buf.write((len(contents_buf.getvalue()) + 8).to_bytes(4, 'little'))
buf.write(contents_buf.getvalue())
buf.seek(8)
header.crc32 = crc32(buf.read()) & 0xffffffff
buf.seek(0)
buf.write(bytes(header))
return buf.getvalue()
def encode_to_uri(self) -> str:
return DATA_URI_PREFIX + b64encode(self.encode_to_binary()).decode('ascii')
| get_seconds | identifier_name |
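Two header fields above pack values rather than storing them directly: the sample-rate enum value is left-shifted by 27 bits, and the sample count has `0.24 * sample_rate` folded into it before storage. A worked round trip at 16 kHz with an assumed 8-second recording:

```python
rate_id = 3                              # SampleRate._16000 in the enum above
assert rate_id << 27 == 0x18000000       # value stored in shifted_sample_rate_id

sample_rate_hz = 16000
number_samples = 128_000                 # assumed: 8 seconds of 16 kHz audio
stored = int(number_samples + sample_rate_hz * 0.24)
assert stored == 131_840                 # what the header field would contain
assert int(stored - sample_rate_hz * 0.24) == number_samples  # decoder recovers it
```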
signature_format.py | #!/usr/bin/python3
#-*- encoding: Utf-8 -*-
from typing import Dict, List, Set, Sequence, Union, Any
from base64 import b64decode, b64encode
from math import log, exp, sqrt
from binascii import crc32
from enum import IntEnum
from io import BytesIO
from ctypes import *
DATA_URI_PREFIX = 'data:audio/vnd.shazam.sig;base64,'
class SampleRate(IntEnum): # Enum keys are sample rates in Hz
_8000 = 1
_11025 = 2
_16000 = 3
_32000 = 4
_44100 = 5
_48000 = 6
class FrequencyBand(IntEnum): # Enum keys are frequency ranges in Hz
_0_250 = -1 # Nothing above 250 Hz is actually stored
_250_520 = 0
_520_1450 = 1
_1450_3500 = 2
_3500_5500 = 3 # This one (3.5 KHz - 5.5 KHz) should not be used in legacy mode
class RawSignatureHeader(LittleEndianStructure):
_pack = True
_fields_ = [
('magic1', c_uint32), # Fixed 0xcafe2580 - 80 25 fe ca
('crc32', c_uint32), # CRC-32 for all of the following (so excluding these first 8 bytes)
('size_minus_header', c_uint32), # Total size of the message, minus the size of the current header (which is 48 bytes)
('magic2', c_uint32), # Fixed 0x94119c00 - 00 9c 11 94
('void1', c_uint32 * 3), # Void
('shifted_sample_rate_id', c_uint32), # A member of SampleRate (usually 3 for 16000 Hz), left-shifted by 27 (usually giving 0x18000000 - 00 00 00 18)
('void2', c_uint32 * 2), # Void, or maybe used only in "rolling window" mode?
('number_samples_plus_divided_sample_rate', c_uint32), # int(number_of_samples + sample_rate * 0.24) - As the sample rate is known thanks to the field above, it can be inferred and subtracted so that we obtain the number of samples, and from the number of samples and sample rate we can obtain the length of the recording
('fixed_value', c_uint32) # Calculated as ((15 << 19) + 0x40000) - 0x7c0000 or 00 00 7c 00 - seems pretty constant, may be different in the "SigType.STREAMING" mode
]
class FrequencyPeak:
fft_pass_number : int = None
peak_magnitude : int = None
corrected_peak_frequency_bin : int = None
sample_rate_hz : int = None
def __init__(self, fft_pass_number : int, peak_magnitude : int, corrected_peak_frequency_bin : int, sample_rate_hz : int):
self.fft_pass_number = fft_pass_number
self.peak_magnitude = peak_magnitude
self.corrected_peak_frequency_bin = corrected_peak_frequency_bin
self.sample_rate_hz = sample_rate_hz
def get_frequency_hz(self) -> float:
return self.corrected_peak_frequency_bin * (self.sample_rate_hz / 2 / 1024 / 64)
# ^ Convert back a FFT bin to a frequency, given a 16 KHz sample
# rate, 1024 useful bins and the multiplication by 64 made before
# storing the information
def get_amplitude_pcm(self) -> float:
return sqrt(exp((self.peak_magnitude - 6144) / 1477.3) * (1 << 17) / 2) / 1024
# ^ Not sure about this calculation but gives small enough numbers
def get_seconds(self) -> float:
return (self.fft_pass_number * 128) / self.sample_rate_hz
# ^ Assume that new FFT bins are emitted every 128 samples, on a
# standard 16 KHz sample rate basis.
class DecodedMessage:
sample_rate_hz : int = None
number_samples : int = None
frequency_band_to_sound_peaks : Dict[FrequencyBand, List[FrequencyPeak]] = None
@classmethod
def decode_from_binary(cls, data : bytes):
self = cls()
buf = BytesIO(data)
buf.seek(8)
checksummable_data = buf.read()
buf.seek(0)
# Read and check the header
header = RawSignatureHeader()
buf.readinto(header)
assert header.magic1 == 0xcafe2580
assert header.size_minus_header == len(data) - 48
assert crc32(checksummable_data) & 0xffffffff == header.crc32
assert header.magic2 == 0x94119c00
self.sample_rate_hz = int(SampleRate(header.shifted_sample_rate_id >> 27).name.strip('_'))
self.number_samples = int(header.number_samples_plus_divided_sample_rate - self.sample_rate_hz * 0.24)
# Read the type-length-value sequence that follows the header
# The first chunk is fixed and has no value, but instead just repeats
# the length of the message size minus the header:
assert int.from_bytes(buf.read(4), 'little') == 0x40000000
assert int.from_bytes(buf.read(4), 'little') == len(data) - 48
# Then, lists of frequency peaks for respective bands follow
self.frequency_band_to_sound_peaks = {}
while True:
tlv_header = buf.read(8)
if not tlv_header:
break
frequency_band_id = int.from_bytes(tlv_header[:4], 'little')
frequency_peaks_size = int.from_bytes(tlv_header[4:], 'little')
frequency_peaks_padding = -frequency_peaks_size % 4
frequency_peaks_buf = BytesIO(buf.read(frequency_peaks_size))
buf.read(frequency_peaks_padding)
# Decode frequency peaks
frequency_band = FrequencyBand(frequency_band_id - 0x60030040)
fft_pass_number = 0
self.frequency_band_to_sound_peaks[frequency_band] = []
while True:
raw_fft_pass : bytes = frequency_peaks_buf.read(1)
if not raw_fft_pass:
break
fft_pass_offset : int = raw_fft_pass[0]
if fft_pass_offset == 0xff:
fft_pass_number = int.from_bytes(frequency_peaks_buf.read(4), 'little')
continue
else:
fft_pass_number += fft_pass_offset
peak_magnitude = int.from_bytes(frequency_peaks_buf.read(2), 'little')
corrected_peak_frequency_bin = int.from_bytes(frequency_peaks_buf.read(2), 'little')
self.frequency_band_to_sound_peaks[frequency_band].append(
FrequencyPeak(fft_pass_number, peak_magnitude, corrected_peak_frequency_bin, self.sample_rate_hz)
)
return self
@classmethod
def decode_from_uri(cls, uri : str):
assert uri.startswith(DATA_URI_PREFIX)
return cls.decode_from_binary(b64decode(uri.replace(DATA_URI_PREFIX, '', 1)))
"""
Encode the current object to a readable JSON format, for debugging
purposes.
"""
def encode_to_json(self) -> dict:
return {
"sample_rate_hz": self.sample_rate_hz,
"number_samples": self.number_samples,
"_seconds": self.number_samples / self.sample_rate_hz,
"frequency_band_to_peaks": {
frequency_band.name.strip('_'): [
{
"fft_pass_number": frequency_peak.fft_pass_number,
"peak_magnitude": frequency_peak.peak_magnitude,
"corrected_peak_frequency_bin": frequency_peak.corrected_peak_frequency_bin,
"_frequency_hz": frequency_peak.get_frequency_hz(),
"_amplitude_pcm": frequency_peak.get_amplitude_pcm(),
"_seconds": frequency_peak.get_seconds()
}
for frequency_peak in frequency_peaks
]
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items())
}
}
def encode_to_binary(self) -> bytes:
header = RawSignatureHeader()
header.magic1 = 0xcafe2580
header.magic2 = 0x94119c00
header.shifted_sample_rate_id = int(getattr(SampleRate, '_%s' % self.sample_rate_hz)) << 27
header.fixed_value = ((15 << 19) + 0x40000)
header.number_samples_plus_divided_sample_rate = int(self.number_samples + self.sample_rate_hz * 0.24)
contents_buf = BytesIO()
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items()):
|
# Below, write the full message as a binary stream
header.size_minus_header = len(contents_buf.getvalue()) + 8
buf = BytesIO()
buf.write(bytes(header)) # We will rewrite it just after in order to include the final CRC-32
buf.write((0x40000000).to_bytes(4, 'little'))
buf.write((len(contents_buf.getvalue()) + 8).to_bytes(4, 'little'))
buf.write(contents_buf.getvalue())
buf.seek(8)
header.crc32 = crc32(buf.read()) & 0xffffffff
buf.seek(0)
buf.write(bytes(header))
return buf.getvalue()
def encode_to_uri(self) -> str:
return DATA_URI_PREFIX + b64encode(self.encode_to_binary()).decode('ascii')
| peaks_buf = BytesIO()
fft_pass_number = 0
# NOTE: Correctly filtering and sorting the peaks within the members
# of "self.frequency_band_to_sound_peaks" is the responsability of the
# caller
for frequency_peak in frequency_peaks:
assert frequency_peak.fft_pass_number >= fft_pass_number
if frequency_peak.fft_pass_number - fft_pass_number >= 255:
peaks_buf.write(b'\xff')
peaks_buf.write((frequency_peak.fft_pass_number).to_bytes(4, 'little'))
fft_pass_number = frequency_peak.fft_pass_number
peaks_buf.write(bytes([frequency_peak.fft_pass_number - fft_pass_number]))
peaks_buf.write((frequency_peak.peak_magnitude).to_bytes(2, 'little'))
peaks_buf.write((frequency_peak.corrected_peak_frequency_bin).to_bytes(2, 'little'))
fft_pass_number = frequency_peak.fft_pass_number
contents_buf.write((0x60030040 + int(frequency_band)).to_bytes(4, 'little'))
contents_buf.write(len(peaks_buf.getvalue()).to_bytes(4, 'little'))
contents_buf.write(peaks_buf.getvalue())
contents_buf.write(b'\x00' * (-len(peaks_buf.getvalue()) % 4)) | conditional_block |
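`encode_to_binary` above stores each peak's `fft_pass_number` as a one-byte delta from a running counter, escaping with `0xff` plus a 4-byte absolute value whenever the jump is 255 or more. A minimal sketch of just that part of the scheme (the magnitude and frequency-bin bytes are left out):

```python
from io import BytesIO

def encode_pass_numbers(pass_numbers):
    """Delta-encode FFT pass numbers the way encode_to_binary above does."""
    buf = BytesIO()
    current = 0
    for n in pass_numbers:
        if n - current >= 255:
            buf.write(b"\xff")
            buf.write(n.to_bytes(4, "little"))   # escape: absolute 32-bit pass number
            current = n
        buf.write(bytes([n - current]))          # normal case: one-byte delta
        current = n
    return buf.getvalue()

# 0 and 10 fit in single-byte deltas; 600 needs the 0xff escape (delta 590 >= 255).
assert encode_pass_numbers([0, 10, 600]) == b"\x00\x0a\xff" + (600).to_bytes(4, "little") + b"\x00"
```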
mp3.py |
from kivy.app import App
from kivy.uix.scatter import Scatter
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.scrollview import ScrollView
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty
from kivy.graphics import Color
from kivy.uix.screenmanager import ScreenManager, Screen, SwapTransition, FadeTransition
from kivy.uix.settings import SettingsWithTabbedPanel
from kivy.config import Config
import pdb
import threading
import time
import os
import subprocess
import sys
import json
import pprint
import signal
import re
#GPIO Stuff
import RPi.GPIO as GPIO
#from networking import NetworkManagerWrapper
from nmcli import nmcli
from radiostations import RadioStations
from audio import AlsaInterface
from screensaver import Rpi_ScreenSaver
reload(sys)
sys.setdefaultencoding('utf-8')
import select
import markup
from kivy.logger import Logger
from signal import SIGTSTP, SIGTERM, SIGABRT
import string,cgi,time
from os import curdir, sep
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
RootApp = "init"
ConfigObject = None
class Mp3PiAppLayout(Screen):
global RootApp, last_activity_time, ConfigObject
isPlaying = False
proc = None
stop = threading.Event()
mythread = None
statusthread_stop = threading.Event()
statusthread = None
def args_converter(self, row_index, an_obj):
if row_index % 2:
background = [1, 1, 1, 0]
else:
background = [1, 1, 1, .5]
return {'text': an_obj['name'],
'size_hint_y': None,
'deselected_color': background}
def __init__(self, **kwargs):
global RootApp
super(Mp3PiAppLayout, self).__init__(**kwargs)
RootApp = self
self.ids['search_results_list'].adapter.bind(on_selection_change=self.change_selection)
self.ids.volume_slider.value = Alsa.get_mixer("", {})
# XXX validate!!
#self.ids.volume_slider.value = 0# int(subprocess.check_output(["pulseaudio-ctl", "full-status"]).split(" ")[0])
self.statusthread = threading.Thread(target=self.status_thread)
self.statusthread.daemon = True
self.statusthread.start()
def change_volume(self, args):
#os.system("amixer set Master %s%%" % int(args))
#os.system("pactl set-sink-volume bluez_sink.0C_A6_94_E3_76_DA %s%%" % int(args))
Alsa.set_mixer("", int(args), {})
#os.system("pulseaudio-ctl set %s%%" % int(args))
def change_selection(self, args):
if args.selection:
self.change_image(args.selection[0].text)
self.stop_second_thread()
self.start_second_thread(Stations.getStreamURLbyName(args.selection[0].text))
else:
self.stop_second_thread()
def stop_second_thread(self):
if self.isPlaying == True: # stop playing
if self.proc is not None:
if self.mythread.isAlive():
print("set stop")
self.stop.set()
#self.proc.kill() ??
Logger.info("mpg123: killing %s" % self.proc.pid)
os.kill(self.proc.pid, SIGTERM)
self.proc = None
self.isPlaying = False
def start_second_thread(self, l_text):
if self.isPlaying == False:
Logger.info("Player: starting player " + l_text)
self.isPlaying = True
self.mythread = threading.Thread(target=self.infinite_loop, args=(l_text,))
self.mythread.daemon = True
self.mythread.start()
else:
|
def infinite_loop(self, url):
iteration = 0
self.proc = subprocess.Popen(["mpg123","-o", "alsa", "-@", url], stderr=subprocess.PIPE, bufsize = 0)
line = []
while True:
if self.stop.is_set():
Logger.info("Player: stopping thread")
self.stop.clear()
return
while (select.select([self.proc.stderr], [], [], 0)[0]):
# check if mpg123 is died
#print(self.proc.returncode)
#print(self.proc.pid)
if self.proc.returncode is not None:
print("died")
return
if self.stop.is_set():
Logger.info("Player: stopping thread")
self.stop.clear()
return
char = self.proc.stderr.read(1)
if char != '\n':
line.append(char)
else:
line_joined = "".join(line)
Logger.info("MPG123: says %s " % line_joined)
if "ICY-META: StreamTitle=" in line_joined:
pairs = {}
elements = line_joined.split(";")
for element in elements:
if element:
res = re.search(r"([A-Za-z]*)='(.*)'", element)
pairs[res.group(1)] = res.group(2)
self.ids.icytags.text = pairs['StreamTitle']
if "ICY-NAME: " in line_joined:
Logger.debug("ICYTAGS: ICY name found: %s " % line_joined.replace("ICY-NAME: ", ""))
if "ICY-URL: " in line_joined:
Logger.debug("ICYTAGS: ICY url found: %s " % line_joined.replace("ICY-URL: ", ""))
if "ICY-META: StreamTitle=" in line_joined:
Logger.debug("ICYTAGS: ICY StreamTitle found: %s " % line_joined.replace("ICY-META: StreamTitle=", ""))
line = []
iteration += 1
#print('Infinite loop, iteration {}.'.format(iteration))
time.sleep(.1)
def status_thread(self):
global ConfigObject
connection = NMCLI.current_connection()
while True:
if self.statusthread_stop.is_set():
self.statusthread_stop.clear()
return
if not int(time.time()) % 5:
connection = NMCLI.current_connection()
ip = NMCLI.get_ip()
if ip is None:
self.ids.wlanstatus.text = "No network connection"
else:
self.ids.wlanstatus.text = "%s %s%%\n%s\n%s" % (connection.get('SSID', None), connection.get('SIGNAL', None), ip, time.strftime("%H:%M", time.localtime()))
#self.ids.wlanstatus.text = "%s %s%%\n%s" % ("myNetwork", Network.strength, "192.168.47.11")
# wlan symbol
lines = []
for i in self.ids.wlanstatus.canvas.get_group(None)[1:]:
if type(i) is Color:
lines.append(i)
i.a = 1
if connection is not None:
if connection['SIGNAL'] < 50:
for i in lines[0:3]:
i.a = .5
if connection['SIGNAL'] < 60:
for i in lines[0:2]:
i.a = .5
if connection['SIGNAL'] < 70:
for i in lines[0:1]:
i.a = .5
if Stations.no_data == True:
print("no data")
if ConfigObject.get('General', 'playlist') == "radio.de":
Stations.update()
if Stations.no_data == False:
del self.search_results.adapter.data[:]
self.search_results.adapter.data.extend((Stations.data))
if ConfigObject.get('General', 'playlist') == "custom":
Stations.load_playlist("custom")
if Stations.no_data == False:
del self.search_results.adapter.data[:]
self.search_results.adapter.data.extend((Stations.data))
# screensaver
timeout = ConfigObject.get('General', 'screensaver')
if timeout < 60:
timeout = 60
if (time.time() - last_activity_time) > int(timeout):
if ScreenSaver.display_state is True:
Logger.info("ScreenSaver: enabling screensaver")
ScreenSaver.display_off()
else:
if ScreenSaver.display_state is False:
Logger.info("ScreenSaver: disabling screensaver")
ScreenSaver.display_on()
time.sleep(.5)
def change_image(self, station_name):
imageUrl = Stations.getImageUrl(Stations.getIdByName(station_name))
Logger.info("ImageLoader: Loading Image from %s" % (imageUrl))
self.ids.imageid.source = imageUrl
def pause(self):
self.stop.set()
self.search_results.adapter.deselect_list(self.search_results.adapter.selection)
def next(self):
self.stop.set()
#browse(self.search_results.adapter)
if self.search_results.adapter.selection:
index = self.search_results.adapter.selection[0].index
if index < len(self.search_results.adapter.data):
self.search_results.adapter.get_view(index+1).trigger_action(duration=0)
def prev(self):
self.stop.set()
if self.search_results.adapter.selection:
index = self.search_results.adapter.selection[0].index
if index >= 1:
self.search_results.adapter.get_view(index-1).trigger_action(duration=0)
def poweroff(self):
print("poweroff")
os.system("poweroff")
def reboot(self):
print("reboot")
os.system("reboot")
class Mp3PiApp(App):
global last_activity_time, ConfigObject
# initialize GPIO stuff
GPIO.setmode(GPIO.BOARD)
GPIO_PIR = 7
GPIO.setup(GPIO_PIR,GPIO.IN)
def my_callback(channel):
Logger.debug("Presence detector triggered!")
global last_activity_time
last_activity_time = time.time()
GPIO.add_event_detect(GPIO_PIR, GPIO.RISING, callback=my_callback, bouncetime=300)
def build_config(self, config):
config.setdefaults('General', {'screensaver': "60"})
config.setdefaults('General', {'name': "name"})
config.setdefaults('General', {'playlist': "radio.de"})
def build_settings(self, settings):
settings.add_json_panel("General", self.config, data="""
[
{"type": "numeric",
"title": "Screensaver Timeout",
"section": "General",
"key": "screensaver"
},
{"type": "string",
"title": "String",
"section": "General",
"key": "name"
},
{"type": "options",
"title": "Playlist",
"section": "General",
"options": ["radio.de", "custom"],
"key": "playlist"
}
]"""
)
def on_stop(self):
# The Kivy event loop is about to stop, set a stop signal;
# otherwise the app window will close, but the Python process will
# keep running until all secondary threads exit.
#layout.clear_widgets()
#browse(self)
True
#main = self.root.manager.get_screen('main').layout
#main.stop.set()
#self.root.stop.set()
#self.root.statusthread_stop.set()
def build(self):
global last_activity_time, ConfigObject
#sm = ScreenManager(transition=FadeTransition())
self.settings_cls = MySettingsWithTabbedPanel
from kivy.core.window import Window
# Window.size = (800, 480)
def on_motion(self, etype, motionevent):
global last_activity_time
last_activity_time = time.time()
Window.bind(on_motion=on_motion)
ConfigObject = self.config
sm = ScreenManager()
sm.add_widget(Mp3PiAppLayout())
sm.add_widget(SettingsScreen())
return sm
#return Mp3PiAppLayout()
class SettingsScreen(Screen):
def __init__(self, **kwargs):
super(SettingsScreen, self).__init__(**kwargs)
networklist = []
# for net in Network.visible_aps:
# networklist.append(net['ssid'])
# if net['ssid'] is Network.ssid:
# self.ids['wlan_list'].text = net[Network.ssid]
# self.ids['wlan_list'].values = networklist
# self.ids['wlan_list'].bind(text=self.change_wlan_selection)
def change_wlan_selection(self, spinner, args):
Logger.info("WLAN: user selection %s" % args)
# Logger.info("WLAN: current WLAN %s" % Network.ssid)
# if args != Network.ssid:
# Logger.info("WLAN: changing WLAN to %s" % args)
# Network.activate([args])
def signal_handler(signal, frame):
print("exit");
sys.exit(0);
class HTTPHandler(BaseHTTPRequestHandler):
global RootApp
#print(Mp3PiAppClass)
def do_GET(self):
if self.path == "/":
self.page = markup.page()
self.page.init(title="Title")
self.page.table(border="true")
firstline = True
for row in RootApp.search_results.adapter.data:
if firstline is True:
self.page.tr()
for column in row:
#pdb.set_trace()
string1 = column
if type(column) == 'float':
string1 = str(column)
if type(column) == 'str':
string1 = unicode(column, "utf8")
self.page.th(string1, align="left")
self.page.tr.close()
firstline = False
continue
self.page.tr()
for column in row:
#pdb.set_trace()
string1 = row[column]
if type(row[column]) == 'float':
string1 = str(row[column])
if type(row[column]) == 'str':
string1 = unicode(row[column], "utf8")
self.page.td(string1)
self.page.tr.close()
self.page.p(time.time())
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(RootApp.isPlaying)
self.wfile.write(self.page)
#self.wfile.write(json.dumps(RootApp.search_results.adapter.data, indent=4, separators=('.', ': ')))
else:
print(self.path)
class MySettingsWithTabbedPanel(SettingsWithTabbedPanel):
def on_close(self):
Logger.info("main.py: MySettingsWithTabbedPanel.on_close")
def on_config_change(self, config, section, key, value):
if key == "playlist":
Stations.no_data = True
Logger.info(
"main.py: MySettingsWithTabbedPanel.on_config_change: "
"{0}, {1}, {2}, {3}".format(config, section, key, value))
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
#Network = NetworkManagerWrapper()
NMCLI = nmcli()
Alsa = AlsaInterface()
Stations = RadioStations()
ScreenSaver = Rpi_ScreenSaver()
ScreenSaver.display_on()
httpd = HTTPServer(('', 8080), HTTPHandler)
httpd_thread = threading.Thread(target=httpd.serve_forever)
httpd_thread.daemon = True
httpd_thread.start()
last_activity_time = time.time()
Mp3PiApp().run()
| Logger.info("Player: already playing") | conditional_block
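In the `mp3.py` example, mpg123 reports stream metadata on stderr as lines such as `ICY-META: StreamTitle='...';`, which the player splits on `;` and matches with a small regex. A standalone sketch of that parsing (the sample line is made up):

```python
import re

def parse_icy_meta(line):
    """Split an mpg123 ICY metadata line into key/value pairs, as the player above does."""
    pairs = {}
    for element in line.split(";"):
        if element:
            match = re.search(r"([A-Za-z]*)='(.*)'", element)
            if match:
                pairs[match.group(1)] = match.group(2)
    return pairs

line = "ICY-META: StreamTitle='Some Artist - Some Song';StreamUrl='';"
assert parse_icy_meta(line)["StreamTitle"] == "Some Artist - Some Song"
```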

# base.py
import logging
import os
import re
from collections import OrderedDict
import numexpr as ne
import numpy as np
import pandas as pd
import yaml
from tardis import constants
from astropy import units as u
from pyne import nucname
import tardis
from tardis.io.util import get_internal_data_path
k_B_cgs = constants.k_B.cgs.value
c_cgs = constants.c.cgs.value
h_cgs = constants.h.cgs.value
m_e_cgs = constants.m_e.cgs.value
e_charge_gauss = constants.e.gauss.value
logger = logging.getLogger(__name__)
tardis_dir = os.path.realpath(tardis.__path__[0])
ATOMIC_SYMBOLS_DATA = pd.read_csv(get_internal_data_path('atomic_symbols.dat'), delim_whitespace=True,
names=['atomic_number', 'symbol']).set_index('atomic_number').squeeze()
ATOMIC_NUMBER2SYMBOL = OrderedDict(ATOMIC_SYMBOLS_DATA.to_dict())
SYMBOL2ATOMIC_NUMBER = OrderedDict((y, x) for x, y in ATOMIC_NUMBER2SYMBOL.items())
synpp_default_yaml_fname = get_internal_data_path('synpp_default.yaml')
NUMERAL_MAP = tuple(zip(
(1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1),
('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
))
class MalformedError(Exception):
pass
class MalformedSpeciesError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return ('Expecting a species notation (e.g. "Si 2", "Si II", "Fe IV") '
'- supplied {0}'.format(self.malformed_element_symbol))
class MalformedElementSymbolError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return ('Expecting an atomic symbol (e.g. Fe) - supplied {0}').format(
self.malformed_element_symbol)
class MalformedQuantityError(MalformedError):
def __init__(self, malformed_quantity_string):
self.malformed_quantity_string = malformed_quantity_string
def __str__(self):
return ('Expecting a quantity string (e.g. "5 km/s") for keyword '
'- supplied {0}').format(self.malformed_quantity_string)
def int_to_roman(i):
"""
Convert an integer into its roman numeral representation.
Parameters
----------
i : int
Integer to be converted into roman numerals
Returns
-------
str
Returns roman numeral representation of i in str format.
"""
result = []
for integer, numeral in NUMERAL_MAP:
count = i // integer
result.append(numeral * count)
i -= integer * count
return ''.join(result)
def roman_to_int(roman_string):
"""
Convert a roman numeral into its corresponding integer.
Parameters
----------
roman_string : str
Roman numeral to be converted into an integer
Returns
-------
int
Returns integer representation of roman_string
"""
NUMERALS_SET = set(list(zip(*NUMERAL_MAP))[1])
roman_string = roman_string.upper()
if len(set(list(roman_string.upper())) - NUMERALS_SET) != 0:
raise ValueError('{0} does not seem to be a roman numeral'.format(
roman_string))
i = result = 0
for integer, numeral in NUMERAL_MAP:
while roman_string[i:i + len(numeral)] == numeral:
result += integer
i += len(numeral)
if result < 1:
raise ValueError('Can not interpret Roman Numeral {0}'.format(roman_string))
return result
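# Illustrative round trip for the roman numeral helpers (not part of the
# original module):
# >>> int_to_roman(14)
# 'XIV'
# >>> roman_to_int('XIV')
# 14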
def calculate_luminosity(
spec_fname, distance, wavelength_column=0,
wavelength_unit=u.angstrom, flux_column=1,
flux_unit=u.Unit('erg / (Angstrom cm2 s)')):
"""
Calculates luminosity of star.
Parameters
----------
spec_fname : file or str
File or file name to be read
distance : float
Distance to star
wavelength_column : int, optional(default = 0)
Column index in which the wavelength is stored
wavelength_unit : float, optional(default = u.angstrom)
Dictates units used for calculating wavelength.
flux_column : int, optional(default = 1)
Column index in which the flux is stored
flux_unit : str, optional(default = u.Unit('erg / (Angstrom cm2 s)')
Dictates units used for flux
Returns
-------
luminosity.value : float
Returned luminosity value of star.
wavelength.min() : float
Minimum value of wavelength of light
wavelength.max() : float
Maximum value of wavelength of light
"""
#BAD STYLE change to parse quantity
distance = u.Unit(distance)
wavelength, flux = np.loadtxt(spec_fname, usecols=(wavelength_column, flux_column), unpack=True)
flux_density = np.trapz(flux, wavelength) * (flux_unit * wavelength_unit)
luminosity = (flux_density * 4 * np.pi * distance**2).to('erg/s')
return luminosity.value, wavelength.min(), wavelength.max()
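# Illustrative call (the file name and distance are made-up placeholders): the
# two columns wavelength [Angstrom] and flux [erg / (Angstrom cm2 s)] are read
# from the file and integrated into a luminosity in erg/s.
# >>> lum, wl_min, wl_max = calculate_luminosity('spectrum.dat', '1 Mpc')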
def create_synpp_yaml(radial1d_mdl, fname, shell_no=0, lines_db=None):
"""
Create a yaml file that is readable from syn++
Parameters
----------
radial1d_mdl : Radial1DModel
Inputted object that will be read into YAML file
fname : str
File name for the synpp yaml
shell_no : int, optional(default = 0)
Number of shells
lines_db : file, optional(default = None)
Raises
------
ValueError
If the current dataset does not contain necessary reference files
"""
logger.warning('Currently only works with Si and a special setup')
if radial1d_mdl.atom_data.synpp_refs is None:
raise ValueError(
'The current atom dataset does not contain the '
'necessary reference files (please contact the authors)')
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] = -99.0
for key, value in radial1d_mdl.atom_data.synpp_refs.iterrows():
try:
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'].loc[key] = np.log10(
radial1d_mdl.plasma.tau_sobolevs[0].loc[value['line_id']])
except KeyError:
pass
relevant_synpp_refs = radial1d_mdl.atom_data.synpp_refs[
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] > -50]
with open(synpp_default_yaml_fname) as stream:
yaml_reference = yaml.load(stream, Loader=yaml.CLoader)
if lines_db is not None:
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'lines')
yaml_reference['opacity']['ref_file'] = os.path.join(lines_db, 'refs.dat')
yaml_reference['output']['min_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.min())
yaml_reference['output']['max_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.max())
#raise Exception("there's a problem here with units what units does synpp expect?")
yaml_reference['opacity']['v_ref'] = float(
(radial1d_mdl.tardis_config.structure.v_inner[0].to('km/s') /
(1000. * u.km / u.s)).value)
yaml_reference['grid']['v_outer_max'] = float(
(radial1d_mdl.tardis_config.structure.v_outer[-1].to('km/s') /
(1000. * u.km / u.s)).value)
#pdb.set_trace()
yaml_setup = yaml_reference['setups'][0]
yaml_setup['ions'] = []
yaml_setup['log_tau'] = []
yaml_setup['active'] = []
yaml_setup['temp'] = []
yaml_setup['v_min'] = []
yaml_setup['v_max'] = []
yaml_setup['aux'] = []
for species, synpp_ref in relevant_synpp_refs.iterrows():
yaml_setup['ions'].append(100 * species[0] + species[1])
yaml_setup['log_tau'].append(float(synpp_ref['ref_log_tau']))
yaml_setup['active'].append(True)
yaml_setup['temp'].append(yaml_setup['t_phot'])
yaml_setup['v_min'].append(yaml_reference['opacity']['v_ref'])
yaml_setup['v_max'].append(yaml_reference['grid']['v_outer_max'])
yaml_setup['aux'].append(1e200)
with open(fname, 'w') as f:
yaml.dump(yaml_reference, stream=f, explicit_start=True)
def intensity_black_body(nu, T):
"""
Calculate the intensity of a black-body according to the following formula
.. math::
I(\\nu, T) = \\frac{2h\\nu^3}{c^2} \\frac{1}{e^{h\\nu \\beta_\\textrm{rad}} - 1}
Parameters
----------
nu : float
Frequency of light
T : float
Temperature in kelvin
Returns
-------
Intensity : float
Returns the intensity of the black body
"""
beta_rad = 1 / (k_B_cgs * T)
coefficient = 2 * h_cgs / c_cgs ** 2
intensity = ne.evaluate('coefficient * nu**3 / '
'(exp(h_cgs * nu * beta_rad) -1 )')
return intensity
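# Illustrative call (CGS units are assumed: nu in Hz, T in K; the result is a
# specific intensity in erg s^-1 cm^-2 sr^-1 Hz^-1):
# >>> intensity_black_body(5e14, 10000.)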
def species_tuple_to_string(species_tuple, roman_numerals=True):
"""
Convert a species tuple to its corresponding string representation.
Parameters
----------
species_tuple : tuple
Tuple of 2 values indicating atomic number and number of
electrons missing
roman_numerals : bool, optional(default = TRUE)
Indicates whether the returned ion number is in roman numerals
Returns
-------
element_symbol, roman_ion_number : str
Returns corresponding string representation of given tuple
"""
atomic_number, ion_number = species_tuple
element_symbol = ATOMIC_NUMBER2SYMBOL[atomic_number]
if roman_numerals:
roman_ion_number = int_to_roman(ion_number+1)
return '{0} {1}'.format(str(element_symbol), roman_ion_number)
else:
return '{0} {1:d}'.format(element_symbol, ion_number)
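# Illustrative usage (not part of the original module):
# >>> species_tuple_to_string((14, 1))
# 'Si II'
# >>> species_tuple_to_string((14, 1), roman_numerals=False)
# 'Si 1'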
def species_string_to_tuple(species_string):
"""
Convert a species string to its corresponding tuple representation
Parameters
----------
species_string : str
String containing species symbol (e.g. Si II, Fe III)
Returns
-------
atomic_number, ion_number : tuple
Returns tuple of length 2 indicating atomic number and ion number
Raises
------
MalformedSpeciesError
If the inputted string does not match the species format
"""
try:
element_symbol, ion_number_string = re.match(r'^(\w+)\s*(\d+)',
species_string).groups()
except AttributeError:
try:
element_symbol, ion_number_string = species_string.split()
except ValueError:
raise MalformedSpeciesError(
'Species string "{0}" is not of format <element_symbol><number>'
' (e.g. Fe 2, Fe2, ..)'.format(species_string))
atomic_number = element_symbol2atomic_number(element_symbol)
try:
ion_number = roman_to_int(ion_number_string)
except ValueError:
try:
ion_number = int(ion_number_string)
except ValueError:
raise MalformedSpeciesError(
"Given ion number ('{}') could not be parsed".format(
ion_number_string))
if ion_number > atomic_number:
raise ValueError(
'Species given does not exist: ion number > atomic number')
return atomic_number, ion_number - 1
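# Illustrative usage; the ion number may be given as an Arabic or a Roman
# numeral and is returned zero-based:
# >>> species_string_to_tuple('Si 2')
# (14, 1)
# >>> species_string_to_tuple('Fe III')
# (26, 2)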
def parse_quantity(quantity_string):
"""
Changes a string into its corresponding astropy.Quantity object.
Parameters
----------
quantity_string : str
String to be converted into astropy.Quantity
Returns
-------
q : ~u.Quantity
Corresponding astropy.Quantity object for passed string
Raises
------
MalformedQuantityError
If string is not properly formatted for Astropy Quantity
"""
if not isinstance(quantity_string, str):
raise MalformedQuantityError(quantity_string)
try:
value_string, unit_string = quantity_string.split()
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
value = float(value_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
q = u.Quantity(value, unit_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
return q
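# Illustrative usage:
# >>> parse_quantity('5 km/s')     # -> astropy Quantity of 5 km / s
# >>> parse_quantity('5km/s')      # raises MalformedQuantityError (no space)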
def element_symbol2atomic_number(element_string):
"""
Takes an element symbol and returns its corresponding atomic number
Parameters
----------
element_string : str
Inputted element symbol
Returns
-------
int
Returned atomic number
"""
reformatted_element_string = reformat_element_symbol(element_string)
if reformatted_element_string not in SYMBOL2ATOMIC_NUMBER:
raise MalformedElementSymbolError(element_string)
return SYMBOL2ATOMIC_NUMBER[reformatted_element_string]
def atomic_number2element_symbol(atomic_number):
"""
Convert atomic number to string
Parameters
----------
atomic_number : int
Inputted atomic number
Returns
-------
str
Returned corresponding element symbol
"""
return ATOMIC_NUMBER2SYMBOL[atomic_number]
def reformat_element_symbol(element_string):
"""
Reformat the string so the first letter is uppercase and all subsequent
letters lowercase.
Parameters
----------
element_string : str
Inputted element symbol
Returns
-------
str
Returned reformatted element symbol
"""
return element_string[0].upper() + element_string[1:].lower()
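# Illustrative usage of the symbol helpers:
# >>> element_symbol2atomic_number('si')
# 14
# >>> atomic_number2element_symbol(14)
# 'Si'
# >>> reformat_element_symbol('sI')
# 'Si'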
def quantity_linspace(start, stop, num, **kwargs):
"""
Essentially the same input parameters as linspace, but
calculated for an astropy quantity start and stop.
Parameters
----------
start : ~astropy.Quantity
Starting value of the sequence
stop : ~astropy.Quantity
End value of the sequence
num : int
Number of samples to generate
Returns
-------
~astropy.Quantity
Returns num evenly spaced values as an astropy.Quantity
Raises
------
ValueError
If start and stop values have no unit attribute.
"""
if not (hasattr(start, 'unit') and hasattr(stop, 'unit')):
raise ValueError('Both start and stop need to be quantities with a '
'unit attribute')
return (np.linspace(start.value, stop.to(start.unit).value, num, **kwargs)
* start.unit)
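# Illustrative usage (start and stop must carry compatible astropy units):
# >>> quantity_linspace(1 * u.km / u.s, 2 * u.km / u.s, 5)
# returns a Quantity array of 5 evenly spaced velocities from 1 to 2 km/s.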
def convert_abundances_format(fname, delimiter=r'\s+'):
"""
Changes format of file containing abundances into data frame
Parameters
----------
fname : file, str
File or file name that contains abundance info
delimiter : str, optional(default = '\\s+')
Determines the separator for splitting file
Returns
-------
DataFrame
Corresponding data frame
"""
df = pd.read_csv(fname, delimiter=delimiter, comment='#', header=None)
# Drop shell index column
df.drop(df.columns[0], axis=1, inplace=True)
# Assign header row
df.columns = [nucname.name(i)
for i in range(1, df.shape[1] + 1)]
return df
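# Illustrative usage (the file name is a made-up placeholder): each input row
# holds a shell index followed by one abundance per element; the shell index
# column is dropped and the remaining columns are labelled H, He, ...
# >>> abundances = convert_abundances_format('abund.dat')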
base.py | import logging
import os
import re
from collections import OrderedDict
import numexpr as ne
import numpy as np
import pandas as pd
import yaml
from tardis import constants
from astropy import units as u
from pyne import nucname
import tardis
from tardis.io.util import get_internal_data_path
k_B_cgs = constants.k_B.cgs.value
c_cgs = constants.c.cgs.value
h_cgs = constants.h.cgs.value
m_e_cgs = constants.m_e.cgs.value
e_charge_gauss = constants.e.gauss.value
logger = logging.getLogger(__name__)
tardis_dir = os.path.realpath(tardis.__path__[0])
ATOMIC_SYMBOLS_DATA = pd.read_csv(get_internal_data_path('atomic_symbols.dat'), delim_whitespace=True,
names=['atomic_number', 'symbol']).set_index('atomic_number').squeeze()
ATOMIC_NUMBER2SYMBOL = OrderedDict(ATOMIC_SYMBOLS_DATA.to_dict())
SYMBOL2ATOMIC_NUMBER = OrderedDict((y, x) for x, y in ATOMIC_NUMBER2SYMBOL.items())
synpp_default_yaml_fname = get_internal_data_path('synpp_default.yaml')
NUMERAL_MAP = tuple(zip(
(1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1),
('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
))
class MalformedError(Exception):
pass
class MalformedSpeciesError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return ('Expecting a species notation (e.g. "Si 2", "Si II", "Fe IV") '
'- supplied {0}'.format(self.malformed_element_symbol))
class MalformedElementSymbolError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return ('Expecting an atomic symbol (e.g. Fe) - supplied {0}').format(
self.malformed_element_symbol)
class MalformedQuantityError(MalformedError):
def __init__(self, malformed_quantity_string):
self.malformed_quantity_string = malformed_quantity_string
def __str__(self):
return ('Expecting a quantity string(e.g. "5 km/s") for keyword '
'- supplied {0}').format(self.malformed_quantity_string)
def int_to_roman(i):
"""
Convert an integer into its roman numeral representation.
Parameters
----------
i : int
Integer to be converted into roman numerals
Returns
-------
str
Returns roman numeral representation of i in str format.
"""
result = []
for integer, numeral in NUMERAL_MAP:
count = i // integer
result.append(numeral * count)
i -= integer * count
return ''.join(result)
def roman_to_int(roman_string):
"""
Convert a roman numeral into its corresponding integer.
Parameters
----------
roman_string : str
Roman numeral to be converted into an integer
Returns
-------
int
Returns integer representation of roman_string
"""
NUMERALS_SET = set(list(zip(*NUMERAL_MAP))[1])
roman_string = roman_string.upper()
if len(set(list(roman_string.upper())) - NUMERALS_SET) != 0:
raise ValueError('{0} does not seem to be a roman numeral'.format(
roman_string))
i = result = 0
for integer, numeral in NUMERAL_MAP:
while roman_string[i:i + len(numeral)] == numeral:
result += integer
i += len(numeral)
if result < 1:
raise ValueError('Can not interpret Roman Numeral {0}'.format(roman_string))
return result
def calculate_luminosity(
spec_fname, distance, wavelength_column=0,
wavelength_unit=u.angstrom, flux_column=1,
flux_unit=u.Unit('erg / (Angstrom cm2 s)')):
"""
Calculates luminosity of star.
Parameters
----------
spec_fname : file or str
File or file name to be read
distance : float
Distance to star
wavelength_column : int, optional(default = 0)
Column index in which the wavelength is stored
wavelength_unit : float, optional(default = u.angstrom)
Dictates units used for calculating wavelength.
flux_column : int, optional(default = 1)
Column index in which the flux is stored
flux_unit : str, optional(default = u.Unit('erg / (Angstrom cm2 s)')
Dictates units used for flux
Returns
-------
luminosity.value : float
Returned luminosity value of star.
wavelength.min() : float
Minimum value of wavelength of light
wavelength.max() : float
Maximum value of wavelength of light
"""
#BAD STYLE change to parse quantity
distance = u.Unit(distance)
wavelength, flux = np.loadtxt(spec_fname, usecols=(wavelength_column, flux_column), unpack=True)
flux_density = np.trapz(flux, wavelength) * (flux_unit * wavelength_unit)
luminosity = (flux_density * 4 * np.pi * distance**2).to('erg/s')
return luminosity.value, wavelength.min(), wavelength.max()
def create_synpp_yaml(radial1d_mdl, fname, shell_no=0, lines_db=None):
"""
Create a yaml file that is readable from syn++
Parameters
----------
radial1d_mdl : Radial1DModel
Inputted object that will be read into YAML file
fname : str
File name for the synpp yaml
shell_no : int, optional(default = 0)
Number of shells
lines_db : file, optional(default = None)
Raises
------
ValueError
If the current dataset does not contain necessary reference files
"""
logger.warning('Currently only works with Si and a special setup')
if radial1d_mdl.atom_data.synpp_refs is not None:
raise ValueError(
'The current atom dataset does not contain the '
'necessary reference files (please contact the authors)')
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] = -99.0
for key, value in radial1d_mdl.atom_data.synpp_refs.iterrows():
try:
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'].loc[key] = np.log10(
radial1d_mdl.plasma.tau_sobolevs[0].loc[value['line_id']])
except KeyError:
pass
relevant_synpp_refs = radial1d_mdl.atom_data.synpp_refs[
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] > -50]
with open(synpp_default_yaml_fname) as stream:
yaml_reference = yaml.load(stream, Loader=yaml.CLoader)
if lines_db is not None:
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'lines')
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'refs.dat')
yaml_reference['output']['min_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.min())
yaml_reference['output']['max_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.max())
#raise Exception("there's a problem here with units what units does synpp expect?")
yaml_reference['opacity']['v_ref'] = float(
(radial1d_mdl.tardis_config.structure.v_inner[0].to('km/s') /
(1000. * u.km / u.s)).value)
yaml_reference['grid']['v_outer_max'] = float(
(radial1d_mdl.tardis_config.structure.v_outer[-1].to('km/s') /
(1000. * u.km / u.s)).value)
#pdb.set_trace()
yaml_setup = yaml_reference['setups'][0]
yaml_setup['ions'] = []
yaml_setup['log_tau'] = []
yaml_setup['active'] = []
yaml_setup['temp'] = []
yaml_setup['v_min'] = []
yaml_setup['v_max'] = []
yaml_setup['aux'] = []
for species, synpp_ref in relevant_synpp_refs.iterrows():
yaml_setup['ions'].append(100 * species[0] + species[1])
yaml_setup['log_tau'].append(float(synpp_ref['ref_log_tau']))
yaml_setup['active'].append(True)
yaml_setup['temp'].append(yaml_setup['t_phot'])
yaml_setup['v_min'].append(yaml_reference['opacity']['v_ref'])
yaml_setup['v_max'].append(yaml_reference['grid']['v_outer_max'])
yaml_setup['aux'].append(1e200)
with open(fname, 'w') as f:
yaml.dump(yaml_reference, stream=f, explicit_start=True)
def intensity_black_body(nu, T):
"""
Calculate the intensity of a black-body according to the following formula
.. math::
I(\\nu, T) = \\frac{2h\\nu^3}{c^2}\frac{1}
{e^{h\\nu \\beta_\\textrm{rad}} - 1}
Parameters
----------
nu : float
Frequency of light
T : float
Temperature in kelvin
Returns
-------
Intensity : float
Returns the intensity of the black body
"""
beta_rad = 1 / (k_B_cgs * T)
coefficient = 2 * h_cgs / c_cgs ** 2
intensity = ne.evaluate('coefficient * nu**3 / '
'(exp(h_cgs * nu * beta_rad) -1 )')
return intensity
def species_tuple_to_string(species_tuple, roman_numerals=True):
"""
Convert a species tuple to its corresponding string representation.
Parameters
----------
species_tuple : tuple
Tuple of 2 values indicated atomic number and number of
electrons missing
roman_numerals : bool, optional(default = TRUE)
Indicates whether the returned ion number is in roman numerals
Returns
-------
element_symbol, roman_ion_number : str
Returns corresponding string representation of given tuple
"""
atomic_number, ion_number = species_tuple
element_symbol = ATOMIC_NUMBER2SYMBOL[atomic_number]
if roman_numerals:
roman_ion_number = int_to_roman(ion_number+1)
return '{0} {1}'.format(str(element_symbol), roman_ion_number)
else:
return '{0} {1:d}'.format(element_symbol, ion_number)
def species_string_to_tuple(species_string):
"""
Convert a species string to its corresponding tuple representation
Parameters
----------
species_string : str
String containing species symbol (e.g. Si II, Fe III)
Returns
-------
atomic_number, ion_number : tuple
Returns tuple of length 2 indicating atomic number and ion number
Raises
------
MalformedSpeciesError
If the inputted string does not match the species format
"""
try:
element_symbol, ion_number_string = re.match(r'^(\w+)\s*(\d+)',
species_string).groups()
except AttributeError:
try:
element_symbol, ion_number_string = species_string.split()
except ValueError:
raise MalformedSpeciesError(
'Species string "{0}" is not of format <element_symbol><number>'
' (e.g. Fe 2, Fe2, ..)'.format(species_string))
atomic_number = element_symbol2atomic_number(element_symbol)
try:
ion_number = roman_to_int(ion_number_string)
except ValueError:
try:
ion_number = int(ion_number_string)
except ValueError:
raise MalformedSpeciesError(
"Given ion number ('{}') could not be parsed".format(
ion_number_string))
if ion_number > atomic_number:
raise ValueError(
'Species given does not exist: ion number > atomic number')
return atomic_number, ion_number - 1
def parse_quantity(quantity_string):
"""
Converts a string into its corresponding astropy.Quantity object.
Parameters
----------
quantity_string : str
String to be converted into astropy.Quantity
Returns
-------
q : ~u.Quantity
Corresponding astropy.Quantity object for passed string
Raises
------
MalformedQuantityError
If string is not properly formatted for Astropy Quantity
"""
if not isinstance(quantity_string, str):
raise MalformedQuantityError(quantity_string)
try:
value_string, unit_string = quantity_string.split()
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
value = float(value_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
q = u.Quantity(value, unit_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
return q
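# Hedged usage sketch (not in the original file): the string must contain a
# single space between value and unit, e.g.
#     parse_quantity('5 km/s')  # -> <Quantity 5.0 km / s>
#     parse_quantity('5km/s')   # raises MalformedQuantityError (no space to split on)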
def element_symbol2atomic_number(element_string):
"""
Takes an element symbol and returns its corresponding atomic number
Parameters
----------
element_string : str
Inputted element symbol
Returns
-------
int
Returned atomic number
"""
reformatted_element_string = reformat_element_symbol(element_string)
if reformatted_element_string not in SYMBOL2ATOMIC_NUMBER:
raise MalformedElementSymbolError(element_string)
return SYMBOL2ATOMIC_NUMBER[reformatted_element_string]
def atomic_number2element_symbol(atomic_number):
"""
Convert atomic number to string
Parameters
----------
atomic_number : int
Inputted atomic number
Returns
-------
str
Returned corresponding element symbol
"""
return ATOMIC_NUMBER2SYMBOL[atomic_number]
def reformat_element_symbol(element_string):
"""
Reformat the string so the first letter is uppercase and all subsequent
letters lowercase.
Parameters
----------
element_string : str
Inputted element symbol
Returns
-------
str
Returned reformatted element symbol
"""
return element_string[0].upper() + element_string[1:].lower()
def quantity_linspace(start, stop, num, **kwargs):
"""
Essentially the same input parameters as linspace, but
calculated for an astropy quantity start and stop.
Parameters
----------
start : ~astropy.Quantity
Starting value of the sequence
stop : ~astropy.Quantity
End value of the sequence
num : int
Number of samples to generate
Returns
-------
~astropy.Quantity
Returns num evenly spaced values of type astropy.Quantity
Raises
------
ValueError
If start and stop values have no unit attribute.
"""
if not (hasattr(start, 'unit') and hasattr(stop, 'unit')): |
return (np.linspace(start.value, stop.to(start.unit).value, num, **kwargs)
* start.unit)
def convert_abundances_format(fname, delimiter=r'\s+'):
"""
Changes format of file containing abundances into data frame
Parameters
----------
fname : file, str
File or file name that contains abundance info
delimiter : str, optional(default = '\\s+')
Determines the separator for splitting file
Returns
-------
DataFrame
Corresponding data frame
"""
df = pd.read_csv(fname, delimiter=delimiter, comment='#', header=None)
# Drop shell index column
df.drop(df.columns[0], axis=1, inplace=True)
# Assign header row
df.columns = [nucname.name(i)
for i in range(1, df.shape[1] + 1)]
return df | raise ValueError('Both start and stop need to be quantities with a '
'unit attribute') | random_line_split |
base.py | import logging
import os
import re
from collections import OrderedDict
import numexpr as ne
import numpy as np
import pandas as pd
import yaml
from tardis import constants
from astropy import units as u
from pyne import nucname
import tardis
from tardis.io.util import get_internal_data_path
k_B_cgs = constants.k_B.cgs.value
c_cgs = constants.c.cgs.value
h_cgs = constants.h.cgs.value
m_e_cgs = constants.m_e.cgs.value
e_charge_gauss = constants.e.gauss.value
logger = logging.getLogger(__name__)
tardis_dir = os.path.realpath(tardis.__path__[0])
ATOMIC_SYMBOLS_DATA = pd.read_csv(get_internal_data_path('atomic_symbols.dat'), delim_whitespace=True,
names=['atomic_number', 'symbol']).set_index('atomic_number').squeeze()
ATOMIC_NUMBER2SYMBOL = OrderedDict(ATOMIC_SYMBOLS_DATA.to_dict())
SYMBOL2ATOMIC_NUMBER = OrderedDict((y, x) for x, y in ATOMIC_NUMBER2SYMBOL.items())
synpp_default_yaml_fname = get_internal_data_path('synpp_default.yaml')
NUMERAL_MAP = tuple(zip(
(1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1),
('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
))
class MalformedError(Exception):
pass
class MalformedSpeciesError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return ('Expecting a species notation (e.g. "Si 2", "Si II", "Fe IV") '
'- supplied {0}'.format(self.malformed_element_symbol))
class MalformedElementSymbolError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return ('Expecting an atomic symbol (e.g. Fe) - supplied {0}').format(
self.malformed_element_symbol)
class MalformedQuantityError(MalformedError):
def __init__(self, malformed_quantity_string):
self.malformed_quantity_string = malformed_quantity_string
def __str__(self):
return ('Expecting a quantity string(e.g. "5 km/s") for keyword '
'- supplied {0}').format(self.malformed_quantity_string)
def int_to_roman(i):
"""
Convert an integer into its roman numeral representation.
Parameters
----------
i : int
Integer to be converted into roman numerals
Returns
-------
str
Returns roman numeral representation of i in str format.
"""
result = []
for integer, numeral in NUMERAL_MAP:
count = i // integer
result.append(numeral * count)
i -= integer * count
return ''.join(result)
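# Hedged example (not part of the original module): int_to_roman walks
# NUMERAL_MAP greedily from the largest value down, e.g.
#     int_to_roman(4)     # -> 'IV'
#     int_to_roman(1994)  # -> 'MCMXCIV'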
def roman_to_int(roman_string):
"""
Convert a roman numeral into its corresponding integer.
Parameters
----------
roman_string : str
Roman numeral to be converted into an integer
Returns
-------
int
Returns integer representation of roman_string
"""
NUMERALS_SET = set(list(zip(*NUMERAL_MAP))[1])
roman_string = roman_string.upper()
if len(set(list(roman_string.upper())) - NUMERALS_SET) != 0:
raise ValueError('{0} does not seem to be a roman numeral'.format(
roman_string))
i = result = 0
for integer, numeral in NUMERAL_MAP:
while roman_string[i:i + len(numeral)] == numeral:
result += integer
i += len(numeral)
if result < 1:
raise ValueError('Can not interpret Roman Numeral {0}'.format(roman_string))
return result
def calculate_luminosity(
spec_fname, distance, wavelength_column=0,
wavelength_unit=u.angstrom, flux_column=1,
flux_unit=u.Unit('erg / (Angstrom cm2 s)')):
"""
Calculates luminosity of star.
Parameters
----------
spec_fname : file or str
File or file name to be read
distance : float
Distance to star
wavelength_column : int, optional(default = 0)
Column index in which the wavelength is stored
wavelength_unit : float, optional(default = u.angstrom)
Dictates units used for calculating wavelength.
flux_column : int, optional(default = 1)
Column index in which the flux is stored
flux_unit : str, optional(default = u.Unit('erg / (Angstrom cm2 s)'))
Dictates units used for flux
Returns
-------
luminosity.value : float
Returned luminosity value of star.
wavelength.min() : float
Minimum value of wavelength of light
wavelength.max() : float
Maximum value of wavelength of light
"""
#BAD STYLE change to parse quantity
distance = u.Unit(distance)
wavelength, flux = np.loadtxt(spec_fname, usecols=(wavelength_column, flux_column), unpack=True)
flux_density = np.trapz(flux, wavelength) * (flux_unit * wavelength_unit)
luminosity = (flux_density * 4 * np.pi * distance**2).to('erg/s')
return luminosity.value, wavelength.min(), wavelength.max()
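# Hedged usage sketch (file name and distance string are assumptions, not from
# the original source): the spectrum file is a plain-text table with wavelength
# and flux columns, and the distance is passed as a unit string because of the
# u.Unit() call flagged above, e.g.
#     lum, wl_min, wl_max = calculate_luminosity('spectrum.dat', '10 Mpc')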
def create_synpp_yaml(radial1d_mdl, fname, shell_no=0, lines_db=None):
"""
Create a yaml file that is readable from syn++
Parameters
----------
radial1d_mdl : Radial1DModel
Inputted object that will be read into YAML file
fname : str
File name for the synpp yaml
shell_no : int, optional(default = 0)
Number of shells
lines_db : file, optional(default = None)
Raises
------
ValueError
If the current dataset does not contain necessary reference files
"""
logger.warning('Currently only works with Si and a special setup')
if radial1d_mdl.atom_data.synpp_refs is None:
raise ValueError(
'The current atom dataset does not contain the '
'necessary reference files (please contact the authors)')
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] = -99.0
for key, value in radial1d_mdl.atom_data.synpp_refs.iterrows():
try:
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'].loc[key] = np.log10(
radial1d_mdl.plasma.tau_sobolevs[0].loc[value['line_id']])
except KeyError:
pass
relevant_synpp_refs = radial1d_mdl.atom_data.synpp_refs[
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] > -50]
with open(synpp_default_yaml_fname) as stream:
yaml_reference = yaml.load(stream, Loader=yaml.CLoader)
if lines_db is not None:
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'lines')
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'refs.dat')
yaml_reference['output']['min_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.min())
yaml_reference['output']['max_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.max())
#raise Exception("there's a problem here with units what units does synpp expect?")
yaml_reference['opacity']['v_ref'] = float(
(radial1d_mdl.tardis_config.structure.v_inner[0].to('km/s') /
(1000. * u.km / u.s)).value)
yaml_reference['grid']['v_outer_max'] = float(
(radial1d_mdl.tardis_config.structure.v_outer[-1].to('km/s') /
(1000. * u.km / u.s)).value)
#pdb.set_trace()
yaml_setup = yaml_reference['setups'][0]
yaml_setup['ions'] = []
yaml_setup['log_tau'] = []
yaml_setup['active'] = []
yaml_setup['temp'] = []
yaml_setup['v_min'] = []
yaml_setup['v_max'] = []
yaml_setup['aux'] = []
for species, synpp_ref in relevant_synpp_refs.iterrows():
yaml_setup['ions'].append(100 * species[0] + species[1])
yaml_setup['log_tau'].append(float(synpp_ref['ref_log_tau']))
yaml_setup['active'].append(True)
yaml_setup['temp'].append(yaml_setup['t_phot'])
yaml_setup['v_min'].append(yaml_reference['opacity']['v_ref'])
yaml_setup['v_max'].append(yaml_reference['grid']['v_outer_max'])
yaml_setup['aux'].append(1e200)
with open(fname, 'w') as f:
yaml.dump(yaml_reference, stream=f, explicit_start=True)
def intensity_black_body(nu, T):
"""
Calculate the intensity of a black-body according to the following formula
.. math::
I(\\nu, T) = \\frac{2h\\nu^3}{c^2}\\frac{1}
{e^{h\\nu \\beta_\\textrm{rad}} - 1}
Parameters
----------
nu : float
Frequency of light
T : float
Temperature in kelvin
Returns
-------
Intensity : float
Returns the intensity of the black body
"""
beta_rad = 1 / (k_B_cgs * T)
coefficient = 2 * h_cgs / c_cgs ** 2
intensity = ne.evaluate('coefficient * nu**3 / '
'(exp(h_cgs * nu * beta_rad) -1 )')
return intensity
def species_tuple_to_string(species_tuple, roman_numerals=True):
"""
Convert a species tuple to its corresponding string representation.
Parameters
----------
species_tuple : tuple
Tuple of 2 values indicating atomic number and number of
electrons missing
roman_numerals : bool, optional(default = True)
Indicates whether the returned ion number is in roman numerals
Returns
-------
element_symbol, roman_ion_number : str
Returns corresponding string representation of given tuple
"""
atomic_number, ion_number = species_tuple
element_symbol = ATOMIC_NUMBER2SYMBOL[atomic_number]
if roman_numerals:
roman_ion_number = int_to_roman(ion_number+1)
return '{0} {1}'.format(str(element_symbol), roman_ion_number)
else:
return '{0} {1:d}'.format(element_symbol, ion_number)
def species_string_to_tuple(species_string):
"""
Convert a species string to its corresponding tuple representation
Parameters
----------
species_string : str
String containing species symbol (e.g. Si II, Fe III)
Returns
-------
atomic_number, ion_number : tuple
Returns tuple of length 2 indicating atomic number and ion number
Raises
------
MalformedSpeciesError
If the inputted string does not match the species format
"""
try:
element_symbol, ion_number_string = re.match(r'^(\w+)\s*(\d+)',
species_string).groups()
except AttributeError:
try:
element_symbol, ion_number_string = species_string.split()
except ValueError:
raise MalformedSpeciesError(
'Species string "{0}" is not of format <element_symbol><number>'
' (e.g. Fe 2, Fe2, ..)'.format(species_string))
atomic_number = element_symbol2atomic_number(element_symbol)
try:
ion_number = roman_to_int(ion_number_string)
except ValueError:
try:
ion_number = int(ion_number_string)
except ValueError:
raise MalformedSpeciesError(
"Given ion number ('{}') could not be parsed".format(
ion_number_string))
if ion_number > atomic_number:
raise ValueError(
'Species given does not exist: ion number > atomic number')
return atomic_number, ion_number - 1
def parse_quantity(quantity_string):
"""
Converts a string into its corresponding astropy.Quantity object.
Parameters
----------
quantity_string : str
String to be converted into astropy.Quantity
Returns
-------
q : ~u.Quantity
Corresponding astropy.Quantity object for passed string
Raises
------
MalformedQuantityError
If string is not properly formatted for Astropy Quantity
"""
if not isinstance(quantity_string, str):
raise MalformedQuantityError(quantity_string)
try:
value_string, unit_string = quantity_string.split()
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
value = float(value_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
q = u.Quantity(value, unit_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
return q
def element_symbol2atomic_number(element_string):
"""
Takes an element symbol and returns its corresponding atomic number
Parameters
----------
element_string : str
Inputted element symbol
Returns
-------
int
Returned atomic number
"""
reformatted_element_string = reformat_element_symbol(element_string)
if reformatted_element_string not in SYMBOL2ATOMIC_NUMBER:
raise MalformedElementSymbolError(element_string)
return SYMBOL2ATOMIC_NUMBER[reformatted_element_string]
def atomic_number2element_symbol(atomic_number):
"""
Convert atomic number to string
Parameters
----------
atomic_number : int
Inputted atomic number
Returns
-------
str
Returned corresponding element symbol
"""
return ATOMIC_NUMBER2SYMBOL[atomic_number]
def reformat_element_symbol(element_string):
|
def quantity_linspace(start, stop, num, **kwargs):
"""
Essentially the same input parameters as linspace, but
calculated for an astropy quantity start and stop.
Parameters
----------
start : ~astropy.Quantity
Starting value of the sequence
stop : ~astropy.Quantity
End value of the sequence
num : int
Number of samples to generate
Returns
-------
~astropy.Quantity
Returns num evenly spaced values of type astropy.Quantity
Raises
------
ValueError
If start and stop values have no unit attribute.
"""
if not (hasattr(start, 'unit') and hasattr(stop, 'unit')):
raise ValueError('Both start and stop need to be quantities with a '
'unit attribute')
return (np.linspace(start.value, stop.to(start.unit).value, num, **kwargs)
* start.unit)
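# Hedged example (not in the original file): both endpoints must carry units and
# the result is returned in the unit of `start`, e.g.
#     quantity_linspace(1 * u.km, 2000 * u.m, num=3)  # -> <Quantity [1., 1.5, 2.] km>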
def convert_abundances_format(fname, delimiter=r'\s+'):
"""
Changes format of file containing abundances into data frame
Parameters
----------
fname : file, str
File or file name that contains abundance info
delimiter : str, optional(default = '\\s+')
Determines the separator for splitting file
Returns
-------
DataFrame
Corresponding data frame
"""
df = pd.read_csv(fname, delimiter=delimiter, comment='#', header=None)
# Drop shell index column
df.drop(df.columns[0], axis=1, inplace=True)
# Assign header row
df.columns = [nucname.name(i)
for i in range(1, df.shape[1] + 1)]
return df | """
Reformat the string so the first letter is uppercase and all subsequent
letters lowercase.
Parameters
----------
element_string : str
Inputted element symbol
Returns
-------
str
Returned reformatted element symbol
"""
return element_string[0].upper() + element_string[1:].lower() | identifier_body |
base.py | import logging
import os
import re
from collections import OrderedDict
import numexpr as ne
import numpy as np
import pandas as pd
import yaml
from tardis import constants
from astropy import units as u
from pyne import nucname
import tardis
from tardis.io.util import get_internal_data_path
k_B_cgs = constants.k_B.cgs.value
c_cgs = constants.c.cgs.value
h_cgs = constants.h.cgs.value
m_e_cgs = constants.m_e.cgs.value
e_charge_gauss = constants.e.gauss.value
logger = logging.getLogger(__name__)
tardis_dir = os.path.realpath(tardis.__path__[0])
ATOMIC_SYMBOLS_DATA = pd.read_csv(get_internal_data_path('atomic_symbols.dat'), delim_whitespace=True,
names=['atomic_number', 'symbol']).set_index('atomic_number').squeeze()
ATOMIC_NUMBER2SYMBOL = OrderedDict(ATOMIC_SYMBOLS_DATA.to_dict())
SYMBOL2ATOMIC_NUMBER = OrderedDict((y, x) for x, y in ATOMIC_NUMBER2SYMBOL.items())
synpp_default_yaml_fname = get_internal_data_path('synpp_default.yaml')
NUMERAL_MAP = tuple(zip(
(1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1),
('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
))
class MalformedError(Exception):
pass
class MalformedSpeciesError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return ('Expecting a species notation (e.g. "Si 2", "Si II", "Fe IV") '
'- supplied {0}'.format(self.malformed_element_symbol))
class MalformedElementSymbolError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return ('Expecting an atomic symbol (e.g. Fe) - supplied {0}').format(
self.malformed_element_symbol)
class MalformedQuantityError(MalformedError):
def __init__(self, malformed_quantity_string):
self.malformed_quantity_string = malformed_quantity_string
def __str__(self):
return ('Expecting a quantity string(e.g. "5 km/s") for keyword '
'- supplied {0}').format(self.malformed_quantity_string)
def int_to_roman(i):
"""
Convert an integer into its roman numeral representation.
Parameters
----------
i : int
Integer to be converted into roman numerals
Returns
-------
str
Returns roman numeral representation of i in str format.
"""
result = []
for integer, numeral in NUMERAL_MAP:
count = i // integer
result.append(numeral * count)
i -= integer * count
return ''.join(result)
def roman_to_int(roman_string):
"""
Convert a roman numeral into its corresponding integer.
Parameters
----------
roman_string : str
Roman numeral to be converted into an integer
Returns
-------
int
Returns integer representation of roman_string
"""
NUMERALS_SET = set(list(zip(*NUMERAL_MAP))[1])
roman_string = roman_string.upper()
if len(set(list(roman_string.upper())) - NUMERALS_SET) != 0:
raise ValueError('{0} does not seem to be a roman numeral'.format(
roman_string))
i = result = 0
for integer, numeral in NUMERAL_MAP:
while roman_string[i:i + len(numeral)] == numeral:
result += integer
i += len(numeral)
if result < 1:
raise ValueError('Can not interpret Roman Numeral {0}'.format(roman_string))
return result
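# Hedged example (not part of the original module): the parser is
# case-insensitive and validates the character set before accumulating, e.g.
#     roman_to_int('xiv')  # -> 14
#     roman_to_int('abc')  # raises ValueError (not a roman numeral)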
def calculate_luminosity(
spec_fname, distance, wavelength_column=0,
wavelength_unit=u.angstrom, flux_column=1,
flux_unit=u.Unit('erg / (Angstrom cm2 s)')):
"""
Calculates luminosity of star.
Parameters
----------
spec_fname : file or str
File or file name to be read
distance : float
Distance to star
wavelength_column : int, optional(default = 0)
Column index in which the wavelength is stored
wavelength_unit : float, optional(default = u.angstrom)
Dictates units used for calculating wavelength.
flux_column : int, optional(default = 1)
Column index in which the flux is stored
flux_unit : str, optional(default = u.Unit('erg / (Angstrom cm2 s)'))
Dictates units used for flux
Returns
-------
luminosity.value : float
Returned luminosity value of star.
wavelength.min() : float
Minimum value of wavelength of light
wavelength.max() : float
Maximum value of wavelength of light
"""
#BAD STYLE change to parse quantity
distance = u.Unit(distance)
wavelength, flux = np.loadtxt(spec_fname, usecols=(wavelength_column, flux_column), unpack=True)
flux_density = np.trapz(flux, wavelength) * (flux_unit * wavelength_unit)
luminosity = (flux_density * 4 * np.pi * distance**2).to('erg/s')
return luminosity.value, wavelength.min(), wavelength.max()
def create_synpp_yaml(radial1d_mdl, fname, shell_no=0, lines_db=None):
"""
Create a yaml file that is readable from syn++
Parameters
----------
radial1d_mdl : Radial1DModel
Inputted object that will be read into YAML file
fname : str
File name for the synpp yaml
shell_no : int, optional(default = 0)
Number of shells
lines_db : file, optional(default = None)
Raises
------
ValueError
If the current dataset does not contain necessary reference files
"""
logger.warning('Currently only works with Si and a special setup')
if radial1d_mdl.atom_data.synpp_refs is None:
raise ValueError(
'The current atom dataset does not contain the '
'necessary reference files (please contact the authors)')
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] = -99.0
for key, value in radial1d_mdl.atom_data.synpp_refs.iterrows():
|
relevant_synpp_refs = radial1d_mdl.atom_data.synpp_refs[
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] > -50]
with open(synpp_default_yaml_fname) as stream:
yaml_reference = yaml.load(stream, Loader=yaml.CLoader)
if lines_db is not None:
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'lines')
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'refs.dat')
yaml_reference['output']['min_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.min())
yaml_reference['output']['max_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.max())
#raise Exception("there's a problem here with units what units does synpp expect?")
yaml_reference['opacity']['v_ref'] = float(
(radial1d_mdl.tardis_config.structure.v_inner[0].to('km/s') /
(1000. * u.km / u.s)).value)
yaml_reference['grid']['v_outer_max'] = float(
(radial1d_mdl.tardis_config.structure.v_outer[-1].to('km/s') /
(1000. * u.km / u.s)).value)
#pdb.set_trace()
yaml_setup = yaml_reference['setups'][0]
yaml_setup['ions'] = []
yaml_setup['log_tau'] = []
yaml_setup['active'] = []
yaml_setup['temp'] = []
yaml_setup['v_min'] = []
yaml_setup['v_max'] = []
yaml_setup['aux'] = []
for species, synpp_ref in relevant_synpp_refs.iterrows():
yaml_setup['ions'].append(100 * species[0] + species[1])
yaml_setup['log_tau'].append(float(synpp_ref['ref_log_tau']))
yaml_setup['active'].append(True)
yaml_setup['temp'].append(yaml_setup['t_phot'])
yaml_setup['v_min'].append(yaml_reference['opacity']['v_ref'])
yaml_setup['v_max'].append(yaml_reference['grid']['v_outer_max'])
yaml_setup['aux'].append(1e200)
with open(fname, 'w') as f:
yaml.dump(yaml_reference, stream=f, explicit_start=True)
def intensity_black_body(nu, T):
"""
Calculate the intensity of a black-body according to the following formula
.. math::
I(\\nu, T) = \\frac{2h\\nu^3}{c^2}\\frac{1}
{e^{h\\nu \\beta_\\textrm{rad}} - 1}
Parameters
----------
nu : float
Frequency of light
T : float
Temperature in kelvin
Returns
-------
Intensity : float
Returns the intensity of the black body
"""
beta_rad = 1 / (k_B_cgs * T)
coefficient = 2 * h_cgs / c_cgs ** 2
intensity = ne.evaluate('coefficient * nu**3 / '
'(exp(h_cgs * nu * beta_rad) -1 )')
return intensity
def species_tuple_to_string(species_tuple, roman_numerals=True):
"""
Convert a species tuple to its corresponding string representation.
Parameters
----------
species_tuple : tuple
Tuple of 2 values indicating atomic number and number of
electrons missing
roman_numerals : bool, optional(default = True)
Indicates whether the returned ion number is in roman numerals
Returns
-------
element_symbol, roman_ion_number : str
Returns corresponding string representation of given tuple
"""
atomic_number, ion_number = species_tuple
element_symbol = ATOMIC_NUMBER2SYMBOL[atomic_number]
if roman_numerals:
roman_ion_number = int_to_roman(ion_number+1)
return '{0} {1}'.format(str(element_symbol), roman_ion_number)
else:
return '{0} {1:d}'.format(element_symbol, ion_number)
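# Hedged example (not in the original file): the ion number in the tuple is
# zero-based, so
#     species_tuple_to_string((14, 1))                        # -> 'Si II'
#     species_tuple_to_string((26, 2), roman_numerals=False)  # -> 'Fe 2'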
def species_string_to_tuple(species_string):
"""
Convert a species string to its corresponding tuple representation
Parameters
----------
species_string : str
String containing species symbol (e.g. Si II, Fe III)
Returns
-------
atomic_number, ion_number : tuple
Returns tuple of length 2 indicating atomic number and ion number
Raises
------
MalformedSpeciesError
If the inputted string does not match the species format
"""
try:
element_symbol, ion_number_string = re.match(r'^(\w+)\s*(\d+)',
species_string).groups()
except AttributeError:
try:
element_symbol, ion_number_string = species_string.split()
except ValueError:
raise MalformedSpeciesError(
'Species string "{0}" is not of format <element_symbol><number>'
' (e.g. Fe 2, Fe2, ..)'.format(species_string))
atomic_number = element_symbol2atomic_number(element_symbol)
try:
ion_number = roman_to_int(ion_number_string)
except ValueError:
try:
ion_number = int(ion_number_string)
except ValueError:
raise MalformedSpeciesError(
"Given ion number ('{}') could not be parsed".format(
ion_number_string))
if ion_number > atomic_number:
raise ValueError(
'Species given does not exist: ion number > atomic number')
return atomic_number, ion_number - 1
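# Hedged example (not in the original file): the returned ion number is
# zero-based, making this the inverse of species_tuple_to_string, e.g.
#     species_string_to_tuple('Si II')  # -> (14, 1)
#     species_string_to_tuple('Fe 2')   # -> (26, 1)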
def parse_quantity(quantity_string):
"""
Converts a string into its corresponding astropy.Quantity object.
Parameters
----------
quantity_string : str
String to be converted into astropy.Quantity
Returns
-------
q : ~u.Quantity
Corresponding astropy.Quantity object for passed string
Raises
------
MalformedQuantityError
If string is not properly formatted for Astropy Quantity
"""
if not isinstance(quantity_string, str):
raise MalformedQuantityError(quantity_string)
try:
value_string, unit_string = quantity_string.split()
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
value = float(value_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
q = u.Quantity(value, unit_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
return q
def element_symbol2atomic_number(element_string):
"""
Takes an element symbol and returns its corresponding atomic number
Parameters
----------
element_string : str
Inputted element symbol
Returns
-------
int
Returned atomic number
"""
reformatted_element_string = reformat_element_symbol(element_string)
if reformatted_element_string not in SYMBOL2ATOMIC_NUMBER:
raise MalformedElementSymbolError(element_string)
return SYMBOL2ATOMIC_NUMBER[reformatted_element_string]
def atomic_number2element_symbol(atomic_number):
"""
Convert atomic number to string
Parameters
----------
atomic_number : int
Inputted atomic number
Returns
-------
str
Returned corresponding element symbol
"""
return ATOMIC_NUMBER2SYMBOL[atomic_number]
def reformat_element_symbol(element_string):
"""
Reformat the string so the first letter is uppercase and all subsequent
letters lowercase.
Parameters
----------
element_string : str
Inputted element symbol
Returns
-------
str
Returned reformatted element symbol
"""
return element_string[0].upper() + element_string[1:].lower()
def quantity_linspace(start, stop, num, **kwargs):
"""
Essentially the same input parameters as linspace, but
calculated for an astropy quantity start and stop.
Parameters
----------
start : ~astropy.Quantity
Starting value of the sequence
stop : ~astropy.Quantity
End value of the sequence
num : int
Number of samples to generate
Returns
-------
~astropy.Quantity
Returns num evenly spaced values of type astropy.Quantity
Raises
------
ValueError
If start and stop values have no unit attribute.
"""
if not (hasattr(start, 'unit') and hasattr(stop, 'unit')):
raise ValueError('Both start and stop need to be quantities with a '
'unit attribute')
return (np.linspace(start.value, stop.to(start.unit).value, num, **kwargs)
* start.unit)
def convert_abundances_format(fname, delimiter=r'\s+'):
"""
Changes format of file containing abundances into data frame
Parameters
----------
fname : file, str
File or file name that contains abundance info
delimiter : str, optional(default = '\\s+')
Determines the separator for splitting file
Returns
-------
DataFrame
Corresponding data frame
"""
df = pd.read_csv(fname, delimiter=delimiter, comment='#', header=None)
# Drop shell index column
df.drop(df.columns[0], axis=1, inplace=True)
# Assign header row
df.columns = [nucname.name(i)
for i in range(1, df.shape[1] + 1)]
return df | try:
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'].loc[key] = np.log10(
radial1d_mdl.plasma.tau_sobolevs[0].loc[value['line_id']])
except KeyError:
pass | conditional_block |
profiling_data.go | // Copyright (C) 2019 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adreno
import (
"context"
"fmt"
"github.com/google/gapid/core/log"
"github.com/google/gapid/core/os/device"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/api/sync"
"github.com/google/gapid/gapis/perfetto"
perfetto_service "github.com/google/gapid/gapis/perfetto/service"
"github.com/google/gapid/gapis/service"
"github.com/google/gapid/gapis/service/path"
"github.com/google/gapid/gapis/trace/android/profile"
"github.com/google/gapid/gapis/trace/android/utils"
)
var (
slicesQuery = "" +
"SELECT s.context_id, s.render_target, s.frame_id, s.submission_id, s.hw_queue_id, s.command_buffer, s.render_pass, s.ts, s.dur, s.id, s.name, depth, arg_set_id, track_id, t.name " +
"FROM gpu_track t LEFT JOIN gpu_slice s " +
"ON s.track_id = t.id WHERE t.scope = 'gpu_render_stage' ORDER BY s.ts"
argsQueryFmt = "" +
"SELECT key, string_value FROM args WHERE args.arg_set_id = %d"
queueSubmitQuery = "" +
"SELECT submission_id FROM gpu_slice s JOIN track t ON s.track_id = t.id WHERE s.name = 'vkQueueSubmit' AND t.name = 'Vulkan Events' ORDER BY submission_id"
counterTracksQuery = "" +
"SELECT id, name, unit, description FROM gpu_counter_track ORDER BY id"
countersQueryFmt = "" +
"SELECT ts, value FROM counter c WHERE c.track_id = %d ORDER BY ts"
renderPassSliceName = "Surface"
)
func ProcessProfilingData(ctx context.Context, processor *perfetto.Processor, capture *path.Capture, desc *device.GpuCounterDescriptor, handleMapping *map[uint64][]service.VulkanHandleMappingItem, syncData *sync.Data) (*service.ProfilingData, error) {
slices, err := processGpuSlices(ctx, processor, capture, handleMapping, syncData)
if err != nil {
log.Err(ctx, err, "Failed to get GPU slices")
}
counters, err := processCounters(ctx, processor, desc)
if err != nil {
log.Err(ctx, err, "Failed to get GPU counters")
}
gpuCounters, err := profile.ComputeCounters(ctx, slices, counters)
if err != nil {
log.Err(ctx, err, "Failed to calculate performance data based on GPU slices and counters")
}
return &service.ProfilingData{
Slices: slices,
Counters: counters,
GpuCounters: gpuCounters,
}, nil
}
func extractTraceHandles(ctx context.Context, replayHandles *[]int64, replayHandleType string, handleMapping *map[uint64][]service.VulkanHandleMappingItem) {
for i, v := range *replayHandles {
handles, ok := (*handleMapping)[uint64(v)]
if !ok {
log.E(ctx, "%v not found in replay: %v", replayHandleType, v)
continue
}
found := false
for _, handle := range handles {
if handle.HandleType == replayHandleType {
(*replayHandles)[i] = int64(handle.TraceValue)
found = true
break
}
}
if !found {
log.E(ctx, "Incorrect Handle type for %v: %v", replayHandleType, v)
}
}
}
func fixContextIds(contextIDs []int64) {
// This is a workaround for a QC bug (b/192546534)
// that causes the first deviceID to be zero after a
// renderpass change in the same queue submit.
// So we fill the zero entries with the existing
// device id, when there is only one device id.
zeroIndices := make([]int, 0)
contextID := int64(0)
for i, v := range contextIDs {
if v == 0 {
zeroIndices = append(zeroIndices, i)
continue
}
if contextID == 0 {
contextID = v
continue
}
if contextID != v {
// There are multiple devices
// We cannot know which one to fill
return
}
}
for _, v := range zeroIndices {
// If there is only one device id in the entire trace,
// we can assume the zero entries belong to that device.
contextIDs[v] = contextID
}
}
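// Hedged illustration (not part of the original file): with a single distinct
// non-zero id in the trace, fixContextIds back-fills the zero entries in place:
//
//	ids := []int64{0, 7, 0, 7}
//	fixContextIds(ids) // ids == {7, 7, 7, 7}
//
// With two or more distinct non-zero ids the slice is left untouched, because
// the right fill value would be ambiguous.
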
func processGpuSlices(ctx context.Context, processor *perfetto.Processor, capture *path.Capture, handleMapping *map[uint64][]service.VulkanHandleMappingItem, syncData *sync.Data) (*service.ProfilingData_GpuSlices, error) {
slicesQueryResult, err := processor.Query(slicesQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", slicesQuery)
}
queueSubmitQueryResult, err := processor.Query(queueSubmitQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", queueSubmitQuery)
}
queueSubmitColumns := queueSubmitQueryResult.GetColumns()
queueSubmitIds := queueSubmitColumns[0].GetLongValues()
submissionOrdering := make(map[int64]uint64)
for i, v := range queueSubmitIds {
submissionOrdering[v] = uint64(i)
}
trackIdCache := make(map[int64]bool)
argsQueryCache := make(map[int64]*perfetto_service.QueryResult)
slicesColumns := slicesQueryResult.GetColumns()
numSliceRows := slicesQueryResult.GetNumRecords()
slices := make([]*service.ProfilingData_GpuSlices_Slice, numSliceRows)
groupParentLookup := map[api.CmdSubmissionKey]*service.ProfilingData_GpuSlices_Group{}
groups := []*service.ProfilingData_GpuSlices_Group{}
groupIds := make([]int32, numSliceRows)
var tracks []*service.ProfilingData_GpuSlices_Track
// Grab all the column values. Depends on the order of columns selected in slicesQuery
contextIds := slicesColumns[0].GetLongValues()
fixContextIds(contextIds)
extractTraceHandles(ctx, &contextIds, "VkDevice", handleMapping)
renderTargets := slicesColumns[1].GetLongValues()
extractTraceHandles(ctx, &renderTargets, "VkFramebuffer", handleMapping)
commandBuffers := slicesColumns[5].GetLongValues()
extractTraceHandles(ctx, &commandBuffers, "VkCommandBuffer", handleMapping)
renderPasses := slicesColumns[6].GetLongValues()
extractTraceHandles(ctx, &renderPasses, "VkRenderPass", handleMapping)
frameIds := slicesColumns[2].GetLongValues()
submissionIds := slicesColumns[3].GetLongValues()
hwQueueIds := slicesColumns[4].GetLongValues()
timestamps := slicesColumns[7].GetLongValues()
durations := slicesColumns[8].GetLongValues()
ids := slicesColumns[9].GetLongValues()
names := slicesColumns[10].GetStringValues()
depths := slicesColumns[11].GetLongValues()
argSetIds := slicesColumns[12].GetLongValues()
trackIds := slicesColumns[13].GetLongValues()
trackNames := slicesColumns[14].GetStringValues()
subCommandGroupMap := make(map[api.CmdSubmissionKey]int)
for i, v := range submissionIds {
subOrder, ok := submissionOrdering[v]
if ok {
cb := uint64(commandBuffers[i])
key := api.CmdSubmissionKey{subOrder, cb, uint64(renderPasses[i]), uint64(renderTargets[i])}
// Create a new group for each main renderPass slice.
if indices, ok := syncData.SubmissionIndices[key]; ok && names[i] == renderPassSliceName {
var idx []uint64
if c, ok := subCommandGroupMap[key]; ok { // Sometimes multiple renderPass slices share the same renderPass and renderTarget.
idx = indices[c]
} else {
idx = indices[0]
subCommandGroupMap[key] = 0
}
names[i] = fmt.Sprintf("%v", idx)
parent := utils.FindParentGroup(ctx, subOrder, cb, groupParentLookup, &groups, syncData.SubmissionIndices, capture)
group := &service.ProfilingData_GpuSlices_Group{
Id: int32(len(groups)),
Name: fmt.Sprintf("RenderPass %v, RenderTarget %v", uint64(renderPasses[i]), uint64(renderTargets[i])),
Parent: parent,
Link: &path.Command{Capture: capture, Indices: idx},
}
groups = append(groups, group)
subCommandGroupMap[key]++
}
} else {
log.W(ctx, "Encountered submission ID mismatch %v", v)
}
// Find the group that the current slice belongs to and mark down group id.
if len(groups) > 0 {
groupIds[i] = groups[len(groups)-1].Id // Slices were time sorted and main renderPass slice comes first.
} else {
log.W(ctx, "Group missing for slice %v at submission %v, commandBuffer %v, renderPass %v, renderTarget %v", names[i], submissionIds[i], commandBuffers[i], renderPasses[i], renderTargets[i])
groupIds[i] = -1
}
}
for i := uint64(0); i < numSliceRows; i++ {
var argsQueryResult *perfetto_service.QueryResult
var ok bool
if argsQueryResult, ok = argsQueryCache[argSetIds[i]]; !ok {
argsQuery := fmt.Sprintf(argsQueryFmt, argSetIds[i])
argsQueryResult, err = processor.Query(argsQuery)
if err != nil {
log.W(ctx, "SQL query failed: %v", argsQuery)
}
argsQueryCache[argSetIds[i]] = argsQueryResult
}
argsColumns := argsQueryResult.GetColumns()
numArgsRows := argsQueryResult.GetNumRecords()
var extras []*service.ProfilingData_GpuSlices_Slice_Extra
for j := uint64(0); j < numArgsRows; j++ {
keys := argsColumns[0].GetStringValues()
values := argsColumns[1].GetStringValues()
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: keys[j],
Value: &service.ProfilingData_GpuSlices_Slice_Extra_StringValue{StringValue: values[j]},
})
}
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "contextId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(contextIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "renderTarget",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(renderTargets[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "commandBuffer",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(commandBuffers[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "renderPass",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(renderPasses[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "frameId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(frameIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "submissionId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(submissionIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "hwQueueId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(hwQueueIds[i])},
})
slices[i] = &service.ProfilingData_GpuSlices_Slice{
Ts: uint64(timestamps[i]),
Dur: uint64(durations[i]),
Id: uint64(ids[i]),
Label: names[i],
Depth: int32(depths[i]),
Extras: extras,
TrackId: int32(trackIds[i]),
GroupId: groupIds[i],
}
if _, ok := trackIdCache[trackIds[i]]; !ok {
trackIdCache[trackIds[i]] = true
tracks = append(tracks, &service.ProfilingData_GpuSlices_Track{
Id: int32(trackIds[i]),
Name: trackNames[i],
})
}
}
return &service.ProfilingData_GpuSlices{
Slices: slices,
Tracks: tracks,
Groups: groups,
}, nil
}
func processCounters(ctx context.Context, processor *perfetto.Processor, desc *device.GpuCounterDescriptor) ([]*service.ProfilingData_Counter, error) | {
counterTracksQueryResult, err := processor.Query(counterTracksQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", counterTracksQuery)
}
// t.id, name, unit, description, ts, value
tracksColumns := counterTracksQueryResult.GetColumns()
numTracksRows := counterTracksQueryResult.GetNumRecords()
counters := make([]*service.ProfilingData_Counter, numTracksRows)
// Grab all the column values. Depends on the order of columns selected in countersQuery
trackIds := tracksColumns[0].GetLongValues()
names := tracksColumns[1].GetStringValues()
units := tracksColumns[2].GetStringValues()
descriptions := tracksColumns[3].GetStringValues()
nameToSpec := map[string]*device.GpuCounterDescriptor_GpuCounterSpec{}
if desc != nil {
for _, spec := range desc.Specs {
nameToSpec[spec.Name] = spec
}
}
for i := uint64(0); i < numTracksRows; i++ {
countersQuery := fmt.Sprintf(countersQueryFmt, trackIds[i])
countersQueryResult, err := processor.Query(countersQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", countersQuery)
}
countersColumns := countersQueryResult.GetColumns()
timestampsLong := countersColumns[0].GetLongValues()
timestamps := make([]uint64, len(timestampsLong))
for i, t := range timestampsLong {
timestamps[i] = uint64(t)
}
values := countersColumns[1].GetDoubleValues()
spec, _ := nameToSpec[names[i]]
// TODO(apbodnar) Populate the `default` field once the trace processor supports it (b/147432390)
counters[i] = &service.ProfilingData_Counter{
Id: uint32(trackIds[i]),
Name: names[i],
Unit: units[i],
Description: descriptions[i],
Spec: spec,
Timestamps: timestamps,
Values: values,
}
}
return counters, nil
} | identifier_body |
|
profiling_data.go | // Copyright (C) 2019 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adreno
import (
"context"
"fmt"
"github.com/google/gapid/core/log"
"github.com/google/gapid/core/os/device"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/api/sync"
"github.com/google/gapid/gapis/perfetto"
perfetto_service "github.com/google/gapid/gapis/perfetto/service"
"github.com/google/gapid/gapis/service"
"github.com/google/gapid/gapis/service/path"
"github.com/google/gapid/gapis/trace/android/profile"
"github.com/google/gapid/gapis/trace/android/utils"
)
var (
slicesQuery = "" +
"SELECT s.context_id, s.render_target, s.frame_id, s.submission_id, s.hw_queue_id, s.command_buffer, s.render_pass, s.ts, s.dur, s.id, s.name, depth, arg_set_id, track_id, t.name " +
"FROM gpu_track t LEFT JOIN gpu_slice s " +
"ON s.track_id = t.id WHERE t.scope = 'gpu_render_stage' ORDER BY s.ts"
argsQueryFmt = "" +
"SELECT key, string_value FROM args WHERE args.arg_set_id = %d"
queueSubmitQuery = "" +
"SELECT submission_id FROM gpu_slice s JOIN track t ON s.track_id = t.id WHERE s.name = 'vkQueueSubmit' AND t.name = 'Vulkan Events' ORDER BY submission_id"
counterTracksQuery = "" +
"SELECT id, name, unit, description FROM gpu_counter_track ORDER BY id"
countersQueryFmt = "" +
"SELECT ts, value FROM counter c WHERE c.track_id = %d ORDER BY ts"
renderPassSliceName = "Surface"
)
func ProcessProfilingData(ctx context.Context, processor *perfetto.Processor, capture *path.Capture, desc *device.GpuCounterDescriptor, handleMapping *map[uint64][]service.VulkanHandleMappingItem, syncData *sync.Data) (*service.ProfilingData, error) {
slices, err := processGpuSlices(ctx, processor, capture, handleMapping, syncData)
if err != nil {
log.Err(ctx, err, "Failed to get GPU slices")
}
counters, err := processCounters(ctx, processor, desc)
if err != nil {
log.Err(ctx, err, "Failed to get GPU counters")
}
gpuCounters, err := profile.ComputeCounters(ctx, slices, counters)
if err != nil {
log.Err(ctx, err, "Failed to calculate performance data based on GPU slices and counters")
}
return &service.ProfilingData{
Slices: slices,
Counters: counters,
GpuCounters: gpuCounters,
}, nil
}
func | (ctx context.Context, replayHandles *[]int64, replayHandleType string, handleMapping *map[uint64][]service.VulkanHandleMappingItem) {
for i, v := range *replayHandles {
handles, ok := (*handleMapping)[uint64(v)]
if !ok {
log.E(ctx, "%v not found in replay: %v", replayHandleType, v)
continue
}
found := false
for _, handle := range handles {
if handle.HandleType == replayHandleType {
(*replayHandles)[i] = int64(handle.TraceValue)
found = true
break
}
}
if !found {
log.E(ctx, "Incorrect Handle type for %v: %v", replayHandleType, v)
}
}
}
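// Hedged illustration (assumed values, not from the original file):
// extractTraceHandles rewrites replay-side handles to their trace-side values
// in place, e.g.
//
//	mapping := map[uint64][]service.VulkanHandleMappingItem{
//		42: {{HandleType: "VkDevice", TraceValue: 7}},
//	}
//	devices := []int64{42}
//	extractTraceHandles(ctx, &devices, "VkDevice", &mapping)
//	// devices is now {7}
//
// Handles with no mapping entry, or only entries of a different type, are
// logged and left unchanged.
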
func fixContextIds(contextIDs []int64) {
// This is a workaround for a QC bug (b/192546534)
// that causes the first deviceID to be zero after a
// renderpass change in the same queue submit.
// So we fill the zero entries with the existing
// device id, when there is only one device id.
zeroIndices := make([]int, 0)
contextID := int64(0)
for i, v := range contextIDs {
if v == 0 {
zeroIndices = append(zeroIndices, i)
continue
}
if contextID == 0 {
contextID = v
continue
}
if contextID != v {
// There are multiple devices
// We cannot know which one to fill
return
}
}
for _, v := range zeroIndices {
// If there is only one device id in the entire trace,
// we can assume the zero entries belong to that device.
contextIDs[v] = contextID
}
}
func processGpuSlices(ctx context.Context, processor *perfetto.Processor, capture *path.Capture, handleMapping *map[uint64][]service.VulkanHandleMappingItem, syncData *sync.Data) (*service.ProfilingData_GpuSlices, error) {
slicesQueryResult, err := processor.Query(slicesQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", slicesQuery)
}
queueSubmitQueryResult, err := processor.Query(queueSubmitQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", queueSubmitQuery)
}
queueSubmitColumns := queueSubmitQueryResult.GetColumns()
queueSubmitIds := queueSubmitColumns[0].GetLongValues()
submissionOrdering := make(map[int64]uint64)
for i, v := range queueSubmitIds {
submissionOrdering[v] = uint64(i)
}
trackIdCache := make(map[int64]bool)
argsQueryCache := make(map[int64]*perfetto_service.QueryResult)
slicesColumns := slicesQueryResult.GetColumns()
numSliceRows := slicesQueryResult.GetNumRecords()
slices := make([]*service.ProfilingData_GpuSlices_Slice, numSliceRows)
groupParentLookup := map[api.CmdSubmissionKey]*service.ProfilingData_GpuSlices_Group{}
groups := []*service.ProfilingData_GpuSlices_Group{}
groupIds := make([]int32, numSliceRows)
var tracks []*service.ProfilingData_GpuSlices_Track
// Grab all the column values. Depends on the order of columns selected in slicesQuery
contextIds := slicesColumns[0].GetLongValues()
fixContextIds(contextIds)
extractTraceHandles(ctx, &contextIds, "VkDevice", handleMapping)
renderTargets := slicesColumns[1].GetLongValues()
extractTraceHandles(ctx, &renderTargets, "VkFramebuffer", handleMapping)
commandBuffers := slicesColumns[5].GetLongValues()
extractTraceHandles(ctx, &commandBuffers, "VkCommandBuffer", handleMapping)
renderPasses := slicesColumns[6].GetLongValues()
extractTraceHandles(ctx, &renderPasses, "VkRenderPass", handleMapping)
frameIds := slicesColumns[2].GetLongValues()
submissionIds := slicesColumns[3].GetLongValues()
hwQueueIds := slicesColumns[4].GetLongValues()
timestamps := slicesColumns[7].GetLongValues()
durations := slicesColumns[8].GetLongValues()
ids := slicesColumns[9].GetLongValues()
names := slicesColumns[10].GetStringValues()
depths := slicesColumns[11].GetLongValues()
argSetIds := slicesColumns[12].GetLongValues()
trackIds := slicesColumns[13].GetLongValues()
trackNames := slicesColumns[14].GetStringValues()
subCommandGroupMap := make(map[api.CmdSubmissionKey]int)
for i, v := range submissionIds {
subOrder, ok := submissionOrdering[v]
if ok {
cb := uint64(commandBuffers[i])
key := api.CmdSubmissionKey{subOrder, cb, uint64(renderPasses[i]), uint64(renderTargets[i])}
// Create a new group for each main renderPass slice.
if indices, ok := syncData.SubmissionIndices[key]; ok && names[i] == renderPassSliceName {
var idx []uint64
if c, ok := subCommandGroupMap[key]; ok { // Sometimes multiple renderPass slices share the same renderPass and renderTarget.
idx = indices[c]
} else {
idx = indices[0]
subCommandGroupMap[key] = 0
}
names[i] = fmt.Sprintf("%v", idx)
parent := utils.FindParentGroup(ctx, subOrder, cb, groupParentLookup, &groups, syncData.SubmissionIndices, capture)
group := &service.ProfilingData_GpuSlices_Group{
Id: int32(len(groups)),
Name: fmt.Sprintf("RenderPass %v, RenderTarget %v", uint64(renderPasses[i]), uint64(renderTargets[i])),
Parent: parent,
Link: &path.Command{Capture: capture, Indices: idx},
}
groups = append(groups, group)
subCommandGroupMap[key]++
}
} else {
log.W(ctx, "Encountered submission ID mismatch %v", v)
}
// Find the group that the current slice belongs to and mark down group id.
if len(groups) > 0 {
groupIds[i] = groups[len(groups)-1].Id // Slices were time sorted and main renderPass slice comes first.
} else {
log.W(ctx, "Group missing for slice %v at submission %v, commandBuffer %v, renderPass %v, renderTarget %v", names[i], submissionIds[i], commandBuffers[i], renderPasses[i], renderTargets[i])
groupIds[i] = -1
}
}
for i := uint64(0); i < numSliceRows; i++ {
var argsQueryResult *perfetto_service.QueryResult
var ok bool
if argsQueryResult, ok = argsQueryCache[argSetIds[i]]; !ok {
argsQuery := fmt.Sprintf(argsQueryFmt, argSetIds[i])
argsQueryResult, err = processor.Query(argsQuery)
if err != nil {
log.W(ctx, "SQL query failed: %v", argsQuery)
}
argsQueryCache[argSetIds[i]] = argsQueryResult
}
argsColumns := argsQueryResult.GetColumns()
numArgsRows := argsQueryResult.GetNumRecords()
var extras []*service.ProfilingData_GpuSlices_Slice_Extra
for j := uint64(0); j < numArgsRows; j++ {
keys := argsColumns[0].GetStringValues()
values := argsColumns[1].GetStringValues()
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: keys[j],
Value: &service.ProfilingData_GpuSlices_Slice_Extra_StringValue{StringValue: values[j]},
})
}
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "contextId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(contextIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "renderTarget",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(renderTargets[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "commandBuffer",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(commandBuffers[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "renderPass",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(renderPasses[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "frameId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(frameIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "submissionId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(submissionIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "hwQueueId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(hwQueueIds[i])},
})
slices[i] = &service.ProfilingData_GpuSlices_Slice{
Ts: uint64(timestamps[i]),
Dur: uint64(durations[i]),
Id: uint64(ids[i]),
Label: names[i],
Depth: int32(depths[i]),
Extras: extras,
TrackId: int32(trackIds[i]),
GroupId: groupIds[i],
}
if _, ok := trackIdCache[trackIds[i]]; !ok {
trackIdCache[trackIds[i]] = true
tracks = append(tracks, &service.ProfilingData_GpuSlices_Track{
Id: int32(trackIds[i]),
Name: trackNames[i],
})
}
}
return &service.ProfilingData_GpuSlices{
Slices: slices,
Tracks: tracks,
Groups: groups,
}, nil
}
func processCounters(ctx context.Context, processor *perfetto.Processor, desc *device.GpuCounterDescriptor) ([]*service.ProfilingData_Counter, error) {
counterTracksQueryResult, err := processor.Query(counterTracksQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", counterTracksQuery)
}
// t.id, name, unit, description, ts, value
tracksColumns := counterTracksQueryResult.GetColumns()
numTracksRows := counterTracksQueryResult.GetNumRecords()
counters := make([]*service.ProfilingData_Counter, numTracksRows)
// Grab all the column values. Depends on the order of columns selected in countersQuery
trackIds := tracksColumns[0].GetLongValues()
names := tracksColumns[1].GetStringValues()
units := tracksColumns[2].GetStringValues()
descriptions := tracksColumns[3].GetStringValues()
nameToSpec := map[string]*device.GpuCounterDescriptor_GpuCounterSpec{}
if desc != nil {
for _, spec := range desc.Specs {
nameToSpec[spec.Name] = spec
}
}
for i := uint64(0); i < numTracksRows; i++ {
countersQuery := fmt.Sprintf(countersQueryFmt, trackIds[i])
countersQueryResult, err := processor.Query(countersQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", countersQuery)
}
countersColumns := countersQueryResult.GetColumns()
timestampsLong := countersColumns[0].GetLongValues()
timestamps := make([]uint64, len(timestampsLong))
for i, t := range timestampsLong {
timestamps[i] = uint64(t)
}
values := countersColumns[1].GetDoubleValues()
spec, _ := nameToSpec[names[i]]
// TODO(apbodnar) Populate the `default` field once the trace processor supports it (b/147432390)
counters[i] = &service.ProfilingData_Counter{
Id: uint32(trackIds[i]),
Name: names[i],
Unit: units[i],
Description: descriptions[i],
Spec: spec,
Timestamps: timestamps,
Values: values,
}
}
return counters, nil
}
| extractTraceHandles | identifier_name |
profiling_data.go | // Copyright (C) 2019 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adreno
import (
"context"
"fmt"
"github.com/google/gapid/core/log"
"github.com/google/gapid/core/os/device"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/api/sync"
"github.com/google/gapid/gapis/perfetto"
perfetto_service "github.com/google/gapid/gapis/perfetto/service"
"github.com/google/gapid/gapis/service"
"github.com/google/gapid/gapis/service/path"
"github.com/google/gapid/gapis/trace/android/profile"
"github.com/google/gapid/gapis/trace/android/utils"
)
var (
slicesQuery = "" +
"SELECT s.context_id, s.render_target, s.frame_id, s.submission_id, s.hw_queue_id, s.command_buffer, s.render_pass, s.ts, s.dur, s.id, s.name, depth, arg_set_id, track_id, t.name " +
"FROM gpu_track t LEFT JOIN gpu_slice s " +
"ON s.track_id = t.id WHERE t.scope = 'gpu_render_stage' ORDER BY s.ts"
argsQueryFmt = "" +
"SELECT key, string_value FROM args WHERE args.arg_set_id = %d"
queueSubmitQuery = "" +
"SELECT submission_id FROM gpu_slice s JOIN track t ON s.track_id = t.id WHERE s.name = 'vkQueueSubmit' AND t.name = 'Vulkan Events' ORDER BY submission_id"
counterTracksQuery = "" +
"SELECT id, name, unit, description FROM gpu_counter_track ORDER BY id"
countersQueryFmt = "" +
"SELECT ts, value FROM counter c WHERE c.track_id = %d ORDER BY ts"
renderPassSliceName = "Surface"
)
func ProcessProfilingData(ctx context.Context, processor *perfetto.Processor, capture *path.Capture, desc *device.GpuCounterDescriptor, handleMapping *map[uint64][]service.VulkanHandleMappingItem, syncData *sync.Data) (*service.ProfilingData, error) {
slices, err := processGpuSlices(ctx, processor, capture, handleMapping, syncData)
if err != nil {
log.Err(ctx, err, "Failed to get GPU slices")
}
counters, err := processCounters(ctx, processor, desc)
if err != nil {
log.Err(ctx, err, "Failed to get GPU counters")
}
gpuCounters, err := profile.ComputeCounters(ctx, slices, counters)
if err != nil {
log.Err(ctx, err, "Failed to calculate performance data based on GPU slices and counters")
}
return &service.ProfilingData{
Slices: slices,
Counters: counters,
GpuCounters: gpuCounters,
}, nil
}
func extractTraceHandles(ctx context.Context, replayHandles *[]int64, replayHandleType string, handleMapping *map[uint64][]service.VulkanHandleMappingItem) {
for i, v := range *replayHandles {
handles, ok := (*handleMapping)[uint64(v)]
if !ok {
log.E(ctx, "%v not found in replay: %v", replayHandleType, v)
continue
}
found := false
for _, handle := range handles |
if !found {
log.E(ctx, "Incorrect Handle type for %v: %v", replayHandleType, v)
}
}
}
func fixContextIds(contextIDs []int64) {
// This is a workaround for a QC bug (b/192546534)
// that causes the first device ID to be zero after a
// renderpass change in the same queue submit.
// So we fill the zero entries with the existing
// device ID, but only when the trace contains a single device ID.
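// For example (illustrative values): [0, 5, 0, 5] becomes [5, 5, 5, 5],
// whereas [0, 5, 0, 7] is left unchanged because two distinct IDs exist.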
zeroIndices := make([]int, 0)
contextID := int64(0)
for i, v := range contextIDs {
if v == 0 {
zeroIndices = append(zeroIndices, i)
continue
}
if contextID == 0 {
contextID = v
continue
}
if contextID != v {
// There are multiple devices
// We cannot know which one to fill
return
}
}
for _, v := range zeroIndices {
// At this point only one device ID exists in the entire trace,
// so the zero entries can safely be assigned to it.
contextIDs[v] = contextID
}
}
func processGpuSlices(ctx context.Context, processor *perfetto.Processor, capture *path.Capture, handleMapping *map[uint64][]service.VulkanHandleMappingItem, syncData *sync.Data) (*service.ProfilingData_GpuSlices, error) {
slicesQueryResult, err := processor.Query(slicesQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", slicesQuery)
}
queueSubmitQueryResult, err := processor.Query(queueSubmitQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", queueSubmitQuery)
}
queueSubmitColumns := queueSubmitQueryResult.GetColumns()
queueSubmitIds := queueSubmitColumns[0].GetLongValues()
submissionOrdering := make(map[int64]uint64)
for i, v := range queueSubmitIds {
submissionOrdering[v] = uint64(i)
}
trackIdCache := make(map[int64]bool)
argsQueryCache := make(map[int64]*perfetto_service.QueryResult)
slicesColumns := slicesQueryResult.GetColumns()
numSliceRows := slicesQueryResult.GetNumRecords()
slices := make([]*service.ProfilingData_GpuSlices_Slice, numSliceRows)
groupParentLookup := map[api.CmdSubmissionKey]*service.ProfilingData_GpuSlices_Group{}
groups := []*service.ProfilingData_GpuSlices_Group{}
groupIds := make([]int32, numSliceRows)
var tracks []*service.ProfilingData_GpuSlices_Track
// Grab all the column values. Depends on the order of columns selected in slicesQuery
contextIds := slicesColumns[0].GetLongValues()
fixContextIds(contextIds)
extractTraceHandles(ctx, &contextIds, "VkDevice", handleMapping)
renderTargets := slicesColumns[1].GetLongValues()
extractTraceHandles(ctx, &renderTargets, "VkFramebuffer", handleMapping)
commandBuffers := slicesColumns[5].GetLongValues()
extractTraceHandles(ctx, &commandBuffers, "VkCommandBuffer", handleMapping)
renderPasses := slicesColumns[6].GetLongValues()
extractTraceHandles(ctx, &renderPasses, "VkRenderPass", handleMapping)
frameIds := slicesColumns[2].GetLongValues()
submissionIds := slicesColumns[3].GetLongValues()
hwQueueIds := slicesColumns[4].GetLongValues()
timestamps := slicesColumns[7].GetLongValues()
durations := slicesColumns[8].GetLongValues()
ids := slicesColumns[9].GetLongValues()
names := slicesColumns[10].GetStringValues()
depths := slicesColumns[11].GetLongValues()
argSetIds := slicesColumns[12].GetLongValues()
trackIds := slicesColumns[13].GetLongValues()
trackNames := slicesColumns[14].GetStringValues()
subCommandGroupMap := make(map[api.CmdSubmissionKey]int)
for i, v := range submissionIds {
subOrder, ok := submissionOrdering[v]
if ok {
cb := uint64(commandBuffers[i])
key := api.CmdSubmissionKey{subOrder, cb, uint64(renderPasses[i]), uint64(renderTargets[i])}
// Create a new group for each main renderPass slice.
if indices, ok := syncData.SubmissionIndices[key]; ok && names[i] == renderPassSliceName {
var idx []uint64
if c, ok := subCommandGroupMap[key]; ok { // Sometimes multiple renderPass slices share the same renderPass and renderTarget.
idx = indices[c]
} else {
idx = indices[0]
subCommandGroupMap[key] = 0
}
names[i] = fmt.Sprintf("%v", idx)
parent := utils.FindParentGroup(ctx, subOrder, cb, groupParentLookup, &groups, syncData.SubmissionIndices, capture)
group := &service.ProfilingData_GpuSlices_Group{
Id: int32(len(groups)),
Name: fmt.Sprintf("RenderPass %v, RenderTarget %v", uint64(renderPasses[i]), uint64(renderTargets[i])),
Parent: parent,
Link: &path.Command{Capture: capture, Indices: idx},
}
groups = append(groups, group)
subCommandGroupMap[key]++
}
} else {
log.W(ctx, "Encountered submission ID mismatch %v", v)
}
// Find the group that the current slice belongs to and mark down group id.
if len(groups) > 0 {
groupIds[i] = groups[len(groups)-1].Id // Slices were time sorted and main renderPass slice comes first.
} else {
log.W(ctx, "Group missing for slice %v at submission %v, commandBuffer %v, renderPass %v, renderTarget %v", names[i], submissionIds[i], commandBuffers[i], renderPasses[i], renderTargets[i])
groupIds[i] = -1
}
}
for i := uint64(0); i < numSliceRows; i++ {
var argsQueryResult *perfetto_service.QueryResult
var ok bool
if argsQueryResult, ok = argsQueryCache[argSetIds[i]]; !ok {
argsQuery := fmt.Sprintf(argsQueryFmt, argSetIds[i])
argsQueryResult, err = processor.Query(argsQuery)
if err != nil {
log.W(ctx, "SQL query failed: %v", argsQuery)
}
argsQueryCache[argSetIds[i]] = argsQueryResult
}
argsColumns := argsQueryResult.GetColumns()
numArgsRows := argsQueryResult.GetNumRecords()
var extras []*service.ProfilingData_GpuSlices_Slice_Extra
for j := uint64(0); j < numArgsRows; j++ {
keys := argsColumns[0].GetStringValues()
values := argsColumns[1].GetStringValues()
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: keys[j],
Value: &service.ProfilingData_GpuSlices_Slice_Extra_StringValue{StringValue: values[j]},
})
}
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "contextId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(contextIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "renderTarget",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(renderTargets[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "commandBuffer",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(commandBuffers[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "renderPass",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(renderPasses[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "frameId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(frameIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "submissionId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(submissionIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "hwQueueId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(hwQueueIds[i])},
})
slices[i] = &service.ProfilingData_GpuSlices_Slice{
Ts: uint64(timestamps[i]),
Dur: uint64(durations[i]),
Id: uint64(ids[i]),
Label: names[i],
Depth: int32(depths[i]),
Extras: extras,
TrackId: int32(trackIds[i]),
GroupId: groupIds[i],
}
if _, ok := trackIdCache[trackIds[i]]; !ok {
trackIdCache[trackIds[i]] = true
tracks = append(tracks, &service.ProfilingData_GpuSlices_Track{
Id: int32(trackIds[i]),
Name: trackNames[i],
})
}
}
return &service.ProfilingData_GpuSlices{
Slices: slices,
Tracks: tracks,
Groups: groups,
}, nil
}
func processCounters(ctx context.Context, processor *perfetto.Processor, desc *device.GpuCounterDescriptor) ([]*service.ProfilingData_Counter, error) {
counterTracksQueryResult, err := processor.Query(counterTracksQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", counterTracksQuery)
}
// id, name, unit, description (from counterTracksQuery); ts and value come from countersQueryFmt below
tracksColumns := counterTracksQueryResult.GetColumns()
numTracksRows := counterTracksQueryResult.GetNumRecords()
counters := make([]*service.ProfilingData_Counter, numTracksRows)
// Grab all the column values. Depends on the order of columns selected in counterTracksQuery
trackIds := tracksColumns[0].GetLongValues()
names := tracksColumns[1].GetStringValues()
units := tracksColumns[2].GetStringValues()
descriptions := tracksColumns[3].GetStringValues()
nameToSpec := map[string]*device.GpuCounterDescriptor_GpuCounterSpec{}
if desc != nil {
for _, spec := range desc.Specs {
nameToSpec[spec.Name] = spec
}
}
for i := uint64(0); i < numTracksRows; i++ {
countersQuery := fmt.Sprintf(countersQueryFmt, trackIds[i])
countersQueryResult, err := processor.Query(countersQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", countersQuery)
}
countersColumns := countersQueryResult.GetColumns()
timestampsLong := countersColumns[0].GetLongValues()
timestamps := make([]uint64, len(timestampsLong))
for j, t := range timestampsLong {
timestamps[j] = uint64(t)
}
values := countersColumns[1].GetDoubleValues()
spec, _ := nameToSpec[names[i]]
// TODO(apbodnar) Populate the `default` field once the trace processor supports it (b/147432390)
counters[i] = &service.ProfilingData_Counter{
Id: uint32(trackIds[i]),
Name: names[i],
Unit: units[i],
Description: descriptions[i],
Spec: spec,
Timestamps: timestamps,
Values: values,
}
}
return counters, nil
}
| {
if handle.HandleType == replayHandleType {
(*replayHandles)[i] = int64(handle.TraceValue)
found = true
break
}
} | conditional_block |
profiling_data.go | // Copyright (C) 2019 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adreno
import (
"context"
"fmt"
"github.com/google/gapid/core/log"
"github.com/google/gapid/core/os/device"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/api/sync" | "github.com/google/gapid/gapis/perfetto"
perfetto_service "github.com/google/gapid/gapis/perfetto/service"
"github.com/google/gapid/gapis/service"
"github.com/google/gapid/gapis/service/path"
"github.com/google/gapid/gapis/trace/android/profile"
"github.com/google/gapid/gapis/trace/android/utils"
)
var (
slicesQuery = "" +
"SELECT s.context_id, s.render_target, s.frame_id, s.submission_id, s.hw_queue_id, s.command_buffer, s.render_pass, s.ts, s.dur, s.id, s.name, depth, arg_set_id, track_id, t.name " +
"FROM gpu_track t LEFT JOIN gpu_slice s " +
"ON s.track_id = t.id WHERE t.scope = 'gpu_render_stage' ORDER BY s.ts"
argsQueryFmt = "" +
"SELECT key, string_value FROM args WHERE args.arg_set_id = %d"
queueSubmitQuery = "" +
"SELECT submission_id FROM gpu_slice s JOIN track t ON s.track_id = t.id WHERE s.name = 'vkQueueSubmit' AND t.name = 'Vulkan Events' ORDER BY submission_id"
counterTracksQuery = "" +
"SELECT id, name, unit, description FROM gpu_counter_track ORDER BY id"
countersQueryFmt = "" +
"SELECT ts, value FROM counter c WHERE c.track_id = %d ORDER BY ts"
renderPassSliceName = "Surface"
)
func ProcessProfilingData(ctx context.Context, processor *perfetto.Processor, capture *path.Capture, desc *device.GpuCounterDescriptor, handleMapping *map[uint64][]service.VulkanHandleMappingItem, syncData *sync.Data) (*service.ProfilingData, error) {
slices, err := processGpuSlices(ctx, processor, capture, handleMapping, syncData)
if err != nil {
log.Err(ctx, err, "Failed to get GPU slices")
}
counters, err := processCounters(ctx, processor, desc)
if err != nil {
log.Err(ctx, err, "Failed to get GPU counters")
}
gpuCounters, err := profile.ComputeCounters(ctx, slices, counters)
if err != nil {
log.Err(ctx, err, "Failed to calculate performance data based on GPU slices and counters")
}
return &service.ProfilingData{
Slices: slices,
Counters: counters,
GpuCounters: gpuCounters,
}, nil
}
func extractTraceHandles(ctx context.Context, replayHandles *[]int64, replayHandleType string, handleMapping *map[uint64][]service.VulkanHandleMappingItem) {
for i, v := range *replayHandles {
handles, ok := (*handleMapping)[uint64(v)]
if !ok {
log.E(ctx, "%v not found in replay: %v", replayHandleType, v)
continue
}
found := false
for _, handle := range handles {
if handle.HandleType == replayHandleType {
(*replayHandles)[i] = int64(handle.TraceValue)
found = true
break
}
}
if !found {
log.E(ctx, "Incorrect Handle type for %v: %v", replayHandleType, v)
}
}
}
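// fixContextIds replaces zero context IDs with the single non-zero ID observed
// in the trace; if more than one distinct non-zero ID exists, the slice is left
// untouched because the right value cannot be inferred.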
func fixContextIds(contextIDs []int64) {
// This is a workaround for a QC bug (b/192546534)
// that causes the first device ID to be zero after a
// renderpass change in the same queue submit.
// So we fill the zero entries with the existing
// device ID, but only when the trace contains a single device ID.
zeroIndices := make([]int, 0)
contextID := int64(0)
for i, v := range contextIDs {
if v == 0 {
zeroIndices = append(zeroIndices, i)
continue
}
if contextID == 0 {
contextID = v
continue
}
if contextID != v {
// There are multiple devices
// We cannot know which one to fill
return
}
}
for _, v := range zeroIndices {
// At this point only one device ID exists in the entire trace,
// so the zero entries can safely be assigned to it.
contextIDs[v] = contextID
}
}
func processGpuSlices(ctx context.Context, processor *perfetto.Processor, capture *path.Capture, handleMapping *map[uint64][]service.VulkanHandleMappingItem, syncData *sync.Data) (*service.ProfilingData_GpuSlices, error) {
slicesQueryResult, err := processor.Query(slicesQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", slicesQuery)
}
queueSubmitQueryResult, err := processor.Query(queueSubmitQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", queueSubmitQuery)
}
queueSubmitColumns := queueSubmitQueryResult.GetColumns()
queueSubmitIds := queueSubmitColumns[0].GetLongValues()
submissionOrdering := make(map[int64]uint64)
for i, v := range queueSubmitIds {
submissionOrdering[v] = uint64(i)
}
trackIdCache := make(map[int64]bool)
argsQueryCache := make(map[int64]*perfetto_service.QueryResult)
slicesColumns := slicesQueryResult.GetColumns()
numSliceRows := slicesQueryResult.GetNumRecords()
slices := make([]*service.ProfilingData_GpuSlices_Slice, numSliceRows)
groupParentLookup := map[api.CmdSubmissionKey]*service.ProfilingData_GpuSlices_Group{}
groups := []*service.ProfilingData_GpuSlices_Group{}
groupIds := make([]int32, numSliceRows)
var tracks []*service.ProfilingData_GpuSlices_Track
// Grab all the column values. Depends on the order of columns selected in slicesQuery
contextIds := slicesColumns[0].GetLongValues()
fixContextIds(contextIds)
extractTraceHandles(ctx, &contextIds, "VkDevice", handleMapping)
renderTargets := slicesColumns[1].GetLongValues()
extractTraceHandles(ctx, &renderTargets, "VkFramebuffer", handleMapping)
commandBuffers := slicesColumns[5].GetLongValues()
extractTraceHandles(ctx, &commandBuffers, "VkCommandBuffer", handleMapping)
renderPasses := slicesColumns[6].GetLongValues()
extractTraceHandles(ctx, &renderPasses, "VkRenderPass", handleMapping)
frameIds := slicesColumns[2].GetLongValues()
submissionIds := slicesColumns[3].GetLongValues()
hwQueueIds := slicesColumns[4].GetLongValues()
timestamps := slicesColumns[7].GetLongValues()
durations := slicesColumns[8].GetLongValues()
ids := slicesColumns[9].GetLongValues()
names := slicesColumns[10].GetStringValues()
depths := slicesColumns[11].GetLongValues()
argSetIds := slicesColumns[12].GetLongValues()
trackIds := slicesColumns[13].GetLongValues()
trackNames := slicesColumns[14].GetStringValues()
subCommandGroupMap := make(map[api.CmdSubmissionKey]int)
for i, v := range submissionIds {
subOrder, ok := submissionOrdering[v]
if ok {
cb := uint64(commandBuffers[i])
key := api.CmdSubmissionKey{subOrder, cb, uint64(renderPasses[i]), uint64(renderTargets[i])}
// Create a new group for each main renderPass slice.
if indices, ok := syncData.SubmissionIndices[key]; ok && names[i] == renderPassSliceName {
var idx []uint64
if c, ok := subCommandGroupMap[key]; ok { // Sometimes multiple renderPass slices share the same renderPass and renderTarget.
idx = indices[c]
} else {
idx = indices[0]
subCommandGroupMap[key] = 0
}
names[i] = fmt.Sprintf("%v", idx)
parent := utils.FindParentGroup(ctx, subOrder, cb, groupParentLookup, &groups, syncData.SubmissionIndices, capture)
group := &service.ProfilingData_GpuSlices_Group{
Id: int32(len(groups)),
Name: fmt.Sprintf("RenderPass %v, RenderTarget %v", uint64(renderPasses[i]), uint64(renderTargets[i])),
Parent: parent,
Link: &path.Command{Capture: capture, Indices: idx},
}
groups = append(groups, group)
subCommandGroupMap[key]++
}
} else {
log.W(ctx, "Encountered submission ID mismatch %v", v)
}
// Find the group that the current slice belongs to and mark down group id.
if len(groups) > 0 {
groupIds[i] = groups[len(groups)-1].Id // Slices were time sorted and main renderPass slice comes first.
} else {
log.W(ctx, "Group missing for slice %v at submission %v, commandBuffer %v, renderPass %v, renderTarget %v", names[i], submissionIds[i], commandBuffers[i], renderPasses[i], renderTargets[i])
groupIds[i] = -1
}
}
for i := uint64(0); i < numSliceRows; i++ {
var argsQueryResult *perfetto_service.QueryResult
var ok bool
if argsQueryResult, ok = argsQueryCache[argSetIds[i]]; !ok {
argsQuery := fmt.Sprintf(argsQueryFmt, argSetIds[i])
argsQueryResult, err = processor.Query(argsQuery)
if err != nil {
log.W(ctx, "SQL query failed: %v", argsQuery)
}
argsQueryCache[argSetIds[i]] = argsQueryResult
}
argsColumns := argsQueryResult.GetColumns()
numArgsRows := argsQueryResult.GetNumRecords()
var extras []*service.ProfilingData_GpuSlices_Slice_Extra
for j := uint64(0); j < numArgsRows; j++ {
keys := argsColumns[0].GetStringValues()
values := argsColumns[1].GetStringValues()
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: keys[j],
Value: &service.ProfilingData_GpuSlices_Slice_Extra_StringValue{StringValue: values[j]},
})
}
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "contextId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(contextIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "renderTarget",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(renderTargets[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "commandBuffer",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(commandBuffers[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "renderPass",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(renderPasses[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "frameId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(frameIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "submissionId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(submissionIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "hwQueueId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(hwQueueIds[i])},
})
slices[i] = &service.ProfilingData_GpuSlices_Slice{
Ts: uint64(timestamps[i]),
Dur: uint64(durations[i]),
Id: uint64(ids[i]),
Label: names[i],
Depth: int32(depths[i]),
Extras: extras,
TrackId: int32(trackIds[i]),
GroupId: groupIds[i],
}
if _, ok := trackIdCache[trackIds[i]]; !ok {
trackIdCache[trackIds[i]] = true
tracks = append(tracks, &service.ProfilingData_GpuSlices_Track{
Id: int32(trackIds[i]),
Name: trackNames[i],
})
}
}
return &service.ProfilingData_GpuSlices{
Slices: slices,
Tracks: tracks,
Groups: groups,
}, nil
}
func processCounters(ctx context.Context, processor *perfetto.Processor, desc *device.GpuCounterDescriptor) ([]*service.ProfilingData_Counter, error) {
counterTracksQueryResult, err := processor.Query(counterTracksQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", counterTracksQuery)
}
// id, name, unit, description (from counterTracksQuery); ts and value come from countersQueryFmt below
tracksColumns := counterTracksQueryResult.GetColumns()
numTracksRows := counterTracksQueryResult.GetNumRecords()
counters := make([]*service.ProfilingData_Counter, numTracksRows)
// Grab all the column values. Depends on the order of columns selected in counterTracksQuery
trackIds := tracksColumns[0].GetLongValues()
names := tracksColumns[1].GetStringValues()
units := tracksColumns[2].GetStringValues()
descriptions := tracksColumns[3].GetStringValues()
nameToSpec := map[string]*device.GpuCounterDescriptor_GpuCounterSpec{}
if desc != nil {
for _, spec := range desc.Specs {
nameToSpec[spec.Name] = spec
}
}
for i := uint64(0); i < numTracksRows; i++ {
countersQuery := fmt.Sprintf(countersQueryFmt, trackIds[i])
countersQueryResult, err := processor.Query(countersQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", countersQuery)
}
countersColumns := countersQueryResult.GetColumns()
timestampsLong := countersColumns[0].GetLongValues()
timestamps := make([]uint64, len(timestampsLong))
for j, t := range timestampsLong {
timestamps[j] = uint64(t)
}
values := countersColumns[1].GetDoubleValues()
spec, _ := nameToSpec[names[i]]
// TODO(apbodnar) Populate the `default` field once the trace processor supports it (b/147432390)
counters[i] = &service.ProfilingData_Counter{
Id: uint32(trackIds[i]),
Name: names[i],
Unit: units[i],
Description: descriptions[i],
Spec: spec,
Timestamps: timestamps,
Values: values,
}
}
return counters, nil
} | random_line_split |
|
config.js | ๏ปฟ/*global define */
/*jslint browser:true,sloppy:true */
/*
| Copyright 2014 Esri
|
| Licensed under the Apache License, Version 2.0 (the "License");
| you may not use this file except in compliance with the License.
| You may obtain a copy of the License at
|
| http://www.apache.org/licenses/LICENSE-2.0
|
| Unless required by applicable law or agreed to in writing, software
| distributed under the License is distributed on an "AS IS" BASIS,
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
| See the License for the specific language governing permissions and
| limitations under the License.
*/
define([], function () {
return {
// This file contains various configuration settings for esri template
//
// Use this file to perform the following:
//
// 1. Customize application settings here - [ Tag(s) to look for: ApplicationSettings ]
// 2. Specify header widget settings - [ Tag(s) to look for: AppHeaderWidgets ]
// 3. Specify URLs for base maps - [ Tag(s) to look for: BaseMapLayers ]
// 4. Customize address search settings - [ Tag(s) to look for: LocatorSettings]
//------------------------------------------------------------------------------------------------------------------------
// GENERAL SETTINGS
//------------------------------------------------------------------------------------------------------------------------
// group: Set the Group id for the application
// appid: ID of application on ArcGIS.com containing your settings for this template
// applicationName: Set application title
// applicationIcon: Set application icon path
// applicationFavicon: Set application Favicon path
// customLogoUrl: Set custom map logo path
// itemSearchDefaultValue: Set the default value to search
// theme: Set the application theme. If blank, default blue theme will be loaded. Supported theme keys are blueTheme, greenTheme and redTheme.
// showCategoriesTagCloud: Set this variable to enable or disable categories tag cloud
// showGeographiesTagCloud: Set this variable to enable or disable geographies tag cloud
// geographiesTagText: This identifies the tag for geographies tag cloud. If set to blank,
// geographies tag cloud will not be displayed irrespective of the value for showGeographiesTagCloud.
// geographiesPrefixText: Set this variable to trim prefix text (eg. arcgis.) from geographies tag cloud. If set to blank,
// geographies tag cloud will be displayed as is. Case sensitive.
// enableAutoComplete: Set this variable to enable or disable autocomplete on item search
// tagCloudFontMinValue: Set min value of the tag cloud font,
// tagCloudFontMaxValue: set the max value of the tag cloud font,
// tagCloudFontUnits: Set the units for the text in tag cloud. UI will be distorted if font sizes have inappropriate values
// showMaxTopTags: Set this variable to the maximum number of results to be displayed in geographies and categories tag clouds
// displaySharingAttribute: If set to true, display sharing attributes ("ALL", "GRP" or "ORG").
// If set to false, sharing attributes ("ALL", "GRP" or "ORG") should not be displayed in item thumbnail
// useItemPage: If set to true then display Item Info Page
// If set to false and item is of type webmap then load the Item
// If set to false and item is of type other than webmap then download the Item
// portalURL: Set the portal URL
// geometryService: Set the URL for geometry service
// groupDescription: Displayed on the left panel of the index page. Defaults to group description.
// mapTitle: If not specified, the ArcGIS.com map's title is used.
// mapSnippet: If not specified, the ArcGIS.com web map's summary is used
// mapItemDescription: Displayed on item details page. Defaults to map description.
// mapLicenseInfo: Displayed on item details page. Defaults to map licenseInfo.
// defaultLayout: Default layout to use. "grid" or "list".
// sortField: Order to display the group items. Valid fields are: modified, numViews.
// sortOrder: Order to sort the group: "asc" or "desc".
// mapViewer: URL to open the gallery items to. "simple","arcgis".
// searchString: Performs a default search on the group with the set string.
// searchType: Performs a default search on the group for the specified item type. Valid fields are valid item types, eg. web map, feature service, map service, etc.
// showBasemapGallery: Show basemap gallery on map: true or false.
// showMapSearch: Show textbox for address search on map: true or false
// showOverviewMap: Show overview on map: true or false.
// showMoreInfo: Show more info link on item details page: true or false.
// showRatings: Show ratings of items on item details page.
// showViews: Show ratings of items on item details page.
// showLicenseInfo: Show Use Constraints on item details page.
// showAttribution: Show sources on item details page.
// showComments: Show comments on item details page.
// defaultLocatorSymbol: Set the image path for locator symbol. e.g. pushpin.
// markupSymbolWidth: Set the image width in pixels for locator symbol.
// markupSymbolHeight: Set the image height in pixels for locator symbol.
// zoomLevel: Following zoom level will be set for the map upon searching an address
// locatorDefaultAddress: Set the default address to search.
ApplicationSettings: {
group: "801cffe54b004008a8c316469c1e8326",
appid: "",
applicationName: "Map Gallery",
applicationIcon: "/themes/images/logo.png",
applicationFavicon: "/themes/images/favicon.ico",
customLogoUrl: "",
itemSearchDefaultValue: "Web Map",
theme: "",
showCategoriesTagCloud: true,
showGeographiesTagCloud: true,
geographiesTagText: "arcgis.",
geographiesPrefixText: "",
enableAutoComplete: true,
tagCloudFontMinValue: 15,
tagCloudFontMaxValue: 20,
tagCloudFontUnits: "px",
showMaxTopTags: 10,
displaySharingAttribute: false,
useItemPage: false,
portalURL: "http://www.arcgis.com",
geometryService: "http://tasks.arcgisonline.com/ArcGIS/rest/services/Geometry/GeometryServer",
groupDescription: "",
mapTitle: "",
mapSnippet: "",
mapItemDescription: "",
mapLicenseInfo: "",
defaultLayout: "list",
sortField: "numViews",
sortOrder: "desc",
mapViewer: "",
searchString: "",
searchType: "",
showBasemapGallery: true,
showMapSearch: true,
showOverviewMap: false,
showMoreInfo: true,
showRatings: true,
showViews: true,
showLicenseInfo: true,
showAttribution: false,
showComments: true,
defaultLocatorSymbol: "/themes/images/redpushpin.png",
markupSymbolWidth: 35,
markupSymbolHeight: 35,
zoomLevel: 12,
locatorDefaultAddress: "Lake Echo Rd Tracy City TN 37387"
},
//------------------------------------------------------------------------------------------------------------------------
// Header Widget Settings
//------------------------------------------------------------------------------------------------------------------------
// Set widgets settings such as widget title, widgetPath to be displayed in header panel
// Title: Name of the widget, will displayed as title of widget in header panel
// WidgetPath: path of the widget respective to the widgets package.
AppHeaderWidgets: [{
Title: "Settings",
WidgetPath: "widgets/settings/settings"
}, {
Title: "Item Search",
WidgetPath: "widgets/locator/locator"
}, { | }, {
Title: "Layout",
WidgetPath: "widgets/layout/layout"
}, {
Title: "Sign In",
WidgetPath: "widgets/portalSignin/portalSignin"
}],
// ------------------------------------------------------------------------------------------------------------------------
// BASEMAP SETTINGS
// ------------------------------------------------------------------------------------------------------------------------
// Set baseMap layers
// Please note: All base-maps need to use the same spatial reference. By default, the first base-map will be loaded
BaseMapLayers: [{
Key: "topo",
ThumbnailSource: "themes/images/Topographic.jpg",
Name: "Topographic Map",
MapURL: "http://services.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer"
}, {
Key: "streets",
ThumbnailSource: "themes/images/streets.png",
Name: "Street Map",
MapURL: "http://services.arcgisonline.com/ArcGIS/rest/services/World_Street_Map/MapServer"
}, {
Key: "imagery",
ThumbnailSource: "themes/images/imagery.png",
Name: "Imagery Map",
MapURL: "http://services.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer"
}],
// ------------------------------------------------------------------------------------------------------------------------
// ADDRESS SEARCH SETTINGS
// ------------------------------------------------------------------------------------------------------------------------
// Set locator settings such as locator display fields, match score
// LocatorParameters: Required parameters to search the address candidates.
// SearchField: The name of geocode service input field that accepts the search address. e.g. 'SingleLine' or 'Address'.
// SearchBoundaryField: The name of geocode service input field that accepts an extent to search an input address within. e.g."searchExtent".
// LocatorURL: Specify URL for geocode service.
// LocatorOutFields: The list of outfields to be included in the result set provided by geocode service.
// DisplayField: Specify the outfield of geocode service. The value in this field will be displayed for search results in the application.
// AddressMatchScore: Required parameters to specify the accuracy of address match.
// Field: Set the outfield of geocode service that contains the Address Match Score.
// Value: Set the minimum score value for filtering the candidate results. The value should a number between 0-100.
// FilterFieldName,FilterFieldValues: Candidates based on which the address search will be performed.
// FilterFieldName: Set the outfield that contains the match level for geocode request. e.g. For World GeoCode, the field that contains the match level is 'Addr_type'.
// FilterFieldValues: Specify the desired match levels to filter address search results. e.g. 'StreetAddress', 'StreetName' etc.
LocatorSettings: {
LocatorParameters: {
SearchField: "SingleLine",
SearchBoundaryField: "searchExtent"
},
LocatorURL: "http://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer",
LocatorOutFields: ["Addr_Type", "Type", "Score", "Match_Addr", "xmin", "xmax", "ymin", "ymax"],
DisplayField: "${Match_Addr}",
AddressMatchScore: {
Field: "Score",
Value: 80
},
FilterFieldName: 'Addr_Type',
FilterFieldValues: ["StreetAddress", "StreetName", "PointAddress", "POI"]
}
};
}); | Title: "Info",
WidgetPath: "widgets/info/info"
}, {
Title: "Sort By",
WidgetPath: "widgets/sortby/sortby" | random_line_split |
manager.go | package manager
import (
"fmt"
"log"
"net"
"os"
"reflect"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"github.com/coreos/etcd/client"
"github.com/samuelngs/axis/health"
"github.com/samuelngs/axis/launcher"
"github.com/samuelngs/axis/models"
"github.com/samuelngs/axis/pkg/network"
)
type (
// Client - the etcd client
Client struct {
// lock
sync.RWMutex
// client props
endpoints []string
events chan *models.Event
cancel chan struct{}
client client.KeysAPI
// service address and directory
address string
dir *models.Directory
// service state
running bool
started bool
locked bool
// election state
leader *models.Leader
}
)
var (
// ServiceTTL - a period of time after-which the defined service node
// will be expired and removed from the etcd cluster
ServiceTTL = time.Second * 10
)
const (
// DirectoryElection - the path of the election
DirectoryElection string = "election"
// DirectoryMasters - the path of the masters
DirectoryMasters = "masters"
// DirectoryNodes - the path of the nodes
DirectoryNodes = "nodes"
// DirectoryRunning - the list of running nodes
DirectoryRunning = "running"
// DirectoryQueue - the list of starting nodes in queue
DirectoryQueue = "queue"
// EventElected - the leader election is completed
EventElected string = "elected"
// EventReElected - the leader election is completed
EventReElected string = "re-elected"
// EventElection - the leader election is started
EventElection = "election"
// EventReady - the service is ready to run
EventReady = "ready"
// EventWait - the election wait lock
EventWait = "wait"
// GroupLeader - the leader node
GroupLeader string = "leader"
// GroupWorker - the worker node
GroupWorker = "worker"
)
// NewClient - create a etcd client instance
func NewClient(endpoints ...[]string) *Client {
client := &Client{
events: make(chan *models.Event),
cancel: make(chan struct{}),
}
for _, v := range endpoints {
client.endpoints = v
}
client.address = client.GetServiceIP()
return client
}
// Events - the etcd events
func (c *Client) Events() chan *models.Event {
return c.events
}
// Leader - the leader node
func (c *Client) Leader() *models.Leader {
var leader *models.Leader
c.RLock()
leader = c.leader
c.RUnlock()
return leader
}
// SetupDirectory - setup directory for service
func (c *Client) SetupDirectory() {
v := reflect.ValueOf(c.dir)
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if v.Kind() != reflect.Struct {
log.Fatal("only accepts structs")
}
for i := 0; i < v.NumField(); i++ {
key := v.Field(i).String()
c.client.Set(context.Background(), key, "", &client.SetOptions{
Dir: true,
PrevExist: client.PrevNoExist,
})
}
}
// SetDir - set discovery directory
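//
// The resulting etcd key layout looks roughly like this (illustrative values:
// prefix "/axis", service "web", node address "10.0.0.12"; this sketch assumes
// the Directory *Node helpers simply append the node address):
//
//   /axis/web/election/10.0.0.12  - leader-election candidates
//   /axis/web/nodes/10.0.0.12     - all known nodes
//   /axis/web/queue/10.0.0.12     - nodes waiting to start
//   /axis/web/running/10.0.0.12   - nodes passing their health checks
//   /axis/web/masters/10.0.0.12   - the current leader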
func (c *Client) SetDir(prefix, name string) {
c.Lock()
c.dir = &models.Directory{
Base: fmt.Sprintf("%v/%v", prefix, name),
Election: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryElection),
Running: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryRunning),
Queue: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryQueue),
Nodes: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryNodes),
Masters: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryMasters),
}
c.Unlock()
}
// Observe - observe directory
func (c *Client) Observe() {
// register service
c.SetupDirectory()
c.RegisterNode(c.dir.Node(c.address))
c.RegisterNode(c.dir.QueueNode(c.address))
c.RegisterNode(c.dir.ElectionNode(c.address))
// create an interval timer to monitor service nodes
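// renewing at half the TTL keeps the keys alive for as long as the node is healthy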
interval := time.NewTicker(ServiceTTL / 2)
defer interval.Stop()
for {
select {
case <-interval.C:
go func() {
// read running state
c.RLock()
var running = c.running
c.RUnlock()
// renew nodes
c.RenewNode(c.dir.Node(c.address))
c.RenewNode(c.dir.ElectionNode(c.address))
if running {
c.RenewNode(c.dir.RunningNode(c.address))
if c.IsLeader() {
c.RenewNode(c.dir.MasterNode(c.address))
}
} else {
c.RenewNode(c.dir.QueueNode(c.address))
}
c.LeaderDiscovery()
}()
}
}
}
// Election - to start leader election task
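// When the current leader's key is deleted from (or expires out of) the
// election directory, the watcher below emits an election event to workers
// and triggers a fresh leader discovery.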
func (c *Client) Election() {
defer func() {
// recover if panic
if r := recover(); r != nil {
c.Election()
}
}()
// determine if context is already cancelled
isCancelled := false
// create context with cancel
ctx, cancel := context.WithCancel(context.Background())
defer func() {
if !isCancelled {
cancel()
isCancelled = true
}
}()
// generate election key
key := c.dir.ElectionNode(c.address)
// register this node's election key with the service TTL
c.client.Set(ctx, key, c.address, &client.SetOptions{
Dir: false,
TTL: ServiceTTL,
})
// create watcher
watcher := c.client.Watcher(c.dir.Election, &client.WatcherOptions{
AfterIndex: 0,
Recursive: true,
})
go func() {
for {
select {
case <-c.cancel:
if !isCancelled {
cancel()
isCancelled = true
}
return
}
}
}()
// observe election changes
for {
resp, err := watcher.Next(ctx)
if err != nil {
panic(err)
}
if resp.Node.Dir {
continue
}
if c.Leader() == nil {
continue
}
switch resp.Action {
case "set", "update":
case "delete":
if leader := c.Leader(); leader.Key == resp.Node.Key {
c.events <- &models.Event{Type: EventElection, Group: GroupWorker}
go c.LeaderDiscovery()
}
}
}
}
// RegisterNode - register node to etcd
func (c *Client) RegisterNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
Dir: false,
TTL: ServiceTTL,
})
}
// UnsetNode - unregister node by deleting its key from etcd
func (c *Client) UnsetNode(dir string) {
c.client.Delete(context.Background(), dir, nil)
}
// RenewNode - renew node and extend ttl
func (c *Client) RenewNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
PrevExist: client.PrevExist,
TTL: ServiceTTL,
})
}
// RunApplication - run application
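// It launches the entrypoint through the launcher, runs the configured port
// health checks, and registers or removes this node's "running" (and, for the
// leader, "masters") keys as the health state flips between pass and fail.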
func (c *Client) RunApplication(entrypoint *models.ApplicationEntryPoint) {
c.Lock()
if c.started {
c.Unlock()
return
}
c.started = true
c.Unlock()
receive := make(chan string)
// generate scope
scope := c.GenerateScope()
// launcher start daemon
go launcher.Start(scope, entrypoint)
// health check
if entrypoint.Health != nil && entrypoint.Health.Ports != nil {
go health.Check(receive, entrypoint.Health.Ports...)
}
for {
select {
case event := <-receive:
switch event {
case health.Pass:
c.Lock()
if !c.running {
fmt.Println("service is now running")
}
c.running = true
c.Unlock()
c.RegisterNode(c.dir.RunningNode(c.address))
if c.IsLeader() {
c.RegisterNode(c.dir.MasterNode(c.address))
}
case health.Fail:
c.Lock()
if c.running {
fmt.Println("service is now stopped")
}
c.running = false
c.Unlock()
c.UnsetNode(c.dir.RunningNode(c.address))
}
}
}
}
// IsLeader - is current node a leader
func (c *Client) IsLeader() bool {
// self node key
self := c.dir.ElectionNode(c.address)
if leader := c.Leader(); leader != nil && leader.Key == self {
return true
}
return false
}
// LeaderDiscovery - get leader/master node information
func (c *Client) LeaderDiscovery() {
dir := c.dir.Election
// self node key
self := fmt.Sprintf("%v/%v", dir, c.address)
// get a list of election nodes
resp, err := c.client.Get(context.Background(), dir, &client.GetOptions{Sort: true})
if err != nil {
log.Fatal(err)
}
// leader key and address
var key, addr string
// current lowest node index
var idx uint64
if len(resp.Node.Nodes) > 0 {
for _, v := range resp.Node.Nodes {
if v.Dir {
continue
}
if idx == 0 || v.CreatedIndex < idx {
key = v.Key
addr = v.Value
idx = v.CreatedIndex
}
}
}
if key == "" || addr == "" {
fmt.Println("# no nodes were found")
c.Lock()
c.leader = nil
c.Unlock()
} else {
leader := &models.Leader{Key: key, Address: addr}
current := c.Leader()
if current == nil {
if leader.Key == self {
fmt.Println("# elected as leader")
c.events <- &models.Event{Type: EventElected, Group: GroupLeader}
} else {
fmt.Println("# elected as worker")
// do not send any event until leader node is ready
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
go c.WaitForLeader()
}
}
} else if leader.Key != current.Key {
if leader.Key == self {
fmt.Println("# re-elected as leader")
c.events <- &models.Event{Type: EventReElected, Group: GroupLeader}
}
}
c.Lock()
c.leader = leader
c.Unlock()
}
}
// WaitForLeader - wait for leader node is ready
func (c *Client) WaitForLeader() {
// acquire the wait guard atomically so only one goroutine waits at a time
c.Lock()
alreadyWaiting := c.locked
if !alreadyWaiting {
c.locked = true
}
c.Unlock()
if !alreadyWaiting {
// release the guard only when this goroutine actually acquired it
defer func() {
c.Lock()
c.locked = false
c.Unlock()
}()
fmt.Println("# waiting for leader node")
interval := time.NewTicker(ServiceTTL)
defer interval.Stop()
for {
select {
case <-interval.C:
if c.IsLeader() {
return
}
fmt.Println("# scanning running nodes...")
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
fmt.Println("# no nodes are ready yet")
}
}
}
}
}
// GenerateScope - generate scope base
func (c *Client) GenerateScope() *models.Scope {
return models.SetupEnvironment(
c.GetServiceHostname(),
c.GetServiceIP(),
c.GetRunningNodes(),
)
}
// GetRunningNodes to get existed nodes
func (c *Client) GetRunningNodes() []models.Node {
dir := c.dir.Running
res := []models.Node{}
if c.client == nil {
return res
}
resp, err := c.client.Get(context.Background(), dir, nil)
if err != nil {
return res
}
if !resp.Node.Dir {
return res
}
for _, node := range resp.Node.Nodes {
res = append(res, models.Node(node.Value))
}
return res
}
// GetEnvEndPoint - to extract etcd endpoint environment from shell
func (c *Client) GetEnvEndPoint() string {
whitelist := []string{"ETCD_ENDPOINT", "ETCDCTL_ENDPOINT", "ETCD_HOST", "COREOS_PRIVATE_IPV4", "COREOS_PUBLIC_IPV4"}
for _, i := range whitelist {
if v := os.Getenv(i); v != "" {
return v
}
}
return ""
}
// GetEndPoint - to get endpoint from config, env or docker host
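// Resolution order: endpoints passed to NewClient, then the ETCD_*/COREOS_*
// environment variables, then the default-gateway address on ports 2379 and
// 4001, and finally localhost as a last resort.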
func (c *Client) GetEndPoint() []string {
for i := 0; i < 3; i++ {
switch i {
case 0:
if c.endpoints != nil && len(c.endpoints) > 0 {
return c.endpoints
}
case 1:
env := c.GetEnvEndPoint()
if strings.TrimSpace(env) == "" {
continue
}
if arr := strings.Split(env, ","); len(arr) > 0 {
return arr
}
case 2:
addr := c.GetServiceHostIP()
return []string{
fmt.Sprintf("http://%v:2379", addr),
fmt.Sprintf("http://%v:4001", addr),
}
}
}
return []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001"}
}
// GetServiceHostname - extract FQDN hostname from kernel
func (c *Client) GetServiceHostname() string {
hostname, err := os.Hostname()
if err != nil {
return ""
}
return hostname
}
// GetServiceHostIP - return service host ip (container host)
func (c *Client) GetServiceHostIP() string {
output, err := network.IP("route")
if err != nil {
log.Fatal(err)
}
for _, line := range strings.Split(output, "\n") {
if !strings.Contains(line, "default") {
continue
}
parts := strings.Split(line, " ")
for _, part := range parts {
if ip := net.ParseIP(part); ip != nil {
return part
}
}
}
return ""
}
// GetServiceIP - get service ip address
func (c *Client) GetServiceIP() string {
ifaces, err := net.Interfaces()
if err != nil |
for _, iface := range ifaces {
if iface.Flags&net.FlagUp == 0 {
continue // interface down
}
if iface.Flags&net.FlagLoopback != 0 {
continue // loopback interface
}
addrs, err := iface.Addrs()
if err != nil {
return ""
}
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip == nil || ip.IsLoopback() {
continue
}
ip = ip.To4()
if ip == nil {
continue // not an ipv4 address
}
return ip.String()
}
}
return ""
}
// Connect to connect etcd client
func (c *Client) Connect() error {
endpoints := c.GetEndPoint()
cfg := client.Config{
Endpoints: endpoints,
Transport: client.DefaultTransport,
HeaderTimeoutPerRequest: time.Second,
}
fmt.Printf("# connect to %v\n", endpoints)
conn, err := client.New(cfg)
if err != nil {
return err
}
kapi := client.NewKeysAPI(conn)
c.client = kapi
return nil
}
| {
return ""
} | conditional_block |
manager.go | package manager
import (
"fmt"
"log"
"net"
"os"
"reflect"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"github.com/coreos/etcd/client"
"github.com/samuelngs/axis/health"
"github.com/samuelngs/axis/launcher"
"github.com/samuelngs/axis/models"
"github.com/samuelngs/axis/pkg/network"
)
type (
// Client - the etcd client
Client struct {
// lock
sync.RWMutex
// client props
endpoints []string
events chan *models.Event
cancel chan struct{}
client client.KeysAPI
// service address and directory
address string
dir *models.Directory
// service state
running bool
started bool
locked bool
// election state
leader *models.Leader
}
)
var (
// ServiceTTL - a period of time after-which the defined service node
// will be expired and removed from the etcd cluster
ServiceTTL = time.Second * 10
)
const (
// DirectoryElection - the path of the election
DirectoryElection string = "election"
// DirectoryMasters - the path of the masters
DirectoryMasters = "masters"
// DirectoryNodes - the path of the nodes
DirectoryNodes = "nodes"
// DirectoryRunning - the list of running nodes
DirectoryRunning = "running"
// DirectoryQueue - the list of starting nodes in queue
DirectoryQueue = "queue"
// EventElected - the leader election is completed
EventElected string = "elected"
// EventReElected - the leader election is completed
EventReElected string = "re-elected"
// EventElection - the leader election is started
EventElection = "election"
// EventReady - the service is ready to run
EventReady = "ready"
// EventWait - the election wait lock
EventWait = "wait"
// GroupLeader - the leader node
GroupLeader string = "leader"
// GroupWorker - the worker node
GroupWorker = "worker"
)
// NewClient - create a etcd client instance
func NewClient(endpoints ...[]string) *Client {
client := &Client{
events: make(chan *models.Event),
cancel: make(chan struct{}),
}
for _, v := range endpoints {
client.endpoints = v
}
client.address = client.GetServiceIP()
return client
}
// Events - the etcd events
func (c *Client) Events() chan *models.Event {
return c.events
}
// Leader - the leader node
func (c *Client) Leader() *models.Leader {
var leader *models.Leader
c.RLock()
leader = c.leader
c.RUnlock()
return leader
}
// SetupDirectory - setup directory for service
func (c *Client) SetupDirectory() {
v := reflect.ValueOf(c.dir)
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if v.Kind() != reflect.Struct {
log.Fatal("only accepts structs")
}
for i := 0; i < v.NumField(); i++ {
key := v.Field(i).String()
c.client.Set(context.Background(), key, "", &client.SetOptions{
Dir: true,
PrevExist: client.PrevNoExist,
})
}
}
// SetDir - set discovery directory
func (c *Client) SetDir(prefix, name string) {
c.Lock()
c.dir = &models.Directory{
Base: fmt.Sprintf("%v/%v", prefix, name),
Election: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryElection),
Running: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryRunning),
Queue: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryQueue),
Nodes: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryNodes),
Masters: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryMasters),
}
c.Unlock()
}
// Observe - observe directory
func (c *Client) Observe() {
// register service
c.SetupDirectory()
c.RegisterNode(c.dir.Node(c.address))
c.RegisterNode(c.dir.QueueNode(c.address))
c.RegisterNode(c.dir.ElectionNode(c.address))
// create an interval timer to monitor service nodes
interval := time.NewTicker(ServiceTTL / 2)
defer interval.Stop()
for {
select {
case <-interval.C:
go func() {
// read running state
c.RLock()
var running = c.running
c.RUnlock()
// renew nodes
c.RenewNode(c.dir.Node(c.address))
c.RenewNode(c.dir.ElectionNode(c.address))
if running {
c.RenewNode(c.dir.RunningNode(c.address))
if c.IsLeader() {
c.RenewNode(c.dir.MasterNode(c.address))
}
} else {
c.RenewNode(c.dir.QueueNode(c.address))
}
c.LeaderDiscovery()
}()
}
}
}
// Election - to start leader election task
func (c *Client) Election() {
defer func() {
// recover if panic
if r := recover(); r != nil {
c.Election()
}
}()
// determine if context is already cancelled
isCancelled := false
// create context with cancel
ctx, cancel := context.WithCancel(context.Background())
defer func() {
if !isCancelled {
cancel()
isCancelled = true
}
}()
// generate election key
key := c.dir.ElectionNode(c.address)
// register this node's election key with the service TTL
c.client.Set(ctx, key, c.address, &client.SetOptions{
Dir: false,
TTL: ServiceTTL,
})
// create watcher
watcher := c.client.Watcher(c.dir.Election, &client.WatcherOptions{
AfterIndex: 0,
Recursive: true,
})
go func() {
for {
select {
case <-c.cancel:
if !isCancelled {
cancel()
isCancelled = true
}
return
}
}
}()
// observe election changes
for {
resp, err := watcher.Next(ctx)
if err != nil {
panic(err)
}
if resp.Node.Dir {
continue
}
if c.Leader() == nil {
continue
}
switch resp.Action {
case "set", "update":
case "delete":
if leader := c.Leader(); leader.Key == resp.Node.Key {
c.events <- &models.Event{Type: EventElection, Group: GroupWorker}
go c.LeaderDiscovery()
}
}
}
}
// RegisterNode - register node to etcd
func (c *Client) RegisterNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
Dir: false,
TTL: ServiceTTL,
})
}
// UnsetNode - unregister node by deleting its key from etcd
func (c *Client) UnsetNode(dir string) {
c.client.Delete(context.Background(), dir, nil)
}
// RenewNode - renew node and extend ttl
func (c *Client) RenewNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
PrevExist: client.PrevExist,
TTL: ServiceTTL,
})
}
// RunApplication - run application
func (c *Client) RunApplication(entrypoint *models.ApplicationEntryPoint) {
c.Lock()
if c.started {
c.Unlock()
return
}
c.started = true
c.Unlock()
receive := make(chan string)
// generate scope
scope := c.GenerateScope()
// launcher start daemon
go launcher.Start(scope, entrypoint)
// health check
if entrypoint.Health != nil && entrypoint.Health.Ports != nil {
go health.Check(receive, entrypoint.Health.Ports...)
}
for {
select {
case event := <-receive:
switch event {
case health.Pass:
c.Lock()
if !c.running {
fmt.Println("service is now running")
}
c.running = true
c.Unlock()
c.RegisterNode(c.dir.RunningNode(c.address))
if c.IsLeader() {
c.RegisterNode(c.dir.MasterNode(c.address))
}
case health.Fail:
c.Lock()
if c.running {
fmt.Println("service is now stopped")
}
c.running = false
c.Unlock()
c.UnsetNode(c.dir.RunningNode(c.address))
}
}
}
}
// IsLeader - is current node a leader
func (c *Client) IsLeader() bool {
// self node key
self := c.dir.ElectionNode(c.address)
if leader := c.Leader(); leader != nil && leader.Key == self {
return true
}
return false
}
// LeaderDiscovery - get leader/master node information
func (c *Client) LeaderDiscovery() {
dir := c.dir.Election
// self node key
self := fmt.Sprintf("%v/%v", dir, c.address)
// get a list of election nodes
resp, err := c.client.Get(context.Background(), dir, &client.GetOptions{Sort: true})
if err != nil {
log.Fatal(err)
}
// leader key and address
var key, addr string
// current lowest node index
var idx uint64
if len(resp.Node.Nodes) > 0 {
for _, v := range resp.Node.Nodes {
if v.Dir {
continue
}
if idx == 0 || v.CreatedIndex < idx {
key = v.Key
addr = v.Value
idx = v.CreatedIndex
}
}
}
if key == "" || addr == "" {
fmt.Println("# no nodes were found")
c.Lock()
c.leader = nil
c.Unlock()
} else {
leader := &models.Leader{Key: key, Address: addr}
current := c.Leader()
if current == nil {
if leader.Key == self {
fmt.Println("# elected as leader")
c.events <- &models.Event{Type: EventElected, Group: GroupLeader}
} else {
fmt.Println("# elected as worker")
// do not send any event until leader node is ready
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
go c.WaitForLeader()
}
}
} else if leader.Key != current.Key {
if leader.Key == self {
fmt.Println("# re-elected as leader")
c.events <- &models.Event{Type: EventReElected, Group: GroupLeader}
}
}
c.Lock()
c.leader = leader
c.Unlock()
}
}
// WaitForLeader - wait for leader node is ready
func (c *Client) WaitForLeader() {
// acquire the wait guard atomically so only one goroutine waits at a time
c.Lock()
alreadyWaiting := c.locked
if !alreadyWaiting {
c.locked = true
}
c.Unlock()
if !alreadyWaiting {
// release the guard only when this goroutine actually acquired it
defer func() {
c.Lock()
c.locked = false
c.Unlock()
}()
fmt.Println("# waiting for leader node")
interval := time.NewTicker(ServiceTTL)
defer interval.Stop()
for {
select {
case <-interval.C:
if c.IsLeader() {
return
}
fmt.Println("# scanning running nodes...")
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
fmt.Println("# no nodes are ready yet")
}
}
}
}
}
// GenerateScope - generate scope base
func (c *Client) GenerateScope() *models.Scope |
// GetRunningNodes to get existed nodes
func (c *Client) GetRunningNodes() []models.Node {
dir := c.dir.Running
res := []models.Node{}
if c.client == nil {
return res
}
resp, err := c.client.Get(context.Background(), dir, nil)
if err != nil {
return res
}
if !resp.Node.Dir {
return res
}
for _, node := range resp.Node.Nodes {
res = append(res, models.Node(node.Value))
}
return res
}
// GetEnvEndPoint - to extract etcd endpoint environment from shell
func (c *Client) GetEnvEndPoint() string {
whitelist := []string{"ETCD_ENDPOINT", "ETCDCTL_ENDPOINT", "ETCD_HOST", "COREOS_PRIVATE_IPV4", "COREOS_PUBLIC_IPV4"}
for _, i := range whitelist {
if v := os.Getenv(i); v != "" {
return v
}
}
return ""
}
// GetEndPoint - to get endpoint from config, env or docker host
func (c *Client) GetEndPoint() []string {
for i := 0; i < 3; i++ {
switch i {
case 0:
if c.endpoints != nil && len(c.endpoints) > 0 {
return c.endpoints
}
case 1:
env := c.GetEnvEndPoint()
if strings.TrimSpace(env) == "" {
continue
}
if arr := strings.Split(env, ","); len(arr) > 0 {
return arr
}
case 2:
addr := c.GetServiceHostIP()
return []string{
fmt.Sprintf("http://%v:2379", addr),
fmt.Sprintf("http://%v:4001", addr),
}
}
}
return []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001"}
}
// GetServiceHostname - extract FQDN hostname from kernel
func (c *Client) GetServiceHostname() string {
hostname, err := os.Hostname()
if err != nil {
return ""
}
return hostname
}
// GetServiceHostIP - return service host ip (container host)
func (c *Client) GetServiceHostIP() string {
output, err := network.IP("route")
if err != nil {
log.Fatal(err)
}
for _, line := range strings.Split(output, "\n") {
if !strings.Contains(line, "default") {
continue
}
parts := strings.Split(line, " ")
for _, part := range parts {
if ip := net.ParseIP(part); ip != nil {
return part
}
}
}
return ""
}
// GetServiceIP - get service ip address
func (c *Client) GetServiceIP() string {
ifaces, err := net.Interfaces()
if err != nil {
return ""
}
for _, iface := range ifaces {
if iface.Flags&net.FlagUp == 0 {
continue // interface down
}
if iface.Flags&net.FlagLoopback != 0 {
continue // loopback interface
}
addrs, err := iface.Addrs()
if err != nil {
return ""
}
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip == nil || ip.IsLoopback() {
continue
}
ip = ip.To4()
if ip == nil {
continue // not an ipv4 address
}
return ip.String()
}
}
return ""
}
// Connect to connect etcd client
func (c *Client) Connect() error {
endpoints := c.GetEndPoint()
cfg := client.Config{
Endpoints: endpoints,
Transport: client.DefaultTransport,
HeaderTimeoutPerRequest: time.Second,
}
fmt.Printf("# connect to %v\n", endpoints)
conn, err := client.New(cfg)
if err != nil {
return err
}
kapi := client.NewKeysAPI(conn)
c.client = kapi
return nil
}
| {
return models.SetupEnvironment(
c.GetServiceHostname(),
c.GetServiceIP(),
c.GetRunningNodes(),
)
} | identifier_body |
manager.go | package manager
import (
"fmt"
"log"
"net"
"os"
"reflect"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"github.com/coreos/etcd/client"
"github.com/samuelngs/axis/health"
"github.com/samuelngs/axis/launcher"
"github.com/samuelngs/axis/models"
"github.com/samuelngs/axis/pkg/network"
)
type (
// Client - the etcd client
Client struct {
// lock
sync.RWMutex
// client props
endpoints []string
events chan *models.Event
cancel chan struct{}
client client.KeysAPI
// service address and directory
address string
dir *models.Directory
// service state
running bool
started bool
locked bool
// election state
leader *models.Leader
}
)
var (
// ServiceTTL - a period of time after-which the defined service node
// will be expired and removed from the etcd cluster
ServiceTTL = time.Second * 10
)
const (
// DirectoryElection - the path of the election
DirectoryElection string = "election"
// DirectoryMasters - the path of the masters
DirectoryMasters = "masters"
// DirectoryNodes - the path of the nodes
DirectoryNodes = "nodes"
// DirectoryRunning - the list of running nodes
DirectoryRunning = "running"
// DirectoryQueue - the list of starting nodes in queue
DirectoryQueue = "queue"
// EventElected - the leader election is completed
EventElected string = "elected"
// EventReElected - the leader election is completed
EventReElected string = "re-elected"
// EventElection - the leader election is started
EventElection = "election"
// EventReady - the service is ready to run
EventReady = "ready"
// EventWait - the election wait lock
EventWait = "wait"
// GroupLeader - the leader node
GroupLeader string = "leader"
// GroupWorker - the worker node
GroupWorker = "worker"
)
// NewClient - create an etcd client instance
func NewClient(endpoints ...[]string) *Client {
client := &Client{
events: make(chan *models.Event),
cancel: make(chan struct{}),
}
for _, v := range endpoints {
client.endpoints = v
}
client.address = client.GetServiceIP()
return client
}
// Events - the etcd events
func (c *Client) Events() chan *models.Event {
return c.events
}
// Leader - the leader node
func (c *Client) Leader() *models.Leader {
var leader *models.Leader
c.RLock()
leader = c.leader
c.RUnlock()
return leader
}
// SetupDirectory - setup directory for service
func (c *Client) SetupDirectory() {
v := reflect.ValueOf(c.dir)
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if v.Kind() != reflect.Struct {
log.Fatal("only accepts structs")
}
for i := 0; i < v.NumField(); i++ {
key := v.Field(i).String()
c.client.Set(context.Background(), key, "", &client.SetOptions{
Dir: true,
PrevExist: client.PrevNoExist,
})
}
}
// SetDir - set discovery directory
func (c *Client) SetDir(prefix, name string) {
c.Lock()
c.dir = &models.Directory{
Base: fmt.Sprintf("%v/%v", prefix, name),
Election: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryElection),
Running: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryRunning),
Queue: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryQueue),
Nodes: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryNodes),
Masters: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryMasters),
}
c.Unlock()
}
// Observe - observe directory
func (c *Client) Observe() {
// register service
c.SetupDirectory()
c.RegisterNode(c.dir.Node(c.address))
c.RegisterNode(c.dir.QueueNode(c.address))
c.RegisterNode(c.dir.ElectionNode(c.address))
// create an interval timer to monitor service nodes
interval := time.NewTicker(ServiceTTL / 2)
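// ticking at half the TTL ensures every key is refreshed well before it expires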
defer interval.Stop()
for {
select {
case <-interval.C:
go func() {
// read running state
c.RLock()
var running = c.running
c.RUnlock()
// renew nodes
c.RenewNode(c.dir.Node(c.address))
c.RenewNode(c.dir.ElectionNode(c.address))
if running {
c.RenewNode(c.dir.RunningNode(c.address))
if c.IsLeader() {
c.RenewNode(c.dir.MasterNode(c.address))
}
} else {
c.RenewNode(c.dir.QueueNode(c.address))
}
c.LeaderDiscovery()
}()
}
}
}
// Election - to start leader election task
func (c *Client) Election() {
defer func() {
// recover if panic
if r := recover(); r != nil {
c.Election()
}
}()
// determine if context is already cancelled
isCancelled := false
// create context with cancel
ctx, cancel := context.WithCancel(context.Background())
defer func() {
if !isCancelled {
cancel()
isCancelled = true
}
}()
// generate election key
key := c.dir.ElectionNode(c.address)
// register this node's election key with a TTL
c.client.Set(ctx, key, c.address, &client.SetOptions{
Dir: false,
TTL: ServiceTTL,
})
// create watcher
watcher := c.client.Watcher(c.dir.Election, &client.WatcherOptions{
AfterIndex: 0,
Recursive: true,
})
go func() {
for {
select {
case <-c.cancel:
if !isCancelled {
cancel()
isCancelled = true
}
return
}
}
}()
// observe election changes
for {
resp, err := watcher.Next(ctx)
if err != nil {
panic(err)
}
if resp.Node.Dir {
continue
}
if c.Leader() == nil {
continue
}
switch resp.Action {
case "set", "update":
case "delete":
if leader := c.Leader(); leader.Key == resp.Node.Key {
c.events <- &models.Event{Type: EventElection, Group: GroupWorker}
go c.LeaderDiscovery()
}
}
}
}
// RegisterNode - register node to etcd
func (c *Client) RegisterNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
Dir: false,
TTL: ServiceTTL,
})
}
// UnsetNode - unregister node by deleting its key
func (c *Client) UnsetNode(dir string) {
c.client.Delete(context.Background(), dir, nil)
}
// RenewNode - renew node and extend ttl
func (c *Client) RenewNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
PrevExist: client.PrevExist,
TTL: ServiceTTL,
})
}
// RunApplication - run application
func (c *Client) RunApplication(entrypoint *models.ApplicationEntryPoint) {
c.RLock()
if c.started {
c.RUnlock() // release the read lock before the early return
return
}
c.RUnlock()
c.Lock()
c.started = true
c.Unlock()
receive := make(chan string)
// generate scope
scope := c.GenerateScope()
// launcher start daemon
go launcher.Start(scope, entrypoint)
// health check
if entrypoint.Health != nil && entrypoint.Health.Ports != nil {
go health.Check(receive, entrypoint.Health.Ports...)
}
for {
select {
case event := <-receive:
switch event {
case health.Pass:
c.Lock()
if !c.running {
fmt.Println("service is now running")
}
c.running = true
c.Unlock()
c.RegisterNode(c.dir.RunningNode(c.address))
if c.IsLeader() {
c.RegisterNode(c.dir.MasterNode(c.address))
}
case health.Fail:
c.Lock()
if c.running {
fmt.Println("service is now stopped")
}
c.running = false
c.Unlock()
c.UnsetNode(c.dir.RunningNode(c.address))
}
}
}
}
// IsLeader - is current node a leader
func (c *Client) IsLeader() bool {
// self node key
self := c.dir.ElectionNode(c.address)
if c.leader != nil && c.leader.Key == self {
return true
}
return false
}
// LeaderDiscovery - get leader/master node information
func (c *Client) LeaderDiscovery() {
dir := c.dir.Election
// self node key
self := fmt.Sprintf("%v/%v", dir, c.address)
// get a list of election nodes
resp, err := c.client.Get(context.Background(), dir, &client.GetOptions{Sort: true})
if err != nil {
log.Fatal(err)
}
// leader key and address
var key, addr string
// current lowest node index
var idx uint64
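// the node whose election key was created earliest (lowest CreatedIndex) becomes leader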
if len(resp.Node.Nodes) > 0 {
for _, v := range resp.Node.Nodes {
if v.Dir {
continue
}
if idx == 0 || v.CreatedIndex < idx {
key = v.Key
addr = v.Value
idx = v.CreatedIndex
}
}
}
if key == "" || addr == "" {
fmt.Println("# no nodes were found")
c.Lock()
c.leader = nil
c.Unlock()
} else {
leader := &models.Leader{Key: key, Address: addr}
if c.leader == nil {
if leader.Key == self {
fmt.Println("# elected as leader")
c.events <- &models.Event{Type: EventElected, Group: GroupLeader}
} else {
fmt.Println("# elected as worker")
// do not send any event until leader node is ready
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
go c.WaitForLeader()
}
}
} else if c.leader != nil && leader.Key != c.leader.Key {
if leader.Key == self {
fmt.Println("# re-elected as leader")
c.events <- &models.Event{Type: EventReElected, Group: GroupLeader}
}
}
c.Lock()
c.leader = leader
c.Unlock()
}
}
// WaitForLeader - wait until the leader node is ready
func (c *Client) WaitForLeader() {
defer func() {
c.Lock()
c.locked = false
c.Unlock()
}()
c.RLock()
var locked = c.locked
c.RUnlock()
if !locked {
fmt.Println("# waiting for leader node")
c.Lock()
c.locked = true
c.Unlock()
interval := time.NewTicker(ServiceTTL)
defer interval.Stop()
for {
select {
case <-interval.C:
if c.IsLeader() {
return
}
fmt.Println("# scanning running nodes...")
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
fmt.Println("# no nodes are ready yet")
}
}
}
}
}
// GenerateScope - generate scope base
func (c *Client) GenerateScope() *models.Scope {
return models.SetupEnvironment(
c.GetServiceHostname(),
c.GetServiceIP(),
c.GetRunningNodes(),
)
}
// GetRunningNodes - get the list of running nodes
func (c *Client) GetRunningNodes() []models.Node {
dir := c.dir.Running
res := []models.Node{}
if c.client == nil {
return res
}
resp, err := c.client.Get(context.Background(), dir, nil)
if err != nil {
return res
}
if !resp.Node.Dir {
return res
}
for _, node := range resp.Node.Nodes {
res = append(res, models.Node(node.Value))
}
return res
}
// GetEnvEndPoint - extract the etcd endpoint from environment variables
func (c *Client) GetEnvEndPoint() string {
whitelist := []string{"ETCD_ENDPOINT", "ETCDCTL_ENDPOINT", "ETCD_HOST", "COREOS_PRIVATE_IPV4", "COREOS_PUBLIC_IPV4"}
for _, i := range whitelist {
if v := os.Getenv(i); v != "" {
return v
}
}
return ""
}
// GetEndPoint - to get endpoint from config, env or docker host
func (c *Client) GetEndPoint() []string {
for i := 0; i < 3; i++ {
switch i {
case 0:
if c.endpoints != nil && len(c.endpoints) > 0 {
return c.endpoints
}
case 1:
env := c.GetEnvEndPoint()
if strings.TrimSpace(env) == "" {
continue
}
if arr := strings.Split(env, ","); len(arr) > 0 {
return arr
}
case 2:
addr := c.GetServiceHostIP()
return []string{
fmt.Sprintf("http://%v:2379", addr),
fmt.Sprintf("http://%v:4001", addr),
}
}
}
return []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001"}
}
// GetServiceHostname - extract FQDN hostname from kernel
func (c *Client) GetServiceHostname() string {
hostname, err := os.Hostname()
if err != nil {
return ""
}
return hostname
}
// GetServiceHostIP - return service host ip (container host)
func (c *Client) GetServiceHostIP() string {
output, err := network.IP("route")
if err != nil {
log.Fatal(err)
}
for _, line := range strings.Split(output, "\n") {
if !strings.Contains(line, "default") {
continue
}
parts := strings.Split(line, " ")
for _, part := range parts {
if ip := net.ParseIP(part); ip != nil {
return part
}
}
}
return ""
}
// GetServiceIP - get service ip address
func (c *Client) GetServiceIP() string {
ifaces, err := net.Interfaces()
if err != nil {
return ""
}
for _, iface := range ifaces {
if iface.Flags&net.FlagUp == 0 {
continue // interface down
}
if iface.Flags&net.FlagLoopback != 0 {
continue // loopback interface
}
addrs, err := iface.Addrs()
if err != nil {
return ""
}
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip == nil || ip.IsLoopback() {
continue
}
ip = ip.To4()
if ip == nil {
continue // not an ipv4 address
}
return ip.String()
}
}
return ""
}
// Connect - establish a connection to the etcd cluster
func (c *Client) | () error {
endpoints := c.GetEndPoint()
cfg := client.Config{
Endpoints: endpoints,
Transport: client.DefaultTransport,
HeaderTimeoutPerRequest: time.Second,
}
fmt.Printf("# connect to %v\n", endpoints)
conn, err := client.New(cfg)
if err != nil {
return err
}
kapi := client.NewKeysAPI(conn)
c.client = kapi
return nil
}
| Connect | identifier_name |
manager.go | package manager
import (
"fmt"
"log"
"net"
"os"
"reflect"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"github.com/coreos/etcd/client"
"github.com/samuelngs/axis/health"
"github.com/samuelngs/axis/launcher"
"github.com/samuelngs/axis/models"
"github.com/samuelngs/axis/pkg/network"
)
type (
// Client - the etcd client
Client struct {
// lock
sync.RWMutex
// client props
endpoints []string
events chan *models.Event
cancel chan struct{}
client client.KeysAPI
// service address and directory
address string
dir *models.Directory
// service state
running bool
started bool
locked bool
// election state
leader *models.Leader
}
)
var (
// ServiceTTL - a period of time after which the defined service node
// will expire and be removed from the etcd cluster
ServiceTTL = time.Second * 10
)
const (
// DirectoryElection - the path of the election
DirectoryElection string = "election"
// DirectoryMasters - the path of the masters
DirectoryMasters = "masters"
// DirectoryNodes - the path of the nodes
DirectoryNodes = "nodes"
// DirectoryRunning - the list of running nodes
DirectoryRunning = "running"
// DirectoryQueue - the list of starting nodes in queue
DirectoryQueue = "queue"
// EventElected - the leader election is completed
EventElected string = "elected"
// EventReElected - the leader election is completed
EventReElected string = "re-elected"
// EventElection - the leader election is started
EventElection = "election"
// EventReady - the service is ready to run
EventReady = "ready"
// EventWait - the election wait lock
EventWait = "wait"
// GroupLeader - the leader node
GroupLeader string = "leader"
// GroupWorker - the worker node
GroupWorker = "worker"
)
// NewClient - create an etcd client instance
func NewClient(endpoints ...[]string) *Client {
client := &Client{
events: make(chan *models.Event),
cancel: make(chan struct{}),
}
for _, v := range endpoints {
client.endpoints = v
}
client.address = client.GetServiceIP()
return client
}
// Events - the etcd events
func (c *Client) Events() chan *models.Event {
return c.events
}
// Leader - the leader node
func (c *Client) Leader() *models.Leader {
var leader *models.Leader
c.RLock()
leader = c.leader
c.RUnlock()
return leader
}
// SetupDirectory - setup directory for service
func (c *Client) SetupDirectory() {
v := reflect.ValueOf(c.dir)
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if v.Kind() != reflect.Struct {
log.Fatal("only accepts structs")
}
for i := 0; i < v.NumField(); i++ {
key := v.Field(i).String()
c.client.Set(context.Background(), key, "", &client.SetOptions{
Dir: true,
PrevExist: client.PrevNoExist,
})
}
}
// SetDir - set discovery directory
func (c *Client) SetDir(prefix, name string) {
c.Lock()
c.dir = &models.Directory{
Base: fmt.Sprintf("%v/%v", prefix, name),
Election: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryElection),
Running: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryRunning),
Queue: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryQueue),
Nodes: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryNodes),
Masters: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryMasters),
}
c.Unlock()
}
// Observe - observe directory
func (c *Client) Observe() {
// register service
c.SetupDirectory()
c.RegisterNode(c.dir.Node(c.address))
c.RegisterNode(c.dir.QueueNode(c.address))
c.RegisterNode(c.dir.ElectionNode(c.address))
// create an interval timer to monitor service nodes
interval := time.NewTicker(ServiceTTL / 2)
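// ticking at half the TTL ensures every key is refreshed well before it expires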
defer interval.Stop()
for {
select {
case <-interval.C:
go func() {
// read running state
c.RLock()
var running = c.running
c.RUnlock()
// renew nodes
c.RenewNode(c.dir.Node(c.address))
c.RenewNode(c.dir.ElectionNode(c.address))
if running {
c.RenewNode(c.dir.RunningNode(c.address))
if c.IsLeader() {
c.RenewNode(c.dir.MasterNode(c.address))
}
} else {
c.RenewNode(c.dir.QueueNode(c.address))
}
c.LeaderDiscovery()
}()
}
}
}
// Election - to start leader election task
func (c *Client) Election() {
defer func() {
// recover if panic
if r := recover(); r != nil {
c.Election()
}
}()
// determine if context is already cancelled
isCancelled := false
// create context with cancel
ctx, cancel := context.WithCancel(context.Background())
defer func() {
if !isCancelled {
cancel()
isCancelled = true
}
}()
// generate election key
key := c.dir.ElectionNode(c.address)
// register this node's election key with a TTL
c.client.Set(ctx, key, c.address, &client.SetOptions{
Dir: false,
TTL: ServiceTTL,
})
// create watcher
watcher := c.client.Watcher(c.dir.Election, &client.WatcherOptions{
AfterIndex: 0,
Recursive: true,
})
go func() {
for {
select {
case <-c.cancel:
if !isCancelled {
cancel()
isCancelled = true
}
return
}
}
}()
// observe election changes
for {
resp, err := watcher.Next(ctx)
if err != nil {
panic(err)
}
if resp.Node.Dir {
continue
}
if c.Leader() == nil {
continue
}
switch resp.Action {
case "set", "update":
case "delete":
if leader := c.Leader(); leader.Key == resp.Node.Key {
c.events <- &models.Event{Type: EventElection, Group: GroupWorker}
go c.LeaderDiscovery()
}
}
}
}
// RegisterNode - register node to etcd
func (c *Client) RegisterNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
Dir: false,
TTL: ServiceTTL,
})
}
// UnsetNode - unregister node by deleting its key
func (c *Client) UnsetNode(dir string) {
c.client.Delete(context.Background(), dir, nil)
}
// RenewNode - renew node and extend ttl
func (c *Client) RenewNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
PrevExist: client.PrevExist,
TTL: ServiceTTL,
})
}
// RunApplication - run application
func (c *Client) RunApplication(entrypoint *models.ApplicationEntryPoint) {
c.RLock()
if c.started {
c.RUnlock() // release the read lock before the early return
return
}
c.RUnlock()
c.Lock()
c.started = true
c.Unlock()
receive := make(chan string)
// generate scope
scope := c.GenerateScope()
// launcher start daemon
go launcher.Start(scope, entrypoint)
// health check
if entrypoint.Health != nil && entrypoint.Health.Ports != nil {
go health.Check(receive, entrypoint.Health.Ports...)
}
for {
select {
case event := <-receive:
switch event {
case health.Pass:
c.Lock()
if !c.running {
fmt.Println("service is now running")
}
c.running = true
c.Unlock()
c.RegisterNode(c.dir.RunningNode(c.address))
if c.IsLeader() {
c.RegisterNode(c.dir.MasterNode(c.address))
}
case health.Fail:
c.Lock()
if c.running {
fmt.Println("service is now stopped")
}
c.running = false
c.Unlock()
c.UnsetNode(c.dir.RunningNode(c.address))
}
}
}
}
// IsLeader - is current node a leader
func (c *Client) IsLeader() bool {
// self node key
self := c.dir.ElectionNode(c.address)
if c.leader != nil && c.leader.Key == self {
return true
}
return false
}
// LeaderDiscovery - get leader/master node information
func (c *Client) LeaderDiscovery() {
dir := c.dir.Election
// self node key
self := fmt.Sprintf("%v/%v", dir, c.address)
// get a list of election nodes
resp, err := c.client.Get(context.Background(), dir, &client.GetOptions{Sort: true})
if err != nil {
log.Fatal(err)
}
// leader key and address
var key, addr string
// current lowest node index
var idx uint64
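// the node whose election key was created earliest (lowest CreatedIndex) becomes leader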
if len(resp.Node.Nodes) > 0 {
for _, v := range resp.Node.Nodes {
if v.Dir {
continue
}
if idx == 0 || v.CreatedIndex < idx {
key = v.Key
addr = v.Value
idx = v.CreatedIndex
}
}
}
if key == "" || addr == "" {
fmt.Println("# no nodes were found")
c.Lock()
c.leader = nil
c.Unlock()
} else {
leader := &models.Leader{Key: key, Address: addr}
if c.leader == nil {
if leader.Key == self {
fmt.Println("# elected as leader")
c.events <- &models.Event{Type: EventElected, Group: GroupLeader}
} else {
fmt.Println("# elected as worker")
// do not send any event until leader node is ready
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
go c.WaitForLeader()
}
}
} else if c.leader != nil && leader.Key != c.leader.Key {
if leader.Key == self {
fmt.Println("# re-elected as leader")
c.events <- &models.Event{Type: EventReElected, Group: GroupLeader}
}
}
c.Lock()
c.leader = leader
c.Unlock()
}
}
// WaitForLeader - wait until the leader node is ready
func (c *Client) WaitForLeader() {
defer func() {
c.Lock()
c.locked = false
c.Unlock()
}()
c.RLock()
var locked = c.locked
c.RUnlock()
if !locked {
fmt.Println("# waiting for leader node")
c.Lock()
c.locked = true
c.Unlock()
interval := time.NewTicker(ServiceTTL)
defer interval.Stop()
for {
select {
case <-interval.C:
if c.IsLeader() {
return
}
fmt.Println("# scanning running nodes...")
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
fmt.Println("# no nodes are ready yet")
}
}
}
}
}
// GenerateScope - generate scope base
func (c *Client) GenerateScope() *models.Scope {
return models.SetupEnvironment(
c.GetServiceHostname(),
c.GetServiceIP(),
c.GetRunningNodes(),
)
}
// GetRunningNodes - get the list of running nodes
func (c *Client) GetRunningNodes() []models.Node {
dir := c.dir.Running
res := []models.Node{}
if c.client == nil {
return res
}
resp, err := c.client.Get(context.Background(), dir, nil)
if err != nil {
return res
}
if !resp.Node.Dir {
return res
}
for _, node := range resp.Node.Nodes {
res = append(res, models.Node(node.Value))
}
return res
}
// GetEnvEndPoint - extract the etcd endpoint from environment variables
func (c *Client) GetEnvEndPoint() string {
whitelist := []string{"ETCD_ENDPOINT", "ETCDCTL_ENDPOINT", "ETCD_HOST", "COREOS_PRIVATE_IPV4", "COREOS_PUBLIC_IPV4"}
for _, i := range whitelist {
if v := os.Getenv(i); v != "" {
return v
}
}
return ""
}
// GetEndPoint - to get endpoint from config, env or docker host
func (c *Client) GetEndPoint() []string {
for i := 0; i < 3; i++ {
switch i {
case 0:
if c.endpoints != nil && len(c.endpoints) > 0 {
return c.endpoints
}
case 1:
env := c.GetEnvEndPoint()
if strings.TrimSpace(env) == "" {
continue
}
if arr := strings.Split(env, ","); len(arr) > 0 {
return arr
}
case 2:
addr := c.GetServiceHostIP()
return []string{
fmt.Sprintf("http://%v:2379", addr),
fmt.Sprintf("http://%v:4001", addr),
}
}
}
return []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001"}
}
// GetServiceHostname - extract FQDN hostname from kernel
func (c *Client) GetServiceHostname() string {
hostname, err := os.Hostname()
if err != nil {
return ""
}
return hostname
}
// GetServiceHostIP - return service host ip (container host)
func (c *Client) GetServiceHostIP() string {
output, err := network.IP("route")
if err != nil {
log.Fatal(err)
}
for _, line := range strings.Split(output, "\n") {
if !strings.Contains(line, "default") {
continue
}
parts := strings.Split(line, " ")
for _, part := range parts {
if ip := net.ParseIP(part); ip != nil {
return part
}
}
}
return ""
}
// GetServiceIP - get service ip address
func (c *Client) GetServiceIP() string {
ifaces, err := net.Interfaces()
if err != nil {
return ""
}
for _, iface := range ifaces {
if iface.Flags&net.FlagUp == 0 {
continue // interface down
}
if iface.Flags&net.FlagLoopback != 0 { | return ""
}
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip == nil || ip.IsLoopback() {
continue
}
ip = ip.To4()
if ip == nil {
continue // not an ipv4 address
}
return ip.String()
}
}
return ""
}
// Connect - establish a connection to the etcd cluster
func (c *Client) Connect() error {
endpoints := c.GetEndPoint()
cfg := client.Config{
Endpoints: endpoints,
Transport: client.DefaultTransport,
HeaderTimeoutPerRequest: time.Second,
}
fmt.Printf("# connect to %v\n", endpoints)
conn, err := client.New(cfg)
if err != nil {
return err
}
kapi := client.NewKeysAPI(conn)
c.client = kapi
return nil
} | continue // loopback interface
}
addrs, err := iface.Addrs()
if err != nil { | random_line_split |
main.rs | use num_traits::PrimInt;
use pest::Parser;
use pest_derive::Parser;
use std::{
collections::HashMap,
error::Error,
fmt,
io::{self, Read},
str::FromStr,
};
#[cfg(debug_assertions)]
macro_rules! dbg_print {
($( $args:expr ),*) => { print!( $( $args ),* ); }
}
#[cfg(not(debug_assertions))]
macro_rules! dbg_print {
($( $args:expr ),*) => {};
}
#[derive(Parser)]
#[grammar = "input.pest"]
pub struct InputParser;
#[derive(Copy, Clone)]
struct AttackTypes(u8);
impl AttackTypes {
fn to(&self, other: AttackTypes) -> bool {
(other.0 & self.0) != 0
}
}
impl fmt::Debug for AttackTypes {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "0b{:b}", self.0)
}
}
#[derive(Clone)]
struct Group {
units: u32,
hits: u32,
damages: u16,
boost: u16,
initiative: i8,
attack: AttackTypes,
immunity: AttackTypes,
weakness: AttackTypes,
}
impl Group {
fn effective_power(&self) -> u32 {
self.units * (self.damages as u32 + self.boost as u32)
} |
fn calc_hit(&self, enemy: &Group) -> u32 {
match (
self.immunity.to(enemy.attack),
self.weakness.to(enemy.attack),
) {
(false, false) => enemy.effective_power(),
(true, false) => 0,
(false, true) => enemy.effective_power() * 2,
(true, true) => unreachable!(),
}
}
fn hit(&mut self, points: u32) -> u32 {
let org_units = self.units;
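// integer division: only whole units are killed, leftover damage is discarded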
let units_kill = points / self.hits;
self.units = self.units.saturating_sub(units_kill);
let units_lost = org_units - self.units;
dbg_print!("Units lost: {}\n", units_lost);
units_lost
}
}
#[derive(Default, Clone)]
struct Army<'a> {
groups: Vec<Group>,
name: &'a str,
}
impl Army<'_> {
fn sort_for_attack(&self) -> Vec<u16> {
let mut ids: Vec<u16> = (0..self.groups.len() as u16).collect();
ids.sort_by_key(|i|
// descending sort
(
!self.groups[*i as usize].is_alive(),
-(self.groups[*i as usize].effective_power() as i32),
-self.groups[*i as usize].initiative,
));
ids
}
fn choose_enemy(&self, order: &Vec<u16>, enemy: &Army) -> Vec<Option<u16>> {
let mut chosen = vec![false; enemy.groups.len()];
order
.iter()
.map(|idx| {
let i = *idx as usize;
if !self.groups[i].is_alive() {
return None;
}
let mut enemy_ids: Vec<_> = (0..enemy.groups.len()).collect();
enemy_ids.sort_by_cached_key(|&j| {
(
!enemy.groups[j].is_alive(),
chosen[j],
-(enemy.groups[j].calc_hit(&self.groups[i]) as i32),
-(enemy.groups[j].effective_power() as i32),
-enemy.groups[j].initiative,
)
});
// If chosen[j] wasn't a field in sorting, we've to use |filter|, not
// |take_while| as top results might've been already chosen.
match enemy_ids
.iter()
.take_while(|&&j| {
// Although not explicitly stated in puzzle, if this unit can't deal
// any damage to any enemy unit, then don't mark chosen.
enemy.groups[j].is_alive()
&& !chosen[j]
&& enemy.groups[j].calc_hit(&self.groups[i]) > 0
})
.next()
{
Some(&c) => {
chosen[c] = true;
Some(c as u16)
}
None => None,
}
})
.collect()
}
fn is_alive(&self) -> bool {
self.groups.iter().any(|g| g.is_alive())
}
fn boost(&mut self, points: u16) {
for g in &mut self.groups {
g.boost = points;
}
}
}
// PrimInt is yet to get the BITS member; make a new trait.
// https://stackoverflow.com/q/73711297/183120
trait Bits {
const BITS: usize;
}
macro_rules! impl_bits {
( $($ty:ident)* ) => {
$(
impl Bits for $ty {
const BITS: usize = Self::BITS as usize;
}
)*
};
}
impl_bits!(u8 u16 u32 u64 u128);
fn to_flag<'a, T: Bits + PrimInt>(
attack: &'a str,
attack_to_flag: &mut HashMap<&'a str, T>,
) -> Result<T, Box<dyn Error>> {
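// each distinct attack name gets its own bit so immunities and weaknesses pack into bitmasks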
let n = attack_to_flag.len();
let mask = T::one() << n;
match n < T::BITS {
true => Ok(*attack_to_flag.entry(attack).or_insert(mask)),
false => Err(Box::<dyn Error>::from(format!(
"More than {} distinct attacks; insufficient bit-width.",
T::BITS
))),
}
}
struct Attack {
army: usize,
group: usize,
enemy: usize,
}
impl Attack {
fn enemy_army(&self) -> usize {
// make a bool and convert to integral as !1u8 = 254
(self.army == 0) as usize
}
}
// Army ID and remaining units
struct Victor(Option<u8>, u32);
fn fight(mut armies: [Army; 2]) -> Victor {
while armies.iter().all(|a| a.is_alive()) {
let ids = [armies[0].sort_for_attack(), armies[1].sort_for_attack()];
let choices = [
armies[0].choose_enemy(&ids[0], &armies[1]),
armies[1].choose_enemy(&ids[1], &armies[0]),
];
// Excessive debugging; turn on if needed.
// for (i, _) in armies.iter().enumerate() {
// dbg_print!("Army {}\n", i);
// for (idx, &j) in ids[i].iter().enumerate() {
// dbg_print!(
// " Group {}: {} --> {:?}\n",
// j,
// armies[i].groups[j as usize].units,
// choices[i][idx]
// );
// }
// }
// collect all alive groups with respective army ID
let mut fight: Vec<Attack> = ids[0]
.iter()
.zip(choices[0].iter())
.filter_map(|(&i, &choice)| {
match (armies[0].groups[i as usize].is_alive(), choice) {
(true, Some(enemy)) => Some(Attack {
army: 0,
group: i as usize,
enemy: enemy.into(),
}),
_ => None,
}
})
.chain(ids[1].iter().zip(choices[1].iter()).filter_map(
|(&j, &choice)| match (armies[1].groups[j as usize].is_alive(), choice)
{
(true, Some(enemy)) => Some(Attack {
army: 1,
group: j as usize,
enemy: enemy.into(),
}),
_ => None,
},
))
.collect::<Vec<Attack>>();
// Attacks in this fight are only b/w alive groups from here on.
fight.sort_by_key(|a| -armies[a.army].groups[a.group].initiative);
let mut total_units_lost = 0;
for attack in &fight {
dbg_print!(
"{}'s Group {} --> {}'s Group {}; ",
armies[attack.army].name,
attack.group,
armies[attack.enemy_army()].name,
attack.enemy
);
let attacker = &armies[attack.army].groups[attack.group];
let defender = &armies[attack.enemy_army()].groups[attack.enemy];
let damage = defender.calc_hit(attacker);
let defender_mut = &mut armies[attack.enemy_army()].groups[attack.enemy];
total_units_lost += defender_mut.hit(damage);
}
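// stalemate guard: if no units were lost this round, the battle can never progress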
if total_units_lost == 0 {
return Victor(None, 0);
}
dbg_print!("--------------\n");
}
match armies[0].is_alive() {
true => Victor(
Some(0),
armies[0].groups.iter().fold(0, |units, g| units + g.units),
),
false => Victor(
Some(1),
armies[1].groups.iter().fold(0, |units, g| units + g.units),
),
}
}
fn main() -> Result<(), Box<dyn Error>> {
let mut input_str = String::new();
let mut stdin = io::stdin();
stdin.read_to_string(&mut input_str)?;
let input = InputParser::parse(Rule::file, &input_str)
.expect("Invalid input")
.next()
.unwrap();
let mut armies = [Army::default(), Army::default()];
let mut next_army: u8 = 0;
let mut attack_to_flag: HashMap<&str, u8> = HashMap::new();
for line in input.into_inner() {
match line.as_rule() {
Rule::army_name => {
armies[next_army as usize].name = line.as_str();
next_army += 1;
}
Rule::group => {
let mut counts = [0u32; 4];
let mut idx = 0;
let mut attack = AttackTypes(0);
let mut immunities = 0u8;
let mut weaknesses = 0u8;
for r in line.into_inner() {
match r.as_rule() {
Rule::count => {
counts[idx] = u32::from_str(r.as_str())?;
idx += 1;
}
Rule::attack => {
attack = AttackTypes(to_flag(r.as_str(), &mut attack_to_flag)?);
}
Rule::traits => {
for t in r.into_inner() {
match t.as_rule() {
Rule::immunities => {
for i in t.into_inner() {
immunities |= to_flag(i.as_str(), &mut attack_to_flag)?;
}
}
Rule::weaknesses => {
for w in t.into_inner() {
weaknesses |= to_flag(w.as_str(), &mut attack_to_flag)?;
}
}
_ => unreachable!(),
}
}
}
_ => unreachable!(),
}
}
armies[(next_army - 1) as usize].groups.push(Group {
units: counts[0],
hits: counts[1],
damages: counts[2] as u16,
boost: 0,
initiative: counts[3] as i8,
attack,
immunity: AttackTypes(immunities),
weakness: AttackTypes(weaknesses),
});
}
Rule::EOI => (),
_ => unreachable!(),
}
}
// Part 1
if let Victor(Some(army), units_alive) = fight(armies.clone()) {
println!(
"{} wins with units: {}",
armies[army as usize].name, units_alive
);
}
// Part 2: binary search for minimal boost
let (mut lo_boost, mut hi_boost) = (1, 1500);
while lo_boost != hi_boost {
// Using integers means below is implicitly floor((L + R) / 2); a ceil
// implementation sets hi_boost = boost - 1 and lo_boost = boost. Floor
// route stops on the right, while ceil on the left side of target.
let boost = (hi_boost + lo_boost) / 2;
armies[0].boost(boost);
match fight(armies.clone()).0 {
Some(0) => hi_boost = boost,
_ => lo_boost = boost + 1,
}
}
armies[0].boost(hi_boost); // lo_boost = hi_boost anyway
println!(
"Immune System wins with minimal boost {hi_boost}; surviving units: {}",
fight(armies.clone()).1
);
Ok(())
} |
fn is_alive(&self) -> bool {
self.units > 0
} | random_line_split |
main.rs | use num_traits::PrimInt;
use pest::Parser;
use pest_derive::Parser;
use std::{
collections::HashMap,
error::Error,
fmt,
io::{self, Read},
str::FromStr,
};
#[cfg(debug_assertions)]
macro_rules! dbg_print {
($( $args:expr ),*) => { print!( $( $args ),* ); }
}
#[cfg(not(debug_assertions))]
macro_rules! dbg_print {
($( $args:expr ),*) => {};
}
#[derive(Parser)]
#[grammar = "input.pest"]
pub struct InputParser;
#[derive(Copy, Clone)]
struct AttackTypes(u8);
impl AttackTypes {
fn to(&self, other: AttackTypes) -> bool {
(other.0 & self.0) != 0
}
}
impl fmt::Debug for AttackTypes {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "0b{:b}", self.0)
}
}
#[derive(Clone)]
struct Group {
units: u32,
hits: u32,
damages: u16,
boost: u16,
initiative: i8,
attack: AttackTypes,
immunity: AttackTypes,
weakness: AttackTypes,
}
impl Group {
fn effective_power(&self) -> u32 {
self.units * (self.damages as u32 + self.boost as u32)
}
fn is_alive(&self) -> bool {
self.units > 0
}
fn calc_hit(&self, enemy: &Group) -> u32 {
match (
self.immunity.to(enemy.attack),
self.weakness.to(enemy.attack),
) {
(false, false) => enemy.effective_power(),
(true, false) => 0,
(false, true) => enemy.effective_power() * 2,
(true, true) => unreachable!(),
}
}
fn hit(&mut self, points: u32) -> u32 |
}
#[derive(Default, Clone)]
struct Army<'a> {
groups: Vec<Group>,
name: &'a str,
}
impl Army<'_> {
fn sort_for_attack(&self) -> Vec<u16> {
let mut ids: Vec<u16> = (0..self.groups.len() as u16).collect();
ids.sort_by_key(|i|
// descending sort
(
!self.groups[*i as usize].is_alive(),
-(self.groups[*i as usize].effective_power() as i32),
-self.groups[*i as usize].initiative,
));
ids
}
fn choose_enemy(&self, order: &Vec<u16>, enemy: &Army) -> Vec<Option<u16>> {
let mut chosen = vec![false; enemy.groups.len()];
order
.iter()
.map(|idx| {
let i = *idx as usize;
if !self.groups[i].is_alive() {
return None;
}
let mut enemy_ids: Vec<_> = (0..enemy.groups.len()).collect();
enemy_ids.sort_by_cached_key(|&j| {
(
!enemy.groups[j].is_alive(),
chosen[j],
-(enemy.groups[j].calc_hit(&self.groups[i]) as i32),
-(enemy.groups[j].effective_power() as i32),
-enemy.groups[j].initiative,
)
});
// If chosen[j] wasn't a field in sorting, we've to use |filter|, not
// |take_while| as top results might've been already chosen.
match enemy_ids
.iter()
.take_while(|&&j| {
// Although not explicitly stated in puzzle, if this unit can't deal
// any damage to any enemy unit, then don't mark chosen.
enemy.groups[j].is_alive()
&& !chosen[j]
&& enemy.groups[j].calc_hit(&self.groups[i]) > 0
})
.next()
{
Some(&c) => {
chosen[c] = true;
Some(c as u16)
}
None => None,
}
})
.collect()
}
fn is_alive(&self) -> bool {
self.groups.iter().any(|g| g.is_alive())
}
fn boost(&mut self, points: u16) {
for g in &mut self.groups {
g.boost = points;
}
}
}
// PrimInt is yet to get the BITS member; make a new trait.
// https://stackoverflow.com/q/73711297/183120
trait Bits {
const BITS: usize;
}
macro_rules! impl_bits {
( $($ty:ident)* ) => {
$(
impl Bits for $ty {
const BITS: usize = Self::BITS as usize;
}
)*
};
}
impl_bits!(u8 u16 u32 u64 u128);
fn to_flag<'a, T: Bits + PrimInt>(
attack: &'a str,
attack_to_flag: &mut HashMap<&'a str, T>,
) -> Result<T, Box<dyn Error>> {
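// each distinct attack name gets its own bit so immunities and weaknesses pack into bitmasks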
let n = attack_to_flag.len();
let mask = T::one() << n;
match n < T::BITS {
true => Ok(*attack_to_flag.entry(attack).or_insert(mask)),
false => Err(Box::<dyn Error>::from(format!(
"More than {} distinct attacks; insufficient bit-width.",
T::BITS
))),
}
}
struct Attack {
army: usize,
group: usize,
enemy: usize,
}
impl Attack {
fn enemy_army(&self) -> usize {
// make a bool and convert to integral as !1u8 = 254
(self.army == 0) as usize
}
}
// Army ID and remaining units
struct Victor(Option<u8>, u32);
fn fight(mut armies: [Army; 2]) -> Victor {
while armies.iter().all(|a| a.is_alive()) {
let ids = [armies[0].sort_for_attack(), armies[1].sort_for_attack()];
let choices = [
armies[0].choose_enemy(&ids[0], &armies[1]),
armies[1].choose_enemy(&ids[1], &armies[0]),
];
// Excessive debugging; turn on if needed.
// for (i, _) in armies.iter().enumerate() {
// dbg_print!("Army {}\n", i);
// for (idx, &j) in ids[i].iter().enumerate() {
// dbg_print!(
// " Group {}: {} --> {:?}\n",
// j,
// armies[i].groups[j as usize].units,
// choices[i][idx]
// );
// }
// }
// collect all alive groups with respective army ID
let mut fight: Vec<Attack> = ids[0]
.iter()
.zip(choices[0].iter())
.filter_map(|(&i, &choice)| {
match (armies[0].groups[i as usize].is_alive(), choice) {
(true, Some(enemy)) => Some(Attack {
army: 0,
group: i as usize,
enemy: enemy.into(),
}),
_ => None,
}
})
.chain(ids[1].iter().zip(choices[1].iter()).filter_map(
|(&j, &choice)| match (armies[1].groups[j as usize].is_alive(), choice)
{
(true, Some(enemy)) => Some(Attack {
army: 1,
group: j as usize,
enemy: enemy.into(),
}),
_ => None,
},
))
.collect::<Vec<Attack>>();
// Attacks in this fight are only b/w alive groups from here on.
fight.sort_by_key(|a| -armies[a.army].groups[a.group].initiative);
let mut total_units_lost = 0;
for attack in &fight {
dbg_print!(
"{}'s Group {} --> {}'s Group {}; ",
armies[attack.army].name,
attack.group,
armies[attack.enemy_army()].name,
attack.enemy
);
let attacker = &armies[attack.army].groups[attack.group];
let defender = &armies[attack.enemy_army()].groups[attack.enemy];
let damage = defender.calc_hit(attacker);
let defender_mut = &mut armies[attack.enemy_army()].groups[attack.enemy];
total_units_lost += defender_mut.hit(damage);
}
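// stalemate guard: if no units were lost this round, the battle can never progress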
if total_units_lost == 0 {
return Victor(None, 0);
}
dbg_print!("--------------\n");
}
match armies[0].is_alive() {
true => Victor(
Some(0),
armies[0].groups.iter().fold(0, |units, g| units + g.units),
),
false => Victor(
Some(1),
armies[1].groups.iter().fold(0, |units, g| units + g.units),
),
}
}
fn main() -> Result<(), Box<dyn Error>> {
let mut input_str = String::new();
let mut stdin = io::stdin();
stdin.read_to_string(&mut input_str)?;
let input = InputParser::parse(Rule::file, &input_str)
.expect("Invalid input")
.next()
.unwrap();
let mut armies = [Army::default(), Army::default()];
let mut next_army: u8 = 0;
let mut attack_to_flag: HashMap<&str, u8> = HashMap::new();
for line in input.into_inner() {
match line.as_rule() {
Rule::army_name => {
armies[next_army as usize].name = line.as_str();
next_army += 1;
}
Rule::group => {
let mut counts = [0u32; 4];
let mut idx = 0;
let mut attack = AttackTypes(0);
let mut immunities = 0u8;
let mut weaknesses = 0u8;
for r in line.into_inner() {
match r.as_rule() {
Rule::count => {
counts[idx] = u32::from_str(r.as_str())?;
idx += 1;
}
Rule::attack => {
attack = AttackTypes(to_flag(r.as_str(), &mut attack_to_flag)?);
}
Rule::traits => {
for t in r.into_inner() {
match t.as_rule() {
Rule::immunities => {
for i in t.into_inner() {
immunities |= to_flag(i.as_str(), &mut attack_to_flag)?;
}
}
Rule::weaknesses => {
for w in t.into_inner() {
weaknesses |= to_flag(w.as_str(), &mut attack_to_flag)?;
}
}
_ => unreachable!(),
}
}
}
_ => unreachable!(),
}
}
armies[(next_army - 1) as usize].groups.push(Group {
units: counts[0],
hits: counts[1],
damages: counts[2] as u16,
boost: 0,
initiative: counts[3] as i8,
attack,
immunity: AttackTypes(immunities),
weakness: AttackTypes(weaknesses),
});
}
Rule::EOI => (),
_ => unreachable!(),
}
}
// Part 1
if let Victor(Some(army), units_alive) = fight(armies.clone()) {
println!(
"{} wins with units: {}",
armies[army as usize].name, units_alive
);
}
// Part 2: binary search for minimal boost
let (mut lo_boost, mut hi_boost) = (1, 1500);
while lo_boost != hi_boost {
// Using integers means below is implicitly floor((L + R) / 2); a ceil
// implementation sets hi_boost = boost - 1 and lo_boost = boost. Floor
// route stops on the right, while ceil on the left side of target.
let boost = (hi_boost + lo_boost) / 2;
armies[0].boost(boost);
match fight(armies.clone()).0 {
Some(0) => hi_boost = boost,
_ => lo_boost = boost + 1,
}
}
armies[0].boost(hi_boost); // lo_boost = hi_boost anyway
println!(
"Immune System wins with minimal boost {hi_boost}; surviving units: {}",
fight(armies.clone()).1
);
Ok(())
}
| {
let org_units = self.units;
let units_kill = points / self.hits;
self.units = self.units.saturating_sub(units_kill);
let units_lost = org_units - self.units;
dbg_print!("Units lost: {}\n", units_lost);
units_lost
} | identifier_body |
main.rs | use num_traits::PrimInt;
use pest::Parser;
use pest_derive::Parser;
use std::{
collections::HashMap,
error::Error,
fmt,
io::{self, Read},
str::FromStr,
};
#[cfg(debug_assertions)]
macro_rules! dbg_print {
($( $args:expr ),*) => { print!( $( $args ),* ); }
}
#[cfg(not(debug_assertions))]
macro_rules! dbg_print {
($( $args:expr ),*) => {};
}
#[derive(Parser)]
#[grammar = "input.pest"]
pub struct InputParser;
#[derive(Copy, Clone)]
struct AttackTypes(u8);
impl AttackTypes {
fn to(&self, other: AttackTypes) -> bool {
(other.0 & self.0) != 0
}
}
impl fmt::Debug for AttackTypes {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "0b{:b}", self.0)
}
}
#[derive(Clone)]
struct Group {
units: u32,
hits: u32,
damages: u16,
boost: u16,
initiative: i8,
attack: AttackTypes,
immunity: AttackTypes,
weakness: AttackTypes,
}
impl Group {
fn | (&self) -> u32 {
self.units * (self.damages as u32 + self.boost as u32)
}
fn is_alive(&self) -> bool {
self.units > 0
}
fn calc_hit(&self, enemy: &Group) -> u32 {
match (
self.immunity.to(enemy.attack),
self.weakness.to(enemy.attack),
) {
(false, false) => enemy.effective_power(),
(true, false) => 0,
(false, true) => enemy.effective_power() * 2,
(true, true) => unreachable!(),
}
}
fn hit(&mut self, points: u32) -> u32 {
let org_units = self.units;
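// integer division: only whole units are killed, leftover damage is discarded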
let units_kill = points / self.hits;
self.units = self.units.saturating_sub(units_kill);
let units_lost = org_units - self.units;
dbg_print!("Units lost: {}\n", units_lost);
units_lost
}
}
#[derive(Default, Clone)]
struct Army<'a> {
groups: Vec<Group>,
name: &'a str,
}
impl Army<'_> {
fn sort_for_attack(&self) -> Vec<u16> {
let mut ids: Vec<u16> = (0..self.groups.len() as u16).collect();
ids.sort_by_key(|i|
// descending sort
(
!self.groups[*i as usize].is_alive(),
-(self.groups[*i as usize].effective_power() as i32),
-self.groups[*i as usize].initiative,
));
ids
}
fn choose_enemy(&self, order: &Vec<u16>, enemy: &Army) -> Vec<Option<u16>> {
let mut chosen = vec![false; enemy.groups.len()];
order
.iter()
.map(|idx| {
let i = *idx as usize;
if !self.groups[i].is_alive() {
return None;
}
let mut enemy_ids: Vec<_> = (0..enemy.groups.len()).collect();
enemy_ids.sort_by_cached_key(|&j| {
(
!enemy.groups[j].is_alive(),
chosen[j],
-(enemy.groups[j].calc_hit(&self.groups[i]) as i32),
-(enemy.groups[j].effective_power() as i32),
-enemy.groups[j].initiative,
)
});
// If chosen[j] wasn't a field in sorting, we've to use |filter|, not
// |take_while| as top results might've been already chosen.
match enemy_ids
.iter()
.take_while(|&&j| {
// Although not explicitly stated in puzzle, if this unit can't deal
// any damage to any enemy unit, then don't mark chosen.
enemy.groups[j].is_alive()
&& !chosen[j]
&& enemy.groups[j].calc_hit(&self.groups[i]) > 0
})
.next()
{
Some(&c) => {
chosen[c] = true;
Some(c as u16)
}
None => None,
}
})
.collect()
}
fn is_alive(&self) -> bool {
self.groups.iter().any(|g| g.is_alive())
}
fn boost(&mut self, points: u16) {
for g in &mut self.groups {
g.boost = points;
}
}
}
// PrimInt is yet to get the BITS member; make a new trait.
// https://stackoverflow.com/q/73711297/183120
trait Bits {
const BITS: usize;
}
macro_rules! impl_bits {
( $($ty:ident)* ) => {
$(
impl Bits for $ty {
const BITS: usize = Self::BITS as usize;
}
)*
};
}
impl_bits!(u8 u16 u32 u64 u128);
fn to_flag<'a, T: Bits + PrimInt>(
attack: &'a str,
attack_to_flag: &mut HashMap<&'a str, T>,
) -> Result<T, Box<dyn Error>> {
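// each distinct attack name gets its own bit so immunities and weaknesses pack into bitmasks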
let n = attack_to_flag.len();
let mask = T::one() << n;
match n < T::BITS {
true => Ok(*attack_to_flag.entry(attack).or_insert(mask)),
false => Err(Box::<dyn Error>::from(format!(
"More than {} distinct attacks; insufficient bit-width.",
T::BITS
))),
}
}
struct Attack {
army: usize,
group: usize,
enemy: usize,
}
impl Attack {
fn enemy_army(&self) -> usize {
// make a bool and convert to integral as !1u8 = 254
(self.army == 0) as usize
}
}
// Army ID and remaining units
struct Victor(Option<u8>, u32);
fn fight(mut armies: [Army; 2]) -> Victor {
while armies.iter().all(|a| a.is_alive()) {
let ids = [armies[0].sort_for_attack(), armies[1].sort_for_attack()];
let choices = [
armies[0].choose_enemy(&ids[0], &armies[1]),
armies[1].choose_enemy(&ids[1], &armies[0]),
];
// Excessive debugging; turn on if needed.
// for (i, _) in armies.iter().enumerate() {
// dbg_print!("Army {}\n", i);
// for (idx, &j) in ids[i].iter().enumerate() {
// dbg_print!(
// " Group {}: {} --> {:?}\n",
// j,
// armies[i].groups[j as usize].units,
// choices[i][idx]
// );
// }
// }
// collect all alive groups with respective army ID
let mut fight: Vec<Attack> = ids[0]
.iter()
.zip(choices[0].iter())
.filter_map(|(&i, &choice)| {
match (armies[0].groups[i as usize].is_alive(), choice) {
(true, Some(enemy)) => Some(Attack {
army: 0,
group: i as usize,
enemy: enemy.into(),
}),
_ => None,
}
})
.chain(ids[1].iter().zip(choices[1].iter()).filter_map(
|(&j, &choice)| match (armies[1].groups[j as usize].is_alive(), choice)
{
(true, Some(enemy)) => Some(Attack {
army: 1,
group: j as usize,
enemy: enemy.into(),
}),
_ => None,
},
))
.collect::<Vec<Attack>>();
// Attacks in this fight are only b/w alive groups from here on.
fight.sort_by_key(|a| -armies[a.army].groups[a.group].initiative);
let mut total_units_lost = 0;
for attack in &fight {
dbg_print!(
"{}'s Group {} --> {}'s Group {}; ",
armies[attack.army].name,
attack.group,
armies[attack.enemy_army()].name,
attack.enemy
);
let attacker = &armies[attack.army].groups[attack.group];
let defender = &armies[attack.enemy_army()].groups[attack.enemy];
let damage = defender.calc_hit(attacker);
let defender_mut = &mut armies[attack.enemy_army()].groups[attack.enemy];
total_units_lost += defender_mut.hit(damage);
}
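// stalemate guard: if no units were lost this round, the battle can never progress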
if total_units_lost == 0 {
return Victor(None, 0);
}
dbg_print!("--------------\n");
}
match armies[0].is_alive() {
true => Victor(
Some(0),
armies[0].groups.iter().fold(0, |units, g| units + g.units),
),
false => Victor(
Some(1),
armies[1].groups.iter().fold(0, |units, g| units + g.units),
),
}
}
fn main() -> Result<(), Box<dyn Error>> {
let mut input_str = String::new();
let mut stdin = io::stdin();
stdin.read_to_string(&mut input_str)?;
let input = InputParser::parse(Rule::file, &input_str)
.expect("Invalid input")
.next()
.unwrap();
let mut armies = [Army::default(), Army::default()];
let mut next_army: u8 = 0;
let mut attack_to_flag: HashMap<&str, u8> = HashMap::new();
for line in input.into_inner() {
match line.as_rule() {
Rule::army_name => {
armies[next_army as usize].name = line.as_str();
next_army += 1;
}
Rule::group => {
let mut counts = [0u32; 4];
let mut idx = 0;
let mut attack = AttackTypes(0);
let mut immunities = 0u8;
let mut weaknesses = 0u8;
for r in line.into_inner() {
match r.as_rule() {
Rule::count => {
counts[idx] = u32::from_str(r.as_str())?;
idx += 1;
}
Rule::attack => {
attack = AttackTypes(to_flag(r.as_str(), &mut attack_to_flag)?);
}
Rule::traits => {
for t in r.into_inner() {
match t.as_rule() {
Rule::immunities => {
for i in t.into_inner() {
immunities |= to_flag(i.as_str(), &mut attack_to_flag)?;
}
}
Rule::weaknesses => {
for w in t.into_inner() {
weaknesses |= to_flag(w.as_str(), &mut attack_to_flag)?;
}
}
_ => unreachable!(),
}
}
}
_ => unreachable!(),
}
}
armies[(next_army - 1) as usize].groups.push(Group {
units: counts[0],
hits: counts[1],
damages: counts[2] as u16,
boost: 0,
initiative: counts[3] as i8,
attack,
immunity: AttackTypes(immunities),
weakness: AttackTypes(weaknesses),
});
}
Rule::EOI => (),
_ => unreachable!(),
}
}
// Part 1
if let Victor(Some(army), units_alive) = fight(armies.clone()) {
println!(
"{} wins with units: {}",
armies[army as usize].name, units_alive
);
}
// Part 2: binary search for minimal boost
let (mut lo_boost, mut hi_boost) = (1, 1500);
while lo_boost != hi_boost {
// Using integers means below is implicitly floor((L + R) / 2); a ceil
// implementation sets hi_boost = boost - 1 and lo_boost = boost. Floor
// route stops on the right, while ceil on the left side of target.
let boost = (hi_boost + lo_boost) / 2;
armies[0].boost(boost);
match fight(armies.clone()).0 {
Some(0) => hi_boost = boost,
_ => lo_boost = boost + 1,
}
}
armies[0].boost(hi_boost); // lo_boost = hi_boost anyway
println!(
"Immune System wins with minimal boost {hi_boost}; surviving units: {}",
fight(armies.clone()).1
);
Ok(())
}
| effective_power | identifier_name |
types.ts | import type {
AccountLimitsResponse,
Authorize,
ContractUpdate,
DetailsOfEachMT5Loginid,
GetAccountStatus,
GetLimits,
GetSettings,
LandingCompany,
LogOutResponse,
Portfolio1,
ProposalOpenContract,
ResidenceList,
SetFinancialAssessmentRequest,
SetFinancialAssessmentResponse,
StatesList,
} from '@deriv/api-types';
import type { Moment } from 'moment';
import type { RouteComponentProps } from 'react-router';
import type { ExchangeRatesStore, FeatureFlagsStore } from './src/stores';
type TRoutes =
| '/404'
| '/account'
| '/account/trading-assessment'
| '/account/languages'
| '/account/financial-assessment'
| '/account/personal-details'
| '/account/proof-of-identity'
| '/account/proof-of-address'
| '/account/proof-of-ownership'
| '/account/passwords'
| '/account/closing-account'
| '/account/deactivate-account'
| '/account-closed'
| '/account/account-limits'
| '/account/connected-apps'
| '/account/api-token'
| '/account/login-history'
| '/account/two-factor-authentication'
| '/account/self-exclusion'
| '/settings/account_password'
| '/settings/apps'
| '/settings/cashier_password'
| '/contract/:contract_id'
| '/settings/exclusion'
| '/settings/financial'
| '/settings/history'
| '/index'
| '/settings/limits'
| '/mt5'
| '/derivx'
| '/settings/personal'
| '/reports/positions'
| '/reports/profit'
| '/reports'
| '/'
| '/redirect'
| '/settings'
| '/reports/statement'
| '/settings/token'
| '/bot'
| '/cashier'
| '/cashier/deposit'
| '/cashier/withdrawal'
| '/cashier/payment-agent'
| '/cashier/account-transfer'
| '/cashier/crypto-transactions'
| '/cashier/on-ramp'
| '/cashier/p2p'
| '/cashier/p2p/profile'
| '/cashier/p2p/verification'
| '/cashier/payment-agent-transfer'
| '/endpoint'
| '/complaints-policy'
| '/appstore'
| '/appstore/traders-hub'
| '/appstore/onboarding';
type TPopulateSettingsExtensionsMenuItem = {
icon: string;
label: string;
value: <T extends object>(props: T) => JSX.Element;
};
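// A single open position in the portfolio, combining the proposal_open_contract and portfolio payloads with display fields.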
type TPortfolioPosition = {
contract_info: ProposalOpenContract &
Portfolio1 & {
contract_update?: ContractUpdate;
};
details?: string;
display_name: string;
id?: number;
indicative: number;
payout?: number;
purchase?: number;
reference: number;
type?: string;
is_unsupported: boolean;
contract_update: ProposalOpenContract['limit_order'];
is_sell_requested: boolean;
profit_loss: number;
};
type TAppRoutingHistory = {
action: string;
hash: string;
key: string;
pathname: string;
search: string;
};
type TAccount = NonNullable<Authorize['account_list']>[0] & {
balance?: number;
};
type TAccountsList = {
account?: {
balance?: string | number;
currency?: string;
disabled?: boolean;
error?: JSX.Element | string;
is_crypto?: boolean;
is_dxtrade?: boolean;
is_mt?: boolean;
market_type?: string;
nativepicker_text?: string;
platform_icon?: {
Derived: React.SVGAttributes<SVGElement>;
Financial: React.SVGAttributes<SVGElement>;
Options: React.SVGAttributes<SVGElement>;
CFDs: React.SVGAttributes<SVGElement>;
};
text?: JSX.Element | string;
value?: string;
};
icon?: string;
idx?: string | number;
is_dark_mode_on?: boolean;
is_virtual?: boolean | number;
loginid?: string;
mt5_login_list?: DetailsOfEachMT5Loginid[];
title?: string;
}[];
// balance is missing in @deriv/api-types
type TActiveAccount = TAccount & {
landing_company_shortcode: 'svg' | 'costarica' | 'maltainvest' | 'malta' | 'iom';
is_virtual: number;
};
type TTradingPlatformAvailableAccount = {
market_type: 'financial' | 'gaming' | 'all';
name: string;
requirements: {
after_first_deposit: {
financial_assessment: string[];
};
compliance: {
mt5: string[];
tax_information: string[];
};
signup: string[];
};
shortcode: 'bvi' | 'labuan' | 'svg' | 'vanuatu' | 'maltainvest';
sub_account_type: string;
};
type TAuthenticationStatus = { document_status: string; identity_status: string };
type TMenuItem = {
icon: JSX.Element;
id: string;
link_to: string | boolean;
login_only: boolean;
onClick: boolean | (() => void);
text: () => string;
};
type TAddToastProps = {
key: string;
content: string;
type: string;
};
type TButtonProps = {
onClick: () => void;
text: string;
};
type TNotificationMessage = {
action?: {
onClick: () => void;
route?: string;
text: string;
};
className?: string;
cta_btn?: TButtonProps;
header_popup?: string;
header: string;
img_alt?: string;
img_src?: string;
is_disposable?: boolean;
is_persistent?: boolean;
key: string;
message_popup?: string;
message: string | JSX.Element;
platform?: string;
primary_btn?: TButtonProps;
secondary_btn?: TButtonProps;
should_hide_close_btn?: boolean;
timeout?: number;
timeoutMessage?: (remaining: number | string) => string;
type: string;
};
type TNotification =
| TNotificationMessage
| ((withdrawal_locked: boolean, deposit_locked: boolean) => TNotificationMessage)
| ((excluded_until: number) => TNotificationMessage);
type TStandPoint = {
financial_company: string;
gaming_company: string;
iom: boolean;
malta: boolean;
maltainvest: boolean;
svg: boolean;
};
type TMt5StatusServerType = {
all: number;
platform: number;
server_number: number;
deposits?: number;
withdrawals?: number;
};
type TDXTraderStatusServerType = Record<'all' | 'demo' | 'real', number>;
type TMt5StatusServer = Record<'demo' | 'real', TMt5StatusServerType[]>;
type TClientStore = {
fetchStatesList: () => Promise<StatesList>;
accounts: { [k: string]: TActiveAccount };
active_accounts: TActiveAccount[];
active_account_landing_company: string;
trading_platform_available_accounts: TTradingPlatformAvailableAccount[];
account_limits: Partial<AccountLimitsResponse['get_limits']> & {
is_loading?: boolean;
api_initial_load_error?: string;
};
account_list: TAccountsList;
account_status: GetAccountStatus;
available_crypto_currencies: string[];
balance?: string | number;
can_change_fiat_currency: boolean;
cfd_score: number;
setCFDScore: (score: number) => void;
currency: string;
current_currency_type?: string;
current_fiat_currency?: string;
has_any_real_account: boolean;
getLimits: () => Promise<{ get_limits?: GetLimits }>;
has_active_real_account: boolean;
has_logged_out: boolean;
has_maltainvest_account: boolean;
initialized_broadcast: boolean;
is_account_setting_loaded: boolean;
is_deposit_lock: boolean;
is_dxtrade_allowed: boolean;
is_eu_country: boolean;
is_eu: boolean;
is_uk: boolean;
is_social_signup: boolean;
has_residence: boolean;
is_authorize: boolean;
is_financial_account: boolean;
is_financial_assessment_needed: boolean;
is_financial_information_incomplete: boolean;
is_identity_verification_needed: boolean;
is_landing_company_loaded: boolean;
is_logged_in: boolean;
is_logging_in: boolean;
is_low_risk: boolean;
is_pending_proof_of_ownership: boolean;
is_switching: boolean;
is_tnc_needed: boolean;
is_trading_experience_incomplete: boolean;
is_virtual: boolean;
is_withdrawal_lock: boolean;
landing_company_shortcode: string;
is_populating_account_list: boolean;
is_language_loaded: boolean;
local_currency_config: {
currency: string;
decimal_places?: number;
};
loginid?: string;
pre_switch_broadcast: boolean;
residence: string;
responseMt5LoginList: ({
mt5_login_list,
}: {
mt5_login_list: DetailsOfEachMT5Loginid[];
}) => DetailsOfEachMT5Loginid[];
responseTradingPlatformAccountsList: ({
trading_platform_accounts,
}: {
trading_platform_accounts: DetailsOfEachMT5Loginid[];
}) => DetailsOfEachMT5Loginid[];
standpoint: TStandPoint;
setAccountStatus: (status?: GetAccountStatus) => void;
setBalanceOtherAccounts: (balance: number) => void;
setInitialized: (status?: boolean) => void;
setLogout: (status?: boolean) => void;
setVisibilityRealityCheck: (value: boolean) => void;
setP2pAdvertiserInfo: () => void;
setPreSwitchAccount: (status?: boolean) => void;
switchAccount: (value?: string) => Promise<void>;
switched: boolean;
switch_broadcast: boolean;
switchEndSignal: () => void;
verification_code: {
payment_agent_withdraw: string;
payment_withdraw: string;
request_email: string;
reset_password: string;
signup: string;
system_email_change: string;
trading_platform_dxtrade_password_reset: string;
trading_platform_mt5_password_reset: string;
};
website_status: { mt5_status: TMt5StatusServer; dx_trade_status: TDXTraderStatusServerType };
email: string;
setVerificationCode: (code: string, action: string) => void;
updateAccountStatus: () => Promise<void>;
is_authentication_needed: boolean;
authentication_status: TAuthenticationStatus;
mt5_login_list: DetailsOfEachMT5Loginid[];
logout: () => Promise<LogOutResponse>;
should_allow_authentication: boolean;
isEligibleForMoreDemoMt5Svg: (market_type: 'synthetic' | 'financial' | 'gaming' | 'all') => boolean;
isEligibleForMoreRealMt5: (market_type: 'synthetic' | 'financial' | 'gaming' | 'all') => boolean;
fetchResidenceList?: () => Promise<void>;
account_settings: GetSettings & {
upload_file?: string;
poi_state?: string;
};
residence_list: ResidenceList;
is_high_risk: boolean;
should_restrict_bvi_account_creation: boolean;
should_restrict_vanuatu_account_creation: boolean;
updateMT5Status: () => Promise<void>;
fetchAccountSettings: () => Promise<void>;
setAccountSettings: (get_settings_response: GetSettings) => void;
upgradeable_landing_companies: unknown[];
is_populating_mt5_account_list: boolean;
landing_companies: LandingCompany;
getChangeableFields: () => string[];
landing_company: LandingCompany;
isAccountOfTypeDisabled: (account: Record<string, DetailsOfEachMT5Loginid>) => boolean;
is_mt5_allowed: boolean;
mt5_disabled_signup_types: {
real: boolean;
demo: boolean;
};
dxtrade_disabled_signup_types: {
real: boolean;
demo: boolean;
};
dxtrade_accounts_list_error: null;
has_account_error_in_mt5_real_list: boolean;
has_account_error_in_mt5_demo_list: boolean;
has_account_error_in_dxtrade_real_list: boolean;
has_account_error_in_dxtrade_demo_list: boolean;
is_fully_authenticated: boolean;
states_list: StatesList;
/** @deprecated Use `useCurrencyConfig` or `useCurrentCurrencyConfig` from `@deriv/hooks` package instead. */
is_crypto: (currency?: string) => boolean;
dxtrade_accounts_list: DetailsOfEachMT5Loginid[];
derivez_accounts_list: DetailsOfEachMT5Loginid[];
default_currency: string;
resetVirtualBalance: () => Promise<void>;
has_enabled_two_fa: boolean;
setTwoFAStatus: (status: boolean) => void;
has_changed_two_fa: boolean;
setTwoFAChangedStatus: (status: boolean) => void;
is_svg: boolean;
real_account_creation_unlock_date: string;
setPrevAccountType: (account_type: string) => void;
setFinancialAndTradingAssessment: (
payload: SetFinancialAssessmentRequest
) => Promise<SetFinancialAssessmentResponse>;
prev_account_type: string;
};
type TCommonStoreError = {
app_routing_history: TAppRoutingHistory[];
header: string | JSX.Element;
message: string | JSX.Element;
redirect_label: string;
redirect_to: string;
redirectOnClick: (() => void) | null;
setError: (has_error: boolean, error: React.ReactNode | null) => void;
should_clear_error_on_click: boolean;
should_show_refresh: boolean;
type?: string;
};
type TCommonStore = {
isCurrentLanguage(language_code: string): boolean;
error: TCommonStoreError;
services_error: { code: string; message: string; type: string } | Record<string, never>;
has_error: boolean;
is_from_derivgo: boolean;
is_network_online: boolean;
platform: 'dxtrade' | 'derivez' | 'mt5' | 'ctrader' | '';
routeBackInApp: (history: Pick<RouteComponentProps, 'history'>, additional_platform_path?: string[]) => void;
routeTo: (pathname: string) => void;
server_time?: Moment;
changeCurrentLanguage: (new_language: string) => void;
changeSelectedLanguage: (key: string) => void;
current_language: string;
is_language_changing: boolean;
is_socket_opened: boolean;
setAppstorePlatform: (value: string) => void;
app_routing_history: TAppRoutingHistory[];
getExchangeRate: (from_currency: string, to_currency: string) => Promise<number>;
network_status: Record<string, never> | { [key: string]: string };
};
type TUiStore = {
addToast: (toast_config: TAddToastProps) => void;
app_contents_scroll_ref: React.MutableRefObject<null | HTMLDivElement>;
current_focus: string | null;
disableApp: () => void;
enableApp: () => void;
has_real_account_signup_ended: boolean;
is_loading: boolean;
is_cashier_visible: boolean;
is_closing_create_real_account_modal: boolean;
is_unsupported_contract_modal_visible: boolean;
has_only_forward_starting_contracts: boolean;
is_dark_mode_on: boolean;
is_reports_visible: boolean;
is_language_settings_modal_on: boolean;
is_app_disabled: boolean;
is_link_expired_modal_visible: boolean;
is_mobile: boolean;
is_positions_drawer_on: boolean;
is_services_error_visible: boolean;
openRealAccountSignup: (
value: 'maltainvest' | 'svg' | 'add_crypto' | 'choose' | 'add_fiat' | 'set_currency' | 'manage'
) => void;
notification_messages_ui: React.ElementType;
setCurrentFocus: (value: string) => void;
setDarkMode: (is_dark_mode_on: boolean) => boolean;
setReportsTabIndex: (value: number) => void;
setIsClosingCreateRealAccountModal: (value: boolean) => void;
setRealAccountSignupEnd: (status: boolean) => void;
setHasOnlyForwardingContracts: (has_only_forward_starting_contracts: boolean) => void;
sub_section_index: number;
setSubSectionIndex: (index: number) => void;
shouldNavigateAfterChooseCrypto: (value: Omit<string, TRoutes> | TRoutes) => void;
toggleAccountsDialog: () => void;
toggleCashier: () => void;
toggleLanguageSettingsModal: () => void;
toggleLinkExpiredModal: (state_change: boolean) => void;
togglePositionsDrawer: () => void;
toggleReadyToDepositModal: () => void;
toggleSetCurrencyModal: () => void;
toggleShouldShowRealAccountsList: (value: boolean) => void;
toggleServicesErrorModal: () => void;
is_tablet: boolean;
removeToast: (key: string) => void;
is_ready_to_deposit_modal_visible: boolean;
reports_route_tab_index: number;
should_show_cancellation_warning: boolean;
toggleCancellationWarning: (state_change: boolean) => void;
toggleUnsupportedContractModal: (state_change: boolean) => void;
toggleReports: (is_visible: boolean) => void;
is_real_acc_signup_on: boolean;
is_need_real_account_for_cashier_modal_visible: boolean;
is_chart_layout_default: boolean;
toggleNeedRealAccountForCashierModal: () => void;
setIsAcuityModalOpen: (value: boolean) => void;
is_switch_to_deriv_account_modal_visible: boolean;
openSwitchToRealAccountModal: () => void;
openDerivRealAccountNeededModal: () => void;
is_top_up_virtual_open: boolean;
is_top_up_virtual_in_progress: boolean;
is_top_up_virtual_success: boolean;
closeSuccessTopUpModal: () => void;
closeTopUpModal: () => void;
is_cfd_reset_password_modal_enabled: boolean;
setCFDPasswordResetModal: (value: boolean) => void;
openAccountNeededModal: () => void;
is_accounts_switcher_on: boolean;
openTopUpModal: () => void;
is_reset_trading_password_modal_visible: boolean;
setResetTradingPasswordModalOpen: () => void;
populateHeaderExtensions: (header_items: JSX.Element | null) => void;
populateSettingsExtensions: (menu_items: Array<TPopulateSettingsExtensionsMenuItem> | null) => void;
setShouldShowCooldownModal: (value: boolean) => void;
setAppContentsScrollRef: (ref: React.MutableRefObject<null | HTMLDivElement>) => void;
populateFooterExtensions: (
footer_extensions:
| [
{
position?: string;
Component?: React.FunctionComponent;
has_right_separator?: boolean;
}
]
| []
) => void;
};
type TPortfolioStore = {
active_positions: TPortfolioPosition[];
error: string;
getPositionById: (id: number) => TPortfolioPosition;
is_accumulator: boolean;
is_loading: boolean;
is_multiplier: boolean;
is_turbos: boolean;
onClickCancel: (contract_id?: number) => void;
onClickSell: (contract_id?: number) => void;
onMount: () => void;
positions: TPortfolioPosition[];
removePositionById: (id: number) => void;
};
type TContractStore = {
getContractById: (id: number) => ProposalOpenContract;
};
type TMenuStore = {
attach: (item: TMenuItem) => void;
update: (menu: TMenuItem, index: number) => void;
};
type TNotificationStore = {
addNotificationMessage: (message: TNotification) => void;
addNotificationMessageByKey: (key: string) => void;
client_notifications: object;
filterNotificationMessages: () => void;
refreshNotifications: () => void;
removeNotificationByKey: (key: string) => void;
removeNotificationMessage: (key: string, should_show_again?: boolean) => void;
setP2POrderProps: () => void;
showAccountSwitchToRealNotification: (loginid: string, currency: string) => void;
setP2PRedirectTo: () => void;
};
type TBalance = {
currency: string;
balance: number;
};
type TTradersHubStore = {
closeModal: () => void;
content_flag: 'low_risk_cr_eu' | 'low_risk_cr_non_eu' | 'high_risk_cr' | 'cr_demo' | 'eu_demo' | 'eu_real' | '';
combined_cfd_mt5_accounts: DetailsOfEachMT5Loginid &
{
short_code_and_region: string;
login: string;
sub_title: string;
icon: 'Derived' | 'Financial' | 'Options' | 'CFDs';
}[];
openModal: (modal_id: string, props?: unknown) => void;
selected_account: {
login: string;
account_id: string;
};
handleTabItemClick: (idx: number) => void;
is_account_transfer_modal_open: boolean;
is_low_risk_cr_eu_real: boolean;
is_eu_user: boolean;
show_eu_related_content: boolean;
setTogglePlatformType: (platform_type: string) => void;
is_real: boolean;
is_regulators_compare_modal_visible: boolean;
is_tour_open: boolean;
selectRegion: (region: string) => void;
closeAccountTransferModal: () => void;
toggleRegulatorsCompareModal: () => void;
selected_region: string;
openFailedVerificationModal: (selected_account_type: string) => void;
multipliers_account_status: string;
financial_restricted_countries: boolean;
selected_account_type: string;
selected_platform_type: string;
setSelectedAccount: (account: { login?: string; account_id?: string }) => void;
no_CR_account: boolean;
no_MF_account: boolean;
CFDs_restricted_countries: boolean;
toggleAccountTransferModal: () => void;
is_demo: boolean;
platform_real_balance: TBalance;
cfd_demo_balance: TBalance;
platform_demo_balance: TBalance;
cfd_real_balance: TBalance;
selectAccountType: (account_type: string) => void;
toggleIsTourOpen: (is_tour_open: boolean) => void;
is_demo_low_risk: boolean;
is_mt5_notification_modal_visible: boolean;
setMT5NotificationModal: (value: boolean) => void;
available_dxtrade_accounts: DetailsOfEachMT5Loginid[];
available_derivez_accounts: DetailsOfEachMT5Loginid[];
has_any_real_account: boolean;
startTrade: () => void;
getExistingAccounts: () => void;
getAccount: () => void;
toggleAccountTypeModalVisibility: () => void;
can_get_more_cfd_mt5_accounts: boolean;
showTopUpModal: () => void;
};
/**
* This is the type that contains all the `core` package stores
*/
export type TCoreStores = {
client: TClientStore;
common: TCommonStore;
menu: TMenuStore;
ui: TUiStore;
portfolio: TPortfolioStore;
contract_trade: TContractStore;
// This should be `any` as this property will be handled in each package.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
modules: Record<string, any>;
notifications: TNotificationStore;
traders_hub: TTradersHubStore;
};
export type TStores = TCoreStores & {
exchange_rates: ExchangeRatesStore;
feature_flags: FeatureFlagsStore;
};
// socket.rs
// SPDX-License-Identifier: MIT
use std::{
io::{Error, Result},
mem,
os::unix::io::{AsRawFd, FromRawFd, RawFd},
};
use crate::SocketAddr;
/// A netlink socket.
///
/// # Example
///
/// In this example we:
///
/// 1. open a new socket
/// 2. send a message to the kernel
/// 3. read the response
///
/// ```rust
/// use netlink_sys::{protocols::NETLINK_ROUTE, Socket, SocketAddr};
/// use std::process;
///
/// // open a new socket for the NETLINK_ROUTE subsystem (see "man 7 rtnetlink")
/// let mut socket = Socket::new(NETLINK_ROUTE).unwrap();
/// // address of the remote peer we'll send a message to. This particular address is for the kernel
/// let kernel_addr = SocketAddr::new(0, 0);
/// // this is a valid message for listing the network links on the system
/// let pkt = vec![
/// 0x14, 0x00, 0x00, 0x00, 0x12, 0x00, 0x01, 0x03, 0xfd, 0xfe, 0x38, 0x5c, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00,
/// ];
/// // send the message to the kernel
/// let n_sent = socket.send_to(&pkt[..], &kernel_addr, 0).unwrap();
/// assert_eq!(n_sent, pkt.len());
/// // buffer for receiving the response
/// let mut buf = vec![0; 4096];
/// loop {
/// // receive a datagram
/// let (n_received, sender_addr) = socket.recv_from(&mut &mut buf[..], 0).unwrap();
/// assert_eq!(sender_addr, kernel_addr);
/// println!("received datagram {:?}", &buf[..n_received]);
/// if buf[4] == 2 && buf[5] == 0 {
/// println!("the kernel responded with an error");
/// return;
/// }
/// if buf[4] == 3 && buf[5] == 0 {
/// println!("end of dump");
/// return;
/// }
/// }
/// ```
#[derive(Clone, Debug)]
pub struct Socket(RawFd);
impl AsRawFd for Socket {
fn as_raw_fd(&self) -> RawFd {
self.0
}
}
impl FromRawFd for Socket {
unsafe fn from_raw_fd(fd: RawFd) -> Self {
Socket(fd)
}
}
impl Drop for Socket {
fn drop(&mut self) {
unsafe { libc::close(self.0) };
}
}
impl Socket {
/// Open a new socket for the given netlink subsystem. `protocol` must be one of the
/// [`netlink_sys::protocols`][protos] constants.
///
/// [protos]: crate::protocols
pub fn new(protocol: isize) -> Result<Self> {
let res = unsafe {
libc::socket(
libc::PF_NETLINK,
libc::SOCK_DGRAM | libc::SOCK_CLOEXEC,
protocol as libc::c_int,
)
};
if res < 0 {
return Err(Error::last_os_error());
}
Ok(Socket(res))
}
/// Bind the socket to the given address
pub fn bind(&mut self, addr: &SocketAddr) -> Result<()> {
let (addr_ptr, addr_len) = addr.as_raw();
let res = unsafe { libc::bind(self.0, addr_ptr, addr_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
/// Bind the socket to an address assigned by the kernel, and return that address.
pub fn bind_auto(&mut self) -> Result<SocketAddr> {
let mut addr = SocketAddr::new(0, 0);
self.bind(&addr)?;
self.get_address(&mut addr)?;
Ok(addr)
}
/// Get the socket address
pub fn get_address(&self, addr: &mut SocketAddr) -> Result<()> {
let (addr_ptr, mut addr_len) = addr.as_raw_mut();
let addr_len_copy = addr_len;
let addr_len_ptr = &mut addr_len as *mut libc::socklen_t;
let res = unsafe { libc::getsockname(self.0, addr_ptr, addr_len_ptr) };
if res < 0 {
return Err(Error::last_os_error());
}
assert_eq!(addr_len, addr_len_copy);
Ok(())
}
// when building with --features smol we don't need this
#[allow(dead_code)]
/// Make this socket non-blocking
pub fn set_non_blocking(&self, non_blocking: bool) -> Result<()> {
let mut non_blocking = non_blocking as libc::c_int;
let res = unsafe { libc::ioctl(self.0, libc::FIONBIO, &mut non_blocking) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
/// Connect the socket to the given address. Netlink is a connection-less protocol, so a socket can communicate with
/// multiple peers with the [`Socket::send_to`] and [`Socket::recv_from`] methods. However, if the socket only needs
/// to communicate with one peer, it is convenient not to have to bother with the peer address. This is what
/// `connect` is for. After calling `connect`, [`Socket::send`] and [`Socket::recv`] respectively send and receive
/// datagrams to and from `remote_addr`.
///
/// # Examples
///
/// In this example we:
///
/// 1. open a socket
/// 2. connect it to the kernel with [`Socket::connect`]
/// 3. send a request to the kernel with [`Socket::send`]
/// 4. read the response (which can span over several messages) with [`Socket::recv`]
///
/// ```rust
/// use netlink_sys::{protocols::NETLINK_ROUTE, Socket, SocketAddr};
/// use std::process;
///
/// let mut socket = Socket::new(NETLINK_ROUTE).unwrap();
/// let _ = socket.bind_auto().unwrap();
/// let kernel_addr = SocketAddr::new(0, 0);
/// socket.connect(&kernel_addr).unwrap();
/// // This is a valid message for listing the network links on the system
/// let msg = vec![
/// 0x14, 0x00, 0x00, 0x00, 0x12, 0x00, 0x01, 0x03, 0xfd, 0xfe, 0x38, 0x5c, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00,
/// ];
/// let n_sent = socket.send(&msg[..], 0).unwrap();
/// assert_eq!(n_sent, msg.len());
/// // buffer for receiving the response
/// let mut buf = vec![0; 4096];
/// loop {
/// let mut n_received = socket.recv(&mut &mut buf[..], 0).unwrap();
/// println!("received {:?}", &buf[..n_received]);
/// if buf[4] == 2 && buf[5] == 0 {
/// println!("the kernel responded with an error");
/// return;
/// }
/// if buf[4] == 3 && buf[5] == 0 {
/// println!("end of dump");
/// return;
/// }
/// }
/// ```
pub fn connect(&self, remote_addr: &SocketAddr) -> Result<()> {
// FIXME:
//
// Even though for SOCK_DGRAM sockets there's no IO, if our socket is non-blocking,
// connect() might return EINPROGRESS. In theory, the right way to treat EINPROGRESS would
// be to ignore the error, and let the user poll the socket to check when it becomes
// writable, indicating that the connection succeeded. The code already exists in mio for
// TcpStream:
//
// > pub fn connect(stream: net::TcpStream, addr: &SocketAddr) -> io::Result<TcpStream> {
// > set_non_block(stream.as_raw_fd())?;
// > match stream.connect(addr) {
// > Ok(..) => {}
// > Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {}
// > Err(e) => return Err(e),
// > }
// > Ok(TcpStream { inner: stream })
// > }
//
// In practice, since the connection does not require any IO for SOCK_DGRAM sockets, it
// almost never returns EINPROGRESS and so for now, we just return whatever libc::connect
// returns. If it returns EINPROGRESS, the caller will have to handle the error themselves
//
// Refs:
//
// - https://stackoverflow.com/a/14046386/1836144
// - https://lists.isc.org/pipermail/bind-users/2009-August/077527.html
let (addr, addr_len) = remote_addr.as_raw();
let res = unsafe { libc::connect(self.0, addr, addr_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
// Most of the comments in this method come from a discussion on rust users forum.
// [thread]: https://users.rust-lang.org/t/help-understanding-libc-call/17308/9
//
/// Read a datagram from the socket and return the number of bytes that have been read and the address of the
/// sender. The data being read is copied into `buf`. If `buf` is too small, the datagram is truncated. The
/// supported flags are the `MSG_*` described in `man 2 recvmsg`
///
/// # Warning
///
/// In datagram-oriented protocols, `recv` and `recvfrom` normally receive only ONE datagram, but this seems not to
/// be always true for netlink sockets: with some protocols like `NETLINK_AUDIT`, multiple netlink packets can be
/// read with a single call.
pub fn recv_from<B>(&self, buf: &mut B, flags: libc::c_int) -> Result<(usize, SocketAddr)>
where
B: bytes::BufMut,
{
// Create an empty storage for the address. Note that the Rust standard library creates a
// sockaddr_storage so that it works for any address family, but here, we already know that
// we'll have a Netlink address, so we can create the appropriate storage.
let mut addr = unsafe { mem::zeroed::<libc::sockaddr_nl>() };
// recvfrom takes a *sockaddr as parameter so that it can accept any kind of address
// storage, so we need to create such a pointer for the sockaddr_nl we just initialized.
//
// Create a raw pointer to Cast our raw pointer to a
// our storage. We cannot generic pointer to *sockaddr
// pass it to recvfrom yet. that recvfrom can use
// ^ ^
// | |
// +--------------+---------------+ +---------+--------+
// / \ / \
let addr_ptr = &mut addr as *mut libc::sockaddr_nl as *mut libc::sockaddr;
// Why do we need to pass the address length? We're passing a generic *sockaddr to
// recvfrom. Somehow recvfrom needs to make sure that the address of the received packet
// would fit into the actual type that is behind *sockaddr: it could be a sockaddr_nl but
// also a sockaddr_in, a sockaddr_in6, or even the generic sockaddr_storage that can store
// any address.
let mut addrlen = mem::size_of_val(&addr);
// recvfrom does not take the address length by value (see [thread]), so we need to create
// a pointer to it.
let addrlen_ptr = &mut addrlen as *mut usize as *mut libc::socklen_t;
let chunk = buf.chunk_mut();
// Cast the *mut u8 into *mut void.
// This is equivalent to casting a *char into *void
// See [thread]
// ^
// Create a *mut u8 |
// ^ |
// | |
// +------+-------+ +--------+-------+
// / \ / \
let buf_ptr = chunk.as_mut_ptr() as *mut libc::c_void;
let buf_len = chunk.len() as libc::size_t;
let res = unsafe { libc::recvfrom(self.0, buf_ptr, buf_len, flags, addr_ptr, addrlen_ptr) };
if res < 0 {
return Err(Error::last_os_error());
} else {
// with `MSG_TRUNC` `res` might exceed `buf_len`
let written = std::cmp::min(buf_len, res as usize);
unsafe {
buf.advance_mut(written);
}
}
Ok((res as usize, SocketAddr(addr)))
}
/// For a connected socket, `recv` reads a datagram from the socket. The sender is the remote peer the socket is
/// connected to (see [`Socket::connect`]). See also [`Socket::recv_from`]
pub fn recv<B>(&self, buf: &mut B, flags: libc::c_int) -> Result<usize>
where
B: bytes::BufMut,
{
let chunk = buf.chunk_mut();
let buf_ptr = chunk.as_mut_ptr() as *mut libc::c_void;
let buf_len = chunk.len() as libc::size_t;
let res = unsafe { libc::recv(self.0, buf_ptr, buf_len, flags) };
if res < 0 {
return Err(Error::last_os_error());
} else {
// with `MSG_TRUNC` `res` might exceed `buf_len`
let written = std::cmp::min(buf_len, res as usize);
unsafe {
buf.advance_mut(written);
}
}
Ok(res as usize)
}
/// Receive a full message. Unlike [`Socket::recv_from`], which truncates messages that exceed the length of the
/// buffer passed as argument, this method always reads a whole message, no matter its size.
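///
/// # Example
///
/// A minimal sketch reusing the `NETLINK_ROUTE` dump request from the examples above (marked
/// `no_run` since the reply depends on the running system):
///
/// ```no_run
/// use netlink_sys::{protocols::NETLINK_ROUTE, Socket, SocketAddr};
///
/// let socket = Socket::new(NETLINK_ROUTE).unwrap();
/// let kernel_addr = SocketAddr::new(0, 0);
/// let pkt = vec![
///     0x14, 0x00, 0x00, 0x00, 0x12, 0x00, 0x01, 0x03, 0xfd, 0xfe, 0x38, 0x5c, 0x00, 0x00, 0x00,
///     0x00, 0x00, 0x00, 0x00, 0x00,
/// ];
/// socket.send_to(&pkt[..], &kernel_addr, 0).unwrap();
/// // No receive buffer to size up front: the message is peeked first, then read in full.
/// let (msg, sender_addr) = socket.recv_from_full().unwrap();
/// assert_eq!(sender_addr, kernel_addr);
/// println!("received {} bytes", msg.len());
/// ```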
pub fn recv_from_full(&self) -> Result<(Vec<u8>, SocketAddr)> {
// Peek
let mut buf: Vec<u8> = Vec::new();
let (peek_len, _) = self.recv_from(&mut buf, libc::MSG_PEEK | libc::MSG_TRUNC)?;
// Receive
buf.clear();
buf.reserve(peek_len);
let (rlen, addr) = self.recv_from(&mut buf, 0)?;
assert_eq!(rlen, peek_len);
Ok((buf, addr))
}
/// Send the given buffer `buf` to the remote peer with address `addr`. The supported flags are the `MSG_*` values
/// documented in `man 2 send`.
pub fn send_to(&self, buf: &[u8], addr: &SocketAddr, flags: libc::c_int) -> Result<usize> {
let (addr_ptr, addr_len) = addr.as_raw();
let buf_ptr = buf.as_ptr() as *const libc::c_void;
let buf_len = buf.len() as libc::size_t;
let res = unsafe { libc::sendto(self.0, buf_ptr, buf_len, flags, addr_ptr, addr_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(res as usize)
}
/// For a connected socket, `send` sends the given buffer `buf` to the remote peer the socket is connected to. See
/// also [`Socket::connect`] and [`Socket::send_to`].
pub fn send(&self, buf: &[u8], flags: libc::c_int) -> Result<usize> {
let buf_ptr = buf.as_ptr() as *const libc::c_void;
let buf_len = buf.len() as libc::size_t;
let res = unsafe { libc::send(self.0, buf_ptr, buf_len, flags) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(res as usize)
}
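/// `NETLINK_PKTINFO` (see `man 7 netlink`): when enabled, the kernel passes an `nl_pktinfo`
/// structure as ancillary data with each received packet, carrying the destination multicast
/// group.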
pub fn set_pktinfo(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(self.0, libc::SOL_NETLINK, libc::NETLINK_PKTINFO, value)
}
pub fn get_pktinfo(&self) -> Result<bool> {
let res = getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_PKTINFO)?;
Ok(res == 1)
}
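/// Subscribe the socket to the given multicast group (`NETLINK_ADD_MEMBERSHIP`).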
pub fn add_membership(&mut self, group: u32) -> Result<()> {
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_ADD_MEMBERSHIP,
group,
)
}
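/// Unsubscribe the socket from the given multicast group (`NETLINK_DROP_MEMBERSHIP`).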
pub fn drop_membership(&mut self, group: u32) -> Result<()> {
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_DROP_MEMBERSHIP,
group,
)
}
// pub fn list_membership(&self) -> Vec<u32> {
// unimplemented!();
// // getsockopt won't be enough here, because we may need to perform 2 calls, and because the
// // length of the list returned by libc::getsockopt is returned by mutating the length
// // argument, which our implementation of getsockopt forbids.
// }
/// `NETLINK_BROADCAST_ERROR` (since Linux 2.6.30). When not set, `netlink_broadcast()` only
/// reports `ESRCH` errors and silently ignores `NOBUFS` errors.
pub fn set_broadcast_error(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_BROADCAST_ERROR,
value,
)
}
pub fn get_broadcast_error(&self) -> Result<bool> {
let res =
getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_BROADCAST_ERROR)?;
Ok(res == 1)
}
/// `NETLINK_NO_ENOBUFS` (since Linux 2.6.30). This flag can be used by unicast and broadcast
/// listeners to avoid receiving `ENOBUFS` errors.
pub fn set_no_enobufs(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(self.0, libc::SOL_NETLINK, libc::NETLINK_NO_ENOBUFS, value)
}
pub fn get_no_enobufs(&self) -> Result<bool> {
let res = getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_NO_ENOBUFS)?;
Ok(res == 1)
}
/// `NETLINK_LISTEN_ALL_NSID` (since Linux 4.2). When set, this socket will receive netlink
/// notifications from all network namespaces that have an nsid assigned into the network
/// namespace where the socket has been opened. The nsid is sent to user space via an ancillary
/// data.
pub fn set_listen_all_namespaces(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_LISTEN_ALL_NSID,
value,
)
}
pub fn get_listen_all_namespaces(&self) -> Result<bool> {
let res =
getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_LISTEN_ALL_NSID)?;
Ok(res == 1)
}
/// `NETLINK_CAP_ACK` (since Linux 4.2). The kernel may fail to allocate the necessary room
/// for the acknowledgment message back to user space. This option trims off the payload of
/// the original netlink message. The netlink message header is still included, so the user can
/// guess from the sequence number which message triggered the acknowledgment.
pub fn set_cap_ack(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(self.0, libc::SOL_NETLINK, libc::NETLINK_CAP_ACK, value)
}
pub fn get_cap_ack(&self) -> Result<bool> {
let res = getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_CAP_ACK)?;
Ok(res == 1)
}
}
/// Wrapper around `getsockopt`:
///
/// ```no_rust
/// int getsockopt(int socket, int level, int option_name, void *restrict option_value, socklen_t *restrict option_len);
/// ```
pub(crate) fn getsockopt<T: Copy>(fd: RawFd, level: libc::c_int, option: libc::c_int) -> Result<T> {
// Create storage for the options we're fetching
let mut slot: T = unsafe { mem::zeroed() };
// Create a mutable raw pointer to the storage so that getsockopt can fill the value
let slot_ptr = &mut slot as *mut T as *mut libc::c_void;
// Let getsockopt know how big our storage is
let mut slot_len = mem::size_of::<T>() as libc::socklen_t;
// getsockopt takes a mutable pointer to the length, because for some options like
// NETLINK_LIST_MEMBERSHIP where the option value is a list with arbitrary length,
// getsockopt uses this parameter to signal how big the storage needs to be.
let slot_len_ptr = &mut slot_len as *mut libc::socklen_t;
let res = unsafe { libc::getsockopt(fd, level, option, slot_ptr, slot_len_ptr) };
if res < 0 {
return Err(Error::last_os_error());
}
// Ignore the options that require the length to be set by getsockopt.
// We'll deal with them individually.
assert_eq!(slot_len as usize, mem::size_of::<T>());
Ok(slot)
}
// adapted from rust standard library
fn setsockopt<T>(fd: RawFd, level: libc::c_int, option: libc::c_int, payload: T) -> Result<()> {
let payload = &payload as *const T as *const libc::c_void;
let payload_len = mem::size_of::<T>() as libc::socklen_t;
let res = unsafe { libc::setsockopt(fd, level, option, payload, payload_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use crate::protocols::NETLINK_ROUTE;
#[test]
fn new() {
Socket::new(NETLINK_ROUTE).unwrap();
}
#[test]
fn connect() {
let sock = Socket::new(NETLINK_ROUTE).unwrap();
sock.connect(&SocketAddr::new(0, 0)).unwrap();
}
#[test]
fn bind() {
let mut sock = Socket::new(NETLINK_ROUTE).unwrap();
sock.bind(&SocketAddr::new(4321, 0)).unwrap();
}
#[test]
fn bind_auto() {
let mut sock = Socket::new(NETLINK_ROUTE).unwrap();
let addr = sock.bind_auto().unwrap();
// make sure that the address we got from the kernel is there
assert!(addr.port_number() != 0);
}
#[test]
fn set_non_blocking() {
let sock = Socket::new(NETLINK_ROUTE).unwrap();
sock.set_non_blocking(true).unwrap();
sock.set_non_blocking(false).unwrap();
}
#[test]
fn options() {
let mut sock = Socket::new(NETLINK_ROUTE).unwrap();
sock.set_cap_ack(true).unwrap();
assert!(sock.get_cap_ack().unwrap());
sock.set_cap_ack(false).unwrap();
assert!(!sock.get_cap_ack().unwrap());
sock.set_no_enobufs(true).unwrap();
assert!(sock.get_no_enobufs().unwrap());
sock.set_no_enobufs(false).unwrap();
assert!(!sock.get_no_enobufs().unwrap());
sock.set_broadcast_error(true).unwrap();
assert!(sock.get_broadcast_error().unwrap());
sock.set_broadcast_error(false).unwrap();
assert!(!sock.get_broadcast_error().unwrap());
// FIXME: these require root permissions
// sock.set_listen_all_namespaces(true).unwrap();
// assert!(sock.get_listen_all_namespaces().unwrap());
// sock.set_listen_all_namespaces(false).unwrap();
// assert!(!sock.get_listen_all_namespaces().unwrap());
}
}
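Taken together, the API above is enough for a complete dump without guessing buffer sizes. The
following is a minimal sketch, not part of socket.rs itself: it assumes the crate is consumed as
`netlink_sys` (as in the doc examples above) and reuses the same RTM_GETLINK request bytes.

```rust
use netlink_sys::{protocols::NETLINK_ROUTE, Socket, SocketAddr};

fn main() -> std::io::Result<()> {
    // Open a NETLINK_ROUTE socket, let the kernel assign our address, and connect to the kernel.
    let mut socket = Socket::new(NETLINK_ROUTE)?;
    socket.bind_auto()?;
    socket.connect(&SocketAddr::new(0, 0))?;

    // Same RTM_GETLINK dump request as in the doc examples above.
    let pkt = vec![
        0x14, 0x00, 0x00, 0x00, 0x12, 0x00, 0x01, 0x03, 0xfd, 0xfe, 0x38, 0x5c, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00,
    ];
    assert_eq!(socket.send(&pkt[..], 0)?, pkt.len());

    loop {
        // recv_from_full sizes the buffer for us, so large dumps are never truncated.
        let (buf, _sender) = socket.recv_from_full()?;
        println!("received {} bytes", buf.len());
        // Message type 2 (error) or 3 (end of dump) terminates the exchange, mirroring the
        // checks in the doc examples above.
        if buf.len() >= 6 && buf[5] == 0 && (buf[4] == 2 || buf[4] == 3) {
            break;
        }
    }
    Ok(())
}
```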
// socket.rs
// SPDX-License-Identifier: MIT
use std::{
io::{Error, Result},
mem,
os::unix::io::{AsRawFd, FromRawFd, RawFd},
};
use crate::SocketAddr;
/// A netlink socket.
///
/// # Example
///
/// In this example we:
///
/// 1. open a new socket
/// 2. send a message to the kernel
/// 3. read the response
///
/// ```rust
/// use netlink_sys::{protocols::NETLINK_ROUTE, Socket, SocketAddr};
/// use std::process;
///
/// // open a new socket for the NETLINK_ROUTE subsystem (see "man 7 rtnetlink")
/// let mut socket = Socket::new(NETLINK_ROUTE).unwrap();
/// // address of the remote peer we'll send a message to. This particular address is for the kernel
/// let kernel_addr = SocketAddr::new(0, 0);
/// // this is a valid message for listing the network links on the system
/// let pkt = vec![
/// 0x14, 0x00, 0x00, 0x00, 0x12, 0x00, 0x01, 0x03, 0xfd, 0xfe, 0x38, 0x5c, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00,
/// ];
/// // send the message to the kernel
/// let n_sent = socket.send_to(&pkt[..], &kernel_addr, 0).unwrap();
/// assert_eq!(n_sent, pkt.len());
/// // buffer for receiving the response
/// let mut buf = vec![0; 4096];
/// loop {
/// // receive a datagram
/// let (n_received, sender_addr) = socket.recv_from(&mut &mut buf[..], 0).unwrap();
/// assert_eq!(sender_addr, kernel_addr);
/// println!("received datagram {:?}", &buf[..n_received]);
/// if buf[4] == 2 && buf[5] == 0 {
/// println!("the kernel responded with an error");
/// return;
/// }
/// if buf[4] == 3 && buf[5] == 0 {
/// println!("end of dump");
/// return;
/// }
/// }
/// ```
#[derive(Clone, Debug)]
pub struct Socket(RawFd);
impl AsRawFd for Socket {
fn as_raw_fd(&self) -> RawFd {
self.0
}
}
impl FromRawFd for Socket {
unsafe fn from_raw_fd(fd: RawFd) -> Self {
Socket(fd)
}
}
impl Drop for Socket {
fn drop(&mut self) {
unsafe { libc::close(self.0) };
}
}
impl Socket {
/// Open a new socket for the given netlink subsystem. `protocol` must be one of the
/// [`netlink_sys::protocols`][protos] constants.
///
/// [protos]: crate::protocols
pub fn new(protocol: isize) -> Result<Self> {
let res = unsafe {
libc::socket(
libc::PF_NETLINK,
libc::SOCK_DGRAM | libc::SOCK_CLOEXEC,
protocol as libc::c_int,
)
};
if res < 0 {
return Err(Error::last_os_error());
}
Ok(Socket(res))
}
/// Bind the socket to the given address
pub fn bind(&mut self, addr: &SocketAddr) -> Result<()> {
let (addr_ptr, addr_len) = addr.as_raw();
let res = unsafe { libc::bind(self.0, addr_ptr, addr_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
/// Bind the socket to an address assigned by the kernel, and return that address.
pub fn bind_auto(&mut self) -> Result<SocketAddr> {
let mut addr = SocketAddr::new(0, 0);
self.bind(&addr)?;
self.get_address(&mut addr)?;
Ok(addr)
}
/// Get the socket address
pub fn get_address(&self, addr: &mut SocketAddr) -> Result<()> {
let (addr_ptr, mut addr_len) = addr.as_raw_mut();
let addr_len_copy = addr_len;
let addr_len_ptr = &mut addr_len as *mut libc::socklen_t;
let res = unsafe { libc::getsockname(self.0, addr_ptr, addr_len_ptr) };
if res < 0 {
return Err(Error::last_os_error());
}
assert_eq!(addr_len, addr_len_copy);
Ok(())
}
// when building with --features smol we don't need this
#[allow(dead_code)]
/// Make this socket non-blocking
pub fn set_non_blocking(&self, non_blocking: bool) -> Result<()> {
let mut non_blocking = non_blocking as libc::c_int;
let res = unsafe { libc::ioctl(self.0, libc::FIONBIO, &mut non_blocking) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
/// Connect the socket to the given address. Netlink is a connection-less protocol, so a socket can communicate with
/// multiple peers with the [`Socket::send_to`] and [`Socket::recv_from`] methods. However, if the socket only needs
/// to communicate with one peer, it is convenient not to have to bother with the peer address. This is what
/// `connect` is for. After calling `connect`, [`Socket::send`] and [`Socket::recv`] respectively send and receive
/// datagrams to and from `remote_addr`.
///
/// # Examples
///
/// In this example we:
///
/// 1. open a socket
/// 2. connect it to the kernel with [`Socket::connect`]
/// 3. send a request to the kernel with [`Socket::send`]
/// 4. read the response (which can span over several messages) with [`Socket::recv`]
///
/// ```rust
/// use netlink_sys::{protocols::NETLINK_ROUTE, Socket, SocketAddr};
/// use std::process;
///
/// let mut socket = Socket::new(NETLINK_ROUTE).unwrap();
/// let _ = socket.bind_auto().unwrap();
/// let kernel_addr = SocketAddr::new(0, 0);
/// socket.connect(&kernel_addr).unwrap();
/// // This is a valid message for listing the network links on the system
/// let msg = vec![
/// 0x14, 0x00, 0x00, 0x00, 0x12, 0x00, 0x01, 0x03, 0xfd, 0xfe, 0x38, 0x5c, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00,
/// ];
/// let n_sent = socket.send(&msg[..], 0).unwrap();
/// assert_eq!(n_sent, msg.len());
/// // buffer for receiving the response
/// let mut buf = vec![0; 4096];
/// loop {
/// let mut n_received = socket.recv(&mut &mut buf[..], 0).unwrap();
/// println!("received {:?}", &buf[..n_received]);
/// if buf[4] == 2 && buf[5] == 0 {
/// println!("the kernel responded with an error");
/// return;
/// }
/// if buf[4] == 3 && buf[5] == 0 {
/// println!("end of dump");
/// return;
/// }
/// }
/// ```
pub fn connect(&self, remote_addr: &SocketAddr) -> Result<()> |
// Most of the comments in this method come from a discussion on rust users forum.
// [thread]: https://users.rust-lang.org/t/help-understanding-libc-call/17308/9
//
/// Read a datagram from the socket and return the number of bytes that have been read and the address of the
/// sender. The data being read is copied into `buf`. If `buf` is too small, the datagram is truncated. The
/// supported flags are the `MSG_*` described in `man 2 recvmsg`
///
/// # Warning
///
/// In datagram-oriented protocols, `recv` and `recvfrom` normally receive only ONE datagram, but this seems not to
/// be always true for netlink sockets: with some protocols like `NETLINK_AUDIT`, multiple netlink packets can be
/// read with a single call.
pub fn recv_from<B>(&self, buf: &mut B, flags: libc::c_int) -> Result<(usize, SocketAddr)>
where
B: bytes::BufMut,
{
// Create an empty storage for the address. Note that the Rust standard library creates a
// sockaddr_storage so that it works for any address family, but here, we already know that
// we'll have a Netlink address, so we can create the appropriate storage.
let mut addr = unsafe { mem::zeroed::<libc::sockaddr_nl>() };
// recvfrom takes a *sockaddr as parameter so that it can accept any kind of address
// storage, so we need to create such a pointer for the sockaddr_nl we just initialized.
//
// Create a raw pointer to Cast our raw pointer to a
// our storage. We cannot generic pointer to *sockaddr
// pass it to recvfrom yet. that recvfrom can use
// ^ ^
// | |
// +--------------+---------------+ +---------+--------+
// / \ / \
let addr_ptr = &mut addr as *mut libc::sockaddr_nl as *mut libc::sockaddr;
// Why do we need to pass the address length? We're passing a generic *sockaddr to
// recvfrom. Somehow recvfrom needs to make sure that the address of the received packet
// would fit into the actual type that is behind *sockaddr: it could be a sockaddr_nl but
// also a sockaddr_in, a sockaddr_in6, or even the generic sockaddr_storage that can store
// any address.
let mut addrlen = mem::size_of_val(&addr);
// recvfrom does not take the address length by value (see [thread]), so we need to create
// a pointer to it.
let addrlen_ptr = &mut addrlen as *mut usize as *mut libc::socklen_t;
let chunk = buf.chunk_mut();
// Cast the *mut u8 into *mut void.
// This is equivalent to casting a *char into *void
// See [thread]
// ^
// Create a *mut u8 |
// ^ |
// | |
// +------+-------+ +--------+-------+
// / \ / \
let buf_ptr = chunk.as_mut_ptr() as *mut libc::c_void;
let buf_len = chunk.len() as libc::size_t;
let res = unsafe { libc::recvfrom(self.0, buf_ptr, buf_len, flags, addr_ptr, addrlen_ptr) };
if res < 0 {
return Err(Error::last_os_error());
} else {
// with `MSG_TRUNC` `res` might exceed `buf_len`
let written = std::cmp::min(buf_len, res as usize);
unsafe {
buf.advance_mut(written);
}
}
Ok((res as usize, SocketAddr(addr)))
}
/// For a connected socket, `recv` reads a datagram from the socket. The sender is the remote peer the socket is
/// connected to (see [`Socket::connect`]). See also [`Socket::recv_from`]
pub fn recv<B>(&self, buf: &mut B, flags: libc::c_int) -> Result<usize>
where
B: bytes::BufMut,
{
let chunk = buf.chunk_mut();
let buf_ptr = chunk.as_mut_ptr() as *mut libc::c_void;
let buf_len = chunk.len() as libc::size_t;
let res = unsafe { libc::recv(self.0, buf_ptr, buf_len, flags) };
if res < 0 {
return Err(Error::last_os_error());
} else {
// with `MSG_TRUNC` `res` might exceed `buf_len`
let written = std::cmp::min(buf_len, res as usize);
unsafe {
buf.advance_mut(written);
}
}
Ok(res as usize)
}
/// Receive a full message. Unlike [`Socket::recv_from`], which truncates messages that exceed the length of the
/// buffer passed as argument, this method always reads a whole message, no matter its size.
pub fn recv_from_full(&self) -> Result<(Vec<u8>, SocketAddr)> {
// Peek
let mut buf: Vec<u8> = Vec::new();
let (peek_len, _) = self.recv_from(&mut buf, libc::MSG_PEEK | libc::MSG_TRUNC)?;
// Receive
buf.clear();
buf.reserve(peek_len);
let (rlen, addr) = self.recv_from(&mut buf, 0)?;
assert_eq!(rlen, peek_len);
Ok((buf, addr))
}
/// Send the given buffer `buf` to the remote peer with address `addr`. The supported flags are the `MSG_*` values
/// documented in `man 2 send`.
pub fn send_to(&self, buf: &[u8], addr: &SocketAddr, flags: libc::c_int) -> Result<usize> {
let (addr_ptr, addr_len) = addr.as_raw();
let buf_ptr = buf.as_ptr() as *const libc::c_void;
let buf_len = buf.len() as libc::size_t;
let res = unsafe { libc::sendto(self.0, buf_ptr, buf_len, flags, addr_ptr, addr_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(res as usize)
}
/// For a connected socket, `send` sends the given buffer `buf` to the remote peer the socket is connected to. See
/// also [`Socket::connect`] and [`Socket::send_to`].
pub fn send(&self, buf: &[u8], flags: libc::c_int) -> Result<usize> {
let buf_ptr = buf.as_ptr() as *const libc::c_void;
let buf_len = buf.len() as libc::size_t;
let res = unsafe { libc::send(self.0, buf_ptr, buf_len, flags) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(res as usize)
}
pub fn set_pktinfo(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(self.0, libc::SOL_NETLINK, libc::NETLINK_PKTINFO, value)
}
pub fn get_pktinfo(&self) -> Result<bool> {
let res = getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_PKTINFO)?;
Ok(res == 1)
}
pub fn add_membership(&mut self, group: u32) -> Result<()> {
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_ADD_MEMBERSHIP,
group,
)
}
pub fn drop_membership(&mut self, group: u32) -> Result<()> {
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_DROP_MEMBERSHIP,
group,
)
}
// pub fn list_membership(&self) -> Vec<u32> {
// unimplemented!();
// // getsockopt won't be enough here, because we may need to perform 2 calls, and because the
// // length of the list returned by libc::getsockopt is returned by mutating the length
// // argument, which our implementation of getsockopt forbids.
// }
/// `NETLINK_BROADCAST_ERROR` (since Linux 2.6.30). When not set, `netlink_broadcast()` only
/// reports `ESRCH` errors and silently ignores `NOBUFS` errors.
pub fn set_broadcast_error(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_BROADCAST_ERROR,
value,
)
}
pub fn get_broadcast_error(&self) -> Result<bool> {
let res =
getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_BROADCAST_ERROR)?;
Ok(res == 1)
}
/// `NETLINK_NO_ENOBUFS` (since Linux 2.6.30). This flag can be used by unicast and broadcast
/// listeners to avoid receiving `ENOBUFS` errors.
pub fn set_no_enobufs(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(self.0, libc::SOL_NETLINK, libc::NETLINK_NO_ENOBUFS, value)
}
pub fn get_no_enobufs(&self) -> Result<bool> {
let res = getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_NO_ENOBUFS)?;
Ok(res == 1)
}
/// `NETLINK_LISTEN_ALL_NSID` (since Linux 4.2). When set, this socket will receive netlink
/// notifications from all network namespaces that have an nsid assigned into the network
/// namespace where the socket has been opened. The nsid is sent to user space via ancillary
/// data.
pub fn set_listen_all_namespaces(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_LISTEN_ALL_NSID,
value,
)
}
pub fn get_listen_all_namespaces(&self) -> Result<bool> {
let res =
getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_LISTEN_ALL_NSID)?;
Ok(res == 1)
}
/// `NETLINK_CAP_ACK` (since Linux 4.2). The kernel may fail to allocate the necessary room
/// for the acknowledgment message back to user space. This option trims off the payload of
/// the original netlink message. The netlink message header is still included, so the user can
/// guess from the sequence number which message triggered the acknowledgment.
pub fn set_cap_ack(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(self.0, libc::SOL_NETLINK, libc::NETLINK_CAP_ACK, value)
}
pub fn get_cap_ack(&self) -> Result<bool> {
let res = getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_CAP_ACK)?;
Ok(res == 1)
}
}
/// Wrapper around `getsockopt`:
///
/// ```no_rust
/// int getsockopt(int socket, int level, int option_name, void *restrict option_value, socklen_t *restrict option_len);
/// ```
pub(crate) fn getsockopt<T: Copy>(fd: RawFd, level: libc::c_int, option: libc::c_int) -> Result<T> {
// Create storage for the options we're fetching
let mut slot: T = unsafe { mem::zeroed() };
// Create a mutable raw pointer to the storage so that getsockopt can fill the value
let slot_ptr = &mut slot as *mut T as *mut libc::c_void;
// Let getsockopt know how big our storage is
let mut slot_len = mem::size_of::<T>() as libc::socklen_t;
// getsockopt takes a mutable pointer to the length, because for some options, like
// NETLINK_LIST_MEMBERSHIP, the option value is a list with arbitrary length, and
// getsockopt uses this parameter to signal how big the storage needs to be.
let slot_len_ptr = &mut slot_len as *mut libc::socklen_t;
let res = unsafe { libc::getsockopt(fd, level, option, slot_ptr, slot_len_ptr) };
if res < 0 {
return Err(Error::last_os_error());
}
// Ignore the options that require the length to be set by getsockopt.
// We'll deal with them individually.
assert_eq!(slot_len as usize, mem::size_of::<T>());
Ok(slot)
}
// adapted from rust standard library
fn setsockopt<T>(fd: RawFd, level: libc::c_int, option: libc::c_int, payload: T) -> Result<()> {
let payload = &payload as *const T as *const libc::c_void;
let payload_len = mem::size_of::<T>() as libc::socklen_t;
let res = unsafe { libc::setsockopt(fd, level, option, payload, payload_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
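// A minimal sketch (assumption: `fd` is a valid netlink socket descriptor)
// showing how the two wrappers above pair up for a boolean option, mirroring
// the getters and setters defined earlier:
//
//     setsockopt(fd, libc::SOL_NETLINK, libc::NETLINK_CAP_ACK, 1 as libc::c_int)?;
//     let on = getsockopt::<libc::c_int>(fd, libc::SOL_NETLINK, libc::NETLINK_CAP_ACK)? == 1;
//     assert!(on);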
#[cfg(test)]
mod test {
use super::*;
use crate::protocols::NETLINK_ROUTE;
#[test]
fn new() {
Socket::new(NETLINK_ROUTE).unwrap();
}
#[test]
fn connect() {
let sock = Socket::new(NETLINK_ROUTE).unwrap();
sock.connect(&SocketAddr::new(0, 0)).unwrap();
}
#[test]
fn bind() {
let mut sock = Socket::new(NETLINK_ROUTE).unwrap();
sock.bind(&SocketAddr::new(4321, 0)).unwrap();
}
#[test]
fn bind_auto() {
let mut sock = Socket::new(NETLINK_ROUTE).unwrap();
let addr = sock.bind_auto().unwrap();
// make sure that the address we got from the kernel is there
assert!(addr.port_number() != 0);
}
#[test]
fn set_non_blocking() {
let sock = Socket::new(NETLINK_ROUTE).unwrap();
sock.set_non_blocking(true).unwrap();
sock.set_non_blocking(false).unwrap();
}
#[test]
fn options() {
let mut sock = Socket::new(NETLINK_ROUTE).unwrap();
sock.set_cap_ack(true).unwrap();
assert!(sock.get_cap_ack().unwrap());
sock.set_cap_ack(false).unwrap();
assert!(!sock.get_cap_ack().unwrap());
sock.set_no_enobufs(true).unwrap();
assert!(sock.get_no_enobufs().unwrap());
sock.set_no_enobufs(false).unwrap();
assert!(!sock.get_no_enobufs().unwrap());
sock.set_broadcast_error(true).unwrap();
assert!(sock.get_broadcast_error().unwrap());
sock.set_broadcast_error(false).unwrap();
assert!(!sock.get_broadcast_error().unwrap());
// FIXME: these require root permissions
// sock.set_listen_all_namespaces(true).unwrap();
// assert!(sock.get_listen_all_namespaces().unwrap());
// sock.set_listen_all_namespaces(false).unwrap();
// assert!(!sock.get_listen_all_namespaces().unwrap());
}
}
| {
// FIXME:
//
// Even though for SOCK_DGRAM sockets there's no IO, if our socket is non-blocking,
// connect() might return EINPROGRESS. In theory, the right way to treat EINPROGRESS would
// be to ignore the error, and let the user poll the socket to check when it becomes
// writable, indicating that the connection succeeded. The code already exists in mio for
// TcpStream:
//
// > pub fn connect(stream: net::TcpStream, addr: &SocketAddr) -> io::Result<TcpStream> {
// > set_non_block(stream.as_raw_fd())?;
// > match stream.connect(addr) {
// > Ok(..) => {}
// > Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {}
// > Err(e) => return Err(e),
// > }
// > Ok(TcpStream { inner: stream })
// > }
//
// In practice, since the connection does not require any IO for SOCK_DGRAM sockets, it
// almost never returns EINPROGRESS and so for now, we just return whatever libc::connect
// returns. If it returns EINPROGRESS, the caller will have to handle the error themselves.
//
// Refs:
//
// - https://stackoverflow.com/a/14046386/1836144
// - https://lists.isc.org/pipermail/bind-users/2009-August/077527.html
let (addr, addr_len) = remote_addr.as_raw();
let res = unsafe { libc::connect(self.0, addr, addr_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
} | identifier_body |
socket.rs | // SPDX-License-Identifier: MIT
use std::{
io::{Error, Result},
mem,
os::unix::io::{AsRawFd, FromRawFd, RawFd},
};
use crate::SocketAddr;
/// A netlink socket.
///
/// # Example
///
/// In this example we:
///
/// 1. open a new socket
/// 2. send a message to the kernel
/// 3. read the response
///
/// ```rust
/// use netlink_sys::{protocols::NETLINK_ROUTE, Socket, SocketAddr};
/// use std::process;
///
/// // open a new socket for the NETLINK_ROUTE subsystem (see "man 7 rtnetlink")
/// let mut socket = Socket::new(NETLINK_ROUTE).unwrap();
/// // address of the remote peer we'll send a message to. This particular address is for the kernel
/// let kernel_addr = SocketAddr::new(0, 0);
/// // this is a valid message for listing the network links on the system
/// let pkt = vec![
/// 0x14, 0x00, 0x00, 0x00, 0x12, 0x00, 0x01, 0x03, 0xfd, 0xfe, 0x38, 0x5c, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00,
/// ];
/// // send the message to the kernel
/// let n_sent = socket.send_to(&pkt[..], &kernel_addr, 0).unwrap();
/// assert_eq!(n_sent, pkt.len());
/// // buffer for receiving the response
/// let mut buf = vec![0; 4096];
/// loop {
/// // receive a datagram
/// let (n_received, sender_addr) = socket.recv_from(&mut &mut buf[..], 0).unwrap();
/// assert_eq!(sender_addr, kernel_addr);
/// println!("received datagram {:?}", &buf[..n_received]);
/// if buf[4] == 2 && buf[5] == 0 {
/// println!("the kernel responded with an error");
/// return;
/// }
/// if buf[4] == 3 && buf[5] == 0 {
/// println!("end of dump");
/// return;
/// }
/// }
/// ```
#[derive(Clone, Debug)]
pub struct Socket(RawFd);
impl AsRawFd for Socket {
fn as_raw_fd(&self) -> RawFd {
self.0
}
}
impl FromRawFd for Socket {
unsafe fn from_raw_fd(fd: RawFd) -> Self {
Socket(fd)
}
}
impl Drop for Socket {
fn drop(&mut self) {
unsafe { libc::close(self.0) };
}
}
impl Socket {
/// Open a new socket for the given netlink subsystem. `protocol` must be one of the
/// [`netlink_sys::protocols`][protos] constants.
///
/// [protos]: crate::protocols
pub fn new(protocol: isize) -> Result<Self> {
let res = unsafe {
libc::socket(
libc::PF_NETLINK,
libc::SOCK_DGRAM | libc::SOCK_CLOEXEC,
protocol as libc::c_int,
)
};
if res < 0 {
return Err(Error::last_os_error());
}
Ok(Socket(res))
}
/// Bind the socket to the given address
pub fn bind(&mut self, addr: &SocketAddr) -> Result<()> {
let (addr_ptr, addr_len) = addr.as_raw();
let res = unsafe { libc::bind(self.0, addr_ptr, addr_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
/// Bind the socket to an address assigned by the kernel, and return that address.
pub fn bind_auto(&mut self) -> Result<SocketAddr> {
let mut addr = SocketAddr::new(0, 0);
self.bind(&addr)?;
self.get_address(&mut addr)?;
Ok(addr)
}
/// Get the socket address
pub fn get_address(&self, addr: &mut SocketAddr) -> Result<()> {
let (addr_ptr, mut addr_len) = addr.as_raw_mut();
let addr_len_copy = addr_len;
let addr_len_ptr = &mut addr_len as *mut libc::socklen_t;
let res = unsafe { libc::getsockname(self.0, addr_ptr, addr_len_ptr) };
if res < 0 {
return Err(Error::last_os_error());
}
assert_eq!(addr_len, addr_len_copy);
Ok(())
}
// when building with --features smol we don't need this
#[allow(dead_code)]
/// Make this socket non-blocking
pub fn set_non_blocking(&self, non_blocking: bool) -> Result<()> {
let mut non_blocking = non_blocking as libc::c_int;
let res = unsafe { libc::ioctl(self.0, libc::FIONBIO, &mut non_blocking) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
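// A usage sketch (assumptions: `socket` is a bound `Socket` and `buf` a pre-sized
// Vec<u8>): with a non-blocking socket, a read with no queued datagram fails with
// `WouldBlock`, which callers typically treat as "try again later":
//
//     socket.set_non_blocking(true)?;
//     match socket.recv_from(&mut &mut buf[..], 0) {
//         Ok((n, addr)) => println!("{} bytes from {:?}", n, addr),
//         Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => { /* poll and retry */ }
//         Err(e) => return Err(e),
//     }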
/// Connect the socket to the given address. Netlink is a connection-less protocol, so a socket can communicate with
/// multiple peers with the [`Socket::send_to`] and [`Socket::recv_from`] methods. However, if the socket only needs
/// to communicate with one peer, it is convenient not to have to bother with the peer address. This is what
/// `connect` is for. After calling `connect`, [`Socket::send`] and [`Socket::recv`] respectively send and receive
/// datagrams to and from `remote_addr`.
///
/// # Examples
///
/// In this example we:
///
/// 1. open a socket
/// 2. connect it to the kernel with [`Socket::connect`]
/// 3. send a request to the kernel with [`Socket::send`]
/// 4. read the response (which can span over several messages) [`Socket::recv`]
///
/// ```rust
/// use netlink_sys::{protocols::NETLINK_ROUTE, Socket, SocketAddr};
/// use std::process;
///
/// let mut socket = Socket::new(NETLINK_ROUTE).unwrap();
/// let _ = socket.bind_auto().unwrap();
/// let kernel_addr = SocketAddr::new(0, 0);
/// socket.connect(&kernel_addr).unwrap();
/// // This is a valid message for listing the network links on the system
/// let msg = vec![
/// 0x14, 0x00, 0x00, 0x00, 0x12, 0x00, 0x01, 0x03, 0xfd, 0xfe, 0x38, 0x5c, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00,
/// ];
/// let n_sent = socket.send(&msg[..], 0).unwrap();
/// assert_eq!(n_sent, msg.len());
/// // buffer for receiving the response
/// let mut buf = vec![0; 4096];
/// loop {
/// let mut n_received = socket.recv(&mut &mut buf[..], 0).unwrap();
/// println!("received {:?}", &buf[..n_received]);
/// if buf[4] == 2 && buf[5] == 0 {
/// println!("the kernel responded with an error");
/// return;
/// }
/// if buf[4] == 3 && buf[5] == 0 {
/// println!("end of dump");
/// return;
/// }
/// }
/// ```
pub fn connect(&self, remote_addr: &SocketAddr) -> Result<()> {
// FIXME:
//
// Even though for SOCK_DGRAM sockets there's no IO, if our socket is non-blocking,
// connect() might return EINPROGRESS. In theory, the right way to treat EINPROGRESS would
// be to ignore the error, and let the user poll the socket to check when it becomes
// writable, indicating that the connection succeeded. The code already exists in mio for
// TcpStream:
//
// > pub fn connect(stream: net::TcpStream, addr: &SocketAddr) -> io::Result<TcpStream> {
// > set_non_block(stream.as_raw_fd())?;
// > match stream.connect(addr) {
// > Ok(..) => {}
// > Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {}
// > Err(e) => return Err(e),
// > }
// > Ok(TcpStream { inner: stream })
// > }
//
// In practice, since the connection does not require any IO for SOCK_DGRAM sockets, it
// almost never returns EINPROGRESS and so for now, we just return whatever libc::connect
// returns. If it returns EINPROGRESS, the caller will have to handle the error themselves.
//
// Refs:
//
// - https://stackoverflow.com/a/14046386/1836144
// - https://lists.isc.org/pipermail/bind-users/2009-August/077527.html
let (addr, addr_len) = remote_addr.as_raw();
let res = unsafe { libc::connect(self.0, addr, addr_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
// Most of the comments in this method come from a discussion on rust users forum.
// [thread]: https://users.rust-lang.org/t/help-understanding-libc-call/17308/9
//
/// Read a datagram from the socket and return the number of bytes that have been read and the address of the
/// sender. The data being read is copied into `buf`. If `buf` is too small, the datagram is truncated. The
/// supported flags are the `MSG_*` described in `man 2 recvmsg`
///
/// # Warning
///
/// In datagram-oriented protocols, `recv` and `recvfrom` normally receive only ONE datagram, but this does not always
/// seem to hold for netlink sockets: with some protocols like `NETLINK_AUDIT`, multiple netlink packets can be
/// read with a single call.
pub fn recv_from<B>(&self, buf: &mut B, flags: libc::c_int) -> Result<(usize, SocketAddr)>
where
B: bytes::BufMut,
{
// Create an empty storage for the address. Note that the Rust standard library creates a
// sockaddr_storage so that it works for any address family, but here, we already know that
// we'll have a Netlink address, so we can create the appropriate storage.
let mut addr = unsafe { mem::zeroed::<libc::sockaddr_nl>() };
// recvfrom takes a *sockaddr as parameter so that it can accept any kind of address
// storage, so we need to create such a pointer for the sockaddr_nl we just initialized.
//
// Create a raw pointer to Cast our raw pointer to a
// our storage. We cannot generic pointer to *sockaddr
// pass it to recvfrom yet. that recvfrom can use
// ^ ^
// | |
// +--------------+---------------+ +---------+--------+
// / \ / \
let addr_ptr = &mut addr as *mut libc::sockaddr_nl as *mut libc::sockaddr;
// Why do we need to pass the address length? We're passing a generic *sockaddr to
// recvfrom. Somehow recvfrom needs to make sure that the address of the received packet
// would fit into the actual type that is behind *sockaddr: it could be a sockaddr_nl but
// also a sockaddr_in, a sockaddr_in6, or even the generic sockaddr_storage that can store
// any address.
let mut addrlen = mem::size_of_val(&addr);
// recvfrom does not take the address length by value (see [thread]), so we need to create
// a pointer to it.
let addrlen_ptr = &mut addrlen as *mut usize as *mut libc::socklen_t;
let chunk = buf.chunk_mut();
// Cast the *mut u8 into *mut void.
// This is equivalent to casting a *char into *void
// See [thread]
// ^
// Create a *mut u8 |
// ^ |
// | |
// +------+-------+ +--------+-------+
// / \ / \
let buf_ptr = chunk.as_mut_ptr() as *mut libc::c_void;
let buf_len = chunk.len() as libc::size_t;
let res = unsafe { libc::recvfrom(self.0, buf_ptr, buf_len, flags, addr_ptr, addrlen_ptr) };
if res < 0 {
return Err(Error::last_os_error());
} else {
// with `MSG_TRUNC` `res` might exceed `buf_len`
let written = std::cmp::min(buf_len, res as usize);
unsafe {
buf.advance_mut(written);
}
}
Ok((res as usize, SocketAddr(addr)))
}
/// For a connected socket, `recv` reads a datagram from the socket. The sender is the remote peer the socket is
/// connected to (see [`Socket::connect`]). See also [`Socket::recv_from`]
pub fn recv<B>(&self, buf: &mut B, flags: libc::c_int) -> Result<usize>
where
B: bytes::BufMut,
{
let chunk = buf.chunk_mut();
let buf_ptr = chunk.as_mut_ptr() as *mut libc::c_void;
let buf_len = chunk.len() as libc::size_t;
let res = unsafe { libc::recv(self.0, buf_ptr, buf_len, flags) };
if res < 0 {
return Err(Error::last_os_error());
} else {
// with `MSG_TRUNC` `res` might exceed `buf_len`
let written = std::cmp::min(buf_len, res as usize);
unsafe {
buf.advance_mut(written);
}
}
Ok(res as usize)
}
/// Receive a full message. Unlike [`Socket::recv_from`], which truncates messages that exceed the length of the
/// buffer passed as argument, this method always reads a whole message, no matter its size.
pub fn recv_from_full(&self) -> Result<(Vec<u8>, SocketAddr)> {
// Peek
let mut buf: Vec<u8> = Vec::new();
let (peek_len, _) = self.recv_from(&mut buf, libc::MSG_PEEK | libc::MSG_TRUNC)?;
// Receive
buf.clear();
buf.reserve(peek_len);
let (rlen, addr) = self.recv_from(&mut buf, 0)?;
assert_eq!(rlen, peek_len);
Ok((buf, addr))
}
/// Send the given buffer `buf` to the remote peer with address `addr`. The supported flags are the `MSG_*` values
/// documented in `man 2 send`.
pub fn send_to(&self, buf: &[u8], addr: &SocketAddr, flags: libc::c_int) -> Result<usize> {
let (addr_ptr, addr_len) = addr.as_raw();
let buf_ptr = buf.as_ptr() as *const libc::c_void;
let buf_len = buf.len() as libc::size_t;
let res = unsafe { libc::sendto(self.0, buf_ptr, buf_len, flags, addr_ptr, addr_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(res as usize)
}
/// For a connected socket, `send` sends the given buffer `buf` to the remote peer the socket is connected to. See
/// also [`Socket::connect`] and [`Socket::send_to`].
pub fn send(&self, buf: &[u8], flags: libc::c_int) -> Result<usize> {
let buf_ptr = buf.as_ptr() as *const libc::c_void;
let buf_len = buf.len() as libc::size_t;
let res = unsafe { libc::send(self.0, buf_ptr, buf_len, flags) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(res as usize)
}
pub fn set_pktinfo(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(self.0, libc::SOL_NETLINK, libc::NETLINK_PKTINFO, value)
}
pub fn get_pktinfo(&self) -> Result<bool> {
let res = getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_PKTINFO)?;
Ok(res == 1)
}
pub fn add_membership(&mut self, group: u32) -> Result<()> {
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_ADD_MEMBERSHIP,
group,
)
}
pub fn drop_membership(&mut self, group: u32) -> Result<()> {
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_DROP_MEMBERSHIP,
group,
)
}
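// A membership sketch (group numbers are protocol specific and assumed here;
// e.g. group 1 is RTNLGRP_LINK for NETLINK_ROUTE on Linux; `socket` is a
// mutable, bound `Socket`). Joining a group makes the kernel deliver the
// corresponding notifications to this socket:
//
//     socket.add_membership(1)?;
//     // ... receive notifications with recv_from ...
//     socket.drop_membership(1)?;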
// pub fn list_membership(&self) -> Vec<u32> {
// unimplemented!();
// // getsockopt won't be enough here, because we may need to perform 2 calls, and because the
// // length of the list returned by libc::getsockopt is returned by mutating the length
// // argument, which our implementation of getsockopt forbids.
// }
/// `NETLINK_BROADCAST_ERROR` (since Linux 2.6.30). When not set, `netlink_broadcast()` only
/// reports `ESRCH` errors and silently ignores `NOBUFS` errors.
pub fn set_broadcast_error(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_BROADCAST_ERROR,
value,
)
}
pub fn get_broadcast_error(&self) -> Result<bool> {
let res =
getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_BROADCAST_ERROR)?;
Ok(res == 1)
}
/// `NETLINK_NO_ENOBUFS` (since Linux 2.6.30). This flag can be used by unicast and broadcast
/// listeners to avoid receiving `ENOBUFS` errors.
pub fn set_no_enobufs(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value | else { 0 };
setsockopt(self.0, libc::SOL_NETLINK, libc::NETLINK_NO_ENOBUFS, value)
}
pub fn get_no_enobufs(&self) -> Result<bool> {
let res = getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_NO_ENOBUFS)?;
Ok(res == 1)
}
/// `NETLINK_LISTEN_ALL_NSID` (since Linux 4.2). When set, this socket will receive netlink
/// notifications from all network namespaces that have an nsid assigned into the network
/// namespace where the socket has been opened. The nsid is sent to user space via ancillary
/// data.
pub fn set_listen_all_namespaces(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_LISTEN_ALL_NSID,
value,
)
}
pub fn get_listen_all_namespaces(&self) -> Result<bool> {
let res =
getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_LISTEN_ALL_NSID)?;
Ok(res == 1)
}
/// `NETLINK_CAP_ACK` (since Linux 4.2). The kernel may fail to allocate the necessary room
/// for the acknowledgment message back to user space. This option trims off the payload of
/// the original netlink message. The netlink message header is still included, so the user can
/// guess from the sequence number which message triggered the acknowledgment.
pub fn set_cap_ack(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(self.0, libc::SOL_NETLINK, libc::NETLINK_CAP_ACK, value)
}
pub fn get_cap_ack(&self) -> Result<bool> {
let res = getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_CAP_ACK)?;
Ok(res == 1)
}
}
/// Wrapper around `getsockopt`:
///
/// ```no_rust
/// int getsockopt(int socket, int level, int option_name, void *restrict option_value, socklen_t *restrict option_len);
/// ```
pub(crate) fn getsockopt<T: Copy>(fd: RawFd, level: libc::c_int, option: libc::c_int) -> Result<T> {
// Create storage for the options we're fetching
let mut slot: T = unsafe { mem::zeroed() };
// Create a mutable raw pointer to the storage so that getsockopt can fill the value
let slot_ptr = &mut slot as *mut T as *mut libc::c_void;
// Let getsockopt know how big our storage is
let mut slot_len = mem::size_of::<T>() as libc::socklen_t;
// getsockopt takes a mutable pointer to the length, because for some options, like
// NETLINK_LIST_MEMBERSHIP, the option value is a list with arbitrary length, and
// getsockopt uses this parameter to signal how big the storage needs to be.
let slot_len_ptr = &mut slot_len as *mut libc::socklen_t;
let res = unsafe { libc::getsockopt(fd, level, option, slot_ptr, slot_len_ptr) };
if res < 0 {
return Err(Error::last_os_error());
}
// Ignore the options that require the length to be set by getsockopt.
// We'll deal with them individually.
assert_eq!(slot_len as usize, mem::size_of::<T>());
Ok(slot)
}
// adapted from rust standard library
fn setsockopt<T>(fd: RawFd, level: libc::c_int, option: libc::c_int, payload: T) -> Result<()> {
let payload = &payload as *const T as *const libc::c_void;
let payload_len = mem::size_of::<T>() as libc::socklen_t;
let res = unsafe { libc::setsockopt(fd, level, option, payload, payload_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use crate::protocols::NETLINK_ROUTE;
#[test]
fn new() {
Socket::new(NETLINK_ROUTE).unwrap();
}
#[test]
fn connect() {
let sock = Socket::new(NETLINK_ROUTE).unwrap();
sock.connect(&SocketAddr::new(0, 0)).unwrap();
}
#[test]
fn bind() {
let mut sock = Socket::new(NETLINK_ROUTE).unwrap();
sock.bind(&SocketAddr::new(4321, 0)).unwrap();
}
#[test]
fn bind_auto() {
let mut sock = Socket::new(NETLINK_ROUTE).unwrap();
let addr = sock.bind_auto().unwrap();
// make sure that the address we got from the kernel is there
assert!(addr.port_number() != 0);
}
#[test]
fn set_non_blocking() {
let sock = Socket::new(NETLINK_ROUTE).unwrap();
sock.set_non_blocking(true).unwrap();
sock.set_non_blocking(false).unwrap();
}
#[test]
fn options() {
let mut sock = Socket::new(NETLINK_ROUTE).unwrap();
sock.set_cap_ack(true).unwrap();
assert!(sock.get_cap_ack().unwrap());
sock.set_cap_ack(false).unwrap();
assert!(!sock.get_cap_ack().unwrap());
sock.set_no_enobufs(true).unwrap();
assert!(sock.get_no_enobufs().unwrap());
sock.set_no_enobufs(false).unwrap();
assert!(!sock.get_no_enobufs().unwrap());
sock.set_broadcast_error(true).unwrap();
assert!(sock.get_broadcast_error().unwrap());
sock.set_broadcast_error(false).unwrap();
assert!(!sock.get_broadcast_error().unwrap());
// FIXME: these require root permissions
// sock.set_listen_all_namespaces(true).unwrap();
// assert!(sock.get_listen_all_namespaces().unwrap());
// sock.set_listen_all_namespaces(false).unwrap();
// assert!(!sock.get_listen_all_namespaces().unwrap());
}
}
| { 1 } | conditional_block |
context.go | package terraform
import (
"context"
"fmt"
"log"
"sort"
"strings"
"sync"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/version"
)
// InputMode defines what sort of input will be asked for when Input
// is called on Context.
type InputMode byte
const (
// InputModeVar asks for all variables
InputModeVar InputMode = 1 << iota
// InputModeVarUnset asks for variables which are not set yet.
// InputModeVar must be set for this to have an effect.
InputModeVarUnset
// InputModeProvider asks for provider variables
InputModeProvider
// InputModeStd is the standard operating mode and asks for both variables
// and providers.
InputModeStd = InputModeVar | InputModeProvider
)
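// A small usage sketch (assumes a *Context named ctx built with NewContext):
// the modes combine as bit flags, e.g. asking only for variables that are not
// set yet:
//
//	if err := ctx.Input(InputModeVar | InputModeVarUnset); err != nil {
//		// handle the error
//	}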
var (
// contextFailOnShadowError will cause Context operations to return
// errors when shadow operations fail. This is only used for testing.
contextFailOnShadowError = false
// contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every
// Plan operation, effectively testing the Diff DeepCopy whenever
// a Plan occurs. This is enabled for tests.
contextTestDeepCopyOnPlan = false
)
// ContextOpts are the user-configurable options to create a context with
// NewContext.
type ContextOpts struct {
Meta *ContextMeta
Destroy bool
Diff *Diff
Hooks []Hook
Module *module.Tree
Parallelism int
State *State
StateFutureAllowed bool
ProviderResolver ResourceProviderResolver
Provisioners map[string]ResourceProvisionerFactory
Shadow bool
Targets []string
Variables map[string]interface{}
// If non-nil, will apply as additional constraints on the provider
// plugins that will be requested from the provider resolver.
ProviderSHA256s map[string][]byte
SkipProviderVerify bool
UIInput UIInput
}
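// A minimal construction sketch (module, state and resolver are placeholders
// assumed to be prepared elsewhere; most fields are optional):
//
//	ctx, err := NewContext(&ContextOpts{
//		Module:           module,   // *module.Tree
//		State:            state,    // may be nil; NewContext initializes one
//		ProviderResolver: resolver, // ResourceProviderResolver
//		Parallelism:      10,
//	})
//	if err != nil {
//		// handle the error
//	}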
// ContextMeta is metadata about the running context. This is information
// that this package or structure cannot determine on its own but exposes
// into Terraform in various ways. This must be provided by the Context
// initializer.
type ContextMeta struct {
Env string // Env is the state environment
}
// Context represents all the context that Terraform needs in order to
// perform operations on infrastructure. This structure is built using
// NewContext. See the documentation for that.
//
// Extra functions on Context can be found in context_*.go files.
type Context struct {
// Maintainer note: Anytime this struct is changed, please verify
// that newShadowContext still does the right thing. Tests should
// fail regardless but putting this note here as well.
components contextComponentFactory
destroy bool
diff *Diff
diffLock sync.RWMutex
hooks []Hook
meta *ContextMeta
module *module.Tree
sh *stopHook
shadow bool
state *State
stateLock sync.RWMutex
targets []string
uiInput UIInput
variables map[string]interface{}
l sync.Mutex // Lock acquired during any task
parallelSem Semaphore
providerInputConfig map[string]map[string]interface{}
providerSHA256s map[string][]byte
runLock sync.Mutex
runCond *sync.Cond
runContext context.Context
runContextCancel context.CancelFunc
shadowErr error
}
// NewContext creates a new Context structure.
//
// Once a Context is created, the pointer values within ContextOpts
// should not be mutated in any way, since the pointers are copied, not
// the values themselves.
func NewContext(opts *ContextOpts) (*Context, error) {
// Validate the version requirement if it is given
if opts.Module != nil {
if err := CheckRequiredVersion(opts.Module); err != nil {
return nil, err
}
}
// Copy all the hooks and add our stop hook. We don't append directly
// to the Config so that we're not modifying that in-place.
sh := new(stopHook)
hooks := make([]Hook, len(opts.Hooks)+1)
copy(hooks, opts.Hooks)
hooks[len(opts.Hooks)] = sh
state := opts.State
if state == nil {
state = new(State)
state.init()
}
// If our state is from the future, then error. Callers can avoid
// this error by explicitly setting `StateFutureAllowed`.
if !opts.StateFutureAllowed && state.FromFutureTerraform() {
return nil, fmt.Errorf(
"Terraform doesn't allow running any operations against a state\n"+
"that was written by a future Terraform version. The state is\n"+
"reporting it is written by Terraform '%s'.\n\n"+
"Please run at least that version of Terraform to continue.",
state.TFVersion)
}
// Explicitly reset our state version to our current version so that
// any operations we do will write out that our latest version
// has run.
state.TFVersion = version.Version
// Determine parallelism, default to 10. We do this both to limit
// CPU pressure and to have an extra guard against rate throttling
// from providers.
par := opts.Parallelism
if par == 0 {
par = 10
}
// Set up the variables in the following sequence:
// 0 - Take default values from the configuration
// 1 - Take values from TF_VAR_x environment variables
// 2 - Take values specified in -var flags, overriding values
// set by environment variables if necessary. This includes
// values taken from -var-file in addition.
variables := make(map[string]interface{})
if opts.Module != nil {
var err error
variables, err = Variables(opts.Module, opts.Variables)
if err != nil {
return nil, err
}
}
// Bind available provider plugins to the constraints in config
var providers map[string]ResourceProviderFactory
if opts.ProviderResolver != nil {
var err error
deps := ModuleTreeDependencies(opts.Module, state)
reqd := deps.AllPluginRequirements()
if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify {
reqd.LockExecutables(opts.ProviderSHA256s)
}
providers, err = resourceProviderFactories(opts.ProviderResolver, reqd)
if err != nil {
return nil, err
}
} else {
providers = make(map[string]ResourceProviderFactory)
}
diff := opts.Diff
if diff == nil {
diff = &Diff{}
}
return &Context{
components: &basicComponentFactory{
providers: providers,
provisioners: opts.Provisioners,
},
destroy: opts.Destroy,
diff: diff,
hooks: hooks,
meta: opts.Meta,
module: opts.Module,
shadow: opts.Shadow,
state: state,
targets: opts.Targets,
uiInput: opts.UIInput,
variables: variables,
parallelSem: NewSemaphore(par),
providerInputConfig: make(map[string]map[string]interface{}),
providerSHA256s: opts.ProviderSHA256s,
sh: sh,
}, nil
}
type ContextGraphOpts struct {
// If true, validates the graph structure (checks for cycles).
Validate bool
// Legacy graphs only: won't prune the graph
Verbose bool
}
// Graph returns the graph used for the given operation type.
//
// The most extensive or complex graph type is GraphTypePlan.
func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) |
// ShadowError returns any errors caught during a shadow operation.
//
// A shadow operation is an operation run in parallel to a real operation
// that performs the same tasks using new logic on copied state. The results
// are compared to ensure that the new logic works the same as the old logic.
// The shadow never affects the real operation or return values.
//
// The results of the shadow operation are only available through this function
// call after a real operation is complete.
//
// For API consumers of Context, you can safely ignore this function
// completely if you have no interest in helping report experimental feature
// errors to Terraform maintainers. Otherwise, please call this function
// after every operation and report this to the user.
//
// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
// the real state or result of a real operation. They are purely informational
// to assist in future Terraform versions being more stable. Please message
// this effectively to the end user.
//
// This must be called only when no other operation is running (refresh,
// plan, etc.). The result can be used in parallel to any other operation
// running.
func (c *Context) ShadowError() error {
return c.shadowErr
}
// State returns a copy of the current state associated with this context.
//
// This cannot safely be called in parallel with any other Context function.
func (c *Context) State() *State {
return c.state.DeepCopy()
}
// Interpolater returns an Interpolater built on a copy of the state
// that can be used to test interpolation values.
func (c *Context) Interpolater() *Interpolater {
var varLock sync.Mutex
var stateLock sync.RWMutex
return &Interpolater{
Operation: walkApply,
Meta: c.meta,
Module: c.module,
State: c.state.DeepCopy(),
StateLock: &stateLock,
VariableValues: c.variables,
VariableValuesLock: &varLock,
}
}
// Input asks for input to fill variables and provider configurations.
// This modifies the configuration in-place, so asking for Input twice
// may result in different UI output showing different current values.
func (c *Context) Input(mode InputMode) error {
defer c.acquireRun("input")()
if mode&InputModeVar != 0 {
// Walk the variables first for the root module. We walk them in
// alphabetical order for UX reasons.
rootConf := c.module.Config()
names := make([]string, len(rootConf.Variables))
m := make(map[string]*config.Variable)
for i, v := range rootConf.Variables {
names[i] = v.Name
m[v.Name] = v
}
sort.Strings(names)
for _, n := range names {
// If we only care about unset variables, then if the variable
// is set, continue on.
if mode&InputModeVarUnset != 0 {
if _, ok := c.variables[n]; ok {
continue
}
}
var valueType config.VariableType
v := m[n]
switch valueType = v.Type(); valueType {
case config.VariableTypeUnknown:
continue
case config.VariableTypeMap:
// OK
case config.VariableTypeList:
// OK
case config.VariableTypeString:
// OK
default:
panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
}
// If the variable is not already set, and the variable defines a
// default, use that for the value.
if _, ok := c.variables[n]; !ok {
if v.Default != nil {
c.variables[n] = v.Default.(string)
continue
}
}
// this should only happen during tests
if c.uiInput == nil {
log.Println("[WARN] Content.uiInput is nil")
continue
}
// Ask the user for a value for this variable
var value string
retry := 0
for {
var err error
value, err = c.uiInput.Input(&InputOpts{
Id: fmt.Sprintf("var.%s", n),
Query: fmt.Sprintf("var.%s", n),
Description: v.Description,
})
if err != nil {
return fmt.Errorf(
"Error asking for %s: %s", n, err)
}
if value == "" && v.Required() {
// Redo if it is required, but abort if we keep getting
// blank entries
if retry > 2 {
return fmt.Errorf("missing required value for %q", n)
}
retry++
continue
}
break
}
// no value provided, so don't set the variable at all
if value == "" {
continue
}
decoded, err := parseVariableAsHCL(n, value, valueType)
if err != nil {
return err
}
if decoded != nil {
c.variables[n] = decoded
}
}
}
if mode&InputModeProvider != 0 {
// Build the graph
graph, err := c.Graph(GraphTypeInput, nil)
if err != nil {
return err
}
// Do the walk
if _, err := c.walk(graph, walkInput); err != nil {
return err
}
}
return nil
}
// Apply applies the changes represented by this context and returns
// the resulting state.
//
// Even in the case an error is returned, the state may be returned and will
// potentially be partially updated. In addition to returning the resulting
// state, this context is updated with the latest state.
//
// If the state is required after an error, the caller should call
// Context.State, rather than rely on the return value.
//
// TODO: Apply and Refresh should either always return a state, or rely on the
// State() method. Currently the helper/resource testing framework relies
// on the absence of a returned state to determine if Destroy can be
// called, so that will need to be refactored before this can be changed.
func (c *Context) Apply() (*State, error) {
defer c.acquireRun("apply")()
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
graph, err := c.Graph(GraphTypeApply, nil)
if err != nil {
return nil, err
}
// Determine the operation
operation := walkApply
if c.destroy {
operation = walkDestroy
}
// Walk the graph
walker, err := c.walk(graph, operation)
if len(walker.ValidationErrors) > 0 {
err = multierror.Append(err, walker.ValidationErrors...)
}
// Clean out any unused things
c.state.prune()
return c.state, err
}
// Plan generates an execution plan for the given context.
//
// The execution plan encapsulates the context and can be stored
// in order to reinstantiate a context later for Apply.
//
// Plan also updates the diff of this context to be the diff generated
// by the plan, so Apply can be called after.
func (c *Context) Plan() (*Plan, error) {
defer c.acquireRun("plan")()
p := &Plan{
Module: c.module,
Vars: c.variables,
State: c.state,
Targets: c.targets,
TerraformVersion: version.String(),
ProviderSHA256s: c.providerSHA256s,
}
var operation walkOperation
if c.destroy {
operation = walkPlanDestroy
p.Destroy = true
} else {
// Set our state to be something temporary. We do this so that
// the plan can update a fake state so that variables work, then
// we replace it back with our old state.
old := c.state
if old == nil {
c.state = &State{}
c.state.init()
} else {
c.state = old.DeepCopy()
}
defer func() {
c.state = old
}()
operation = walkPlan
}
// Setup our diff
c.diffLock.Lock()
c.diff = new(Diff)
c.diff.init()
c.diffLock.Unlock()
// Build the graph.
graphType := GraphTypePlan
if c.destroy {
graphType = GraphTypePlanDestroy
}
graph, err := c.Graph(graphType, nil)
if err != nil {
return nil, err
}
// Do the walk
walker, err := c.walk(graph, operation)
if err != nil {
return nil, err
}
p.Diff = c.diff
// If this is true, it means we're running unit tests. In this case,
// we perform a deep copy just to ensure that all context tests also
// test that a diff is copy-able. This will panic if it fails. This
// is enabled during unit tests.
//
// This should never be true during production usage, but even if it is,
// it can't do any real harm.
if contextTestDeepCopyOnPlan {
p.Diff.DeepCopy()
}
/*
// We don't do the reverification during the new destroy plan because
// it will use a different apply process.
if X_legacyGraph {
// Now that we have a diff, we can build the exact graph that Apply will use
// and catch any possible cycles during the Plan phase.
if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
return nil, err
}
}
*/
var errs error
if len(walker.ValidationErrors) > 0 {
errs = multierror.Append(errs, walker.ValidationErrors...)
}
return p, errs
}
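// A typical sequence sketch (ctx is a *Context configured as above): Plan
// updates the context's diff in place, after which Apply can run against it.
// The returned *Plan could also be persisted and used to rebuild a context later.
//
//	if _, err := ctx.Plan(); err != nil {
//		// handle the error
//	}
//	state, err := ctx.Apply()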
// Refresh goes through all the resources in the state and refreshes them
// to their latest state. This will update the state that this context
// works with, along with returning it.
//
// Even in the case an error is returned, the state may be returned and
// will potentially be partially updated.
func (c *Context) Refresh() (*State, error) {
defer c.acquireRun("refresh")()
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
graph, err := c.Graph(GraphTypeRefresh, nil)
if err != nil {
return nil, err
}
// Do the walk
if _, err := c.walk(graph, walkRefresh); err != nil {
return nil, err
}
// Clean out any unused things
c.state.prune()
return c.state, nil
}
// Stop stops the running task.
//
// Stop will block until the task completes.
func (c *Context) Stop() {
log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")
c.l.Lock()
defer c.l.Unlock()
// If we're running, then stop
if c.runContextCancel != nil {
log.Printf("[WARN] terraform: run context exists, stopping")
// Tell the hook we want to stop
c.sh.Stop()
// Stop the context
c.runContextCancel()
c.runContextCancel = nil
}
// Grab the condition var before we exit
if cond := c.runCond; cond != nil {
cond.Wait()
}
log.Printf("[WARN] terraform: stop complete")
}
// Validate validates the configuration and returns any warnings or errors.
func (c *Context) Validate() tfdiags.Diagnostics {
defer c.acquireRun("validate")()
var diags tfdiags.Diagnostics
// Validate the configuration itself
diags = diags.Append(c.module.Validate())
// This only needs to be done for the root module, since inter-module
// variables are validated in the module tree.
if config := c.module.Config(); config != nil {
// Validate the user variables
for _, err := range smcUserVariables(config, c.variables) {
diags = diags.Append(err)
}
}
// If we have errors at this point, the graphing has no chance,
// so just bail early.
if diags.HasErrors() {
return diags
}
// Build the graph so we can walk it and run Validate on nodes.
// We also validate the graph generated here, but this graph doesn't
// necessarily match the graph that Plan will generate, so we'll validate the
// graph again later after Planning.
graph, err := c.Graph(GraphTypeValidate, nil)
if err != nil {
diags = diags.Append(err)
return diags
}
// Walk
walker, err := c.walk(graph, walkValidate)
if err != nil {
diags = diags.Append(err)
}
sort.Strings(walker.ValidationWarnings)
sort.Slice(walker.ValidationErrors, func(i, j int) bool {
return walker.ValidationErrors[i].Error() < walker.ValidationErrors[j].Error()
})
for _, warn := range walker.ValidationWarnings {
diags = diags.Append(tfdiags.SimpleWarning(warn))
}
for _, err := range walker.ValidationErrors {
diags = diags.Append(err)
}
return diags
}
// Module returns the module tree associated with this context.
func (c *Context) Module() *module.Tree {
return c.module
}
// Variables will return the mapping of variables that were defined
// for this Context. If Input was called, this mapping may be different
// than what was given.
func (c *Context) Variables() map[string]interface{} {
return c.variables
}
// SetVariable sets a variable after a context has already been built.
func (c *Context) SetVariable(k string, v interface{}) {
c.variables[k] = v
}
func (c *Context) acquireRun(phase string) func() {
// With the run lock held, grab the context lock to make changes
// to the run context.
c.l.Lock()
defer c.l.Unlock()
// Wait until we're no longer running
for c.runCond != nil {
c.runCond.Wait()
}
// Build our lock
c.runCond = sync.NewCond(&c.l)
// Setup debugging
dbug.SetPhase(phase)
// Create a new run context
c.runContext, c.runContextCancel = context.WithCancel(context.Background())
// Reset the stop hook so we're not stopped
c.sh.Reset()
// Reset the shadow errors
c.shadowErr = nil
return c.releaseRun
}
func (c *Context) releaseRun() {
// Grab the context lock so that we can make modifications to fields
c.l.Lock()
defer c.l.Unlock()
// setting the phase to "INVALID" lets us easily detect if we have
// operations happening outside of a run, or we missed setting the proper
// phase
dbug.SetPhase("INVALID")
// End our run. We check if runContext is non-nil because it can be
// set to nil if it was cancelled via Stop()
if c.runContextCancel != nil {
c.runContextCancel()
}
// Unlock all waiting our condition
cond := c.runCond
c.runCond = nil
cond.Broadcast()
// Unset the context
c.runContext = nil
}
func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, error) {
// Keep track of the "real" context which is the context that does
// the real work: talking to real providers, modifying real state, etc.
realCtx := c
log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
walker := &ContextGraphWalker{
Context: realCtx,
Operation: operation,
StopContext: c.runContext,
}
// Watch for a stop so we can call the provider Stop() API.
watchStop, watchWait := c.watchStop(walker)
// Walk the real graph, this will block until it completes
realErr := graph.Walk(walker)
// Close the channel so the watcher stops, and wait for it to return.
close(watchStop)
<-watchWait
return walker, realErr
}
// watchStop immediately returns a `stop` and a `wait` chan after dispatching
// the watchStop goroutine. This will watch the runContext for cancellation and
// stop the providers accordingly. When the watch is no longer needed, the
// `stop` chan should be closed before waiting on the `wait` chan.
// The `wait` chan is important, because without synchronizing with the end of
// the watchStop goroutine, the runContext may also be closed during the select,
// incorrectly causing providers to be stopped. Even if the graph walk is done
// at that point, stopping a provider permanently cancels its StopContext, which
// can cause later actions to fail.
func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {
stop := make(chan struct{})
wait := make(chan struct{})
// get the runContext cancellation channel now, because releaseRun will
// write to the runContext field.
done := c.runContext.Done()
go func() {
defer close(wait)
// Wait for a stop or completion
select {
case <-done:
// done means the context was canceled, so we need to try and stop
// providers.
case <-stop:
// our own stop channel was closed.
return
}
// If we're here, we're stopped, trigger the call.
{
// Copy the providers so that a misbehaved blocking Stop doesn't
// completely hang Terraform.
walker.providerLock.Lock()
ps := make([]ResourceProvider, 0, len(walker.providerCache))
for _, p := range walker.providerCache {
ps = append(ps, p)
}
defer walker.providerLock.Unlock()
for _, p := range ps {
// We ignore the error for now since there isn't any reasonable
// action to take if there is an error here, since the stop is still
// advisory: Terraform will exit once the graph node completes.
p.Stop()
}
}
{
// Call stop on all the provisioners
walker.provisionerLock.Lock()
ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache))
for _, p := range walker.provisionerCache {
ps = append(ps, p)
}
defer walker.provisionerLock.Unlock()
for _, p := range ps {
// We ignore the error for now since there isn't any reasonable
// action to take if there is an error here, since the stop is still
// advisory: Terraform will exit once the graph node completes.
p.Stop()
}
}
}()
return stop, wait
}
// parseVariableAsHCL parses the value of a single variable as would have been specified
// on the command line via -var or in an environment variable named TF_VAR_x, where x is
// the name of the variable. In order to get around the restriction of HCL requiring a
// top level object, we prepend a sentinel key, decode the user-specified value as its
// value and pull the value back out of the resulting map.
func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) {
// expecting a string so don't decode anything, just strip quotes
if targetType == config.VariableTypeString {
return strings.Trim(input, `"`), nil
}
// return empty types
if strings.TrimSpace(input) == "" {
switch targetType {
case config.VariableTypeList:
return []interface{}{}, nil
case config.VariableTypeMap:
return make(map[string]interface{}), nil
}
}
const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
inputWithSentinel := fmt.Sprintf("%s = %s", sentinelValue, input)
var decoded map[string]interface{}
err := hcl.Decode(&decoded, inputWithSentinel)
if err != nil {
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err)
}
if len(decoded) != 1 {
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input)
}
parsedValue, ok := decoded[sentinelValue]
if !ok {
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
}
switch targetType {
case config.VariableTypeList:
return parsedValue, nil
case config.VariableTypeMap:
if list, ok := parsedValue.([]map[string]interface{}); ok {
return list[0], nil
}
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
default:
panic(fmt.Errorf("unknown type %s", targetType.Printable()))
}
}
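// A worked sketch of the sentinel trick above (values are illustrative, not from
// the original source): for a map-typed variable, the input is wrapped as
// `SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY = { env = "dev" }`, decoded with HCL, and
// the value is pulled back out from under the sentinel key:
//
//	v, err := parseVariableAsHCL("tags", `{ env = "dev" }`, config.VariableTypeMap)
//	// v == map[string]interface{}{"env": "dev"}, err == nil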
| {
if opts == nil {
opts = &ContextGraphOpts{Validate: true}
}
log.Printf("[INFO] terraform: building graph: %s", typ)
switch typ {
case GraphTypeApply:
return (&ApplyGraphBuilder{
Module: c.module,
Diff: c.diff,
State: c.state,
Providers: c.components.ResourceProviders(),
Provisioners: c.components.ResourceProvisioners(),
Targets: c.targets,
Destroy: c.destroy,
Validate: opts.Validate,
}).Build(RootModulePath)
case GraphTypeInput:
// The input graph is just a slightly modified plan graph
fallthrough
case GraphTypeValidate:
// The validate graph is just a slightly modified plan graph
fallthrough
case GraphTypePlan:
// Create the plan graph builder
p := &PlanGraphBuilder{
Module: c.module,
State: c.state,
Providers: c.components.ResourceProviders(),
Targets: c.targets,
Validate: opts.Validate,
}
// Some special cases for other graph types shared with plan currently
var b GraphBuilder = p
switch typ {
case GraphTypeInput:
b = InputGraphBuilder(p)
case GraphTypeValidate:
// We need to set the provisioners so those can be validated
p.Provisioners = c.components.ResourceProvisioners()
b = ValidateGraphBuilder(p)
}
return b.Build(RootModulePath)
case GraphTypePlanDestroy:
return (&DestroyPlanGraphBuilder{
Module: c.module,
State: c.state,
Targets: c.targets,
Validate: opts.Validate,
}).Build(RootModulePath)
case GraphTypeRefresh:
return (&RefreshGraphBuilder{
Module: c.module,
State: c.state,
Providers: c.components.ResourceProviders(),
Targets: c.targets,
Validate: opts.Validate,
}).Build(RootModulePath)
}
return nil, fmt.Errorf("unknown graph type: %s", typ)
} | identifier_body |
context.go | package terraform
import (
"context"
"fmt"
"log"
"sort"
"strings"
"sync"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/version"
)
// InputMode defines what sort of input will be asked for when Input
// is called on Context.
type InputMode byte
const (
// InputModeVar asks for all variables
InputModeVar InputMode = 1 << iota
// InputModeVarUnset asks for variables which are not set yet.
// InputModeVar must be set for this to have an effect.
InputModeVarUnset
// InputModeProvider asks for provider variables
InputModeProvider
// InputModeStd is the standard operating mode and asks for both variables
// and providers.
InputModeStd = InputModeVar | InputModeProvider
)
var (
// contextFailOnShadowError will cause Context operations to return
// errors when shadow operations fail. This is only used for testing.
contextFailOnShadowError = false
// contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every
// Plan operation, effectively testing the Diff DeepCopy whenever
// a Plan occurs. This is enabled for tests.
contextTestDeepCopyOnPlan = false
)
// ContextOpts are the user-configurable options to create a context with
// NewContext.
type ContextOpts struct {
Meta *ContextMeta
Destroy bool
Diff *Diff
Hooks []Hook
Module *module.Tree
Parallelism int
State *State
StateFutureAllowed bool
ProviderResolver ResourceProviderResolver
Provisioners map[string]ResourceProvisionerFactory
Shadow bool
Targets []string
Variables map[string]interface{}
// If non-nil, will apply as additional constraints on the provider
// plugins that will be requested from the provider resolver.
ProviderSHA256s map[string][]byte
SkipProviderVerify bool
UIInput UIInput
}
// ContextMeta is metadata about the running context. This is information
// that this package or structure cannot determine on its own but exposes
// into Terraform in various ways. This must be provided by the Context
// initializer.
type ContextMeta struct {
Env string // Env is the state environment
}
// Context represents all the context that Terraform needs in order to
// perform operations on infrastructure. This structure is built using
// NewContext. See the documentation for that.
//
// Extra functions on Context can be found in context_*.go files.
type Context struct {
// Maintainer note: Anytime this struct is changed, please verify
// that newShadowContext still does the right thing. Tests should
// fail regardless but putting this note here as well.
components contextComponentFactory
destroy bool
diff *Diff
diffLock sync.RWMutex
hooks []Hook
meta *ContextMeta
module *module.Tree
sh *stopHook
shadow bool
state *State
stateLock sync.RWMutex
targets []string
uiInput UIInput
variables map[string]interface{}
l sync.Mutex // Lock acquired during any task
parallelSem Semaphore
providerInputConfig map[string]map[string]interface{}
providerSHA256s map[string][]byte
runLock sync.Mutex
runCond *sync.Cond
runContext context.Context
runContextCancel context.CancelFunc
shadowErr error
}
// NewContext creates a new Context structure.
//
// Once a Context is created, the pointer values within ContextOpts
// should not be mutated in any way, since the pointers are copied, not
// the values themselves.
func NewContext(opts *ContextOpts) (*Context, error) {
// Validate the version requirement if it is given
if opts.Module != nil {
if err := CheckRequiredVersion(opts.Module); err != nil {
return nil, err
}
}
// Copy all the hooks and add our stop hook. We don't append directly
// to the Config so that we're not modifying that in-place.
sh := new(stopHook)
hooks := make([]Hook, len(opts.Hooks)+1)
copy(hooks, opts.Hooks)
hooks[len(opts.Hooks)] = sh
state := opts.State
if state == nil {
state = new(State)
state.init()
}
// If our state is from the future, then error. Callers can avoid
// this error by explicitly setting `StateFutureAllowed`.
if !opts.StateFutureAllowed && state.FromFutureTerraform() {
return nil, fmt.Errorf(
"Terraform doesn't allow running any operations against a state\n"+
"that was written by a future Terraform version. The state is\n"+
"reporting it is written by Terraform '%s'.\n\n"+
"Please run at least that version of Terraform to continue.",
state.TFVersion)
}
// Explicitly reset our state version to our current version so that
// any operations we do will write out that our latest version
// has run.
state.TFVersion = version.Version
// Determine parallelism, default to 10. We do this both to limit
// CPU pressure and to have an extra guard against rate throttling
// from providers.
par := opts.Parallelism
if par == 0 {
par = 10
}
// Set up the variables in the following sequence:
// 0 - Take default values from the configuration
// 1 - Take values from TF_VAR_x environment variables
// 2 - Take values specified in -var flags, overriding values
// set by environment variables if necessary. This includes
// values taken from -var-file in addition.
variables := make(map[string]interface{})
if opts.Module != nil {
var err error
variables, err = Variables(opts.Module, opts.Variables)
if err != nil {
return nil, err
}
}
// Bind available provider plugins to the constraints in config
var providers map[string]ResourceProviderFactory
if opts.ProviderResolver != nil {
var err error
deps := ModuleTreeDependencies(opts.Module, state)
reqd := deps.AllPluginRequirements()
if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify {
reqd.LockExecutables(opts.ProviderSHA256s)
}
providers, err = resourceProviderFactories(opts.ProviderResolver, reqd)
if err != nil {
return nil, err
}
} else {
providers = make(map[string]ResourceProviderFactory)
}
diff := opts.Diff
if diff == nil {
diff = &Diff{}
}
return &Context{
components: &basicComponentFactory{
providers: providers,
provisioners: opts.Provisioners,
},
destroy: opts.Destroy,
diff: diff,
hooks: hooks,
meta: opts.Meta,
module: opts.Module,
shadow: opts.Shadow,
state: state,
targets: opts.Targets,
uiInput: opts.UIInput,
variables: variables,
parallelSem: NewSemaphore(par),
providerInputConfig: make(map[string]map[string]interface{}),
providerSHA256s: opts.ProviderSHA256s,
sh: sh,
}, nil
}
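// Illustrative sketch, not part of the original source: a minimal caller of
// NewContext. The module tree and prior state are assumed to come from the
// caller; a Parallelism of 0 would fall back to the default of 10 above.
func exampleNewContext(mod *module.Tree, st *State) (*Context, error) {
    return NewContext(&ContextOpts{
        Module:      mod,
        State:       st,
        Parallelism: 4,
    })
}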
type ContextGraphOpts struct {
// If true, validates the graph structure (checks for cycles).
Validate bool
// Legacy graphs only: won't prune the graph
Verbose bool
}
// Graph returns the graph used for the given operation type.
//
// The most extensive or complex graph type is GraphTypePlan.
func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
if opts == nil {
opts = &ContextGraphOpts{Validate: true}
}
log.Printf("[INFO] terraform: building graph: %s", typ)
switch typ {
case GraphTypeApply:
return (&ApplyGraphBuilder{
Module: c.module,
Diff: c.diff,
State: c.state,
Providers: c.components.ResourceProviders(),
Provisioners: c.components.ResourceProvisioners(),
Targets: c.targets,
Destroy: c.destroy,
Validate: opts.Validate,
}).Build(RootModulePath)
case GraphTypeInput:
// The input graph is just a slightly modified plan graph
fallthrough
case GraphTypeValidate:
// The validate graph is just a slightly modified plan graph
fallthrough
case GraphTypePlan:
// Create the plan graph builder
p := &PlanGraphBuilder{
Module: c.module,
State: c.state,
Providers: c.components.ResourceProviders(),
Targets: c.targets,
Validate: opts.Validate,
}
// Some special cases for other graph types shared with plan currently
var b GraphBuilder = p
switch typ {
case GraphTypeInput:
b = InputGraphBuilder(p)
case GraphTypeValidate:
// We need to set the provisioners so those can be validated
p.Provisioners = c.components.ResourceProvisioners()
b = ValidateGraphBuilder(p)
}
return b.Build(RootModulePath)
case GraphTypePlanDestroy:
return (&DestroyPlanGraphBuilder{
Module: c.module,
State: c.state,
Targets: c.targets,
Validate: opts.Validate,
}).Build(RootModulePath)
case GraphTypeRefresh:
return (&RefreshGraphBuilder{
Module: c.module,
State: c.state,
Providers: c.components.ResourceProviders(),
Targets: c.targets,
Validate: opts.Validate,
}).Build(RootModulePath)
}
return nil, fmt.Errorf("unknown graph type: %s", typ)
}
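// Illustrative sketch, not part of the original source: requesting the plan
// graph with validation enabled, mirroring what Plan() does internally.
func examplePlanGraph(c *Context) (*Graph, error) {
    return c.Graph(GraphTypePlan, &ContextGraphOpts{Validate: true})
}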
// ShadowError returns any errors caught during a shadow operation.
//
// A shadow operation is an operation run in parallel to a real operation
// that performs the same tasks using new logic on copied state. The results
// are compared to ensure that the new logic works the same as the old logic.
// The shadow never affects the real operation or return values.
//
// The results of the shadow operation are only available through this function
// call after a real operation is complete.
//
// For API consumers of Context, you can safely ignore this function
// completely if you have no interest in helping report experimental feature
// errors to Terraform maintainers. Otherwise, please call this function
// after every operation and report this to the user.
//
// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
// the real state or result of a real operation. They are purely informational
// to assist in future Terraform versions being more stable. Please message
// this effectively to the end user.
//
// This must be called only when no other operation is running (refresh,
// plan, etc.). The result can be used in parallel to any other operation
// running.
func (c *Context) ShadowError() error {
return c.shadowErr
}
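// Illustrative sketch, not part of the original source: surfacing shadow
// errors after an operation finishes, as the comment above recommends.
// Reporting via a warning log line is an assumption about the front end.
func exampleReportShadowError(c *Context) {
    if err := c.ShadowError(); err != nil {
        log.Printf("[WARN] shadow graph error (informational only): %s", err)
    }
}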
// State returns a copy of the current state associated with this context.
//
// This cannot safely be called in parallel with any other Context function.
func (c *Context) State() *State {
return c.state.DeepCopy()
}
// Interpolater returns an Interpolater built on a copy of the state
// that can be used to test interpolation values.
func (c *Context) Interpolater() *Interpolater {
var varLock sync.Mutex
var stateLock sync.RWMutex
return &Interpolater{
Operation: walkApply,
Meta: c.meta,
Module: c.module,
State: c.state.DeepCopy(),
StateLock: &stateLock,
VariableValues: c.variables,
VariableValuesLock: &varLock,
}
}
// Input asks for input to fill variables and provider configurations.
// This modifies the configuration in-place, so asking for Input twice
// may result in different UI output showing different current values.
func (c *Context) Input(mode InputMode) error {
defer c.acquireRun("input")()
if mode&InputModeVar != 0 {
// Walk the variables first for the root module. We walk them in
// alphabetical order for UX reasons.
rootConf := c.module.Config()
names := make([]string, len(rootConf.Variables))
m := make(map[string]*config.Variable)
for i, v := range rootConf.Variables {
names[i] = v.Name
m[v.Name] = v
}
sort.Strings(names)
for _, n := range names {
// If we only care about unset variables, then if the variable
// is set, continue on.
if mode&InputModeVarUnset != 0 {
if _, ok := c.variables[n]; ok {
continue
}
}
var valueType config.VariableType
v := m[n]
switch valueType = v.Type(); valueType {
case config.VariableTypeUnknown:
continue
case config.VariableTypeMap:
// OK
case config.VariableTypeList:
// OK
case config.VariableTypeString:
// OK
default:
panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
}
// If the variable is not already set, and the variable defines a
// default, use that for the value.
if _, ok := c.variables[n]; !ok {
if v.Default != nil {
c.variables[n] = v.Default.(string)
continue
}
}
// this should only happen during tests
if c.uiInput == nil {
log.Println("[WARN] Content.uiInput is nil")
continue
}
// Ask the user for a value for this variable
var value string
retry := 0
for {
var err error
value, err = c.uiInput.Input(&InputOpts{
Id: fmt.Sprintf("var.%s", n),
Query: fmt.Sprintf("var.%s", n),
Description: v.Description,
})
if err != nil {
return fmt.Errorf(
"Error asking for %s: %s", n, err)
}
if value == "" && v.Required() {
// Redo if it is required, but abort if we keep getting
// blank entries
if retry > 2 {
return fmt.Errorf("missing required value for %q", n)
}
retry++
continue
}
break
}
// no value provided, so don't set the variable at all
if value == "" {
continue
}
decoded, err := parseVariableAsHCL(n, value, valueType)
if err != nil {
return err
}
if decoded != nil {
c.variables[n] = decoded
}
}
}
if mode&InputModeProvider != 0 {
// Build the graph
graph, err := c.Graph(GraphTypeInput, nil)
if err != nil {
return err
}
// Do the walk
if _, err := c.walk(graph, walkInput); err != nil {
return err
}
}
return nil
}
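// Illustrative sketch, not part of the original source: prompting only for
// variables that are not set yet, the combination InputModeVarUnset is
// documented to require alongside InputModeVar.
func exampleAskForUnsetVars(c *Context) error {
    return c.Input(InputModeVar | InputModeVarUnset)
}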
// Apply applies the changes represented by this context and returns
// the resulting state.
//
// Even in the case an error is returned, the state may be returned and will
// potentially be partially updated. In addition to returning the resulting
// state, this context is updated with the latest state.
//
// If the state is required after an error, the caller should call
// Context.State, rather than rely on the return value.
//
// TODO: Apply and Refresh should either always return a state, or rely on the
// State() method. Currently the helper/resource testing framework relies
// on the absence of a returned state to determine if Destroy can be
// called, so that will need to be refactored before this can be changed.
func (c *Context) Apply() (*State, error) {
defer c.acquireRun("apply")()
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
graph, err := c.Graph(GraphTypeApply, nil)
if err != nil {
return nil, err
}
// Determine the operation
operation := walkApply
if c.destroy {
operation = walkDestroy
}
// Walk the graph
walker, err := c.walk(graph, operation)
if len(walker.ValidationErrors) > 0 {
err = multierror.Append(err, walker.ValidationErrors...)
}
// Clean out any unused things
c.state.prune()
return c.state, err
}
// Plan generates an execution plan for the given context.
//
// The execution plan encapsulates the context and can be stored
// in order to reinstantiate a context later for Apply.
//
// Plan also updates the diff of this context to be the diff generated
// by the plan, so Apply can be called after.
func (c *Context) Plan() (*Plan, error) {
defer c.acquireRun("plan")()
p := &Plan{
Module: c.module,
Vars: c.variables,
State: c.state,
Targets: c.targets,
TerraformVersion: version.String(),
ProviderSHA256s: c.providerSHA256s,
}
var operation walkOperation
if c.destroy {
operation = walkPlanDestroy
p.Destroy = true
} else {
// Set our state to be something temporary. We do this so that
// the plan can update a fake state, which lets variables work, and
// then we restore our old state afterwards.
old := c.state
if old == nil {
c.state = &State{}
c.state.init()
} else {
c.state = old.DeepCopy()
}
defer func() {
c.state = old
}()
operation = walkPlan
}
// Setup our diff
c.diffLock.Lock()
c.diff = new(Diff)
c.diff.init()
c.diffLock.Unlock()
// Build the graph.
graphType := GraphTypePlan
if c.destroy {
graphType = GraphTypePlanDestroy
}
graph, err := c.Graph(graphType, nil)
if err != nil {
return nil, err
}
// Do the walk
walker, err := c.walk(graph, operation)
if err != nil {
return nil, err
}
p.Diff = c.diff
// If this is true, it means we're running unit tests. In this case,
// we perform a deep copy just to ensure that all context tests also
// test that a diff is copy-able. This will panic if it fails. This
// is enabled during unit tests.
//
// This should never be true during production usage, but even if it is,
// it can't do any real harm.
if contextTestDeepCopyOnPlan {
p.Diff.DeepCopy()
}
/*
// We don't do the reverification during the new destroy plan because
// it will use a different apply process.
if X_legacyGraph {
// Now that we have a diff, we can build the exact graph that Apply will use
// and catch any possible cycles during the Plan phase.
if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
return nil, err
}
}
*/
var errs error
if len(walker.ValidationErrors) > 0 {
errs = multierror.Append(errs, walker.ValidationErrors...)
}
return p, errs
}
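// Illustrative sketch, not part of the original source: the plan-then-apply
// flow the comments above describe. Plan stores the generated diff on the
// context, so Apply can be called immediately afterwards.
func examplePlanThenApply(c *Context) (*State, error) {
    if _, err := c.Plan(); err != nil {
        return nil, err
    }
    return c.Apply()
}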
// Refresh goes through all the resources in the state and refreshes them
// to their latest state. This will update the state that this context
// works with, along with returning it.
//
// Even in the case an error is returned, the state may be returned and
// will potentially be partially updated.
func (c *Context) Refresh() (*State, error) {
defer c.acquireRun("refresh")()
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
graph, err := c.Graph(GraphTypeRefresh, nil)
if err != nil {
return nil, err
}
// Do the walk
if _, err := c.walk(graph, walkRefresh); err != nil {
return nil, err
}
// Clean out any unused things
c.state.prune()
return c.state, nil
}
// Stop stops the running task.
//
// Stop will block until the task completes.
func (c *Context) Stop() {
log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")
c.l.Lock()
defer c.l.Unlock()
// If we're running, then stop
if c.runContextCancel != nil {
log.Printf("[WARN] terraform: run context exists, stopping")
// Tell the hook we want to stop
c.sh.Stop()
// Stop the context
c.runContextCancel()
c.runContextCancel = nil
}
// Grab the condition var before we exit
if cond := c.runCond; cond != nil {
cond.Wait()
}
log.Printf("[WARN] terraform: stop complete")
}
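// Illustrative sketch, not part of the original source: triggering Stop from
// another goroutine when a caller-owned cancellation channel fires. Stop
// blocks until the running operation has wound down.
func exampleStopFromAnotherGoroutine(c *Context, cancel <-chan struct{}) {
    go func() {
        <-cancel
        c.Stop()
    }()
}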
// Validate validates the configuration and returns any warnings or errors.
func (c *Context) Validate() tfdiags.Diagnostics {
defer c.acquireRun("validate")()
var diags tfdiags.Diagnostics
// Validate the configuration itself
diags = diags.Append(c.module.Validate())
// This only needs to be done for the root module, since inter-module
// variables are validated in the module tree.
if config := c.module.Config(); config != nil {
// Validate the user variables
for _, err := range smcUserVariables(config, c.variables) {
diags = diags.Append(err)
}
}
// If we have errors at this point, the graphing has no chance,
// so just bail early.
if diags.HasErrors() {
return diags
}
// Build the graph so we can walk it and run Validate on nodes.
// We also validate the graph generated here, but this graph doesn't
// necessarily match the graph that Plan will generate, so we'll validate the
// graph again later after Planning.
graph, err := c.Graph(GraphTypeValidate, nil)
if err != nil {
diags = diags.Append(err)
return diags
}
// Walk
walker, err := c.walk(graph, walkValidate)
if err != nil {
diags = diags.Append(err)
}
sort.Strings(walker.ValidationWarnings)
sort.Slice(walker.ValidationErrors, func(i, j int) bool {
return walker.ValidationErrors[i].Error() < walker.ValidationErrors[j].Error()
})
for _, warn := range walker.ValidationWarnings {
diags = diags.Append(tfdiags.SimpleWarning(warn))
}
for _, err := range walker.ValidationErrors {
diags = diags.Append(err)
}
return diags
}
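// Illustrative sketch, not part of the original source: running Validate and
// collapsing error-level diagnostics into a single error. Err() is assumed to
// behave as in the tfdiags package.
func exampleValidate(c *Context) error {
    diags := c.Validate()
    if diags.HasErrors() {
        return diags.Err()
    }
    return nil
}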
// Module returns the module tree associated with this context.
func (c *Context) Module() *module.Tree {
return c.module
}
// Variables will return the mapping of variables that were defined
// for this Context. If Input was called, this mapping may be different
// than what was given.
func (c *Context) Variables() map[string]interface{} {
return c.variables
}
// SetVariable sets a variable after a context has already been built.
func (c *Context) | (k string, v interface{}) {
c.variables[k] = v
}
func (c *Context) acquireRun(phase string) func() {
// With the run lock held, grab the context lock to make changes
// to the run context.
c.l.Lock()
defer c.l.Unlock()
// Wait until we're no longer running
for c.runCond != nil {
c.runCond.Wait()
}
// Build our lock
c.runCond = sync.NewCond(&c.l)
// Setup debugging
dbug.SetPhase(phase)
// Create a new run context
c.runContext, c.runContextCancel = context.WithCancel(context.Background())
// Reset the stop hook so we're not stopped
c.sh.Reset()
// Reset the shadow errors
c.shadowErr = nil
return c.releaseRun
}
func (c *Context) releaseRun() {
// Grab the context lock so that we can make modifications to fields
c.l.Lock()
defer c.l.Unlock()
// Setting the phase to "INVALID" lets us easily detect operations
// happening outside of a run, or cases where we missed setting the
// proper phase.
dbug.SetPhase("INVALID")
// End our run. We check if runContextCancel is non-nil because it can
// be set to nil if it was cancelled via Stop()
if c.runContextCancel != nil {
c.runContextCancel()
}
// Unlock all goroutines waiting on our condition
cond := c.runCond
c.runCond = nil
cond.Broadcast()
// Unset the context
c.runContext = nil
}
func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, error) {
// Keep track of the "real" context which is the context that does
// the real work: talking to real providers, modifying real state, etc.
realCtx := c
log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
walker := &ContextGraphWalker{
Context: realCtx,
Operation: operation,
StopContext: c.runContext,
}
// Watch for a stop so we can call the provider Stop() API.
watchStop, watchWait := c.watchStop(walker)
// Walk the real graph, this will block until it completes
realErr := graph.Walk(walker)
// Close the channel so the watcher stops, and wait for it to return.
close(watchStop)
<-watchWait
return walker, realErr
}
// watchStop immediately returns a `stop` and a `wait` chan after dispatching
// the watchStop goroutine. This will watch the runContext for cancellation and
// stop the providers accordingly. When the watch is no longer needed, the
// `stop` chan should be closed before waiting on the `wait` chan.
// The `wait` chan is important, because without synchronizing with the end of
// the watchStop goroutine, the runContext may also be closed during the select
// incorrectly causing providers to be stopped. Even if the graph walk is done
// at that point, stopping a provider permanently cancels its StopContext which
// can cause later actions to fail.
func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {
stop := make(chan struct{})
wait := make(chan struct{})
// get the runContext cancellation channel now, because releaseRun will
// write to the runContext field.
done := c.runContext.Done()
go func() {
defer close(wait)
// Wait for a stop or completion
select {
case <-done:
// done means the context was canceled, so we need to try and stop
// providers.
case <-stop:
// our own stop channel was closed.
return
}
// If we're here, we're stopped, trigger the call.
{
// Copy the providers so that a misbehaved blocking Stop doesn't
// completely hang Terraform.
walker.providerLock.Lock()
ps := make([]ResourceProvider, 0, len(walker.providerCache))
for _, p := range walker.providerCache {
ps = append(ps, p)
}
defer walker.providerLock.Unlock()
for _, p := range ps {
// We ignore the error for now since there isn't any reasonable
// action to take if there is an error here, since the stop is still
// advisory: Terraform will exit once the graph node completes.
p.Stop()
}
}
{
// Call stop on all the provisioners
walker.provisionerLock.Lock()
ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache))
for _, p := range walker.provisionerCache {
ps = append(ps, p)
}
defer walker.provisionerLock.Unlock()
for _, p := range ps {
// We ignore the error for now since there isn't any reasonable
// action to take if there is an error here, since the stop is still
// advisory: Terraform will exit once the graph node completes.
p.Stop()
}
}
}()
return stop, wait
}
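// Illustrative sketch, not part of the original source: the close-then-wait
// shutdown order the comment above requires when using watchStop directly,
// mirroring what walk() already does.
func exampleWatchStopShutdown(c *Context, walker *ContextGraphWalker) {
    stop, wait := c.watchStop(walker)
    // ... the graph walk would run here ...
    close(stop)
    <-wait
}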
// parseVariableAsHCL parses the value of a single variable as would have been specified
// on the command line via -var or in an environment variable named TF_VAR_x, where x is
// the name of the variable. In order to get around the restriction of HCL requiring a
// top level object, we prepend a sentinel key, decode the user-specified value as its
// value and pull the value back out of the resulting map.
func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) {
// expecting a string so don't decode anything, just strip quotes
if targetType == config.VariableTypeString {
return strings.Trim(input, `"`), nil
}
// return empty types
if strings.TrimSpace(input) == "" {
switch targetType {
case config.VariableTypeList:
return []interface{}{}, nil
case config.VariableTypeMap:
return make(map[string]interface{}), nil
}
}
const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
inputWithSentinel := fmt.Sprintf("%s = %s", sentinelValue, input)
var decoded map[string]interface{}
err := hcl.Decode(&decoded, inputWithSentinel)
if err != nil {
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err)
}
if len(decoded) != 1 {
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input)
}
parsedValue, ok := decoded[sentinelValue]
if !ok {
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
}
switch targetType {
case config.VariableTypeList:
return parsedValue, nil
case config.VariableTypeMap:
if list, ok := parsedValue.([]map[string]interface{}); ok {
return list[0], nil
}
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
default:
panic(fmt.Errorf("unknown type %s", targetType.Printable()))
}
}
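// Illustrative sketch, not part of the original source: decoding a list-typed
// -var value through the sentinel-key trick described above. The variable
// name and value here are made up for the example.
func exampleParseListVariable() (interface{}, error) {
    return parseVariableAsHCL("subnets", `["10.0.1.0/24", "10.0.2.0/24"]`, config.VariableTypeList)
}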
| SetVariable | identifier_name |
context.go | package terraform
import (
"context"
"fmt"
"log"
"sort"
"strings"
"sync"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/version"
)
// InputMode defines what sort of input will be asked for when Input
// is called on Context.
type InputMode byte
const (
// InputModeVar asks for all variables
InputModeVar InputMode = 1 << iota
// InputModeVarUnset asks for variables which are not set yet.
// InputModeVar must be set for this to have an effect.
InputModeVarUnset
// InputModeProvider asks for provider variables
InputModeProvider
// InputModeStd is the standard operating mode and asks for both variables
// and providers.
InputModeStd = InputModeVar | InputModeProvider
)
var (
// contextFailOnShadowError will cause Context operations to return
// errors when shadow operations fail. This is only used for testing.
contextFailOnShadowError = false
// contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every
// Plan operation, effectively testing the Diff DeepCopy whenever
// a Plan occurs. This is enabled for tests.
contextTestDeepCopyOnPlan = false
)
// ContextOpts are the user-configurable options to create a context with
// NewContext.
type ContextOpts struct {
Meta *ContextMeta
Destroy bool
Diff *Diff
Hooks []Hook
Module *module.Tree
Parallelism int
State *State
StateFutureAllowed bool
ProviderResolver ResourceProviderResolver
Provisioners map[string]ResourceProvisionerFactory
Shadow bool
Targets []string
Variables map[string]interface{}
// If non-nil, will apply as additional constraints on the provider
// plugins that will be requested from the provider resolver.
ProviderSHA256s map[string][]byte
SkipProviderVerify bool
UIInput UIInput
}
// ContextMeta is metadata about the running context. This is information
// that this package or structure cannot determine on its own but exposes
// into Terraform in various ways. This must be provided by the Context
// initializer.
type ContextMeta struct {
Env string // Env is the state environment
}
// Context represents all the context that Terraform needs in order to
// perform operations on infrastructure. This structure is built using
// NewContext. See the documentation for that.
//
// Extra functions on Context can be found in context_*.go files.
type Context struct {
// Maintainer note: Anytime this struct is changed, please verify
// that newShadowContext still does the right thing. Tests should
// fail regardless but putting this note here as well.
components contextComponentFactory
destroy bool
diff *Diff
diffLock sync.RWMutex
hooks []Hook
meta *ContextMeta
module *module.Tree
sh *stopHook
shadow bool
state *State
stateLock sync.RWMutex
targets []string
uiInput UIInput
variables map[string]interface{}
l sync.Mutex // Lock acquired during any task
parallelSem Semaphore
providerInputConfig map[string]map[string]interface{}
providerSHA256s map[string][]byte
runLock sync.Mutex
runCond *sync.Cond
runContext context.Context
runContextCancel context.CancelFunc
shadowErr error
}
// NewContext creates a new Context structure.
//
// Once a Context is created, the pointer values within ContextOpts
// should not be mutated in any way, since the pointers are copied, not
// the values themselves.
func NewContext(opts *ContextOpts) (*Context, error) {
// Validate the version requirement if it is given
if opts.Module != nil {
if err := CheckRequiredVersion(opts.Module); err != nil {
return nil, err
}
}
// Copy all the hooks and add our stop hook. We don't append directly
// to the Config so that we're not modifying that in-place.
sh := new(stopHook)
hooks := make([]Hook, len(opts.Hooks)+1)
copy(hooks, opts.Hooks)
hooks[len(opts.Hooks)] = sh
state := opts.State
if state == nil {
state = new(State)
state.init()
}
// If our state is from the future, then error. Callers can avoid
// this error by explicitly setting `StateFutureAllowed`.
if !opts.StateFutureAllowed && state.FromFutureTerraform() {
return nil, fmt.Errorf(
"Terraform doesn't allow running any operations against a state\n"+
"that was written by a future Terraform version. The state is\n"+
"reporting it is written by Terraform '%s'.\n\n"+
"Please run at least that version of Terraform to continue.",
state.TFVersion)
}
// Explicitly reset our state version to our current version so that
// any operations we do will write out that our latest version
// has run.
state.TFVersion = version.Version
// Determine parallelism, default to 10. We do this both to limit
// CPU pressure and to have an extra guard against rate throttling
// from providers.
par := opts.Parallelism
if par == 0 {
par = 10
}
// Set up the variables in the following sequence:
// 0 - Take default values from the configuration
// 1 - Take values from TF_VAR_x environment variables
// 2 - Take values specified in -var flags, overriding values
// set by environment variables if necessary. This includes
// values taken from -var-file in addition.
variables := make(map[string]interface{})
if opts.Module != nil {
var err error
variables, err = Variables(opts.Module, opts.Variables)
if err != nil {
return nil, err
}
}
// Bind available provider plugins to the constraints in config
var providers map[string]ResourceProviderFactory
if opts.ProviderResolver != nil {
var err error
deps := ModuleTreeDependencies(opts.Module, state)
reqd := deps.AllPluginRequirements()
if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify {
reqd.LockExecutables(opts.ProviderSHA256s)
}
providers, err = resourceProviderFactories(opts.ProviderResolver, reqd)
if err != nil {
return nil, err
}
} else {
providers = make(map[string]ResourceProviderFactory)
}
diff := opts.Diff
if diff == nil {
diff = &Diff{}
}
return &Context{
components: &basicComponentFactory{
providers: providers,
provisioners: opts.Provisioners,
},
destroy: opts.Destroy,
diff: diff,
hooks: hooks,
meta: opts.Meta,
module: opts.Module,
shadow: opts.Shadow,
state: state,
targets: opts.Targets,
uiInput: opts.UIInput,
variables: variables,
parallelSem: NewSemaphore(par),
providerInputConfig: make(map[string]map[string]interface{}),
providerSHA256s: opts.ProviderSHA256s,
sh: sh,
}, nil
}
type ContextGraphOpts struct {
// If true, validates the graph structure (checks for cycles).
Validate bool
// Legacy graphs only: won't prune the graph
Verbose bool
}
// Graph returns the graph used for the given operation type.
//
// The most extensive or complex graph type is GraphTypePlan.
func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
if opts == nil {
opts = &ContextGraphOpts{Validate: true}
}
log.Printf("[INFO] terraform: building graph: %s", typ)
switch typ {
case GraphTypeApply:
return (&ApplyGraphBuilder{
Module: c.module,
Diff: c.diff,
State: c.state,
Providers: c.components.ResourceProviders(),
Provisioners: c.components.ResourceProvisioners(),
Targets: c.targets,
Destroy: c.destroy,
Validate: opts.Validate,
}).Build(RootModulePath)
case GraphTypeInput:
// The input graph is just a slightly modified plan graph
fallthrough
case GraphTypeValidate:
// The validate graph is just a slightly modified plan graph
fallthrough
case GraphTypePlan:
// Create the plan graph builder
p := &PlanGraphBuilder{
Module: c.module,
State: c.state,
Providers: c.components.ResourceProviders(),
Targets: c.targets,
Validate: opts.Validate,
}
// Some special cases for other graph types shared with plan currently
var b GraphBuilder = p
switch typ {
case GraphTypeInput:
b = InputGraphBuilder(p)
case GraphTypeValidate:
// We need to set the provisioners so those can be validated
p.Provisioners = c.components.ResourceProvisioners()
b = ValidateGraphBuilder(p)
}
return b.Build(RootModulePath)
case GraphTypePlanDestroy:
return (&DestroyPlanGraphBuilder{
Module: c.module,
State: c.state,
Targets: c.targets,
Validate: opts.Validate,
}).Build(RootModulePath)
case GraphTypeRefresh:
return (&RefreshGraphBuilder{
Module: c.module,
State: c.state,
Providers: c.components.ResourceProviders(),
Targets: c.targets,
Validate: opts.Validate,
}).Build(RootModulePath)
}
return nil, fmt.Errorf("unknown graph type: %s", typ)
}
// ShadowError returns any errors caught during a shadow operation.
//
// A shadow operation is an operation run in parallel to a real operation
// that performs the same tasks using new logic on copied state. The results
// are compared to ensure that the new logic works the same as the old logic.
// The shadow never affects the real operation or return values.
//
// The results of the shadow operation are only available through this function
// call after a real operation is complete.
//
// For API consumers of Context, you can safely ignore this function
// completely if you have no interest in helping report experimental feature
// errors to Terraform maintainers. Otherwise, please call this function
// after every operation and report this to the user.
//
// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
// the real state or result of a real operation. They are purely informational
// to assist in future Terraform versions being more stable. Please message
// this effectively to the end user.
//
// This must be called only when no other operation is running (refresh,
// plan, etc.). The result can be used in parallel to any other operation
// running.
func (c *Context) ShadowError() error {
return c.shadowErr
}
// State returns a copy of the current state associated with this context.
//
// This cannot safely be called in parallel with any other Context function.
func (c *Context) State() *State {
return c.state.DeepCopy()
}
// Interpolater returns an Interpolater built on a copy of the state
// that can be used to test interpolation values.
func (c *Context) Interpolater() *Interpolater {
var varLock sync.Mutex
var stateLock sync.RWMutex
return &Interpolater{
Operation: walkApply,
Meta: c.meta,
Module: c.module,
State: c.state.DeepCopy(),
StateLock: &stateLock,
VariableValues: c.variables,
VariableValuesLock: &varLock,
}
}
// Input asks for input to fill variables and provider configurations.
// This modifies the configuration in-place, so asking for Input twice
// may result in different UI output showing different current values.
func (c *Context) Input(mode InputMode) error {
defer c.acquireRun("input")()
if mode&InputModeVar != 0 {
// Walk the variables first for the root module. We walk them in
// alphabetical order for UX reasons.
rootConf := c.module.Config()
names := make([]string, len(rootConf.Variables))
m := make(map[string]*config.Variable)
for i, v := range rootConf.Variables {
names[i] = v.Name
m[v.Name] = v
}
sort.Strings(names)
for _, n := range names {
// If we only care about unset variables, then if the variable
// is set, continue on.
if mode&InputModeVarUnset != 0 {
if _, ok := c.variables[n]; ok {
continue
}
}
var valueType config.VariableType
v := m[n]
switch valueType = v.Type(); valueType {
case config.VariableTypeUnknown:
continue
case config.VariableTypeMap:
// OK
case config.VariableTypeList:
// OK
case config.VariableTypeString:
// OK
default:
panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
}
// If the variable is not already set, and the variable defines a
// default, use that for the value.
if _, ok := c.variables[n]; !ok {
if v.Default != nil {
c.variables[n] = v.Default.(string)
continue
}
}
// this should only happen during tests
if c.uiInput == nil {
log.Println("[WARN] Content.uiInput is nil")
continue
}
// Ask the user for a value for this variable
var value string
retry := 0
for |
// no value provided, so don't set the variable at all
if value == "" {
continue
}
decoded, err := parseVariableAsHCL(n, value, valueType)
if err != nil {
return err
}
if decoded != nil {
c.variables[n] = decoded
}
}
}
if mode&InputModeProvider != 0 {
// Build the graph
graph, err := c.Graph(GraphTypeInput, nil)
if err != nil {
return err
}
// Do the walk
if _, err := c.walk(graph, walkInput); err != nil {
return err
}
}
return nil
}
// Apply applies the changes represented by this context and returns
// the resulting state.
//
// Even in the case an error is returned, the state may be returned and will
// potentially be partially updated. In addition to returning the resulting
// state, this context is updated with the latest state.
//
// If the state is required after an error, the caller should call
// Context.State, rather than rely on the return value.
//
// TODO: Apply and Refresh should either always return a state, or rely on the
// State() method. Currently the helper/resource testing framework relies
// on the absence of a returned state to determine if Destroy can be
// called, so that will need to be refactored before this can be changed.
func (c *Context) Apply() (*State, error) {
defer c.acquireRun("apply")()
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
graph, err := c.Graph(GraphTypeApply, nil)
if err != nil {
return nil, err
}
// Determine the operation
operation := walkApply
if c.destroy {
operation = walkDestroy
}
// Walk the graph
walker, err := c.walk(graph, operation)
if len(walker.ValidationErrors) > 0 {
err = multierror.Append(err, walker.ValidationErrors...)
}
// Clean out any unused things
c.state.prune()
return c.state, err
}
// Plan generates an execution plan for the given context.
//
// The execution plan encapsulates the context and can be stored
// in order to reinstantiate a context later for Apply.
//
// Plan also updates the diff of this context to be the diff generated
// by the plan, so Apply can be called after.
func (c *Context) Plan() (*Plan, error) {
defer c.acquireRun("plan")()
p := &Plan{
Module: c.module,
Vars: c.variables,
State: c.state,
Targets: c.targets,
TerraformVersion: version.String(),
ProviderSHA256s: c.providerSHA256s,
}
var operation walkOperation
if c.destroy {
operation = walkPlanDestroy
p.Destroy = true
} else {
// Set our state to be something temporary. We do this so that
// the plan can update a fake state, which lets variables work, and
// then we restore our old state afterwards.
old := c.state
if old == nil {
c.state = &State{}
c.state.init()
} else {
c.state = old.DeepCopy()
}
defer func() {
c.state = old
}()
operation = walkPlan
}
// Setup our diff
c.diffLock.Lock()
c.diff = new(Diff)
c.diff.init()
c.diffLock.Unlock()
// Build the graph.
graphType := GraphTypePlan
if c.destroy {
graphType = GraphTypePlanDestroy
}
graph, err := c.Graph(graphType, nil)
if err != nil {
return nil, err
}
// Do the walk
walker, err := c.walk(graph, operation)
if err != nil {
return nil, err
}
p.Diff = c.diff
// If this is true, it means we're running unit tests. In this case,
// we perform a deep copy just to ensure that all context tests also
// test that a diff is copy-able. This will panic if it fails. This
// is enabled during unit tests.
//
// This should never be true during production usage, but even if it is,
// it can't do any real harm.
if contextTestDeepCopyOnPlan {
p.Diff.DeepCopy()
}
/*
// We don't do the reverification during the new destroy plan because
// it will use a different apply process.
if X_legacyGraph {
// Now that we have a diff, we can build the exact graph that Apply will use
// and catch any possible cycles during the Plan phase.
if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
return nil, err
}
}
*/
var errs error
if len(walker.ValidationErrors) > 0 {
errs = multierror.Append(errs, walker.ValidationErrors...)
}
return p, errs
}
// Refresh goes through all the resources in the state and refreshes them
// to their latest state. This will update the state that this context
// works with, along with returning it.
//
// Even in the case an error is returned, the state may be returned and
// will potentially be partially updated.
func (c *Context) Refresh() (*State, error) {
defer c.acquireRun("refresh")()
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
graph, err := c.Graph(GraphTypeRefresh, nil)
if err != nil {
return nil, err
}
// Do the walk
if _, err := c.walk(graph, walkRefresh); err != nil {
return nil, err
}
// Clean out any unused things
c.state.prune()
return c.state, nil
}
// Stop stops the running task.
//
// Stop will block until the task completes.
func (c *Context) Stop() {
log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")
c.l.Lock()
defer c.l.Unlock()
// If we're running, then stop
if c.runContextCancel != nil {
log.Printf("[WARN] terraform: run context exists, stopping")
// Tell the hook we want to stop
c.sh.Stop()
// Stop the context
c.runContextCancel()
c.runContextCancel = nil
}
// Grab the condition var before we exit
if cond := c.runCond; cond != nil {
cond.Wait()
}
log.Printf("[WARN] terraform: stop complete")
}
// Validate validates the configuration and returns any warnings or errors.
func (c *Context) Validate() tfdiags.Diagnostics {
defer c.acquireRun("validate")()
var diags tfdiags.Diagnostics
// Validate the configuration itself
diags = diags.Append(c.module.Validate())
// This only needs to be done for the root module, since inter-module
// variables are validated in the module tree.
if config := c.module.Config(); config != nil {
// Validate the user variables
for _, err := range smcUserVariables(config, c.variables) {
diags = diags.Append(err)
}
}
// If we have errors at this point, the graphing has no chance,
// so just bail early.
if diags.HasErrors() {
return diags
}
// Build the graph so we can walk it and run Validate on nodes.
// We also validate the graph generated here, but this graph doesn't
// necessarily match the graph that Plan will generate, so we'll validate the
// graph again later after Planning.
graph, err := c.Graph(GraphTypeValidate, nil)
if err != nil {
diags = diags.Append(err)
return diags
}
// Walk
walker, err := c.walk(graph, walkValidate)
if err != nil {
diags = diags.Append(err)
}
sort.Strings(walker.ValidationWarnings)
sort.Slice(walker.ValidationErrors, func(i, j int) bool {
return walker.ValidationErrors[i].Error() < walker.ValidationErrors[j].Error()
})
for _, warn := range walker.ValidationWarnings {
diags = diags.Append(tfdiags.SimpleWarning(warn))
}
for _, err := range walker.ValidationErrors {
diags = diags.Append(err)
}
return diags
}
// Module returns the module tree associated with this context.
func (c *Context) Module() *module.Tree {
return c.module
}
// Variables will return the mapping of variables that were defined
// for this Context. If Input was called, this mapping may be different
// than what was given.
func (c *Context) Variables() map[string]interface{} {
return c.variables
}
// SetVariable sets a variable after a context has already been built.
func (c *Context) SetVariable(k string, v interface{}) {
c.variables[k] = v
}
func (c *Context) acquireRun(phase string) func() {
// With the run lock held, grab the context lock to make changes
// to the run context.
c.l.Lock()
defer c.l.Unlock()
// Wait until we're no longer running
for c.runCond != nil {
c.runCond.Wait()
}
// Build our lock
c.runCond = sync.NewCond(&c.l)
// Setup debugging
dbug.SetPhase(phase)
// Create a new run context
c.runContext, c.runContextCancel = context.WithCancel(context.Background())
// Reset the stop hook so we're not stopped
c.sh.Reset()
// Reset the shadow errors
c.shadowErr = nil
return c.releaseRun
}
func (c *Context) releaseRun() {
// Grab the context lock so that we can make modifications to fields
c.l.Lock()
defer c.l.Unlock()
// Setting the phase to "INVALID" lets us easily detect operations
// happening outside of a run, or cases where we missed setting the
// proper phase.
dbug.SetPhase("INVALID")
// End our run. We check if runContextCancel is non-nil because it can
// be set to nil if it was cancelled via Stop()
if c.runContextCancel != nil {
c.runContextCancel()
}
// Unlock all goroutines waiting on our condition
cond := c.runCond
c.runCond = nil
cond.Broadcast()
// Unset the context
c.runContext = nil
}
func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, error) {
// Keep track of the "real" context which is the context that does
// the real work: talking to real providers, modifying real state, etc.
realCtx := c
log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
walker := &ContextGraphWalker{
Context: realCtx,
Operation: operation,
StopContext: c.runContext,
}
// Watch for a stop so we can call the provider Stop() API.
watchStop, watchWait := c.watchStop(walker)
// Walk the real graph, this will block until it completes
realErr := graph.Walk(walker)
// Close the channel so the watcher stops, and wait for it to return.
close(watchStop)
<-watchWait
return walker, realErr
}
// watchStop immediately returns a `stop` and a `wait` chan after dispatching
// the watchStop goroutine. This will watch the runContext for cancellation and
// stop the providers accordingly. When the watch is no longer needed, the
// `stop` chan should be closed before waiting on the `wait` chan.
// The `wait` chan is important, because without synchronizing with the end of
// the watchStop goroutine, the runContext may also be closed during the select
// incorrectly causing providers to be stopped. Even if the graph walk is done
// at that point, stopping a provider permanently cancels its StopContext which
// can cause later actions to fail.
func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {
stop := make(chan struct{})
wait := make(chan struct{})
// get the runContext cancellation channel now, because releaseRun will
// write to the runContext field.
done := c.runContext.Done()
go func() {
defer close(wait)
// Wait for a stop or completion
select {
case <-done:
// done means the context was canceled, so we need to try and stop
// providers.
case <-stop:
// our own stop channel was closed.
return
}
// If we're here, we're stopped, trigger the call.
{
// Copy the providers so that a misbehaved blocking Stop doesn't
// completely hang Terraform.
walker.providerLock.Lock()
ps := make([]ResourceProvider, 0, len(walker.providerCache))
for _, p := range walker.providerCache {
ps = append(ps, p)
}
defer walker.providerLock.Unlock()
for _, p := range ps {
// We ignore the error for now since there isn't any reasonable
// action to take if there is an error here, since the stop is still
// advisory: Terraform will exit once the graph node completes.
p.Stop()
}
}
{
// Call stop on all the provisioners
walker.provisionerLock.Lock()
ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache))
for _, p := range walker.provisionerCache {
ps = append(ps, p)
}
defer walker.provisionerLock.Unlock()
for _, p := range ps {
// We ignore the error for now since there isn't any reasonable
// action to take if there is an error here, since the stop is still
// advisory: Terraform will exit once the graph node completes.
p.Stop()
}
}
}()
return stop, wait
}
// parseVariableAsHCL parses the value of a single variable as would have been specified
// on the command line via -var or in an environment variable named TF_VAR_x, where x is
// the name of the variable. In order to get around the restriction of HCL requiring a
// top level object, we prepend a sentinel key, decode the user-specified value as its
// value and pull the value back out of the resulting map.
func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) {
// expecting a string so don't decode anything, just strip quotes
if targetType == config.VariableTypeString {
return strings.Trim(input, `"`), nil
}
// return empty types
if strings.TrimSpace(input) == "" {
switch targetType {
case config.VariableTypeList:
return []interface{}{}, nil
case config.VariableTypeMap:
return make(map[string]interface{}), nil
}
}
const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
inputWithSentinel := fmt.Sprintf("%s = %s", sentinelValue, input)
var decoded map[string]interface{}
err := hcl.Decode(&decoded, inputWithSentinel)
if err != nil {
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err)
}
if len(decoded) != 1 {
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input)
}
parsedValue, ok := decoded[sentinelValue]
if !ok {
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
}
switch targetType {
case config.VariableTypeList:
return parsedValue, nil
case config.VariableTypeMap:
if list, ok := parsedValue.([]map[string]interface{}); ok {
return list[0], nil
}
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
default:
panic(fmt.Errorf("unknown type %s", targetType.Printable()))
}
}
| {
var err error
value, err = c.uiInput.Input(&InputOpts{
Id: fmt.Sprintf("var.%s", n),
Query: fmt.Sprintf("var.%s", n),
Description: v.Description,
})
if err != nil {
return fmt.Errorf(
"Error asking for %s: %s", n, err)
}
if value == "" && v.Required() {
// Redo if it is required, but abort if we keep getting
// blank entries
if retry > 2 {
return fmt.Errorf("missing required value for %q", n)
}
retry++
continue
}
break
} | conditional_block |
context.go | package terraform
import (
"context"
"fmt"
"log"
"sort"
"strings"
"sync"
"github.com/hashicorp/terraform/tfdiags"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/version"
)
// InputMode defines what sort of input will be asked for when Input
// is called on Context.
type InputMode byte
const (
// InputModeVar asks for all variables
InputModeVar InputMode = 1 << iota
// InputModeVarUnset asks for variables which are not set yet.
// InputModeVar must be set for this to have an effect.
InputModeVarUnset
// InputModeProvider asks for provider variables
InputModeProvider
// InputModeStd is the standard operating mode and asks for both variables
// and providers.
InputModeStd = InputModeVar | InputModeProvider
)
var (
// contextFailOnShadowError will cause Context operations to return
// errors when shadow operations fail. This is only used for testing.
contextFailOnShadowError = false
// contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every
// Plan operation, effectively testing the Diff DeepCopy whenever
// a Plan occurs. This is enabled for tests.
contextTestDeepCopyOnPlan = false
)
// ContextOpts are the user-configurable options to create a context with
// NewContext.
type ContextOpts struct {
Meta *ContextMeta
Destroy bool
Diff *Diff
Hooks []Hook
Module *module.Tree
Parallelism int
State *State
StateFutureAllowed bool
ProviderResolver ResourceProviderResolver
Provisioners map[string]ResourceProvisionerFactory
Shadow bool
Targets []string
Variables map[string]interface{}
// If non-nil, will apply as additional constraints on the provider
// plugins that will be requested from the provider resolver.
ProviderSHA256s map[string][]byte
SkipProviderVerify bool
UIInput UIInput
}
// ContextMeta is metadata about the running context. This is information
// that this package or structure cannot determine on its own but exposes
// into Terraform in various ways. This must be provided by the Context
// initializer.
type ContextMeta struct {
Env string // Env is the state environment
}
// Context represents all the context that Terraform needs in order to
// perform operations on infrastructure. This structure is built using
// NewContext. See the documentation for that.
//
// Extra functions on Context can be found in context_*.go files.
type Context struct {
// Maintainer note: Anytime this struct is changed, please verify
// that newShadowContext still does the right thing. Tests should
// fail regardless but putting this note here as well.
components contextComponentFactory
destroy bool
diff *Diff
diffLock sync.RWMutex
hooks []Hook
meta *ContextMeta
module *module.Tree
sh *stopHook
shadow bool
state *State
stateLock sync.RWMutex
targets []string
uiInput UIInput
variables map[string]interface{}
l sync.Mutex // Lock acquired during any task
parallelSem Semaphore
providerInputConfig map[string]map[string]interface{}
providerSHA256s map[string][]byte
runLock sync.Mutex
runCond *sync.Cond
runContext context.Context
runContextCancel context.CancelFunc
shadowErr error
}
// NewContext creates a new Context structure.
//
// Once a Context is created, the pointer values within ContextOpts
// should not be mutated in any way, since the pointers are copied, not
// the values themselves.
func NewContext(opts *ContextOpts) (*Context, error) {
// Validate the version requirement if it is given
if opts.Module != nil {
if err := CheckRequiredVersion(opts.Module); err != nil {
return nil, err
}
}
// Copy all the hooks and add our stop hook. We don't append directly
// to the Config so that we're not modifying that in-place.
sh := new(stopHook)
hooks := make([]Hook, len(opts.Hooks)+1)
copy(hooks, opts.Hooks)
hooks[len(opts.Hooks)] = sh
state := opts.State
if state == nil {
state = new(State)
state.init()
}
// If our state is from the future, then error. Callers can avoid
// this error by explicitly setting `StateFutureAllowed`.
if !opts.StateFutureAllowed && state.FromFutureTerraform() {
return nil, fmt.Errorf(
"Terraform doesn't allow running any operations against a state\n"+
"that was written by a future Terraform version. The state is\n"+
"reporting it is written by Terraform '%s'.\n\n"+
"Please run at least that version of Terraform to continue.",
state.TFVersion)
}
// Explicitly reset our state version to our current version so that
// any operations we do will write out that our latest version
// has run.
state.TFVersion = version.Version
// Determine parallelism, default to 10. We do this both to limit
// CPU pressure and to have an extra guard against rate throttling
// from providers.
par := opts.Parallelism
if par == 0 {
par = 10
}
// Set up the variables in the following sequence:
// 0 - Take default values from the configuration
// 1 - Take values from TF_VAR_x environment variables
// 2 - Take values specified in -var flags, overriding values
// set by environment variables if necessary. This includes
// values taken from -var-file in addition.
variables := make(map[string]interface{})
if opts.Module != nil {
var err error
variables, err = Variables(opts.Module, opts.Variables)
if err != nil {
return nil, err
}
}
// Bind available provider plugins to the constraints in config
var providers map[string]ResourceProviderFactory
if opts.ProviderResolver != nil {
var err error
deps := ModuleTreeDependencies(opts.Module, state)
reqd := deps.AllPluginRequirements()
if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify {
reqd.LockExecutables(opts.ProviderSHA256s)
}
providers, err = resourceProviderFactories(opts.ProviderResolver, reqd)
if err != nil {
return nil, err
}
} else {
providers = make(map[string]ResourceProviderFactory)
}
diff := opts.Diff
if diff == nil {
diff = &Diff{}
}
return &Context{
components: &basicComponentFactory{
providers: providers,
provisioners: opts.Provisioners,
},
destroy: opts.Destroy,
diff: diff,
hooks: hooks,
meta: opts.Meta,
module: opts.Module,
shadow: opts.Shadow,
state: state,
targets: opts.Targets,
uiInput: opts.UIInput,
variables: variables,
parallelSem: NewSemaphore(par),
providerInputConfig: make(map[string]map[string]interface{}),
providerSHA256s: opts.ProviderSHA256s,
sh: sh,
}, nil
}
type ContextGraphOpts struct {
// If true, validates the graph structure (checks for cycles).
Validate bool
// Legacy graphs only: won't prune the graph
Verbose bool
}
// Graph returns the graph used for the given operation type.
//
// The most extensive or complex graph type is GraphTypePlan.
func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
if opts == nil {
opts = &ContextGraphOpts{Validate: true}
}
log.Printf("[INFO] terraform: building graph: %s", typ)
switch typ {
case GraphTypeApply:
return (&ApplyGraphBuilder{
Module: c.module,
Diff: c.diff,
State: c.state,
Providers: c.components.ResourceProviders(),
Provisioners: c.components.ResourceProvisioners(),
Targets: c.targets,
Destroy: c.destroy,
Validate: opts.Validate,
}).Build(RootModulePath)
case GraphTypeInput:
// The input graph is just a slightly modified plan graph
fallthrough
case GraphTypeValidate:
// The validate graph is just a slightly modified plan graph
fallthrough
case GraphTypePlan:
// Create the plan graph builder
p := &PlanGraphBuilder{
Module: c.module,
State: c.state,
Providers: c.components.ResourceProviders(),
Targets: c.targets,
Validate: opts.Validate,
}
// Some special cases for other graph types shared with plan currently
var b GraphBuilder = p
switch typ {
case GraphTypeInput:
b = InputGraphBuilder(p)
case GraphTypeValidate:
// We need to set the provisioners so those can be validated
p.Provisioners = c.components.ResourceProvisioners()
b = ValidateGraphBuilder(p)
}
return b.Build(RootModulePath)
case GraphTypePlanDestroy:
return (&DestroyPlanGraphBuilder{
Module: c.module,
State: c.state,
Targets: c.targets,
Validate: opts.Validate,
}).Build(RootModulePath)
case GraphTypeRefresh:
return (&RefreshGraphBuilder{
Module: c.module,
State: c.state,
Providers: c.components.ResourceProviders(),
Targets: c.targets,
Validate: opts.Validate,
}).Build(RootModulePath)
}
return nil, fmt.Errorf("unknown graph type: %s", typ)
}
// ShadowError returns any errors caught during a shadow operation.
//
// A shadow operation is an operation run in parallel to a real operation
// that performs the same tasks using new logic on copied state. The results
// are compared to ensure that the new logic works the same as the old logic.
// The shadow never affects the real operation or return values.
//
// The results of the shadow operation are only available through this function
// call after a real operation is complete.
//
// For API consumers of Context, you can safely ignore this function
// completely if you have no interest in helping report experimental feature
// errors to Terraform maintainers. Otherwise, please call this function
// after every operation and report this to the user.
//
// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
// the real state or result of a real operation. They are purely informational
// to assist in future Terraform versions being more stable. Please message
// this effectively to the end user.
//
// This must be called only when no other operation is running (refresh,
// plan, etc.). The result can be used in parallel to any other operation
// running.
func (c *Context) ShadowError() error {
return c.shadowErr
}
// State returns a copy of the current state associated with this context.
//
// This cannot safely be called in parallel with any other Context function.
func (c *Context) State() *State {
return c.state.DeepCopy()
}
// Interpolater returns an Interpolater built on a copy of the state
// that can be used to test interpolation values.
func (c *Context) Interpolater() *Interpolater {
var varLock sync.Mutex
var stateLock sync.RWMutex
return &Interpolater{
Operation: walkApply,
Meta: c.meta,
Module: c.module,
State: c.state.DeepCopy(),
StateLock: &stateLock,
VariableValues: c.variables,
VariableValuesLock: &varLock,
}
}
// Input asks for input to fill variables and provider configurations.
// This modifies the configuration in-place, so asking for Input twice
// may result in different UI output showing different current values.
func (c *Context) Input(mode InputMode) error {
defer c.acquireRun("input")()
if mode&InputModeVar != 0 {
// Walk the variables first for the root module. We walk them in
// alphabetical order for UX reasons.
rootConf := c.module.Config()
names := make([]string, len(rootConf.Variables))
m := make(map[string]*config.Variable)
for i, v := range rootConf.Variables {
names[i] = v.Name
m[v.Name] = v
}
sort.Strings(names)
for _, n := range names {
// If we only care about unset variables, then if the variable
// is set, continue on.
if mode&InputModeVarUnset != 0 {
if _, ok := c.variables[n]; ok {
continue
}
}
var valueType config.VariableType
v := m[n]
switch valueType = v.Type(); valueType {
case config.VariableTypeUnknown:
continue
case config.VariableTypeMap:
// OK
case config.VariableTypeList:
// OK
case config.VariableTypeString:
// OK
default:
panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
}
// If the variable is not already set, and the variable defines a
// default, use that for the value.
if _, ok := c.variables[n]; !ok {
if v.Default != nil {
c.variables[n] = v.Default.(string)
continue
}
}
// this should only happen during tests
if c.uiInput == nil {
log.Println("[WARN] Content.uiInput is nil")
continue
}
// Ask the user for a value for this variable
var value string
retry := 0
for {
var err error
value, err = c.uiInput.Input(&InputOpts{
Id: fmt.Sprintf("var.%s", n),
Query: fmt.Sprintf("var.%s", n),
Description: v.Description,
})
if err != nil {
return fmt.Errorf(
"Error asking for %s: %s", n, err)
}
if value == "" && v.Required() {
// Redo if it is required, but abort if we keep getting
// blank entries
if retry > 2 {
return fmt.Errorf("missing required value for %q", n)
}
retry++
continue
}
break
}
// no value provided, so don't set the variable at all
if value == "" {
continue
}
decoded, err := parseVariableAsHCL(n, value, valueType)
if err != nil {
return err
}
if decoded != nil {
c.variables[n] = decoded
}
}
}
if mode&InputModeProvider != 0 {
// Build the graph
graph, err := c.Graph(GraphTypeInput, nil)
if err != nil {
return err
}
// Do the walk
if _, err := c.walk(graph, walkInput); err != nil {
return err
}
}
return nil
}
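// Sketch (not part of the original file) of how a caller typically combines
// the input modes before invoking Input; ctx is assumed to exist in the caller.
//
//	mode := InputModeVar | InputModeVarUnset | InputModeProvider
//	if err := ctx.Input(mode); err != nil {
//		return err
//	}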
// Apply applies the changes represented by this context and returns
// the resulting state.
//
// Even in the case an error is returned, the state may be returned and will
// potentially be partially updated. In addition to returning the resulting
// state, this context is updated with the latest state.
//
// If the state is required after an error, the caller should call
// Context.State, rather than rely on the return value.
//
// TODO: Apply and Refresh should either always return a state, or rely on the
// State() method. Currently the helper/resource testing framework relies
// on the absence of a returned state to determine if Destroy can be
// called, so that will need to be refactored before this can be changed.
func (c *Context) Apply() (*State, error) {
defer c.acquireRun("apply")()
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
graph, err := c.Graph(GraphTypeApply, nil)
if err != nil {
return nil, err
}
// Determine the operation
operation := walkApply
if c.destroy {
operation = walkDestroy
}
// Walk the graph
walker, err := c.walk(graph, operation)
if len(walker.ValidationErrors) > 0 {
err = multierror.Append(err, walker.ValidationErrors...)
}
// Clean out any unused things
c.state.prune()
return c.state, err
}
// Plan generates an execution plan for the given context.
//
// The execution plan encapsulates the context and can be stored
// in order to reinstantiate a context later for Apply.
//
// Plan also updates the diff of this context to be the diff generated
// by the plan, so Apply can be called after.
func (c *Context) Plan() (*Plan, error) {
defer c.acquireRun("plan")()
p := &Plan{
Module: c.module,
Vars: c.variables,
State: c.state,
Targets: c.targets,
TerraformVersion: version.String(),
ProviderSHA256s: c.providerSHA256s,
}
var operation walkOperation
if c.destroy {
operation = walkPlanDestroy
p.Destroy = true
} else {
// Set our state to be something temporary. We do this so that
// the plan can update a fake state so that variables work, then
// we replace it back with our old state.
old := c.state
if old == nil {
c.state = &State{}
c.state.init()
} else {
c.state = old.DeepCopy()
}
defer func() {
c.state = old
}()
operation = walkPlan
}
// Setup our diff
c.diffLock.Lock()
c.diff = new(Diff)
c.diff.init()
c.diffLock.Unlock()
// Build the graph.
graphType := GraphTypePlan
if c.destroy {
graphType = GraphTypePlanDestroy
}
graph, err := c.Graph(graphType, nil)
if err != nil {
return nil, err
}
// Do the walk
walker, err := c.walk(graph, operation)
if err != nil {
return nil, err
}
p.Diff = c.diff
// If this is true, it means we're running unit tests. In this case,
// we perform a deep copy just to ensure that all context tests also
// test that a diff is copy-able. This will panic if it fails. This
// is enabled during unit tests.
//
// This should never be true during production usage, but even if it is,
// it can't do any real harm.
if contextTestDeepCopyOnPlan {
p.Diff.DeepCopy()
}
/*
// We don't do the reverification during the new destroy plan because
// it will use a different apply process.
if X_legacyGraph {
// Now that we have a diff, we can build the exact graph that Apply will use
// and catch any possible cycles during the Plan phase.
if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
return nil, err
}
}
*/
var errs error
if len(walker.ValidationErrors) > 0 {
errs = multierror.Append(errs, walker.ValidationErrors...)
}
return p, errs
}
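// Typical call order for a caller driving this API (illustrative sketch, error
// handling abbreviated): Plan first so the diff is stored on the context, then
// Apply uses that diff.
//
//	plan, err := ctx.Plan()
//	// ... persist or inspect plan ...
//	state, err := ctx.Apply()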
// Refresh goes through all the resources in the state and refreshes them
// to their latest state. This will update the state that this context
// works with, along with returning it.
//
// Even in the case an error is returned, the state may be returned and
// will potentially be partially updated.
func (c *Context) Refresh() (*State, error) {
defer c.acquireRun("refresh")()
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
graph, err := c.Graph(GraphTypeRefresh, nil)
if err != nil {
return nil, err
}
// Do the walk
if _, err := c.walk(graph, walkRefresh); err != nil {
return nil, err
}
// Clean out any unused things
c.state.prune()
return c.state, nil
}
// Stop stops the running task.
//
// Stop will block until the task completes.
func (c *Context) Stop() {
log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")
c.l.Lock()
defer c.l.Unlock()
// If we're running, then stop
if c.runContextCancel != nil {
log.Printf("[WARN] terraform: run context exists, stopping")
// Tell the hook we want to stop
c.sh.Stop()
// Stop the context
c.runContextCancel()
c.runContextCancel = nil
}
// Grab the condition var before we exit
if cond := c.runCond; cond != nil {
cond.Wait()
}
log.Printf("[WARN] terraform: stop complete")
}
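// Illustration (not part of the original file): Stop is intended to be called
// from another goroutine, e.g. a signal handler, while an operation such as
// Apply is blocking. interruptCh is an assumed channel fed by os/signal.
//
//	go func() {
//		<-interruptCh
//		ctx.Stop()
//	}()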
// Validate validates the configuration and returns any warnings or errors.
func (c *Context) Validate() tfdiags.Diagnostics {
defer c.acquireRun("validate")()
var diags tfdiags.Diagnostics
// Validate the configuration itself
diags = diags.Append(c.module.Validate())
// This only needs to be done for the root module, since inter-module
// variables are validated in the module tree.
if config := c.module.Config(); config != nil {
// Validate the user variables
for _, err := range smcUserVariables(config, c.variables) {
diags = diags.Append(err)
}
}
// If we have errors at this point, the graphing has no chance,
// so just bail early.
if diags.HasErrors() {
return diags
}
// Build the graph so we can walk it and run Validate on nodes.
// We also validate the graph generated here, but this graph doesn't
// necessarily match the graph that Plan will generate, so we'll validate the
// graph again later after Planning.
graph, err := c.Graph(GraphTypeValidate, nil)
if err != nil {
diags = diags.Append(err)
return diags
}
// Walk
walker, err := c.walk(graph, walkValidate)
if err != nil {
diags = diags.Append(err)
}
sort.Strings(walker.ValidationWarnings)
sort.Slice(walker.ValidationErrors, func(i, j int) bool {
return walker.ValidationErrors[i].Error() < walker.ValidationErrors[j].Error()
})
for _, warn := range walker.ValidationWarnings {
diags = diags.Append(tfdiags.SimpleWarning(warn))
}
for _, err := range walker.ValidationErrors {
diags = diags.Append(err)
}
return diags
}
// Module returns the module tree associated with this context.
func (c *Context) Module() *module.Tree {
return c.module
}
// Variables will return the mapping of variables that were defined
// for this Context. If Input was called, this mapping may be different
// than what was given.
func (c *Context) Variables() map[string]interface{} {
return c.variables
}
// SetVariable sets a variable after a context has already been built.
func (c *Context) SetVariable(k string, v interface{}) {
c.variables[k] = v
}
func (c *Context) acquireRun(phase string) func() {
// With the run lock held, grab the context lock to make changes
// to the run context.
c.l.Lock()
defer c.l.Unlock()
// Wait until we're no longer running
for c.runCond != nil {
c.runCond.Wait()
}
// Build our lock
c.runCond = sync.NewCond(&c.l)
// Setup debugging
dbug.SetPhase(phase)
// Create a new run context
c.runContext, c.runContextCancel = context.WithCancel(context.Background())
// Reset the stop hook so we're not stopped
c.sh.Reset()
// Reset the shadow errors
c.shadowErr = nil
return c.releaseRun
}
func (c *Context) releaseRun() {
// Grab the context lock so that we can make modifications to fields
c.l.Lock()
defer c.l.Unlock()
// setting the phase to "INVALID" lets us easily detect if we have
// operations happening outside of a run, or we missed setting the proper
// phase
dbug.SetPhase("INVALID")
// End our run. We check if runContext is non-nil because it can be
// set to nil if it was cancelled via Stop()
if c.runContextCancel != nil {
c.runContextCancel()
}
// Unlock all waiting our condition
cond := c.runCond
c.runCond = nil
cond.Broadcast()
// Unset the context
c.runContext = nil
}
func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, error) {
// Keep track of the "real" context which is the context that does
// the real work: talking to real providers, modifying real state, etc.
realCtx := c
log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
walker := &ContextGraphWalker{
Context: realCtx,
Operation: operation,
StopContext: c.runContext,
}
// Watch for a stop so we can call the provider Stop() API.
watchStop, watchWait := c.watchStop(walker)
// Walk the real graph, this will block until it completes
realErr := graph.Walk(walker)
// Close the channel so the watcher stops, and wait for it to return.
close(watchStop)
<-watchWait
return walker, realErr
}
// watchStop immediately returns a `stop` and a `wait` chan after dispatching
// the watchStop goroutine. This will watch the runContext for cancellation and
// stop the providers accordingly. When the watch is no longer needed, the
// `stop` chan should be closed before waiting on the `wait` chan.
// The `wait` chan is important, because without synchronizing with the end of
// the watchStop goroutine, the runContext may also be closed during the select
// incorrectly causing providers to be stopped. Even if the graph walk is done
// at that point, stopping a provider permanently cancels its StopContext which
// can cause later actions to fail.
func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {
stop := make(chan struct{})
wait := make(chan struct{})
// get the runContext cancellation channel now, because releaseRun will
// write to the runContext field.
done := c.runContext.Done()
go func() {
defer close(wait)
// Wait for a stop or completion
select {
case <-done:
// done means the context was canceled, so we need to try and stop
// providers.
case <-stop:
// our own stop channel was closed.
return
}
// If we're here, we're stopped, trigger the call.
{
// Copy the providers so that a misbehaved blocking Stop doesn't
// completely hang Terraform.
walker.providerLock.Lock()
ps := make([]ResourceProvider, 0, len(walker.providerCache))
for _, p := range walker.providerCache {
ps = append(ps, p)
}
defer walker.providerLock.Unlock()
for _, p := range ps {
// We ignore the error for now since there isn't any reasonable
// action to take if there is an error here, since the stop is still
// advisory: Terraform will exit once the graph node completes.
p.Stop()
}
}
{
// Call stop on all the provisioners
walker.provisionerLock.Lock()
ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache))
for _, p := range walker.provisionerCache {
ps = append(ps, p)
}
defer walker.provisionerLock.Unlock()
for _, p := range ps {
// We ignore the error for now since there isn't any reasonable
// action to take if there is an error here, since the stop is still
// advisory: Terraform will exit once the graph node completes.
p.Stop()
}
}
}()
return stop, wait
}
// parseVariableAsHCL parses the value of a single variable as would have been specified
// on the command line via -var or in an environment variable named TF_VAR_x, where x is
// the name of the variable. In order to get around the restriction of HCL requiring a
// top level object, we prepend a sentinel key, decode the user-specified value as its
// value and pull the value back out of the resulting map.
func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) {
// expecting a string so don't decode anything, just strip quotes
if targetType == config.VariableTypeString {
return strings.Trim(input, `"`), nil
}
// return empty types
if strings.TrimSpace(input) == "" {
switch targetType {
case config.VariableTypeList:
return []interface{}{}, nil
case config.VariableTypeMap:
return make(map[string]interface{}), nil
}
}
const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
inputWithSentinal := fmt.Sprintf("%s = %s", sentinelValue, input)
var decoded map[string]interface{}
err := hcl.Decode(&decoded, inputWithSentinal)
if err != nil {
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err)
}
if len(decoded) != 1 {
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input)
}
parsedValue, ok := decoded[sentinelValue]
if !ok {
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
}
switch targetType {
case config.VariableTypeList:
return parsedValue, nil
case config.VariableTypeMap:
if list, ok := parsedValue.([]map[string]interface{}); ok {
return list[0], nil
}
return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
default:
panic(fmt.Errorf("unknown type %s", targetType.Printable()))
}
} |
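// For example (illustrative values): a map-typed variable given on the command
// line as -var 'tags={env = "dev"}' reaches this function as name "tags" and
// input `{env = "dev"}`, and decodes roughly like this:
//
//	v, err := parseVariableAsHCL("tags", `{env = "dev"}`, config.VariableTypeMap)
//	// err == nil, v == map[string]interface{}{"env": "dev"}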
workshopSteps.py | #first we are going to import the libraries we need
#OS library is used for operating system functionalities such as reading a file or using software/hardware products.
#Math library is for math functions such as floor/ceiling
#Sys library gives access to system only functions such as exit()
#Random library allows us to create random number generators
#Pygame library allows us to create game applications along with other types of applications.
import pygame, os, math, sys, random
#We have to initialize the game modules that pygame gives us access to.
pygame.init()
#Lets create a title for our window screen, we have to do pygame.display to access the display of the game.
pygame.display.set_caption("Theta Tau Snek Workshop")
#now lets initialize a random number generator using the random library
random.seed()
#Lets set up some constant variables
#Gonna need the movement speed of the snake, I put caps for constant values but you don't need to. Also you can't have
#any spaces in your variable names
MOVEMENT_SPEED = .4
#Gonna need a block size
SNAKE_BLOCK_SIZE = 10
#Gonna need some food size
FOOD_SIZE = SNAKE_BLOCK_SIZE
#Going to use a no color block to separate the blocks
SEPERATION = 10
#Lets set up our screen height and width
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 600
#Some frames per second
FPS = 25
#Going to set up a dictionary. A dictionary just lets you have keys and assign values to those keys
KEY = {"UP":1, "DOWN": 2, "LEFT": 3, "RIGHT": 4}
#Going to initialize the screen now. Remember we have to access the pygame display using the dot operator.
#set_mode takes in the resolution as a pair of width and height, then a flags argument.
#HWSURFACE asks pygame to use a hardware surface on the video card to make it faster.
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), pygame.HWSURFACE)
#Lets create some GUI Constants
text_font = pygame.font.Font(None, 30)
score_number_font = pygame.font.Font(None, 30)
#We're going to make the score text now
#render takes in text, a boolean if you want smooth edges or not, a pygame color, and a background if wanted
score_message = text_font.render("Score", 0, pygame.Color("green"))
#Lets create a background color
background_color = pygame.Color(100,100,100)
#Now lets create a game clock variable
clock = pygame.time.Clock()
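#Side note (an illustration, not one of the numbered workshop steps): every move() call
#below shifts the head by FPS * MOVEMENT_SPEED = 25 * .4 = 10 pixels, which matches
#SNAKE_BLOCK_SIZE, so the cells stay lined up on a 10 pixel grid.
assert abs(FPS * MOVEMENT_SPEED - SNAKE_BLOCK_SIZE) < 1e-9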
#STEP 1
#Lets create our main first so we can start seeing a screen
#we have to call this to get the events on the pygame window or else it doesn't know what to do and crashes
#lets fill the screen we initialized earlier for the window with the color we chose
#we want to update the screen so we call pygame.display.update
#you'll notice we can't exit the window, so go into your terminal or command window and press Ctrl+C
#we have to get the type of events that pygame can take in
#STEP 3
#lets change the line of code from pygame.event.get to call our function instead.
#lets make a variable to keep track of the key pressed. Now you can exit the window formally.
def main():
#STEP 17
#lets create a new snake object and remember we pass in the starting location. You can put any values in there as long
#as they're within the screen size
snake = Snake(SCREEN_WIDTH/2,SCREEN_HEIGHT/2)
#Lets get the snake to move right away
snake.move()
#This is going to be our variable to end the game
end = 0
#Create a variable to have your snake start out with this many body cells. I'm going to choose two.
startingCells = 2
#Create a variable to count how many cells we create
count = 0
#Create a variable to keep track of whether the food has been eaten
eaten_food = 0
#Create a variable to keep track of the score
score = 0
#Lets create a while loop to let the snake grow and have it move
#we're going to use the cells variable we made as the condition
while(count < startingCells):
snake.grow()
snake.move()
count += 1
#Lets create a list of food that takes in a food object with random coordinates and state equal to 1
food = [Food(random.randint(FOOD_SIZE,SCREEN_WIDTH), random.randint(FOOD_SIZE, SCREEN_HEIGHT), 1)]
#lets spawn a food by passing in the food list and snake coordinates
spawnSingleFood(food, snake.x, snake.y)
while end != 1:
#Lets comment these out now and start from the beginning now for the loop that will run endlessly
#keyPressed = getPressedKey()
#screen.fill(background_color)
#pygame.display.update()
#Going to have the pygame clock tick as fast as the FPS
clock.tick(FPS)
#Create the keypressed variable again and call the function getPressedKey
keyPressed = getPressedKey()
#We check if the key pressed is equal to exit, and if it is we make end equal to 1
if(keyPressed == "exit"):
end = 1
#lets check if the snake is crashing into itself and if it is end the game
if(snake.checkCrash() == True):
gameEnd()
#lets check if the snake is crashing into the edges and if its true end the game
if(crashing(snake.bodyStack[0], SNAKE_BLOCK_SIZE) == True):
gameEnd()
#we're going to check all the food, and if the food is not eaten then check for the snake colliding with it
#if the snake does collide then we make the snake grow, set the food to state 0 for eaten,
#add to the score, and set eaten_food to true
for f in food:
if(f.state == 1):
if(checkCollision(snake.bodyStack[0],SNAKE_BLOCK_SIZE, f, FOOD_SIZE) == True):
snake.grow()
f.state = 0
score += 5
eaten_food = True
#if the snake has eaten then we spawn another food and changed eaten food to false
if(eaten_food == True):
spawnSingleFood(food, snake.bodyStack[0].x, snake.bodyStack[0].y)
eaten_food = False
#If a key was pressed we try to change the direction of the snake then we move again
if(keyPressed):
snake.changeDirection(keyPressed)
snake.move()
#We fill the screen in again with the color
screen.fill(background_color)
#we check for all the food and if the food has not been eaten then we draw it on the screen
for f in food:
if(f.state == 1):
f.draw(screen)
#lets draw the snake on the screen
snake.draw(screen)
#lets draw the score
drawScore(score)
#We call pygame.display.flip to layer the screen
pygame.display.flip()
#we update the display
pygame.display.update()
#STEP 2
#lets create a function to access these events
#we're going to write a for loop to check all the events that can happen in the window
#We're going to create a bunch of if statements and else if statements
#We're going to check the event type and if that event type is equal to either
#key up, key down, key right, key left, key escape, key y, key n, or quit then we are going to return
#either the value of the respective key we defined earlier, a string of what to do, or exit the system
def getPressedKey():
for event in pygame.event.get():
if(event.type == pygame.KEYDOWN):
if(event.key == pygame.K_UP):
return KEY["UP"]
elif(event.key == pygame.K_DOWN):
return KEY["DOWN"]
elif(event.key == pygame.K_RIGHT):
return KEY["RIGHT"]
elif(event.key == pygame.K_LEFT):
return KEY["LEFT"]
elif(event.key == pygame.K_ESCAPE):
return "exit"
elif(event.key == pygame.K_y):
return "yes"
elif(event.key == pygame.K_n):
return "no"
if(event.type == pygame.QUIT):
sys.exit()
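#A small aside (not one of the numbered steps) showing how the mapping above behaves:
#we can post a fake arrow-key event into pygame's queue and getPressedKey turns it into
#the matching KEY value. The game never calls this helper; it exists only as a demo.
def demoGetPressedKey():
    fake_event = pygame.event.Event(pygame.KEYDOWN, key=pygame.K_UP)
    pygame.event.post(fake_event)
    print(getPressedKey())  #should print 1, which is KEY["UP"]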
#STEP 4
#Alright lets create the class for the basic snake cell which the snake will be composed of
#We have to create an init function as this will be called when we want to create a new cell.
#It has two underscores on each side and this is necessary.
#The parameters are going to be self, an x value, and a y value
#Then we set the self.x and self.y to the respective x and y values.
#Then we are going to set the direction to up by using the dictionary value of up
#Then we can set the color of the cell, this doesn't matter because we will be changing the color later
#But we do need to create a color for it so go wild with whatever color you want.
class Cell:
def __init__(self,x,y):
self.x = x
self.y = y
self.direction = KEY["UP"]
self.color = "white"
#STEP 5
#Now lets create the food class so we can create food for the snake to eat
#Again the parameters will be the the self, x value, y value, and a state which is just an integer
#We're going to set the values like we did in the cell class, but now we don't have a direction
#We have a state and we need a color, you can choose any color you want your food to be, but this color will not change later
class Food:
def __init__(self,x,y,state):
self.x = x
self.y = y
self.state = state
self.color = pygame.Color("red")
#STEP 14
#We're going to write a function to draw the food
#parameters are going to be self and a screen
#lets draw a rect using the screen, self.color, the x and y coordinates, the food size, and a width of 0
def draw(self,screen):
pygame.draw.rect(screen, self.color, (self.x, self.y, FOOD_SIZE, FOOD_SIZE), 0)
#STEP 6
#Lets create the snake class
#Remember to initialize it we have to create this init function.
#We have to have the parameters of self, an x value, and a y value
class Snake:
def __init__(self,x,y):
#So we initialize the snake's x and y location to the x and y passed in and
#set the direction of the snake to right
self.x = x
self.y = y
self.direction = KEY["RIGHT"]
#We're going to create a list to hold all the snake's body cells
self.bodyStack = []
#adding the first snake cell to the list of cells
self.bodyStack.append(self)
#We're going to create an end cell to separate the body cells, we create the end cell to the left of the head
endCell = Cell(x - SEPERATION,y)
#We don't want a color so we don't show it on the screen
endCell.color = "NULL"
#Make the end cell the same direction as the head.
endCell.direction = KEY["RIGHT"]
#Then we add the cell to the body list
self.bodyStack.append(endCell)
#STEP 7
#Lets create a function to move the snake
def move(self):
#So we're going to calculate the length of the snake - 1, since arrays start from 0 the last index will be
#the length of the snake - 1
lastCell = len(self.bodyStack) - 1
#We're going to write a while loop to iterate through all the snakes body cells from the end to the front of the snake.
#While we are not at the head of the snake lets go through every cell and make sure it is going in the direction of the
#previous cell and move that cell to the location of the cell in front of it. Then we decrease the cell index we are on.
while(lastCell != 0):
self.bodyStack[lastCell].direction = self.bodyStack[lastCell - 1].direction
self.bodyStack[lastCell].x = self.bodyStack[lastCell - 1].x
self.bodyStack[lastCell].y = self.bodyStack[lastCell - 1].y
lastCell -= 1
#we check if the body length is less than 2 so we know it's just the head, but if not
#then we pop out the head of the list
if(len(self.bodyStack) < 2):
headCell = self
else:
headCell = self.bodyStack.pop(lastCell)
#Now lets create some if-else statements to check if a new direction was inputted.
#We have to access the snakes bodystack and check the new direction
#If the direction is up then we set the y coordinate of the head above the second cell
#If the direction is right then we set the x coordinate of the head to the right of the second cell
#If the direction is down then we set the y coordinate of the head below the second cell
#If the direction is left then we set the y coordinate of the head to the left of the second cell
#I used fps * movement speed to make it relative to how fast you want your snake to go
if(self.bodyStack[0].direction == KEY["UP"]):
headCell.y = self.bodyStack[0].y - (FPS * MOVEMENT_SPEED)
elif(self.bodyStack[0].direction == KEY["RIGHT"]):
headCell.x = self.bodyStack[0].x + (FPS * MOVEMENT_SPEED)
elif(self.bodyStack[0].direction == KEY["DOWN"]):
headCell.y = self.bodyStack[0].y + (FPS * MOVEMENT_SPEED)
elif(self.bodyStack[0].direction == KEY["LEFT"]):
headCell.x = self.bodyStack[0].x - (FPS * MOVEMENT_SPEED)
#We then insert the head back into the front of the snakes list
self.bodyStack.insert(0, headCell)
#STEP 8
#So we can create a grow function so when your snake eats food it can grow
def grow(self):
#Again we're going to get the length of the snake and subtract 1 so we can access the array
lastCell = len(self.bodyStack) - 1
#Lets check the last cell's direction and create a newCell, but also an endCell to add separation.
#We check if the direction is up,down,right, or left and depending on which direction it is we
#create the new cell and end cell in the right place.
if(self.bodyStack[lastCell].direction == KEY["UP"]):
newCell = Cell(self.bodyStack[lastCell].x, self.bodyStack[lastCell].y + SNAKE_BLOCK_SIZE)
endCell = Cell(newCell.x, newCell.y + SEPERATION)
if(self.bodyStack[lastCell].direction == KEY["DOWN"]):
newCell = Cell(self.bodyStack[lastCell].x, self.bodyStack[lastCell].y - SNAKE_BLOCK_SIZE)
endCell = Cell(newCell.x, newCell.y - SEPERATION)
if(self.bodyStack[lastCell].direction == KEY["RIGHT"]):
newCell = Cell(self.bodyStack[lastCell].x - SNAKE_BLOCK_SIZE, self.bodyStack[lastCell].y)
endCell = Cell(newCell.x - SEPERATION, newCell.y)
if(self.bodyStack[lastCell].direction == KEY["LEFT"]):
newCell = Cell(self.bodyStack[lastCell].x + SNAKE_BLOCK_SIZE, self.bodyStack[lastCell].y)
endCell = Cell(newCell.x + SEPERATION, newCell.y)
#We're going to set the color to NULL for endCells so we don't see them in the window
endCell.color = "NULL"
#We then append the cells to the body list of the snake
self.bodyStack.append(newCell)
self.bodyStack.append(endCell)
#STEP 9
#We're going to define a draw function to draw our snake.
#We're going to have the parameters as self and a screen we can draw on
def draw(self, screen):
#We're going to call pygame.draw.rect which takes in a screen, a color for what we want our cell to be
#then a tuple of (an x position, a y position, a size, and a size) and then a width
#We're going to call this first for our head cell
pygame.draw.rect(screen, pygame.Color("gold"), (self.bodyStack[0].x, self.bodyStack[0].y, SNAKE_BLOCK_SIZE, SNAKE_BLOCK_SIZE), 0)
#starting with our first cell
cellCount = 1
#while loop to go through all the cells in the body
while(cellCount < len(self.bodyStack)):
#We're going to check if the cells color is null and if it is we're going to increase the count and continue
#to the next iteration
if(self.bodyStack[cellCount].color == "NULL"):
cellCount += 1
continue
#if the color is not null we're going to draw a rectangle again using the cells positions, the screen, and any color you want
#Then increase the cell count.
pygame.draw.rect(screen, pygame.Color("green"), (self.bodyStack[cellCount].x, self.bodyStack[cellCount].y, SNAKE_BLOCK_SIZE, SNAKE_BLOCK_SIZE), 0)
cellCount += 1
#STEP 11
#Now we're going to write a function to check for self crashing
def checkCrash(self):
#Going to start the count at 1 so we can go through the snakes body cells again
cellCount = 1
#Lets write a while loop to go through the snakes body cells
while(cellCount < len(self.bodyStack)-1):
#Lets write an If statement which uses the collision method we created earlier to check if the head is crashing with any of the cells
#However we only want to check if the head is crashing with the cells that are colored and not the separation cells.
#If they are colliding then we return True
#lets increase the cellCount by 1 every time
if(checkCollision(self.bodyStack[0], SNAKE_BLOCK_SIZE, self.bodyStack[cellCount], SNAKE_BLOCK_SIZE) == True and self.bodyStack[cellCount].color != "NULL"):
return True
cellCount += 1
#if we get out of the while loop this means we should return false and that the snake is not crashing into itself
return False
#STEP 12
#We're going to create a function to change directions
#The parameters are going to be self and a direction
def changeDirection(self, direction):
#We're going to check if the current direction is up or down and the new direction is the opposite; if so we aren't going to change directions
if(self.direction == KEY["UP"] and direction == KEY["DOWN"] or self.direction == KEY["DOWN"] and direction == KEY["UP"]):
return
#going to do the same for right and left
if(self.direction == KEY["RIGHT"] and direction == KEY["LEFT"] or self.direction == KEY["LEFT"] and direction == KEY["RIGHT"]):
return
#if the new direction isn't opposite of the current direction then we change the direction
self.direction = direction
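#Quick illustration (extra, not a numbered step): each grow() call above appends one visible
#cell plus one invisible separator cell, so the body list grows by two entries per call.
#The game loop never calls this helper; it exists only as a demo.
def demoSnakeGrowth():
    demo_snake = Snake(300, 300)
    print(len(demo_snake.bodyStack))  #prints 2: the head plus one separator cell
    demo_snake.grow()
    print(len(demo_snake.bodyStack))  #prints 4 after a single grow() call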
#STEP 10
#Next we're going to check for collisions with the food
#So lets create a function that takes in postionA, a size, positionB, and a size.
def checkCollision(positionA, sizeA, positionB,sizeB):
#We're going to check if positionA's x is less than positionB's x + sizeB
#and if positionA's x + sizeA is greater than positionB's x
#and if positionA's y is less than positionB's y + sizeB
#and if positionA's y + sizeA is greater than positionB's y
#If all of those are true then return true, if not then return false
if(positionA.x < positionB.x+sizeB and positionA.x + sizeA > positionB.x and positionA.y < positionB.y + sizeB and positionA.y + sizeA > positionB.y):
return True
return False
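#A tiny illustration (extra, not a numbered step) of the overlap test above, using throwaway
#cells: two 10 pixel boxes whose x positions differ by 5 overlap, while boxes 20 pixels apart
#do not. The game never calls this helper; it exists only as a demo.
def demoCheckCollision():
    a = Cell(100, 100)
    b = Cell(105, 100)
    c = Cell(120, 100)
    print(checkCollision(a, SNAKE_BLOCK_SIZE, b, SNAKE_BLOCK_SIZE))  #True, the boxes overlap
    print(checkCollision(a, SNAKE_BLOCK_SIZE, c, SNAKE_BLOCK_SIZE))  #False, there is a gap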
#STEP 13
#Lets write a function to check for the snake crashing into the sides of the screen
#we're going to have the parameters of positionA and a sizeA
def crashing(positionA, sizeA):
#We're going to check if positionAs x - the size a is less than 0
#or if the positionAs x + size a is greater than the screen width
#or if the positionAs y - size A is less than 0
#or if the positionAs y + size A is greater than the screen height
#if any of these are true then we return true
#if not then return false
if(positionA.x - sizeA < 0 or positionA.x + sizeA > SCREEN_WIDTH or positionA.y - sizeA < 0 or positionA.y + sizeA > SCREEN_HEIGHT):
return True
return False
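#Same idea for the wall check (illustrative values): a cell near the left edge pokes past
#x = 0 once its size is subtracted, so crashing() reports True, while a cell in the middle
#of the window does not. Never called by the game; demo only.
def demoCrashing():
    near_left_wall = Cell(5, 300)
    middle_of_screen = Cell(300, 300)
    print(crashing(near_left_wall, SNAKE_BLOCK_SIZE))    #True, since 5 - 10 < 0
    print(crashing(middle_of_screen, SNAKE_BLOCK_SIZE))  #False, safely inside the window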
#STEP 15
#We're now going to create the function to spawn a single food
#The parameters are going to be the food list, and the snakes x and y coordinates
def spawnSingleFood(food, sx, sy):
#We're going to clear out the list of food since we don't want to add more food to the list, we only want one.
del food[:]
#We're going to get a random x and y by using random uniform
#we're going to pass in the food size plus a constant 5 and the screen height/width - food size - 5
x = random.uniform(FOOD_SIZE + 5,SCREEN_WIDTH - FOOD_SIZE - 5)
y = random.uniform(FOOD_SIZE + 5, SCREEN_HEIGHT - FOOD_SIZE - 5)
#We're going to check that the food has not spawned on top of the snake already
#If the x and y coordinates are on the snakes head then we create new x and y coordinates
while(x-FOOD_SIZE == sx or x+FOOD_SIZE == sx and y-FOOD_SIZE == sy or y + FOOD_SIZE == sy):
x = random.uniform(20,SCREEN_WIDTH - 20)
y = random.uniform(20, SCREEN_HEIGHT - 20)
#we then add the food to the list by creating a new food with the state as 1 to show that its not eaten.
#We also pass in the x and y coordinates into the food constructor
food.append(Food(x,y,1))
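#Small aside (not a numbered step): after spawning, the list always holds exactly one food
#item, placed inside the window away from the edges and not on the snake's head. Demo only,
#never called by the game.
def demoSpawnSingleFood():
    demo_food = []
    spawnSingleFood(demo_food, 300, 300)
    print(len(demo_food))      #prints 1, the list holds exactly one food item
    print(demo_food[0].state)  #prints 1, meaning it has not been eaten yet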
#STEP 16
#Next lets draw the function to draw the score where we pass in a score
def drawScore(score):
#set up some variables to use the fonts we made earlier and render them
#we have to use the variable str(score) to make the score a string, remember you can make whatever color you want it
#you can also change that 1 to a 0 if you want
msg_score = score_number_font.render(str(score),1,pygame.Color("green"))
#we have to use screen.blit() to draw a rendered text surface onto the window
screen.blit(score_message, (SCREEN_WIDTH - text_font.size("Score")[0] - 60, 10))
screen.blit(msg_score, (SCREEN_WIDTH - 40, 10))
#STEP 18
def gameEnd():
msg = text_font.render("Game Over",1,pygame.Color("white"))
play_again = text_font.render("Play Again? Y/N",1,pygame.Color("green"))
screen.blit(msg,(SCREEN_WIDTH/2 - text_font.size("Game Over")[0]/2,SCREEN_HEIGHT/2))
screen.blit(play_again,(SCREEN_WIDTH/2 - text_font.size('Play Again? Y/N')[0]/2,SCREEN_HEIGHT/2+40))
pygame.display.flip()
pygame.display.update()
myKey = getPressedKey()
while(myKey != "exit"):
if(myKey == "yes"):
main()
elif(myKey == "no"):
break
myKey = getPressedKey()
clock.tick(FPS)
sys.exit()
main()
workshopSteps.py | #first we are going to import the libraries we need
#OS library is used for operating system functionalities such as reading a file or using software/hardware products.
#Math library is for math functions such as floor/ceiling
#Sys library gives access to system only functions such as exit()
#Random library allows us to create random number generators
#Pygame library allows us to create game applications along with other types of applications.
import pygame, os, math, sys, random
#We have to initialize the game modules that pygame gives us access to.
pygame.init()
#Lets create a title for our window screen, we have to do pygame.display to access the display of the game.
pygame.display.set_caption("Theta Tau Snek Workshop")
#now lets initialize a random number generator using the random library
random.seed()
#Lets set up some constant variables
#Gonna need the movement speed of the snake, I put caps for constant values but you don't need to. Also you can't have
#any spaces in your variable names
MOVEMENT_SPEED = .4
#Gonna need a block size
SNAKE_BLOCK_SIZE = 10
#Gonna need some food size
FOOD_SIZE = SNAKE_BLOCK_SIZE
#Going to use a no color block to separate the blocks
SEPERATION = 10
#Lets set up our screen height and width
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 600
#Some frames per second
FPS = 25
#Going to set up a dictionary. A dictionary just lets you have keys and assign values to those keys
KEY = {"UP":1, "DOWN": 2, "LEFT": 3, "RIGHT": 4}
#Going to initialize the screen now. Remember we have to access the pygame display using the dot operator.
#set_mode takes in the resolution as a pair of width and height, then a flags argument.
#HWSURFACE asks pygame to use a hardware surface on the video card to make it faster.
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), pygame.HWSURFACE)
#Lets create some GUI Constants
text_font = pygame.font.Font(None, 30)
score_number_font = pygame.font.Font(None, 30)
#We're going to make the score text now
#render takes in text, a boolean if you want smooth edges or not, a pygame color, and a background if wanted
score_message = text_font.render("Score", 0, pygame.Color("green"))
#Lets create a background color
background_color = pygame.Color(100,100,100)
#Now lets create a game clock variable
clock = pygame.time.Clock()
#STEP 1
#Lets create our main first so we can start seeing a screen
#we have to call this to get the events on the pygame window or else it doesn't know what to do and crashes
#lets fill the screen we initialized earlier for the window with the color we chose
#we want to update the screen so we call pygame.display.update
#you'll notice we can't exit the window, so go into your terminal or command window and press Ctrl+C
#we have to get the type of events that pygame can take in
#STEP 3
#lets change the line of code from pygame.event.get to call our function instead.
#lets make a variable to keep track of the key pressed. Now you can exit the window formally.
def main():
#STEP 17
#lets create a new snake object and remember we pass in the starting location. You can put any values in there as long
#as they're within the screen size
snake = Snake(SCREEN_WIDTH/2,SCREEN_HEIGHT/2)
#Lets get the snake to move right away
snake.move()
#This is going to be our variable to end the game
end = 0
#Create a variable to have your snake start out with this many body cells. I'm going to choose two.
startingCells = 2
#Create a variable to count how many cells we create
count = 0
#Create a variable to keep track of whether the food has been eaten
eaten_food = 0
#Create a variable to keep track of the score
score = 0
#Lets create a while loop to let the snake grow and have it move
#we're going to use the cells variable we made as the condition
while(count < startingCells):
snake.grow()
snake.move()
count += 1
#Lets create a list of food that takes in a food object with random coordinates and state equal to 1
food = [Food(random.randint(FOOD_SIZE,SCREEN_WIDTH), random.randint(FOOD_SIZE, SCREEN_HEIGHT), 1)]
#lets spawn a food by passing in the food list and snake coordinates
spawnSingleFood(food, snake.x, snake.y)
while end != 1:
#Lets comment these out now and start from the beginning now for the loop that will run endlessly
#keyPressed = getPressedKey()
#screen.fill(background_color)
#pygame.display.update()
#Going to have the pygame clock tick as fast as the FPS
clock.tick(FPS)
#Create the keypressed variable again and call the function getPressedKey
keyPressed = getPressedKey()
#We check if the keyPressed value is equal to "exit" and if so we make end equal to 1
if(keyPressed == "exit"):
end = 1
#lets check if the snake is crashing into itself and if it is end the game
if(snake.checkCrash() == True):
gameEnd()
#lets check if the snake is crashing into the edges and if its true end the game
if(crashing(snake.bodyStack[0], SNAKE_BLOCK_SIZE) == True):
gameEnd()
#we're going to check for all the food and if the food is not eaten then check for the snake colliding with food
#if the snake does collide then we make the snake grow make sure the food is set to state 0 for eaten
#and we add to the score and make eaten food equal to true
for f in food:
if(f.state == 1):
if(checkCollision(snake.bodyStack[0],SNAKE_BLOCK_SIZE, f, FOOD_SIZE) == True):
snake.grow()
f.state = 0
score += 5
eaten_food = True
#if the snake has eaten then we spawn another food and changed eaten food to false
if(eaten_food == True):
spawnSingleFood(food, snake.bodyStack[0].x, snake.bodyStack[0].y)
eaten_food = False
#If a key was pressed we try to change the direction of the snake then we move again
if(keyPressed):
snake.changeDirection(keyPressed)
snake.move()
#We fill the screen in again with the color
screen.fill(background_color)
#we check for all the food and if the food has not been eaten then we draw it on the screen
for f in food:
if(f.state == 1):
f.draw(screen)
#lets draw the snake on the screen
snake.draw(screen)
#lets draw the score
drawScore(score)
#We call pygame.display.flip to layer the screen
pygame.display.flip()
#we update the display
pygame.display.update()
#STEP 2
#lets create a function to access these events
#we're going to write a for loop to check all the events that can happen in the window
#We're going to create a bunch of if statements and else if statements
#We're going to check the event type and if that event type is equal to either
#key up, key down, key right, key left, key escape, key y, key n, or quit then we are going to return
#either the value of the respective key we defined earlier, a string of what to do, or exit the system
def getPressedKey():
for event in pygame.event.get():
if(event.type == pygame.KEYDOWN):
if(event.key == pygame.K_UP):
return KEY["UP"]
elif(event.key == pygame.K_DOWN):
return KEY["DOWN"]
elif(event.key == pygame.K_RIGHT):
return KEY["RIGHT"]
elif(event.key == pygame.K_LEFT):
return KEY["LEFT"]
elif(event.key == pygame.K_ESCAPE):
return "exit"
elif(event.key == pygame.K_y):
return "yes"
elif(event.key == pygame.K_n):
return "no"
if(event.type == pygame.QUIT):
sys.exit()
#STEP 4
#Alright lets create the class for the basic snake cell which the snake will be composed of
#We have to create an init function as this will be called when we want to create a new cell.
#It has two underscores on each side and this is necessary.
#The parameters are going to be self, an x value, and a y value
#Then we set the self.x and self.y to the respective x and y values.
#Then we are going to set the direction to up by using the dictionary value of up
#Then we can set the color of the cell, this doesn't matter because we will be changing the color later
#But we do need to create a color for it so go wild with whatever color you want.
class Cell:
def __init__(self,x,y):
self.x = x
self.y = y
self.direction = KEY["UP"]
self.color = "white"
#STEP 5
#Now lets create the food class so we can create food for the snake to eat
#Again the parameters will be the self, x value, y value, and a state which is just an integer
#We're going to set the values like we did in the cell class, but now we don't have a direction
#We have a state and we need a color, you can choose any color you want your food to be, but this color will not change later
class Food:
def __init__(self,x,y,state):
self.x = x
self.y = y
self.state = state
self.color = pygame.Color("red")
#STEP 14 | #We're going to write a function to draw the food
#parameters are going to be self and a screen
#lets draw a rect using the screen the self.color and the x, y coordinates and the food size and a 0 width
def draw(self,screen):
pygame.draw.rect(screen, self.color, (self.x, self.y, FOOD_SIZE, FOOD_SIZE), 0)
#STEP 6
#Lets create the snake class
#Remember to initialize it we have to create this init function.
#We have to have the parameters of self, an x value, and a y value
class Snake:
def __init__(self,x,y):
#So we initialize the snake's x and y location to the x and y passed in and
#set the direction of the snake to right
self.x = x
self.y = y
self.direction = KEY["RIGHT"]
#We're going to create a list to hold all the snakes body
self.bodyStack = []
#adding the first snake cell to the list of cells
self.bodyStack.append(self)
#We're going to create an end cell to separate the body cells, we create the end cell to the left of the head
endCell = Cell(x - SEPERATION,y)
#We dont want a color so we don't show it on the graph
endCell.color = "NULL"
#Make the end cell the same direction as the head.
endCell.direction = KEY["RIGHT"]
#Then we add the cell to body list
self.bodyStack.append(endCell)
#STEP 7
#Lets create a function to move the snake
def move(self):
#So we're going to calculate the length of the snake - 1, since arrays start from 0 the last index will be
#the length of the snake - 1
lastCell = len(self.bodyStack) - 1
#We're going to write a while loop to iterate through all the snakes body cells from the end to the front of the snake.
#While we are not at the head of the snake lets go through every cell and make sure it is going in the direction of the
#previous cells and move that cell to the location of the cell in front of it. Then we decrease the cell index we are on.
while(lastCell != 0):
self.bodyStack[lastCell].direction = self.bodyStack[lastCell - 1].direction
self.bodyStack[lastCell].x = self.bodyStack[lastCell - 1].x
self.bodyStack[lastCell].y = self.bodyStack[lastCell - 1].y
lastCell -= 1
#we check if the body is less than 2 so we know its just the head, but if not
#then we pop out the head of the list
if(len(self.bodyStack) < 2):
headCell = self
else:
headCell = self.bodyStack.pop(lastCell)
#Now lets create some if-else statements to check if a new direction was inputted.
#We have to access the snakes bodystack and check the new direction
#If the direction is up then we set the y coordinate of the head above the second cell
#If the direction is right then we set the x coordinate of the head to the right of the second cell
#If the direction is down then we set the y coordinate of the head below the second cell
#If the direction is left then we set the x coordinate of the head to the left of the second cell
#I used fps * movement speed to make it relative to how fast you want your snake to go
if(self.bodyStack[0].direction == KEY["UP"]):
headCell.y = self.bodyStack[0].y - (FPS * MOVEMENT_SPEED)
elif(self.bodyStack[0].direction == KEY["RIGHT"]):
headCell.x = self.bodyStack[0].x + (FPS * MOVEMENT_SPEED)
elif(self.bodyStack[0].direction == KEY["DOWN"]):
headCell.y = self.bodyStack[0].y + (FPS * MOVEMENT_SPEED)
elif(self.bodyStack[0].direction == KEY["LEFT"]):
headCell.x = self.bodyStack[0].x - (FPS * MOVEMENT_SPEED)
#We then insert the head back into the front of the snakes list
self.bodyStack.insert(0, headCell)
#STEP 8
#So we can create a grow function so when your snake eats food it can grow
def grow(self):
#Again we're going to get the length of the snake and subtract 1 so we can access the array
lastCell = len(self.bodyStack) - 1
#Let's check the last cell's direction and create a newCell, but also an endCell to add separation.
#We check if the direction is up,down,right, or left and depending on which direction it is we
#create the new cell and end cell in the right place.
if(self.bodyStack[lastCell].direction == KEY["UP"]):
newCell = Cell(self.bodyStack[lastCell].x, self.bodyStack[lastCell].y + SNAKE_BLOCK_SIZE)
endCell = Cell(newCell.x, newCell.y + SEPERATION)
if(self.bodyStack[lastCell].direction == KEY["DOWN"]):
newCell = Cell(self.bodyStack[lastCell].x, self.bodyStack[lastCell].y - SNAKE_BLOCK_SIZE)
endCell = Cell(newCell.x, newCell.y - SEPERATION)
if(self.bodyStack[lastCell].direction == KEY["RIGHT"]):
newCell = Cell(self.bodyStack[lastCell].x - SNAKE_BLOCK_SIZE, self.bodyStack[lastCell].y)
endCell = Cell(newCell.x - SEPERATION, newCell.y)
if(self.bodyStack[lastCell].direction == KEY["LEFT"]):
newCell = Cell(self.bodyStack[lastCell].x + SNAKE_BLOCK_SIZE, self.bodyStack[lastCell].y)
endCell = Cell(newCell.x + SEPERATION, newCell.y)
#We're going to set the color to NULL for endcells so we don't see it in the window
endCell.color = "NULL"
#We then append the cells to the body list of the snake
self.bodyStack.append(newCell)
self.bodyStack.append(endCell)
#STEP 9
#We're going to define a draw function to draw our snake.
#We're going to have the parameters as self and a screen we can draw on
def draw(self, screen):
#We're going to call pygame.draw.rect which takes in a screen, a color for what we want our cell to be
#then a tuple of (an x position, a y position, a size, and a size) and then a width
#We're going to call this first for our head cell
pygame.draw.rect(screen, pygame.Color("gold"), (self.bodyStack[0].x, self.bodyStack[0].y, SNAKE_BLOCK_SIZE, SNAKE_BLOCK_SIZE), 0)
#starting with our first cell
cellCount = 1
#while loop to go through all the cells in the body
while(cellCount < len(self.bodyStack)):
#We're going to check if the cells color is null and if it is we're going to increase the count and continue
#to the next iteration
if(self.bodyStack[cellCount].color == "NULL"):
cellCount += 1
continue
#if the color is not null we're going to draw a rectangle again using the cells positions, the screen, and any color you want
#Then increase the cell count.
pygame.draw.rect(screen, pygame.Color("green"), (self.bodyStack[cellCount].x, self.bodyStack[cellCount].y, SNAKE_BLOCK_SIZE, SNAKE_BLOCK_SIZE), 0)
cellCount += 1
#STEP 11
#Now we're going to write a function to check for self crashing
def checkCrash(self):
#Going to start the count at 1 so we can go through the snakes body cells again
cellCount = 1
#Lets write a while loop to go through the snakes body cells
while(cellCount < len(self.bodyStack)-1):
#Lets write an If statement which uses the collision method we created earlier to check if the head is crashing with any of the cells
#However we only want to check if the head is crashing with the cells that are colored and not the separation cells.
#If they are colliding then we return True
#let's increase the cellCount by 1 every time
if(checkCollision(self.bodyStack[0], SNAKE_BLOCK_SIZE, self.bodyStack[cellCount], SNAKE_BLOCK_SIZE) == True and self.bodyStack[cellCount].color != "NULL"):
return True
cellCount += 1
#if we get out of the while loop this means we should return false and that the snake is not crashing into itself
return False
#STEP 12
#We're going to create a function to change directions
#The parameters are going to be self and a direction
def changeDirection(self, direction):
#We're going to check if the current direction is up or down and the new direction is the opposite; if it is, we aren't going to change directions
if(self.direction == KEY["UP"] and direction == KEY["DOWN"] or self.direction == KEY["DOWN"] and direction == KEY["UP"]):
return
#going to do the same for right and left
if(self.direction == KEY["RIGHT"] and direction == KEY["LEFT"] or self.direction == KEY["LEFT"] and direction == KEY["RIGHT"]):
return
#if the new direction isn't opposite of the current direction then we change the direction
self.direction = direction
#STEP 10
#Next we're going to check for collisions with the food
#So let's create a function that takes in positionA, a size, positionB, and a size.
def checkCollision(positionA, sizeA, positionB,sizeB):
#We're going to check if positionA's x is less than positionB's x + sizeB
#and if positionA's x + sizeA is greater than positionB's x
#and if positionA's y is less than positionB's y + sizeB
#and if positionA's y + sizeA is greater than positionB's y
#If all those are true then return True, if not then return False
if(positionA.x < positionB.x+sizeB and positionA.x + sizeA > positionB.x and positionA.y < positionB.y + sizeB and positionA.y + sizeA > positionB.y):
return True
return False
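#As a quick sanity check of this axis-aligned bounding box test (illustration only, assuming the
#Cell class from STEP 4): two 10x10 boxes anchored at (0,0) and (5,5) overlap, so
#checkCollision(Cell(0,0), 10, Cell(5,5), 10) returns True, while boxes at (0,0) and (20,20)
#do not overlap and the same call with Cell(20,20) returns False.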
#STEP 13
#Lets write a function to check for the snake crashing into the sides of the screen
#we're going to have the parameters of positionA and a size A
def crashing(positionA, sizeA):
#We're going to check if positionAs x - the size a is less than 0
#or if the positionAs x + size a is greater than the screen width
#or if the positionAs y - size A is less than 0
#or if the positionAs y + size A is greater than the screen height
#if any of these are true then we return true
#if not then return false
if(positionA.x - sizeA < 0 or positionA.x + sizeA > SCREEN_WIDTH or positionA.y - sizeA < 0 or positionA.y + sizeA > SCREEN_HEIGHT):
return True
return False
#STEP 15
#We're now going to create the function to spawn a single food
#The parameters are going to be the food list, and the snakes x and y coordinates
def spawnSingleFood(food, sx, sy):
#We're going to delete the contents of the food list since we don't want to add more food to the list, we only want one.
del food[:]
#We're going to get a random x and y by using random uniform
#we're going to pass in the food size plus a constant 5 and the screen height/width - food size - 5
x = random.uniform(FOOD_SIZE + 5,SCREEN_WIDTH - FOOD_SIZE - 5)
y = random.uniform(FOOD_SIZE + 5, SCREEN_HEIGHT - FOOD_SIZE - 5)
#We're going to check that the food has not spawned on top of the snake already
#If the new coordinates land within one FOOD_SIZE of the snake's head in both x and y then we create new x and y coordinates
while(x - FOOD_SIZE <= sx <= x + FOOD_SIZE and y - FOOD_SIZE <= sy <= y + FOOD_SIZE):
x = random.uniform(20,SCREEN_WIDTH - 20)
y = random.uniform(20, SCREEN_HEIGHT - 20)
#we then add the food to the list by creating a new food with the state as 1 to show that its not eaten.
#We also pass in the x and y coordinates into the food constructor
food.append(Food(x,y,1))
#STEP 16
#Next let's write the function to draw the score where we pass in a score
def drawScore(score):
#set up some variables to use the fonts we made earlier and render them
#we have to use the variable str(score) to make the score a string, remember you can make whatever color you want it
#you can also change that 1 to a 0 if you want
msg_score = score_number_font.render(str(score),1,pygame.Color("green"))
#we have to use screen.blit() to draw a rendered text surface onto the window
screen.blit(score_message, (SCREEN_WIDTH - text_font.size("Score")[0] - 60, 10))
screen.blit(msg_score, (SCREEN_WIDTH - 40, 10))
#STEP 18
def gameEnd():
msg = text_font.render("Game Over",1,pygame.Color("white"))
play_again = text_font.render("Play Again? Y/N",1,pygame.Color("green"))
screen.blit(msg,(SCREEN_WIDTH/2 - text_font.size("Game Over")[0]/2,SCREEN_HEIGHT/2))
screen.blit(play_again,(SCREEN_WIDTH/2 - text_font.size('Play Again? Y/N')[0]/2,SCREEN_HEIGHT/2+40))
pygame.display.flip()
pygame.display.update()
myKey = getPressedKey()
while(myKey != "exit"):
if(myKey == "yes"):
main()
elif(myKey == "no"):
break
myKey = getPressedKey()
clock.tick(FPS)
sys.exit()
main() | random_line_split |
|
workshopSteps.py | #first we are going to import the libraries we need
#OS library is used for operating system functionalities such as reading a file or using software/hardware products.
#Math library is for math functions such as floor/ceiling
#Sys library gives access to system only functions such as exit()
#Random library allows us to create random number generators
#Pygame library allows us to create game applications along with other types of applications.
import pygame, os, math, sys, random
#We have to initialize the game modules that pygame gives us access to.
pygame.init()
#Lets create a title for our window screen, we have to do pygame.display to access the display of the game.
pygame.display.set_caption("Theta Tau Snek Workshop")
#now lets initialize a random number generator using the random library
random.seed()
#Lets set up some constant variables
#Gonna need the movement speed of the snake. I put caps for constant values but you don't need to. Also you can't have
#any spaces in your variable names
MOVEMENT_SPEED = .4
#Gonna need a block size
SNAKE_BLOCK_SIZE = 10
#Gonna need some food size
FOOD_SIZE = SNAKE_BLOCK_SIZE
#Going to use a no-color block to separate the blocks
SEPERATION = 10
#Lets set up our screen height and width
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 600
#Some frames per second
FPS = 25
#Going to set up a dictionary. A dictionary just lets you have keys and assign values to those keys
KEY = {"UP":1, "DOWN": 2, "LEFT": 3, "RIGHT": 4}
#Going to initialize the screen now. Remember we have to access the pygame display using the dot operator.
#set_mode takes in the resolution as a pair of width and height, then a surface flag.
#HWSURFACE uses the video card on the hardware to make it faster.
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), pygame.HWSURFACE)
#Lets create some GUI Constants
text_font = pygame.font.Font(None, 30)
score_number_font = pygame.font.Font(None, 30)
#We're going to make the score text now
#render takes in text, a boolean if you want smooth edges or not, a pygame color, and a background if wanted
score_message = text_font.render("Score", 0, pygame.Color("green"))
#Lets create a background color
background_color = pygame.Color(100,100,100)
#Now lets create a game clock variable
clock = pygame.time.Clock()
#STEP 1
#Lets create our main first so we can start seeing a screen
#we have to call this to get the events on the pygame window or else it doesn't know what to do and crashes
#lets fill the screen we initialized earlier for the window with the color we chose
#we want to update the screen so we call pygame.display.update
#you'll notice we can't exit the window, so go into your terminal or command window and press Ctrl+C
#we have to get the type of events that pygame can take in
#STEP 3
#lets change the line of code of pygame.event.get we had to call our function.
#lets make a variable to keep track of the key pressed. Now you can exit the window formally.
def main():
#STEP 17
#lets create a new snake object and remember we pass in the starting location. You can put any values in there as long
#as they're within the screen size
snake = Snake(SCREEN_WIDTH/2,SCREEN_HEIGHT/2)
#Lets get the snake to move right away
snake.move()
#This is going to be our variable to end the game
end = 0
#Create a variable to have your snake start out with this many body cells. I'm going to choose two.
startingCells = 2
#Create a variable to count how many cells we create
count = 0
#Create a variable to keep track if the food has been eaten
eaten_food = 0
#Create a variable to keep track of the score
score = 0
#Lets create a while loop to let the snake grow and have it move
#we're going to use the cells variable we made as the condition
while(count < startingCells):
snake.grow()
snake.move()
count += 1
#Lets create a list of food that takes in a food object with random coordinates and state equal to 1
food = [Food(random.randint(FOOD_SIZE,SCREEN_WIDTH), random.randint(FOOD_SIZE, SCREEN_HEIGHT), 1)]
#lets spawn a food by passing in the food list and snake coordinates
spawnSingleFood(food, snake.x, snake.y)
while end != 1:
#Lets comment these out now and start from the beginning now for the loop that will run endlessly
#keyPressed = getPressedKey()
#screen.fill(background_color)
#pygame.display.update()
#Going to have the pygame clock tick as fast as the FPS
clock.tick(FPS)
#Create the keypressed variable again and call the function getPressedKey
keyPressed = getPressedKey()
#We check if the keyPressed value is equal to "exit" and if so we make end equal to 1
if(keyPressed == "exit"):
end = 1
#lets check if the snake is crashing into itself and if it is end the game
if(snake.checkCrash() == True):
gameEnd()
#lets check if the snake is crashing into the edges and if its true end the game
if(crashing(snake.bodyStack[0], SNAKE_BLOCK_SIZE) == True):
gameEnd()
#we're going to check for all the food and if the food is not eaten then check for the snake colliding with food
#if the snake does collide then we make the snake grow make sure the food is set to state 0 for eaten
#and we add to the score and make eaten food equal to true
for f in food:
if(f.state == 1):
if(checkCollision(snake.bodyStack[0],SNAKE_BLOCK_SIZE, f, FOOD_SIZE) == True):
snake.grow()
f.state = 0
score += 5
eaten_food = True
#if the snake has eaten then we spawn another food and changed eaten food to false
if(eaten_food == True):
|
#If a key was pressed we try to change the direction of the snake then we move again
if(keyPressed):
snake.changeDirection(keyPressed)
snake.move()
#We fill the screen in again with the color
screen.fill(background_color)
#we check for all the food and if the food has not been eaten then we draw it on the screen
for f in food:
if(f.state == 1):
f.draw(screen)
#lets draw the snake on the screen
snake.draw(screen)
#lets draw the score
drawScore(score)
#We call pygame.display.flip to layer the screen
pygame.display.flip()
#we update the display
pygame.display.update()
#STEP 2
#lets create a function to access these events
#we're going to write a for loop to check all the events that can happen in the window
#We're going to create a bunch of if statements and else if statements
#We're going to check the event type and if that event type is equal to either
#key up, key down, key right, key left, key escape, key y, key n, or quit then we are going to return
#either the value of the respective key we defined earlier, a string of what to do, or exit the system
def getPressedKey():
for event in pygame.event.get():
if(event.type == pygame.KEYDOWN):
if(event.key == pygame.K_UP):
return KEY["UP"]
elif(event.key == pygame.K_DOWN):
return KEY["DOWN"]
elif(event.key == pygame.K_RIGHT):
return KEY["RIGHT"]
elif(event.key == pygame.K_LEFT):
return KEY["LEFT"]
elif(event.key == pygame.K_ESCAPE):
return "exit"
elif(event.key == pygame.K_y):
return "yes"
elif(event.key == pygame.K_n):
return "no"
if(event.type == pygame.QUIT):
sys.exit()
#STEP 4
#Alright lets create the class for the basic snake cell which the snake will be composed of
#We have to create an init function as this will be called when we want to create a new cell.
#It has two underscores on each side and this is necessary.
#The parameters are going to be self, an x value, and a y value
#Then we set the self.x and self.y to the respective x and y values.
#Then we are going to set the direction to up by using the dictionary value of up
#Then we can set the color of the cell, this doesn't matter because we will be changing the color later
#But we do need to create a color for it so go wild with whatever color you want.
class Cell:
def __init__(self,x,y):
self.x = x
self.y = y
self.direction = KEY["UP"]
self.color = "white"
#STEP 5
#Now lets create the food class so we can create food for the snake to eat
#Again the parameters will be the self, x value, y value, and a state which is just an integer
#We're going to set the values like we did in the cell class, but now we don't have a direction
#We have a state and we need a color, you can choose any color you want your food to be, but this color will not change later
class Food:
def __init__(self,x,y,state):
self.x = x
self.y = y
self.state = state
self.color = pygame.Color("red")
#STEP 14
#We're going to write a function to draw the food
#parameters are going to be self and a screen
#lets draw a rect using the screen the self.color and the x, y coordinates and the food size and a 0 width
def draw(self,screen):
pygame.draw.rect(screen, self.color, (self.x, self.y, FOOD_SIZE, FOOD_SIZE), 0)
#STEP 6
#Lets create the snake class
#Remember to initialize it we have to create this init function.
#We have to have the parameters of self, an x value, and a y value
class Snake:
def __init__(self,x,y):
#So we initialize the snake's x and y location to the x and y passed in and
#set the direction of the snake to right
self.x = x
self.y = y
self.direction = KEY["RIGHT"]
#We're going to create a list to hold all the snakes body
self.bodyStack = []
#adding the first snake cell to the list of cells
self.bodyStack.append(self)
#We're going to create an end cell to separate the body cells, we create the end cell to the left of the head
endCell = Cell(x - SEPERATION,y)
#We dont want a color so we don't show it on the graph
endCell.color = "NULL"
#Make the end cell the same direction as the head.
endCell.direction = KEY["RIGHT"]
#Then we add the cell to body list
self.bodyStack.append(endCell)
#STEP 7
#Lets create a function to move the snake
def move(self):
#So we're going to calculate the length of the snake - 1, since arrays start from 0 the last index will be
#the length of the snake - 1
lastCell = len(self.bodyStack) - 1
#We're going to write a while loop to iterate through all the snakes body cells from the end to the front of the snake.
#While we are not at the head of the snake lets go through every cell and make sure it is going in the direction of the
#previous cells and move that cell to the location of the cell in front of it. Then we decrease the cell index we are on.
while(lastCell != 0):
self.bodyStack[lastCell].direction = self.bodyStack[lastCell - 1].direction
self.bodyStack[lastCell].x = self.bodyStack[lastCell - 1].x
self.bodyStack[lastCell].y = self.bodyStack[lastCell - 1].y
lastCell -= 1
#we check if the body is less than 2 so we know its just the head, but if not
#then we pop out the head of the list
if(len(self.bodyStack) < 2):
headCell = self
else:
headCell = self.bodyStack.pop(lastCell)
#Now lets create some if-else statements to check if a new direction was inputted.
#We have to access the snakes bodystack and check the new direction
#If the direction is up then we set the y coordinate of the head above the second cell
#If the direction is right then we set the x coordinate of the head to the right of the second cell
#If the direction is down then we set the y coordinate of the head below the second cell
#If the direction is left then we set the x coordinate of the head to the left of the second cell
#I used fps * movement speed to make it relative to how fast you want your snake to go
if(self.bodyStack[0].direction == KEY["UP"]):
headCell.y = self.bodyStack[0].y - (FPS * MOVEMENT_SPEED)
elif(self.bodyStack[0].direction == KEY["RIGHT"]):
headCell.x = self.bodyStack[0].x + (FPS * MOVEMENT_SPEED)
elif(self.bodyStack[0].direction == KEY["DOWN"]):
headCell.y = self.bodyStack[0].y + (FPS * MOVEMENT_SPEED)
elif(self.bodyStack[0].direction == KEY["LEFT"]):
headCell.x = self.bodyStack[0].x - (FPS * MOVEMENT_SPEED)
#We then insert the head back into the front of the snakes list
self.bodyStack.insert(0, headCell)
#STEP 8
#So we can create a grow function so when your snake eats food it can grow
def grow(self):
#Again we're going to get the length of the snake and subtract 1 so we can access the array
lastCell = len(self.bodyStack) - 1
#Let's check the last cell's direction and create a newCell, but also an endCell to add separation.
#We check if the direction is up,down,right, or left and depending on which direction it is we
#create the new cell and end cell in the right place.
if(self.bodyStack[lastCell].direction == KEY["UP"]):
newCell = Cell(self.bodyStack[lastCell].x, self.bodyStack[lastCell].y + SNAKE_BLOCK_SIZE)
endCell = Cell(newCell.x, newCell.y + SEPERATION)
if(self.bodyStack[lastCell].direction == KEY["DOWN"]):
newCell = Cell(self.bodyStack[lastCell].x, self.bodyStack[lastCell].y - SNAKE_BLOCK_SIZE)
endCell = Cell(newCell.x, newCell.y - SEPERATION)
if(self.bodyStack[lastCell].direction == KEY["RIGHT"]):
newCell = Cell(self.bodyStack[lastCell].x - SNAKE_BLOCK_SIZE, self.bodyStack[lastCell].y)
endCell = Cell(newCell.x - SEPERATION, newCell.y)
if(self.bodyStack[lastCell].direction == KEY["LEFT"]):
newCell = Cell(self.bodyStack[lastCell].x + SNAKE_BLOCK_SIZE, self.bodyStack[lastCell].y)
endCell = Cell(newCell.x + SEPERATION, newCell.y)
#We're going to set the color to NULL for endcells so we don't see it in the window
endCell.color = "NULL"
#We then append the cells to the body list of the snake
self.bodyStack.append(newCell)
self.bodyStack.append(endCell)
#STEP 9
#We're going to define a draw function to draw our snake.
#We're going to have the parameters as self and a screen we can draw on
def draw(self, screen):
#We're going to call pygame.draw.rect which takes in a screen, a color for what we want our cell to be
#then a tuple of (an x position, a y position, a size, and a size) and then a width
#We're going to call this first for our head cell
pygame.draw.rect(screen, pygame.Color("gold"), (self.bodyStack[0].x, self.bodyStack[0].y, SNAKE_BLOCK_SIZE, SNAKE_BLOCK_SIZE), 0)
#starting with our first cell
cellCount = 1
#while loop to go through all the cells in the body
while(cellCount < len(self.bodyStack)):
#We're going to check if the cells color is null and if it is we're going to increase the count and continue
#to the next iteration
if(self.bodyStack[cellCount].color == "NULL"):
cellCount += 1
continue
#if the color is not null we're going to draw a rectangle again using the cells positions, the screen, and any color you want
#Then increase the cell count.
pygame.draw.rect(screen, pygame.Color("green"), (self.bodyStack[cellCount].x, self.bodyStack[cellCount].y, SNAKE_BLOCK_SIZE, SNAKE_BLOCK_SIZE), 0)
cellCount += 1
#STEP 11
#Now we're going to write a function to check for self crashing
def checkCrash(self):
#Going to start the count at 1 so we can go through the snakes body cells again
cellCount = 1
#Lets write a while loop to go through the snakes body cells
while(cellCount < len(self.bodyStack)-1):
#Lets write an If statement which uses the collision method we created earlier to check if the head is crashing with any of the cells
#However we only want to check if the head is crashing with the cells that are colored and not the separation cells.
#If they are colliding then we return True
#let's increase the cellCount by 1 every time
if(checkCollision(self.bodyStack[0], SNAKE_BLOCK_SIZE, self.bodyStack[cellCount], SNAKE_BLOCK_SIZE) == True and self.bodyStack[cellCount].color != "NULL"):
return True
cellCount += 1
#if we get out of the while loop this means we should return false and that the snake is not crashing into itself
return False
#STEP 12
#We're going to create a function to change directions
#The parameters are going to be self and a direction
def changeDirection(self, direction):
#We're going to check if the current direction is up or down and the new direction is the opposite; if it is, we aren't going to change directions
if(self.direction == KEY["UP"] and direction == KEY["DOWN"] or self.direction == KEY["DOWN"] and direction == KEY["UP"]):
return
#going to do the same for right and left
if(self.direction == KEY["RIGHT"] and direction == KEY["LEFT"] or self.direction == KEY["LEFT"] and direction == KEY["RIGHT"]):
return
#if the new direction isn't opposite of the current direction then we change the direction
self.direction = direction
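#For example, if the snake is currently heading RIGHT and the player presses LEFT, the second
#check above returns early and self.direction stays RIGHT; pressing UP or DOWN instead falls
#through and updates the direction as usual.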
#STEP 10
#Next we're going to check for collisions with the food
#So let's create a function that takes in positionA, a size, positionB, and a size.
def checkCollision(positionA, sizeA, positionB,sizeB):
#We're going to check if positionA's x is less than positionB's x + sizeB
#and if positionA's x + sizeA is greater than positionB's x
#and if positionA's y is less than positionB's y + sizeB
#and if positionA's y + sizeA is greater than positionB's y
#If all those are true then return True, if not then return False
if(positionA.x < positionB.x+sizeB and positionA.x + sizeA > positionB.x and positionA.y < positionB.y + sizeB and positionA.y + sizeA > positionB.y):
return True
return False
#STEP 13
#Lets write a function to check for the snake crashing into the sides of the screen
#we're going to have the parameters of positionA and a size A
def crashing(positionA, sizeA):
#We're going to check if positionAs x - the size a is less than 0
#or if the positionAs x + size a is greater than the screen width
#or if the positionAs y - size A is less than 0
#or if the positionAs y + size A is greater than the screen height
#if any of these are true then we return true
#if not then return false
if(positionA.x - sizeA < 0 or positionA.x + sizeA > SCREEN_WIDTH or positionA.y - sizeA < 0 or positionA.y + sizeA > SCREEN_HEIGHT):
return True
return False
#STEP 15
#We're now going to create the function to spawn a single food
#The parameters are going to be the food list, and the snakes x and y coordinates
def spawnSingleFood(food, sx, sy):
#We're going to delete the contents of the food list since we don't want to add more food to the list, we only want one.
del food[:]
#We're going to get a random x and y by using random uniform
#we're going to pass in the food size plus a constant 5 and the screen height/width - food size - 5
x = random.uniform(FOOD_SIZE + 5,SCREEN_WIDTH - FOOD_SIZE - 5)
y = random.uniform(FOOD_SIZE + 5, SCREEN_HEIGHT - FOOD_SIZE - 5)
#We're going to check that the food has not spawned on top of the snake already
#If the new coordinates land within one FOOD_SIZE of the snake's head in both x and y then we create new x and y coordinates
while(x - FOOD_SIZE <= sx <= x + FOOD_SIZE and y - FOOD_SIZE <= sy <= y + FOOD_SIZE):
x = random.uniform(20,SCREEN_WIDTH - 20)
y = random.uniform(20, SCREEN_HEIGHT - 20)
#we then add the food to the list by creating a new food with the state as 1 to show that its not eaten.
#We also pass in the x and y coordinates into the food constructor
food.append(Food(x,y,1))
#STEP 16
#Next let's write the function to draw the score where we pass in a score
def drawScore(score):
#set up some variables to use the fonts we made earlier and render them
#we have to use the variable str(score) to make the score a string, remember you can make whatever color you want it
#you can also change that 1 to a 0 if you want
msg_score = score_number_font.render(str(score),1,pygame.Color("green"))
#we have to use screen.blit() to draw a rendered text surface onto the window
screen.blit(score_message, (SCREEN_WIDTH - text_font.size("Score")[0] - 60, 10))
screen.blit(msg_score, (SCREEN_WIDTH - 40, 10))
#STEP 18
def gameEnd():
msg = text_font.render("Game Over",1,pygame.Color("white"))
play_again = text_font.render("Play Again? Y/N",1,pygame.Color("green"))
screen.blit(msg,(SCREEN_WIDTH/2 - text_font.size("Game Over")[0]/2,SCREEN_HEIGHT/2))
screen.blit(play_again,(SCREEN_WIDTH/2 - text_font.size('Play Again? Y/N')[0]/2,SCREEN_HEIGHT/2+40))
pygame.display.flip()
pygame.display.update()
myKey = getPressedKey()
while(myKey != "exit"):
if(myKey == "yes"):
main()
elif(myKey == "no"):
break
myKey = getPressedKey()
clock.tick(FPS)
sys.exit()
main()
| spawnSingleFood(food, snake.bodyStack[0].x, snake.bodyStack[0].y)
eaten_food = False | conditional_block |
glove.py | import logging
from collections import Counter
from random import shuffle
import numpy as np
from math import log
from matplotlib import pyplot as plt
from scipy import sparse
from scipy.spatial.distance import pdist
from sklearn.decomposition import PCA
class GloVe:
def __init__(self, lists, learning_rate=0.05, x_max=100, alpha=0.75, vector_size=100, iterations=25, min_count=None,
logging_level=logging.INFO):
self._lists = lists
self._learning_rate = learning_rate
self._x_max = x_max
self._alpha = alpha
self._vector_size = vector_size
self._iterations = iterations
self._min_count = min_count
self._logging_level = logging_level
self._logger = None
self._vocabulary = None
self.id2word = None
def setup(self):
self.setup_logger()
self.build_vocab()
self.build_id2word()
def fit(self):
space = self._train()
merged_space = self._merge_main_context(space)
return merged_space
def setup_logger(self, name_='GloVe'):
# TODO redirect to file
self._logger = logging.getLogger(name_)
stream_logger = logging.StreamHandler()
stream_logger.setLevel(self._logging_level)
self._logger.addHandler(stream_logger)
def build_vocab(self, top=None):
"""
Build a vocabulary with word frequencies for an entire corpus.
:param top: If not None, only first <top> words, base on frequency, will be preserved
Returns a dictionary `w -> (i, f)`, mapping word strings to pairs of
word ID and word corpus frequency.
"""
self._logger.info("Building vocab from corpus")
vocab = Counter()
for list_ in self._lists:
vocab.update(list_)
self._logger.info("Done building vocab from corpus.")
if top is not None and top < len(vocab):
words = sorted(vocab.items(), key=lambda x: -x[1])[:top]
else:
words = vocab.items()
self._vocabulary = {word: (i, freq) for i, (word, freq) in enumerate(words)}
def build_id2word(self):
self.id2word = dict((id_, word) for word, (id_, _) in self._vocabulary.items())
def _build_cooccur(self):
"""
Build a word co-occurrence list for the given corpus.
This function is a tuple generator, where each element (representing
a cooccurrence pair) is of the form
(i_main, i_context, cooccurrence)
where `i_main` is the ID of the main word in the cooccurrence and
`i_context` is the ID of the context word, and `cooccurrence` is the
`X_{ij}` cooccurrence value as described in Pennington et al.
(2014).
If `min_count` is not `None`, cooccurrence pairs where either word
occurs in the corpus fewer than `min_count` times are ignored.
"""
| # indexing speed; we'll convert into a list later
cooccurrences = sparse.lil_matrix((vocab_size, vocab_size),
dtype=np.float64)
for i, list_ in enumerate(self._lists):
if i % 1000 == 0:
self._logger.info("Building cooccurrence matrix: on line %i", i)
token_ids = [self._vocabulary[word][0] for word in list_ if word in self._vocabulary]
for center_i, center_id in enumerate(token_ids):
# Collect all other word IDs in the list as the context of the center word
context_ids = token_ids[:]
del context_ids[center_i]
contexts_len = len(context_ids)
for left_i, left_id in enumerate(context_ids):
# Build co-occurrence matrix symmetrically (pretend we
# are calculating right contexts as well)
cooccurrences[center_id, left_id] += 0.5
cooccurrences[left_id, center_id] += 0.5
# Now yield our tuple sequence (dig into the LiL-matrix internals to
# quickly iterate through all nonzero cells)
for i, (row, data) in enumerate(zip(cooccurrences.rows,
cooccurrences.data)):
if self._min_count is not None and self._vocabulary[self.id2word[i]][1] < self._min_count:
continue
for data_idx, j in enumerate(row):
if self._min_count is not None and self._vocabulary[self.id2word[j]][1] < self._min_count:
continue
yield i, j, data[data_idx]
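# Rough illustration on a toy corpus (hypothetical ids, assuming the Counter assigns
# a=0, b=1, c=2 and min_count is None): for self._lists = [['a', 'b'], ['a', 'c']],
# each pair inside a list adds 0.5 from both centers, so this generator yields
# (0, 1, 1.0), (0, 2, 1.0), (1, 0, 1.0) and (2, 0, 1.0).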
def _run_iter(self, data):
"""
Run a single iteration of GloVe training using the given
cooccurrence data and the previously computed weight vectors /
biases and accompanying gradient histories.
`data` is a pre-fetched data / weights list where each element is of
the form
(v_main, v_context,
b_main, b_context,
gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context,
cooccurrence)
as produced by the `train_glove` function. Each element in this
tuple is an `ndarray` view into the data structure which contains
it.
See the `train_glove` function for information on the shapes of `W`,
`biases`, `gradient_squared`, `gradient_squared_biases` and how they
should be initialized.
The parameters `x_max`, `alpha` define our weighting function when
computing the cost for two word pairs; see the GloVe paper for more
details.
Returns the cost associated with the given weight assignments and
updates the weights by online AdaGrad in place.
"""
global_cost = 0
# We want to iterate over data randomly so as not to unintentionally
# bias the word vector contents
shuffle(data)
for (v_main, v_context, b_main, b_context, gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context, cooccurrence) in data:
weight = (cooccurrence / self._x_max) ** self._alpha if cooccurrence < self._x_max else 1
# Compute inner component of cost function, which is used in
# both overall cost calculation and in gradient calculation
#
# $$ J' = w_i^Tw_j + b_i + b_j - log(X_{ij}) $$
cost_inner = (v_main.dot(v_context)
+ b_main[0] + b_context[0]
- log(cooccurrence))
# Compute cost
#
# $$ J = f(X_{ij}) (J')^2 $$
cost = weight * (cost_inner ** 2)
# Add weighted cost to the global cost tracker
global_cost += 0.5 * cost
# Compute gradients for word vector terms.
#
# NB: `main_word` is only a view into `W` (not a copy), so our
# modifications here will affect the global weight matrix;
# likewise for context_word, biases, etc.
grad_main = weight * cost_inner * v_context
grad_context = weight * cost_inner * v_main
# Compute gradients for bias terms
grad_bias_main = weight * cost_inner
grad_bias_context = weight * cost_inner
# Now perform adaptive updates
v_main -= (self._learning_rate * grad_main / np.sqrt(gradsq_W_main))
v_context -= (self._learning_rate * grad_context / np.sqrt(gradsq_W_context))
b_main -= (self._learning_rate * grad_bias_main / np.sqrt(gradsq_b_main))
b_context -= (self._learning_rate * grad_bias_context / np.sqrt(
gradsq_b_context))
# Update squared gradient sums
gradsq_W_main += np.square(grad_main)
gradsq_W_context += np.square(grad_context)
gradsq_b_main += grad_bias_main ** 2
gradsq_b_context += grad_bias_context ** 2
return global_cost
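# For a sense of scale of the weighting above: with the defaults x_max=100 and
# alpha=0.75, a pair observed 50 times gets weight (50/100)**0.75, roughly 0.59,
# and any pair observed at least x_max times is capped at 1, so very frequent
# cooccurrences do not dominate the global cost.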
def _train(self, iter_callback=None):
"""
Train GloVe vectors on the given generator `cooccurrences`, where
each element is of the form
(word_i_id, word_j_id, x_ij)
where `x_ij` is a cooccurrence value $X_{ij}$ as presented in the
matrix defined by `build_cooccur` and the Pennington et al. (2014)
paper itself.
If `iter_callback` is not `None`, the provided function will be
called after each iteration with the learned `W` matrix so far.
Keyword arguments are passed on to the iteration step function
`run_iter`.
Returns the computed word vector matrix `W`.
"""
vocab_size = len(self._vocabulary)
# Word vector matrix. This matrix is (2V) * d, where V is the size
# of the corpus vocabulary and d is the dimensionality of the word
# vectors. All elements are initialized randomly in the range (-0.5,
# 0.5]. We build two word vectors for each word: one for the word as
# the main (center) word and one for the word as a context word.
#
# It is up to the client to decide what to do with the resulting two
# vectors. Pennington et al. (2014) suggest adding or averaging the
# two for each word, or discarding the context vectors.
W = (np.random.rand(vocab_size * 2, self._vector_size) - 0.5) / float(self._vector_size + 1)
# Bias terms, each associated with a single vector. An array of size
# $2V$, initialized randomly in the range (-0.5, 0.5].
biases = (np.random.rand(vocab_size * 2) - 0.5) / float(self._vector_size + 1)
# Training is done via adaptive gradient descent (AdaGrad). To make
# this work we need to store the sum of squares of all previous
# gradients.
#
# Like `W`, this matrix is (2V) * d.
#
# Initialize all squared gradient sums to 1 so that our initial
# adaptive learning rate is simply the global learning rate.
gradient_squared = np.ones((vocab_size * 2, self._vector_size),
dtype=np.float64)
# Sum of squared gradients for the bias terms.
gradient_squared_biases = np.ones(vocab_size * 2, dtype=np.float64)
# Build a reusable list from the given cooccurrence generator,
# pre-fetching all necessary data.
#
# NB: These are all views into the actual data matrices, so updates
# to them will pass on to the real data structures
#
# (We even extract the single-element biases as slices so that we
# can use them as views)
data = [(W[i_main], W[i_context + vocab_size],
biases[i_main: i_main + 1],
biases[i_context + vocab_size: i_context + vocab_size + 1],
gradient_squared[i_main], gradient_squared[i_context + vocab_size],
gradient_squared_biases[i_main: i_main + 1],
gradient_squared_biases[i_context + vocab_size
: i_context + vocab_size + 1],
cooccurrence)
for i_main, i_context, cooccurrence in self._build_cooccur()]
for i in range(self._iterations):
if (i + 1) % 500 == 0:
self._logger.info("\tBeginning iteration %i..", i + 1)
cost = self._run_iter(data)
if (i + 1) % 500 == 0:
self._logger.info("\t\tDone (cost %f)", cost)
if iter_callback is not None:
iter_callback(W)
return W
def _merge_main_context(self, W, merge_fun=lambda m, c: np.mean([m, c], axis=0),
normalize=True):
"""
Merge the main-word and context-word vectors for a weight matrix
using the provided merge function (which accepts a main-word and
context-word vector and returns a merged version).
By default, `merge_fun` returns the mean of the two vectors.
"""
vocab_size = int(len(W) / 2)
for i, row in enumerate(W[:vocab_size]):
merged = merge_fun(row, W[i + vocab_size])
if normalize:
merged /= np.linalg.norm(merged)
W[i, :] = merged
return W[:vocab_size]
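# The returned matrix has one (optionally L2-normalized) row per vocabulary word,
# indexed consistently with self.id2word; the plotting code below relies on this
# when annotating the PCA scatter points with word labels.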
if __name__ == '__main__':
test_lists = [
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
]
shuffle(test_lists)
glove = GloVe(test_lists, learning_rate=0.001, vector_size=10, iterations=2000)
glove.setup()
result = glove.fit()
distance_vector = pdist(result)
pca = PCA(n_components=2)
pca_result = pca.fit_transform(result)
xs = pca_result[:, 0]
ys = pca_result[:, 1]
_, ax = plt.subplots()
ax.scatter(xs, ys)
for i in range(len(pca_result)):
name = glove.id2word[i]
ax.annotate(name, (xs[i], ys[i]))
plt.show() | vocab_size = len(self._vocabulary)
# Collect cooccurrences internally as a sparse matrix for passable | random_line_split |
glove.py | import logging
from collections import Counter
from random import shuffle
import numpy as np
from math import log
from matplotlib import pyplot as plt
from scipy import sparse
from scipy.spatial.distance import pdist
from sklearn.decomposition import PCA
class GloVe:
def __init__(self, lists, learning_rate=0.05, x_max=100, alpha=0.75, vector_size=100, iterations=25, min_count=None,
logging_level=logging.INFO):
self._lists = lists
self._learning_rate = learning_rate
self._x_max = x_max
self._alpha = alpha
self._vector_size = vector_size
self._iterations = iterations
self._min_count = min_count
self._logging_level = logging_level
self._logger = None
self._vocabulary = None
self.id2word = None
def setup(self):
self.setup_logger()
self.build_vocab()
self.build_id2word()
def fit(self):
space = self._train()
merged_space = self._merge_main_context(space)
return merged_space
def setup_logger(self, name_='GloVe'):
# TODO redirect to file
self._logger = logging.getLogger(name_)
stream_logger = logging.StreamHandler()
stream_logger.setLevel(self._logging_level)
self._logger.addHandler(stream_logger)
def build_vocab(self, top=None):
"""
Build a vocabulary with word frequencies for an entire corpus.
:param top: If not None, only first <top> words, base on frequency, will be preserved
Returns a dictionary `w -> (i, f)`, mapping word strings to pairs of
word ID and word corpus frequency.
"""
self._logger.info("Building vocab from corpus")
vocab = Counter()
for list_ in self._lists:
vocab.update(list_)
self._logger.info("Done building vocab from corpus.")
if top is not None and top < len(vocab):
words = sorted(vocab.items(), key=lambda x: -x[1])[:top]
else:
words = vocab.items()
self._vocabulary = {word: (i, freq) for i, (word, freq) in enumerate(words)}
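# For instance, with lists like [['kawa', 'ciastka'], ['kawa', 'chleb']] this would
# produce something along the lines of {'kawa': (0, 2), 'ciastka': (1, 1), 'chleb': (2, 1)}
# (the exact ids depend on the Counter's iteration order).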
def build_id2word(self):
self.id2word = dict((id_, word) for word, (id_, _) in self._vocabulary.items())
def _build_cooccur(self):
"""
Build a word co-occurrence list for the given corpus.
This function is a tuple generator, where each element (representing
a cooccurrence pair) is of the form
(i_main, i_context, cooccurrence)
where `i_main` is the ID of the main word in the cooccurrence and
`i_context` is the ID of the context word, and `cooccurrence` is the
`X_{ij}` cooccurrence value as described in Pennington et al.
(2014).
If `min_count` is not `None`, cooccurrence pairs where either word
occurs in the corpus fewer than `min_count` times are ignored.
"""
vocab_size = len(self._vocabulary)
# Collect cooccurrences internally as a sparse matrix for passable
# indexing speed; we'll convert into a list later
cooccurrences = sparse.lil_matrix((vocab_size, vocab_size),
dtype=np.float64)
for i, list_ in enumerate(self._lists):
if i % 1000 == 0:
self._logger.info("Building cooccurrence matrix: on line %i", i)
token_ids = [self._vocabulary[word][0] for word in list_ if word in self._vocabulary]
for center_i, center_id in enumerate(token_ids):
# Collect all other word IDs in the list as the context of the center word
context_ids = token_ids[:]
del context_ids[center_i]
contexts_len = len(context_ids)
for left_i, left_id in enumerate(context_ids):
# Build co-occurrence matrix symmetrically (pretend we
# are calculating right contexts as well)
cooccurrences[center_id, left_id] += 0.5
cooccurrences[left_id, center_id] += 0.5
# Now yield our tuple sequence (dig into the LiL-matrix internals to
# quickly iterate through all nonzero cells)
for i, (row, data) in enumerate(zip(cooccurrences.rows,
cooccurrences.data)):
if self._min_count is not None and self._vocabulary[self.id2word[i]][1] < self._min_count:
continue
for data_idx, j in enumerate(row):
if self._min_count is not None and self._vocabulary[self.id2word[j]][1] < self._min_count:
continue
yield i, j, data[data_idx]
def _run_iter(self, data):
"""
Run a single iteration of GloVe training using the given
cooccurrence data and the previously computed weight vectors /
biases and accompanying gradient histories.
`data` is a pre-fetched data / weights list where each element is of
the form
(v_main, v_context,
b_main, b_context,
gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context,
cooccurrence)
as produced by the `train_glove` function. Each element in this
tuple is an `ndarray` view into the data structure which contains
it.
See the `train_glove` function for information on the shapes of `W`,
`biases`, `gradient_squared`, `gradient_squared_biases` and how they
should be initialized.
The parameters `x_max`, `alpha` define our weighting function when
computing the cost for two word pairs; see the GloVe paper for more
details.
Returns the cost associated with the given weight assignments and
updates the weights by online AdaGrad in place.
"""
global_cost = 0
# We want to iterate over data randomly so as not to unintentionally
# bias the word vector contents
shuffle(data)
for (v_main, v_context, b_main, b_context, gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context, cooccurrence) in data:
weight = (cooccurrence / self._x_max) ** self._alpha if cooccurrence < self._x_max else 1
# Compute inner component of cost function, which is used in
# both overall cost calculation and in gradient calculation
#
# $$ J' = w_i^Tw_j + b_i + b_j - log(X_{ij}) $$
cost_inner = (v_main.dot(v_context)
+ b_main[0] + b_context[0]
- log(cooccurrence))
# Compute cost
#
# $$ J = f(X_{ij}) (J')^2 $$
cost = weight * (cost_inner ** 2)
# Add weighted cost to the global cost tracker
global_cost += 0.5 * cost
# Compute gradients for word vector terms.
#
# NB: `main_word` is only a view into `W` (not a copy), so our
# modifications here will affect the global weight matrix;
# likewise for context_word, biases, etc.
grad_main = weight * cost_inner * v_context
grad_context = weight * cost_inner * v_main
# Compute gradients for bias terms
grad_bias_main = weight * cost_inner
grad_bias_context = weight * cost_inner
# Now perform adaptive updates
v_main -= (self._learning_rate * grad_main / np.sqrt(gradsq_W_main))
v_context -= (self._learning_rate * grad_context / np.sqrt(gradsq_W_context))
b_main -= (self._learning_rate * grad_bias_main / np.sqrt(gradsq_b_main))
b_context -= (self._learning_rate * grad_bias_context / np.sqrt(
gradsq_b_context))
# Update squared gradient sums
gradsq_W_main += np.square(grad_main)
gradsq_W_context += np.square(grad_context)
gradsq_b_main += grad_bias_main ** 2
gradsq_b_context += grad_bias_context ** 2
return global_cost
def _train(self, iter_callback=None):
"""
Train GloVe vectors on the given generator `cooccurrences`, where
each element is of the form
(word_i_id, word_j_id, x_ij)
where `x_ij` is a cooccurrence value $X_{ij}$ as presented in the
matrix defined by `build_cooccur` and the Pennington et al. (2014)
paper itself.
If `iter_callback` is not `None`, the provided function will be
called after each iteration with the learned `W` matrix so far.
Keyword arguments are passed on to the iteration step function
`run_iter`.
Returns the computed word vector matrix `W`.
"""
vocab_size = len(self._vocabulary)
# Word vector matrix. This matrix is (2V) * d, where V is the size
# of the corpus vocabulary and d is the dimensionality of the word
# vectors. All elements are initialized randomly in the range (-0.5,
# 0.5]. We build two word vectors for each word: one for the word as
# the main (center) word and one for the word as a context word.
#
# It is up to the client to decide what to do with the resulting two
# vectors. Pennington et al. (2014) suggest adding or averaging the
# two for each word, or discarding the context vectors.
W = (np.random.rand(vocab_size * 2, self._vector_size) - 0.5) / float(self._vector_size + 1)
# Bias terms, each associated with a single vector. An array of size
# $2V$, initialized randomly in the range (-0.5, 0.5].
biases = (np.random.rand(vocab_size * 2) - 0.5) / float(self._vector_size + 1)
# Training is done via adaptive gradient descent (AdaGrad). To make
# this work we need to store the sum of squares of all previous
# gradients.
#
# Like `W`, this matrix is (2V) * d.
#
# Initialize all squared gradient sums to 1 so that our initial
# adaptive learning rate is simply the global learning rate.
gradient_squared = np.ones((vocab_size * 2, self._vector_size),
dtype=np.float64)
# Sum of squared gradients for the bias terms.
gradient_squared_biases = np.ones(vocab_size * 2, dtype=np.float64)
# Build a reusable list from the given cooccurrence generator,
# pre-fetching all necessary data.
#
# NB: These are all views into the actual data matrices, so updates
# to them will pass on to the real data structures
#
# (We even extract the single-element biases as slices so that we
# can use them as views)
data = [(W[i_main], W[i_context + vocab_size],
biases[i_main: i_main + 1],
biases[i_context + vocab_size: i_context + vocab_size + 1],
gradient_squared[i_main], gradient_squared[i_context + vocab_size],
gradient_squared_biases[i_main: i_main + 1],
gradient_squared_biases[i_context + vocab_size
: i_context + vocab_size + 1],
cooccurrence)
for i_main, i_context, cooccurrence in self._build_cooccur()]
for i in range(self._iterations):
if (i + 1) % 500 == 0:
self._logger.info("\tBeginning iteration %i..", i + 1)
cost = self._run_iter(data)
if (i + 1) % 500 == 0:
self._logger.info("\t\tDone (cost %f)", cost)
if iter_callback is not None:
iter_callback(W)
return W
def _merge_main_context(self, W, merge_fun=lambda m, c: np.mean([m, c], axis=0),
normalize=True):
"""
Merge the main-word and context-word vectors for a weight matrix
using the provided merge function (which accepts a main-word and
context-word vector and returns a merged version).
By default, `merge_fun` returns the mean of the two vectors.
"""
vocab_size = int(len(W) / 2)
for i, row in enumerate(W[:vocab_size]):
merged = merge_fun(row, W[i + vocab_size])
if normalize:
merged /= np.linalg.norm(merged)
W[i, :] = merged
return W[:vocab_size]
if __name__ == '__main__':
| test_lists = [
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
]
shuffle(test_lists)
glove = GloVe(test_lists, learning_rate=0.001, vector_size=10, iterations=2000)
glove.setup()
result = glove.fit()
distance_vector = pdist(result)
pca = PCA(n_components=2)
pca_result = pca.fit_transform(result)
xs = pca_result[:, 0]
ys = pca_result[:, 1]
_, ax = plt.subplots()
ax.scatter(xs, ys)
for i in range(len(pca_result)):
name = glove.id2word[i]
ax.annotate(name, (xs[i], ys[i]))
plt.show() | conditional_block |
|
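The pre-fetch step in `_train` above depends on NumPy view semantics: indexing a single row of a 2-D array and taking a slice of a 1-D array both return views, so the in-place updates inside the training loop write straight back into `W`, `biases`, and the gradient accumulators. A minimal standalone sketch of that behaviour (the array names here are illustrative only, not taken from the file):

```python
import numpy as np

W = np.zeros((4, 3))
biases = np.zeros(4)

row = W[1]          # view of one row of W
bias = biases[2:3]  # length-1 slice view of biases (biases[2] alone would be a scalar copy)

row += 1.0          # propagates back into W
bias -= 0.5         # propagates back into biases

assert W[1].tolist() == [1.0, 1.0, 1.0]
assert biases[2] == -0.5
```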
glove.py | import logging
from collections import Counter
from random import shuffle
import numpy as np
from math import log
from matplotlib import pyplot as plt
from scipy import sparse
from scipy.spatial.distance import pdist
from sklearn.decomposition import PCA
class GloVe:
def __init__(self, lists, learning_rate=0.05, x_max=100, alpha=0.75, vector_size=100, iterations=25, min_count=None,
logging_level=logging.INFO):
self._lists = lists
self._learning_rate = learning_rate
self._x_max = x_max
self._alpha = alpha
self._vector_size = vector_size
self._iterations = iterations
self._min_count = min_count
self._logging_level = logging_level
self._logger = None
self._vocabulary = None
self.id2word = None
def setup(self):
|
def fit(self):
space = self._train()
merged_space = self._merge_main_context(space)
return merged_space
def setup_logger(self, name_='GloVe'):
# TODO redirect to file
self._logger = logging.getLogger(name_)
stream_logger = logging.StreamHandler()
stream_logger.setLevel(self._logging_level)
self._logger.addHandler(stream_logger)
def build_vocab(self, top=None):
"""
Build a vocabulary with word frequencies for an entire corpus.
:param top: If not None, only the first <top> words, based on frequency, will be preserved
Returns a dictionary `w -> (i, f)`, mapping word strings to pairs of
word ID and word corpus frequency.
"""
self._logger.info("Building vocab from corpus")
vocab = Counter()
for list_ in self._lists:
vocab.update(list_)
self._logger.info("Done building vocab from corpus.")
if top is not None and top < len(vocab):
words = sorted(vocab.items(), key=lambda x: -x[1])[:top]
else:
words = vocab.items()
self._vocabulary = {word: (i, freq) for i, (word, freq) in enumerate(words)}
def build_id2word(self):
self.id2word = dict((id_, word) for word, (id_, _) in self._vocabulary.items())
def _build_cooccur(self):
"""
Build a word co-occurrence list for the given corpus.
This function is a tuple generator, where each element (representing
a cooccurrence pair) is of the form
(i_main, i_context, cooccurrence)
where `i_main` is the ID of the main word in the cooccurrence and
`i_context` is the ID of the context word, and `cooccurrence` is the
`X_{ij}` cooccurrence value as described in Pennington et al.
(2014).
If `min_count` is not `None`, cooccurrence pairs where either word
occurs in the corpus fewer than `min_count` times are ignored.
"""
vocab_size = len(self._vocabulary)
# Collect cooccurrences internally as a sparse matrix for passable
# indexing speed; we'll convert into a list later
cooccurrences = sparse.lil_matrix((vocab_size, vocab_size),
dtype=np.float64)
for i, list_ in enumerate(self._lists):
if i % 1000 == 0:
self._logger.info("Building cooccurrence matrix: on line %i", i)
token_ids = [self._vocabulary[word][0] for word in list_ if word in self._vocabulary]
for center_i, center_id in enumerate(token_ids):
# Collect the IDs of all other words in the list as context words
context_ids = token_ids[:]
del context_ids[center_i]
contexts_len = len(context_ids)
for left_i, left_id in enumerate(context_ids):
# Add the pair symmetrically in both directions; every word in the
# list also acts as a center word, so each unordered pair ends up
# contributing a total count of 1 to both X_ij and X_ji.
cooccurrences[center_id, left_id] += 0.5
cooccurrences[left_id, center_id] += 0.5
# Now yield our tuple sequence (dig into the LiL-matrix internals to
# quickly iterate through all nonzero cells)
for i, (row, data) in enumerate(zip(cooccurrences.rows,
cooccurrences.data)):
if self._min_count is not None and self._vocabulary[self.id2word[i]][1] < self._min_count:
continue
for data_idx, j in enumerate(row):
if self._min_count is not None and self._vocabulary[self.id2word[j]][1] < self._min_count:
continue
yield i, j, data[data_idx]
def _run_iter(self, data):
"""
Run a single iteration of GloVe training using the given
cooccurrence data and the previously computed weight vectors /
biases and accompanying gradient histories.
`data` is a pre-fetched data / weights list where each element is of
the form
(v_main, v_context,
b_main, b_context,
gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context,
cooccurrence)
as produced by the `_train` method. Each element in this
tuple is an `ndarray` view into the data structure which contains
it.
See the `_train` method for information on the shapes of `W`,
`biases`, `gradient_squared`, `gradient_squared_biases` and how they
should be initialized.
The parameters `x_max`, `alpha` define our weighting function when
computing the cost for two word pairs; see the GloVe paper for more
details.
Returns the cost associated with the given weight assignments and
updates the weights by online AdaGrad in place.
"""
global_cost = 0
# We want to iterate over data randomly so as not to unintentionally
# bias the word vector contents
shuffle(data)
for (v_main, v_context, b_main, b_context, gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context, cooccurrence) in data:
weight = (cooccurrence / self._x_max) ** self._alpha if cooccurrence < self._x_max else 1
# Compute inner component of cost function, which is used in
# both overall cost calculation and in gradient calculation
#
# $$ J' = w_i^Tw_j + b_i + b_j - log(X_{ij}) $$
cost_inner = (v_main.dot(v_context)
+ b_main[0] + b_context[0]
- log(cooccurrence))
# Compute cost
#
# $$ J = f(X_{ij}) (J')^2 $$
cost = weight * (cost_inner ** 2)
# Add weighted cost to the global cost tracker
global_cost += 0.5 * cost
# Compute gradients for word vector terms.
#
# NB: `main_word` is only a view into `W` (not a copy), so our
# modifications here will affect the global weight matrix;
# likewise for context_word, biases, etc.
grad_main = weight * cost_inner * v_context
grad_context = weight * cost_inner * v_main
# Compute gradients for bias terms
grad_bias_main = weight * cost_inner
grad_bias_context = weight * cost_inner
# Now perform adaptive updates
v_main -= (self._learning_rate * grad_main / np.sqrt(gradsq_W_main))
v_context -= (self._learning_rate * grad_context / np.sqrt(gradsq_W_context))
b_main -= (self._learning_rate * grad_bias_main / np.sqrt(gradsq_b_main))
b_context -= (self._learning_rate * grad_bias_context / np.sqrt(
gradsq_b_context))
# Update squared gradient sums
gradsq_W_main += np.square(grad_main)
gradsq_W_context += np.square(grad_context)
gradsq_b_main += grad_bias_main ** 2
gradsq_b_context += grad_bias_context ** 2
return global_cost
def _train(self, iter_callback=None):
"""
Train GloVe vectors on the cooccurrence data produced by
`_build_cooccur`, where each element is of the form
(word_i_id, word_j_id, x_ij)
where `x_ij` is a cooccurrence value $X_{ij}$ as presented in the
matrix defined by `_build_cooccur` and the Pennington et al. (2014)
paper itself.
If `iter_callback` is not `None`, the provided function will be
called after each iteration with the learned `W` matrix so far.
Each training iteration is delegated to the step function `_run_iter`.
Returns the computed word vector matrix `W`.
"""
vocab_size = len(self._vocabulary)
# Word vector matrix. This matrix is (2V) * d, where V is the size
# of the corpus vocabulary and d is the dimensionality of the word
# vectors. All elements are initialized randomly in [-0.5, 0.5) and
# scaled by 1 / (d + 1). We build two word vectors for each word: one for the word as
# the main (center) word and one for the word as a context word.
#
# It is up to the client to decide what to do with the resulting two
# vectors. Pennington et al. (2014) suggest adding or averaging the
# two for each word, or discarding the context vectors.
W = (np.random.rand(vocab_size * 2, self._vector_size) - 0.5) / float(self._vector_size + 1)
# Bias terms, each associated with a single vector. An array of size
# $2V$, initialized randomly in [-0.5, 0.5) and scaled like `W`.
biases = (np.random.rand(vocab_size * 2) - 0.5) / float(self._vector_size + 1)
# Training is done via adaptive gradient descent (AdaGrad). To make
# this work we need to store the sum of squares of all previous
# gradients.
#
# Like `W`, this matrix is (2V) * d.
#
# Initialize all squared gradient sums to 1 so that our initial
# adaptive learning rate is simply the global learning rate.
gradient_squared = np.ones((vocab_size * 2, self._vector_size),
dtype=np.float64)
# Sum of squared gradients for the bias terms.
gradient_squared_biases = np.ones(vocab_size * 2, dtype=np.float64)
# Build a reusable list from the given cooccurrence generator,
# pre-fetching all necessary data.
#
# NB: These are all views into the actual data matrices, so updates
# to them will pass on to the real data structures
#
# (We even extract the single-element biases as slices so that we
# can use them as views)
data = [(W[i_main], W[i_context + vocab_size],
biases[i_main: i_main + 1],
biases[i_context + vocab_size: i_context + vocab_size + 1],
gradient_squared[i_main], gradient_squared[i_context + vocab_size],
gradient_squared_biases[i_main: i_main + 1],
gradient_squared_biases[i_context + vocab_size
: i_context + vocab_size + 1],
cooccurrence)
for i_main, i_context, cooccurrence in self._build_cooccur()]
for i in range(self._iterations):
if (i + 1) % 500 == 0:
self._logger.info("\tBeginning iteration %i..", i + 1)
cost = self._run_iter(data)
if (i + 1) % 500 == 0:
self._logger.info("\t\tDone (cost %f)", cost)
if iter_callback is not None:
iter_callback(W)
return W
def _merge_main_context(self, W, merge_fun=lambda m, c: np.mean([m, c], axis=0),
normalize=True):
"""
Merge the main-word and context-word vectors for a weight matrix
using the provided merge function (which accepts a main-word and
context-word vector and returns a merged version).
By default, `merge_fun` returns the mean of the two vectors.
"""
vocab_size = int(len(W) / 2)
for i, row in enumerate(W[:vocab_size]):
merged = merge_fun(row, W[i + vocab_size])
if normalize:
merged /= np.linalg.norm(merged)
W[i, :] = merged
return W[:vocab_size]
if __name__ == '__main__':
test_lists = [
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
]
shuffle(test_lists)
glove = GloVe(test_lists, learning_rate=0.001, vector_size=10, iterations=2000)
glove.setup()
result = glove.fit()
distance_vector = pdist(result)
pca = PCA(n_components=2)
pca_result = pca.fit_transform(result)
xs = pca_result[:, 0]
ys = pca_result[:, 1]
_, ax = plt.subplots()
ax.scatter(xs, ys)
for i in range(len(pca_result)):
name = glove.id2word[i]
ax.annotate(name, (xs[i], ys[i]))
plt.show()
| self.setup_logger()
self.build_vocab()
self.build_id2word() | identifier_body |
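For reference, the weighted per-pair cost that `_run_iter` accumulates, $J = f(X_{ij}) (w_i^T w_j + b_i + b_j - \log X_{ij})^2$, can be written as a tiny standalone helper. This is only a sketch of the formula spelled out in the comments above; `pair_cost` is not part of the class, and the `x_max`/`alpha` defaults simply mirror the constructor:

```python
from math import log

def pair_cost(v_main, v_context, b_main, b_context, x_ij, x_max=100, alpha=0.75):
    """Weighted GloVe cost f(X_ij) * (w_i.w_j + b_i + b_j - log X_ij)**2 for one pair."""
    weight = (x_ij / x_max) ** alpha if x_ij < x_max else 1.0
    inner = sum(a * b for a, b in zip(v_main, v_context)) + b_main + b_context - log(x_ij)
    return weight * inner ** 2

# e.g. pair_cost([0.1, 0.2], [0.3, -0.1], 0.05, 0.02, x_ij=4.0)
```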
glove.py | import logging
from collections import Counter
from random import shuffle
import numpy as np
from math import log
from matplotlib import pyplot as plt
from scipy import sparse
from scipy.spatial.distance import pdist
from sklearn.decomposition import PCA
class GloVe:
def __init__(self, lists, learning_rate=0.05, x_max=100, alpha=0.75, vector_size=100, iterations=25, min_count=None,
logging_level=logging.INFO):
self._lists = lists
self._learning_rate = learning_rate
self._x_max = x_max
self._alpha = alpha
self._vector_size = vector_size
self._iterations = iterations
self._min_count = min_count
self._logging_level = logging_level
self._logger = None
self._vocabulary = None
self.id2word = None
def | (self):
self.setup_logger()
self.build_vocab()
self.build_id2word()
def fit(self):
space = self._train()
merged_space = self._merge_main_context(space)
return merged_space
def setup_logger(self, name_='GloVe'):
# TODO redirect to file
self._logger = logging.getLogger(name_)
stream_logger = logging.StreamHandler()
stream_logger.setLevel(self._logging_level)
self._logger.addHandler(stream_logger)
def build_vocab(self, top=None):
"""
Build a vocabulary with word frequencies for an entire corpus.
:param top: If not None, only the first <top> words, based on frequency, will be preserved
Returns a dictionary `w -> (i, f)`, mapping word strings to pairs of
word ID and word corpus frequency.
"""
self._logger.info("Building vocab from corpus")
vocab = Counter()
for list_ in self._lists:
vocab.update(list_)
self._logger.info("Done building vocab from corpus.")
if top is not None and top < len(vocab):
words = sorted(vocab.items(), key=lambda x: -x[1])[:top]
else:
words = vocab.items()
self._vocabulary = {word: (i, freq) for i, (word, freq) in enumerate(words)}
def build_id2word(self):
self.id2word = dict((id_, word) for word, (id_, _) in self._vocabulary.items())
def _build_cooccur(self):
"""
Build a word co-occurrence list for the given corpus.
This function is a tuple generator, where each element (representing
a cooccurrence pair) is of the form
(i_main, i_context, cooccurrence)
where `i_main` is the ID of the main word in the cooccurrence and
`i_context` is the ID of the context word, and `cooccurrence` is the
`X_{ij}` cooccurrence value as described in Pennington et al.
(2014).
If `min_count` is not `None`, cooccurrence pairs where either word
occurs in the corpus fewer than `min_count` times are ignored.
"""
vocab_size = len(self._vocabulary)
# Collect cooccurrences internally as a sparse matrix for passable
# indexing speed; we'll convert into a list later
cooccurrences = sparse.lil_matrix((vocab_size, vocab_size),
dtype=np.float64)
for i, list_ in enumerate(self._lists):
if i % 1000 == 0:
self._logger.info("Building cooccurrence matrix: on line %i", i)
token_ids = [self._vocabulary[word][0] for word in list_ if word in self._vocabulary]
for center_i, center_id in enumerate(token_ids):
# Collect the IDs of all other words in the list as context words
context_ids = token_ids[:]
del context_ids[center_i]
contexts_len = len(context_ids)
for left_i, left_id in enumerate(context_ids):
# Add the pair symmetrically in both directions; every word in the
# list also acts as a center word, so each unordered pair ends up
# contributing a total count of 1 to both X_ij and X_ji.
cooccurrences[center_id, left_id] += 0.5
cooccurrences[left_id, center_id] += 0.5
# Now yield our tuple sequence (dig into the LiL-matrix internals to
# quickly iterate through all nonzero cells)
for i, (row, data) in enumerate(zip(cooccurrences.rows,
cooccurrences.data)):
if self._min_count is not None and self._vocabulary[self.id2word[i]][1] < self._min_count:
continue
for data_idx, j in enumerate(row):
if self._min_count is not None and self._vocabulary[self.id2word[j]][1] < self._min_count:
continue
yield i, j, data[data_idx]
def _run_iter(self, data):
"""
Run a single iteration of GloVe training using the given
cooccurrence data and the previously computed weight vectors /
biases and accompanying gradient histories.
`data` is a pre-fetched data / weights list where each element is of
the form
(v_main, v_context,
b_main, b_context,
gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context,
cooccurrence)
as produced by the `_train` method. Each element in this
tuple is an `ndarray` view into the data structure which contains
it.
See the `_train` method for information on the shapes of `W`,
`biases`, `gradient_squared`, `gradient_squared_biases` and how they
should be initialized.
The parameters `x_max`, `alpha` define our weighting function when
computing the cost for two word pairs; see the GloVe paper for more
details.
Returns the cost associated with the given weight assignments and
updates the weights by online AdaGrad in place.
"""
global_cost = 0
# We want to iterate over data randomly so as not to unintentionally
# bias the word vector contents
shuffle(data)
for (v_main, v_context, b_main, b_context, gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context, cooccurrence) in data:
weight = (cooccurrence / self._x_max) ** self._alpha if cooccurrence < self._x_max else 1
# Compute inner component of cost function, which is used in
# both overall cost calculation and in gradient calculation
#
# $$ J' = w_i^Tw_j + b_i + b_j - log(X_{ij}) $$
cost_inner = (v_main.dot(v_context)
+ b_main[0] + b_context[0]
- log(cooccurrence))
# Compute cost
#
# $$ J = f(X_{ij}) (J')^2 $$
cost = weight * (cost_inner ** 2)
# Add weighted cost to the global cost tracker
global_cost += 0.5 * cost
# Compute gradients for word vector terms.
#
# NB: `main_word` is only a view into `W` (not a copy), so our
# modifications here will affect the global weight matrix;
# likewise for context_word, biases, etc.
grad_main = weight * cost_inner * v_context
grad_context = weight * cost_inner * v_main
# Compute gradients for bias terms
grad_bias_main = weight * cost_inner
grad_bias_context = weight * cost_inner
# Now perform adaptive updates
v_main -= (self._learning_rate * grad_main / np.sqrt(gradsq_W_main))
v_context -= (self._learning_rate * grad_context / np.sqrt(gradsq_W_context))
b_main -= (self._learning_rate * grad_bias_main / np.sqrt(gradsq_b_main))
b_context -= (self._learning_rate * grad_bias_context / np.sqrt(
gradsq_b_context))
# Update squared gradient sums
gradsq_W_main += np.square(grad_main)
gradsq_W_context += np.square(grad_context)
gradsq_b_main += grad_bias_main ** 2
gradsq_b_context += grad_bias_context ** 2
return global_cost
def _train(self, iter_callback=None):
"""
Train GloVe vectors on the cooccurrence data produced by
`_build_cooccur`, where each element is of the form
(word_i_id, word_j_id, x_ij)
where `x_ij` is a cooccurrence value $X_{ij}$ as presented in the
matrix defined by `_build_cooccur` and the Pennington et al. (2014)
paper itself.
If `iter_callback` is not `None`, the provided function will be
called after each iteration with the learned `W` matrix so far.
Each training iteration is delegated to the step function `_run_iter`.
Returns the computed word vector matrix `W`.
"""
vocab_size = len(self._vocabulary)
# Word vector matrix. This matrix is (2V) * d, where V is the size
# of the corpus vocabulary and d is the dimensionality of the word
# vectors. All elements are initialized randomly in [-0.5, 0.5) and
# scaled by 1 / (d + 1). We build two word vectors for each word: one for the word as
# the main (center) word and one for the word as a context word.
#
# It is up to the client to decide what to do with the resulting two
# vectors. Pennington et al. (2014) suggest adding or averaging the
# two for each word, or discarding the context vectors.
W = (np.random.rand(vocab_size * 2, self._vector_size) - 0.5) / float(self._vector_size + 1)
# Bias terms, each associated with a single vector. An array of size
# $2V$, initialized randomly in [-0.5, 0.5) and scaled like `W`.
biases = (np.random.rand(vocab_size * 2) - 0.5) / float(self._vector_size + 1)
# Training is done via adaptive gradient descent (AdaGrad). To make
# this work we need to store the sum of squares of all previous
# gradients.
#
# Like `W`, this matrix is (2V) * d.
#
# Initialize all squared gradient sums to 1 so that our initial
# adaptive learning rate is simply the global learning rate.
gradient_squared = np.ones((vocab_size * 2, self._vector_size),
dtype=np.float64)
# Sum of squared gradients for the bias terms.
gradient_squared_biases = np.ones(vocab_size * 2, dtype=np.float64)
# Build a reusable list from the given cooccurrence generator,
# pre-fetching all necessary data.
#
# NB: These are all views into the actual data matrices, so updates
# to them will pass on to the real data structures
#
# (We even extract the single-element biases as slices so that we
# can use them as views)
data = [(W[i_main], W[i_context + vocab_size],
biases[i_main: i_main + 1],
biases[i_context + vocab_size: i_context + vocab_size + 1],
gradient_squared[i_main], gradient_squared[i_context + vocab_size],
gradient_squared_biases[i_main: i_main + 1],
gradient_squared_biases[i_context + vocab_size
: i_context + vocab_size + 1],
cooccurrence)
for i_main, i_context, cooccurrence in self._build_cooccur()]
for i in range(self._iterations):
if (i + 1) % 500 == 0:
self._logger.info("\tBeginning iteration %i..", i + 1)
cost = self._run_iter(data)
if (i + 1) % 500 == 0:
self._logger.info("\t\tDone (cost %f)", cost)
if iter_callback is not None:
iter_callback(W)
return W
def _merge_main_context(self, W, merge_fun=lambda m, c: np.mean([m, c], axis=0),
normalize=True):
"""
Merge the main-word and context-word vectors for a weight matrix
using the provided merge function (which accepts a main-word and
context-word vector and returns a merged version).
By default, `merge_fun` returns the mean of the two vectors.
"""
vocab_size = int(len(W) / 2)
for i, row in enumerate(W[:vocab_size]):
merged = merge_fun(row, W[i + vocab_size])
if normalize:
merged /= np.linalg.norm(merged)
W[i, :] = merged
return W[:vocab_size]
if __name__ == '__main__':
test_lists = [
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
['szynka', 'woda'],
]
shuffle(test_lists)
glove = GloVe(test_lists, learning_rate=0.001, vector_size=10, iterations=2000)
glove.setup()
result = glove.fit()
distance_vector = pdist(result)
pca = PCA(n_components=2)
pca_result = pca.fit_transform(result)
xs = pca_result[:, 0]
ys = pca_result[:, 1]
_, ax = plt.subplots()
ax.scatter(xs, ys)
for i in range(len(pca_result)):
name = glove.id2word[i]
ax.annotate(name, (xs[i], ys[i]))
plt.show()
| setup | identifier_name |
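A short usage sketch for the class above, mirroring the `__main__` block but adding a cosine-similarity lookup over the merged vectors. The `nearest` helper is illustrative and not part of glove.py: it reads the private `_vocabulary` mapping and relies on `_merge_main_context` having L2-normalized each row:

```python
import numpy as np

def nearest(glove, vectors, word, k=3):
    """Return the k words whose merged vectors are most similar to `word` (cosine similarity)."""
    idx = glove._vocabulary[word][0]
    sims = vectors @ vectors[idx]   # rows are unit-length, so dot product == cosine similarity
    order = np.argsort(-sims)
    return [glove.id2word[j] for j in order if j != idx][:k]

# corpus = [['kawa', 'ciastka'], ['woda', 'chleb'], ...]   # lists of co-occurring tokens
# glove = GloVe(corpus, vector_size=10, iterations=500)
# glove.setup()
# merged = glove.fit()
# print(nearest(glove, merged, 'kawa'))
```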
printer.rs | use std::borrow::Cow;
use std::io::{self, BufRead, BufReader, Read, Write};
use std::time::Instant;
use encoding_rs::Encoding;
use encoding_rs_io::DecodeReaderBytesBuilder;
use mime::Mime;
use reqwest::blocking::{Body, Request, Response};
use reqwest::cookie::CookieStore;
use reqwest::header::{
HeaderMap, HeaderName, HeaderValue, ACCEPT, CONTENT_LENGTH, CONTENT_TYPE, COOKIE, HOST,
};
use reqwest::Version;
use url::Url;
use crate::cli::FormatOptions;
use crate::decoder::{decompress, get_compression_type};
use crate::{
buffer::Buffer,
cli::{Pretty, Theme},
formatting::{get_json_formatter, Highlighter},
middleware::ResponseExt,
utils::{copy_largebuf, test_mode, BUFFER_SIZE},
};
const BINARY_SUPPRESSOR: &str = concat!(
"+-----------------------------------------+\n",
"| NOTE: binary data not shown in terminal |\n",
"+-----------------------------------------+\n",
"\n"
);
/// A wrapper around a reader that reads line by line, (optionally) returning
/// an error if the line appears to be binary.
///
/// This is meant for streaming output. `checked` should typically be
/// set to buffer.is_terminal(), but if you need neither checking nor
/// highlighting then you may not need a `BinaryGuard` at all.
///
/// This reader does not validate UTF-8.
struct BinaryGuard<'a, T: Read> {
reader: BufReader<&'a mut T>,
buffer: Vec<u8>,
checked: bool,
}
impl<'a, T: Read> BinaryGuard<'a, T> {
fn new(reader: &'a mut T, checked: bool) -> Self {
Self {
reader: BufReader::with_capacity(BUFFER_SIZE, reader),
buffer: Vec::new(),
checked,
}
}
/// Return at least one complete line.
///
/// Compared to returning exactly one line, this gives you more information
/// about when data comes in. It's better to flush after each `read_lines`
/// call than to flush after each individual line.
///
/// We only work with complete lines to accommodate the syntax highlighting
/// and the binary data (null byte) detection. HTTPie processes exactly
/// one line at a time.
///
/// We work off the assumption that if the response contains a null byte
/// then none of it should be shown, and therefore the earlier we detect
/// the null byte, the better. This basically matches the non-streaming
/// behavior. But if it takes a while for the first null byte to show up
/// then it's unpredictable when the plain text output is cut off by the
/// binary suppressor. HTTPie is more consistent in this regard.
fn read_lines(&mut self) -> io::Result<Option<&[u8]>> {
self.buffer.clear();
loop {
let buf = match self.reader.fill_buf() {
Ok(buf) => buf,
Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
};
if self.checked && buf.contains(&b'\0') {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Found binary data",
));
} else if buf.is_empty() {
if self.buffer.is_empty() {
return Ok(None);
} else {
return Ok(Some(&self.buffer));
}
} else if let Some(ind) = memchr::memrchr(b'\n', buf) {
// Potential optimization: return a slice of buf instead of copying.
// (We'd have to delay the call to .consume() until the next call.)
// (There is a weird borrow checker problem.)
self.buffer.extend_from_slice(&buf[..=ind]);
self.reader.consume(ind + 1);
return Ok(Some(&self.buffer));
} else {
self.buffer.extend_from_slice(buf);
let n = buf.len(); // borrow checker
self.reader.consume(n);
// It would be nice to return early if self.buffer is growing very large
// or if it's been a long time since the last read. But especially the
// second is hard to implement, and we'd want to pair this with flushing
// the output buffer. (HTTPie does nothing of this kind.)
}
}
}
}
pub struct Printer {
indent_json: bool,
color: bool,
theme: Theme,
sort_headers: bool,
stream: bool,
buffer: Buffer,
format_options: FormatOptions,
}
impl Printer {
pub fn new(
pretty: Pretty,
theme: Option<Theme>,
stream: bool,
buffer: Buffer,
format_options: FormatOptions,
) -> Self {
let theme = theme.unwrap_or(Theme::Auto);
Printer {
indent_json: pretty.format(),
sort_headers: pretty.format(),
color: pretty.color(),
stream,
theme,
buffer,
format_options,
}
}
fn get_highlighter(&mut self, syntax: &'static str) -> Highlighter<'_> {
Highlighter::new(syntax, self.theme, &mut self.buffer)
}
fn print_colorized_text(&mut self, text: &str, syntax: &'static str) -> io::Result<()> {
self.get_highlighter(syntax).highlight(text)
}
fn print_syntax_text(&mut self, text: &str, syntax: &'static str) -> io::Result<()> {
if self.color {
self.print_colorized_text(text, syntax)
} else {
self.buffer.print(text)
}
}
fn print_json_text(&mut self, text: &str, check_valid: bool) -> io::Result<()> {
if !self.indent_json {
// We don't have to do anything specialized, so fall back to the generic version
return self.print_syntax_text(text, "json");
}
if check_valid && !valid_json(text) {
// JSONXF may mess up the text, e.g. by removing whitespace
// This is somewhat common as application/json is the default
// content type for requests
return self.print_syntax_text(text, "json");
}
let mut formatter = get_json_formatter(&self.format_options);
if self.color {
let mut buf = Vec::new();
formatter.format_buf(text.as_bytes(), &mut buf)?;
// in principle, buf should already be valid UTF-8,
// because JSONXF doesn't mangle it
let text = String::from_utf8_lossy(&buf);
self.print_colorized_text(&text, "json")
} else {
formatter.format_buf(text.as_bytes(), &mut self.buffer)
}
}
fn print_body_text(&mut self, content_type: ContentType, body: &str) -> io::Result<()> {
match content_type {
ContentType::Json => self.print_json_text(body, true),
ContentType::Xml => self.print_syntax_text(body, "xml"),
ContentType::Html => self.print_syntax_text(body, "html"),
ContentType::Css => self.print_syntax_text(body, "css"),
// In HTTPie part of this behavior is gated behind the --json flag
// But it does JSON formatting even without that flag, so doing
// this check unconditionally is fine
ContentType::Text | ContentType::JavaScript if valid_json(body) => {
self.print_json_text(body, false)
}
ContentType::JavaScript => self.print_syntax_text(body, "js"),
_ => self.buffer.print(body),
}
}
fn print_stream(&mut self, reader: &mut impl Read) -> io::Result<()> {
if !self.buffer.is_terminal() {
return copy_largebuf(reader, &mut self.buffer, true);
}
let mut guard = BinaryGuard::new(reader, true);
while let Some(lines) = guard.read_lines()? {
self.buffer.write_all(lines)?;
self.buffer.flush()?;
}
Ok(())
}
fn print_colorized_stream(
&mut self,
stream: &mut impl Read,
syntax: &'static str,
) -> io::Result<()> {
let mut guard = BinaryGuard::new(stream, self.buffer.is_terminal());
let mut highlighter = self.get_highlighter(syntax);
while let Some(lines) = guard.read_lines()? {
for line in lines.split_inclusive(|&b| b == b'\n') {
highlighter.highlight_bytes(line)?;
}
highlighter.flush()?;
}
Ok(())
}
fn print_syntax_stream(
&mut self,
stream: &mut impl Read,
syntax: &'static str,
) -> io::Result<()> {
if self.color {
self.print_colorized_stream(stream, syntax)
} else {
self.print_stream(stream)
}
}
fn print_json_stream(&mut self, stream: &mut impl Read) -> io::Result<()> {
if !self.indent_json {
// We don't have to do anything specialized, so fall back to the generic version
self.print_syntax_stream(stream, "json")
} else if self.color {
let mut guard = BinaryGuard::new(stream, self.buffer.is_terminal());
let mut formatter = get_json_formatter(&self.format_options);
let mut highlighter = self.get_highlighter("json");
let mut buf = Vec::new();
while let Some(lines) = guard.read_lines()? {
formatter.format_buf(lines, &mut buf)?;
for line in buf.split_inclusive(|&b| b == b'\n') {
highlighter.highlight_bytes(line)?;
}
highlighter.flush()?;
buf.clear();
}
Ok(())
} else {
let mut formatter = get_json_formatter(&self.format_options);
if !self.buffer.is_terminal() {
let mut buf = vec![0; BUFFER_SIZE];
loop {
match stream.read(&mut buf) {
Ok(0) => return Ok(()),
Ok(n) => {
formatter.format_buf(&buf[0..n], &mut self.buffer)?;
self.buffer.flush()?;
}
Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
}
}
}
let mut guard = BinaryGuard::new(stream, true);
while let Some(lines) = guard.read_lines()? {
formatter.format_buf(lines, &mut self.buffer)?;
self.buffer.flush()?;
}
Ok(())
}
}
fn print_body_stream(
&mut self,
content_type: ContentType,
body: &mut impl Read,
) -> io::Result<()> {
match content_type {
ContentType::Json => self.print_json_stream(body),
ContentType::Xml => self.print_syntax_stream(body, "xml"),
ContentType::Html => self.print_syntax_stream(body, "html"),
ContentType::Css => self.print_syntax_stream(body, "css"),
// print_body_text() has fancy JSON detection, but we can't do that here
ContentType::JavaScript => self.print_syntax_stream(body, "js"),
_ => self.print_stream(body),
}
}
fn print_headers(&mut self, text: &str) -> io::Result<()> {
if self.color {
self.print_colorized_text(text, "http")
} else {
self.buffer.print(text)
}
}
fn headers_to_string(&self, headers: &HeaderMap, version: Version) -> String {
let as_titlecase = match version {
Version::HTTP_09 | Version::HTTP_10 | Version::HTTP_11 => true,
Version::HTTP_2 | Version::HTTP_3 => false,
_ => false,
};
let mut headers: Vec<(&HeaderName, &HeaderValue)> = headers.iter().collect();
if self.sort_headers {
headers.sort_by_key(|(name, _)| name.as_str());
}
let mut header_string = String::new();
for (key, value) in headers {
if as_titlecase {
// Ought to be equivalent to how hyper does it
// https://github.com/hyperium/hyper/blob/f46b175bf71b202fbb907c4970b5743881b891e1/src/proto/h1/role.rs#L1332
// Header names are ASCII so it's ok to operate on char instead of u8
let mut prev = '-';
for mut c in key.as_str().chars() {
if prev == '-' {
c.make_ascii_uppercase();
}
header_string.push(c);
prev = c;
}
} else {
header_string.push_str(key.as_str());
}
header_string.push_str(": ");
match value.to_str() {
Ok(value) => header_string.push_str(value),
#[allow(clippy::format_push_string)]
Err(_) => header_string.push_str(&format!("{:?}", value)),
}
header_string.push('\n');
}
header_string.pop();
header_string
}
pub fn print_separator(&mut self) -> io::Result<()> {
self.buffer.print("\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_headers<T>(&mut self, request: &Request, cookie_jar: &T) -> io::Result<()>
where
T: CookieStore,
{
let method = request.method();
let url = request.url();
let query_string = url.query().map_or(String::from(""), |q| ["?", q].concat());
let version = request.version();
let mut headers = request.headers().clone();
headers
.entry(ACCEPT)
.or_insert_with(|| HeaderValue::from_static("*/*"));
if let Some(cookie) = cookie_jar.cookies(url) {
headers.insert(COOKIE, cookie);
}
// See https://github.com/seanmonstar/reqwest/issues/1030
// reqwest and hyper add certain headers, but only in the process of
// sending the request, which we haven't done yet
if let Some(body) = request.body().and_then(Body::as_bytes) {
// Added at https://github.com/seanmonstar/reqwest/blob/e56bd160ba/src/blocking/request.rs#L132
headers
.entry(CONTENT_LENGTH)
.or_insert_with(|| body.len().into());
}
if let Some(host) = request.url().host_str() {
// This is incorrect in case of HTTP/2, but we're already assuming
// HTTP/1.1 anyway
headers.entry(HOST).or_insert_with(|| {
// Added at https://github.com/hyperium/hyper/blob/dfa1bb291d/src/client/client.rs#L237
if test_mode() {
HeaderValue::from_str("http.mock")
} else if let Some(port) = request.url().port() {
HeaderValue::from_str(&format!("{}:{}", host, port))
} else {
HeaderValue::from_str(host)
}
.expect("hostname should already be validated/parsed")
});
}
let request_line = format!("{} {}{} {:?}\n", method, url.path(), query_string, version);
let headers = self.headers_to_string(&headers, version);
self.print_headers(&(request_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_response_headers(&mut self, response: &Response) -> io::Result<()> {
let version = response.version();
let status = response.status();
let headers = response.headers();
let status_line = format!("{:?} {}\n", version, status);
let headers = self.headers_to_string(headers, version);
self.print_headers(&(status_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_body(&mut self, request: &mut Request) -> anyhow::Result<()> |
pub fn print_response_body(
&mut self,
response: &mut Response,
encoding: Option<&'static Encoding>,
mime: Option<&str>,
) -> anyhow::Result<()> {
let starting_time = Instant::now();
let url = response.url().clone();
let content_type =
mime.map_or_else(|| get_content_type(response.headers()), ContentType::from);
let encoding = encoding.or_else(|| get_charset(response));
let compression_type = get_compression_type(response.headers());
let mut body = decompress(response, compression_type);
if !self.buffer.is_terminal() {
if (self.color || self.indent_json) && content_type.is_text() {
// The user explicitly asked for formatting even though this is
// going into a file, and the response is at least supposed to be
// text, so decode it
// TODO: HTTPie re-encodes output in the original encoding, we don't
// encoding_rs::Encoder::encode_from_utf8_to_vec_without_replacement()
// and guess_encoding() may help, but it'll require refactoring
// The current design is a bit unfortunate because there's no way to
// force UTF-8 output without coloring or formatting
// Unconditionally decoding is not an option because the body
// might not be text at all
if self.stream {
self.print_body_stream(
content_type,
&mut decode_stream(&mut body, encoding, &url)?,
)?;
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
let text = decode_blob_unconditional(&buf, encoding, &url);
self.print_body_text(content_type, &text)?;
}
} else if self.stream {
copy_largebuf(&mut body, &mut self.buffer, true)?;
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
self.buffer.print(&buf)?;
}
} else if self.stream {
match self
.print_body_stream(content_type, &mut decode_stream(&mut body, encoding, &url)?)
{
Ok(_) => {
self.buffer.print("\n")?;
}
Err(err) if err.kind() == io::ErrorKind::InvalidData => {
self.buffer.print(BINARY_SUPPRESSOR)?;
}
Err(err) => return Err(err.into()),
}
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
match decode_blob(&buf, encoding, &url) {
None => {
self.buffer.print(BINARY_SUPPRESSOR)?;
}
Some(text) => {
self.print_body_text(content_type, &text)?;
self.buffer.print("\n")?;
}
};
}
self.buffer.flush()?;
drop(body); // silence the borrow checker
response.meta_mut().content_download_duration = Some(starting_time.elapsed());
Ok(())
}
pub fn print_response_meta(&mut self, response: &Response) -> anyhow::Result<()> {
let meta = response.meta();
let mut total_elapsed_time = meta.request_duration.as_secs_f64();
if let Some(content_download_duration) = meta.content_download_duration {
total_elapsed_time += content_download_duration.as_secs_f64();
}
self.buffer
.print(format!("Elapsed time: {:.5}s", total_elapsed_time))?;
self.buffer.print("\n\n")?;
Ok(())
}
}
enum ContentType {
Json,
Html,
Xml,
JavaScript,
Css,
Text,
UrlencodedForm,
Multipart,
Unknown,
}
impl ContentType {
fn is_text(&self) -> bool {
!matches!(
self,
ContentType::Unknown | ContentType::UrlencodedForm | ContentType::Multipart
)
}
}
impl From<&str> for ContentType {
fn from(content_type: &str) -> Self {
if content_type.contains("json") {
ContentType::Json
} else if content_type.contains("html") {
ContentType::Html
} else if content_type.contains("xml") {
ContentType::Xml
} else if content_type.contains("multipart") {
ContentType::Multipart
} else if content_type.contains("x-www-form-urlencoded") {
ContentType::UrlencodedForm
} else if content_type.contains("javascript") {
ContentType::JavaScript
} else if content_type.contains("css") {
ContentType::Css
} else if content_type.contains("text") {
// We later check if this one's JSON
// HTTPie checks for "json", "javascript" and "text" in one place:
// https://github.com/httpie/httpie/blob/a32ad344dd/httpie/output/formatters/json.py#L14
// We have it more spread out but it behaves more or less the same
ContentType::Text
} else {
ContentType::Unknown
}
}
}
fn get_content_type(headers: &HeaderMap) -> ContentType {
headers
.get(CONTENT_TYPE)
.and_then(|value| value.to_str().ok())
.map_or(ContentType::Unknown, ContentType::from)
}
fn valid_json(text: &str) -> bool {
serde_json::from_str::<serde::de::IgnoredAny>(text).is_ok()
}
/// Decode a response, using BOM sniffing or chardet if the encoding is unknown.
///
/// This is different from [`Response::text`], which assumes UTF-8 as a fallback.
///
/// Returns `None` if the decoded text would contain null codepoints (i.e., is binary).
fn decode_blob<'a>(
raw: &'a [u8],
encoding: Option<&'static Encoding>,
url: &Url,
) -> Option<Cow<'a, str>> {
let encoding = encoding.unwrap_or_else(|| detect_encoding(raw, true, url));
// If the encoding is ASCII-compatible then a null byte corresponds to a
// null codepoint and vice versa, so we can check for them before decoding.
// For a 11MB binary file this saves 100ms, that's worth doing.
// UTF-16 is not ASCII-compatible: all ASCII characters are padded with a
// null byte, so finding a null byte doesn't mean anything.
if encoding.is_ascii_compatible() && raw.contains(&0) {
return None;
}
// Don't allow the BOM to override the encoding. But do remove it if
// it matches the encoding.
let text = encoding.decode_with_bom_removal(raw).0;
if !encoding.is_ascii_compatible() && text.contains('\0') {
None
} else {
Some(text)
}
}
/// Like [`decode_blob`], but without binary detection.
fn decode_blob_unconditional<'a>(
raw: &'a [u8],
encoding: Option<&'static Encoding>,
url: &Url,
) -> Cow<'a, str> {
let encoding = encoding.unwrap_or_else(|| detect_encoding(raw, true, url));
encoding.decode_with_bom_removal(raw).0
}
/// Decode a streaming response in a way that matches [`decode_blob`].
///
/// As-is this should do a lossy decode with replacement characters, so the
/// output is valid UTF-8, but a differently configured DecodeReaderBytes can
/// produce invalid UTF-8.
fn decode_stream<'a>(
stream: &'a mut impl Read,
encoding: Option<&'static Encoding>,
url: &Url,
) -> io::Result<impl Read + 'a> {
// 16 KiB is the largest initial read I could achieve.
// That was with a HTTP/2 miniserve running on Linux.
// I think this is a buffer size for hyper, it could change. But it seems
// large enough for a best-effort attempt.
// (16 is otherwise used because 0 seems dangerous, but it shouldn't matter.)
let capacity = if encoding.is_some() { 16 } else { 16 * 1024 };
let mut reader = BufReader::with_capacity(capacity, stream);
let encoding = match encoding {
Some(encoding) => encoding,
None => {
// We need to guess the encoding.
// The more data we have the better our guess, but we can't just wait
// for all of it to arrive. The user explicitly asked us to hurry.
// HTTPie solves this by detecting the encoding separately for each line,
// but that's silly, and we don't necessarily go linewise.
// We'll just hope we get enough data in the very first read.
let peek = reader.fill_buf()?;
detect_encoding(peek, false, url)
}
};
// We could set .utf8_passthru(true) to not sanitize invalid UTF-8. It would
// arrive more faithfully in the terminal.
// But that has questionable benefit and writing invalid UTF-8 to stdout
// causes an error on Windows (because the console is UTF-16).
let reader = DecodeReaderBytesBuilder::new()
.encoding(Some(encoding))
.build(reader);
Ok(reader)
}
fn detect_encoding(mut bytes: &[u8], mut complete: bool, url: &Url) -> &'static Encoding {
// chardetng doesn't seem to take BOMs into account, so check those manually.
// We trust them unconditionally. (Should we?)
if bytes.starts_with(b"\xEF\xBB\xBF") {
return encoding_rs::UTF_8;
} else if bytes.starts_with(b"\xFF\xFE") {
return encoding_rs::UTF_16LE;
} else if bytes.starts_with(b"\xFE\xFF") {
return encoding_rs::UTF_16BE;
}
// 64 KiB takes 2-5 ms to check on my machine. So even on slower machines
// that should be acceptable.
// If we check the full document we can easily spend most of our runtime
// inside chardetng. That's especially problematic because we usually get
// here for binary files, which we won't even end up showing.
const CHARDET_PEEK_SIZE: usize = 64 * 1024;
if bytes.len() > CHARDET_PEEK_SIZE {
bytes = &bytes[..CHARDET_PEEK_SIZE];
complete = false;
}
// HTTPie uses https://pypi.org/project/charset-normalizer/
let mut detector = chardetng::EncodingDetector::new();
detector.feed(bytes, complete);
let tld = url.domain().and_then(get_tld).map(str::as_bytes);
// The `allow_utf8` parameter is meant for HTML content:
// https://hsivonen.fi/utf-8-detection/
// We always enable it because we're more geared toward APIs than
// toward plain webpages, and because we don't have a full HTML parser
// to implement proper UTF-8 detection.
detector.guess(tld, true)
}
fn get_tld(domain: &str) -> Option<&str> {
// Fully qualified domain names end with a .
domain.trim_end_matches('.').rsplit('.').next()
}
/// Get the response's encoding from its Content-Type.
///
/// reqwest doesn't provide an API for this, and we don't want a fixed default.
///
/// See https://github.com/seanmonstar/reqwest/blob/2940740493/src/async_impl/response.rs#L172
fn get_charset(response: &Response) -> Option<&'static Encoding> {
let content_type = response.headers().get(CONTENT_TYPE)?.to_str().ok()?;
let mime: Mime = content_type.parse().ok()?;
let encoding_name = mime.get_param("charset")?.as_str();
Encoding::for_label(encoding_name.as_bytes())
}
#[cfg(test)]
mod tests {
use indoc::indoc;
use super::*;
use crate::utils::random_string;
use crate::{buffer::Buffer, cli::Cli, vec_of_strings};
fn run_cmd(args: impl IntoIterator<Item = String>, is_stdout_tty: bool) -> Printer {
let args = Cli::try_parse_from(args).unwrap();
let buffer = Buffer::new(args.download, args.output.as_deref(), is_stdout_tty).unwrap();
let pretty = args.pretty.unwrap_or_else(|| buffer.guess_pretty());
let format_options = FormatOptions::default();
Printer::new(pretty, args.style, false, buffer, format_options)
}
fn temp_path() -> String {
let mut dir = std::env::temp_dir();
let filename = random_string();
dir.push(filename);
dir.to_str().unwrap().to_owned()
}
#[test]
fn terminal_mode() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get"], true);
assert_eq!(p.color, true);
assert!(p.buffer.is_stdout());
}
#[test]
fn redirect_mode() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get"], false);
assert_eq!(p.color, false);
assert!(p.buffer.is_redirect());
}
#[test]
fn terminal_mode_with_output_file() {
let output = temp_path();
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get", "-o", output], true);
assert_eq!(p.color, false);
assert!(p.buffer.is_file());
}
#[test]
fn redirect_mode_with_output_file() {
let output = temp_path();
let p = run_cmd(
vec_of_strings!["xh", "httpbin.org/get", "-o", output],
false,
);
assert_eq!(p.color, false);
assert!(p.buffer.is_file());
}
#[test]
fn terminal_mode_download() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get", "-d"], true);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn redirect_mode_download() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get", "-d"], false);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn terminal_mode_download_with_output_file() {
let output = temp_path();
let p = run_cmd(
vec_of_strings!["xh", "httpbin.org/get", "-d", "-o", output],
true,
);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn redirect_mode_download_with_output_file() {
let output = temp_path();
let p = run_cmd(
vec_of_strings!["xh", "httpbin.org/get", "-d", "-o", output],
false,
);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn test_header_casing() {
let p = Printer {
indent_json: false,
color: false,
theme: Theme::Auto,
sort_headers: false,
stream: false,
buffer: Buffer::new(false, None, false).unwrap(),
format_options: FormatOptions::default(),
};
let mut headers = HeaderMap::new();
headers.insert("ab-cd", "0".parse().unwrap());
headers.insert("-cd", "0".parse().unwrap());
headers.insert("-", "0".parse().unwrap());
headers.insert("ab-%c", "0".parse().unwrap());
headers.insert("A-b--C", "0".parse().unwrap());
assert_eq!(
p.headers_to_string(&headers, reqwest::Version::HTTP_11),
indoc! {"
Ab-Cd: 0
-Cd: 0
-: 0
Ab-%c: 0
A-B--C: 0"
}
);
assert_eq!(
p.headers_to_string(&headers, reqwest::Version::HTTP_2),
indoc! {"
ab-cd: 0
-cd: 0
-: 0
ab-%c: 0
a-b--c: 0"
}
);
}
}
| {
let content_type = get_content_type(request.headers());
if let Some(body) = request.body_mut() {
let body = body.buffer()?;
if body.contains(&b'\0') {
self.buffer.print(BINARY_SUPPRESSOR)?;
} else {
self.print_body_text(content_type, &String::from_utf8_lossy(body))?;
self.buffer.print("\n")?;
}
// Breathing room between request and response
self.buffer.print("\n")?;
self.buffer.flush()?;
}
Ok(())
} | identifier_body |
printer.rs | use std::borrow::Cow;
use std::io::{self, BufRead, BufReader, Read, Write};
use std::time::Instant;
use encoding_rs::Encoding;
use encoding_rs_io::DecodeReaderBytesBuilder;
use mime::Mime;
use reqwest::blocking::{Body, Request, Response};
use reqwest::cookie::CookieStore;
use reqwest::header::{
HeaderMap, HeaderName, HeaderValue, ACCEPT, CONTENT_LENGTH, CONTENT_TYPE, COOKIE, HOST,
};
use reqwest::Version;
use url::Url;
use crate::cli::FormatOptions;
use crate::decoder::{decompress, get_compression_type};
use crate::{
buffer::Buffer,
cli::{Pretty, Theme},
formatting::{get_json_formatter, Highlighter},
middleware::ResponseExt,
utils::{copy_largebuf, test_mode, BUFFER_SIZE},
};
const BINARY_SUPPRESSOR: &str = concat!(
"+-----------------------------------------+\n",
"| NOTE: binary data not shown in terminal |\n",
"+-----------------------------------------+\n",
"\n"
);
/// A wrapper around a reader that reads line by line, (optionally) returning
/// an error if the line appears to be binary.
///
/// This is meant for streaming output. `checked` should typically be
/// set to buffer.is_terminal(), but if you need neither checking nor
/// highlighting then you may not need a `BinaryGuard` at all.
///
/// This reader does not validate UTF-8.
struct BinaryGuard<'a, T: Read> {
reader: BufReader<&'a mut T>,
buffer: Vec<u8>,
checked: bool,
}
impl<'a, T: Read> BinaryGuard<'a, T> {
fn new(reader: &'a mut T, checked: bool) -> Self {
Self {
reader: BufReader::with_capacity(BUFFER_SIZE, reader),
buffer: Vec::new(),
checked,
}
}
/// Return at least one complete line.
///
/// Compared to returning exactly one line, this gives you more information
/// about when data comes in. It's better to flush after each `read_lines`
/// call than to flush after each individual line.
///
/// We only work with complete lines to accommodate the syntax highlighting
/// and the binary data (null byte) detection. HTTPie processes exactly
/// one line at a time.
///
/// We work off the assumption that if the response contains a null byte
/// then none of it should be shown, and therefore the earlier we detect
/// the null byte, the better. This basically matches the non-streaming
/// behavior. But if it takes a while for the first null byte to show up
/// then it's unpredictable when the plain text output is cut off by the
/// binary suppressor. HTTPie is more consistent in this regard.
fn read_lines(&mut self) -> io::Result<Option<&[u8]>> {
self.buffer.clear();
loop {
let buf = match self.reader.fill_buf() {
Ok(buf) => buf,
Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
};
if self.checked && buf.contains(&b'\0') {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Found binary data",
));
} else if buf.is_empty() {
if self.buffer.is_empty() {
return Ok(None);
} else {
return Ok(Some(&self.buffer));
}
} else if let Some(ind) = memchr::memrchr(b'\n', buf) {
// Potential optimization: return a slice of buf instead of copying.
// (We'd have to delay the call to .consume() until the next call.)
// (There is a weird borrow checker problem.)
self.buffer.extend_from_slice(&buf[..=ind]);
self.reader.consume(ind + 1);
return Ok(Some(&self.buffer));
} else {
self.buffer.extend_from_slice(buf);
let n = buf.len(); // borrow checker
self.reader.consume(n);
// It would be nice to return early if self.buffer is growing very large
// or if it's been a long time since the last read. But especially the
// second is hard to implement, and we'd want to pair this with flushing
// the output buffer. (HTTPie does nothing of this kind.)
}
}
}
}
pub struct Printer {
indent_json: bool,
color: bool,
theme: Theme,
sort_headers: bool,
stream: bool,
buffer: Buffer,
format_options: FormatOptions,
}
impl Printer {
pub fn new(
pretty: Pretty,
theme: Option<Theme>,
stream: bool,
buffer: Buffer,
format_options: FormatOptions,
) -> Self {
let theme = theme.unwrap_or(Theme::Auto);
Printer {
indent_json: pretty.format(),
sort_headers: pretty.format(),
color: pretty.color(),
stream,
theme,
buffer,
format_options,
}
}
fn get_highlighter(&mut self, syntax: &'static str) -> Highlighter<'_> {
Highlighter::new(syntax, self.theme, &mut self.buffer)
}
fn print_colorized_text(&mut self, text: &str, syntax: &'static str) -> io::Result<()> {
self.get_highlighter(syntax).highlight(text)
}
fn print_syntax_text(&mut self, text: &str, syntax: &'static str) -> io::Result<()> {
if self.color {
self.print_colorized_text(text, syntax)
} else {
self.buffer.print(text)
}
}
fn print_json_text(&mut self, text: &str, check_valid: bool) -> io::Result<()> {
if !self.indent_json {
// We don't have to do anything specialized, so fall back to the generic version
return self.print_syntax_text(text, "json");
}
if check_valid && !valid_json(text) {
// JSONXF may mess up the text, e.g. by removing whitespace
// This is somewhat common as application/json is the default
// content type for requests
return self.print_syntax_text(text, "json");
}
let mut formatter = get_json_formatter(&self.format_options);
if self.color {
let mut buf = Vec::new();
formatter.format_buf(text.as_bytes(), &mut buf)?;
// in principle, buf should already be valid UTF-8,
// because JSONXF doesn't mangle it
let text = String::from_utf8_lossy(&buf);
self.print_colorized_text(&text, "json")
} else {
formatter.format_buf(text.as_bytes(), &mut self.buffer)
}
}
fn print_body_text(&mut self, content_type: ContentType, body: &str) -> io::Result<()> {
match content_type {
ContentType::Json => self.print_json_text(body, true),
ContentType::Xml => self.print_syntax_text(body, "xml"),
ContentType::Html => self.print_syntax_text(body, "html"),
ContentType::Css => self.print_syntax_text(body, "css"),
// In HTTPie part of this behavior is gated behind the --json flag
// But it does JSON formatting even without that flag, so doing
// this check unconditionally is fine
ContentType::Text | ContentType::JavaScript if valid_json(body) => {
self.print_json_text(body, false)
}
ContentType::JavaScript => self.print_syntax_text(body, "js"),
_ => self.buffer.print(body),
}
}
fn print_stream(&mut self, reader: &mut impl Read) -> io::Result<()> {
if !self.buffer.is_terminal() {
return copy_largebuf(reader, &mut self.buffer, true);
}
let mut guard = BinaryGuard::new(reader, true);
while let Some(lines) = guard.read_lines()? {
self.buffer.write_all(lines)?;
self.buffer.flush()?;
}
Ok(())
}
fn print_colorized_stream(
&mut self,
stream: &mut impl Read,
syntax: &'static str,
) -> io::Result<()> {
let mut guard = BinaryGuard::new(stream, self.buffer.is_terminal());
let mut highlighter = self.get_highlighter(syntax);
while let Some(lines) = guard.read_lines()? {
for line in lines.split_inclusive(|&b| b == b'\n') {
highlighter.highlight_bytes(line)?;
}
highlighter.flush()?;
}
Ok(())
}
fn print_syntax_stream(
&mut self,
stream: &mut impl Read,
syntax: &'static str,
) -> io::Result<()> {
if self.color {
self.print_colorized_stream(stream, syntax)
} else {
self.print_stream(stream)
}
}
fn print_json_stream(&mut self, stream: &mut impl Read) -> io::Result<()> {
if !self.indent_json {
// We don't have to do anything specialized, so fall back to the generic version
self.print_syntax_stream(stream, "json")
} else if self.color {
let mut guard = BinaryGuard::new(stream, self.buffer.is_terminal());
let mut formatter = get_json_formatter(&self.format_options);
let mut highlighter = self.get_highlighter("json");
let mut buf = Vec::new();
while let Some(lines) = guard.read_lines()? {
formatter.format_buf(lines, &mut buf)?;
for line in buf.split_inclusive(|&b| b == b'\n') {
highlighter.highlight_bytes(line)?;
}
highlighter.flush()?;
buf.clear();
}
Ok(())
} else {
let mut formatter = get_json_formatter(&self.format_options);
if !self.buffer.is_terminal() {
let mut buf = vec![0; BUFFER_SIZE];
loop {
match stream.read(&mut buf) {
Ok(0) => return Ok(()),
Ok(n) => {
formatter.format_buf(&buf[0..n], &mut self.buffer)?;
self.buffer.flush()?;
}
Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
}
}
}
let mut guard = BinaryGuard::new(stream, true);
while let Some(lines) = guard.read_lines()? {
formatter.format_buf(lines, &mut self.buffer)?;
self.buffer.flush()?;
}
Ok(())
}
}
fn print_body_stream(
&mut self,
content_type: ContentType,
body: &mut impl Read,
) -> io::Result<()> {
match content_type {
ContentType::Json => self.print_json_stream(body),
ContentType::Xml => self.print_syntax_stream(body, "xml"),
ContentType::Html => self.print_syntax_stream(body, "html"),
ContentType::Css => self.print_syntax_stream(body, "css"),
// print_body_text() has fancy JSON detection, but we can't do that here
ContentType::JavaScript => self.print_syntax_stream(body, "js"),
_ => self.print_stream(body),
}
}
fn print_headers(&mut self, text: &str) -> io::Result<()> {
if self.color {
self.print_colorized_text(text, "http")
} else {
self.buffer.print(text)
}
}
fn headers_to_string(&self, headers: &HeaderMap, version: Version) -> String {
let as_titlecase = match version {
Version::HTTP_09 | Version::HTTP_10 | Version::HTTP_11 => true,
Version::HTTP_2 | Version::HTTP_3 => false,
_ => false,
};
let mut headers: Vec<(&HeaderName, &HeaderValue)> = headers.iter().collect();
if self.sort_headers {
headers.sort_by_key(|(name, _)| name.as_str());
}
let mut header_string = String::new();
for (key, value) in headers {
if as_titlecase {
// Ought to be equivalent to how hyper does it
// https://github.com/hyperium/hyper/blob/f46b175bf71b202fbb907c4970b5743881b891e1/src/proto/h1/role.rs#L1332
// Header names are ASCII so it's ok to operate on char instead of u8
let mut prev = '-';
for mut c in key.as_str().chars() {
if prev == '-' {
c.make_ascii_uppercase();
}
header_string.push(c);
prev = c;
}
} else {
header_string.push_str(key.as_str());
}
header_string.push_str(": ");
match value.to_str() {
Ok(value) => header_string.push_str(value),
#[allow(clippy::format_push_string)]
Err(_) => header_string.push_str(&format!("{:?}", value)),
}
header_string.push('\n');
}
header_string.pop();
header_string
}
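// Illustrative note (not in the original source): with the titlecasing above,
// a header like "x-api-key" renders as "X-Api-Key" for HTTP/1.x, but is left
// lowercase for HTTP/2 and HTTP/3, matching the test_header_casing test below.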
pub fn print_separator(&mut self) -> io::Result<()> {
self.buffer.print("\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_headers<T>(&mut self, request: &Request, cookie_jar: &T) -> io::Result<()>
where
T: CookieStore,
{
let method = request.method();
let url = request.url();
let query_string = url.query().map_or(String::from(""), |q| ["?", q].concat());
let version = request.version();
let mut headers = request.headers().clone();
headers
.entry(ACCEPT)
.or_insert_with(|| HeaderValue::from_static("*/*"));
if let Some(cookie) = cookie_jar.cookies(url) {
headers.insert(COOKIE, cookie);
}
// See https://github.com/seanmonstar/reqwest/issues/1030
// reqwest and hyper add certain headers, but only in the process of
// sending the request, which we haven't done yet
if let Some(body) = request.body().and_then(Body::as_bytes) {
// Added at https://github.com/seanmonstar/reqwest/blob/e56bd160ba/src/blocking/request.rs#L132
headers
.entry(CONTENT_LENGTH)
.or_insert_with(|| body.len().into());
}
if let Some(host) = request.url().host_str() {
// This is incorrect in case of HTTP/2, but we're already assuming
// HTTP/1.1 anyway
headers.entry(HOST).or_insert_with(|| {
// Added at https://github.com/hyperium/hyper/blob/dfa1bb291d/src/client/client.rs#L237
if test_mode() {
HeaderValue::from_str("http.mock")
} else if let Some(port) = request.url().port() {
HeaderValue::from_str(&format!("{}:{}", host, port))
} else {
HeaderValue::from_str(host)
}
.expect("hostname should already be validated/parsed")
});
}
let request_line = format!("{} {}{} {:?}\n", method, url.path(), query_string, version);
let headers = self.headers_to_string(&headers, version);
self.print_headers(&(request_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_response_headers(&mut self, response: &Response) -> io::Result<()> {
let version = response.version();
let status = response.status();
let headers = response.headers();
let status_line = format!("{:?} {}\n", version, status);
let headers = self.headers_to_string(headers, version);
self.print_headers(&(status_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_body(&mut self, request: &mut Request) -> anyhow::Result<()> {
let content_type = get_content_type(request.headers());
if let Some(body) = request.body_mut() {
let body = body.buffer()?;
if body.contains(&b'\0') {
self.buffer.print(BINARY_SUPPRESSOR)?;
} else {
self.print_body_text(content_type, &String::from_utf8_lossy(body))?;
self.buffer.print("\n")?;
}
// Breathing room between request and response
self.buffer.print("\n")?;
self.buffer.flush()?;
}
Ok(())
}
pub fn print_response_body(
&mut self,
response: &mut Response,
encoding: Option<&'static Encoding>,
mime: Option<&str>,
) -> anyhow::Result<()> {
let starting_time = Instant::now();
let url = response.url().clone();
let content_type =
mime.map_or_else(|| get_content_type(response.headers()), ContentType::from);
let encoding = encoding.or_else(|| get_charset(response));
let compression_type = get_compression_type(response.headers());
let mut body = decompress(response, compression_type);
if !self.buffer.is_terminal() {
if (self.color || self.indent_json) && content_type.is_text() {
// The user explicitly asked for formatting even though this is
// going into a file, and the response is at least supposed to be
// text, so decode it
// TODO: HTTPie re-encodes output in the original encoding, we don't
// encoding_rs::Encoder::encode_from_utf8_to_vec_without_replacement()
// and guess_encoding() may help, but it'll require refactoring
// The current design is a bit unfortunate because there's no way to
// force UTF-8 output without coloring or formatting
// Unconditionally decoding is not an option because the body
// might not be text at all
if self.stream {
self.print_body_stream(
content_type,
&mut decode_stream(&mut body, encoding, &url)?,
)?;
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
let text = decode_blob_unconditional(&buf, encoding, &url);
self.print_body_text(content_type, &text)?;
}
} else if self.stream {
copy_largebuf(&mut body, &mut self.buffer, true)?;
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
self.buffer.print(&buf)?;
}
} else if self.stream {
match self
.print_body_stream(content_type, &mut decode_stream(&mut body, encoding, &url)?)
{
Ok(_) => {
self.buffer.print("\n")?;
}
Err(err) if err.kind() == io::ErrorKind::InvalidData => {
self.buffer.print(BINARY_SUPPRESSOR)?;
}
Err(err) => return Err(err.into()),
}
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
match decode_blob(&buf, encoding, &url) {
None => {
self.buffer.print(BINARY_SUPPRESSOR)?;
}
Some(text) => {
self.print_body_text(content_type, &text)?;
self.buffer.print("\n")?;
}
};
}
self.buffer.flush()?;
drop(body); // silence the borrow checker
response.meta_mut().content_download_duration = Some(starting_time.elapsed());
Ok(())
}
pub fn print_response_meta(&mut self, response: &Response) -> anyhow::Result<()> {
let meta = response.meta();
let mut total_elapsed_time = meta.request_duration.as_secs_f64();
if let Some(content_download_duration) = meta.content_download_duration {
total_elapsed_time += content_download_duration.as_secs_f64();
}
self.buffer
.print(format!("Elapsed time: {:.5}s", total_elapsed_time))?;
self.buffer.print("\n\n")?;
Ok(())
}
}
enum ContentType {
Json,
Html,
Xml,
JavaScript,
Css,
Text,
UrlencodedForm,
Multipart,
Unknown,
}
impl ContentType {
fn is_text(&self) -> bool {
!matches!(
self,
ContentType::Unknown | ContentType::UrlencodedForm | ContentType::Multipart
)
}
}
impl From<&str> for ContentType {
fn from(content_type: &str) -> Self {
if content_type.contains("json") {
ContentType::Json
} else if content_type.contains("html") {
ContentType::Html
} else if content_type.contains("xml") {
ContentType::Xml
} else if content_type.contains("multipart") {
ContentType::Multipart
} else if content_type.contains("x-www-form-urlencoded") {
ContentType::UrlencodedForm
} else if content_type.contains("javascript") {
ContentType::JavaScript
} else if content_type.contains("css") {
ContentType::Css
} else if content_type.contains("text") {
// We later check if this one's JSON
// HTTPie checks for "json", "javascript" and "text" in one place:
// https://github.com/httpie/httpie/blob/a32ad344dd/httpie/output/formatters/json.py#L14
// We have it more spread out but it behaves more or less the same
ContentType::Text
} else {
ContentType::Unknown
}
}
}
fn get_content_type(headers: &HeaderMap) -> ContentType {
headers
.get(CONTENT_TYPE)
.and_then(|value| value.to_str().ok())
.map_or(ContentType::Unknown, ContentType::from)
}
fn valid_json(text: &str) -> bool {
serde_json::from_str::<serde::de::IgnoredAny>(text).is_ok()
}
/// Decode a response, using BOM sniffing or chardet if the encoding is unknown.
///
/// This is different from [`Response::text`], which assumes UTF-8 as a fallback.
///
/// Returns `None` if the decoded text would contain null codepoints (i.e., is binary).
fn decode_blob<'a>(
raw: &'a [u8],
encoding: Option<&'static Encoding>,
url: &Url,
) -> Option<Cow<'a, str>> {
let encoding = encoding.unwrap_or_else(|| detect_encoding(raw, true, url));
// If the encoding is ASCII-compatible then a null byte corresponds to a
// null codepoint and vice versa, so we can check for them before decoding.
// For an 11 MB binary file this saves 100 ms, which is worth doing.
// UTF-16 is not ASCII-compatible: all ASCII characters are padded with a
// null byte, so finding a null byte doesn't mean anything.
if encoding.is_ascii_compatible() && raw.contains(&0) {
return None;
}
// Don't allow the BOM to override the encoding. But do remove it if
// it matches the encoding.
let text = encoding.decode_with_bom_removal(raw).0;
if !encoding.is_ascii_compatible() && text.contains('\0') {
None
} else {
Some(text)
}
}
/// Like [`decode_blob`], but without binary detection.
fn decode_blob_unconditional<'a>(
raw: &'a [u8],
encoding: Option<&'static Encoding>,
url: &Url,
) -> Cow<'a, str> {
let encoding = encoding.unwrap_or_else(|| detect_encoding(raw, true, url));
encoding.decode_with_bom_removal(raw).0
}
/// Decode a streaming response in a way that matches [`decode_blob`].
///
/// As-is this should do a lossy decode with replacement characters, so the
/// output is valid UTF-8, but a differently configured DecodeReaderBytes can
/// produce invalid UTF-8.
fn decode_stream<'a>(
stream: &'a mut impl Read,
encoding: Option<&'static Encoding>,
url: &Url,
) -> io::Result<impl Read + 'a> {
// 16 KiB is the largest initial read I could achieve.
// That was with an HTTP/2 miniserve running on Linux.
// I think this is a buffer size for hyper, it could change. But it seems
// large enough for a best-effort attempt.
// (16 is otherwise used because 0 seems dangerous, but it shouldn't matter.)
let capacity = if encoding.is_some() { 16 } else { 16 * 1024 };
let mut reader = BufReader::with_capacity(capacity, stream);
let encoding = match encoding {
Some(encoding) => encoding,
None => {
// We need to guess the encoding.
// The more data we have the better our guess, but we can't just wait
// for all of it to arrive. The user explicitly asked us to hurry.
// HTTPie solves this by detecting the encoding separately for each line,
// but that's silly, and we don't necessarily go linewise.
// We'll just hope we get enough data in the very first read.
let peek = reader.fill_buf()?;
detect_encoding(peek, false, url)
}
};
// We could set .utf8_passthru(true) to not sanitize invalid UTF-8. It would
// arrive more faithfully in the terminal.
// But that has questionable benefit and writing invalid UTF-8 to stdout
// causes an error on Windows (because the console is UTF-16).
let reader = DecodeReaderBytesBuilder::new()
.encoding(Some(encoding))
.build(reader);
Ok(reader)
}
fn detect_encoding(mut bytes: &[u8], mut complete: bool, url: &Url) -> &'static Encoding {
// chardetng doesn't seem to take BOMs into account, so check those manually.
// We trust them unconditionally. (Should we?)
if bytes.starts_with(b"\xEF\xBB\xBF") {
return encoding_rs::UTF_8;
} else if bytes.starts_with(b"\xFF\xFE") {
return encoding_rs::UTF_16LE;
} else if bytes.starts_with(b"\xFE\xFF") {
return encoding_rs::UTF_16BE;
}
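// For instance (illustrative): a body starting with the bytes EF BB BF is
// reported as UTF-8 right here and never reaches the chardetng guess below.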
// 64 KiB takes 2-5 ms to check on my machine. So even on slower machines
// that should be acceptable.
// If we check the full document we can easily spend most of our runtime
// inside chardetng. That's especially problematic because we usually get
// here for binary files, which we won't even end up showing.
const CHARDET_PEEK_SIZE: usize = 64 * 1024;
if bytes.len() > CHARDET_PEEK_SIZE {
bytes = &bytes[..CHARDET_PEEK_SIZE];
complete = false;
}
// HTTPie uses https://pypi.org/project/charset-normalizer/
let mut detector = chardetng::EncodingDetector::new();
detector.feed(bytes, complete);
let tld = url.domain().and_then(get_tld).map(str::as_bytes);
// The `allow_utf8` parameter is meant for HTML content:
// https://hsivonen.fi/utf-8-detection/
// We always enable it because we're more geared toward APIs than
// toward plain webpages, and because we don't have a full HTML parser
// to implement proper UTF-8 detection.
detector.guess(tld, true)
}
fn get_tld(domain: &str) -> Option<&str> {
// Fully qualified domain names end with a .
domain.trim_end_matches('.').rsplit('.').next()
}
/// Get the response's encoding from its Content-Type.
///
/// reqwest doesn't provide an API for this, and we don't want a fixed default.
///
/// See https://github.com/seanmonstar/reqwest/blob/2940740493/src/async_impl/response.rs#L172
fn get_charset(response: &Response) -> Option<&'static Encoding> {
let content_type = response.headers().get(CONTENT_TYPE)?.to_str().ok()?;
let mime: Mime = content_type.parse().ok()?;
let encoding_name = mime.get_param("charset")?.as_str();
Encoding::for_label(encoding_name.as_bytes())
}
#[cfg(test)]
mod tests {
use indoc::indoc;
use super::*;
use crate::utils::random_string;
use crate::{buffer::Buffer, cli::Cli, vec_of_strings};
fn run_cmd(args: impl IntoIterator<Item = String>, is_stdout_tty: bool) -> Printer {
let args = Cli::try_parse_from(args).unwrap();
let buffer = Buffer::new(args.download, args.output.as_deref(), is_stdout_tty).unwrap();
let pretty = args.pretty.unwrap_or_else(|| buffer.guess_pretty());
let format_options = FormatOptions::default();
Printer::new(pretty, args.style, false, buffer, format_options)
}
fn temp_path() -> String {
let mut dir = std::env::temp_dir();
let filename = random_string();
dir.push(filename);
dir.to_str().unwrap().to_owned()
}
#[test]
fn terminal_mode() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get"], true);
assert_eq!(p.color, true);
assert!(p.buffer.is_stdout());
}
#[test]
fn redirect_mode() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get"], false);
assert_eq!(p.color, false);
assert!(p.buffer.is_redirect());
}
#[test]
fn terminal_mode_with_output_file() {
let output = temp_path();
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get", "-o", output], true);
assert_eq!(p.color, false);
assert!(p.buffer.is_file());
}
#[test]
fn redirect_mode_with_output_file() {
let output = temp_path();
let p = run_cmd(
vec_of_strings!["xh", "httpbin.org/get", "-o", output],
false,
);
assert_eq!(p.color, false);
assert!(p.buffer.is_file());
}
#[test]
fn terminal_mode_download() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get", "-d"], true);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn redirect_mode_download() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get", "-d"], false);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn terminal_mode_download_with_output_file() {
let output = temp_path();
let p = run_cmd(
vec_of_strings!["xh", "httpbin.org/get", "-d", "-o", output],
true,
);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn redirect_mode_download_with_output_file() {
let output = temp_path();
let p = run_cmd(
vec_of_strings!["xh", "httpbin.org/get", "-d", "-o", output],
false,
);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn test_header_casing() {
let p = Printer {
indent_json: false,
color: false,
theme: Theme::Auto,
sort_headers: false,
stream: false,
buffer: Buffer::new(false, None, false).unwrap(),
format_options: FormatOptions::default(),
};
let mut headers = HeaderMap::new();
headers.insert("ab-cd", "0".parse().unwrap());
headers.insert("-cd", "0".parse().unwrap());
headers.insert("-", "0".parse().unwrap());
headers.insert("ab-%c", "0".parse().unwrap());
headers.insert("A-b--C", "0".parse().unwrap());
assert_eq!(
p.headers_to_string(&headers, reqwest::Version::HTTP_11),
indoc! {"
Ab-Cd: 0
-Cd: 0
-: 0
Ab-%c: 0
A-B--C: 0"
}
);
assert_eq!(
p.headers_to_string(&headers, reqwest::Version::HTTP_2),
indoc! {"
ab-cd: 0
-cd: 0
-: 0
ab-%c: 0
a-b--c: 0"
}
);
}
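// Illustrative addition, not part of the original test suite: a quick sketch
// of how ContentType::from and valid_json classify typical bodies.
#[test]
fn content_type_and_json_detection_sketch() {
assert!(matches!(ContentType::from("application/json"), ContentType::Json));
assert!(matches!(
ContentType::from("text/html; charset=utf-8"),
ContentType::Html
));
assert!(matches!(
ContentType::from("application/octet-stream"),
ContentType::Unknown
));
assert!(valid_json(r#"{"ok": true}"#));
assert!(!valid_json("{not json}"));
}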
}
| {
None
} | conditional_block |
printer.rs | use std::borrow::Cow;
use std::io::{self, BufRead, BufReader, Read, Write};
use std::time::Instant;
use encoding_rs::Encoding;
use encoding_rs_io::DecodeReaderBytesBuilder;
use mime::Mime;
use reqwest::blocking::{Body, Request, Response};
use reqwest::cookie::CookieStore;
use reqwest::header::{
HeaderMap, HeaderName, HeaderValue, ACCEPT, CONTENT_LENGTH, CONTENT_TYPE, COOKIE, HOST,
};
use reqwest::Version;
use url::Url;
use crate::cli::FormatOptions;
use crate::decoder::{decompress, get_compression_type};
use crate::{
buffer::Buffer,
cli::{Pretty, Theme},
formatting::{get_json_formatter, Highlighter},
middleware::ResponseExt,
utils::{copy_largebuf, test_mode, BUFFER_SIZE},
};
const BINARY_SUPPRESSOR: &str = concat!(
"+-----------------------------------------+\n",
"| NOTE: binary data not shown in terminal |\n",
"+-----------------------------------------+\n",
"\n"
);
/// A wrapper around a reader that reads line by line, (optionally) returning
/// an error if the line appears to be binary.
///
/// This is meant for streaming output. `checked` should typically be
/// set to buffer.is_terminal(), but if you need neither checking nor
/// highlighting then you may not need a `BinaryGuard` at all.
///
/// This reader does not validate UTF-8.
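///
/// Rough usage sketch (illustrative; `reader` and `out` are assumed stand-ins,
/// not names from this file):
///
/// ```ignore
/// let mut guard = BinaryGuard::new(&mut reader, out.is_terminal());
/// while let Some(lines) = guard.read_lines()? {
///     out.write_all(lines)?;
///     out.flush()?;
/// }
/// ```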
struct BinaryGuard<'a, T: Read> {
reader: BufReader<&'a mut T>,
buffer: Vec<u8>,
checked: bool,
}
impl<'a, T: Read> BinaryGuard<'a, T> {
fn new(reader: &'a mut T, checked: bool) -> Self {
Self {
reader: BufReader::with_capacity(BUFFER_SIZE, reader),
buffer: Vec::new(),
checked,
}
}
/// Return at least one complete line.
///
/// Compared to returning exactly one line, this gives you more information
/// about when data comes in. It's better to flush after each `read_lines`
/// call than to flush after each individual line.
///
/// We only work with complete lines to accommodate the syntax highlighting
/// and the binary data (null byte) detection. HTTPie processes exactly
/// one line at a time.
///
/// We work off the assumption that if the response contains a null byte
/// then none of it should be shown, and therefore the earlier we detect
/// the null byte, the better. This basically matches the non-streaming
/// behavior. But if it takes a while for the first null byte to show up
/// then it's unpredictable when the plain text output is cut off by the
/// binary suppressor. HTTPie is more consistent in this regard.
fn read_lines(&mut self) -> io::Result<Option<&[u8]>> {
self.buffer.clear();
loop {
let buf = match self.reader.fill_buf() {
Ok(buf) => buf,
Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
};
if self.checked && buf.contains(&b'\0') {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Found binary data",
));
} else if buf.is_empty() {
if self.buffer.is_empty() {
return Ok(None);
} else {
return Ok(Some(&self.buffer));
}
} else if let Some(ind) = memchr::memrchr(b'\n', buf) {
// Potential optimization: return a slice of buf instead of copying.
// (We'd have to delay the call to .consume() until the next call.)
// (There is a weird borrow checker problem.)
self.buffer.extend_from_slice(&buf[..=ind]);
self.reader.consume(ind + 1);
return Ok(Some(&self.buffer));
} else {
self.buffer.extend_from_slice(buf);
let n = buf.len(); // borrow checker
self.reader.consume(n);
// It would be nice to return early if self.buffer is growing very large
// or if it's been a long time since the last read. But especially the
// second is hard to implement, and we'd want to pair this with flushing
// the output buffer. (HTTPie does nothing of this kind.)
}
}
}
}
pub struct Printer {
indent_json: bool,
color: bool,
theme: Theme,
sort_headers: bool,
stream: bool,
buffer: Buffer,
format_options: FormatOptions,
}
impl Printer {
pub fn new(
pretty: Pretty,
theme: Option<Theme>,
stream: bool,
buffer: Buffer,
format_options: FormatOptions,
) -> Self {
let theme = theme.unwrap_or(Theme::Auto);
Printer {
indent_json: pretty.format(),
sort_headers: pretty.format(),
color: pretty.color(),
stream,
theme,
buffer,
format_options,
}
}
fn get_highlighter(&mut self, syntax: &'static str) -> Highlighter<'_> {
Highlighter::new(syntax, self.theme, &mut self.buffer)
}
fn print_colorized_text(&mut self, text: &str, syntax: &'static str) -> io::Result<()> {
self.get_highlighter(syntax).highlight(text)
}
fn print_syntax_text(&mut self, text: &str, syntax: &'static str) -> io::Result<()> {
if self.color {
self.print_colorized_text(text, syntax)
} else {
self.buffer.print(text)
}
}
fn print_json_text(&mut self, text: &str, check_valid: bool) -> io::Result<()> {
if !self.indent_json {
// We don't have to do anything specialized, so fall back to the generic version
return self.print_syntax_text(text, "json");
}
if check_valid && !valid_json(text) {
// JSONXF may mess up the text, e.g. by removing whitespace
// This is somewhat common as application/json is the default
// content type for requests
return self.print_syntax_text(text, "json");
}
let mut formatter = get_json_formatter(&self.format_options);
if self.color {
let mut buf = Vec::new();
formatter.format_buf(text.as_bytes(), &mut buf)?;
// in principle, buf should already be valid UTF-8,
// because JSONXF doesn't mangle it
let text = String::from_utf8_lossy(&buf);
self.print_colorized_text(&text, "json")
} else {
formatter.format_buf(text.as_bytes(), &mut self.buffer)
}
}
fn print_body_text(&mut self, content_type: ContentType, body: &str) -> io::Result<()> {
match content_type {
ContentType::Json => self.print_json_text(body, true),
ContentType::Xml => self.print_syntax_text(body, "xml"),
ContentType::Html => self.print_syntax_text(body, "html"),
ContentType::Css => self.print_syntax_text(body, "css"),
// In HTTPie part of this behavior is gated behind the --json flag
// But it does JSON formatting even without that flag, so doing
// this check unconditionally is fine
ContentType::Text | ContentType::JavaScript if valid_json(body) => {
self.print_json_text(body, false)
}
ContentType::JavaScript => self.print_syntax_text(body, "js"),
_ => self.buffer.print(body),
}
}
fn print_stream(&mut self, reader: &mut impl Read) -> io::Result<()> {
if !self.buffer.is_terminal() {
return copy_largebuf(reader, &mut self.buffer, true);
}
let mut guard = BinaryGuard::new(reader, true);
while let Some(lines) = guard.read_lines()? {
self.buffer.write_all(lines)?;
self.buffer.flush()?;
}
Ok(())
}
fn print_colorized_stream(
&mut self,
stream: &mut impl Read,
syntax: &'static str,
) -> io::Result<()> {
let mut guard = BinaryGuard::new(stream, self.buffer.is_terminal());
let mut highlighter = self.get_highlighter(syntax);
while let Some(lines) = guard.read_lines()? {
for line in lines.split_inclusive(|&b| b == b'\n') {
highlighter.highlight_bytes(line)?;
}
highlighter.flush()?;
}
Ok(())
}
fn print_syntax_stream(
&mut self,
stream: &mut impl Read,
syntax: &'static str,
) -> io::Result<()> {
if self.color {
self.print_colorized_stream(stream, syntax)
} else {
self.print_stream(stream)
}
}
fn print_json_stream(&mut self, stream: &mut impl Read) -> io::Result<()> {
if !self.indent_json {
// We don't have to do anything specialized, so fall back to the generic version
self.print_syntax_stream(stream, "json")
} else if self.color {
let mut guard = BinaryGuard::new(stream, self.buffer.is_terminal());
let mut formatter = get_json_formatter(&self.format_options);
let mut highlighter = self.get_highlighter("json");
let mut buf = Vec::new();
while let Some(lines) = guard.read_lines()? {
formatter.format_buf(lines, &mut buf)?;
for line in buf.split_inclusive(|&b| b == b'\n') {
highlighter.highlight_bytes(line)?;
}
highlighter.flush()?;
buf.clear();
}
Ok(())
} else {
let mut formatter = get_json_formatter(&self.format_options);
if !self.buffer.is_terminal() {
let mut buf = vec![0; BUFFER_SIZE];
loop {
match stream.read(&mut buf) {
Ok(0) => return Ok(()),
Ok(n) => {
formatter.format_buf(&buf[0..n], &mut self.buffer)?;
self.buffer.flush()?;
}
Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
}
}
}
let mut guard = BinaryGuard::new(stream, true);
while let Some(lines) = guard.read_lines()? {
formatter.format_buf(lines, &mut self.buffer)?;
self.buffer.flush()?;
}
Ok(())
}
}
fn print_body_stream(
&mut self,
content_type: ContentType,
body: &mut impl Read,
) -> io::Result<()> {
match content_type {
ContentType::Json => self.print_json_stream(body),
ContentType::Xml => self.print_syntax_stream(body, "xml"),
ContentType::Html => self.print_syntax_stream(body, "html"),
ContentType::Css => self.print_syntax_stream(body, "css"),
// print_body_text() has fancy JSON detection, but we can't do that here
ContentType::JavaScript => self.print_syntax_stream(body, "js"),
_ => self.print_stream(body),
}
}
fn print_headers(&mut self, text: &str) -> io::Result<()> {
if self.color {
self.print_colorized_text(text, "http")
} else {
self.buffer.print(text)
}
}
fn headers_to_string(&self, headers: &HeaderMap, version: Version) -> String {
let as_titlecase = match version {
Version::HTTP_09 | Version::HTTP_10 | Version::HTTP_11 => true,
Version::HTTP_2 | Version::HTTP_3 => false,
_ => false,
};
let mut headers: Vec<(&HeaderName, &HeaderValue)> = headers.iter().collect();
if self.sort_headers {
headers.sort_by_key(|(name, _)| name.as_str());
}
let mut header_string = String::new();
for (key, value) in headers {
if as_titlecase {
// Ought to be equivalent to how hyper does it
// https://github.com/hyperium/hyper/blob/f46b175bf71b202fbb907c4970b5743881b891e1/src/proto/h1/role.rs#L1332
// Header names are ASCII so it's ok to operate on char instead of u8
let mut prev = '-';
for mut c in key.as_str().chars() {
if prev == '-' {
c.make_ascii_uppercase();
}
header_string.push(c);
prev = c;
}
} else {
header_string.push_str(key.as_str());
}
header_string.push_str(": ");
match value.to_str() {
Ok(value) => header_string.push_str(value),
#[allow(clippy::format_push_string)]
Err(_) => header_string.push_str(&format!("{:?}", value)),
}
header_string.push('\n');
}
header_string.pop();
header_string
}
pub fn print_separator(&mut self) -> io::Result<()> {
self.buffer.print("\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_headers<T>(&mut self, request: &Request, cookie_jar: &T) -> io::Result<()>
where
T: CookieStore,
{
let method = request.method();
let url = request.url();
let query_string = url.query().map_or(String::from(""), |q| ["?", q].concat());
let version = request.version();
let mut headers = request.headers().clone();
headers
.entry(ACCEPT)
.or_insert_with(|| HeaderValue::from_static("*/*"));
if let Some(cookie) = cookie_jar.cookies(url) {
headers.insert(COOKIE, cookie);
}
// See https://github.com/seanmonstar/reqwest/issues/1030
// reqwest and hyper add certain headers, but only in the process of
// sending the request, which we haven't done yet
if let Some(body) = request.body().and_then(Body::as_bytes) {
// Added at https://github.com/seanmonstar/reqwest/blob/e56bd160ba/src/blocking/request.rs#L132
headers
.entry(CONTENT_LENGTH)
.or_insert_with(|| body.len().into());
}
if let Some(host) = request.url().host_str() {
// This is incorrect in case of HTTP/2, but we're already assuming
// HTTP/1.1 anyway
headers.entry(HOST).or_insert_with(|| {
// Added at https://github.com/hyperium/hyper/blob/dfa1bb291d/src/client/client.rs#L237
if test_mode() {
HeaderValue::from_str("http.mock")
} else if let Some(port) = request.url().port() {
HeaderValue::from_str(&format!("{}:{}", host, port))
} else {
HeaderValue::from_str(host)
}
.expect("hostname should already be validated/parsed")
});
}
let request_line = format!("{} {}{} {:?}\n", method, url.path(), query_string, version);
let headers = self.headers_to_string(&headers, version);
self.print_headers(&(request_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_response_headers(&mut self, response: &Response) -> io::Result<()> {
let version = response.version();
let status = response.status();
let headers = response.headers();
let status_line = format!("{:?} {}\n", version, status);
let headers = self.headers_to_string(headers, version);
self.print_headers(&(status_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_body(&mut self, request: &mut Request) -> anyhow::Result<()> {
let content_type = get_content_type(request.headers());
if let Some(body) = request.body_mut() {
let body = body.buffer()?;
if body.contains(&b'\0') {
self.buffer.print(BINARY_SUPPRESSOR)?;
} else {
self.print_body_text(content_type, &String::from_utf8_lossy(body))?;
self.buffer.print("\n")?;
}
// Breathing room between request and response
self.buffer.print("\n")?;
self.buffer.flush()?;
}
Ok(())
}
pub fn print_response_body(
&mut self,
response: &mut Response,
encoding: Option<&'static Encoding>,
mime: Option<&str>,
) -> anyhow::Result<()> {
let starting_time = Instant::now();
let url = response.url().clone();
let content_type =
mime.map_or_else(|| get_content_type(response.headers()), ContentType::from);
let encoding = encoding.or_else(|| get_charset(response));
let compression_type = get_compression_type(response.headers());
let mut body = decompress(response, compression_type);
if !self.buffer.is_terminal() {
if (self.color || self.indent_json) && content_type.is_text() {
// The user explicitly asked for formatting even though this is
// going into a file, and the response is at least supposed to be
// text, so decode it
// TODO: HTTPie re-encodes output in the original encoding, we don't
// encoding_rs::Encoder::encode_from_utf8_to_vec_without_replacement()
// and guess_encoding() may help, but it'll require refactoring
// The current design is a bit unfortunate because there's no way to
// force UTF-8 output without coloring or formatting
// Unconditionally decoding is not an option because the body
// might not be text at all
if self.stream {
self.print_body_stream(
content_type,
&mut decode_stream(&mut body, encoding, &url)?,
)?;
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
let text = decode_blob_unconditional(&buf, encoding, &url);
self.print_body_text(content_type, &text)?;
}
} else if self.stream {
copy_largebuf(&mut body, &mut self.buffer, true)?;
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
self.buffer.print(&buf)?;
}
} else if self.stream {
match self
.print_body_stream(content_type, &mut decode_stream(&mut body, encoding, &url)?)
{
Ok(_) => {
self.buffer.print("\n")?;
}
Err(err) if err.kind() == io::ErrorKind::InvalidData => {
self.buffer.print(BINARY_SUPPRESSOR)?;
}
Err(err) => return Err(err.into()),
}
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
match decode_blob(&buf, encoding, &url) {
None => {
self.buffer.print(BINARY_SUPPRESSOR)?;
}
Some(text) => {
self.print_body_text(content_type, &text)?;
self.buffer.print("\n")?;
}
};
}
self.buffer.flush()?;
drop(body); // silence the borrow checker
response.meta_mut().content_download_duration = Some(starting_time.elapsed());
Ok(())
}
pub fn print_response_meta(&mut self, response: &Response) -> anyhow::Result<()> {
let meta = response.meta();
let mut total_elapsed_time = meta.request_duration.as_secs_f64();
if let Some(content_download_duration) = meta.content_download_duration {
total_elapsed_time += content_download_duration.as_secs_f64();
}
self.buffer
.print(format!("Elapsed time: {:.5}s", total_elapsed_time))?;
self.buffer.print("\n\n")?;
Ok(())
}
}
enum ContentType {
Json,
Html,
Xml,
JavaScript,
Css,
Text,
UrlencodedForm,
Multipart,
Unknown,
}
impl ContentType {
fn is_text(&self) -> bool {
!matches!(
self,
ContentType::Unknown | ContentType::UrlencodedForm | ContentType::Multipart
)
}
}
impl From<&str> for ContentType {
fn from(content_type: &str) -> Self {
if content_type.contains("json") {
ContentType::Json
} else if content_type.contains("html") {
ContentType::Html
} else if content_type.contains("xml") {
ContentType::Xml
} else if content_type.contains("multipart") {
ContentType::Multipart
} else if content_type.contains("x-www-form-urlencoded") {
ContentType::UrlencodedForm
} else if content_type.contains("javascript") {
ContentType::JavaScript
} else if content_type.contains("css") {
ContentType::Css
} else if content_type.contains("text") {
// We later check if this one's JSON
// HTTPie checks for "json", "javascript" and "text" in one place:
// https://github.com/httpie/httpie/blob/a32ad344dd/httpie/output/formatters/json.py#L14
// We have it more spread out but it behaves more or less the same
ContentType::Text
} else {
ContentType::Unknown
}
}
}
fn get_content_type(headers: &HeaderMap) -> ContentType {
headers
.get(CONTENT_TYPE)
.and_then(|value| value.to_str().ok())
.map_or(ContentType::Unknown, ContentType::from)
}
fn valid_json(text: &str) -> bool {
serde_json::from_str::<serde::de::IgnoredAny>(text).is_ok()
}
/// Decode a response, using BOM sniffing or chardet if the encoding is unknown.
///
/// This is different from [`Response::text`], which assumes UTF-8 as a fallback.
///
/// Returns `None` if the decoded text would contain null codepoints (i.e., is binary).
fn decode_blob<'a>(
raw: &'a [u8],
encoding: Option<&'static Encoding>,
url: &Url,
) -> Option<Cow<'a, str>> {
let encoding = encoding.unwrap_or_else(|| detect_encoding(raw, true, url));
// If the encoding is ASCII-compatible then a null byte corresponds to a
// null codepoint and vice versa, so we can check for them before decoding.
// For an 11 MB binary file this saves 100 ms, which is worth doing.
// UTF-16 is not ASCII-compatible: all ASCII characters are padded with a
// null byte, so finding a null byte doesn't mean anything.
if encoding.is_ascii_compatible() && raw.contains(&0) {
return None;
}
// Don't allow the BOM to override the encoding. But do remove it if
// it matches the encoding.
let text = encoding.decode_with_bom_removal(raw).0;
if !encoding.is_ascii_compatible() && text.contains('\0') {
None
} else {
Some(text)
}
}
/// Like [`decode_blob`], but without binary detection.
fn decode_blob_unconditional<'a>(
raw: &'a [u8],
encoding: Option<&'static Encoding>,
url: &Url,
) -> Cow<'a, str> {
let encoding = encoding.unwrap_or_else(|| detect_encoding(raw, true, url));
encoding.decode_with_bom_removal(raw).0
}
/// Decode a streaming response in a way that matches [`decode_blob`].
///
/// As-is this should do a lossy decode with replacement characters, so the
/// output is valid UTF-8, but a differently configured DecodeReaderBytes can
/// produce invalid UTF-8.
fn decode_stream<'a>(
stream: &'a mut impl Read,
encoding: Option<&'static Encoding>,
url: &Url,
) -> io::Result<impl Read + 'a> {
// 16 KiB is the largest initial read I could achieve.
// That was with an HTTP/2 miniserve running on Linux.
// I think this is a buffer size for hyper, it could change. But it seems
// large enough for a best-effort attempt.
// (16 is otherwise used because 0 seems dangerous, but it shouldn't matter.)
let capacity = if encoding.is_some() { 16 } else { 16 * 1024 };
let mut reader = BufReader::with_capacity(capacity, stream);
let encoding = match encoding {
Some(encoding) => encoding,
None => {
// We need to guess the encoding.
// The more data we have the better our guess, but we can't just wait
// for all of it to arrive. The user explicitly asked us to hurry.
// HTTPie solves this by detecting the encoding separately for each line,
// but that's silly, and we don't necessarily go linewise.
// We'll just hope we get enough data in the very first read.
let peek = reader.fill_buf()?;
detect_encoding(peek, false, url)
}
};
// We could set .utf8_passthru(true) to not sanitize invalid UTF-8. It would
// arrive more faithfully in the terminal.
// But that has questionable benefit and writing invalid UTF-8 to stdout
// causes an error on Windows (because the console is UTF-16).
let reader = DecodeReaderBytesBuilder::new()
.encoding(Some(encoding))
.build(reader);
Ok(reader)
}
fn detect_encoding(mut bytes: &[u8], mut complete: bool, url: &Url) -> &'static Encoding {
// chardetng doesn't seem to take BOMs into account, so check those manually.
// We trust them unconditionally. (Should we?)
if bytes.starts_with(b"\xEF\xBB\xBF") {
return encoding_rs::UTF_8;
} else if bytes.starts_with(b"\xFF\xFE") {
return encoding_rs::UTF_16LE;
} else if bytes.starts_with(b"\xFE\xFF") {
return encoding_rs::UTF_16BE;
}
// 64 KiB takes 2-5 ms to check on my machine. So even on slower machines
// that should be acceptable.
// If we check the full document we can easily spend most of our runtime
// inside chardetng. That's especially problematic because we usually get
// here for binary files, which we won't even end up showing.
const CHARDET_PEEK_SIZE: usize = 64 * 1024;
if bytes.len() > CHARDET_PEEK_SIZE {
bytes = &bytes[..CHARDET_PEEK_SIZE];
complete = false;
}
// HTTPie uses https://pypi.org/project/charset-normalizer/
let mut detector = chardetng::EncodingDetector::new();
detector.feed(bytes, complete);
let tld = url.domain().and_then(get_tld).map(str::as_bytes);
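// e.g. (illustrative): a domain of "www.example.co.uk." yields the hint b"uk"
// through get_tld's trailing-dot trim and rightmost-label split.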
// The `allow_utf8` parameter is meant for HTML content:
// https://hsivonen.fi/utf-8-detection/
// We always enable it because we're more geared toward APIs than
// toward plain webpages, and because we don't have a full HTML parser
// to implement proper UTF-8 detection.
detector.guess(tld, true)
}
fn get_tld(domain: &str) -> Option<&str> {
// Fully qualified domain names end with a .
domain.trim_end_matches('.').rsplit('.').next()
}
/// Get the response's encoding from its Content-Type.
///
/// reqwest doesn't provide an API for this, and we don't want a fixed default.
///
/// See https://github.com/seanmonstar/reqwest/blob/2940740493/src/async_impl/response.rs#L172
fn get_charset(response: &Response) -> Option<&'static Encoding> {
let content_type = response.headers().get(CONTENT_TYPE)?.to_str().ok()?;
let mime: Mime = content_type.parse().ok()?;
let encoding_name = mime.get_param("charset")?.as_str();
Encoding::for_label(encoding_name.as_bytes())
}
#[cfg(test)]
mod tests {
use indoc::indoc;
use super::*;
use crate::utils::random_string;
use crate::{buffer::Buffer, cli::Cli, vec_of_strings};
fn run_cmd(args: impl IntoIterator<Item = String>, is_stdout_tty: bool) -> Printer {
let args = Cli::try_parse_from(args).unwrap();
let buffer = Buffer::new(args.download, args.output.as_deref(), is_stdout_tty).unwrap();
let pretty = args.pretty.unwrap_or_else(|| buffer.guess_pretty());
let format_options = FormatOptions::default();
Printer::new(pretty, args.style, false, buffer, format_options)
}
fn temp_path() -> String {
let mut dir = std::env::temp_dir();
let filename = random_string();
dir.push(filename);
dir.to_str().unwrap().to_owned()
}
#[test]
fn terminal_mode() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get"], true);
assert_eq!(p.color, true);
assert!(p.buffer.is_stdout());
}
#[test]
fn redirect_mode() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get"], false);
assert_eq!(p.color, false);
assert!(p.buffer.is_redirect());
}
#[test]
fn terminal_mode_with_output_file() {
let output = temp_path();
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get", "-o", output], true);
assert_eq!(p.color, false);
assert!(p.buffer.is_file());
}
#[test]
fn redirect_mode_with_output_file() {
let output = temp_path();
let p = run_cmd(
vec_of_strings!["xh", "httpbin.org/get", "-o", output],
false,
);
assert_eq!(p.color, false);
assert!(p.buffer.is_file());
}
#[test]
fn terminal_mode_download() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get", "-d"], true);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn redirect_mode_download() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get", "-d"], false);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn terminal_mode_download_with_output_file() {
let output = temp_path();
let p = run_cmd(
vec_of_strings!["xh", "httpbin.org/get", "-d", "-o", output],
true,
);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn redirect_mode_download_with_output_file() {
let output = temp_path();
let p = run_cmd(
vec_of_strings!["xh", "httpbin.org/get", "-d", "-o", output],
false,
);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn test_header_casing() {
let p = Printer {
indent_json: false,
color: false,
theme: Theme::Auto,
sort_headers: false,
stream: false,
buffer: Buffer::new(false, None, false).unwrap(),
format_options: FormatOptions::default(),
};
let mut headers = HeaderMap::new();
headers.insert("ab-cd", "0".parse().unwrap());
headers.insert("-cd", "0".parse().unwrap());
headers.insert("-", "0".parse().unwrap());
headers.insert("ab-%c", "0".parse().unwrap());
headers.insert("A-b--C", "0".parse().unwrap());
assert_eq!(
p.headers_to_string(&headers, reqwest::Version::HTTP_11),
indoc! {"
Ab-Cd: 0
-Cd: 0
-: 0
Ab-%c: 0
A-B--C: 0"
}
);
assert_eq!(
p.headers_to_string(&headers, reqwest::Version::HTTP_2),
indoc! {"
ab-cd: 0
-cd: 0
-: 0
ab-%c: 0
a-b--c: 0"
}
);
}
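// Illustrative addition, not part of the original test suite: decode_blob
// treats a body with a null byte as binary, while plain UTF-8 text decodes.
#[test]
fn decode_blob_binary_detection_sketch() {
let url = Url::parse("http://example.com").unwrap();
assert!(decode_blob(b"\x00binary", Some(encoding_rs::UTF_8), &url).is_none());
assert_eq!(
decode_blob(b"hello", Some(encoding_rs::UTF_8), &url).as_deref(),
Some("hello")
);
}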
}
| headers_to_string | identifier_name |
printer.rs | use std::borrow::Cow;
use std::io::{self, BufRead, BufReader, Read, Write};
use std::time::Instant;
use encoding_rs::Encoding;
use encoding_rs_io::DecodeReaderBytesBuilder;
use mime::Mime;
use reqwest::blocking::{Body, Request, Response};
use reqwest::cookie::CookieStore;
use reqwest::header::{
HeaderMap, HeaderName, HeaderValue, ACCEPT, CONTENT_LENGTH, CONTENT_TYPE, COOKIE, HOST,
};
use reqwest::Version;
use url::Url;
use crate::cli::FormatOptions;
use crate::decoder::{decompress, get_compression_type};
use crate::{
buffer::Buffer,
cli::{Pretty, Theme},
formatting::{get_json_formatter, Highlighter},
middleware::ResponseExt,
utils::{copy_largebuf, test_mode, BUFFER_SIZE},
};
const BINARY_SUPPRESSOR: &str = concat!(
"+-----------------------------------------+\n",
"| NOTE: binary data not shown in terminal |\n",
"+-----------------------------------------+\n",
"\n"
);
/// A wrapper around a reader that reads line by line, (optionally) returning
/// an error if the line appears to be binary.
///
/// This is meant for streaming output. `checked` should typically be
/// set to buffer.is_terminal(), but if you need neither checking nor
/// highlighting then you may not need a `BinaryGuard` at all.
///
/// This reader does not validate UTF-8.
struct BinaryGuard<'a, T: Read> {
reader: BufReader<&'a mut T>,
buffer: Vec<u8>,
checked: bool,
}
impl<'a, T: Read> BinaryGuard<'a, T> {
fn new(reader: &'a mut T, checked: bool) -> Self {
Self {
reader: BufReader::with_capacity(BUFFER_SIZE, reader),
buffer: Vec::new(),
checked,
}
}
/// Return at least one complete line.
///
/// Compared to returning exactly one line, this gives you more information
/// about when data comes in. It's better to flush after each `read_lines`
/// call than to flush after each individual line.
///
/// We only work with complete lines to accommodate the syntax highlighting
/// and the binary data (null byte) detection. HTTPie processes exactly
/// one line at a time.
///
/// We work off the assumption that if the response contains a null byte
/// then none of it should be shown, and therefore the earlier we detect
/// the null byte, the better. This basically matches the non-streaming
/// behavior. But if it takes a while for the first null byte to show up
/// then it's unpredictable when the plain text output is cut off by the
/// binary suppressor. HTTPie is more consistent in this regard.
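///
/// Illustrative return shapes (a sketch, not taken from the original docs):
/// `Ok(Some(b"one\ntwo\n"))` while complete lines are available, `Ok(None)`
/// at a clean EOF, and an `InvalidData` error once a null byte is seen in
/// checked mode.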
fn read_lines(&mut self) -> io::Result<Option<&[u8]>> {
self.buffer.clear();
loop {
let buf = match self.reader.fill_buf() {
Ok(buf) => buf,
Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
};
if self.checked && buf.contains(&b'\0') {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Found binary data",
));
} else if buf.is_empty() {
if self.buffer.is_empty() {
return Ok(None);
} else {
return Ok(Some(&self.buffer));
}
} else if let Some(ind) = memchr::memrchr(b'\n', buf) {
// Potential optimization: return a slice of buf instead of copying.
// (We'd have to delay the call to .consume() until the next call.)
// (There is a weird borrow checker problem.)
self.buffer.extend_from_slice(&buf[..=ind]);
self.reader.consume(ind + 1);
return Ok(Some(&self.buffer));
} else {
self.buffer.extend_from_slice(buf);
let n = buf.len(); // borrow checker
self.reader.consume(n);
// It would be nice to return early if self.buffer is growing very large
// or if it's been a long time since the last read. But especially the
// second is hard to implement, and we'd want to pair this with flushing
// the output buffer. (HTTPie does nothing of this kind.)
}
}
}
}
pub struct Printer {
indent_json: bool,
color: bool,
theme: Theme,
sort_headers: bool,
stream: bool,
buffer: Buffer,
format_options: FormatOptions,
}
impl Printer {
pub fn new(
pretty: Pretty,
theme: Option<Theme>,
stream: bool,
buffer: Buffer,
format_options: FormatOptions,
) -> Self {
let theme = theme.unwrap_or(Theme::Auto);
Printer {
indent_json: pretty.format(),
sort_headers: pretty.format(),
color: pretty.color(),
stream,
theme,
buffer,
format_options,
}
}
fn get_highlighter(&mut self, syntax: &'static str) -> Highlighter<'_> {
Highlighter::new(syntax, self.theme, &mut self.buffer)
}
fn print_colorized_text(&mut self, text: &str, syntax: &'static str) -> io::Result<()> {
self.get_highlighter(syntax).highlight(text)
}
fn print_syntax_text(&mut self, text: &str, syntax: &'static str) -> io::Result<()> {
if self.color {
self.print_colorized_text(text, syntax)
} else {
self.buffer.print(text)
}
}
fn print_json_text(&mut self, text: &str, check_valid: bool) -> io::Result<()> {
if !self.indent_json {
// We don't have to do anything specialized, so fall back to the generic version
return self.print_syntax_text(text, "json");
}
if check_valid && !valid_json(text) {
// JSONXF may mess up the text, e.g. by removing whitespace
// This is somewhat common as application/json is the default
// content type for requests
return self.print_syntax_text(text, "json");
}
let mut formatter = get_json_formatter(&self.format_options);
if self.color {
let mut buf = Vec::new();
formatter.format_buf(text.as_bytes(), &mut buf)?;
// in principle, buf should already be valid UTF-8,
// because JSONXF doesn't mangle it
let text = String::from_utf8_lossy(&buf);
self.print_colorized_text(&text, "json")
} else {
formatter.format_buf(text.as_bytes(), &mut self.buffer)
}
}
fn print_body_text(&mut self, content_type: ContentType, body: &str) -> io::Result<()> {
match content_type {
ContentType::Json => self.print_json_text(body, true),
ContentType::Xml => self.print_syntax_text(body, "xml"),
ContentType::Html => self.print_syntax_text(body, "html"),
ContentType::Css => self.print_syntax_text(body, "css"),
// In HTTPie part of this behavior is gated behind the --json flag
// But it does JSON formatting even without that flag, so doing
// this check unconditionally is fine
ContentType::Text | ContentType::JavaScript if valid_json(body) => {
self.print_json_text(body, false)
}
ContentType::JavaScript => self.print_syntax_text(body, "js"),
_ => self.buffer.print(body),
}
}
fn print_stream(&mut self, reader: &mut impl Read) -> io::Result<()> {
if !self.buffer.is_terminal() {
return copy_largebuf(reader, &mut self.buffer, true);
}
let mut guard = BinaryGuard::new(reader, true);
while let Some(lines) = guard.read_lines()? {
self.buffer.write_all(lines)?;
self.buffer.flush()?;
}
Ok(())
}
fn print_colorized_stream(
&mut self,
stream: &mut impl Read,
syntax: &'static str,
) -> io::Result<()> {
let mut guard = BinaryGuard::new(stream, self.buffer.is_terminal());
let mut highlighter = self.get_highlighter(syntax);
while let Some(lines) = guard.read_lines()? {
for line in lines.split_inclusive(|&b| b == b'\n') {
highlighter.highlight_bytes(line)?;
}
highlighter.flush()?;
}
Ok(())
}
fn print_syntax_stream(
&mut self,
stream: &mut impl Read,
syntax: &'static str,
) -> io::Result<()> {
if self.color {
self.print_colorized_stream(stream, syntax)
} else {
self.print_stream(stream)
}
}
fn print_json_stream(&mut self, stream: &mut impl Read) -> io::Result<()> {
if !self.indent_json {
// We don't have to do anything specialized, so fall back to the generic version
self.print_syntax_stream(stream, "json")
} else if self.color {
let mut guard = BinaryGuard::new(stream, self.buffer.is_terminal());
let mut formatter = get_json_formatter(&self.format_options);
let mut highlighter = self.get_highlighter("json");
let mut buf = Vec::new();
while let Some(lines) = guard.read_lines()? {
formatter.format_buf(lines, &mut buf)?;
for line in buf.split_inclusive(|&b| b == b'\n') {
highlighter.highlight_bytes(line)?;
}
highlighter.flush()?;
buf.clear();
}
Ok(())
} else {
let mut formatter = get_json_formatter(&self.format_options);
if !self.buffer.is_terminal() {
let mut buf = vec![0; BUFFER_SIZE];
loop {
match stream.read(&mut buf) {
Ok(0) => return Ok(()),
Ok(n) => {
formatter.format_buf(&buf[0..n], &mut self.buffer)?;
self.buffer.flush()?;
}
Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
}
}
}
let mut guard = BinaryGuard::new(stream, true);
while let Some(lines) = guard.read_lines()? {
formatter.format_buf(lines, &mut self.buffer)?;
self.buffer.flush()?;
}
Ok(())
}
}
fn print_body_stream(
&mut self,
content_type: ContentType,
body: &mut impl Read,
) -> io::Result<()> {
match content_type {
ContentType::Json => self.print_json_stream(body),
ContentType::Xml => self.print_syntax_stream(body, "xml"),
ContentType::Html => self.print_syntax_stream(body, "html"),
ContentType::Css => self.print_syntax_stream(body, "css"),
// print_body_text() has fancy JSON detection, but we can't do that here
ContentType::JavaScript => self.print_syntax_stream(body, "js"),
_ => self.print_stream(body),
}
}
fn print_headers(&mut self, text: &str) -> io::Result<()> {
if self.color {
self.print_colorized_text(text, "http")
} else {
self.buffer.print(text)
}
}
fn headers_to_string(&self, headers: &HeaderMap, version: Version) -> String {
let as_titlecase = match version {
Version::HTTP_09 | Version::HTTP_10 | Version::HTTP_11 => true,
Version::HTTP_2 | Version::HTTP_3 => false,
_ => false,
};
let mut headers: Vec<(&HeaderName, &HeaderValue)> = headers.iter().collect();
if self.sort_headers {
headers.sort_by_key(|(name, _)| name.as_str());
}
let mut header_string = String::new();
for (key, value) in headers {
if as_titlecase {
// Ought to be equivalent to how hyper does it
// https://github.com/hyperium/hyper/blob/f46b175bf71b202fbb907c4970b5743881b891e1/src/proto/h1/role.rs#L1332
// Header names are ASCII so it's ok to operate on char instead of u8
let mut prev = '-';
for mut c in key.as_str().chars() {
if prev == '-' {
c.make_ascii_uppercase();
}
header_string.push(c);
prev = c;
}
} else {
header_string.push_str(key.as_str());
}
header_string.push_str(": ");
match value.to_str() {
Ok(value) => header_string.push_str(value),
#[allow(clippy::format_push_string)]
Err(_) => header_string.push_str(&format!("{:?}", value)),
}
header_string.push('\n');
}
header_string.pop();
header_string
}
pub fn print_separator(&mut self) -> io::Result<()> {
self.buffer.print("\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_headers<T>(&mut self, request: &Request, cookie_jar: &T) -> io::Result<()>
where
T: CookieStore,
{
let method = request.method();
let url = request.url();
let query_string = url.query().map_or(String::from(""), |q| ["?", q].concat());
let version = request.version();
let mut headers = request.headers().clone();
headers
.entry(ACCEPT)
.or_insert_with(|| HeaderValue::from_static("*/*"));
if let Some(cookie) = cookie_jar.cookies(url) {
headers.insert(COOKIE, cookie);
}
// See https://github.com/seanmonstar/reqwest/issues/1030
// reqwest and hyper add certain headers, but only in the process of
// sending the request, which we haven't done yet
if let Some(body) = request.body().and_then(Body::as_bytes) {
// Added at https://github.com/seanmonstar/reqwest/blob/e56bd160ba/src/blocking/request.rs#L132
headers
.entry(CONTENT_LENGTH)
.or_insert_with(|| body.len().into());
}
if let Some(host) = request.url().host_str() {
// This is incorrect in case of HTTP/2, but we're already assuming
// HTTP/1.1 anyway
headers.entry(HOST).or_insert_with(|| {
// Added at https://github.com/hyperium/hyper/blob/dfa1bb291d/src/client/client.rs#L237
if test_mode() {
HeaderValue::from_str("http.mock")
} else if let Some(port) = request.url().port() {
HeaderValue::from_str(&format!("{}:{}", host, port))
} else {
HeaderValue::from_str(host)
}
.expect("hostname should already be validated/parsed")
});
}
let request_line = format!("{} {}{} {:?}\n", method, url.path(), query_string, version);
let headers = self.headers_to_string(&headers, version);
self.print_headers(&(request_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_response_headers(&mut self, response: &Response) -> io::Result<()> {
let version = response.version();
let status = response.status();
let headers = response.headers();
let status_line = format!("{:?} {}\n", version, status);
let headers = self.headers_to_string(headers, version);
self.print_headers(&(status_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_body(&mut self, request: &mut Request) -> anyhow::Result<()> {
let content_type = get_content_type(request.headers());
if let Some(body) = request.body_mut() {
let body = body.buffer()?;
if body.contains(&b'\0') {
self.buffer.print(BINARY_SUPPRESSOR)?;
} else {
self.print_body_text(content_type, &String::from_utf8_lossy(body))?;
self.buffer.print("\n")?;
}
// Breathing room between request and response
self.buffer.print("\n")?;
self.buffer.flush()?;
}
Ok(())
}
pub fn print_response_body(
&mut self,
response: &mut Response,
encoding: Option<&'static Encoding>,
mime: Option<&str>,
) -> anyhow::Result<()> {
let starting_time = Instant::now();
let url = response.url().clone();
let content_type =
mime.map_or_else(|| get_content_type(response.headers()), ContentType::from);
let encoding = encoding.or_else(|| get_charset(response));
let compression_type = get_compression_type(response.headers());
let mut body = decompress(response, compression_type);
if !self.buffer.is_terminal() {
if (self.color || self.indent_json) && content_type.is_text() {
// The user explicitly asked for formatting even though this is
// going into a file, and the response is at least supposed to be
// text, so decode it
// TODO: HTTPie re-encodes output in the original encoding, we don't
// encoding_rs::Encoder::encode_from_utf8_to_vec_without_replacement()
// and guess_encoding() may help, but it'll require refactoring
// The current design is a bit unfortunate because there's no way to
// force UTF-8 output without coloring or formatting
// Unconditionally decoding is not an option because the body
// might not be text at all
if self.stream {
self.print_body_stream(
content_type,
&mut decode_stream(&mut body, encoding, &url)?,
)?;
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
let text = decode_blob_unconditional(&buf, encoding, &url);
self.print_body_text(content_type, &text)?;
}
} else if self.stream {
copy_largebuf(&mut body, &mut self.buffer, true)?;
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
self.buffer.print(&buf)?;
}
} else if self.stream {
match self
.print_body_stream(content_type, &mut decode_stream(&mut body, encoding, &url)?)
{
Ok(_) => {
self.buffer.print("\n")?;
}
Err(err) if err.kind() == io::ErrorKind::InvalidData => {
self.buffer.print(BINARY_SUPPRESSOR)?;
}
Err(err) => return Err(err.into()),
}
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
match decode_blob(&buf, encoding, &url) {
None => {
self.buffer.print(BINARY_SUPPRESSOR)?;
}
Some(text) => {
self.print_body_text(content_type, &text)?;
self.buffer.print("\n")?;
}
};
}
self.buffer.flush()?;
drop(body); // silence the borrow checker
response.meta_mut().content_download_duration = Some(starting_time.elapsed());
Ok(())
}
pub fn print_response_meta(&mut self, response: &Response) -> anyhow::Result<()> {
let meta = response.meta();
let mut total_elapsed_time = meta.request_duration.as_secs_f64();
if let Some(content_download_duration) = meta.content_download_duration {
total_elapsed_time += content_download_duration.as_secs_f64();
}
self.buffer
.print(format!("Elapsed time: {:.5}s", total_elapsed_time))?;
self.buffer.print("\n\n")?;
Ok(())
}
}
enum ContentType {
Json,
Html,
Xml,
JavaScript,
Css,
Text,
UrlencodedForm,
Multipart,
Unknown,
}
impl ContentType {
fn is_text(&self) -> bool {
!matches!(
self,
ContentType::Unknown | ContentType::UrlencodedForm | ContentType::Multipart
)
}
}
impl From<&str> for ContentType {
fn from(content_type: &str) -> Self {
if content_type.contains("json") {
ContentType::Json
} else if content_type.contains("html") {
ContentType::Html
} else if content_type.contains("xml") {
ContentType::Xml
} else if content_type.contains("multipart") {
ContentType::Multipart
} else if content_type.contains("x-www-form-urlencoded") {
ContentType::UrlencodedForm
} else if content_type.contains("javascript") {
ContentType::JavaScript
} else if content_type.contains("css") {
ContentType::Css
} else if content_type.contains("text") {
// We later check if this one's JSON
// HTTPie checks for "json", "javascript" and "text" in one place:
// https://github.com/httpie/httpie/blob/a32ad344dd/httpie/output/formatters/json.py#L14
// We have it more spread out but it behaves more or less the same
ContentType::Text
} else {
ContentType::Unknown
}
}
}
fn get_content_type(headers: &HeaderMap) -> ContentType {
headers
.get(CONTENT_TYPE)
.and_then(|value| value.to_str().ok())
.map_or(ContentType::Unknown, ContentType::from)
}
fn valid_json(text: &str) -> bool {
serde_json::from_str::<serde::de::IgnoredAny>(text).is_ok()
}
/// Decode a response, using BOM sniffing or chardet if the encoding is unknown.
///
/// This is different from [`Response::text`], which assumes UTF-8 as a fallback.
///
/// Returns `None` if the decoded text would contain null codepoints (i.e., is binary).
fn decode_blob<'a>(
raw: &'a [u8],
encoding: Option<&'static Encoding>,
url: &Url,
) -> Option<Cow<'a, str>> {
let encoding = encoding.unwrap_or_else(|| detect_encoding(raw, true, url));
// If the encoding is ASCII-compatible then a null byte corresponds to a
// null codepoint and vice versa, so we can check for them before decoding.
// For an 11 MB binary file this saves 100 ms, which is worth doing.
// UTF-16 is not ASCII-compatible: all ASCII characters are padded with a
// null byte, so finding a null byte doesn't mean anything.
if encoding.is_ascii_compatible() && raw.contains(&0) {
return None;
}
// Don't allow the BOM to override the encoding. But do remove it if
// it matches the encoding.
let text = encoding.decode_with_bom_removal(raw).0;
if !encoding.is_ascii_compatible() && text.contains('\0') {
None
} else {
Some(text)
}
}
/// Like [`decode_blob`], but without binary detection.
fn decode_blob_unconditional<'a>(
raw: &'a [u8],
encoding: Option<&'static Encoding>,
url: &Url,
) -> Cow<'a, str> {
let encoding = encoding.unwrap_or_else(|| detect_encoding(raw, true, url));
encoding.decode_with_bom_removal(raw).0
}
/// Decode a streaming response in a way that matches [`decode_blob`].
///
/// As-is this should do a lossy decode with replacement characters, so the
/// output is valid UTF-8, but a differently configured DecodeReaderBytes can
/// produce invalid UTF-8.
fn decode_stream<'a>(
stream: &'a mut impl Read,
encoding: Option<&'static Encoding>,
url: &Url,
) -> io::Result<impl Read + 'a> {
// 16 KiB is the largest initial read I could achieve.
// That was with a HTTP/2 miniserve running on Linux.
// I think this is a buffer size for hyper, it could change. But it seems
// large enough for a best-effort attempt.
// (16 is otherwise used because 0 seems dangerous, but it shouldn't matter.)
let capacity = if encoding.is_some() { 16 } else { 16 * 1024 };
let mut reader = BufReader::with_capacity(capacity, stream);
let encoding = match encoding {
Some(encoding) => encoding,
None => {
// We need to guess the encoding.
// The more data we have the better our guess, but we can't just wait
// for all of it to arrive. The user explicitly asked us to hurry.
// HTTPie solves this by detecting the encoding separately for each line,
// but that's silly, and we don't necessarily go linewise.
// We'll just hope we get enough data in the very first read.
let peek = reader.fill_buf()?;
detect_encoding(peek, false, url)
}
};
// We could set .utf8_passthru(true) to not sanitize invalid UTF-8. It would
// arrive more faithfully in the terminal.
// But that has questionable benefit and writing invalid UTF-8 to stdout
// causes an error on Windows (because the console is UTF-16).
let reader = DecodeReaderBytesBuilder::new()
.encoding(Some(encoding))
.build(reader);
Ok(reader)
}
fn detect_encoding(mut bytes: &[u8], mut complete: bool, url: &Url) -> &'static Encoding {
// chardetng doesn't seem to take BOMs into account, so check those manually.
// We trust them unconditionally. (Should we?)
if bytes.starts_with(b"\xEF\xBB\xBF") {
return encoding_rs::UTF_8;
} else if bytes.starts_with(b"\xFF\xFE") {
return encoding_rs::UTF_16LE;
} else if bytes.starts_with(b"\xFE\xFF") {
return encoding_rs::UTF_16BE;
}
// 64 KiB takes 2-5 ms to check on my machine. So even on slower machines
// that should be acceptable.
// If we check the full document we can easily spend most of our runtime
// inside chardetng. That's especially problematic because we usually get
// here for binary files, which we won't even end up showing.
const CHARDET_PEEK_SIZE: usize = 64 * 1024;
if bytes.len() > CHARDET_PEEK_SIZE {
bytes = &bytes[..CHARDET_PEEK_SIZE];
complete = false;
}
// HTTPie uses https://pypi.org/project/charset-normalizer/
let mut detector = chardetng::EncodingDetector::new();
detector.feed(bytes, complete);
let tld = url.domain().and_then(get_tld).map(str::as_bytes);
// The `allow_utf8` parameter is meant for HTML content:
// https://hsivonen.fi/utf-8-detection/
// We always enable it because we're more geared toward APIs than
// toward plain webpages, and because we don't have a full HTML parser
// to implement proper UTF-8 detection.
detector.guess(tld, true)
}
fn get_tld(domain: &str) -> Option<&str> {
// Fully qualified domain names end with a .
domain.trim_end_matches('.').rsplit('.').next()
}
/// Get the response's encoding from its Content-Type.
///
/// reqwest doesn't provide an API for this, and we don't want a fixed default.
///
/// See https://github.com/seanmonstar/reqwest/blob/2940740493/src/async_impl/response.rs#L172
fn get_charset(response: &Response) -> Option<&'static Encoding> {
let content_type = response.headers().get(CONTENT_TYPE)?.to_str().ok()?;
let mime: Mime = content_type.parse().ok()?;
let encoding_name = mime.get_param("charset")?.as_str();
Encoding::for_label(encoding_name.as_bytes())
}
#[cfg(test)]
mod tests {
use indoc::indoc;
use super::*;
use crate::utils::random_string;
use crate::{buffer::Buffer, cli::Cli, vec_of_strings};
fn run_cmd(args: impl IntoIterator<Item = String>, is_stdout_tty: bool) -> Printer {
let args = Cli::try_parse_from(args).unwrap();
let buffer = Buffer::new(args.download, args.output.as_deref(), is_stdout_tty).unwrap();
let pretty = args.pretty.unwrap_or_else(|| buffer.guess_pretty());
let format_options = FormatOptions::default();
Printer::new(pretty, args.style, false, buffer, format_options)
}
fn temp_path() -> String {
let mut dir = std::env::temp_dir();
let filename = random_string();
dir.push(filename);
dir.to_str().unwrap().to_owned()
}
#[test]
fn terminal_mode() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get"], true);
assert_eq!(p.color, true);
assert!(p.buffer.is_stdout());
}
#[test]
fn redirect_mode() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get"], false);
assert_eq!(p.color, false);
assert!(p.buffer.is_redirect());
}
#[test]
fn terminal_mode_with_output_file() {
let output = temp_path();
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get", "-o", output], true);
assert_eq!(p.color, false);
assert!(p.buffer.is_file());
}
#[test]
fn redirect_mode_with_output_file() {
let output = temp_path();
let p = run_cmd(
vec_of_strings!["xh", "httpbin.org/get", "-o", output],
false,
);
assert_eq!(p.color, false);
assert!(p.buffer.is_file());
}
#[test]
fn terminal_mode_download() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get", "-d"], true);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn redirect_mode_download() {
let p = run_cmd(vec_of_strings!["xh", "httpbin.org/get", "-d"], false);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn terminal_mode_download_with_output_file() {
let output = temp_path();
let p = run_cmd(
vec_of_strings!["xh", "httpbin.org/get", "-d", "-o", output],
true,
);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn redirect_mode_download_with_output_file() {
let output = temp_path();
let p = run_cmd(
vec_of_strings!["xh", "httpbin.org/get", "-d", "-o", output],
false,
);
assert_eq!(p.color, true);
assert!(p.buffer.is_stderr());
}
#[test]
fn test_header_casing() {
let p = Printer {
indent_json: false,
color: false,
theme: Theme::Auto,
sort_headers: false,
stream: false,
buffer: Buffer::new(false, None, false).unwrap(),
format_options: FormatOptions::default(),
};
let mut headers = HeaderMap::new();
headers.insert("ab-cd", "0".parse().unwrap());
headers.insert("-cd", "0".parse().unwrap());
headers.insert("-", "0".parse().unwrap());
headers.insert("ab-%c", "0".parse().unwrap());
headers.insert("A-b--C", "0".parse().unwrap());
assert_eq!(
p.headers_to_string(&headers, reqwest::Version::HTTP_11),
indoc! {"
Ab-Cd: 0
-Cd: 0
-: 0
Ab-%c: 0
A-B--C: 0"
}
);
assert_eq!(
p.headers_to_string(&headers, reqwest::Version::HTTP_2),
indoc! {"
ab-cd: 0
-cd: 0
-: 0
ab-%c: 0
a-b--c: 0"
}
);
}
} | Ok(_) => { | random_line_split |
graham_scan.py | """
Author: Ajinkya Shinde
"""
# Importing the necessary packages
from Stack import Stack
from math import atan2, sqrt, pi, cos, sin
import numpy as np
import matplotlib.pyplot as plt
import time
import random
import os
import csv
def point_with_min_y(points):
"""Returns the point with the minimum y co-ordinate and the
leftmost point in case of a tie from a set of points
Input: points (array-type)
Output: P0(tuple)
"""
min_idx = None
for a,coord in enumerate(points):
if min_idx is None:
min_idx = a
P0_Y = coord[1]
P0_X = coord[0]
elif coord[1] < P0_Y:
# look for the point with lowest y co-ordinate
min_idx = a
P0_X = coord[0]
P0_Y = coord[1]
elif (coord[1] == P0_Y) & (coord[0] < P0_X):
# In case of a tie with the lowest y co-ordinate
# take one which is leftmost or lowest x
# co-ordinate
min_idx = a
P0_X = coord[0]
P0_Y = coord[1]
return (P0_X,P0_Y)
def euclidean_distance(points, ref_point):
"""Returns euclidean distance for all set of points
Input: points (array-like) : set of points whose
euclidean distance needs to be found
ref_point (tuple-like) : point to be used as
reference for distance calculation
Output: array object with euclidean distance for all the points
passed
Note: This function is used by sort_by_polar_angle - the original
version for the sort by polar angle logic
"""
euclidean_dist = []
for each in points:
eucl_dist = sqrt((ref_point[0]-each[0])**2 +(ref_point[1]-each[1])**2)
euclidean_dist.append(eucl_dist)
return np.asarray(euclidean_dist)
def euclidean_distance_v2(point, ref_point):
# print('Calculating dist between',point,' and ',ref_point,end='')
# print(sqrt((ref_point[0]-point[0])**2 +(ref_point[1]-point[1])**2))
return sqrt((ref_point[0]-point[0])**2 +(ref_point[1]-point[1])**2)
def polar_angle(points):
"""Returns list of polar angle between -pi and pi calculated
with respect to P0 - point with lowest x and y co-ordinate
Input: points(array-like) : set of points whose polar angle
needs to be calculated with respect to ref point
Output: polar angle array
"""
polar_angle = []
for each in points:
dy = each[1] - P0[1]
dx = each[0] - P0[0]
polar_angle.append(atan2(dy, dx))
return polar_angle
def sort_by_polar_angle_v2(pts):
"""Returns the list of points sorted by polar angle in
counterclockwise direction. For points with the same polar angle
only the farthest point is kept
Input: pts(array-like) : set of points for sorting by polar angle
Output: sorted order of input array of points
"""
### make a copy of points array to avoid corruption
### of original points array
copy_pts = []
for each in pts:
if each not in copy_pts:
copy_pts.append(each)
P0_idx = copy_pts.index(P0)
del copy_pts[P0_idx]
# Call polar_angle function to calculate polar angle
# of points with respect to P0
p =polar_angle(copy_pts)
#########For sorting polar angle array ######
# Once we get the polar angle array, we use numpy.argsort
# to get the indices of sorted polar angle array
# using the indices serves two purpose
# 1. Sort polar angle array
# 2. Sort list of points array
# 3. Develop logic to take farthest point in case of
# collinear
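# Illustrative sketch (not part of the algorithm): with hypothetical angles
# p = [0.50, 0.10, 0.75], np.argsort(p) returns [1, 0, 2]; indexing both p and
# copy_pts with that order sorts the angles while keeping each angle paired
# with its original point.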
np_p = np.asarray(p)
sorted_idx = np.argsort(np_p,kind='mergesort')
# Do steps 1. and 2. of above commented logic
sorted_p = []
sorted_pts = []
for each in sorted_idx:
sorted_p.append(p[each])
sorted_pts.append(copy_pts[each])
# Code for step 3.
check_dict = {}
for i in range(len(sorted_p)-1):
for j in range(i+1,len(sorted_p)):
if sorted_p[j] == sorted_p[i]:
if sorted_p[i] not in check_dict:
temp_list=[]
temp_list.append(sorted_pts[i])
check_dict[sorted_p[i]]=temp_list
temp_list2 = []
temp_list2 = check_dict[sorted_p[i]]
if sorted_pts[j] not in temp_list2:
temp_list2.append(sorted_pts[j])
check_dict[sorted_p[i]]=temp_list2
if sorted_pts[j] in temp_list2:
break
else:
break
for dict_val in check_dict.values():
farthest_pt = dict_val[0]
max_dist = euclidean_distance_v2(farthest_pt,P0)
for each in dict_val[1:]:
if euclidean_distance_v2(each,P0) > max_dist:
sorted_pts = [x for x in sorted_pts if x!=farthest_pt]
max_dist = euclidean_distance_v2(each,P0)
farthest_pt = each
if euclidean_distance_v2(each,P0) < max_dist:
sorted_pts = [x for x in sorted_pts if x!=each]
return sorted_pts
def sort_by_polar_angle(points):
"""Returns sorted order of points array.
This is the initial version of the sort_by_polar_angle function.
Input: points(array-like) : set of points to be sorted with
respect to P0
Output: sorted array of remaining points
"""
# Call polar_angle function to calculate polar angle
# of points with respect to P0
p = polar_angle(points)
polar_angle_arr = np.asarray(p)
vals1, idx_start1, count1 = np.unique(polar_angle_arr, return_counts=True,
return_index=True)
idx_sorted_pang = np.argsort(polar_angle_arr)
sorted_polar_angle_arr = polar_angle_arr[idx_sorted_pang]
vals, idx_start, count = np.unique(sorted_polar_angle_arr, return_counts=True,
return_index=True)
res = np.split(idx_sorted_pang, idx_start[1:])
#filter them with respect to their size, keeping only items occurring more than once
final_points =[]
for each in res:
# print("len(each)",len(each))
if len(each) > 1:
i = each.tolist()
check_points = []
for j in i:
check_points.append(points[j])
check_points_arr = np.asarray(check_points)
max_far_idx = np.argmax(euclidean_distance(check_points,P0))
final_points.append(check_points[max_far_idx])
elif len(each) == 1:
final_points.append(points[each.tolist()[0]])
return final_points
def cross_product(p0,p1,p2):
"""Returns the cross product of the vectors p0->p1 and p0->p2.
The value returned is +ve for a counterclockwise (left) turn, -ve for a clockwise (right) turn, and 0 when the three points are collinear
"""
return (((p1[0]-p0[0])*(p2[1]-p0[1]))-((p2[0]-p0[0])*(p1[1]-p0[1])))
def read_points():
"""
Work In Progress file to read points from text file
"""
points = []
f = open(r'sample_points.txt')
while True:
nstr = f.readline()
if len(nstr) == 0:
break
line = nstr.rstrip('\n').split(', ')
# print(line)
points.append((round(float(line[0]),3),round(float(line[1]),3)))
print(points)
return points
def create_random_points(n):
"""Returns random points for input choice 1 from menu screen
Input:n(int) : size of input
Output: points array
"""
return [(random.randint(0,n),random.randint(0,n)) for i in range(n)]
def points_on_circumference(center=(0, 0), r=50, n=100):
""" Returns points around the boundary of circle with random distribution
It is called when choice of input entered is 2
"""
return [
(
center[0]+(cos(2 * pi / n * x) * r),
center[1] + (sin(2 * pi / n * x) * r)
) for x in range(0, n + 1)]
def create_export_files(n,input_choice,timing,min_hull_per):
"""Creates folder analysis if not exists in current directory and creates
results.csv file
Input: n(int): size of input
input_choice(int): choice of input from menu
timing(decimal): Timing in sec of algo
min_hull_per(int): percentage of hull points from n
Output: Appends results of execution to the csv file
"""
exists = os.path.isdir('analysis')
if exists:
f = open('analysis/results.csv','a',newline='')
results = csv.writer(f)
else:
os.mkdir('analysis')
f = open('analysis/results.csv','w',newline='')
results = csv.writer(f)
results.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])
results.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])
def points_on_circumference_with_per(center=(0, 0), r=50, n=100, per = 50):
"""Returns points around boundary of circle with random points distributed
inside circle. It is called when choice of input entered is 3
Input: center(tuple) : co-ordinates for center of circle
r(int) : input for radius of circle
n(int) : size of input
per(int) : percentage of points of n that should be on boundary
Output : points array
"""
# circum_cnt is the actual number of points on the circumference as a percentage of the total
# random points(n) = Percentage_of_Total_Points * n / 100
circum_cnt = int(per*n/100)
# random_cnt is points inside the circle = Total random points - Points on Circum
random_cnt = n - circum_cnt
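# e.g. with the hypothetical values n=100 and per=50: circum_cnt = 50 points
# lie on the boundary and random_cnt = 50 points fall inside the circle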
# Append points on circumference
final_pts = [
(
center[0]+(cos(2 * pi / circum_cnt * x) * r),
center[1] + (sin(2 * pi / circum_cnt * x) * r)
) for x in range(0, circum_cnt + 1)]
# Generate random points inside circle
# random points inside the circle should have a radius of at least 5 to be visible enough
for i in range(1,random_cnt+1):
final_pts.append( (center[0]+ cos(2 * pi / circum_cnt * i) * random.randint(1,r-20),
center[1] + sin(2 * pi / circum_cnt * i) * random.randint(1,r-20)))
return final_pts
def show_convex_hull(points, input_choice, timing,percent_pts,size,hull_points = None):
"""Returns plot with parameters from menu screen and saves the plot in /plots
directory
"""
exists = os.path.isdir('plots')
if not exists:
os.mkdir('plots')
for each in points:
plt.plot(each[0],each[1],'o-')
if hull_points is not None:
hull_pt_list = []
for each in hull_points:
hull_pt_list.append(list(each))
hull_pt_arr = np.asarray(hull_pt_list)
# print(hull_pt_arr)
plt.plot(hull_pt_arr[:,0],hull_pt_arr[:,1],'k-')
first_coord = hull_pt_arr[0,:].reshape(1,2)
last_coord = hull_pt_arr[len(hull_pt_arr)-1,:].reshape(1,2)
last_coord_arr = np.append(first_coord, last_coord, axis = 0)
plt.plot(last_coord_arr[:,0],last_coord_arr[:,1],'k-')
plt.title(label = 'For input : '+input_choice+percent_pts+' time taken = '+str(timing)+' s\n'+'N='+str(size))
plt.savefig('plots/'+'Graham_Scan_'+str(input_choice)+str(percent_pts)+'_N='+str(size)+'.png')
plt.show()
def graham_scan():
### Menu Screen for Program Starts
choice_of_input = input("Enter choice of random point distribution:\n1. Random scatter\n2. Circle\n3. Minimal Points on Circle\n")
if choice_of_input == "1":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = ''
break
except ValueError:
print("Enter integer value for input size")
points = create_random_points(n)
elif choice_of_input == "2":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = ''
radius = input("Enter the radius")
r = int(radius)
center_str = input("Enter comma separated x and y co-ordinates")
center_str = center_str.split(",")
center_x = int(center_str[0])
center_y = int(center_str[1])
break
except ValueError:
print("Enter integer value for input size/radius")
points = points_on_circumference((center_x,center_y),r, n)
elif choice_of_input == "3":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = input("Enter percentage of points on hull")
per_min_pt = float(per_min_pt)
radius = input("Enter the radius")
r = int(radius)
center_str = input("Enter comma separated x and y co-ordinates")
center_str = center_str.split(",")
center_x = int(center_str[0])
center_y = int(center_str[1])
break
except ValueError:
print("Enter integer value for input size/radius")
points = points_on_circumference_with_per((center_x,center_y),r, n, per_min_pt)
### Menu Screen for Program Ends
# Set P0 to be global so that it can be accessed by other functions
global P0
# Find P0 with minimum y co-ordinate
P0 = point_with_min_y(points)
# Begin tracking the execution time
start = time.time()
# Sort the remaining points in points array by polar angle
# in counterclockwise order around P0
sorted_points = sort_by_polar_angle_v2(points)
# Initial version of sort by polar angle - faster than the current one
# sorted_points2 = sort_by_polar_angle(points)
# Create an empty stack
s = Stack()
# Push P0, two points from sorted array on stack
s.push(P0)
s.push(sorted_points[0])
s.push(sorted_points[1])
# Update the sorted array from 3rd element
sorted_points = sorted_points[2:]
# Find the boundary using cross product
for i in range(len(sorted_points)):
while cross_product(s.next_to_top(),s.top(),sorted_points[i]) < 0:
|
s.push(sorted_points[i])
end = time.time()
#helper dictionary for generating plots
input_choice_title = {1:'Random Scatter',2:'Circle',3:'Circle with min. hull pts %'}
##Call results function
show_convex_hull(points,input_choice_title[int(choice_of_input)],round((end-start),6),str(per_min_pt),n,s.print_all())
create_export_files(n,input_choice_title[int(choice_of_input)],(end-start),str(per_min_pt))
if __name__ == '__main__':
graham_scan() | s.pop() | conditional_block |
graham_scan.py | """
Author: Ajinkya Shinde
"""
# Importing the necessary packages
from Stack import Stack
from math import atan2, sqrt, pi, cos, sin
import numpy as np
import matplotlib.pyplot as plt
import time
import random
import os
import csv
def point_with_min_y(points):
"""Returns the point with the minimum y co-ordinate and the
leftmost point in case of a tie from a set of points
Input: points (array-type)
Output: P0(tuple)
"""
min_idx = None
for a,coord in enumerate(points):
if min_idx is None:
min_idx = a
P0_Y = coord[1]
P0_X = coord[0]
elif coord[1] < P0_Y:
# look for the point with lowest y co-ordinate
min_idx = a
P0_X = coord[0]
P0_Y = coord[1]
elif (coord[1] == P0_Y) & (coord[0] < P0_X):
# In case of a tie with the lowest y co-ordinate
# take one which is leftmost or lowest x
# co-ordinate
min_idx = a
P0_X = coord[0]
P0_Y = coord[1]
return (P0_X,P0_Y)
def euclidean_distance(points, ref_point):
"""Returns euclidean distance for all set of points
Input: points (array-like) : set of points whose
euclidean distance needs to be found
ref_point (tuple-like) : point to be used as
reference for distance calculation
Output: array object with euclidean distance for all the points
passed
Note: This function is used by sort_by_polar_angle - the original
version for the sort by polar angle logic
"""
euclidean_dist = []
for each in points:
eucl_dist = sqrt((ref_point[0]-each[0])**2 +(ref_point[1]-each[1])**2)
euclidean_dist.append(eucl_dist)
return np.asarray(euclidean_dist)
def euclidean_distance_v2(point, ref_point):
# print('Calculating dist between',point,' and ',ref_point,end='')
# print(sqrt((ref_point[0]-point[0])**2 +(ref_point[1]-point[1])**2))
return sqrt((ref_point[0]-point[0])**2 +(ref_point[1]-point[1])**2)
def polar_angle(points):
"""Returns list of polar angle between -pi and pi calculated
with respect to P0 - point with lowest x and y co-ordinate
Input: points(array-like) : set of points whose polar angle
needs to be calculated with respect to ref point
Output: polar angle array
"""
polar_angle = []
for each in points:
dy = each[1] - P0[1]
dx = each[0] - P0[0]
polar_angle.append(atan2(dy, dx))
return polar_angle
def sort_by_polar_angle_v2(pts):
"""Returns the list of points sorted by polar angle in
counterclockwise direction. For points with the same polar angle
only the farthest point is kept
Input: pts(array-like) : set of points for sorting by polar angle
Output: sorted order of input array of points
"""
### make a copy of points array to avoid corruption
### of original points array
copy_pts = []
for each in pts:
if each not in copy_pts:
copy_pts.append(each)
P0_idx = copy_pts.index(P0)
del copy_pts[P0_idx]
# Call polar_angle function to calculate polar angle
# of points with respect to P0
p =polar_angle(copy_pts)
#########For sorting polar angle array ######
# Once we get the polar angle array, we use numpy.argsort
# to get the indices of sorted polar angle array
# using the indices serves two purpose
# 1. Sort polar angle array
# 2. Sort list of points array
# 3. Develop logic to take farthest point in case of
# collinear
np_p = np.asarray(p)
sorted_idx = np.argsort(np_p,kind='mergesort')
# Do steps 1. and 2. of above commented logic
sorted_p = []
sorted_pts = []
for each in sorted_idx:
sorted_p.append(p[each])
sorted_pts.append(copy_pts[each])
# Code for step 3.
check_dict = {}
for i in range(len(sorted_p)-1):
for j in range(i+1,len(sorted_p)):
if sorted_p[j] == sorted_p[i]:
if sorted_p[i] not in check_dict:
temp_list=[]
temp_list.append(sorted_pts[i])
check_dict[sorted_p[i]]=temp_list
temp_list2 = []
temp_list2 = check_dict[sorted_p[i]]
if sorted_pts[j] not in temp_list2:
temp_list2.append(sorted_pts[j])
check_dict[sorted_p[i]]=temp_list2
if sorted_pts[j] in temp_list2:
break
else:
break
for dict_val in check_dict.values():
farthest_pt = dict_val[0]
max_dist = euclidean_distance_v2(farthest_pt,P0)
for each in dict_val[1:]:
if euclidean_distance_v2(each,P0) > max_dist:
sorted_pts = [x for x in sorted_pts if x!=farthest_pt]
max_dist = euclidean_distance_v2(each,P0)
farthest_pt = each
if euclidean_distance_v2(each,P0) < max_dist:
sorted_pts = [x for x in sorted_pts if x!=each]
return sorted_pts
def sort_by_polar_angle(points):
"""Returns sorted order of points array.
This is the initial version of the sort_by_polar_angle function.
Input: points(array-like) : set of points to be sorted with
respect to P0
Output: sorted array of remaining points
"""
# Call polar_angle function to calculate polar angle
# of points with respect to P0
p = polar_angle(points)
polar_angle_arr = np.asarray(p)
vals1, idx_start1, count1 = np.unique(polar_angle_arr, return_counts=True,
return_index=True)
idx_sorted_pang = np.argsort(polar_angle_arr)
sorted_polar_angle_arr = polar_angle_arr[idx_sorted_pang]
vals, idx_start, count = np.unique(sorted_polar_angle_arr, return_counts=True,
return_index=True)
res = np.split(idx_sorted_pang, idx_start[1:])
#filter them with respect to their size, keeping only items occurring more than once
final_points =[]
for each in res:
# print("len(each)",len(each))
if len(each) > 1:
i = each.tolist()
check_points = []
for j in i:
check_points.append(points[j])
check_points_arr = np.asarray(check_points)
max_far_idx = np.argmax(euclidean_distance(check_points,P0))
final_points.append(check_points[max_far_idx])
elif len(each) == 1:
final_points.append(points[each.tolist()[0]])
return final_points
def cross_product(p0,p1,p2):
"""Returns the cross product of the vectors p0->p1 and p0->p2.
The value returned is +ve for a counterclockwise (left) turn, -ve for a clockwise (right) turn, and 0 when the three points are collinear
"""
return (((p1[0]-p0[0])*(p2[1]-p0[1]))-((p2[0]-p0[0])*(p1[1]-p0[1])))
def read_points():
"""
Work In Progress file to read points from text file
"""
points = []
f = open(r'sample_points.txt')
while True:
nstr = f.readline()
if len(nstr) == 0:
break
line = nstr.rstrip('\n').split(', ')
# print(line)
points.append((round(float(line[0]),3),round(float(line[1]),3)))
print(points)
return points
def | (n):
"""Returns random points for input choice 1 from menu screen
Input:n(int) : size of input
Output: points array
"""
return [(random.randint(0,n),random.randint(0,n)) for i in range(n)]
def points_on_circumference(center=(0, 0), r=50, n=100):
""" Returns points around the boundary of circle with random distribution
It is called when choice of input entered is 2
"""
return [
(
center[0]+(cos(2 * pi / n * x) * r),
center[1] + (sin(2 * pi / n * x) * r)
) for x in range(0, n + 1)]
def create_export_files(n,input_choice,timing,min_hull_per):
"""Creates folder analysis if not exists in current directory and creates
results.csv file
Input: n(int): size of input
input_choice(int): choice of input from menu
timing(decimal): Timing in sec of algo
min_hull_per(int): percentage of hull points from n
Output: Appends results of execution to the csv file
"""
exists = os.path.isdir('analysis')
if exists:
f = open('analysis/results.csv','a',newline='')
results = csv.writer(f)
else:
os.mkdir('analysis')
f = open('analysis/results.csv','w',newline='')
results = csv.writer(f)
results.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])
results.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])
def points_on_circumference_with_per(center=(0, 0), r=50, n=100, per = 50):
"""Returns points around boundary of circle with random points distributed
inside circle. It is called when choice of input entered is 3
Input: center(tuple) : co-ordinates for center of circle
r(int) : input for radius of circle
n(int) : size of input
per(int) : percentage of points of n that should be on boundary
Output : points array
"""
# circum_cnt is the actual number of points on the circumference as a percentage of the total
# random points(n) = Percentage_of_Total_Points * n / 100
circum_cnt = int(per*n/100)
# random_cnt is points inside the circle = Total random points - Points on Circum
random_cnt = n - circum_cnt
# Append points on circumference
final_pts = [
(
center[0]+(cos(2 * pi / circum_cnt * x) * r),
center[1] + (sin(2 * pi / circum_cnt * x) * r)
) for x in range(0, circum_cnt + 1)]
# Generate random points inside circle
# random points inside the circle should have a radius of at least 5 to be visible enough
for i in range(1,random_cnt+1):
final_pts.append( (center[0]+ cos(2 * pi / circum_cnt * i) * random.randint(1,r-20),
center[1] + sin(2 * pi / circum_cnt * i) * random.randint(1,r-20)))
return final_pts
def show_convex_hull(points, input_choice, timing,percent_pts,size,hull_points = None):
"""Returns plot with parameters from menu screen and saves the plot in /plots
directory
"""
exists = os.path.isdir('plots')
if not exists:
os.mkdir('plots')
for each in points:
plt.plot(each[0],each[1],'o-')
if hull_points is not None:
hull_pt_list = []
for each in hull_points:
hull_pt_list.append(list(each))
hull_pt_arr = np.asarray(hull_pt_list)
# print(hull_pt_arr)
plt.plot(hull_pt_arr[:,0],hull_pt_arr[:,1],'k-')
first_coord = hull_pt_arr[0,:].reshape(1,2)
last_coord = hull_pt_arr[len(hull_pt_arr)-1,:].reshape(1,2)
last_coord_arr = np.append(first_coord, last_coord, axis = 0)
plt.plot(last_coord_arr[:,0],last_coord_arr[:,1],'k-')
plt.title(label = 'For input : '+input_choice+percent_pts+' time taken = '+str(timing)+' s\n'+'N='+str(size))
plt.savefig('plots/'+'Graham_Scan_'+str(input_choice)+str(percent_pts)+'_N='+str(size)+'.png')
plt.show()
def graham_scan():
### Menu Screen for Program Starts
choice_of_input = input("Enter choice of random point distribution:\n1. Random scatter\n2. Circle\n3. Minimal Points on Circle\n")
if choice_of_input == "1":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = ''
break
except ValueError:
print("Enter integer value for input size")
points = create_random_points(n)
elif choice_of_input == "2":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = ''
radius = input("Enter the radius")
r = int(radius)
center_str = input("Enter comma separated x and y co-ordinates")
center_str = center_str.split(",")
center_x = int(center_str[0])
center_y = int(center_str[1])
break
except ValueError:
print("Enter integer value for input size/radius")
points = points_on_circumference((center_x,center_y),r, n)
elif choice_of_input == "3":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = input("Enter percentage of points on hull")
per_min_pt = float(per_min_pt)
radius = input("Enter the radius")
r = int(radius)
center_str = input("Enter comma separated x and y co-ordinates")
center_str = center_str.split(",")
center_x = int(center_str[0])
center_y = int(center_str[1])
break
except ValueError:
print("Enter integer value for input size/radius")
points = points_on_circumference_with_per((center_x,center_y),r, n, per_min_pt)
### Menu Screen for Program Ends
# Set P0 to be global so that it can be accessed by other functions
global P0
# Find P0 with minimum y co-ordinate
P0 = point_with_min_y(points)
# Begin tracking the execution time
start = time.time()
# Sort the remaining points in points array by polar angle
# in counterclockwise order around P0
sorted_points = sort_by_polar_angle_v2(points)
# Initial version of sort by polar angle - faster than the current one
# sorted_points2 = sort_by_polar_angle(points)
# Create an empty stack
s = Stack()
# Push P0, two points from sorted array on stack
s.push(P0)
s.push(sorted_points[0])
s.push(sorted_points[1])
# Update the sorted array from 3rd element
sorted_points = sorted_points[2:]
# Find the boundary using cross product
for i in range(len(sorted_points)):
while cross_product(s.next_to_top(),s.top(),sorted_points[i]) < 0:
s.pop()
s.push(sorted_points[i])
end = time.time()
#helper dictionary for generating plots
input_choice_title = {1:'Random Scatter',2:'Circle',3:'Circle with min. hull pts %'}
##Call results function
show_convex_hull(points,input_choice_title[int(choice_of_input)],round((end-start),6),str(per_min_pt),n,s.print_all())
create_export_files(n,input_choice_title[int(choice_of_input)],(end-start),str(per_min_pt))
if __name__ == '__main__':
graham_scan() | create_random_points | identifier_name |
graham_scan.py | """
Author: Ajinkya Shinde
"""
# Importing the necessary packages
from Stack import Stack
from math import atan2, sqrt, pi, cos, sin
import numpy as np
import matplotlib.pyplot as plt
import time
import random
import os
import csv
def point_with_min_y(points):
"""Returns the point with the minimum y co-ordinate and the
leftmost point in case of a tie from a set of points
Input: points (array-type)
Output: P0(tuple)
"""
min_idx = None
for a,coord in enumerate(points):
if min_idx is None:
min_idx = a
P0_Y = coord[1]
P0_X = coord[0]
elif coord[1] < P0_Y:
# look for the point with lowest y co-ordinate
min_idx = a
P0_X = coord[0]
P0_Y = coord[1]
elif (coord[1] == P0_Y) & (coord[0] < P0_X):
# In case of a tie with the lowest y co-ordinate
# take one which is leftmost or lowest x
# co-ordinate
min_idx = a
P0_X = coord[0]
P0_Y = coord[1]
return (P0_X,P0_Y)
def euclidean_distance(points, ref_point):
"""Returns euclidean distance for all set of points
Input: points (array-like) : set of points whose
euclidean distance needs to be found
ref_point (tuple-like) : point to be used as
reference for distance calculation
Output: array object with euclidean distance for all the points
passed
Note: This function is used by sort_by_polar_angle - the original
version for the sort by polar angle logic
"""
euclidean_dist = []
for each in points:
eucl_dist = sqrt((ref_point[0]-each[0])**2 +(ref_point[1]-each[1])**2)
euclidean_dist.append(eucl_dist)
return np.asarray(euclidean_dist)
def euclidean_distance_v2(point, ref_point):
# print('Calculating dist between',point,' and ',ref_point,end='')
# print(sqrt((ref_point[0]-point[0])**2 +(ref_point[1]-point[1])**2))
return sqrt((ref_point[0]-point[0])**2 +(ref_point[1]-point[1])**2)
def polar_angle(points):
"""Returns list of polar angle between -pi and pi calculated
with respect to P0 - point with lowest x and y co-ordinate
Input: points(array-like) : set of points whose polar angle
needs to be calculated with respect to ref point
Output: polar angle array
"""
polar_angle = []
for each in points:
dy = each[1] - P0[1]
dx = each[0] - P0[0]
polar_angle.append(atan2(dy, dx))
return polar_angle
def sort_by_polar_angle_v2(pts):
"""Returns the list of points sorted by polar angle in
counterclockwise direction. For points with the same polar angle
only the farthest point is kept
Input: pts(array-like) : set of points for sorting by polar angle
Output: sorted order of input array of points
"""
### make a copy of points array to avoid corruption
### of original points array
copy_pts = []
for each in pts:
if each not in copy_pts:
copy_pts.append(each)
P0_idx = copy_pts.index(P0)
del copy_pts[P0_idx]
# Call polar_angle function to calculate polar angle
# of points with respect to P0
p =polar_angle(copy_pts)
#########For sorting polar angle array ######
# Once we get the polar angle array, we use numpy.argsort
# to get the indices of sorted polar angle array
# using the indices serves two purpose
# 1. Sort polar angle array
# 2. Sort list of points array
# 3. Develop logic to take farthest point in case of
# collinear
np_p = np.asarray(p)
sorted_idx = np.argsort(np_p,kind='mergesort')
# Do steps 1. and 2. of above commented logic
sorted_p = []
sorted_pts = []
for each in sorted_idx:
sorted_p.append(p[each])
sorted_pts.append(copy_pts[each])
# Code for step 3.
check_dict = {}
for i in range(len(sorted_p)-1):
for j in range(i+1,len(sorted_p)):
if sorted_p[j] == sorted_p[i]:
if sorted_p[i] not in check_dict:
temp_list=[]
temp_list.append(sorted_pts[i])
check_dict[sorted_p[i]]=temp_list
temp_list2 = []
temp_list2 = check_dict[sorted_p[i]]
if sorted_pts[j] not in temp_list2:
temp_list2.append(sorted_pts[j])
check_dict[sorted_p[i]]=temp_list2
if sorted_pts[j] in temp_list2:
break
else:
break
for dict_val in check_dict.values():
farthest_pt = dict_val[0]
max_dist = euclidean_distance_v2(farthest_pt,P0)
for each in dict_val[1:]:
if euclidean_distance_v2(each,P0) > max_dist:
sorted_pts = [x for x in sorted_pts if x!=farthest_pt]
max_dist = euclidean_distance_v2(each,P0)
farthest_pt = each
if euclidean_distance_v2(each,P0) < max_dist:
sorted_pts = [x for x in sorted_pts if x!=each]
return sorted_pts
def sort_by_polar_angle(points):
"""Returns sorted order of points array.
This is the initial version of the sort_by_polar_angle function.
Input: points(array-like) : set of points to be sorted with
respect to P0
Output: sorted array of remaining points
"""
# Call polar_angle function to calculate polar angle
# of points with respect to P0
p = polar_angle(points)
polar_angle_arr = np.asarray(p)
vals1, idx_start1, count1 = np.unique(polar_angle_arr, return_counts=True,
return_index=True)
idx_sorted_pang = np.argsort(polar_angle_arr)
sorted_polar_angle_arr = polar_angle_arr[idx_sorted_pang]
vals, idx_start, count = np.unique(sorted_polar_angle_arr, return_counts=True,
return_index=True)
res = np.split(idx_sorted_pang, idx_start[1:])
#filter them with respect to their size, keeping only items occurring more than once
final_points =[]
for each in res:
# print("len(each)",len(each))
if len(each) > 1:
i = each.tolist()
check_points = [] |
final_points.append(check_points[max_far_idx])
elif len(each) == 1:
final_points.append(points[each.tolist()[0]])
return final_points
def cross_product(p0,p1,p2):
"""Returns the cross product of the vectors p0->p1 and p0->p2.
The value returned is +ve for a counterclockwise (left) turn, -ve for a clockwise (right) turn, and 0 when the three points are collinear
"""
return (((p1[0]-p0[0])*(p2[1]-p0[1]))-((p2[0]-p0[0])*(p1[1]-p0[1])))
def read_points():
"""
Work In Progress file to read points from text file
"""
points = []
f = open(r'sample_points.txt')
while True:
nstr = f.readline()
if len(nstr) == 0:
break
line = nstr.rstrip('\n').split(', ')
# print(line)
points.append((round(float(line[0]),3),round(float(line[1]),3)))
print(points)
return points
def create_random_points(n):
"""Returns random points for input choice 1 from menu screen
Input:n(int) : size of input
Output: points array
"""
return [(random.randint(0,n),random.randint(0,n)) for i in range(n)]
def points_on_circumference(center=(0, 0), r=50, n=100):
""" Returns points around the boundary of circle with random distribution
It is called when choice of input entered is 2
"""
return [
(
center[0]+(cos(2 * pi / n * x) * r),
center[1] + (sin(2 * pi / n * x) * r)
) for x in range(0, n + 1)]
def create_export_files(n,input_choice,timing,min_hull_per):
"""Creates folder analysis if not exists in current directory and creates
results.csv file
Input: n(int): size of input
input_choice(int): choice of input from menu
timing(decimal): Timing in sec of algo
min_hull_per(int): percentage of hull points from n
Output: Appends results of execution to the csv file
"""
exists = os.path.isdir('analysis')
if exists:
f = open('analysis/results.csv','a',newline='')
results = csv.writer(f)
else:
os.mkdir('analysis')
f = open('analysis/results.csv','w',newline='')
results = csv.writer(f)
results.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])
results.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])
def points_on_circumference_with_per(center=(0, 0), r=50, n=100, per = 50):
"""Returns points around boundary of circle with random points distributed
inside circle. It is called when choice of input entered is 3
Input: center(tuple) : co-ordinates for center of circle
r(int) : input for radius of circle
n(int) : size of input
per(int) : percentage of points of n that should be on boundary
Output : points array
"""
# circum_cnt is the actual number of points on the circumference as a percentage of the total
# random points(n) = Percentage_of_Total_Points * n / 100
circum_cnt = int(per*n/100)
# random_cnt is points inside the circle = Total random points - Points on Circum
random_cnt = n - circum_cnt
# Append points on circumference
final_pts = [
(
center[0]+(cos(2 * pi / circum_cnt * x) * r),
center[1] + (sin(2 * pi / circum_cnt * x) * r)
) for x in range(0, circum_cnt + 1)]
# Generate random points inside circle
# random points inside the circle should have a radius of at least 5 to be visible enough
for i in range(1,random_cnt+1):
final_pts.append( (center[0]+ cos(2 * pi / circum_cnt * i) * random.randint(1,r-20),
center[1] + sin(2 * pi / circum_cnt * i) * random.randint(1,r-20)))
return final_pts
def show_convex_hull(points, input_choice, timing,percent_pts,size,hull_points = None):
"""Returns plot with parameters from menu screen and saves the plot in /plots
directory
"""
exists = os.path.isdir('plots')
if not exists:
os.mkdir('plots')
for each in points:
plt.plot(each[0],each[1],'o-')
if hull_points is not None:
hull_pt_list = []
for each in hull_points:
hull_pt_list.append(list(each))
hull_pt_arr = np.asarray(hull_pt_list)
# print(hull_pt_arr)
plt.plot(hull_pt_arr[:,0],hull_pt_arr[:,1],'k-')
first_coord = hull_pt_arr[0,:].reshape(1,2)
last_coord = hull_pt_arr[len(hull_pt_arr)-1,:].reshape(1,2)
last_coord_arr = np.append(first_coord, last_coord, axis = 0)
plt.plot(last_coord_arr[:,0],last_coord_arr[:,1],'k-')
plt.title(label = 'For input : '+input_choice+percent_pts+' time taken = '+str(timing)+' s\n'+'N='+str(size))
plt.savefig('plots/'+'Graham_Scan_'+str(input_choice)+str(percent_pts)+'_N='+str(size)+'.png')
plt.show()
def graham_scan():
### Menu Screen for Program Starts
choice_of_input = input("Enter choice of random point distribution:\n1. Random scatter\n2. Circle\n3. Minimal Points on Circle\n")
if choice_of_input == "1":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = ''
break
except ValueError:
print("Enter integer value for input size")
points = create_random_points(n)
elif choice_of_input == "2":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = ''
radius = input("Enter the radius")
r = int(radius)
center_str = input("Enter comma separated x and y co-ordinates")
center_str = center_str.split(",")
center_x = int(center_str[0])
center_y = int(center_str[1])
break
except ValueError:
print("Enter integer value for input size/radius")
points = points_on_circumference((center_x,center_y),r, n)
elif choice_of_input == "3":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = input("Enter percentage of points on hull")
per_min_pt = float(per_min_pt)
radius = input("Enter the radius")
r = int(radius)
center_str = input("Enter comma separated x and y co-ordinates")
center_str = center_str.split(",")
center_x = int(center_str[0])
center_y = int(center_str[1])
break
except ValueError:
print("Enter integer value for input size/radius")
points = points_on_circumference_with_per((center_x,center_y),r, n, per_min_pt)
### Menu Screen for Program Ends
# Set P0 to be global so that it can be accessed by other functions
global P0
# Find P0 with minimum y co-ordinate
P0 = point_with_min_y(points)
# Begin tracking the execution time
start = time.time()
# Sort the remaining points in points array by polar angle
# in counterclockwise order around P0
sorted_points = sort_by_polar_angle_v2(points)
# Initial version of sort by polar angle - faster than the current one
# sorted_points2 = sort_by_polar_angle(points)
# Create an empty stack
s = Stack()
# Push P0, two points from sorted array on stack
s.push(P0)
s.push(sorted_points[0])
s.push(sorted_points[1])
# Update the sorted array from 3rd element
sorted_points = sorted_points[2:]
# Find the boundary using cross product
for i in range(len(sorted_points)):
while cross_product(s.next_to_top(),s.top(),sorted_points[i]) < 0:
s.pop()
s.push(sorted_points[i])
end = time.time()
#helper dictionary for generating plots
input_choice_title = {1:'Random Scatter',2:'Circle',3:'Circle with min. hull pts %'}
##Call results function
show_convex_hull(points,input_choice_title[int(choice_of_input)],round((end-start),6),str(per_min_pt),n,s.print_all())
create_export_files(n,input_choice_title[int(choice_of_input)],(end-start),str(per_min_pt))
if __name__ == '__main__':
graham_scan() | for j in i:
check_points.append(points[j])
check_points_arr = np.asarray(check_points)
max_far_idx = np.argmax(euclidean_distance(check_points,P0)) | random_line_split |
graham_scan.py | """
Author: Ajinkya Shinde
"""
# Importing the necessary packages
from Stack import Stack
from math import atan2, sqrt, pi, cos, sin
import numpy as np
import matplotlib.pyplot as plt
import time
import random
import os
import csv
def point_with_min_y(points):
"""Returns the point with the minimum y co-ordinate and the
leftmost point in case of a tie from a set of points
Input: points (array-type)
Output: P0(tuple)
"""
min_idx = None
for a,coord in enumerate(points):
if min_idx is None:
min_idx = a
P0_Y = coord[1]
P0_X = coord[0]
elif coord[1] < P0_Y:
# look for the point with lowest y co-ordinate
min_idx = a
P0_X = coord[0]
P0_Y = coord[1]
elif (coord[1] == P0_Y) & (coord[0] < P0_X):
# In case of a tie with the lowest y co-ordinate
# take one which is leftmost or lowest x
# co-ordinate
min_idx = a
P0_X = coord[0]
P0_Y = coord[1]
return (P0_X,P0_Y)
def euclidean_distance(points, ref_point):
"""Returns euclidean distance for all set of points
Input: points (array-like) : set of points whose
euclidean distance needs to be found
ref_point (tuple-like) : point to be used as
reference for distance calculation
Output: array object with euclidean distance for all the points
passed
Note: This function is used by sort_by_polar_angle - the original
version for the sort by polar angle logic
"""
euclidean_dist = []
for each in points:
eucl_dist = sqrt((ref_point[0]-each[0])**2 +(ref_point[1]-each[1])**2)
euclidean_dist.append(eucl_dist)
return np.asarray(euclidean_dist)
def euclidean_distance_v2(point, ref_point):
# print('Calculating dist between',point,' and ',ref_point,end='')
# print(sqrt((ref_point[0]-point[0])**2 +(ref_point[1]-point[1])**2))
return sqrt((ref_point[0]-point[0])**2 +(ref_point[1]-point[1])**2)
def polar_angle(points):
"""Returns list of polar angle between -pi and pi calculated
with respect to P0 - point with lowest x and y co-ordinate
Input: points(array-like) : set of points whose polar angle
needs to be calculated with respect to ref point
Output: polar angle array
"""
polar_angle = []
for each in points:
dy = each[1] - P0[1]
dx = each[0] - P0[0]
polar_angle.append(atan2(dy, dx))
return polar_angle
def sort_by_polar_angle_v2(pts):
"""Returns the list of points sorted by polar angle in
counterclockwise direction. For points with the same polar angle
only the farthest point is kept
Input: pts(array-like) : set of points for sorting by polar angle
Output: sorted order of input array of points
"""
### make a copy of points array to avoid corruption
### of original points array
copy_pts = []
for each in pts:
if each not in copy_pts:
copy_pts.append(each)
P0_idx = copy_pts.index(P0)
del copy_pts[P0_idx]
# Call polar_angle function to calculate polar angle
# of points with respect to P0
p =polar_angle(copy_pts)
#########For sorting polar angle array ######
# Once we get the polar angle array, we use numpy.argsort
# to get the indices of sorted polar angle array
# using the indices serves two purpose
# 1. Sort polar angle array
# 2. Sort list of points array
# 3. Develop logic to take farthest point in case of
# collinear
np_p = np.asarray(p)
sorted_idx = np.argsort(np_p,kind='mergesort')
# Do steps 1. and 2. of above commented logic
sorted_p = []
sorted_pts = []
for each in sorted_idx:
sorted_p.append(p[each])
sorted_pts.append(copy_pts[each])
# Code for step 3.
check_dict = {}
for i in range(len(sorted_p)-1):
for j in range(i+1,len(sorted_p)):
if sorted_p[j] == sorted_p[i]:
if sorted_p[i] not in check_dict:
temp_list=[]
temp_list.append(sorted_pts[i])
check_dict[sorted_p[i]]=temp_list
temp_list2 = []
temp_list2 = check_dict[sorted_p[i]]
if sorted_pts[j] not in temp_list2:
temp_list2.append(sorted_pts[j])
check_dict[sorted_p[i]]=temp_list2
if sorted_pts[j] in temp_list2:
break
else:
break
for dict_val in check_dict.values():
farthest_pt = dict_val[0]
max_dist = euclidean_distance_v2(farthest_pt,P0)
for each in dict_val[1:]:
if euclidean_distance_v2(each,P0) > max_dist:
sorted_pts = [x for x in sorted_pts if x!=farthest_pt]
max_dist = euclidean_distance_v2(each,P0)
farthest_pt = each
if euclidean_distance_v2(each,P0) < max_dist:
sorted_pts = [x for x in sorted_pts if x!=each]
return sorted_pts
def sort_by_polar_angle(points):
"""Returns sorted order of points array.
This is the initial version of the sort_by_polar_angle function.
Input: points(array-like) : set of points to be sorted with
respect to P0
Output: sorted array of remaining points
"""
# Call polar_angle function to calculate polar angle
# of points with respect to P0
p = polar_angle(points)
polar_angle_arr = np.asarray(p)
vals1, idx_start1, count1 = np.unique(polar_angle_arr, return_counts=True,
return_index=True)
idx_sorted_pang = np.argsort(polar_angle_arr)
sorted_polar_angle_arr = polar_angle_arr[idx_sorted_pang]
vals, idx_start, count = np.unique(sorted_polar_angle_arr, return_counts=True,
return_index=True)
res = np.split(idx_sorted_pang, idx_start[1:])
#filter them with respect to their size, keeping only items occurring more than once
final_points =[]
for each in res:
# print("len(each)",len(each))
if len(each) > 1:
i = each.tolist()
check_points = []
for j in i:
check_points.append(points[j])
check_points_arr = np.asarray(check_points)
max_far_idx = np.argmax(euclidean_distance(check_points,P0))
final_points.append(check_points[max_far_idx])
elif len(each) == 1:
final_points.append(points[each.tolist()[0]])
return final_points
def cross_product(p0,p1,p2):
"""Returns the cross product of the vectors p0->p1 and p0->p2.
The value returned is +ve for a counterclockwise (left) turn, -ve for a clockwise (right) turn, and 0 when the three points are collinear
"""
return (((p1[0]-p0[0])*(p2[1]-p0[1]))-((p2[0]-p0[0])*(p1[1]-p0[1])))
def read_points():
"""
Work In Progress file to read points from text file
"""
points = []
f = open(r'sample_points.txt')
while True:
nstr = f.readline()
if len(nstr) == 0:
break
line = nstr.rstrip('\n').split(', ')
# print(line)
points.append((round(float(line[0]),3),round(float(line[1]),3)))
print(points)
return points
def create_random_points(n):
"""Returns random points for input choice 1 from menu screen
Input:n(int) : size of input
Output: points array
"""
return [(random.randint(0,n),random.randint(0,n)) for i in range(n)]
def points_on_circumference(center=(0, 0), r=50, n=100):
|
def create_export_files(n,input_choice,timing,min_hull_per):
"""Creates folder analysis if not exists in current directory and creates
results.csv file
Input: n(int): size of input
input_choice(int): choice of input from menu
timing(decimal): Timing in sec of algo
min_hull_per(int): percentage of hull points from n
Output: Appends results of execution to the csv file
"""
exists = os.path.isdir('analysis')
if exists:
f = open('analysis/results.csv','a',newline='')
results = csv.writer(f)
else:
os.mkdir('analysis')
f = open('analysis/results.csv','w',newline='')
results = csv.writer(f)
results.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])
results.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])
def points_on_circumference_with_per(center=(0, 0), r=50, n=100, per = 50):
"""Returns points around boundary of circle with random points distributed
inside circle. It is called when choice of input entered is 3
Input: center(tuple) : co-ordinates for center of circle
r(int) : input for radius of circle
n(int) : size of input
per(int) : percentage of points of n that should be on boundary
Output : points array
"""
# circum_cnt is the actual number of points on the circumference as a percentage of the total
# random points(n) = Percentage_of_Total_Points * n / 100
circum_cnt = int(per*n/100)
# random_cnt is points inside the circle = Total random points - Points on Circum
random_cnt = n - circum_cnt
# Append points on circumference
final_pts = [
(
center[0]+(cos(2 * pi / circum_cnt * x) * r),
center[1] + (sin(2 * pi / circum_cnt * x) * r)
) for x in range(0, circum_cnt + 1)]
# Generate random points inside circle
# random points inside the circle should have a radius of at least 5 to be visible enough
for i in range(1,random_cnt+1):
final_pts.append( (center[0]+ cos(2 * pi / circum_cnt * i) * random.randint(1,r-20),
center[1] + sin(2 * pi / circum_cnt * i) * random.randint(1,r-20)))
return final_pts
def show_convex_hull(points, input_choice, timing,percent_pts,size,hull_points = None):
"""Returns plot with parameters from menu screen and saves the plot in /plots
directory
"""
exists = os.path.isdir('plots')
if not exists:
os.mkdir('plots')
for each in points:
plt.plot(each[0],each[1],'o-')
if hull_points is not None:
hull_pt_list = []
for each in hull_points:
hull_pt_list.append(list(each))
hull_pt_arr = np.asarray(hull_pt_list)
# print(hull_pt_arr)
plt.plot(hull_pt_arr[:,0],hull_pt_arr[:,1],'k-')
first_coord = hull_pt_arr[0,:].reshape(1,2)
last_coord = hull_pt_arr[len(hull_pt_arr)-1,:].reshape(1,2)
last_coord_arr = np.append(first_coord, last_coord, axis = 0)
plt.plot(last_coord_arr[:,0],last_coord_arr[:,1],'k-')
plt.title(label = 'For input : '+input_choice+percent_pts+' time taken = '+str(timing)+' s\n'+'N='+str(size))
plt.savefig('plots/'+'Graham_Scan_'+str(input_choice)+str(percent_pts)+'_N='+str(size)+'.png')
plt.show()
def graham_scan():
### Menu Screen for Program Starts
choice_of_input = input("Enter choice of random point distribution:\n1. Random scatter\n2. Circle\n3. Minimal Points on Circle\n")
if choice_of_input == "1":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = ''
break
except ValueError:
print("Enter integer value for input size")
points = create_random_points(n)
elif choice_of_input == "2":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = ''
radius = input("Enter the radius")
r = int(radius)
center_str = input("Enter comma separated x and y co-ordinates")
center_str = center_str.split(",")
center_x = int(center_str[0])
center_y = int(center_str[1])
break
except ValueError:
print("Enter integer value for input size/radius")
points = points_on_circumference((center_x,center_y),r, n)
elif choice_of_input == "3":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = input("Enter percentage of points on hull")
per_min_pt = float(per_min_pt)
radius = input("Enter the radius")
r = int(radius)
center_str = input("Enter comma separated x and y co-ordinates")
center_str = center_str.split(",")
center_x = int(center_str[0])
center_y = int(center_str[1])
break
except ValueError:
print("Enter integer value for input size/radius")
points = points_on_circumference_with_per((center_x,center_y),r, n, per_min_pt)
### Menu Screen for Program Ends
# Set P0 to be global so that it can be accessed by other functions
global P0
# Find P0 with minimum y co-ordinate
P0 = point_with_min_y(points)
# Begin tracking the execution time
start = time.time()
# Sort the remaining points in points array by polar angle
# in counterclockwise order around P0
sorted_points = sort_by_polar_angle_v2(points)
# Initial version of sort by polar angle - faster than the current one
# sorted_points2 = sort_by_polar_angle(points)
# Create an empty stack
s = Stack()
# Push P0, two points from sorted array on stack
s.push(P0)
s.push(sorted_points[0])
s.push(sorted_points[1])
# Update the sorted array from 3rd element
sorted_points = sorted_points[2:]
# Find the boundary using cross product
for i in range(len(sorted_points)):
while cross_product(s.next_to_top(),s.top(),sorted_points[i]) < 0:
s.pop()
s.push(sorted_points[i])
end = time.time()
#helper dictionary for generating plots
input_choice_title = {1:'Random Scatter',2:'Circle',3:'Circle with min. hull pts %'}
##Call results function
show_convex_hull(points,input_choice_title[int(choice_of_input)],round((end-start),6),str(per_min_pt),n,s.print_all())
create_export_files(n,input_choice_title[int(choice_of_input)],(end-start),str(per_min_pt))
if __name__ == '__main__':
graham_scan() | """ Returns points around the boundary of circle with random distribution
It is called when choice of input entered is 2
"""
return [
(
center[0]+(cos(2 * pi / n * x) * r),
center[1] + (sin(2 * pi / n * x) * r)
) for x in range(0, n + 1)] | identifier_body |
dataset.py | import os,sys
import inspect
import tempfile
import shutil
from OpenVisus import *
# in the configure step we don't have numpy yet
try:
import numpy
except:
pass
# //////////////////////////////////////////////////////////
def CreateIdx(**args):
if not "url" in args:
raise Exception("url not specified")
url=args["url"]
if "rmtree" in args and args["rmtree"]==True:
dir=os.path.dirname(url)
shutil.rmtree(dir, ignore_errors=True)
idx=IdxFile()
buffer=None
if "data" in args:
data=args["data"]
dim=int(args["dim"])
Assert(dim>=2) # you must specify the point dim since it could be that data has multiple components
buffer=Array.fromNumPy(data,TargetDim=dim, bShareMem=True)
idx.logic_box=BoxNi(PointNi.zero(dim),PointNi(buffer.dims))
N=1 if dim==len(data.shape) else data.shape[-1]
elif "dims" in args:
dims=PointNi(args["dims"])
idx.logic_box=BoxNi(PointNi.zero(dims.getPointDim()),dims)
else:
raise Exception("please specify dimensions or source data")
# add fields
if "fields" in args:
for field in args["fields"]:
idx.fields.push_back(field)
elif buffer:
idx.fields.push_back(Field.fromString("DATA {} default_layout(row_major)".format(buffer.dtype.toString())))
else:
raise Exception("no field")
# bitsperblock
if "bitsperblock" in args:
idx.bitsperblock=int(args["bitsperblock"])
# compute db overall size
TOT=0
for field in idx.fields:
TOT+=field.dtype.getByteSize(idx.logic_box.size())
# blocks per file
if "blocksperfile" in args:
idx.blocksperfile=int(args["blocksperfile"])
elif "data" in args or TOT<2*(1024*1024*1024):
idx.blocksperfile=-1 # all blocks in one file
else:
idx.blocksperfile=0 # openvisus will guess (probably using multiple files)
# is the user specifying filters?
if "filters" in args and args["filters"]:
filters=args["filters"]
for I in range(idx.fields.size()):
idx.fields[I].filter=filters[I]
if "time" in args:
A,B,time_template=args["time"]
idx.timesteps=DatasetTimesteps(A,B,1.0)
idx.time_template=time_template
if "filename_template" in args:
idx.filename_template=args["filename_template"]
idx.save(url)
db=LoadDataset(url)
if buffer:
compression=args["compression"] if "compression" in args else ["zip"]
db.compressDataset(compression, buffer)
return db
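# Illustrative sketch (assumed values, not part of the original module): a minimal call to
# CreateIdx above could look like the following, with the url, dims and field string chosen
# purely for illustration:
#
# db = CreateIdx(url="./tmp/example.idx",
#                dims=[256, 256, 128],
#                fields=[Field.fromString("DATA uint8 default_layout(row_major)")],
#                bitsperblock=16)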
# //////////////////////////////////////////////
class PyDataset(object):
# constructor
def __init__(self,db):
self.db = db
# __getattr__
def __getattr__(self,attr):
return getattr(self.db, attr)
# getPointDim
def getPointDim(self):
return self.db.getPointDim()
# getMaxResolution
def getMaxResolution(self):
return self.db.getMaxResolution()
# getLogicBox
def getLogicBox(self,x=None,y=None,z=None):
pdim=self.getPointDim()
lbox=self.db.getLogicBox()
A=[lbox.p1[I] for I in range(pdim)]
B=[lbox.p2[I] for I in range(pdim)]
p1,p2=[0]*pdim,[0]*pdim
for I in range(pdim):
r=(x,y,z)[I]
if r is None: r=[A[I],B[I]]
p1[I] = int( A[I]+r[0]*(B[I]-A[I]) if isinstance(r[0],float) else r[0])
p2[I] = int( A[I]+r[1]*(B[I]-A[I]) if isinstance(r[1],float) else r[1])
return (p1,p2)
# getSliceLogicBox
def getSliceLogicBox(self,axis,offset):
p1,p2=self.getLogicBox()
p1[axis]=offset+0
p2[axis]=offset+1
return (p1,p2)
# getBounds
def getBounds(self, logic_box):
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
return Position(self.logicToPhysic(),Position(BoxNi(logic_box)))
# getLogicSize
def getLogicSize(self):
p1,p2=self.getLogicBox()
return numpy.subtract(p2,p1)
# getFields
def getFields(self):
return [field.name for field in self.db.getFields()]
# getField
def getField(self,value=None):
if value is None:
return self.db.getField()
if isinstance(value,str):
return self.db.getField(value)
return value
# createAccess
def createAccess(self):
return self.db.createAccess()
# readBlock
def readBlock(self, block_id, time=None, field=None, access=None, aborted=Aborted()):
Assert(access)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
read_block = self.db.createBlockQuery(block_id, field, time, ord('r'), aborted)
self.executeBlockQueryAndWait(access, read_block)
if not read_block.ok(): return None
return Array.toNumPy(read_block.buffer, bShareMem=False)
# writeBlock
def writeBlock(self, block_id, time=None, field=None, access=None, data=None, aborted=Aborted()):
Assert(access and data)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
write_block = self.db.createBlockQuery(block_id, field, time, ord('w'), aborted)
write_block.buffer=Array.fromNumPy(data,TargetDim=self.getPointDim(), bShareMem=True)
self.executeBlockQueryAndWait(access, write_block)
return write_block.ok()
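# Illustrative sketch (assumed dataset url and block id, not part of the original module):
# readBlock/writeBlock above require an explicit access object, e.g.
#
# db = PyDataset(LoadDataset("./tmp/example.idx"))
# access = db.createAccess()
# samples = db.readBlock(0, access=access) # numpy array, or None if the block query failed
# if samples is not None:
#     db.writeBlock(0, access=access, data=samples) # write the same samples back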
# read
def read(self, logic_box=None, x=None, y=None, z=None, time=None, field=None, num_refinements=1, quality=0, max_resolution=None, disable_filters=False, access=None):
"""
db=PyDataset.Load(url)
# example of reading a single slice in logic coordinates
data=db.read(z=[512,513])
# example of reading a single slice in normalized coordinates (i.e. [0,1])
data.db.read(x=[0,0.1],y=[0,0.1],z=[0,0.1])
# example of reading a single slice with 3 refinements
for data in db.read(z=[512,513],num_refinements=3):
print(data)
"""
pdim=self.getPointDim()
field=self.getField() if field is None else self.getField(field)
if time is None:
time = self.getTime()
if logic_box is None:
logic_box=self.getLogicBox(x,y,z)
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
query = self.db.createBoxQuery(BoxNi(logic_box), field , time, ord('r'))
if disable_filters:
query.disableFilters()
else:
query.enableFilters()
if max_resolution is None:
max_resolution=self.getMaxResolution()
# example quality -3 means not full resolution
Assert(quality<=0)
max_resolution=max_resolution+quality
for I in reversed(range(num_refinements)):
res=max_resolution-(pdim*I)
if res>=0:
query.end_resolutions.push_back(res)
self.db.beginBoxQuery(query)
if not query.isRunning():
|
if not access:
access=self.db.createAccess()
def NoGenerator():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# I cannot be sure how the numpy array will be used outside or when the query will deallocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
return data
def WithGenerator():
while query.isRunning():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# I cannot be sure how the numpy array will be used outside or when the query will deallocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
yield data
self.db.nextBoxQuery(query)
return NoGenerator() if query.end_resolutions.size()==1 else WithGenerator()
# write
# IMPORTANT: usually db.write happens without write lock and synchronously (at least in python)
def write(self, data, x=0, y=0, z=0,logic_box=None, time=None, field=None, access=None):
"""
db=PyDataset.Load(url)
width,height,depth=db.getSize()
# write single slice
data=numpy.zeros([height,width,3],dtype=numpy.uint8)
db.write(data,z=[512,513])
# write several slices in one-shot
nslices=10
data=numpy.zeros([nslices,height,width,3],dtype=numpy.uint8)
db.write(data,z=[512,512+nslices])
# write several slices with a generator
nslices=10
def gen():
for I in range(nslices):
yield numpy.zeros([height,width,3],dtype=numpy.uint8)
db.write(gen,z=512)
"""
pdim=self.getPointDim()
field=self.getField(field)
if time is None:
time = self.getTime()
dims=list(data.shape)
# remove last components
if field.dtype.ncomponents()>1:
dims=dims[:-1]
# it could be that I'm writing a slice, so I need to extend the dimensions
while len(dims)<pdim:
dims=[1] + dims
dims=list(reversed(dims))
if logic_box is None:
p1=PointNi([x,y,z][0:pdim])
logic_box=BoxNi(p1,p1+PointNi(dims))
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
query = self.db.createBoxQuery(logic_box, field , time , ord('w'))
query.end_resolutions.push_back(self.getMaxResolution())
self.db.beginBoxQuery(query)
if not query.isRunning():
raise Exception("begin query failed {0}".format(query.errormsg))
if not access:
access=IdxDiskAccess.create(self.db)
access.disableAsync()
access.disableWriteLock()
# I need to change the shape of the buffer, since the last component is the channel (like RGB for example)
buffer=Array.fromNumPy(data,bShareMem=True)
Assert(buffer.c_size()==data.nbytes)
buffer.resize(PointNi(dims),query.field.dtype,__file__,0)
query.buffer=buffer
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# writeSlabs
def writeSlabs(self,slices, x=0, y=0, z=0, time=None, field=None, max_memsize=1024*1024*1024, access=None):
os.environ["VISUS_DISABLE_WRITE_LOCK"]="1"
slab=[]
memsize=0
for slice in slices:
slab.append(slice)
memsize+=slice.nbytes
# flush
if memsize>=max_memsize:
data=numpy.stack(slab,axis=0)
self.write(data , x=x, y=y, z=z,field=field,time=time)
z+=len(slab)
slab=[]
memsize=0
# flush
if slab:
data=numpy.stack(slab,axis=0)
self.write(data , x=x, y=y, z=z,field=field,time=time, access=access)
| raise Exception("begin query failed {0}".format(query.errormsg)) | conditional_block |
dataset.py | import os,sys
import inspect
import tempfile
import shutil
from OpenVisus import *
#in configure step I dont have numpy yet
try:
import numpy
except:
pass
# //////////////////////////////////////////////////////////
def CreateIdx(**args):
if not "url" in args:
raise Exception("url not specified")
url=args["url"]
if "rmtree" in args and args["rmtree"]==True:
dir=os.path.dirname(url)
shutil.rmtree(dir, ignore_errors=True)
idx=IdxFile()
buffer=None
if "data" in args:
data=args["data"]
dim=int(args["dim"])
Assert(dim>=2) # you must specify the point dim since it could be that data has multiple components
buffer=Array.fromNumPy(data,TargetDim=dim, bShareMem=True)
idx.logic_box=BoxNi(PointNi.zero(dim),PointNi(buffer.dims))
N=1 if dim==len(data.shape) else data.shape[-1]
elif "dims" in args:
dims=PointNi(args["dims"])
idx.logic_box=BoxNi(PointNi.zero(dims.getPointDim()),dims)
else:
raise Exception("please specify dimensions or source data")
# add fields
if "fields" in args:
for field in args["fields"]:
idx.fields.push_back(field)
elif buffer:
idx.fields.push_back(Field.fromString("DATA {} default_layout(row_major)".format(buffer.dtype.toString())))
else:
raise Exception("no field")
# bitsperblock
if "bitsperblock" in args:
idx.bitsperblock=int(args["bitsperblock"])
# compute db overall size
TOT=0
for field in idx.fields:
TOT+=field.dtype.getByteSize(idx.logic_box.size())
# blocks per file
if "blocksperfile" in args:
idx.blocksperfile=int(args["blocksperfile"])
elif "data" in args or TOT<2*(1024*1024*1024):
idx.blocksperfile=-1 # all blocks in one file
else:
idx.blocksperfile=0 # openvisus will guess (probably using multiple files)
# is the user specifying filters?
if "filters" in args and args["filters"]:
filters=args["filters"]
for I in range(idx.fields.size()):
idx.fields[I].filter=filters[I]
if "time" in args:
A,B,time_template=args["time"]
idx.timesteps=DatasetTimesteps(A,B,1.0)
idx.time_template=time_template
if "filename_template" in args:
idx.filename_template=args["filename_template"]
idx.save(url)
db=LoadDataset(url)
if buffer:
compression=args["compression"] if "compression" in args else ["zip"]
db.compressDataset(compression, buffer)
return db
# //////////////////////////////////////////////
class PyDataset(object):
# constructor
def __init__(self,db):
self.db = db
# __getattr__
def __getattr__(self,attr):
return getattr(self.db, attr)
# getPointDim
def getPointDim(self):
return self.db.getPointDim()
# getMaxResolution
def getMaxResolution(self):
return self.db.getMaxResolution()
# getLogicBox
def getLogicBox(self,x=None,y=None,z=None):
pdim=self.getPointDim()
lbox=self.db.getLogicBox()
A=[lbox.p1[I] for I in range(pdim)]
B=[lbox.p2[I] for I in range(pdim)]
p1,p2=[0]*pdim,[0]*pdim
for I in range(pdim):
r=(x,y,z)[I]
if r is None: r=[A[I],B[I]]
p1[I] = int( A[I]+r[0]*(B[I]-A[I]) if isinstance(r[0],float) else r[0])
p2[I] = int( A[I]+r[1]*(B[I]-A[I]) if isinstance(r[1],float) else r[1])
return (p1,p2)
# getSliceLogicBox
def getSliceLogicBox(self,axis,offset):
p1,p2=self.getLogicBox()
p1[axis]=offset+0
p2[axis]=offset+1
return (p1,p2)
# getBounds
def getBounds(self, logic_box):
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
return Position(self.logicToPhysic(),Position(BoxNi(logic_box)))
# getLogicSize
def getLogicSize(self):
p1,p2=self.getLogicBox()
return numpy.subtract(p2,p1)
# getFields
def getFields(self):
return [field.name for field in self.db.getFields()]
# getField
def getField(self,value=None):
if value is None:
return self.db.getField()
if isinstance(value,str):
return self.db.getField(value)
return value
# createAccess
def createAccess(self):
|
# readBlock
def readBlock(self, block_id, time=None, field=None, access=None, aborted=Aborted()):
Assert(access)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
read_block = self.db.createBlockQuery(block_id, field, time, ord('r'), aborted)
self.executeBlockQueryAndWait(access, read_block)
if not read_block.ok(): return None
return Array.toNumPy(read_block.buffer, bShareMem=False)
# writeBlock
def writeBlock(self, block_id, time=None, field=None, access=None, data=None, aborted=Aborted()):
Assert(access and data)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
write_block = self.db.createBlockQuery(block_id, field, time, ord('w'), aborted)
write_block.buffer=Array.fromNumPy(data,TargetDim=self.getPointDim(), bShareMem=True)
self.executeBlockQueryAndWait(access, write_block)
return write_block.ok()
# read
def read(self, logic_box=None, x=None, y=None, z=None, time=None, field=None, num_refinements=1, quality=0, max_resolution=None, disable_filters=False, access=None):
"""
db=PyDataset.Load(url)
# example of reading a single slice in logic coordinates
data=db.read(z=[512,513])
# example of reading a single slice in normalized coordinates (i.e. [0,1])
data.db.read(x=[0,0.1],y=[0,0.1],z=[0,0.1])
# example of reading a single slice with 3 refinements
for data in db.read(z=[512,513],num_refinements=3):
print(data)
"""
pdim=self.getPointDim()
field=self.getField() if field is None else self.getField(field)
if time is None:
time = self.getTime()
if logic_box is None:
logic_box=self.getLogicBox(x,y,z)
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
query = self.db.createBoxQuery(BoxNi(logic_box), field , time, ord('r'))
if disable_filters:
query.disableFilters()
else:
query.enableFilters()
if max_resolution is None:
max_resolution=self.getMaxResolution()
# example quality -3 means not full resolution
Assert(quality<=0)
max_resolution=max_resolution+quality
for I in reversed(range(num_refinements)):
res=max_resolution-(pdim*I)
if res>=0:
query.end_resolutions.push_back(res)
self.db.beginBoxQuery(query)
if not query.isRunning():
raise Exception("begin query failed {0}".format(query.errormsg))
if not access:
access=self.db.createAccess()
def NoGenerator():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# i cannot be sure how the numpy will be used outside or when the query will dealllocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
return data
def WithGenerator():
while query.isRunning():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# i cannot be sure how the numpy will be used outside or when the query will dealllocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
yield data
self.db.nextBoxQuery(query)
return NoGenerator() if query.end_resolutions.size()==1 else WithGenerator()
# write
# IMPORTANT: usually db.write happens without write lock and syncronously (at least in python)
def write(self, data, x=0, y=0, z=0,logic_box=None, time=None, field=None, access=None):
"""
db=PyDataset.Load(url)
width,height,depth=db.getSize()
# write single slice
data=numpy.zeros([height,width,3],dtype.uint8)
db.write(data,z=[512,513])
# write several slices in one-shot
nslices=10
data=numpy.zeros([nslices,height,width,10,3],dtype.uint8)
db.write(data,z=[512,512+nslices])
# write several slices with a generator
nslices=10
def gen():
for I in range(nslices):
yield numpy.zeros([height,width,3],dtype=numpy.uint8)
db.write(gen,z=512)
"""
pdim=self.getPointDim()
field=self.getField(field)
if time is None:
time = self.getTime()
dims=list(data.shape)
# remove last components
if field.dtype.ncomponents()>1:
dims=dims[:-1]
# could be I'm writing a slice, I need to increment the "dimension"
while len(dims)<pdim:
dims=[1] + dims
dims=list(reversed(dims))
if logic_box is None:
p1=PointNi([x,y,z][0:pdim])
logic_box=BoxNi(p1,p1+PointNi(dims))
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
query = self.db.createBoxQuery(logic_box, field , time , ord('w'))
query.end_resolutions.push_back(self.getMaxResolution())
self.db.beginBoxQuery(query)
if not query.isRunning():
raise Exception("begin query failed {0}".format(query.errormsg))
if not access:
access=IdxDiskAccess.create(self.db)
access.disableAsync()
access.disableWriteLock()
# I need to change the shape of the buffer, since the last component is the channel (like RGB for example)
buffer=Array.fromNumPy(data,bShareMem=True)
Assert(buffer.c_size()==data.nbytes)
buffer.resize(PointNi(dims),query.field.dtype,__file__,0)
query.buffer=buffer
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# writeSlabs
def writeSlabs(self,slices, x=0, y=0, z=0, time=None, field=None, max_memsize=1024*1024*1024, access=None):
os.environ["VISUS_DISABLE_WRITE_LOCK"]="1"
slab=[]
memsize=0
for slice in slices:
slab.append(slice)
memsize+=slice.nbytes
# flush
if memsize>=max_memsize:
data=numpy.stack(slab,axis=0)
self.write(data , x=x, y=y, z=z,field=field,time=time)
z+=len(slab)
slab=[]
memsize=0
# flush
if slab:
data=numpy.stack(slab,axis=0)
self.write(data , x=x, y=y, z=z,field=field,time=time, access=access)
| return self.db.createAccess() | identifier_body |
dataset.py | import os,sys
import inspect
import tempfile
import shutil
from OpenVisus import *
#in configure step I dont have numpy yet
try:
import numpy
except:
pass
# //////////////////////////////////////////////////////////
def CreateIdx(**args):
if not "url" in args:
raise Exception("url not specified")
url=args["url"]
if "rmtree" in args and args["rmtree"]==True:
dir=os.path.dirname(url)
shutil.rmtree(dir, ignore_errors=True)
idx=IdxFile()
buffer=None
if "data" in args:
data=args["data"]
dim=int(args["dim"])
Assert(dim>=2) # you must specify the point dim since it could be that data has multiple components
buffer=Array.fromNumPy(data,TargetDim=dim, bShareMem=True)
idx.logic_box=BoxNi(PointNi.zero(dim),PointNi(buffer.dims))
N=1 if dim==len(data.shape) else data.shape[-1]
elif "dims" in args:
dims=PointNi(args["dims"])
idx.logic_box=BoxNi(PointNi.zero(dims.getPointDim()),dims)
else:
raise Exception("please specify dimensions or source data")
# add fields
if "fields" in args:
for field in args["fields"]:
idx.fields.push_back(field)
elif buffer:
idx.fields.push_back(Field.fromString("DATA {} default_layout(row_major)".format(buffer.dtype.toString())))
else:
raise Exception("no field")
# bitsperblock
if "bitsperblock" in args:
idx.bitsperblock=int(args["bitsperblock"])
# compute db overall size
TOT=0
for field in idx.fields:
TOT+=field.dtype.getByteSize(idx.logic_box.size())
# blocks per file
if "blocksperfile" in args:
idx.blocksperfile=int(args["blocksperfile"])
elif "data" in args or TOT<2*(1024*1024*1024):
idx.blocksperfile=-1 # all blocks in one file
else:
idx.blocksperfile=0 # openvisus will guess (probably using multiple files)
# is the user specifying filters?
if "filters" in args and args["filters"]:
filters=args["filters"]
for I in range(idx.fields.size()):
idx.fields[I].filter=filters[I]
if "time" in args:
A,B,time_template=args["time"]
idx.timesteps=DatasetTimesteps(A,B,1.0)
idx.time_template=time_template
if "filename_template" in args:
idx.filename_template=args["filename_template"]
idx.save(url)
db=LoadDataset(url)
if buffer:
compression=args["compression"] if "compression" in args else ["zip"]
db.compressDataset(compression, buffer)
return db
# //////////////////////////////////////////////
class | (object):
# constructor
def __init__(self,db):
self.db = db
# __getattr__
def __getattr__(self,attr):
return getattr(self.db, attr)
# getPointDim
def getPointDim(self):
return self.db.getPointDim()
# getMaxResolution
def getMaxResolution(self):
return self.db.getMaxResolution()
# getLogicBox
def getLogicBox(self,x=None,y=None,z=None):
pdim=self.getPointDim()
lbox=self.db.getLogicBox()
A=[lbox.p1[I] for I in range(pdim)]
B=[lbox.p2[I] for I in range(pdim)]
p1,p2=[0]*pdim,[0]*pdim
for I in range(pdim):
r=(x,y,z)[I]
if r is None: r=[A[I],B[I]]
p1[I] = int( A[I]+r[0]*(B[I]-A[I]) if isinstance(r[0],float) else r[0])
p2[I] = int( A[I]+r[1]*(B[I]-A[I]) if isinstance(r[1],float) else r[1])
return (p1,p2)
# getSliceLogicBox
def getSliceLogicBox(self,axis,offset):
p1,p2=self.getLogicBox()
p1[axis]=offset+0
p2[axis]=offset+1
return (p1,p2)
# getBounds
def getBounds(self, logic_box):
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
return Position(self.logicToPhysic(),Position(BoxNi(logic_box)))
# getLogicSize
def getLogicSize(self):
p1,p2=self.getLogicBox()
return numpy.subtract(p2,p1)
# getFields
def getFields(self):
return [field.name for field in self.db.getFields()]
# getField
def getField(self,value=None):
if value is None:
return self.db.getField()
if isinstance(value,str):
return self.db.getField(value)
return value
# createAccess
def createAccess(self):
return self.db.createAccess()
# readBlock
def readBlock(self, block_id, time=None, field=None, access=None, aborted=Aborted()):
Assert(access)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
read_block = self.db.createBlockQuery(block_id, field, time, ord('r'), aborted)
self.executeBlockQueryAndWait(access, read_block)
if not read_block.ok(): return None
return Array.toNumPy(read_block.buffer, bShareMem=False)
# writeBlock
def writeBlock(self, block_id, time=None, field=None, access=None, data=None, aborted=Aborted()):
Assert(access and data)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
write_block = self.db.createBlockQuery(block_id, field, time, ord('w'), aborted)
write_block.buffer=Array.fromNumPy(data,TargetDim=self.getPointDim(), bShareMem=True)
self.executeBlockQueryAndWait(access, write_block)
return write_block.ok()
# read
def read(self, logic_box=None, x=None, y=None, z=None, time=None, field=None, num_refinements=1, quality=0, max_resolution=None, disable_filters=False, access=None):
"""
db=PyDataset.Load(url)
# example of reading a single slice in logic coordinates
data=db.read(z=[512,513])
# example of reading a single slice in normalized coordinates (i.e. [0,1])
data.db.read(x=[0,0.1],y=[0,0.1],z=[0,0.1])
# example of reading a single slice with 3 refinements
for data in db.read(z=[512,513],num_refinements=3):
print(data)
"""
pdim=self.getPointDim()
field=self.getField() if field is None else self.getField(field)
if time is None:
time = self.getTime()
if logic_box is None:
logic_box=self.getLogicBox(x,y,z)
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
query = self.db.createBoxQuery(BoxNi(logic_box), field , time, ord('r'))
if disable_filters:
query.disableFilters()
else:
query.enableFilters()
if max_resolution is None:
max_resolution=self.getMaxResolution()
# example quality -3 means not full resolution
Assert(quality<=0)
max_resolution=max_resolution+quality
for I in reversed(range(num_refinements)):
res=max_resolution-(pdim*I)
if res>=0:
query.end_resolutions.push_back(res)
self.db.beginBoxQuery(query)
if not query.isRunning():
raise Exception("begin query failed {0}".format(query.errormsg))
if not access:
access=self.db.createAccess()
def NoGenerator():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# i cannot be sure how the numpy will be used outside or when the query will dealllocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
return data
def WithGenerator():
while query.isRunning():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# i cannot be sure how the numpy will be used outside or when the query will dealllocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
yield data
self.db.nextBoxQuery(query)
return NoGenerator() if query.end_resolutions.size()==1 else WithGenerator()
# write
# IMPORTANT: usually db.write happens without write lock and syncronously (at least in python)
def write(self, data, x=0, y=0, z=0,logic_box=None, time=None, field=None, access=None):
"""
db=PyDataset.Load(url)
width,height,depth=db.getSize()
# write single slice
data=numpy.zeros([height,width,3],dtype.uint8)
db.write(data,z=[512,513])
# write several slices in one-shot
nslices=10
data=numpy.zeros([nslices,height,width,10,3],dtype.uint8)
db.write(data,z=[512,512+nslices])
# write several slices with a generator
nslices=10
def gen():
for I in range(nslices):
yield numpy.zeros([height,width,3],dtype=numpy.uint8)
db.write(gen,z=512)
"""
pdim=self.getPointDim()
field=self.getField(field)
if time is None:
time = self.getTime()
dims=list(data.shape)
# remove last components
if field.dtype.ncomponents()>1:
dims=dims[:-1]
# could be I'm writing a slice, I need to increment the "dimension"
while len(dims)<pdim:
dims=[1] + dims
dims=list(reversed(dims))
if logic_box is None:
p1=PointNi([x,y,z][0:pdim])
logic_box=BoxNi(p1,p1+PointNi(dims))
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
query = self.db.createBoxQuery(logic_box, field , time , ord('w'))
query.end_resolutions.push_back(self.getMaxResolution())
self.db.beginBoxQuery(query)
if not query.isRunning():
raise Exception("begin query failed {0}".format(query.errormsg))
if not access:
access=IdxDiskAccess.create(self.db)
access.disableAsync()
access.disableWriteLock()
# I need to change the shape of the buffer, since the last component is the channel (like RGB for example)
buffer=Array.fromNumPy(data,bShareMem=True)
Assert(buffer.c_size()==data.nbytes)
buffer.resize(PointNi(dims),query.field.dtype,__file__,0)
query.buffer=buffer
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# writeSlabs
def writeSlabs(self,slices, x=0, y=0, z=0, time=None, field=None, max_memsize=1024*1024*1024, access=None):
os.environ["VISUS_DISABLE_WRITE_LOCK"]="1"
slab=[]
memsize=0
for slice in slices:
slab.append(slice)
memsize+=slice.nbytes
# flush
if memsize>=max_memsize:
data=numpy.stack(slab,axis=0)
self.write(data , x=x, y=y, z=z,field=field,time=time)
z+=len(slab)
slab=[]
memsize=0
# flush
if slab:
data=numpy.stack(slab,axis=0)
self.write(data , x=x, y=y, z=z,field=field,time=time, access=access)
| PyDataset | identifier_name |
dataset.py | import os,sys
import inspect
import tempfile
import shutil
from OpenVisus import *
#in configure step I dont have numpy yet
try:
import numpy
except:
pass
# //////////////////////////////////////////////////////////
def CreateIdx(**args):
if not "url" in args:
raise Exception("url not specified")
url=args["url"]
if "rmtree" in args and args["rmtree"]==True:
dir=os.path.dirname(url)
shutil.rmtree(dir, ignore_errors=True)
idx=IdxFile()
buffer=None
if "data" in args:
data=args["data"]
dim=int(args["dim"])
Assert(dim>=2) # you must specify the point dim since it could be that data has multiple components
buffer=Array.fromNumPy(data,TargetDim=dim, bShareMem=True)
idx.logic_box=BoxNi(PointNi.zero(dim),PointNi(buffer.dims))
N=1 if dim==len(data.shape) else data.shape[-1]
elif "dims" in args:
dims=PointNi(args["dims"])
idx.logic_box=BoxNi(PointNi.zero(dims.getPointDim()),dims)
else:
raise Exception("please specify dimensions or source data")
# add fields
if "fields" in args:
for field in args["fields"]:
idx.fields.push_back(field)
elif buffer:
idx.fields.push_back(Field.fromString("DATA {} default_layout(row_major)".format(buffer.dtype.toString())))
else:
raise Exception("no field")
# bitsperblock
if "bitsperblock" in args:
idx.bitsperblock=int(args["bitsperblock"])
# compute db overall size
TOT=0
for field in idx.fields:
TOT+=field.dtype.getByteSize(idx.logic_box.size())
# blocks per file
if "blocksperfile" in args:
idx.blocksperfile=int(args["blocksperfile"])
elif "data" in args or TOT<2*(1024*1024*1024):
idx.blocksperfile=-1 # all blocks in one file
else:
idx.blocksperfile=0 # openvisus will guess (probably using multiple files)
# is the user specifying filters?
if "filters" in args and args["filters"]:
filters=args["filters"]
for I in range(idx.fields.size()):
idx.fields[I].filter=filters[I]
if "time" in args:
A,B,time_template=args["time"]
idx.timesteps=DatasetTimesteps(A,B,1.0)
idx.time_template=time_template
if "filename_template" in args:
idx.filename_template=args["filename_template"]
idx.save(url)
db=LoadDataset(url)
if buffer:
compression=args["compression"] if "compression" in args else ["zip"]
db.compressDataset(compression, buffer)
return db
# //////////////////////////////////////////////
class PyDataset(object):
# constructor
def __init__(self,db):
self.db = db
# __getattr__
def __getattr__(self,attr):
return getattr(self.db, attr)
# getPointDim
def getPointDim(self):
return self.db.getPointDim()
# getMaxResolution
def getMaxResolution(self):
return self.db.getMaxResolution()
# getLogicBox
def getLogicBox(self,x=None,y=None,z=None):
pdim=self.getPointDim()
lbox=self.db.getLogicBox()
A=[lbox.p1[I] for I in range(pdim)]
B=[lbox.p2[I] for I in range(pdim)]
p1,p2=[0]*pdim,[0]*pdim
for I in range(pdim):
r=(x,y,z)[I]
if r is None: r=[A[I],B[I]]
p1[I] = int( A[I]+r[0]*(B[I]-A[I]) if isinstance(r[0],float) else r[0])
p2[I] = int( A[I]+r[1]*(B[I]-A[I]) if isinstance(r[1],float) else r[1])
return (p1,p2)
# getSliceLogicBox
def getSliceLogicBox(self,axis,offset):
p1,p2=self.getLogicBox()
p1[axis]=offset+0
p2[axis]=offset+1
return (p1,p2)
# getBounds
def getBounds(self, logic_box):
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
return Position(self.logicToPhysic(),Position(BoxNi(logic_box)))
# getLogicSize
def getLogicSize(self):
p1,p2=self.getLogicBox()
return numpy.subtract(p2,p1)
# getFields
def getFields(self):
return [field.name for field in self.db.getFields()]
# getField
def getField(self,value=None):
if value is None:
return self.db.getField()
if isinstance(value,str):
return self.db.getField(value)
return value
# createAccess
def createAccess(self):
return self.db.createAccess()
# readBlock
def readBlock(self, block_id, time=None, field=None, access=None, aborted=Aborted()):
Assert(access)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
read_block = self.db.createBlockQuery(block_id, field, time, ord('r'), aborted)
self.executeBlockQueryAndWait(access, read_block)
if not read_block.ok(): return None
return Array.toNumPy(read_block.buffer, bShareMem=False)
# writeBlock
def writeBlock(self, block_id, time=None, field=None, access=None, data=None, aborted=Aborted()):
Assert(access and data)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
write_block = self.db.createBlockQuery(block_id, field, time, ord('w'), aborted)
write_block.buffer=Array.fromNumPy(data,TargetDim=self.getPointDim(), bShareMem=True)
self.executeBlockQueryAndWait(access, write_block)
return write_block.ok()
# read
def read(self, logic_box=None, x=None, y=None, z=None, time=None, field=None, num_refinements=1, quality=0, max_resolution=None, disable_filters=False, access=None):
"""
db=PyDataset.Load(url)
# example of reading a single slice in logic coordinates
data=db.read(z=[512,513])
# example of reading a single slice in normalized coordinates (i.e. [0,1])
data.db.read(x=[0,0.1],y=[0,0.1],z=[0,0.1])
# example of reading a single slice with 3 refinements
for data in db.read(z=[512,513],num_refinements=3):
print(data)
"""
pdim=self.getPointDim()
field=self.getField() if field is None else self.getField(field)
if time is None:
time = self.getTime()
if logic_box is None:
logic_box=self.getLogicBox(x,y,z)
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
query = self.db.createBoxQuery(BoxNi(logic_box), field , time, ord('r'))
if disable_filters:
query.disableFilters()
else:
query.enableFilters()
if max_resolution is None:
max_resolution=self.getMaxResolution()
# example quality -3 means not full resolution
Assert(quality<=0)
max_resolution=max_resolution+quality
for I in reversed(range(num_refinements)):
res=max_resolution-(pdim*I)
if res>=0:
query.end_resolutions.push_back(res)
self.db.beginBoxQuery(query)
if not query.isRunning():
raise Exception("begin query failed {0}".format(query.errormsg))
if not access:
access=self.db.createAccess()
def NoGenerator():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# i cannot be sure how the numpy will be used outside or when the query will dealllocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
return data
def WithGenerator():
while query.isRunning():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# i cannot be sure how the numpy will be used outside or when the query will dealllocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
yield data
self.db.nextBoxQuery(query)
return NoGenerator() if query.end_resolutions.size()==1 else WithGenerator()
# write
# IMPORTANT: usually db.write happens without write lock and syncronously (at least in python)
def write(self, data, x=0, y=0, z=0,logic_box=None, time=None, field=None, access=None):
"""
db=PyDataset.Load(url)
width,height,depth=db.getSize()
# write single slice
data=numpy.zeros([height,width,3],dtype.uint8)
db.write(data,z=[512,513])
# write several slices in one-shot
nslices=10
data=numpy.zeros([nslices,height,width,10,3],dtype.uint8)
db.write(data,z=[512,512+nslices])
# write several slices with a generator
nslices=10
def gen():
for I in range(nslices):
yield numpy.zeros([height,width,3],dtype=numpy.uint8)
db.write(gen,z=512)
"""
pdim=self.getPointDim()
field=self.getField(field)
if time is None:
time = self.getTime()
dims=list(data.shape)
# remove last components
if field.dtype.ncomponents()>1:
dims=dims[:-1]
# could be I'm writing a slice, I need to increment the "dimension"
while len(dims)<pdim:
dims=[1] + dims
dims=list(reversed(dims))
if logic_box is None:
p1=PointNi([x,y,z][0:pdim])
logic_box=BoxNi(p1,p1+PointNi(dims))
if isinstance(logic_box,(tuple,list)):
|
query = self.db.createBoxQuery(logic_box, field , time , ord('w'))
query.end_resolutions.push_back(self.getMaxResolution())
self.db.beginBoxQuery(query)
if not query.isRunning():
raise Exception("begin query failed {0}".format(query.errormsg))
if not access:
access=IdxDiskAccess.create(self.db)
access.disableAsync()
access.disableWriteLock()
# I need to change the shape of the buffer, since the last component is the channel (like RGB for example)
buffer=Array.fromNumPy(data,bShareMem=True)
Assert(buffer.c_size()==data.nbytes)
buffer.resize(PointNi(dims),query.field.dtype,__file__,0)
query.buffer=buffer
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# writeSlabs
def writeSlabs(self,slices, x=0, y=0, z=0, time=None, field=None, max_memsize=1024*1024*1024, access=None):
os.environ["VISUS_DISABLE_WRITE_LOCK"]="1"
slab=[]
memsize=0
for slice in slices:
slab.append(slice)
memsize+=slice.nbytes
# flush
if memsize>=max_memsize:
data=numpy.stack(slab,axis=0)
self.write(data , x=x, y=y, z=z,field=field,time=time)
z+=len(slab)
slab=[]
memsize=0
# flush
if slab:
data=numpy.stack(slab,axis=0)
self.write(data , x=x, y=y, z=z,field=field,time=time, access=access) | logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
| random_line_split |
imitation_ddpg_model.py | import gym
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
import setup_path
import airsim
import time
import os
import pyautogui
import pytesseract
from PIL import Image
import tracking
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
start_ymd = int(float(time.strftime('%y%m%d')))
start_hm = int(float(time.strftime('%H%M')))
start_time = str(start_ymd) + '_' + str(start_hm)
os.makedirs('./tracking/' + start_time, exist_ok=True)
os.makedirs('./save_models/' + start_time, exist_ok=True)
num_states = 11
num_actions = 4
upper_bound = 1
lower_bound = 0
api_control = False
class OUActionNoise:
def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):
self.theta = theta
self.mean = mean
self.std_dev = std_deviation
self.dt = dt
self.x_initial = x_initial
self.reset()
def __call__(self):
# Formula taken from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.
x = (
self.x_prev
+ self.theta * (self.mean - self.x_prev) * self.dt
+ self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)
)
# Store x into x_prev
# Makes next noise dependent on current one
self.x_prev = x
return x
def reset(self):
if self.x_initial is not None:
self.x_prev = self.x_initial
else:
self.x_prev = np.zeros_like(self.mean)
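# Illustrative note (not part of the original script): __call__ above is the Euler
# discretisation of an Ornstein-Uhlenbeck process,
# x_next = x + theta * (mean - x) * dt + std_dev * sqrt(dt) * N(0, 1),
# so consecutive samples are temporally correlated. A quick smoke test could be:
#
# ou = OUActionNoise(mean=np.zeros(1), std_deviation=0.2 * np.ones(1))
# print([float(ou()) for _ in range(5)]) # correlated values drifting around 0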
class Buffer:
def __init__(self, buffer_capacity=100000, batch_size=64):
# Number of "experiences" to store at max
self.buffer_capacity = buffer_capacity
# Num of tuples to train on.
self.batch_size = batch_size
# Its tells us num of times record() was called.
self.buffer_counter = 0
# Instead of list of tuples as the exp.replay concept go
# We use different np.arrays for each tuple element
self.state_buffer = np.zeros((self.buffer_capacity, num_states))
self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
self.reward_buffer = np.zeros((self.buffer_capacity, 1))
self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))
# Takes (s,a,r,s') obervation tuple as input
def record(self, obs_tuple):
# Set index to zero if buffer_capacity is exceeded,
# replacing old records
index = self.buffer_counter % self.buffer_capacity
self.state_buffer[index] = obs_tuple[0]
self.action_buffer[index] = obs_tuple[1]
self.reward_buffer[index] = obs_tuple[2]
self.next_state_buffer[index] = obs_tuple[3]
self.buffer_counter += 1
# Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
# TensorFlow to build a static graph out of the logic and computations in our function.
# This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
@tf.function
def update(
self, state_batch, action_batch, reward_batch, next_state_batch,
):
# Training and updating Actor & Critic networks.
# See Pseudo Code.
with tf.GradientTape() as tape:
target_actions = target_actor(next_state_batch, training=True)
y = reward_batch + gamma * target_critic(
[next_state_batch, target_actions], training=True
)
critic_value = critic_model([state_batch, action_batch], training=True)
critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables)
critic_optimizer.apply_gradients(
zip(critic_grad, critic_model.trainable_variables)
)
with tf.GradientTape() as tape:
actions = actor_model(state_batch, training=True)
critic_value = critic_model([state_batch, actions], training=True)
# Used `-value` as we want to maximize the value given
# by the critic for our actions
actor_loss = -tf.math.reduce_mean(critic_value)
actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables)
actor_optimizer.apply_gradients(
zip(actor_grad, actor_model.trainable_variables)
)
# We compute the loss and update parameters
def learn(self):
# Get sampling range
record_range = min(self.buffer_counter, self.buffer_capacity)
# Randomly sample indices
batch_indices = np.random.choice(record_range, self.batch_size)
# Convert to tensors
state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])
reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
reward_batch = tf.cast(reward_batch, dtype=tf.float32)
next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])
self.update(state_batch, action_batch, reward_batch, next_state_batch)
# This update target parameters slowly
# Based on rate `tau`, which is much less than one.
@tf.function
def update_target(target_weights, weights, tau):
for (a, b) in zip(target_weights, weights):
a.assign(b * tau + a * (1 - tau))
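# Illustrative note (not part of the original script): update_target above is a Polyak (soft)
# target update, theta_target <- tau * theta_online + (1 - tau) * theta_target, and is called
# after every learn() step further below, e.g.:
#
# update_target(target_actor.variables, actor_model.variables, tau)
# update_target(target_critic.variables, critic_model.variables, tau)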
def get_actor():
# Initialize weights between -3e-3 and 3-e3
last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
inputs = layers.Input(shape=(num_states,))
out = layers.Dense(256, activation="relu")(inputs)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(4, activation="tanh", kernel_initializer=last_init)(out)
model = tf.keras.Model(inputs, outputs)
return model
def get_critic():
# State as input
state_input = layers.Input(shape=(num_states))
state_out = layers.Dense(128, activation="relu")(state_input)
state_out = layers.Dense(128, activation="relu")(state_out)
# Action as input
action_input = layers.Input(shape=(num_actions))
action_out = layers.Dense(64, activation="relu")(action_input)
# Both are passed through separate layers before concatenating
concat = layers.Concatenate()([state_out, action_out])
out = layers.Dense(256, activation="relu")(concat)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1)(out)
# Outputs single value for give state-action
model = tf.keras.Model([state_input, action_input], outputs)
return model
def policy(state, noise_object):
sampled_actions = tf.squeeze(actor_model(state))
noise = noise_object()
# Adding noise to action
sampled_actions = sampled_actions.numpy() + noise
np.clip(sampled_actions[3], 0, 1)
sampled_actions[3] = 0 if sampled_actions[3] < 0.5 else 1
legal_action = [np.clip(sampled_actions[0], 0, 1), # brake
np.clip(sampled_actions[1], -1, 1), # steering
np.clip(sampled_actions[2], -1, 1), # throttle
sampled_actions[3]] # direction
return [np.squeeze(legal_action)]
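# Illustrative sketch (not part of the original script): the four components returned by
# policy() are interpreted in the training loop as brake, steering, throttle and gear
# direction; applying them to AirSim would mirror the commented-out block in the main loop:
#
# car_controls.brake = 1 if float(action[0]) > 0.5 else 0
# car_controls.steering = float(action[1])
# car_controls.throttle = float(abs(action[2]))
# car_controls.is_manual_gear = not bool(action[3]) # reverse gear when direction is 0
# car_controls.manual_gear = 0 if action[3] else -1
# client.setCarControls(car_controls)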
def sim_start(): # launch the simulator
# print(pyautogui.position()) # (1125, 455)
pyautogui.click(1125, 455)
# time.sleep(1)
pyautogui.keyDown('altleft')
pyautogui.keyDown('p')
pyautogui.keyUp('altleft')
pyautogui.keyUp('p')
time.sleep(1)
pyautogui.click(1125, 455)
# connect to the AirSim simulator
client = airsim.CarClient()
client.confirmConnection()
client.enableApiControl(api_control)
print("API Control enabled: %s\n" % client.isApiControlEnabled())
car_controls = airsim.CarControls()
time.sleep(1)
return client, car_controls
def sim_stop(): # stop the simulator
# print(pyautogui.position()) # (1125, 455)
pyautogui.click(1125, 455)
time.sleep(1)
# quit the simulator
pyautogui.keyDown('esc')
pyautogui.keyUp('esc')
time.sleep(1)
def capture_goal(): # convert the goal point from Unreal coordinates to AirSim coordinates
# goal coordinates as displayed in Unreal
unreal_goals = [[600, 2600], [600, 2230], [600, 1800], [600, 1430], [600, 990], [600, 620], # right side
[-1200, 2600], [-1200, 2230], [-1200, 1800], [-1200, 1430], [-1200, 990]] # left side
# goal coordinates as reported through the AirSim API
airsim_goals = [[6, -14], [6, -17], [6, -22], [6, -25], [6, -30], [6, -33], # right side
[-7, -14], [-7, -17], [-7, -22], [-7, -25], [-7, -30]] # left side
# capture a screenshot of the region where the goal coordinates are displayed
img = pyautogui.screenshot('goal.png', region=(36, 90, 210, 15)) # based on full screen (F11)
# convert the coordinate screenshot to a string via OCR
goal_pos = pytesseract.image_to_string(Image.open('goal.png'))
# print(goal_pos[:-2])
# split the x and y coordinates -> convert the values to float
goal_pos = str.split(goal_pos[:-2], ' ')
x = str.split(goal_pos[0], '.')[0]
y = str.split(goal_pos[1], '.')[0]
x = int(float(x[2:]))
if y[0] == 'ยฅ': # occurs when OCR occasionally misreads the character
y = int(float(y[3:]))
else:
y = int(float(y[2:]))
goal_xy = []
for i in range(len(airsim_goals)):
if x == unreal_goals[i][0] and y == unreal_goals[i][1]:
# print('Goal x :', airsim_goals[i][0])
# print('Goal y :', airsim_goals[i][1])
goal_xy = airsim_goals[i]
print('Goal :', airsim_goals[i])
break
return goal_xy
def save_model():
# Save the weights
actor_model.save(
".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_actor_ep" + str(ep_cnt + 1) + ".h5")
critic_model.save(
".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_critic_ep" + str(ep_cnt + 1) + ".h5")
target_actor.save(".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_target_actor_ep" + str(
ep_cnt + 1) + ".h5")
target_critic.save(".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_target_critic_ep" + str(
ep_cnt + 1) + ".h5")
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'
std_dev = 0.2
ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
actor_model = get_actor()
critic_model = get_critic()
target_actor = get_actor()
target_critic = get_critic()
# Making the weights equal initially
target_actor.set_weights(actor_model.get_weights())
target_critic.set_weights(critic_model.get_weights())
# Learning rate for actor-critic models
critic_lr = 0.002
actor_lr = 0.001
critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
total_episodes = 100
# Discount factor for future rewards
gamma = 0.99
# Used to update target networks
tau = 0.005
buffer = Buffer(50000, 64)
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# on the first run, stop and restart the simulator because of a reset-related issue with collision-object recognition
client, car_controls = sim_start()
collision = (client.simGetCollisionInfo().object_name).lower()
while collision.find('pipesmall') < 0 and collision != '':
sim_stop()
client, car_controls = sim_start()
time.sleep(2)
ep_cnt = 0
tracking_img = []
period = 5 # episode interval for saving the driving-path image
# Takes about 4 min to train
for ep in range(total_episodes):
ep_cnt = ep
# if ep == 0 or ep + 1 % period == 0:
tracking_img = cv.imread('map.png', cv.IMREAD_GRAYSCALE)
# prev_state = env.reset()
prev_state = [client.getCarState().kinematics_estimated.position.x_val, # vehicle position x
client.getCarState().kinematics_estimated.position.y_val, # vehicle position y
client.getCarState().speed, # vehicle speed
client.getCarControls().brake, # brake
client.getCarControls().steering, # steering direction
client.getCarControls().throttle, # throttle
client.getCarControls().manual_gear, # reverse gear
client.getDistanceSensorData("Distance1").distance, # front distance sensor
client.getDistanceSensorData("Distance2").distance, # right distance sensor
client.getDistanceSensorData("Distance3").distance, # rear distance sensor
client.getDistanceSensorData("Distance4").distance] # left distance sensor
episodic_reward = 0
is_captured = 0
count = 0
start_time = 0
end_time = 0
total_steps = 0
reward = 0
done = False
while True:
total_steps += 1
if is_captured == 0:
goal = capture_goal()
is_captured = 1
tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0) | action = policy(tf_prev_state, ou_noise)
action = tf.squeeze(action)
print('episode :', ep + 1, '|',
'brake :', round(float(action[0]), 3), '|', 'steering :', round(float(action[1]), 3), '|',
'throttle :', round(float(abs(action[2])), 3), '|', 'direction :', round(float(action[3]), 3), '|',
'total_reward :', round(episodic_reward, 6))
# car_controls.brake = 1 if float(action[0]) > 0.5 else 0
# car_controls.steering = float(action[1])
# car_controls.throttle = float(abs(action[2]))
# if action[3]:
# car_controls.manual_gear = 0
# car_controls.is_manual_gear = False
# else:
# car_controls.manual_gear = -1
# car_controls.is_manual_gear = True
#
# client.setCarControls(car_controls)
# Recieve state and reward from environment.
# state, reward, done, info = env.step(action)
state = [client.getCarState().kinematics_estimated.position.x_val, # vehicle position x
client.getCarState().kinematics_estimated.position.y_val, # vehicle position y
client.getCarState().speed, # vehicle speed
client.getCarControls().brake, # brake
client.getCarControls().steering, # steering direction
client.getCarControls().throttle, # throttle
client.getCarControls().manual_gear, # reverse gear
client.getDistanceSensorData("Distance1").distance, # front distance sensor
client.getDistanceSensorData("Distance2").distance, # right distance sensor
client.getDistanceSensorData("Distance3").distance, # rear distance sensor
client.getDistanceSensorData("Distance4").distance] # left distance sensor
# record the vehicle's driving path
# if ep == 0 or ep+1 % period == 0:
tracking_img = tracking.tracking(tracking_img, state[0], state[1])
# reward = 1/1000 if ((client.simGetCollisionInfo().object_name).lower()).find('pipesmall') >= 0 else -1
collision = (client.simGetCollisionInfo().object_name).lower()
if collision.find('pipesmall') >= 0 or collision == '':
done = False
else:
print('Episode', ep + 1, ': Crash!!')
# reward += -1
reward = -100
done = True
if (goal[0] > 0):
if (6 < client.getCarState().kinematics_estimated.position.x_val < 8 and
goal[1] - 1 < client.getCarState().kinematics_estimated.position.y_val < goal[1] + 1):
print('Episode', ep + 1, ': Success!!')
# reward += 1
reward = 100
done = True
elif (goal[0] < 0):
if (-9 < client.getCarState().kinematics_estimated.position.x_val < -7 and
goal[1] - 1 < client.getCarState().kinematics_estimated.position.y_val < goal[1] + 1):
print('Episode', ep + 1, ': Success!!')
# reward += 1
reward = 100
done = True
if round(prev_state[0], 2) == round(state[0], 2) and round(prev_state[1], 2) == round(state[1], 2):
reward = -1 / 1000
if count == 0:
count += 1
start_time = time.time()
end_time = time.time()
else:
count += 1
end_time = time.time()
if end_time - start_time >= 10:
print('Episode', ep + 1, ": Don't just stand there!!")
count = 0
# reward += -1
reward = -200
done = True
else:
reward = 1 / 100000
count = 0
buffer.record((prev_state, action, reward, state))
episodic_reward += reward
buffer.learn()
update_target(target_actor.variables, actor_model.variables, tau)
update_target(target_critic.variables, critic_model.variables, tau)
# End this episode when `done` is True
if done:
print('Final Reward :', episodic_reward)
print('Total Steps :', total_steps)
if ep == 0 or (ep + 1) % period == 0:
cv.imwrite(".\\tracking\\" + str(start_ymd) + '_' + str(start_hm) + "\\ep" + str(ep + 1) + ".png",
tracking_img)
print('tracking image saved')
is_captured = 0
sim_stop()
sim_stop()
if ep + 1 == total_episodes:
break
client, car_controls = sim_start()
sim_stop()
sim_stop()
client, car_controls = sim_start()
break
prev_state = state
ep_reward_list.append(episodic_reward)
# Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
print("Episode * {} * Avg Reward is ==> {}".format(ep + 1, avg_reward))
avg_reward_list.append(avg_reward)
save_model()
print('model weight saved')
sim_stop()
sim_stop()
# Plotting graph
# Episodes versus Avg. Rewards
plt.plot(avg_reward_list)
plt.xlabel("Episode")
plt.ylabel("Avg. Epsiodic Reward")
ct = time.localtime()
plt.savefig('.\\graph\\' + str(start_ymd) + '_' + str(start_hm) + '.png')
print('graph saved')
plt.show() | random_line_split |
|
imitation_ddpg_model.py | import gym
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
import setup_path
import airsim
import time
import os
import pyautogui
import pytesseract
from PIL import Image
import tracking
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
start_ymd = int(float(time.strftime('%y%m%d')))
start_hm = int(float(time.strftime('%H%M')))
start_time = str(start_ymd) + '_' + str(start_hm)
os.makedirs('./tracking/' + start_time, exist_ok=True)
os.makedirs('./save_models/' + start_time, exist_ok=True)
num_states = 11
num_actions = 4
upper_bound = 1
lower_bound = 0
api_control = False
class OUActionNoise:
def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):
self.theta = theta
self.mean = mean
self.std_dev = std_deviation
self.dt = dt
self.x_initial = x_initial
self.reset()
def __call__(self):
# Formula taken from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.
x = (
self.x_prev
+ self.theta * (self.mean - self.x_prev) * self.dt
+ self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)
)
# Store x into x_prev
# Makes next noise dependent on current one
self.x_prev = x
return x
def reset(self):
if self.x_initial is not None:
self.x_prev = self.x_initial
else:
self.x_prev = np.zeros_like(self.mean)
class Buffer:
def __init__(self, buffer_capacity=100000, batch_size=64):
# Number of "experiences" to store at max
|
# Takes (s,a,r,s') obervation tuple as input
def record(self, obs_tuple):
# Set index to zero if buffer_capacity is exceeded,
# replacing old records
index = self.buffer_counter % self.buffer_capacity
self.state_buffer[index] = obs_tuple[0]
self.action_buffer[index] = obs_tuple[1]
self.reward_buffer[index] = obs_tuple[2]
self.next_state_buffer[index] = obs_tuple[3]
self.buffer_counter += 1
# Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
# TensorFlow to build a static graph out of the logic and computations in our function.
# This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
@tf.function
def update(
self, state_batch, action_batch, reward_batch, next_state_batch,
):
# Training and updating Actor & Critic networks.
# See Pseudo Code.
with tf.GradientTape() as tape:
target_actions = target_actor(next_state_batch, training=True)
y = reward_batch + gamma * target_critic(
[next_state_batch, target_actions], training=True
)
critic_value = critic_model([state_batch, action_batch], training=True)
critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables)
critic_optimizer.apply_gradients(
zip(critic_grad, critic_model.trainable_variables)
)
with tf.GradientTape() as tape:
actions = actor_model(state_batch, training=True)
critic_value = critic_model([state_batch, actions], training=True)
# Used `-value` as we want to maximize the value given
# by the critic for our actions
actor_loss = -tf.math.reduce_mean(critic_value)
actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables)
actor_optimizer.apply_gradients(
zip(actor_grad, actor_model.trainable_variables)
)
# We compute the loss and update parameters
def learn(self):
# Get sampling range
record_range = min(self.buffer_counter, self.buffer_capacity)
# Randomly sample indices
batch_indices = np.random.choice(record_range, self.batch_size)
# Convert to tensors
state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])
reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
reward_batch = tf.cast(reward_batch, dtype=tf.float32)
next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])
self.update(state_batch, action_batch, reward_batch, next_state_batch)
# This update target parameters slowly
# Based on rate `tau`, which is much less than one.
@tf.function
def update_target(target_weights, weights, tau):
for (a, b) in zip(target_weights, weights):
a.assign(b * tau + a * (1 - tau))
def get_actor():
# Initialize weights between -3e-3 and 3-e3
last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
inputs = layers.Input(shape=(num_states,))
out = layers.Dense(256, activation="relu")(inputs)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(4, activation="tanh", kernel_initializer=last_init)(out)
model = tf.keras.Model(inputs, outputs)
return model
def get_critic():
# State as input
state_input = layers.Input(shape=(num_states))
state_out = layers.Dense(128, activation="relu")(state_input)
state_out = layers.Dense(128, activation="relu")(state_out)
# Action as input
action_input = layers.Input(shape=(num_actions))
action_out = layers.Dense(64, activation="relu")(action_input)
# Both are passed through separate layers before concatenating
concat = layers.Concatenate()([state_out, action_out])
out = layers.Dense(256, activation="relu")(concat)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1)(out)
# Outputs a single value for a given state-action pair
model = tf.keras.Model([state_input, action_input], outputs)
return model
def policy(state, noise_object):
sampled_actions = tf.squeeze(actor_model(state))
noise = noise_object()
# Adding noise to action
sampled_actions = sampled_actions.numpy() + noise
sampled_actions[3] = np.clip(sampled_actions[3], 0, 1)
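# Discretize the direction output to a binary gear choice (1 = forward, 0 = reverse)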
sampled_actions[3] = 0 if sampled_actions[3] < 0.5 else 1
legal_action = [np.clip(sampled_actions[0], 0, 1), # brake
np.clip(sampled_actions[1], -1, 1), # steering
np.clip(sampled_actions[2], -1, 1), # throttle
sampled_actions[3]] # direction
return [np.squeeze(legal_action)]
def sim_start(): # Start the simulator
# print(pyautogui.position()) # (1125, 455)
pyautogui.click(1125, 455)
# time.sleep(1)
pyautogui.keyDown('altleft')
pyautogui.keyDown('p')
pyautogui.keyUp('altleft')
pyautogui.keyUp('p')
time.sleep(1)
pyautogui.click(1125, 455)
# connect to the AirSim simulator
client = airsim.CarClient()
client.confirmConnection()
client.enableApiControl(api_control)
print("API Control enabled: %s\n" % client.isApiControlEnabled())
car_controls = airsim.CarControls()
time.sleep(1)
return client, car_controls
def sim_stop(): # Stop the simulator
# print(pyautogui.position()) # (1125, 455)
pyautogui.click(1125, 455)
time.sleep(1)
# Quit the simulator
pyautogui.keyDown('esc')
pyautogui.keyUp('esc')
time.sleep(1)
def capture_goal(): # Convert the goal point's Unreal coordinates to AirSim coordinates
# Goal point coordinates as printed by Unreal
unreal_goals = [[600, 2600], [600, 2230], [600, 1800], [600, 1430], [600, 990], [600, 620], # right side
[-1200, 2600], [-1200, 2230], [-1200, 1800], [-1200, 1430], [-1200, 990]] # left side
# Goal point coordinates as reported through the AirSim API
airsim_goals = [[6, -14], [6, -17], [6, -22], [6, -25], [6, -30], [6, -33], # right side
[-7, -14], [-7, -17], [-7, -22], [-7, -25], [-7, -30]] # left side
# Capture a screenshot of the region where the goal coordinates are printed
img = pyautogui.screenshot('goal.png', region=(36, 90, 210, 15)) # assumes fullscreen (F11)
# Convert the coordinate screenshot to a string via OCR
goal_pos = pytesseract.image_to_string(Image.open('goal.png'))
# print(goal_pos[:-2])
# Split the x and y coordinates and convert the values to float
goal_pos = str.split(goal_pos[:-2], ' ')
x = str.split(goal_pos[0], '.')[0]
y = str.split(goal_pos[1], '.')[0]
x = int(float(x[2:]))
if y[0] == '¥': # handles cases where the OCR occasionally misreads this character
y = int(float(y[3:]))
else:
y = int(float(y[2:]))
goal_xy = []
for i in range(len(airsim_goals)):
if x == unreal_goals[i][0] and y == unreal_goals[i][1]:
# print('Goal x :', airsim_goals[i][0])
# print('Goal y :', airsim_goals[i][1])
goal_xy = airsim_goals[i]
print('Goal :', airsim_goals[i])
break
return goal_xy
def save_model():
# Save the weights
actor_model.save(
".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_actor_ep" + str(ep_cnt + 1) + ".h5")
critic_model.save(
".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_critic_ep" + str(ep_cnt + 1) + ".h5")
target_actor.save(".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_target_actor_ep" + str(
ep_cnt + 1) + ".h5")
target_critic.save(".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_target_critic_ep" + str(
ep_cnt + 1) + ".h5")
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'
std_dev = 0.2
ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
actor_model = get_actor()
critic_model = get_critic()
target_actor = get_actor()
target_critic = get_critic()
# Making the weights equal initially
target_actor.set_weights(actor_model.get_weights())
target_critic.set_weights(critic_model.get_weights())
# Learning rate for actor-critic models
critic_lr = 0.002
actor_lr = 0.001
critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
total_episodes = 100
# Discount factor for future rewards
gamma = 0.99
# Used to update target networks
tau = 0.005
buffer = Buffer(50000, 64)
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# On the first run, stop and restart the simulator because of a reset issue after a collision object is detected
client, car_controls = sim_start()
collision = (client.simGetCollisionInfo().object_name).lower()
while collision.find('pipesmall') < 0 and collision != '':
sim_stop()
client, car_controls = sim_start()
time.sleep(2)
ep_cnt = 0
tracking_img = []
period = 5 # episode interval for saving the driving-path tracking image
# Takes about 4 min to train
for ep in range(total_episodes):
ep_cnt = ep
# if ep == 0 or ep + 1 % period == 0:
tracking_img = cv.imread('map.png', cv.IMREAD_GRAYSCALE)
# prev_state = env.reset()
prev_state = [client.getCarState().kinematics_estimated.position.x_val, # vehicle position x coordinate
client.getCarState().kinematics_estimated.position.y_val, # vehicle position y coordinate
client.getCarState().speed, # vehicle speed
client.getCarControls().brake, # brake
client.getCarControls().steering, # steering direction
client.getCarControls().throttle, # throttle
client.getCarControls().manual_gear, # reverse gear
client.getDistanceSensorData("Distance1").distance, # front distance sensor
client.getDistanceSensorData("Distance2").distance, # right distance sensor
client.getDistanceSensorData("Distance3").distance, # rear distance sensor
client.getDistanceSensorData("Distance4").distance] # left distance sensor
episodic_reward = 0
is_captured = 0
count = 0
start_time = 0
end_time = 0
total_steps = 0
reward = 0
done = False
while True:
total_steps += 1
if is_captured == 0:
goal = capture_goal()
is_captured = 1
tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
action = policy(tf_prev_state, ou_noise)
action = tf.squeeze(action)
print('episode :', ep + 1, '|',
'brake :', round(float(action[0]), 3), '|', 'steering :', round(float(action[1]), 3), '|',
'throttle :', round(float(abs(action[2])), 3), '|', 'direction :', round(float(action[3]), 3), '|',
'total_reward :', round(episodic_reward, 6))
# car_controls.brake = 1 if float(action[0]) > 0.5 else 0
# car_controls.steering = float(action[1])
# car_controls.throttle = float(abs(action[2]))
# if action[3]:
# car_controls.manual_gear = 0
# car_controls.is_manual_gear = False
# else:
# car_controls.manual_gear = -1
# car_controls.is_manual_gear = True
#
# client.setCarControls(car_controls)
# Receive state and reward from environment.
# state, reward, done, info = env.step(action)
state = [client.getCarState().kinematics_estimated.position.x_val, # vehicle position x coordinate
client.getCarState().kinematics_estimated.position.y_val, # vehicle position y coordinate
client.getCarState().speed, # vehicle speed
client.getCarControls().brake, # brake
client.getCarControls().steering, # steering direction
client.getCarControls().throttle, # throttle
client.getCarControls().manual_gear, # reverse gear
client.getDistanceSensorData("Distance1").distance, # front distance sensor
client.getDistanceSensorData("Distance2").distance, # right distance sensor
client.getDistanceSensorData("Distance3").distance, # rear distance sensor
client.getDistanceSensorData("Distance4").distance] # left distance sensor
# Record the vehicle's driving path
# if ep == 0 or ep+1 % period == 0:
tracking_img = tracking.tracking(tracking_img, state[0], state[1])
# reward = 1/1000 if ((client.simGetCollisionInfo().object_name).lower()).find('pipesmall') >= 0 else -1
collision = (client.simGetCollisionInfo().object_name).lower()
if collision.find('pipesmall') >= 0 or collision == '':
done = False
else:
print('Episode', ep + 1, ': Crash!!')
# reward += -1
reward = -100
done = True
if (goal[0] > 0):
if (6 < client.getCarState().kinematics_estimated.position.x_val < 8 and
goal[1] - 1 < client.getCarState().kinematics_estimated.position.y_val < goal[1] + 1):
print('Episode', ep + 1, ': Success!!')
# reward += 1
reward = 100
done = True
elif (goal[0] < 0):
if (-9 < client.getCarState().kinematics_estimated.position.x_val < -7 and
goal[1] - 1 < client.getCarState().kinematics_estimated.position.y_val < goal[1] + 1):
print('Episode', ep + 1, ': Success!!')
# reward += 1
reward = 100
done = True
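# If the car has not moved (position unchanged to two decimal places), apply a small penalty; end the episode with a larger penalty after 10 seconds of standing still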
if round(prev_state[0], 2) == round(state[0], 2) and round(prev_state[1], 2) == round(state[1], 2):
reward = -1 / 1000
if count == 0:
count += 1
start_time = time.time()
end_time = time.time()
else:
count += 1
end_time = time.time()
if end_time - start_time >= 10:
print('Episode', ep + 1, ": Don't just stand there!!")
count = 0
# reward += -1
reward = -200
done = True
else:
reward = 1 / 100000
count = 0
buffer.record((prev_state, action, reward, state))
episodic_reward += reward
buffer.learn()
update_target(target_actor.variables, actor_model.variables, tau)
update_target(target_critic.variables, critic_model.variables, tau)
# End this episode when `done` is True
if done:
print('Final Reward :', episodic_reward)
print('Total Steps :', total_steps)
if ep == 0 or (ep + 1) % period == 0:
cv.imwrite(".\\tracking\\" + str(start_ymd) + '_' + str(start_hm) + "\\ep" + str(ep + 1) + ".png",
tracking_img)
print('tracking image saved')
is_captured = 0
sim_stop()
sim_stop()
if ep + 1 == total_episodes:
break
client, car_controls = sim_start()
sim_stop()
sim_stop()
client, car_controls = sim_start()
break
prev_state = state
ep_reward_list.append(episodic_reward)
# Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
print("Episode * {} * Avg Reward is ==> {}".format(ep + 1, avg_reward))
avg_reward_list.append(avg_reward)
save_model()
print('model weight saved')
sim_stop()
sim_stop()
# Plotting graph
# Episodes versus Avg. Rewards
plt.plot(avg_reward_list)
plt.xlabel("Episode")
plt.ylabel("Avg. Epsiodic Reward")
ct = time.localtime()
plt.savefig('.\\graph\\' + str(start_ymd) + '_' + str(start_hm) + '.png')
print('graph saved')
plt.show()
| self.buffer_capacity = buffer_capacity
# Num of tuples to train on.
self.batch_size = batch_size
# It tells us the number of times record() was called.
self.buffer_counter = 0
# Instead of a list of tuples as in the classic experience replay setup,
# we use a separate np.array for each element of the tuple
self.state_buffer = np.zeros((self.buffer_capacity, num_states))
self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
self.reward_buffer = np.zeros((self.buffer_capacity, 1))
self.next_state_buffer = np.zeros((self.buffer_capacity, num_states)) | identifier_body |
imitation_ddpg_model.py | import gym
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
import setup_path
import airsim
import time
import os
import pyautogui
import pytesseract
from PIL import Image
import tracking
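# Enable dynamic GPU memory growth so TensorFlow does not reserve all GPU memory up front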
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
start_ymd = int(float(time.strftime('%y%m%d')))
start_hm = int(float(time.strftime('%H%M')))
start_time = str(start_ymd) + '_' + str(start_hm)
os.makedirs('./tracking/' + start_time, exist_ok=True)
os.makedirs('./save_models/' + start_time, exist_ok=True)
num_states = 11
num_actions = 4
upper_bound = 1
lower_bound = 0
api_control = False
class OUActionNoise:
def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):
self.theta = theta
self.mean = mean
self.std_dev = std_deviation
self.dt = dt
self.x_initial = x_initial
self.reset()
def __call__(self):
# Formula taken from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.
x = (
self.x_prev
+ self.theta * (self.mean - self.x_prev) * self.dt
+ self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)
)
# Store x into x_prev
# Makes next noise dependent on current one
self.x_prev = x
return x
def reset(self):
if self.x_initial is not None:
self.x_prev = self.x_initial
else:
self.x_prev = np.zeros_like(self.mean)
class Buffer:
def __init__(self, buffer_capacity=100000, batch_size=64):
# Number of "experiences" to store at max
self.buffer_capacity = buffer_capacity
# Num of tuples to train on.
self.batch_size = batch_size
# It tells us the number of times record() was called.
self.buffer_counter = 0
# Instead of a list of tuples as in the classic experience replay setup,
# we use a separate np.array for each element of the tuple
self.state_buffer = np.zeros((self.buffer_capacity, num_states))
self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
self.reward_buffer = np.zeros((self.buffer_capacity, 1))
self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))
# Takes (s,a,r,s') observation tuple as input
def record(self, obs_tuple):
# Set index to zero if buffer_capacity is exceeded,
# replacing old records
index = self.buffer_counter % self.buffer_capacity
self.state_buffer[index] = obs_tuple[0]
self.action_buffer[index] = obs_tuple[1]
self.reward_buffer[index] = obs_tuple[2]
self.next_state_buffer[index] = obs_tuple[3]
self.buffer_counter += 1
# Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
# TensorFlow to build a static graph out of the logic and computations in our function.
# This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
@tf.function
def update(
self, state_batch, action_batch, reward_batch, next_state_batch,
):
# Training and updating Actor & Critic networks.
# See Pseudo Code.
with tf.GradientTape() as tape:
target_actions = target_actor(next_state_batch, training=True)
y = reward_batch + gamma * target_critic(
[next_state_batch, target_actions], training=True
)
critic_value = critic_model([state_batch, action_batch], training=True)
critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables)
critic_optimizer.apply_gradients(
zip(critic_grad, critic_model.trainable_variables)
)
with tf.GradientTape() as tape:
actions = actor_model(state_batch, training=True)
critic_value = critic_model([state_batch, actions], training=True)
# Used `-value` as we want to maximize the value given
# by the critic for our actions
actor_loss = -tf.math.reduce_mean(critic_value)
actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables)
actor_optimizer.apply_gradients(
zip(actor_grad, actor_model.trainable_variables)
)
# We compute the loss and update parameters
def learn(self):
# Get sampling range
record_range = min(self.buffer_counter, self.buffer_capacity)
# Randomly sample indices
batch_indices = np.random.choice(record_range, self.batch_size)
# Convert to tensors
state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])
reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
reward_batch = tf.cast(reward_batch, dtype=tf.float32)
next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])
self.update(state_batch, action_batch, reward_batch, next_state_batch)
# This updates the target network parameters slowly,
# based on rate `tau`, which is much less than one.
@tf.function
def update_target(target_weights, weights, tau):
for (a, b) in zip(target_weights, weights):
a.assign(b * tau + a * (1 - tau))
def get_actor():
# Initialize weights between -3e-3 and 3e-3
last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
inputs = layers.Input(shape=(num_states,))
out = layers.Dense(256, activation="relu")(inputs)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(4, activation="tanh", kernel_initializer=last_init)(out)
model = tf.keras.Model(inputs, outputs)
return model
def get_critic():
# State as input
state_input = layers.Input(shape=(num_states))
state_out = layers.Dense(128, activation="relu")(state_input)
state_out = layers.Dense(128, activation="relu")(state_out)
# Action as input
action_input = layers.Input(shape=(num_actions))
action_out = layers.Dense(64, activation="relu")(action_input)
# Both are passed through separate layers before concatenating
concat = layers.Concatenate()([state_out, action_out])
out = layers.Dense(256, activation="relu")(concat)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1)(out)
# Outputs a single value for a given state-action pair
model = tf.keras.Model([state_input, action_input], outputs)
return model
def policy(state, noise_object):
sampled_actions = tf.squeeze(actor_model(state))
noise = noise_object()
# Adding noise to action
sampled_actions = sampled_actions.numpy() + noise
sampled_actions[3] = np.clip(sampled_actions[3], 0, 1)
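# Discretize the direction output to a binary gear choice (1 = forward, 0 = reverse)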
sampled_actions[3] = 0 if sampled_actions[3] < 0.5 else 1
legal_action = [np.clip(sampled_actions[0], 0, 1), # brake
np.clip(sampled_actions[1], -1, 1), # steering
np.clip(sampled_actions[2], -1, 1), # throttle
sampled_actions[3]] # direction
return [np.squeeze(legal_action)]
def sim_start(): # Start the simulator
# print(pyautogui.position()) # (1125, 455)
pyautogui.click(1125, 455)
# time.sleep(1)
pyautogui.keyDown('altleft')
pyautogui.keyDown('p')
pyautogui.keyUp('altleft')
pyautogui.keyUp('p')
time.sleep(1)
pyautogui.click(1125, 455)
# connect to the AirSim simulator
client = airsim.CarClient()
client.confirmConnection()
client.enableApiControl(api_control)
print("API Control enabled: %s\n" % client.isApiControlEnabled())
car_controls = airsim.CarControls()
time.sleep(1)
return client, car_controls
def sim_stop(): # Stop the simulator
# print(pyautogui.position()) # (1125, 455)
pyautogui.click(1125, 455)
time.sleep(1)
# Quit the simulator
pyautogui.keyDown('esc')
pyautogui.keyUp('esc')
time.sleep(1)
def capture_goal(): # Convert the goal point's Unreal coordinates to AirSim coordinates
# Goal point coordinates as printed by Unreal
unreal_goals = [[600, 2600], [600, 2230], [600, 1800], [600, 1430], [600, 990], [600, 620], # right side
[-1200, 2600], [-1200, 2230], [-1200, 1800], [-1200, 1430], [-1200, 990]] # left side
# Goal point coordinates as reported through the AirSim API
airsim_goals = [[6, -14], [6, -17], [6, -22], [6, -25], [6, -30], [6, -33], # right side
[-7, -14], [-7, -17], [-7, -22], [-7, -25], [-7, -30]] # left side
# Capture a screenshot of the region where the goal coordinates are printed
img = pyautogui.screenshot('goal.png', region=(36, 90, 210, 15)) # assumes fullscreen (F11)
# Convert the coordinate screenshot to a string via OCR
goal_pos = pytesseract.image_to_string(Image.open('goal.png'))
# print(goal_pos[:-2])
# Split the x and y coordinates and convert the values to float
goal_pos = str.split(goal_pos[:-2], ' ')
x = str.split(goal_pos[0], '.')[0]
y = str.split(goal_pos[1], '.')[0]
x = int(float(x[2:]))
if y[0] == '¥': # handles cases where the OCR occasionally misreads this character
y = int(float(y[3:]))
else:
y = int(float(y[2:]))
goal_xy = []
for i in range(len(airsim_goals)):
if x == unreal_goals[i][0] and y == unreal_goals[i][1]:
# print('Goal x :', airsim_goals[i][0])
# print('Goal y :', airsim_goals[i][1])
goal_xy = airsim_goals[i]
print('Goal :', airsim_goals[i])
break
return goal_xy
def save_model():
# Save the weights
actor_model.save(
".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_actor_ep" + str(ep_cnt + 1) + ".h5")
critic_model.save(
".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_critic_ep" + str(ep_cnt + 1) + ".h5")
target_actor.save(".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_target_actor_ep" + str(
ep_cnt + 1) + ".h5")
target_critic.save(".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_target_critic_ep" + str(
ep_cnt + 1) + ".h5")
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'
std_dev = 0.2
ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
actor_model = get_actor()
critic_model = get_critic()
target_actor = get_actor()
target_critic = get_critic()
# Making the weights equal initially
target_actor.set_weights(actor_model.get_weights())
target_critic.set_weights(critic_model.get_weights())
# Learning rate for actor-critic models
critic_lr = 0.002
actor_lr = 0.001
critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
total_episodes = 100
# Discount factor for future rewards
gamma = 0.99
# Used to update target networks
tau = 0.005
buffer = Buffer(50000, 64)
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# On the first run, stop and restart the simulator because of a reset issue after a collision object is detected
client, car_controls = sim_start()
collision = (client.simGetCollisionInfo().object_name).lower()
while collision.find('pipesmall') < 0 and collision != '':
sim_stop()
client, car_controls = sim_start()
time.sleep(2)
ep_cnt = 0
tracking_img = []
period = 5 # episode interval for saving the driving-path tracking image
# Takes about 4 min to train
for ep in range(total_episodes):
ep_cnt = ep
# if ep == 0 or ep + 1 % period == 0:
tracking_img = cv.imread('map.png', cv.IMREAD_GRAYSCALE)
# prev_state = env.reset()
prev_state = [client.getCarState().kinematics_estimated.position.x_val, # vehicle position x coordinate
client.getCarState().kinematics_estimated.position.y_val, # vehicle position y coordinate
client.getCarState().speed, # vehicle speed
client.getCarControls().brake, # brake
client.getCarControls().steering, # steering direction
client.getCarControls().throttle, # throttle
client.getCarControls().manual_gear, # reverse gear
client.getDistanceSensorData("Distance1").distance, # front distance sensor
client.getDistanceSensorData("Distance2").distance, # right distance sensor
client.getDistanceSensorData("Distance3").distance, # rear distance sensor
client.getDistanceSensorData("Distance4").distance] # left distance sensor
episodic_reward = 0
is_captured = 0
count = 0
start_time = 0
end_time = 0
total_steps = 0
reward = 0
done = False
while True:
total_steps += 1
if is_captured == 0:
goal = capture_goal()
is_captured = 1
tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
action = policy(tf_prev_state, ou_noise)
action = tf.squeeze(action)
print('episode :', ep + 1, '|',
'brake :', round(float(action[0]), 3), '|', 'steering :', round(float(action[1]), 3), '|',
'throttle :', round(float(abs(action[2])), 3), '|', 'direction :', round(float(action[3]), 3), '|',
'total_reward :', round(episodic_reward, 6))
# car_controls.brake = 1 if float(action[0]) > 0.5 else 0
# car_controls.steering = float(action[1])
# car_controls.throttle = float(abs(action[2]))
# if action[3]:
# car_controls.manual_gear = 0
# car_controls.is_manual_gear = False
# else:
# car_controls.manual_gear = -1
# car_controls.is_manual_gear = True
#
# client.setCarControls(car_controls)
# Receive state and reward from environment.
# state, reward, done, info = env.step(action)
state = [client.getCarState().kinematics_estimated.position.x_val, # vehicle position x coordinate
client.getCarState().kinematics_estimated.position.y_val, # vehicle position y coordinate
client.getCarState().speed, # vehicle speed
client.getCarControls().brake, # brake
client.getCarControls().steering, # steering direction
client.getCarControls().throttle, # throttle
client.getCarControls().manual_gear, # reverse gear
client.getDistanceSensorData("Distance1").distance, # front distance sensor
client.getDistanceSensorData("Distance2").distance, # right distance sensor
client.getDistanceSensorData("Distance3").distance, # rear distance sensor
client.getDistanceSensorData("Distance4").distance] # left distance sensor
# Record the vehicle's driving path
# if ep == 0 or ep+1 % period == 0:
tracking_img = tracking.tracking(tracking_img, state[0], state[1])
# reward = 1/1000 if ((client.simGetCollisionInfo().object_name).lower()).find('pipesmall') >= 0 else -1
collision = (client.simGetCollisionInfo().object_name).lower()
if collision.find('pipesmall') >= 0 or collision == '':
done = False
else:
print('Episode', ep + 1, ': Crash!!')
# reward += -1
reward = -100
done = True
if (goal[0] > 0):
if (6 < client.getCarState().kinematics_estimated.position.x_val < 8 and
goal[1] - 1 < client.getCarState().kinematics_estimated.position.y_val < goal[1] + 1):
print('Episode', ep + 1, ': Success!!')
# reward += 1
reward = 100
done = True
elif (goal[0] < 0):
if (-9 < client.getCarState().kinematics_estimated.position.x_val < -7 and
goal[1] - 1 < client.getCarState().kinematics_estimated.position.y_val < goal[1] + 1):
print('Episode', ep + 1, ': Success!!')
# reward += 1
reward = 100
done = True
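# If the car has not moved (position unchanged to two decimal places), apply a small penalty; end the episode with a larger penalty after 10 seconds of standing still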
if round(prev_state[0], 2) == round(state[0], 2) and round(prev_state[1], 2) == round(state[1], 2):
reward = -1 / 1000
if count == 0:
count += 1
start_time = time.time()
end_time = time.time()
else:
count += 1
end_time = time.time()
if end_time - start_time >= 10:
print('Episode', ep + 1, ": Don't just stand there!!")
count = 0
# reward += -1
reward = -200
done = True
else:
reward = 1 / 100000
count = 0
buffer.record((prev_state, action, reward, state))
episodic_reward += reward
buffer.learn()
update_target(target_actor.variables, actor_model.variables, tau)
update_target(target_critic.variables, critic_model.variables, tau)
# End this episode when `done` is True
if done:
print('Final Reward :', episodic_reward)
print('Total Steps :', total_steps)
if ep == 0 or (ep + 1) % period == 0:
cv.imwrite(".\\tracking\\" + str(start_ymd) + '_' + str(start_hm) + "\\ep" + str(ep + 1) + ".png",
tracking_img)
print('tracking image saved')
is_captured = 0
sim_stop()
sim_stop()
if ep + 1 == total_episodes:
break
client, car_controls = sim_start()
sim_stop()
sim_stop()
c | ()
| lient, car_controls = sim_start()
break
prev_state = state
ep_reward_list.append(episodic_reward)
# Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
print("Episode * {} * Avg Reward is ==> {}".format(ep + 1, avg_reward))
avg_reward_list.append(avg_reward)
save_model()
print('model weight saved')
sim_stop()
sim_stop()
# Plotting graph
# Episodes versus Avg. Rewards
plt.plot(avg_reward_list)
plt.xlabel("Episode")
plt.ylabel("Avg. Epsiodic Reward")
ct = time.localtime()
plt.savefig('.\\graph\\' + str(start_ymd) + '_' + str(start_hm) + '.png')
print('graph saved')
plt.show | conditional_block |
imitation_ddpg_model.py | import gym
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
import setup_path
import airsim
import time
import os
import pyautogui
import pytesseract
from PIL import Image
import tracking
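# Enable dynamic GPU memory growth so TensorFlow does not reserve all GPU memory up front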
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
start_ymd = int(float(time.strftime('%y%m%d')))
start_hm = int(float(time.strftime('%H%M')))
start_time = str(start_ymd) + '_' + str(start_hm)
os.makedirs('./tracking/' + start_time, exist_ok=True)
os.makedirs('./save_models/' + start_time, exist_ok=True)
num_states = 11
num_actions = 4
upper_bound = 1
lower_bound = 0
api_control = False
class OUActionNoise:
def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):
self.theta = theta
self.mean = mean
self.std_dev = std_deviation
self.dt = dt
self.x_initial = x_initial
self.reset()
def __call__(self):
# Formula taken from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.
x = (
self.x_prev
+ self.theta * (self.mean - self.x_prev) * self.dt
+ self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)
)
# Store x into x_prev
# Makes next noise dependent on current one
self.x_prev = x
return x
def | (self):
if self.x_initial is not None:
self.x_prev = self.x_initial
else:
self.x_prev = np.zeros_like(self.mean)
class Buffer:
def __init__(self, buffer_capacity=100000, batch_size=64):
# Number of "experiences" to store at max
self.buffer_capacity = buffer_capacity
# Num of tuples to train on.
self.batch_size = batch_size
# It tells us the number of times record() was called.
self.buffer_counter = 0
# Instead of a list of tuples as in the classic experience replay setup,
# we use a separate np.array for each element of the tuple
self.state_buffer = np.zeros((self.buffer_capacity, num_states))
self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
self.reward_buffer = np.zeros((self.buffer_capacity, 1))
self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))
# Takes (s,a,r,s') observation tuple as input
def record(self, obs_tuple):
# Set index to zero if buffer_capacity is exceeded,
# replacing old records
index = self.buffer_counter % self.buffer_capacity
self.state_buffer[index] = obs_tuple[0]
self.action_buffer[index] = obs_tuple[1]
self.reward_buffer[index] = obs_tuple[2]
self.next_state_buffer[index] = obs_tuple[3]
self.buffer_counter += 1
# Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
# TensorFlow to build a static graph out of the logic and computations in our function.
# This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
@tf.function
def update(
self, state_batch, action_batch, reward_batch, next_state_batch,
):
# Training and updating Actor & Critic networks.
# See Pseudo Code.
with tf.GradientTape() as tape:
target_actions = target_actor(next_state_batch, training=True)
y = reward_batch + gamma * target_critic(
[next_state_batch, target_actions], training=True
)
critic_value = critic_model([state_batch, action_batch], training=True)
critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables)
critic_optimizer.apply_gradients(
zip(critic_grad, critic_model.trainable_variables)
)
with tf.GradientTape() as tape:
actions = actor_model(state_batch, training=True)
critic_value = critic_model([state_batch, actions], training=True)
# Used `-value` as we want to maximize the value given
# by the critic for our actions
actor_loss = -tf.math.reduce_mean(critic_value)
actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables)
actor_optimizer.apply_gradients(
zip(actor_grad, actor_model.trainable_variables)
)
# We compute the loss and update parameters
def learn(self):
# Get sampling range
record_range = min(self.buffer_counter, self.buffer_capacity)
# Randomly sample indices
batch_indices = np.random.choice(record_range, self.batch_size)
# Convert to tensors
state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])
reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
reward_batch = tf.cast(reward_batch, dtype=tf.float32)
next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])
self.update(state_batch, action_batch, reward_batch, next_state_batch)
# This updates the target network parameters slowly,
# based on rate `tau`, which is much less than one.
@tf.function
def update_target(target_weights, weights, tau):
for (a, b) in zip(target_weights, weights):
a.assign(b * tau + a * (1 - tau))
def get_actor():
# Initialize weights between -3e-3 and 3e-3
last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
inputs = layers.Input(shape=(num_states,))
out = layers.Dense(256, activation="relu")(inputs)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(4, activation="tanh", kernel_initializer=last_init)(out)
model = tf.keras.Model(inputs, outputs)
return model
def get_critic():
# State as input
state_input = layers.Input(shape=(num_states))
state_out = layers.Dense(128, activation="relu")(state_input)
state_out = layers.Dense(128, activation="relu")(state_out)
# Action as input
action_input = layers.Input(shape=(num_actions))
action_out = layers.Dense(64, activation="relu")(action_input)
# Both are passed through separate layers before concatenating
concat = layers.Concatenate()([state_out, action_out])
out = layers.Dense(256, activation="relu")(concat)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1)(out)
# Outputs a single value for a given state-action pair
model = tf.keras.Model([state_input, action_input], outputs)
return model
def policy(state, noise_object):
sampled_actions = tf.squeeze(actor_model(state))
noise = noise_object()
# Adding noise to action
sampled_actions = sampled_actions.numpy() + noise
sampled_actions[3] = np.clip(sampled_actions[3], 0, 1)
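# Discretize the direction output to a binary gear choice (1 = forward, 0 = reverse)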
sampled_actions[3] = 0 if sampled_actions[3] < 0.5 else 1
legal_action = [np.clip(sampled_actions[0], 0, 1), # brake
np.clip(sampled_actions[1], -1, 1), # steering
np.clip(sampled_actions[2], -1, 1), # throttle
sampled_actions[3]] # direction
return [np.squeeze(legal_action)]
def sim_start(): # Start the simulator
# print(pyautogui.position()) # (1125, 455)
pyautogui.click(1125, 455)
# time.sleep(1)
pyautogui.keyDown('altleft')
pyautogui.keyDown('p')
pyautogui.keyUp('altleft')
pyautogui.keyUp('p')
time.sleep(1)
pyautogui.click(1125, 455)
# connect to the AirSim simulator
client = airsim.CarClient()
client.confirmConnection()
client.enableApiControl(api_control)
print("API Control enabled: %s\n" % client.isApiControlEnabled())
car_controls = airsim.CarControls()
time.sleep(1)
return client, car_controls
def sim_stop(): # Stop the simulator
# print(pyautogui.position()) # (1125, 455)
pyautogui.click(1125, 455)
time.sleep(1)
# Quit the simulator
pyautogui.keyDown('esc')
pyautogui.keyUp('esc')
time.sleep(1)
def capture_goal(): # Convert the goal point's Unreal coordinates to AirSim coordinates
# Goal point coordinates as printed by Unreal
unreal_goals = [[600, 2600], [600, 2230], [600, 1800], [600, 1430], [600, 990], [600, 620], # right side
[-1200, 2600], [-1200, 2230], [-1200, 1800], [-1200, 1430], [-1200, 990]] # left side
# Goal point coordinates as reported through the AirSim API
airsim_goals = [[6, -14], [6, -17], [6, -22], [6, -25], [6, -30], [6, -33], # right side
[-7, -14], [-7, -17], [-7, -22], [-7, -25], [-7, -30]] # left side
# Capture a screenshot of the region where the goal coordinates are printed
img = pyautogui.screenshot('goal.png', region=(36, 90, 210, 15)) # assumes fullscreen (F11)
# Convert the coordinate screenshot to a string via OCR
goal_pos = pytesseract.image_to_string(Image.open('goal.png'))
# print(goal_pos[:-2])
# Split the x and y coordinates and convert the values to float
goal_pos = str.split(goal_pos[:-2], ' ')
x = str.split(goal_pos[0], '.')[0]
y = str.split(goal_pos[1], '.')[0]
x = int(float(x[2:]))
if y[0] == '¥': # handles cases where the OCR occasionally misreads this character
y = int(float(y[3:]))
else:
y = int(float(y[2:]))
goal_xy = []
for i in range(len(airsim_goals)):
if x == unreal_goals[i][0] and y == unreal_goals[i][1]:
# print('Goal x :', airsim_goals[i][0])
# print('Goal y :', airsim_goals[i][1])
goal_xy = airsim_goals[i]
print('Goal :', airsim_goals[i])
break
return goal_xy
def save_model():
# Save the weights
actor_model.save(
".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_actor_ep" + str(ep_cnt + 1) + ".h5")
critic_model.save(
".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_critic_ep" + str(ep_cnt + 1) + ".h5")
target_actor.save(".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_target_actor_ep" + str(
ep_cnt + 1) + ".h5")
target_critic.save(".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_target_critic_ep" + str(
ep_cnt + 1) + ".h5")
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'
std_dev = 0.2
ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
actor_model = get_actor()
critic_model = get_critic()
target_actor = get_actor()
target_critic = get_critic()
# Making the weights equal initially
target_actor.set_weights(actor_model.get_weights())
target_critic.set_weights(critic_model.get_weights())
# Learning rate for actor-critic models
critic_lr = 0.002
actor_lr = 0.001
critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
total_episodes = 100
# Discount factor for future rewards
gamma = 0.99
# Used to update target networks
tau = 0.005
buffer = Buffer(50000, 64)
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# On the first run, stop and restart the simulator because of a reset issue after a collision object is detected
client, car_controls = sim_start()
collision = (client.simGetCollisionInfo().object_name).lower()
while collision.find('pipesmall') < 0 and collision != '':
sim_stop()
client, car_controls = sim_start()
time.sleep(2)
ep_cnt = 0
tracking_img = []
period = 5 # episode interval for saving the driving-path tracking image
# Takes about 4 min to train
for ep in range(total_episodes):
ep_cnt = ep
# if ep == 0 or ep + 1 % period == 0:
tracking_img = cv.imread('map.png', cv.IMREAD_GRAYSCALE)
# prev_state = env.reset()
prev_state = [client.getCarState().kinematics_estimated.position.x_val, # vehicle position x coordinate
client.getCarState().kinematics_estimated.position.y_val, # vehicle position y coordinate
client.getCarState().speed, # vehicle speed
client.getCarControls().brake, # brake
client.getCarControls().steering, # steering direction
client.getCarControls().throttle, # throttle
client.getCarControls().manual_gear, # reverse gear
client.getDistanceSensorData("Distance1").distance, # front distance sensor
client.getDistanceSensorData("Distance2").distance, # right distance sensor
client.getDistanceSensorData("Distance3").distance, # rear distance sensor
client.getDistanceSensorData("Distance4").distance] # left distance sensor
episodic_reward = 0
is_captured = 0
count = 0
start_time = 0
end_time = 0
total_steps = 0
reward = 0
done = False
while True:
total_steps += 1
if is_captured == 0:
goal = capture_goal()
is_captured = 1
tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
action = policy(tf_prev_state, ou_noise)
action = tf.squeeze(action)
print('episode :', ep + 1, '|',
'brake :', round(float(action[0]), 3), '|', 'steering :', round(float(action[1]), 3), '|',
'throttle :', round(float(abs(action[2])), 3), '|', 'direction :', round(float(action[3]), 3), '|',
'total_reward :', round(episodic_reward, 6))
# car_controls.brake = 1 if float(action[0]) > 0.5 else 0
# car_controls.steering = float(action[1])
# car_controls.throttle = float(abs(action[2]))
# if action[3]:
# car_controls.manual_gear = 0
# car_controls.is_manual_gear = False
# else:
# car_controls.manual_gear = -1
# car_controls.is_manual_gear = True
#
# client.setCarControls(car_controls)
# Receive state and reward from environment.
# state, reward, done, info = env.step(action)
state = [client.getCarState().kinematics_estimated.position.x_val, # vehicle position x coordinate
client.getCarState().kinematics_estimated.position.y_val, # vehicle position y coordinate
client.getCarState().speed, # vehicle speed
client.getCarControls().brake, # brake
client.getCarControls().steering, # steering direction
client.getCarControls().throttle, # throttle
client.getCarControls().manual_gear, # reverse gear
client.getDistanceSensorData("Distance1").distance, # front distance sensor
client.getDistanceSensorData("Distance2").distance, # right distance sensor
client.getDistanceSensorData("Distance3").distance, # rear distance sensor
client.getDistanceSensorData("Distance4").distance] # left distance sensor
# Record the vehicle's driving path
# if ep == 0 or ep+1 % period == 0:
tracking_img = tracking.tracking(tracking_img, state[0], state[1])
# reward = 1/1000 if ((client.simGetCollisionInfo().object_name).lower()).find('pipesmall') >= 0 else -1
collision = (client.simGetCollisionInfo().object_name).lower()
if collision.find('pipesmall') >= 0 or collision == '':
done = False
else:
print('Episode', ep + 1, ': Crash!!')
# reward += -1
reward = -100
done = True
if (goal[0] > 0):
if (6 < client.getCarState().kinematics_estimated.position.x_val < 8 and
goal[1] - 1 < client.getCarState().kinematics_estimated.position.y_val < goal[1] + 1):
print('Episode', ep + 1, ': Success!!')
# reward += 1
reward = 100
done = True
elif (goal[0] < 0):
if (-9 < client.getCarState().kinematics_estimated.position.x_val < -7 and
goal[1] - 1 < client.getCarState().kinematics_estimated.position.y_val < goal[1] + 1):
print('Episode', ep + 1, ': Success!!')
# reward += 1
reward = 100
done = True
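# If the car has not moved (position unchanged to two decimal places), apply a small penalty; end the episode with a larger penalty after 10 seconds of standing still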
if round(prev_state[0], 2) == round(state[0], 2) and round(prev_state[1], 2) == round(state[1], 2):
reward = -1 / 1000
if count == 0:
count += 1
start_time = time.time()
end_time = time.time()
else:
count += 1
end_time = time.time()
if end_time - start_time >= 10:
print('Episode', ep + 1, ": Don't just stand there!!")
count = 0
# reward += -1
reward = -200
done = True
else:
reward = 1 / 100000
count = 0
buffer.record((prev_state, action, reward, state))
episodic_reward += reward
buffer.learn()
update_target(target_actor.variables, actor_model.variables, tau)
update_target(target_critic.variables, critic_model.variables, tau)
# End this episode when `done` is True
if done:
print('Final Reward :', episodic_reward)
print('Total Steps :', total_steps)
if ep == 0 or (ep + 1) % period == 0:
cv.imwrite(".\\tracking\\" + str(start_ymd) + '_' + str(start_hm) + "\\ep" + str(ep + 1) + ".png",
tracking_img)
print('tracking image saved')
is_captured = 0
sim_stop()
sim_stop()
if ep + 1 == total_episodes:
break
client, car_controls = sim_start()
sim_stop()
sim_stop()
client, car_controls = sim_start()
break
prev_state = state
ep_reward_list.append(episodic_reward)
# Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
print("Episode * {} * Avg Reward is ==> {}".format(ep + 1, avg_reward))
avg_reward_list.append(avg_reward)
save_model()
print('model weight saved')
sim_stop()
sim_stop()
# Plotting graph
# Episodes versus Avg. Rewards
plt.plot(avg_reward_list)
plt.xlabel("Episode")
plt.ylabel("Avg. Epsiodic Reward")
ct = time.localtime()
plt.savefig('.\\graph\\' + str(start_ymd) + '_' + str(start_hm) + '.png')
print('graph saved')
plt.show()
| reset | identifier_name |
reading.rs | use crate::sector::{
sector_record_chunks_size, sector_size, RecordMetadata, SectorContentsMap,
SectorContentsMapFromBytesError, SectorMetadataChecksummed,
};
use parity_scale_codec::Decode;
use rayon::prelude::*;
use std::mem::ManuallyDrop;
use std::simd::Simd;
use subspace_core_primitives::crypto::{blake3_hash, Scalar};
use subspace_core_primitives::{
Piece, PieceOffset, Record, RecordCommitment, RecordWitness, SBucket, SectorId,
};
use subspace_erasure_coding::ErasureCoding;
use subspace_proof_of_space::{Quality, Table, TableGenerator};
use thiserror::Error;
use tracing::debug;
/// Errors that happen during reading
#[derive(Debug, Error)]
pub enum ReadingError {
/// Wrong sector size
#[error("Wrong sector size: expected {expected}, actual {actual}")]
WrongSectorSize {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to read chunk.
///
/// This is an implementation bug, most likely due to mismatch between sector contents map and
/// other farming parameters.
#[error("Failed to read chunk at location {chunk_location}")]
FailedToReadChunk {
/// Chunk location
chunk_location: usize,
},
/// Invalid chunk, possible disk corruption
#[error(
"Invalid chunk at location {chunk_location} s-bucket {s_bucket} encoded \
{encoded_chunk_used}, possible disk corruption: {error}"
)]
InvalidChunk {
/// S-bucket
s_bucket: SBucket,
/// Indicates whether chunk was encoded
encoded_chunk_used: bool,
/// Chunk location
chunk_location: usize,
/// Lower-level error
error: String,
},
/// Failed to erasure-decode record
#[error("Failed to erasure-decode record at offset {piece_offset}: {error}")]
FailedToErasureDecodeRecord {
/// Piece offset
piece_offset: PieceOffset,
/// Lower-level error
error: String,
},
/// Wrong record size after decoding
#[error("Wrong record size after decoding: expected {expected}, actual {actual}")]
WrongRecordSizeAfterDecoding {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to decode sector contents map
#[error("Failed to decode sector contents map: {0}")]
FailedToDecodeSectorContentsMap(#[from] SectorContentsMapFromBytesError),
/// Checksum mismatch
#[error("Checksum mismatch")]
ChecksumMismatch,
}
/// Record contained in the plot
#[derive(Debug, Clone)]
pub struct PlotRecord {
/// Record scalars
pub scalars: Box<[Scalar; Record::NUM_CHUNKS]>,
/// Record commitment
pub commitment: RecordCommitment,
/// Record witness
pub witness: RecordWitness,
}
/// Read sector record chunks; only plotted s-buckets are returned (in decoded form)
pub fn read_sector_record_chunks<PosTable>(
piece_offset: PieceOffset,
pieces_in_sector: u16,
s_bucket_offsets: &[u32; Record::NUM_S_BUCKETS],
sector_contents_map: &SectorContentsMap,
pos_table: &PosTable,
sector: &[u8],
) -> Result<Box<[Option<Scalar>; Record::NUM_S_BUCKETS]>, ReadingError>
where
PosTable: Table,
{
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let mut record_chunks = vec![None; Record::NUM_S_BUCKETS];
record_chunks
.par_iter_mut()
.zip(sector_contents_map.par_iter_record_chunk_to_plot(piece_offset))
.zip(
(u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
.into_par_iter()
.map(SBucket::from)
.zip(s_bucket_offsets.par_iter()),
)
.try_for_each(
|((maybe_record_chunk, maybe_chunk_details), (s_bucket, &s_bucket_offset))| {
let (chunk_offset, encoded_chunk_used) = match maybe_chunk_details {
Some(chunk_details) => chunk_details,
None => {
return Ok(());
}
};
let chunk_location = chunk_offset + s_bucket_offset as usize;
let mut record_chunk = sector[SectorContentsMap::encoded_size(pieces_in_sector)..]
.array_chunks::<{ Scalar::FULL_BYTES }>()
.nth(chunk_location)
.copied()
.ok_or(ReadingError::FailedToReadChunk { chunk_location })?;
// Decode chunk if necessary
if encoded_chunk_used {
let quality = pos_table
.find_quality(s_bucket.into())
.expect("encoded_chunk_used implies quality exists for this chunk; qed");
record_chunk = Simd::to_array(
Simd::from(record_chunk) ^ Simd::from(quality.create_proof().hash()),
);
}
maybe_record_chunk.replace(Scalar::try_from(record_chunk).map_err(|error| {
ReadingError::InvalidChunk {
s_bucket,
encoded_chunk_used,
chunk_location,
error,
}
})?);
Ok::<_, ReadingError>(())
},
)?;
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, layout is exactly what we need here
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Option<Scalar>; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
/// Given sector record chunks, recover extended record chunks (both source and parity)
pub fn recover_extended_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<Box<[Scalar; Record::NUM_S_BUCKETS]>, ReadingError> {
// Restore source record scalars
let record_chunks = erasure_coding
.recover(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_S_BUCKETS {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_S_BUCKETS,
actual: record_chunks.len(),
});
}
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, size of the data checked above
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Scalar; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
/// Given sector record chunks, recover source record chunks in the form of an iterator.
pub fn recover_source_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<impl ExactSizeIterator<Item = Scalar>, ReadingError> {
// Restore source record scalars
let record_chunks = erasure_coding
.recover_source(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_CHUNKS |
Ok(record_chunks)
}
/// Read metadata (commitment and witness) for record
pub(crate) fn read_record_metadata(
piece_offset: PieceOffset,
pieces_in_sector: u16,
sector: &[u8],
) -> Result<RecordMetadata, ReadingError> {
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_metadata_start = SectorContentsMap::encoded_size(pieces_in_sector)
+ sector_record_chunks_size(pieces_in_sector);
// Move to the beginning of the commitment and witness we care about
let record_metadata_bytes = §or[sector_metadata_start..]
[RecordMetadata::encoded_size() * usize::from(piece_offset)..];
let record_metadata = RecordMetadata::decode(&mut &*record_metadata_bytes).expect(
"Length is correct and checked above, contents doesn't have specific structure to \
it; qed",
);
Ok(record_metadata)
}
/// Read piece from sector
pub fn read_piece<PosTable>(
piece_offset: PieceOffset,
sector_id: &SectorId,
sector_metadata: &SectorMetadataChecksummed,
sector: &[u8],
erasure_coding: &ErasureCoding,
table_generator: &mut PosTable::Generator,
) -> Result<Piece, ReadingError>
where
PosTable: Table,
{
let pieces_in_sector = sector_metadata.pieces_in_sector;
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_contents_map = {
SectorContentsMap::from_bytes(
§or[..SectorContentsMap::encoded_size(pieces_in_sector)],
pieces_in_sector,
)?
};
// Restore source record scalars
let record_chunks = recover_source_record_chunks(
&*read_sector_record_chunks(
piece_offset,
pieces_in_sector,
§or_metadata.s_bucket_offsets(),
§or_contents_map,
&table_generator.generate(
§or_id.derive_evaluation_seed(piece_offset, sector_metadata.history_size),
),
sector,
)?,
piece_offset,
erasure_coding,
)?;
let record_metadata = read_record_metadata(piece_offset, pieces_in_sector, sector)?;
let mut piece = Piece::default();
piece
.record_mut()
.iter_mut()
.zip(record_chunks)
.for_each(|(output, input)| {
*output = input.to_bytes();
});
*piece.commitment_mut() = record_metadata.commitment;
*piece.witness_mut() = record_metadata.witness;
// Verify checksum
let actual_checksum = blake3_hash(piece.as_ref());
if actual_checksum != record_metadata.piece_checksum {
debug!(
?sector_id,
%piece_offset,
actual_checksum = %hex::encode(actual_checksum),
expected_checksum = %hex::encode(record_metadata.piece_checksum),
"Hash doesn't match, plotted piece is corrupted"
);
return Err(ReadingError::ChecksumMismatch);
}
Ok(piece)
}
| {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_CHUNKS,
actual: record_chunks.len(),
});
} | conditional_block |
reading.rs | use crate::sector::{
sector_record_chunks_size, sector_size, RecordMetadata, SectorContentsMap,
SectorContentsMapFromBytesError, SectorMetadataChecksummed,
};
use parity_scale_codec::Decode;
use rayon::prelude::*;
use std::mem::ManuallyDrop;
use std::simd::Simd;
use subspace_core_primitives::crypto::{blake3_hash, Scalar};
use subspace_core_primitives::{
Piece, PieceOffset, Record, RecordCommitment, RecordWitness, SBucket, SectorId,
};
use subspace_erasure_coding::ErasureCoding;
use subspace_proof_of_space::{Quality, Table, TableGenerator};
use thiserror::Error;
use tracing::debug;
/// Errors that happen during reading
#[derive(Debug, Error)]
pub enum ReadingError {
/// Wrong sector size
#[error("Wrong sector size: expected {expected}, actual {actual}")]
WrongSectorSize {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to read chunk.
///
/// This is an implementation bug, most likely due to mismatch between sector contents map and
/// other farming parameters.
#[error("Failed to read chunk at location {chunk_location}")]
FailedToReadChunk {
/// Chunk location
chunk_location: usize,
},
/// Invalid chunk, possible disk corruption
#[error(
"Invalid chunk at location {chunk_location} s-bucket {s_bucket} encoded \
{encoded_chunk_used}, possible disk corruption: {error}"
)]
InvalidChunk {
/// S-bucket
s_bucket: SBucket,
/// Indicates whether chunk was encoded
encoded_chunk_used: bool,
/// Chunk location
chunk_location: usize,
/// Lower-level error
error: String,
},
/// Failed to erasure-decode record
#[error("Failed to erasure-decode record at offset {piece_offset}: {error}")]
FailedToErasureDecodeRecord {
/// Piece offset
piece_offset: PieceOffset,
/// Lower-level error
error: String,
},
/// Wrong record size after decoding
#[error("Wrong record size after decoding: expected {expected}, actual {actual}")]
WrongRecordSizeAfterDecoding {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to decode sector contents map
#[error("Failed to decode sector contents map: {0}")]
FailedToDecodeSectorContentsMap(#[from] SectorContentsMapFromBytesError),
/// Checksum mismatch
#[error("Checksum mismatch")]
ChecksumMismatch,
}
/// Record contained in the plot
#[derive(Debug, Clone)]
pub struct PlotRecord {
/// Record scalars
pub scalars: Box<[Scalar; Record::NUM_CHUNKS]>,
/// Record commitment
pub commitment: RecordCommitment,
/// Record witness
pub witness: RecordWitness,
}
/// Read sector record chunks; only plotted s-buckets are returned (in decoded form)
pub fn read_sector_record_chunks<PosTable>(
piece_offset: PieceOffset,
pieces_in_sector: u16,
s_bucket_offsets: &[u32; Record::NUM_S_BUCKETS],
sector_contents_map: &SectorContentsMap,
pos_table: &PosTable,
sector: &[u8],
) -> Result<Box<[Option<Scalar>; Record::NUM_S_BUCKETS]>, ReadingError>
where
PosTable: Table,
{
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let mut record_chunks = vec![None; Record::NUM_S_BUCKETS];
record_chunks
.par_iter_mut()
.zip(sector_contents_map.par_iter_record_chunk_to_plot(piece_offset))
.zip(
(u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
.into_par_iter()
.map(SBucket::from)
.zip(s_bucket_offsets.par_iter()),
)
.try_for_each(
|((maybe_record_chunk, maybe_chunk_details), (s_bucket, &s_bucket_offset))| {
let (chunk_offset, encoded_chunk_used) = match maybe_chunk_details {
Some(chunk_details) => chunk_details,
None => {
return Ok(());
}
};
let chunk_location = chunk_offset + s_bucket_offset as usize;
let mut record_chunk = sector[SectorContentsMap::encoded_size(pieces_in_sector)..]
.array_chunks::<{ Scalar::FULL_BYTES }>()
.nth(chunk_location)
.copied()
.ok_or(ReadingError::FailedToReadChunk { chunk_location })?;
// Decode chunk if necessary
if encoded_chunk_used {
let quality = pos_table
.find_quality(s_bucket.into())
.expect("encoded_chunk_used implies quality exists for this chunk; qed");
record_chunk = Simd::to_array(
Simd::from(record_chunk) ^ Simd::from(quality.create_proof().hash()), | ReadingError::InvalidChunk {
s_bucket,
encoded_chunk_used,
chunk_location,
error,
}
})?);
Ok::<_, ReadingError>(())
},
)?;
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, layout is exactly what we need here
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Option<Scalar>; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
/// Given sector record chunks, recover extended record chunks (both source and parity)
pub fn recover_extended_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<Box<[Scalar; Record::NUM_S_BUCKETS]>, ReadingError> {
// Restore source record scalars
let record_chunks = erasure_coding
.recover(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_S_BUCKETS {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_S_BUCKETS,
actual: record_chunks.len(),
});
}
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, size of the data checked above
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Scalar; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
/// Given sector record chunks, recover source record chunks in the form of an iterator.
pub fn recover_source_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<impl ExactSizeIterator<Item = Scalar>, ReadingError> {
// Restore source record scalars
let record_chunks = erasure_coding
.recover_source(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_CHUNKS {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_CHUNKS,
actual: record_chunks.len(),
});
}
Ok(record_chunks)
}
/// Read metadata (commitment and witness) for record
pub(crate) fn read_record_metadata(
piece_offset: PieceOffset,
pieces_in_sector: u16,
sector: &[u8],
) -> Result<RecordMetadata, ReadingError> {
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_metadata_start = SectorContentsMap::encoded_size(pieces_in_sector)
+ sector_record_chunks_size(pieces_in_sector);
// Move to the beginning of the commitment and witness we care about
let record_metadata_bytes = §or[sector_metadata_start..]
[RecordMetadata::encoded_size() * usize::from(piece_offset)..];
let record_metadata = RecordMetadata::decode(&mut &*record_metadata_bytes).expect(
"Length is correct and checked above, contents doesn't have specific structure to \
it; qed",
);
Ok(record_metadata)
}
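// Editorial note (layout summary, inferred from the offsets computed in this module): a
// plotted sector begins with three consecutive regions:
//   1. sector contents map — SectorContentsMap::encoded_size(pieces_in_sector) bytes
//   2. record chunks for all pieces — sector_record_chunks_size(pieces_in_sector) bytes
//   3. per-record metadata — RecordMetadata::encoded_size() bytes for each piece offset
// Any trailing data (such as a sector-level checksum) is not touched here.
// `read_record_metadata` above skips regions 1 and 2, then indexes region 3 by piece offset,
// which is why `sector_metadata_start` is the sum of the first two sizes.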
/// Read piece from sector
pub fn read_piece<PosTable>(
piece_offset: PieceOffset,
sector_id: &SectorId,
sector_metadata: &SectorMetadataChecksummed,
sector: &[u8],
erasure_coding: &ErasureCoding,
table_generator: &mut PosTable::Generator,
) -> Result<Piece, ReadingError>
where
PosTable: Table,
{
let pieces_in_sector = sector_metadata.pieces_in_sector;
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_contents_map = {
SectorContentsMap::from_bytes(
§or[..SectorContentsMap::encoded_size(pieces_in_sector)],
pieces_in_sector,
)?
};
// Restore source record scalars
let record_chunks = recover_source_record_chunks(
&*read_sector_record_chunks(
piece_offset,
pieces_in_sector,
§or_metadata.s_bucket_offsets(),
§or_contents_map,
&table_generator.generate(
§or_id.derive_evaluation_seed(piece_offset, sector_metadata.history_size),
),
sector,
)?,
piece_offset,
erasure_coding,
)?;
let record_metadata = read_record_metadata(piece_offset, pieces_in_sector, sector)?;
let mut piece = Piece::default();
piece
.record_mut()
.iter_mut()
.zip(record_chunks)
.for_each(|(output, input)| {
*output = input.to_bytes();
});
*piece.commitment_mut() = record_metadata.commitment;
*piece.witness_mut() = record_metadata.witness;
// Verify checksum
let actual_checksum = blake3_hash(piece.as_ref());
if actual_checksum != record_metadata.piece_checksum {
debug!(
?sector_id,
%piece_offset,
actual_checksum = %hex::encode(actual_checksum),
expected_checksum = %hex::encode(record_metadata.piece_checksum),
"Hash doesn't match, plotted piece is corrupted"
);
return Err(ReadingError::ChecksumMismatch);
}
Ok(piece)
} | );
}
maybe_record_chunk.replace(Scalar::try_from(record_chunk).map_err(|error| { | random_line_split |
reading.rs | use crate::sector::{
sector_record_chunks_size, sector_size, RecordMetadata, SectorContentsMap,
SectorContentsMapFromBytesError, SectorMetadataChecksummed,
};
use parity_scale_codec::Decode;
use rayon::prelude::*;
use std::mem::ManuallyDrop;
use std::simd::Simd;
use subspace_core_primitives::crypto::{blake3_hash, Scalar};
use subspace_core_primitives::{
Piece, PieceOffset, Record, RecordCommitment, RecordWitness, SBucket, SectorId,
};
use subspace_erasure_coding::ErasureCoding;
use subspace_proof_of_space::{Quality, Table, TableGenerator};
use thiserror::Error;
use tracing::debug;
/// Errors that happen during reading
#[derive(Debug, Error)]
pub enum ReadingError {
/// Wrong sector size
#[error("Wrong sector size: expected {expected}, actual {actual}")]
WrongSectorSize {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to read chunk.
///
/// This is an implementation bug, most likely due to a mismatch between the sector contents map and
/// other farming parameters.
#[error("Failed to read chunk at location {chunk_location}")]
FailedToReadChunk {
/// Chunk location
chunk_location: usize,
},
/// Invalid chunk, possible disk corruption
#[error(
"Invalid chunk at location {chunk_location} s-bucket {s_bucket} encoded \
{encoded_chunk_used}, possible disk corruption: {error}"
)]
InvalidChunk {
/// S-bucket
s_bucket: SBucket,
/// Indicates whether chunk was encoded
encoded_chunk_used: bool,
/// Chunk location
chunk_location: usize,
/// Lower-level error
error: String,
},
/// Failed to erasure-decode record
#[error("Failed to erasure-decode record at offset {piece_offset}: {error}")]
FailedToErasureDecodeRecord {
/// Piece offset
piece_offset: PieceOffset,
/// Lower-level error
error: String,
},
/// Wrong record size after decoding
#[error("Wrong record size after decoding: expected {expected}, actual {actual}")]
WrongRecordSizeAfterDecoding {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to decode sector contents map
#[error("Failed to decode sector contents map: {0}")]
FailedToDecodeSectorContentsMap(#[from] SectorContentsMapFromBytesError),
/// Checksum mismatch
#[error("Checksum mismatch")]
ChecksumMismatch,
}
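// Illustrative sketch (hypothetical): one way a caller might group these errors, based on
// the variant documentation above — corruption-like failures suggest the sector needs to be
// replotted, while chunk-lookup and size-mismatch failures point at an internal
// inconsistency rather than bad input.
#[allow(dead_code)]
fn classify_reading_error(error: &ReadingError) -> &'static str {
    match error {
        ReadingError::InvalidChunk { .. } | ReadingError::ChecksumMismatch => {
            "possible disk corruption"
        }
        ReadingError::FailedToReadChunk { .. }
        | ReadingError::WrongRecordSizeAfterDecoding { .. } => "implementation bug",
        _ => "invalid input or decoding failure",
    }
}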
/// Record contained in the plot
#[derive(Debug, Clone)]
pub struct PlotRecord {
/// Record scalars
pub scalars: Box<[Scalar; Record::NUM_CHUNKS]>,
/// Record commitment
pub commitment: RecordCommitment,
/// Record witness
pub witness: RecordWitness,
}
/// Read sector record chunks; only plotted s-buckets are returned (in decoded form)
pub fn read_sector_record_chunks<PosTable>(
piece_offset: PieceOffset,
pieces_in_sector: u16,
s_bucket_offsets: &[u32; Record::NUM_S_BUCKETS],
sector_contents_map: &SectorContentsMap,
pos_table: &PosTable,
sector: &[u8],
) -> Result<Box<[Option<Scalar>; Record::NUM_S_BUCKETS]>, ReadingError>
where
PosTable: Table,
{
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let mut record_chunks = vec![None; Record::NUM_S_BUCKETS];
record_chunks
.par_iter_mut()
.zip(sector_contents_map.par_iter_record_chunk_to_plot(piece_offset))
.zip(
(u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
.into_par_iter()
.map(SBucket::from)
.zip(s_bucket_offsets.par_iter()),
)
.try_for_each(
|((maybe_record_chunk, maybe_chunk_details), (s_bucket, &s_bucket_offset))| {
let (chunk_offset, encoded_chunk_used) = match maybe_chunk_details {
Some(chunk_details) => chunk_details,
None => {
return Ok(());
}
};
let chunk_location = chunk_offset + s_bucket_offset as usize;
let mut record_chunk = sector[SectorContentsMap::encoded_size(pieces_in_sector)..]
.array_chunks::<{ Scalar::FULL_BYTES }>()
.nth(chunk_location)
.copied()
.ok_or(ReadingError::FailedToReadChunk { chunk_location })?;
// Decode chunk if necessary
if encoded_chunk_used {
let quality = pos_table
.find_quality(s_bucket.into())
.expect("encoded_chunk_used implies quality exists for this chunk; qed");
record_chunk = Simd::to_array(
Simd::from(record_chunk) ^ Simd::from(quality.create_proof().hash()),
);
}
maybe_record_chunk.replace(Scalar::try_from(record_chunk).map_err(|error| {
ReadingError::InvalidChunk {
s_bucket,
encoded_chunk_used,
chunk_location,
error,
}
})?);
Ok::<_, ReadingError>(())
},
)?;
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, layout is exactly what we need here
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Option<Scalar>; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
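// Illustrative sketch (not used by this crate): the `ManuallyDrop` + `Box::from_raw` dance
// above turns a heap-allocated `Vec` holding exactly N elements into a `Box<[T; N]>` without
// copying. The hypothetical helper below shows the same pattern in isolation; it assumes the
// vector was created with `vec![value; N]`, so its capacity equals its length and the
// allocation layout matches the boxed array exactly.
#[allow(dead_code)]
fn vec_into_boxed_array<T, const N: usize>(v: Vec<T>) -> Box<[T; N]> {
    assert_eq!(v.len(), N, "length must match the target array size");
    assert_eq!(v.capacity(), N, "capacity must equal length so the allocation layout matches");
    let mut v = ManuallyDrop::new(v);
    // SAFETY: the original vector is not dropped, it holds exactly N initialized elements,
    // and its allocation has the same size and alignment as `[T; N]`.
    unsafe { Box::from_raw(v.as_mut_ptr() as *mut [T; N]) }
}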
/// Given sector record chunks, recover extended record chunks (both source and parity)
pub fn recover_extended_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<Box<[Scalar; Record::NUM_S_BUCKETS]>, ReadingError> {
// Restore source record scalars
let record_chunks = erasure_coding
.recover(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_S_BUCKETS {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_S_BUCKETS,
actual: record_chunks.len(),
});
}
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, size of the data checked above
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Scalar; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
/// Given sector record chunks, recover source record chunks in the form of an iterator.
pub fn recover_source_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<impl ExactSizeIterator<Item = Scalar>, ReadingError> {
// Restore source record scalars
let record_chunks = erasure_coding
.recover_source(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_CHUNKS {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_CHUNKS,
actual: record_chunks.len(),
});
}
Ok(record_chunks)
}
/// Read metadata (commitment and witness) for record
pub(crate) fn read_record_metadata(
piece_offset: PieceOffset,
pieces_in_sector: u16,
sector: &[u8],
) -> Result<RecordMetadata, ReadingError> {
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_metadata_start = SectorContentsMap::encoded_size(pieces_in_sector)
+ sector_record_chunks_size(pieces_in_sector);
// Move to the beginning of the commitment and witness we care about
let record_metadata_bytes = §or[sector_metadata_start..]
[RecordMetadata::encoded_size() * usize::from(piece_offset)..];
let record_metadata = RecordMetadata::decode(&mut &*record_metadata_bytes).expect(
"Length is correct and checked above, contents doesn't have specific structure to \
it; qed",
);
Ok(record_metadata)
}
/// Read piece from sector
pub fn | <PosTable>(
piece_offset: PieceOffset,
sector_id: &SectorId,
sector_metadata: &SectorMetadataChecksummed,
sector: &[u8],
erasure_coding: &ErasureCoding,
table_generator: &mut PosTable::Generator,
) -> Result<Piece, ReadingError>
where
PosTable: Table,
{
let pieces_in_sector = sector_metadata.pieces_in_sector;
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_contents_map = {
SectorContentsMap::from_bytes(
§or[..SectorContentsMap::encoded_size(pieces_in_sector)],
pieces_in_sector,
)?
};
// Restore source record scalars
let record_chunks = recover_source_record_chunks(
&*read_sector_record_chunks(
piece_offset,
pieces_in_sector,
§or_metadata.s_bucket_offsets(),
§or_contents_map,
&table_generator.generate(
§or_id.derive_evaluation_seed(piece_offset, sector_metadata.history_size),
),
sector,
)?,
piece_offset,
erasure_coding,
)?;
let record_metadata = read_record_metadata(piece_offset, pieces_in_sector, sector)?;
let mut piece = Piece::default();
piece
.record_mut()
.iter_mut()
.zip(record_chunks)
.for_each(|(output, input)| {
*output = input.to_bytes();
});
*piece.commitment_mut() = record_metadata.commitment;
*piece.witness_mut() = record_metadata.witness;
// Verify checksum
let actual_checksum = blake3_hash(piece.as_ref());
if actual_checksum != record_metadata.piece_checksum {
debug!(
?sector_id,
%piece_offset,
actual_checksum = %hex::encode(actual_checksum),
expected_checksum = %hex::encode(record_metadata.piece_checksum),
"Hash doesn't match, plotted piece is corrupted"
);
return Err(ReadingError::ChecksumMismatch);
}
Ok(piece)
}
| read_piece | identifier_name |
reading.rs | use crate::sector::{
sector_record_chunks_size, sector_size, RecordMetadata, SectorContentsMap,
SectorContentsMapFromBytesError, SectorMetadataChecksummed,
};
use parity_scale_codec::Decode;
use rayon::prelude::*;
use std::mem::ManuallyDrop;
use std::simd::Simd;
use subspace_core_primitives::crypto::{blake3_hash, Scalar};
use subspace_core_primitives::{
Piece, PieceOffset, Record, RecordCommitment, RecordWitness, SBucket, SectorId,
};
use subspace_erasure_coding::ErasureCoding;
use subspace_proof_of_space::{Quality, Table, TableGenerator};
use thiserror::Error;
use tracing::debug;
/// Errors that happen during reading
#[derive(Debug, Error)]
pub enum ReadingError {
/// Wrong sector size
#[error("Wrong sector size: expected {expected}, actual {actual}")]
WrongSectorSize {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to read chunk.
///
/// This is an implementation bug, most likely due to a mismatch between the sector contents map and
/// other farming parameters.
#[error("Failed to read chunk at location {chunk_location}")]
FailedToReadChunk {
/// Chunk location
chunk_location: usize,
},
/// Invalid chunk, possible disk corruption
#[error(
"Invalid chunk at location {chunk_location} s-bucket {s_bucket} encoded \
{encoded_chunk_used}, possible disk corruption: {error}"
)]
InvalidChunk {
/// S-bucket
s_bucket: SBucket,
/// Indicates whether chunk was encoded
encoded_chunk_used: bool,
/// Chunk location
chunk_location: usize,
/// Lower-level error
error: String,
},
/// Failed to erasure-decode record
#[error("Failed to erasure-decode record at offset {piece_offset}: {error}")]
FailedToErasureDecodeRecord {
/// Piece offset
piece_offset: PieceOffset,
/// Lower-level error
error: String,
},
/// Wrong record size after decoding
#[error("Wrong record size after decoding: expected {expected}, actual {actual}")]
WrongRecordSizeAfterDecoding {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to decode sector contents map
#[error("Failed to decode sector contents map: {0}")]
FailedToDecodeSectorContentsMap(#[from] SectorContentsMapFromBytesError),
/// Checksum mismatch
#[error("Checksum mismatch")]
ChecksumMismatch,
}
/// Record contained in the plot
#[derive(Debug, Clone)]
pub struct PlotRecord {
/// Record scalars
pub scalars: Box<[Scalar; Record::NUM_CHUNKS]>,
/// Record commitment
pub commitment: RecordCommitment,
/// Record witness
pub witness: RecordWitness,
}
/// Read sector record chunks; only plotted s-buckets are returned (in decoded form)
pub fn read_sector_record_chunks<PosTable>(
piece_offset: PieceOffset,
pieces_in_sector: u16,
s_bucket_offsets: &[u32; Record::NUM_S_BUCKETS],
sector_contents_map: &SectorContentsMap,
pos_table: &PosTable,
sector: &[u8],
) -> Result<Box<[Option<Scalar>; Record::NUM_S_BUCKETS]>, ReadingError>
where
PosTable: Table,
{
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let mut record_chunks = vec![None; Record::NUM_S_BUCKETS];
record_chunks
.par_iter_mut()
.zip(sector_contents_map.par_iter_record_chunk_to_plot(piece_offset))
.zip(
(u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
.into_par_iter()
.map(SBucket::from)
.zip(s_bucket_offsets.par_iter()),
)
.try_for_each(
|((maybe_record_chunk, maybe_chunk_details), (s_bucket, &s_bucket_offset))| {
let (chunk_offset, encoded_chunk_used) = match maybe_chunk_details {
Some(chunk_details) => chunk_details,
None => {
return Ok(());
}
};
let chunk_location = chunk_offset + s_bucket_offset as usize;
let mut record_chunk = sector[SectorContentsMap::encoded_size(pieces_in_sector)..]
.array_chunks::<{ Scalar::FULL_BYTES }>()
.nth(chunk_location)
.copied()
.ok_or(ReadingError::FailedToReadChunk { chunk_location })?;
// Decode chunk if necessary
if encoded_chunk_used {
let quality = pos_table
.find_quality(s_bucket.into())
.expect("encoded_chunk_used implies quality exists for this chunk; qed");
record_chunk = Simd::to_array(
Simd::from(record_chunk) ^ Simd::from(quality.create_proof().hash()),
);
}
maybe_record_chunk.replace(Scalar::try_from(record_chunk).map_err(|error| {
ReadingError::InvalidChunk {
s_bucket,
encoded_chunk_used,
chunk_location,
error,
}
})?);
Ok::<_, ReadingError>(())
},
)?;
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, layout is exactly what we need here
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Option<Scalar>; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
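// Illustrative property sketch (not part of this crate): the decode step above removes the
// proof-of-space mask by XOR-ing the stored chunk with the proof hash. Because XOR with a
// fixed mask is its own inverse, the very same operation encodes during plotting and decodes
// during reading. The byte values below are arbitrary.
#[cfg(test)]
mod xor_mask_sketch {
    #[test]
    fn xor_with_same_mask_round_trips() {
        let original = [0xabu8; 32];
        let mask = [0x5cu8; 32];
        let masked: Vec<u8> = original.iter().zip(&mask).map(|(b, m)| b ^ m).collect();
        let unmasked: Vec<u8> = masked.iter().zip(&mask).map(|(b, m)| b ^ m).collect();
        assert_eq!(unmasked, original.to_vec());
    }
}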
/// Given sector record chunks, recover extended record chunks (both source and parity)
pub fn recover_extended_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<Box<[Scalar; Record::NUM_S_BUCKETS]>, ReadingError> {
// Restore source record scalars
let record_chunks = erasure_coding
.recover(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_S_BUCKETS {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_S_BUCKETS,
actual: record_chunks.len(),
});
}
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, size of the data checked above
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Scalar; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
/// Given sector record chunks recover source record chunks in form of an iterator.
pub fn recover_source_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<impl ExactSizeIterator<Item = Scalar>, ReadingError> |
/// Read metadata (commitment and witness) for record
pub(crate) fn read_record_metadata(
piece_offset: PieceOffset,
pieces_in_sector: u16,
sector: &[u8],
) -> Result<RecordMetadata, ReadingError> {
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_metadata_start = SectorContentsMap::encoded_size(pieces_in_sector)
+ sector_record_chunks_size(pieces_in_sector);
// Move to the beginning of the commitment and witness we care about
let record_metadata_bytes = §or[sector_metadata_start..]
[RecordMetadata::encoded_size() * usize::from(piece_offset)..];
let record_metadata = RecordMetadata::decode(&mut &*record_metadata_bytes).expect(
"Length is correct and checked above, contents doesn't have specific structure to \
it; qed",
);
Ok(record_metadata)
}
/// Read piece from sector
pub fn read_piece<PosTable>(
piece_offset: PieceOffset,
sector_id: &SectorId,
sector_metadata: &SectorMetadataChecksummed,
sector: &[u8],
erasure_coding: &ErasureCoding,
table_generator: &mut PosTable::Generator,
) -> Result<Piece, ReadingError>
where
PosTable: Table,
{
let pieces_in_sector = sector_metadata.pieces_in_sector;
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_contents_map = {
SectorContentsMap::from_bytes(
§or[..SectorContentsMap::encoded_size(pieces_in_sector)],
pieces_in_sector,
)?
};
// Restore source record scalars
let record_chunks = recover_source_record_chunks(
&*read_sector_record_chunks(
piece_offset,
pieces_in_sector,
§or_metadata.s_bucket_offsets(),
§or_contents_map,
&table_generator.generate(
§or_id.derive_evaluation_seed(piece_offset, sector_metadata.history_size),
),
sector,
)?,
piece_offset,
erasure_coding,
)?;
let record_metadata = read_record_metadata(piece_offset, pieces_in_sector, sector)?;
let mut piece = Piece::default();
piece
.record_mut()
.iter_mut()
.zip(record_chunks)
.for_each(|(output, input)| {
*output = input.to_bytes();
});
*piece.commitment_mut() = record_metadata.commitment;
*piece.witness_mut() = record_metadata.witness;
// Verify checksum
let actual_checksum = blake3_hash(piece.as_ref());
if actual_checksum != record_metadata.piece_checksum {
debug!(
?sector_id,
%piece_offset,
actual_checksum = %hex::encode(actual_checksum),
expected_checksum = %hex::encode(record_metadata.piece_checksum),
"Hash doesn't match, plotted piece is corrupted"
);
return Err(ReadingError::ChecksumMismatch);
}
Ok(piece)
}
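// Illustrative sketch (hypothetical, not part of this crate): a typical call shape for
// `read_piece`. The sector bytes, identifiers, and coding/table state are assumed to come
// from the farmer's plotting pipeline; `PosTableImpl` stands in for a concrete
// proof-of-space table type chosen by the caller.
#[allow(dead_code)]
fn read_piece_example<PosTableImpl: Table>(
    piece_offset: PieceOffset,
    sector_id: &SectorId,
    sector_metadata: &SectorMetadataChecksummed,
    sector_bytes: &[u8],
    erasure_coding: &ErasureCoding,
    table_generator: &mut PosTableImpl::Generator,
) -> Result<Piece, ReadingError> {
    // Forward everything to `read_piece`; errors (wrong sector size, corruption, checksum
    // mismatch) are propagated to the caller unchanged.
    read_piece::<PosTableImpl>(
        piece_offset,
        sector_id,
        sector_metadata,
        sector_bytes,
        erasure_coding,
        table_generator,
    )
}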
| {
// Restore source record scalars
let record_chunks = erasure_coding
.recover_source(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_CHUNKS {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_CHUNKS,
actual: record_chunks.len(),
});
}
Ok(record_chunks)
} | identifier_body |
api_op_UpdateMatchmakingConfiguration.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package gamelift
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Updates settings for a FlexMatch matchmaking configuration. These changes
// affect all matches and game sessions that are created after the update. To
// update settings, specify the configuration name to be updated and provide the
// new settings. Learn more: Design a FlexMatch matchmaker (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-configuration.html)
func (c *Client) UpdateMatchmakingConfiguration(ctx context.Context, params *UpdateMatchmakingConfigurationInput, optFns ...func(*Options)) (*UpdateMatchmakingConfigurationOutput, error) {
if params == nil {
params = &UpdateMatchmakingConfigurationInput{}
}
result, metadata, err := c.invokeOperation(ctx, "UpdateMatchmakingConfiguration", params, optFns, c.addOperationUpdateMatchmakingConfigurationMiddlewares)
if err != nil {
return nil, err
}
out := result.(*UpdateMatchmakingConfigurationOutput)
out.ResultMetadata = metadata
return out, nil
}
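// Illustrative sketch (not part of the generated SDK): a typical call site for
// UpdateMatchmakingConfiguration. The configuration name, rule set name, and timeout values
// are hypothetical; per-call overrides (for example a different Region) could be supplied
// through additional functional options on the call.
func exampleUpdateMatchmakingConfiguration(ctx context.Context, client *Client) (*types.MatchmakingConfiguration, error) {
	out, err := client.UpdateMatchmakingConfiguration(ctx, &UpdateMatchmakingConfigurationInput{
		Name:                     aws.String("example-matchmaking-config"),
		RuleSetName:              aws.String("example-rule-set"),
		AcceptanceRequired:       aws.Bool(true),
		AcceptanceTimeoutSeconds: aws.Int32(60),
		RequestTimeoutSeconds:    aws.Int32(120),
	})
	if err != nil {
		return nil, err
	}
	// The service echoes back the updated configuration.
	return out.Configuration, nil
}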
type UpdateMatchmakingConfigurationInput struct {
// A unique identifier for the matchmaking configuration to update. You can use
// either the configuration name or ARN value.
//
// This member is required.
Name *string
// A flag that indicates whether a match that was created with this configuration
// must be accepted by the matched players. To require acceptance, set to TRUE.
// With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE
// to indicate when a completed potential match is waiting for player acceptance.
AcceptanceRequired *bool
// The length of time (in seconds) to wait for players to accept a proposed match,
// if acceptance is required.
AcceptanceTimeoutSeconds *int32
// The number of player slots in a match to keep open for future players. For
// example, if the configuration's rule set specifies a match for a single
// 10-person team, and the additional player count is set to 2, 10 players will be
// selected for the match and 2 more player slots will be open for future players.
// This parameter is not used if FlexMatchMode is set to STANDALONE .
AdditionalPlayerCount *int32
// The method that is used to backfill game sessions created with this matchmaking
// configuration. Specify MANUAL when your game manages backfill requests manually
// or does not use the match backfill feature. Specify AUTOMATIC to have GameLift
// create a match backfill request whenever a game session has one or more open
// slots. Learn more about manual and automatic backfill in Backfill Existing
// Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html)
// . Automatic backfill is not available when FlexMatchMode is set to STANDALONE .
BackfillMode types.BackfillMode
// Information to add to all events related to the matchmaking configuration.
CustomEventData *string
// A description for the matchmaking configuration.
Description *string
// Indicates whether this matchmaking configuration is being used with Amazon
// GameLift hosting or as a standalone matchmaking solution.
// - STANDALONE - FlexMatch forms matches and returns match information,
// including players and team assignments, in a MatchmakingSucceeded (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded)
// event.
// - WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift
// queue to start a game session for the match.
FlexMatchMode types.FlexMatchMode
// A set of custom properties for a game session, formatted as key:value pairs.
// These properties are passed to a game server process with a request to start a
// new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the new GameSession object that is created for
// a successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameProperties []types.GameProperty
// A set of custom game session properties, formatted as a single string value.
// This data is passed to a game server process with a request to start a new game
// session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the game session that is created for a
// successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameSessionData *string
// The Amazon Resource Name ( ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)
// ) that is assigned to an Amazon GameLift game session queue resource and uniquely
// identifies it. ARNs are unique across all Regions. Format is
// arn:aws:gamelift:::gamesessionqueue/ . Queues can be located in any Region.
// Queues are used to start new Amazon GameLift-hosted game sessions for matches
// that are created with this matchmaking configuration. If FlexMatchMode is set
// to STANDALONE , do not set this parameter.
GameSessionQueueArns []string
// An SNS topic ARN that is set up to receive matchmaking notifications. See
// Setting up notifications for matchmaking (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html)
// for more information.
NotificationTarget *string
// The maximum duration, in seconds, that a matchmaking ticket can remain in
// process before timing out. Requests that fail due to timing out can be
// resubmitted as needed.
RequestTimeoutSeconds *int32
// A unique identifier for the matchmaking rule set to use with this
// configuration. You can use either the rule set name or ARN value. A matchmaking
// configuration can only use rule sets that are defined in the same Region.
RuleSetName *string
noSmithyDocumentSerde
}
type UpdateMatchmakingConfigurationOutput struct {
// The updated matchmaking configuration.
Configuration *types.MatchmakingConfiguration
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationUpdateMatchmakingConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addUpdateMatchmakingConfigurationResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpUpdateMatchmakingConfigurationValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "gamelift",
OperationName: "UpdateMatchmakingConfiguration",
}
}
type opUpdateMatchmakingConfigurationResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opUpdateMatchmakingConfigurationResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opUpdateMatchmakingConfigurationResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) |
func addUpdateMatchmakingConfigurationResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opUpdateMatchmakingConfigurationResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
}
| {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "gamelift"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "gamelift"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("gamelift")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
} | identifier_body |
api_op_UpdateMatchmakingConfiguration.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package gamelift
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Updates settings for a FlexMatch matchmaking configuration. These changes
// affect all matches and game sessions that are created after the update. To
// update settings, specify the configuration name to be updated and provide the
// new settings. Learn more: Design a FlexMatch matchmaker (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-configuration.html)
func (c *Client) UpdateMatchmakingConfiguration(ctx context.Context, params *UpdateMatchmakingConfigurationInput, optFns ...func(*Options)) (*UpdateMatchmakingConfigurationOutput, error) {
if params == nil {
params = &UpdateMatchmakingConfigurationInput{}
}
result, metadata, err := c.invokeOperation(ctx, "UpdateMatchmakingConfiguration", params, optFns, c.addOperationUpdateMatchmakingConfigurationMiddlewares)
if err != nil {
return nil, err
}
out := result.(*UpdateMatchmakingConfigurationOutput)
out.ResultMetadata = metadata
return out, nil
}
type UpdateMatchmakingConfigurationInput struct {
// A unique identifier for the matchmaking configuration to update. You can use
// either the configuration name or ARN value.
//
// This member is required.
Name *string
// A flag that indicates whether a match that was created with this configuration
// must be accepted by the matched players. To require acceptance, set to TRUE.
// With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE
// to indicate when a completed potential match is waiting for player acceptance.
AcceptanceRequired *bool
// The length of time (in seconds) to wait for players to accept a proposed match,
// if acceptance is required.
AcceptanceTimeoutSeconds *int32
// The number of player slots in a match to keep open for future players. For
// example, if the configuration's rule set specifies a match for a single
// 10-person team, and the additional player count is set to 2, 10 players will be
// selected for the match and 2 more player slots will be open for future players.
// This parameter is not used if FlexMatchMode is set to STANDALONE .
AdditionalPlayerCount *int32
// The method that is used to backfill game sessions created with this matchmaking
// configuration. Specify MANUAL when your game manages backfill requests manually
// or does not use the match backfill feature. Specify AUTOMATIC to have GameLift
// create a match backfill request whenever a game session has one or more open
// slots. Learn more about manual and automatic backfill in Backfill Existing
// Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html)
// . Automatic backfill is not available when FlexMatchMode is set to STANDALONE .
BackfillMode types.BackfillMode
// Information to add to all events related to the matchmaking configuration.
CustomEventData *string
// A description for the matchmaking configuration.
Description *string
// Indicates whether this matchmaking configuration is being used with Amazon
// GameLift hosting or as a standalone matchmaking solution.
// - STANDALONE - FlexMatch forms matches and returns match information,
// including players and team assignments, in a MatchmakingSucceeded (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded)
// event.
// - WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift
// queue to start a game session for the match.
FlexMatchMode types.FlexMatchMode
// A set of custom properties for a game session, formatted as key:value pairs.
// These properties are passed to a game server process with a request to start a
// new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the new GameSession object that is created for
// a successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameProperties []types.GameProperty
// A set of custom game session properties, formatted as a single string value.
// This data is passed to a game server process with a request to start a new game
// session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the game session that is created for a
// successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameSessionData *string
// The Amazon Resource Name ( ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)
// ) that is assigned to an Amazon GameLift game session queue resource and uniquely
// identifies it. ARNs are unique across all Regions. Format is
// arn:aws:gamelift:::gamesessionqueue/ . Queues can be located in any Region.
// Queues are used to start new Amazon GameLift-hosted game sessions for matches
// that are created with this matchmaking configuration. If FlexMatchMode is set
// to STANDALONE , do not set this parameter.
GameSessionQueueArns []string
// An SNS topic ARN that is set up to receive matchmaking notifications. See
// Setting up notifications for matchmaking (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html)
// for more information.
NotificationTarget *string
// The maximum duration, in seconds, that a matchmaking ticket can remain in
// process before timing out. Requests that fail due to timing out can be
// resubmitted as needed.
RequestTimeoutSeconds *int32
// A unique identifier for the matchmaking rule set to use with this
// configuration. You can use either the rule set name or ARN value. A matchmaking
// configuration can only use rule sets that are defined in the same Region.
RuleSetName *string
noSmithyDocumentSerde
}
type UpdateMatchmakingConfigurationOutput struct {
// The updated matchmaking configuration.
Configuration *types.MatchmakingConfiguration
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationUpdateMatchmakingConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addUpdateMatchmakingConfigurationResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpUpdateMatchmakingConfigurationValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func | (region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "gamelift",
OperationName: "UpdateMatchmakingConfiguration",
}
}
type opUpdateMatchmakingConfigurationResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opUpdateMatchmakingConfigurationResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opUpdateMatchmakingConfigurationResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "gamelift"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "gamelift"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("gamelift")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addUpdateMatchmakingConfigurationResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opUpdateMatchmakingConfigurationResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
}
| newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration | identifier_name |
api_op_UpdateMatchmakingConfiguration.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package gamelift
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Updates settings for a FlexMatch matchmaking configuration. These changes
// affect all matches and game sessions that are created after the update. To
// update settings, specify the configuration name to be updated and provide the
// new settings. Learn more: Design a FlexMatch matchmaker (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-configuration.html)
func (c *Client) UpdateMatchmakingConfiguration(ctx context.Context, params *UpdateMatchmakingConfigurationInput, optFns ...func(*Options)) (*UpdateMatchmakingConfigurationOutput, error) {
if params == nil {
params = &UpdateMatchmakingConfigurationInput{}
}
result, metadata, err := c.invokeOperation(ctx, "UpdateMatchmakingConfiguration", params, optFns, c.addOperationUpdateMatchmakingConfigurationMiddlewares)
if err != nil {
return nil, err
}
out := result.(*UpdateMatchmakingConfigurationOutput)
out.ResultMetadata = metadata
return out, nil
}
type UpdateMatchmakingConfigurationInput struct {
// A unique identifier for the matchmaking configuration to update. You can use
// either the configuration name or ARN value.
//
// This member is required.
Name *string
// A flag that indicates whether a match that was created with this configuration
// must be accepted by the matched players. To require acceptance, set to TRUE.
// With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE
// to indicate when a completed potential match is waiting for player acceptance.
AcceptanceRequired *bool
// The length of time (in seconds) to wait for players to accept a proposed match,
// if acceptance is required.
AcceptanceTimeoutSeconds *int32
// The number of player slots in a match to keep open for future players. For
// example, if the configuration's rule set specifies a match for a single
// 10-person team, and the additional player count is set to 2, 10 players will be
// selected for the match and 2 more player slots will be open for future players.
// This parameter is not used if FlexMatchMode is set to STANDALONE .
AdditionalPlayerCount *int32
// The method that is used to backfill game sessions created with this matchmaking
// configuration. Specify MANUAL when your game manages backfill requests manually
// or does not use the match backfill feature. Specify AUTOMATIC to have GameLift
// create a match backfill request whenever a game session has one or more open
// slots. Learn more about manual and automatic backfill in Backfill Existing
// Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html)
// . Automatic backfill is not available when FlexMatchMode is set to STANDALONE .
BackfillMode types.BackfillMode
// Information to add to all events related to the matchmaking configuration.
CustomEventData *string
// A description for the matchmaking configuration.
Description *string
// Indicates whether this matchmaking configuration is being used with Amazon
// GameLift hosting or as a standalone matchmaking solution.
// - STANDALONE - FlexMatch forms matches and returns match information,
// including players and team assignments, in a MatchmakingSucceeded (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded)
// event.
// - WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift
// queue to start a game session for the match.
FlexMatchMode types.FlexMatchMode
// A set of custom properties for a game session, formatted as key:value pairs.
// These properties are passed to a game server process with a request to start a
// new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the new GameSession object that is created for
// a successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameProperties []types.GameProperty
// A set of custom game session properties, formatted as a single string value.
// This data is passed to a game server process with a request to start a new game
// session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the game session that is created for a
// successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameSessionData *string
// The Amazon Resource Name ( ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)
// ) that is assigned to an Amazon GameLift game session queue resource and uniquely
// identifies it. ARNs are unique across all Regions. Format is
// arn:aws:gamelift:::gamesessionqueue/ . Queues can be located in any Region.
// Queues are used to start new Amazon GameLift-hosted game sessions for matches
// that are created with this matchmaking configuration. If FlexMatchMode is set
// to STANDALONE , do not set this parameter.
GameSessionQueueArns []string
// An SNS topic ARN that is set up to receive matchmaking notifications. See
// Setting up notifications for matchmaking (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html)
// for more information.
NotificationTarget *string
// The maximum duration, in seconds, that a matchmaking ticket can remain in
// process before timing out. Requests that fail due to timing out can be
// resubmitted as needed.
RequestTimeoutSeconds *int32
// A unique identifier for the matchmaking rule set to use with this
// configuration. You can use either the rule set name or ARN value. A matchmaking
// configuration can only use rule sets that are defined in the same Region.
RuleSetName *string
noSmithyDocumentSerde
}
type UpdateMatchmakingConfigurationOutput struct {
// The updated matchmaking configuration.
Configuration *types.MatchmakingConfiguration
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationUpdateMatchmakingConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err | }
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addUpdateMatchmakingConfigurationResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpUpdateMatchmakingConfigurationValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "gamelift",
OperationName: "UpdateMatchmakingConfiguration",
}
}
type opUpdateMatchmakingConfigurationResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opUpdateMatchmakingConfigurationResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opUpdateMatchmakingConfigurationResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(&params)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "gamelift"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "gamelift"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("gamelift")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addUpdateMatchmakingConfigurationResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opUpdateMatchmakingConfigurationResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
} | random_line_split |
|
api_op_UpdateMatchmakingConfiguration.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package gamelift
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Updates settings for a FlexMatch matchmaking configuration. These changes
// affect all matches and game sessions that are created after the update. To
// update settings, specify the configuration name to be updated and provide the
// new settings. Learn more Design a FlexMatch matchmaker (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-configuration.html)
func (c *Client) UpdateMatchmakingConfiguration(ctx context.Context, params *UpdateMatchmakingConfigurationInput, optFns ...func(*Options)) (*UpdateMatchmakingConfigurationOutput, error) {
if params == nil {
params = &UpdateMatchmakingConfigurationInput{}
}
result, metadata, err := c.invokeOperation(ctx, "UpdateMatchmakingConfiguration", params, optFns, c.addOperationUpdateMatchmakingConfigurationMiddlewares)
if err != nil {
return nil, err
}
out := result.(*UpdateMatchmakingConfigurationOutput)
out.ResultMetadata = metadata
return out, nil
}
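// Illustrative usage sketch only (not part of the generated code): a minimal call
// to this operation from application code, assuming the caller also imports the
// aws-sdk-go-v2 "config" package and the standard "log" package. All field values
// below are made-up examples.
//
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
// log.Fatal(err)
// }
// client := gamelift.NewFromConfig(cfg)
// _, err = client.UpdateMatchmakingConfiguration(context.TODO(),
// &gamelift.UpdateMatchmakingConfigurationInput{
// Name: aws.String("my-matchmaking-config"),
// RuleSetName: aws.String("my-rule-set"),
// RequestTimeoutSeconds: aws.Int32(120),
// AcceptanceRequired: aws.Bool(false),
// })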
type UpdateMatchmakingConfigurationInput struct {
// A unique identifier for the matchmaking configuration to update. You can use
// either the configuration name or ARN value.
//
// This member is required.
Name *string
// A flag that indicates whether a match that was created with this configuration
// must be accepted by the matched players. To require acceptance, set to TRUE.
// With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE
// to indicate when a completed potential match is waiting for player acceptance.
AcceptanceRequired *bool
// The length of time (in seconds) to wait for players to accept a proposed match,
// if acceptance is required.
AcceptanceTimeoutSeconds *int32
// The number of player slots in a match to keep open for future players. For
// example, if the configuration's rule set specifies a match for a single
// 10-person team, and the additional player count is set to 2, 10 players will be
// selected for the match and 2 more player slots will be open for future players.
// This parameter is not used if FlexMatchMode is set to STANDALONE .
AdditionalPlayerCount *int32
// The method that is used to backfill game sessions created with this matchmaking
// configuration. Specify MANUAL when your game manages backfill requests manually
// or does not use the match backfill feature. Specify AUTOMATIC to have GameLift
// create a match backfill request whenever a game session has one or more open
// slots. Learn more about manual and automatic backfill in Backfill Existing
// Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html)
// . Automatic backfill is not available when FlexMatchMode is set to STANDALONE .
BackfillMode types.BackfillMode
// Information to add to all events related to the matchmaking configuration.
CustomEventData *string
// A description for the matchmaking configuration.
Description *string
// Indicates whether this matchmaking configuration is being used with Amazon
// GameLift hosting or as a standalone matchmaking solution.
// - STANDALONE - FlexMatch forms matches and returns match information,
// including players and team assignments, in a MatchmakingSucceeded (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded)
// event.
// - WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift
// queue to start a game session for the match.
FlexMatchMode types.FlexMatchMode
// A set of custom properties for a game session, formatted as key:value pairs.
// These properties are passed to a game server process with a request to start a
// new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the new GameSession object that is created for
// a successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameProperties []types.GameProperty
// A set of custom game session properties, formatted as a single string value.
// This data is passed to a game server process with a request to start a new game
// session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the game session that is created for a
// successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameSessionData *string
// The Amazon Resource Name ( ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)
// ) that is assigned to an Amazon GameLift game session queue resource and uniquely
// identifies it. ARNs are unique across all Regions. Format is
// arn:aws:gamelift:<region>::gamesessionqueue/<queue name> . Queues can be located in any Region.
// Queues are used to start new Amazon GameLift-hosted game sessions for matches
// that are created with this matchmaking configuration. If FlexMatchMode is set
// to STANDALONE , do not set this parameter.
GameSessionQueueArns []string
// An SNS topic ARN that is set up to receive matchmaking notifications. See
// Setting up notifications for matchmaking (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html)
// for more information.
NotificationTarget *string
// The maximum duration, in seconds, that a matchmaking ticket can remain in
// process before timing out. Requests that fail due to timing out can be
// resubmitted as needed.
RequestTimeoutSeconds *int32
// A unique identifier for the matchmaking rule set to use with this
// configuration. You can use either the rule set name or ARN value. A matchmaking
// configuration can only use rule sets that are defined in the same Region.
RuleSetName *string
noSmithyDocumentSerde
}
type UpdateMatchmakingConfigurationOutput struct {
// The updated matchmaking configuration.
Configuration *types.MatchmakingConfiguration
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationUpdateMatchmakingConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addUpdateMatchmakingConfigurationResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpUpdateMatchmakingConfigurationValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "gamelift",
OperationName: "UpdateMatchmakingConfiguration",
}
}
type opUpdateMatchmakingConfigurationResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opUpdateMatchmakingConfigurationResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opUpdateMatchmakingConfigurationResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(&params)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "gamelift"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "gamelift"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil |
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("gamelift")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addUpdateMatchmakingConfigurationResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opUpdateMatchmakingConfigurationResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
}
| {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
} | conditional_block |
graph.rs | use cairo;
use gtk::{self, BoxExt, ContainerExt, DrawingArea, ScrolledWindowExt, StateFlags, WidgetExt};
use std::cell::RefCell;
use gdk::{self, WindowExt};
use std::rc::Rc;
use std::time::Instant;
use color::Color;
use utils::RotateVec;
const LEFT_WIDTH: f64 = 31.;
pub struct Graph {
elapsed: Instant,
colors: Vec<Color>,
pub data: Vec<RotateVec<f64>>,
vertical_layout: gtk::Box,
scroll_layout: gtk::ScrolledWindow,
horizontal_layout: gtk::Box,
pub area: DrawingArea,
max: Option<RefCell<f64>>,
keep_max: bool,
display_labels: RefCell<bool>,
initial_diff: Option<i32>,
label_callbacks: Option<Box<Fn(f64) -> [String; 4]>>,
labels_layout_width: i32,
}
impl Graph {
// If `max` is `None`, the graph will expect values between 0 and 1.
//
// If `keep_max` is set to `true`, then this value will never go down, meaning that graphs
// won't rescale down. It is not taken into account if `max` is `None`.
pub fn new(max: Option<f64>, keep_max: bool) -> Graph {
let g = Graph {
elapsed: Instant::now(),
colors: vec!(),
data: vec!(),
vertical_layout: gtk::Box::new(gtk::Orientation::Vertical, 0),
scroll_layout: gtk::ScrolledWindow::new(None, None),
horizontal_layout: gtk::Box::new(gtk::Orientation::Horizontal, 0),
area: DrawingArea::new(),
max: if let Some(max) = max { Some(RefCell::new(max)) } else { None },
keep_max,
display_labels: RefCell::new(true),
initial_diff: None,
label_callbacks: None,
labels_layout_width: 80,
};
g.scroll_layout.set_min_content_width(g.labels_layout_width);
g.scroll_layout.add(&g.vertical_layout);
g.horizontal_layout.pack_start(&g.area, true, true, 0);
g.horizontal_layout.pack_start(&g.scroll_layout, false, true, 10);
g.horizontal_layout.set_margin_left(5);
g
}
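// Illustrative sketch only (not part of the original widget code): constructing a
// graph that tracks percentages and shows labels on the right. `parent_box` is an
// assumed `gtk::Box` owned by the surrounding UI code.
//
// let mut graph = Graph::new(Some(100.), false);
// graph.set_labels_width(70);
// graph.set_label_callbacks(Some(Box::new(|max| [
// format!("{:.0}", max),
// format!("{:.0}", max / 2.),
// "0".to_string(),
// "%".to_string(),
// ])));
// graph.attach_to(&parent_box);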
/// Changes the size of the layout containing labels (the one on the right).
pub fn set_labels_width(&mut self, labels_layout_width: u32) {
self.scroll_layout.set_min_content_width(labels_layout_width as i32);
self.labels_layout_width = labels_layout_width as i32;
}
pub fn set_label_callbacks(&mut self, label_callbacks: Option<Box<Fn(f64) -> [String; 4]>>) {
self.label_callbacks = label_callbacks;
}
pub fn set_display_labels(&self, display_labels: bool) {
*self.display_labels.borrow_mut() = display_labels;
if display_labels == true {
self.scroll_layout.show_all();
} else {
self.scroll_layout.hide();
}
self.invalidate();
}
pub fn hide(&self) {
self.horizontal_layout.hide();
}
pub fn show_all(&self) {
self.horizontal_layout.show_all();
if *self.display_labels.borrow() == false {
self.scroll_layout.hide();
}
}
pub fn attach_to(&self, to: &gtk::Box) {
to.add(&self.horizontal_layout);
}
pub fn | (&mut self, d: RotateVec<f64>, s: &str, override_color: Option<usize>) {
let c = if let Some(over) = override_color {
Color::generate(over)
} else {
Color::generate(self.data.len() + 11)
};
let l = gtk::Label::new(Some(s));
l.override_color(StateFlags::from_bits(0).expect("from_bits failed"), &c.to_gdk());
self.vertical_layout.add(&l);
self.colors.push(c);
self.data.push(d);
}
fn draw_labels(&self, c: &cairo::Context, max: f64, height: f64) {
if let Some(ref call) = self.label_callbacks {
let entries = call(max);
let font_size = 8.;
c.set_source_rgb(0., 0., 0.);
c.set_font_size(font_size);
c.move_to(LEFT_WIDTH - 4. - entries[0].len() as f64 * 4., font_size);
c.show_text(entries[0].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[1].len() as f64 * 4., height / 2.);
c.show_text(entries[1].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[2].len() as f64 * 4., height - 2.);
c.show_text(entries[2].as_str());
c.move_to(font_size - 1., height / 2. + 4. * (entries[3].len() >> 1) as f64);
c.rotate(-1.5708);
c.show_text(entries[3].as_str());
}
}
pub fn draw(&self, c: &cairo::Context, width: f64, height: f64) {
let x_start = if self.label_callbacks.is_some() {
LEFT_WIDTH
} else {
1.0
};
c.set_source_rgb(0.95, 0.95, 0.95);
c.rectangle(x_start, 1.0, width - 1.0, height - 2.0);
c.fill();
c.set_source_rgb(0.0, 0.0, 0.0);
c.set_line_width(1.0);
c.move_to(x_start, 0.0);
c.line_to(x_start, height);
c.move_to(width, 0.0);
c.line_to(width, height);
c.move_to(x_start, 0.0);
c.line_to(width, 0.0);
c.move_to(x_start, height);
c.line_to(width, height);
// For now it's always 60 seconds.
let time = 60.;
let elapsed = self.elapsed.elapsed().as_secs() % 5;
let x_step = (width - 2.0 - x_start) * 5.0 / (time as f64);
let mut current = width - elapsed as f64 * (x_step / 5.0) - 1.0;
if x_step < 0.1 {
c.stroke();
return;
}
while current > x_start {
c.move_to(current, 0.0);
c.line_to(current, height);
current -= x_step;
}
let step = height / 10.0;
current = step - 1.0;
while current < height - 1. {
c.move_to(x_start, current);
c.line_to(width - 1.0, current);
current += step;
}
c.stroke();
if let Some(ref self_max) = self.max {
let mut max = if self.keep_max { *self_max.borrow() } else { 1. };
let len = self.data[0].len() - 1;
for x in 0..len {
for entry in &self.data {
if entry[x] > max {
max = entry[x];
}
}
}
if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / len as f64;
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] / max * (height - 1.0));
c.line_to(current, height - entry[index] / max * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
}
if max > *self_max.borrow() || !self.keep_max {
*self_max.borrow_mut() = max;
}
self.draw_labels(c, max, height);
} else if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / (len as f64);
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] * (height - 1.0));
c.line_to(current, height - entry[index] * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
// To be called in last to avoid having to restore state (rotation).
self.draw_labels(c, 100., height);
}
}
pub fn invalidate(&self) {
if let Some(t_win) = self.area.get_window() {
let (x, y) = self.area.translate_coordinates(&self.area, 0, 0)
.expect("translate_coordinates failed");
let rect = gdk::Rectangle { x: x, y: y,
width: self.area.get_allocated_width(), height: self.area.get_allocated_height() };
t_win.invalidate_rect(&rect, true);
}
}
pub fn send_size_request(&self, width: Option<i32>) {
let mut width = match width {
Some(w) => w,
None => {
if let Some(parent) = self.area.get_parent() {
parent.get_allocation().width -
parent.get_margin_left() - parent.get_margin_right()
} else {
eprintln!("<Graph::send_size_request> A parent is required if no width is \
provided...");
return;
}
}
};
// This condition is to avoid having a graph with a bigger width than the window.
if let Some(top) = self.area.get_toplevel() {
let max_width = top.get_allocation().width;
if width > max_width {
width = max_width;
}
}
self.area.set_size_request(
if *self.display_labels.borrow() == true {
width - if width >= self.labels_layout_width {
self.labels_layout_width
} else {
width
}
} else {
width
}, 200);
}
}
pub trait Connecter {
fn connect_to_window_events(&self);
}
impl Connecter for Rc<RefCell<Graph>> {
fn connect_to_window_events(&self) {
let s = self.clone();
if let Some(parent) = self.borrow().horizontal_layout.get_toplevel() {
// TODO: ugly way to resize drawing area, I should find a better way
parent.connect_configure_event(move |w, _| {
let need_diff = s.borrow().initial_diff.is_none();
if need_diff {
let mut s = s.borrow_mut();
let parent_width = if let Some(p) = s.area.get_parent() {
p.get_allocation().width
} else {
0
};
s.initial_diff = Some(w.get_allocation().width - parent_width);
}
s.borrow().send_size_request(None);
false
});
} else {
eprintln!("This method needs to be called *after* it has been put inside a window");
}
}
}
| push | identifier_name |
graph.rs | use cairo;
use gtk::{self, BoxExt, ContainerExt, DrawingArea, ScrolledWindowExt, StateFlags, WidgetExt};
use std::cell::RefCell;
use gdk::{self, WindowExt};
use std::rc::Rc;
use std::time::Instant;
use color::Color;
use utils::RotateVec;
const LEFT_WIDTH: f64 = 31.;
pub struct Graph {
elapsed: Instant,
colors: Vec<Color>,
pub data: Vec<RotateVec<f64>>,
vertical_layout: gtk::Box,
scroll_layout: gtk::ScrolledWindow,
horizontal_layout: gtk::Box,
pub area: DrawingArea,
max: Option<RefCell<f64>>,
keep_max: bool,
display_labels: RefCell<bool>,
initial_diff: Option<i32>,
label_callbacks: Option<Box<Fn(f64) -> [String; 4]>>,
labels_layout_width: i32,
}
impl Graph {
// If `max` is `None`, the graph will expect values between 0 and 1.
//
// If `keep_max` is set to `true`, then this value will never go down, meaning that graphs
// won't rescale down. It is not taken into account if `max` is `None`.
pub fn new(max: Option<f64>, keep_max: bool) -> Graph {
let g = Graph {
elapsed: Instant::now(),
colors: vec!(),
data: vec!(),
vertical_layout: gtk::Box::new(gtk::Orientation::Vertical, 0),
scroll_layout: gtk::ScrolledWindow::new(None, None),
horizontal_layout: gtk::Box::new(gtk::Orientation::Horizontal, 0),
area: DrawingArea::new(),
max: if let Some(max) = max { Some(RefCell::new(max)) } else { None },
keep_max,
display_labels: RefCell::new(true),
initial_diff: None,
label_callbacks: None,
labels_layout_width: 80,
};
g.scroll_layout.set_min_content_width(g.labels_layout_width);
g.scroll_layout.add(&g.vertical_layout);
g.horizontal_layout.pack_start(&g.area, true, true, 0);
g.horizontal_layout.pack_start(&g.scroll_layout, false, true, 10);
g.horizontal_layout.set_margin_left(5);
g
}
/// Changes the size of the layout containing labels (the one on the right).
pub fn set_labels_width(&mut self, labels_layout_width: u32) {
self.scroll_layout.set_min_content_width(labels_layout_width as i32);
self.labels_layout_width = labels_layout_width as i32;
}
pub fn set_label_callbacks(&mut self, label_callbacks: Option<Box<Fn(f64) -> [String; 4]>>) {
self.label_callbacks = label_callbacks;
}
pub fn set_display_labels(&self, display_labels: bool) {
*self.display_labels.borrow_mut() = display_labels;
if display_labels == true {
self.scroll_layout.show_all();
} else {
self.scroll_layout.hide();
}
self.invalidate();
}
pub fn hide(&self) {
self.horizontal_layout.hide();
}
pub fn show_all(&self) {
self.horizontal_layout.show_all();
if *self.display_labels.borrow() == false {
self.scroll_layout.hide();
}
}
pub fn attach_to(&self, to: &gtk::Box) {
to.add(&self.horizontal_layout);
}
pub fn push(&mut self, d: RotateVec<f64>, s: &str, override_color: Option<usize>) {
let c = if let Some(over) = override_color {
Color::generate(over)
} else {
Color::generate(self.data.len() + 11)
};
let l = gtk::Label::new(Some(s));
l.override_color(StateFlags::from_bits(0).expect("from_bits failed"), &c.to_gdk());
self.vertical_layout.add(&l);
self.colors.push(c);
self.data.push(d);
}
fn draw_labels(&self, c: &cairo::Context, max: f64, height: f64) {
if let Some(ref call) = self.label_callbacks {
let entries = call(max);
let font_size = 8.;
c.set_source_rgb(0., 0., 0.);
c.set_font_size(font_size);
c.move_to(LEFT_WIDTH - 4. - entries[0].len() as f64 * 4., font_size);
c.show_text(entries[0].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[1].len() as f64 * 4., height / 2.);
c.show_text(entries[1].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[2].len() as f64 * 4., height - 2.);
c.show_text(entries[2].as_str());
c.move_to(font_size - 1., height / 2. + 4. * (entries[3].len() >> 1) as f64);
c.rotate(-1.5708);
c.show_text(entries[3].as_str());
}
}
pub fn draw(&self, c: &cairo::Context, width: f64, height: f64) {
let x_start = if self.label_callbacks.is_some() {
LEFT_WIDTH
} else {
1.0
};
c.set_source_rgb(0.95, 0.95, 0.95);
c.rectangle(x_start, 1.0, width - 1.0, height - 2.0);
c.fill();
c.set_source_rgb(0.0, 0.0, 0.0);
c.set_line_width(1.0);
c.move_to(x_start, 0.0);
c.line_to(x_start, height);
c.move_to(width, 0.0);
c.line_to(width, height);
c.move_to(x_start, 0.0);
c.line_to(width, 0.0);
c.move_to(x_start, height);
c.line_to(width, height);
// For now it's always 60 seconds.
let time = 60.;
let elapsed = self.elapsed.elapsed().as_secs() % 5;
let x_step = (width - 2.0 - x_start) * 5.0 / (time as f64);
let mut current = width - elapsed as f64 * (x_step / 5.0) - 1.0;
if x_step < 0.1 {
c.stroke();
return;
}
while current > x_start {
c.move_to(current, 0.0);
c.line_to(current, height);
current -= x_step;
}
let step = height / 10.0;
current = step - 1.0;
while current < height - 1. {
c.move_to(x_start, current);
c.line_to(width - 1.0, current);
current += step;
}
c.stroke();
if let Some(ref self_max) = self.max {
let mut max = if self.keep_max { *self_max.borrow() } else { 1. };
let len = self.data[0].len() - 1;
for x in 0..len {
for entry in &self.data {
if entry[x] > max {
max = entry[x];
}
}
}
if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / len as f64;
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] / max * (height - 1.0));
c.line_to(current, height - entry[index] / max * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
}
if max > *self_max.borrow() || !self.keep_max {
*self_max.borrow_mut() = max;
}
self.draw_labels(c, max, height);
} else if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / (len as f64);
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] * (height - 1.0));
c.line_to(current, height - entry[index] * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
// To be called in last to avoid having to restore state (rotation).
self.draw_labels(c, 100., height);
} | pub fn invalidate(&self) {
if let Some(t_win) = self.area.get_window() {
let (x, y) = self.area.translate_coordinates(&self.area, 0, 0)
.expect("translate_coordinates failed");
let rect = gdk::Rectangle { x: x, y: y,
width: self.area.get_allocated_width(), height: self.area.get_allocated_height() };
t_win.invalidate_rect(&rect, true);
}
}
pub fn send_size_request(&self, width: Option<i32>) {
let mut width = match width {
Some(w) => w,
None => {
if let Some(parent) = self.area.get_parent() {
parent.get_allocation().width -
parent.get_margin_left() - parent.get_margin_right()
} else {
eprintln!("<Graph::send_size_request> A parent is required if no width is \
provided...");
return;
}
}
};
// This condition is to avoid having a graph with a bigger width than the window.
if let Some(top) = self.area.get_toplevel() {
let max_width = top.get_allocation().width;
if width > max_width {
width = max_width;
}
}
self.area.set_size_request(
if *self.display_labels.borrow() == true {
width - if width >= self.labels_layout_width {
self.labels_layout_width
} else {
width
}
} else {
width
}, 200);
}
}
pub trait Connecter {
fn connect_to_window_events(&self);
}
impl Connecter for Rc<RefCell<Graph>> {
fn connect_to_window_events(&self) {
let s = self.clone();
if let Some(parent) = self.borrow().horizontal_layout.get_toplevel() {
// TODO: ugly way to resize drawing area, I should find a better way
parent.connect_configure_event(move |w, _| {
let need_diff = s.borrow().initial_diff.is_none();
if need_diff {
let mut s = s.borrow_mut();
let parent_width = if let Some(p) = s.area.get_parent() {
p.get_allocation().width
} else {
0
};
s.initial_diff = Some(w.get_allocation().width - parent_width);
}
s.borrow().send_size_request(None);
false
});
} else {
eprintln!("This method needs to be called *after* it has been put inside a window");
}
}
} | }
| random_line_split |
graph.rs | use cairo;
use gtk::{self, BoxExt, ContainerExt, DrawingArea, ScrolledWindowExt, StateFlags, WidgetExt};
use std::cell::RefCell;
use gdk::{self, WindowExt};
use std::rc::Rc;
use std::time::Instant;
use color::Color;
use utils::RotateVec;
const LEFT_WIDTH: f64 = 31.;
pub struct Graph {
elapsed: Instant,
colors: Vec<Color>,
pub data: Vec<RotateVec<f64>>,
vertical_layout: gtk::Box,
scroll_layout: gtk::ScrolledWindow,
horizontal_layout: gtk::Box,
pub area: DrawingArea,
max: Option<RefCell<f64>>,
keep_max: bool,
display_labels: RefCell<bool>,
initial_diff: Option<i32>,
label_callbacks: Option<Box<Fn(f64) -> [String; 4]>>,
labels_layout_width: i32,
}
impl Graph {
// If `max` is `None`, the graph will expect values between 0 and 1.
//
// If `keep_max` is set to `true`, then this value will never go down, meaning that graphs
// won't rescale down. It is not taken into account if `max` is `None`.
pub fn new(max: Option<f64>, keep_max: bool) -> Graph {
let g = Graph {
elapsed: Instant::now(),
colors: vec!(),
data: vec!(),
vertical_layout: gtk::Box::new(gtk::Orientation::Vertical, 0),
scroll_layout: gtk::ScrolledWindow::new(None, None),
horizontal_layout: gtk::Box::new(gtk::Orientation::Horizontal, 0),
area: DrawingArea::new(),
max: if let Some(max) = max { Some(RefCell::new(max)) } else { None },
keep_max,
display_labels: RefCell::new(true),
initial_diff: None,
label_callbacks: None,
labels_layout_width: 80,
};
g.scroll_layout.set_min_content_width(g.labels_layout_width);
g.scroll_layout.add(&g.vertical_layout);
g.horizontal_layout.pack_start(&g.area, true, true, 0);
g.horizontal_layout.pack_start(&g.scroll_layout, false, true, 10);
g.horizontal_layout.set_margin_left(5);
g
}
/// Changes the size of the layout containing labels (the one on the right).
pub fn set_labels_width(&mut self, labels_layout_width: u32) {
self.scroll_layout.set_min_content_width(labels_layout_width as i32);
self.labels_layout_width = labels_layout_width as i32;
}
pub fn set_label_callbacks(&mut self, label_callbacks: Option<Box<Fn(f64) -> [String; 4]>>) {
self.label_callbacks = label_callbacks;
}
pub fn set_display_labels(&self, display_labels: bool) {
*self.display_labels.borrow_mut() = display_labels;
if display_labels == true {
self.scroll_layout.show_all();
} else {
self.scroll_layout.hide();
}
self.invalidate();
}
pub fn hide(&self) {
self.horizontal_layout.hide();
}
pub fn show_all(&self) {
self.horizontal_layout.show_all();
if *self.display_labels.borrow() == false {
self.scroll_layout.hide();
}
}
pub fn attach_to(&self, to: &gtk::Box) {
to.add(&self.horizontal_layout);
}
pub fn push(&mut self, d: RotateVec<f64>, s: &str, override_color: Option<usize>) {
let c = if let Some(over) = override_color {
Color::generate(over)
} else {
Color::generate(self.data.len() + 11)
};
let l = gtk::Label::new(Some(s));
l.override_color(StateFlags::from_bits(0).expect("from_bits failed"), &c.to_gdk());
self.vertical_layout.add(&l);
self.colors.push(c);
self.data.push(d);
}
fn draw_labels(&self, c: &cairo::Context, max: f64, height: f64) {
if let Some(ref call) = self.label_callbacks {
let entries = call(max);
let font_size = 8.;
c.set_source_rgb(0., 0., 0.);
c.set_font_size(font_size);
c.move_to(LEFT_WIDTH - 4. - entries[0].len() as f64 * 4., font_size);
c.show_text(entries[0].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[1].len() as f64 * 4., height / 2.);
c.show_text(entries[1].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[2].len() as f64 * 4., height - 2.);
c.show_text(entries[2].as_str());
c.move_to(font_size - 1., height / 2. + 4. * (entries[3].len() >> 1) as f64);
c.rotate(-1.5708);
c.show_text(entries[3].as_str());
}
}
pub fn draw(&self, c: &cairo::Context, width: f64, height: f64) {
let x_start = if self.label_callbacks.is_some() {
LEFT_WIDTH
} else {
1.0
};
c.set_source_rgb(0.95, 0.95, 0.95);
c.rectangle(x_start, 1.0, width - 1.0, height - 2.0);
c.fill();
c.set_source_rgb(0.0, 0.0, 0.0);
c.set_line_width(1.0);
c.move_to(x_start, 0.0);
c.line_to(x_start, height);
c.move_to(width, 0.0);
c.line_to(width, height);
c.move_to(x_start, 0.0);
c.line_to(width, 0.0);
c.move_to(x_start, height);
c.line_to(width, height);
// For now it's always 60 seconds.
let time = 60.;
let elapsed = self.elapsed.elapsed().as_secs() % 5;
let x_step = (width - 2.0 - x_start) * 5.0 / (time as f64);
let mut current = width - elapsed as f64 * (x_step / 5.0) - 1.0;
if x_step < 0.1 {
c.stroke();
return;
}
while current > x_start {
c.move_to(current, 0.0);
c.line_to(current, height);
current -= x_step;
}
let step = height / 10.0;
current = step - 1.0;
while current < height - 1. {
c.move_to(x_start, current);
c.line_to(width - 1.0, current);
current += step;
}
c.stroke();
if let Some(ref self_max) = self.max {
let mut max = if self.keep_max { *self_max.borrow() } else { 1. };
let len = self.data[0].len() - 1;
for x in 0..len {
for entry in &self.data {
if entry[x] > max {
max = entry[x];
}
}
}
if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / len as f64;
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] / max * (height - 1.0));
c.line_to(current, height - entry[index] / max * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
}
if max > *self_max.borrow() || !self.keep_max {
*self_max.borrow_mut() = max;
}
self.draw_labels(c, max, height);
} else if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / (len as f64);
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] * (height - 1.0));
c.line_to(current, height - entry[index] * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
// To be called in last to avoid having to restore state (rotation).
self.draw_labels(c, 100., height);
}
}
pub fn invalidate(&self) {
if let Some(t_win) = self.area.get_window() {
let (x, y) = self.area.translate_coordinates(&self.area, 0, 0)
.expect("translate_coordinates failed");
let rect = gdk::Rectangle { x: x, y: y,
width: self.area.get_allocated_width(), height: self.area.get_allocated_height() };
t_win.invalidate_rect(&rect, true);
}
}
pub fn send_size_request(&self, width: Option<i32>) {
let mut width = match width {
Some(w) => w,
None => {
if let Some(parent) = self.area.get_parent() {
parent.get_allocation().width -
parent.get_margin_left() - parent.get_margin_right()
} else {
eprintln!("<Graph::send_size_request> A parent is required if no width is \
provided...");
return;
}
}
};
// This condition is to avoid having a graph with a bigger width than the window.
if let Some(top) = self.area.get_toplevel() {
let max_width = top.get_allocation().width;
if width > max_width {
width = max_width;
}
}
self.area.set_size_request(
if *self.display_labels.borrow() == true {
width - if width >= self.labels_layout_width {
self.labels_layout_width
} else {
width
}
} else {
width
}, 200);
}
}
pub trait Connecter {
fn connect_to_window_events(&self);
}
impl Connecter for Rc<RefCell<Graph>> {
fn connect_to_window_events(&self) |
}
| {
let s = self.clone();
if let Some(parent) = self.borrow().horizontal_layout.get_toplevel() {
// TODO: ugly way to resize drawing area, I should find a better way
parent.connect_configure_event(move |w, _| {
let need_diff = s.borrow().initial_diff.is_none();
if need_diff {
let mut s = s.borrow_mut();
let parent_width = if let Some(p) = s.area.get_parent() {
p.get_allocation().width
} else {
0
};
s.initial_diff = Some(w.get_allocation().width - parent_width);
}
s.borrow().send_size_request(None);
false
});
} else {
eprintln!("This method needs to be called *after* it has been put inside a window");
}
} | identifier_body |
graph.rs | use cairo;
use gtk::{self, BoxExt, ContainerExt, DrawingArea, ScrolledWindowExt, StateFlags, WidgetExt};
use std::cell::RefCell;
use gdk::{self, WindowExt};
use std::rc::Rc;
use std::time::Instant;
use color::Color;
use utils::RotateVec;
const LEFT_WIDTH: f64 = 31.;
pub struct Graph {
elapsed: Instant,
colors: Vec<Color>,
pub data: Vec<RotateVec<f64>>,
vertical_layout: gtk::Box,
scroll_layout: gtk::ScrolledWindow,
horizontal_layout: gtk::Box,
pub area: DrawingArea,
max: Option<RefCell<f64>>,
keep_max: bool,
display_labels: RefCell<bool>,
initial_diff: Option<i32>,
label_callbacks: Option<Box<Fn(f64) -> [String; 4]>>,
labels_layout_width: i32,
}
impl Graph {
// If `max` is `None`, the graph will expect values between 0 and 1.
//
// If `keep_max` is set to `true`, then this value will never go down, meaning that graphs
// won't rescale down. It is not taken into account if `max` is `None`.
pub fn new(max: Option<f64>, keep_max: bool) -> Graph {
let g = Graph {
elapsed: Instant::now(),
colors: vec!(),
data: vec!(),
vertical_layout: gtk::Box::new(gtk::Orientation::Vertical, 0),
scroll_layout: gtk::ScrolledWindow::new(None, None),
horizontal_layout: gtk::Box::new(gtk::Orientation::Horizontal, 0),
area: DrawingArea::new(),
max: if let Some(max) = max { Some(RefCell::new(max)) } else { None },
keep_max,
display_labels: RefCell::new(true),
initial_diff: None,
label_callbacks: None,
labels_layout_width: 80,
};
g.scroll_layout.set_min_content_width(g.labels_layout_width);
g.scroll_layout.add(&g.vertical_layout);
g.horizontal_layout.pack_start(&g.area, true, true, 0);
g.horizontal_layout.pack_start(&g.scroll_layout, false, true, 10);
g.horizontal_layout.set_margin_left(5);
g
}
/// Changes the size of the layout containing labels (the one on the right).
pub fn set_labels_width(&mut self, labels_layout_width: u32) {
self.scroll_layout.set_min_content_width(labels_layout_width as i32);
self.labels_layout_width = labels_layout_width as i32;
}
pub fn set_label_callbacks(&mut self, label_callbacks: Option<Box<Fn(f64) -> [String; 4]>>) {
self.label_callbacks = label_callbacks;
}
pub fn set_display_labels(&self, display_labels: bool) {
*self.display_labels.borrow_mut() = display_labels;
if display_labels == true {
self.scroll_layout.show_all();
} else {
self.scroll_layout.hide();
}
self.invalidate();
}
pub fn hide(&self) {
self.horizontal_layout.hide();
}
pub fn show_all(&self) {
self.horizontal_layout.show_all();
if *self.display_labels.borrow() == false {
self.scroll_layout.hide();
}
}
pub fn attach_to(&self, to: &gtk::Box) {
to.add(&self.horizontal_layout);
}
pub fn push(&mut self, d: RotateVec<f64>, s: &str, override_color: Option<usize>) {
let c = if let Some(over) = override_color {
Color::generate(over)
} else {
Color::generate(self.data.len() + 11)
};
let l = gtk::Label::new(Some(s));
l.override_color(StateFlags::from_bits(0).expect("from_bits failed"), &c.to_gdk());
self.vertical_layout.add(&l);
self.colors.push(c);
self.data.push(d);
}
fn draw_labels(&self, c: &cairo::Context, max: f64, height: f64) {
if let Some(ref call) = self.label_callbacks {
let entries = call(max);
let font_size = 8.;
c.set_source_rgb(0., 0., 0.);
c.set_font_size(font_size);
c.move_to(LEFT_WIDTH - 4. - entries[0].len() as f64 * 4., font_size);
c.show_text(entries[0].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[1].len() as f64 * 4., height / 2.);
c.show_text(entries[1].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[2].len() as f64 * 4., height - 2.);
c.show_text(entries[2].as_str());
c.move_to(font_size - 1., height / 2. + 4. * (entries[3].len() >> 1) as f64);
c.rotate(-1.5708);
c.show_text(entries[3].as_str());
}
}
pub fn draw(&self, c: &cairo::Context, width: f64, height: f64) {
let x_start = if self.label_callbacks.is_some() {
LEFT_WIDTH
} else {
1.0
};
c.set_source_rgb(0.95, 0.95, 0.95);
c.rectangle(x_start, 1.0, width - 1.0, height - 2.0);
c.fill();
c.set_source_rgb(0.0, 0.0, 0.0);
c.set_line_width(1.0);
c.move_to(x_start, 0.0);
c.line_to(x_start, height);
c.move_to(width, 0.0);
c.line_to(width, height);
c.move_to(x_start, 0.0);
c.line_to(width, 0.0);
c.move_to(x_start, height);
c.line_to(width, height);
// For now it's always 60 seconds.
let time = 60.;
let elapsed = self.elapsed.elapsed().as_secs() % 5;
let x_step = (width - 2.0 - x_start) * 5.0 / (time as f64);
let mut current = width - elapsed as f64 * (x_step / 5.0) - 1.0;
if x_step < 0.1 {
c.stroke();
return;
}
while current > x_start {
c.move_to(current, 0.0);
c.line_to(current, height);
current -= x_step;
}
let step = height / 10.0;
current = step - 1.0;
while current < height - 1. {
c.move_to(x_start, current);
c.line_to(width - 1.0, current);
current += step;
}
c.stroke();
if let Some(ref self_max) = self.max {
let mut max = if self.keep_max { *self_max.borrow() } else { 1. };
let len = self.data[0].len() - 1;
for x in 0..len {
for entry in &self.data {
if entry[x] > max {
max = entry[x];
}
}
}
if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / len as f64;
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] / max * (height - 1.0));
c.line_to(current, height - entry[index] / max * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
}
if max > *self_max.borrow() || !self.keep_max {
*self_max.borrow_mut() = max;
}
self.draw_labels(c, max, height);
} else if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / (len as f64);
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] * (height - 1.0));
c.line_to(current, height - entry[index] * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
// To be called in last to avoid having to restore state (rotation).
self.draw_labels(c, 100., height);
}
}
pub fn invalidate(&self) {
if let Some(t_win) = self.area.get_window() {
let (x, y) = self.area.translate_coordinates(&self.area, 0, 0)
.expect("translate_coordinates failed");
let rect = gdk::Rectangle { x: x, y: y,
width: self.area.get_allocated_width(), height: self.area.get_allocated_height() };
t_win.invalidate_rect(&rect, true);
}
}
pub fn send_size_request(&self, width: Option<i32>) {
let mut width = match width {
Some(w) => w,
None => {
if let Some(parent) = self.area.get_parent() {
parent.get_allocation().width -
parent.get_margin_left() - parent.get_margin_right()
} else {
eprintln!("<Graph::send_size_request> A parent is required if no width is \
provided...");
return;
}
}
};
// This condition is to avoid having a graph with a bigger width than the window.
if let Some(top) = self.area.get_toplevel() {
let max_width = top.get_allocation().width;
if width > max_width {
width = max_width;
}
}
self.area.set_size_request(
if *self.display_labels.borrow() == true {
width - if width >= self.labels_layout_width {
self.labels_layout_width
} else {
width
}
} else {
width
}, 200);
}
}
pub trait Connecter {
fn connect_to_window_events(&self);
}
impl Connecter for Rc<RefCell<Graph>> {
fn connect_to_window_events(&self) {
let s = self.clone();
if let Some(parent) = self.borrow().horizontal_layout.get_toplevel() | else {
eprintln!("This method needs to be called *after* it has been put inside a window");
}
}
}
| {
// TODO: ugly way to resize drawing area, I should find a better way
parent.connect_configure_event(move |w, _| {
let need_diff = s.borrow().initial_diff.is_none();
if need_diff {
let mut s = s.borrow_mut();
let parent_width = if let Some(p) = s.area.get_parent() {
p.get_allocation().width
} else {
0
};
s.initial_diff = Some(w.get_allocation().width - parent_width);
}
s.borrow().send_size_request(None);
false
});
} | conditional_block |
train_and_deploy.py | import argparse
from collections import OrderedDict
import json
import numpy as np
import sys
import os
import pandas as pd
import random
from sklearn.metrics import accuracy_score
from tqdm import tqdm
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import transformers
from transformers import BertJapaneseTokenizer
from transformers import BertForSequenceClassification
from transformers import AdamW, get_linear_schedule_with_warmup
from EarlyStopping import EarlyStopping
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)  # module-level logger used below; it was referenced but never defined
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
use_cuda = torch.cuda.is_available()  # referenced in train() to decide on manual gradient averaging
MAX_SEQUENCE_LENGTH=128
tokenizer = BertJapaneseTokenizer.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking')
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count() > 1:
torch.cuda.manual_seed_all(seed)
else:
|
torch.backends.cudnn.deterministic = True
# Converting the lines to BERT format
# Thanks to https://www.kaggle.com/httpwwwfszyc/bert-in-keras-taming
def convert_lines(example, max_seq_length, tokenizer):
max_seq_length -= 2
all_tokens = []
longer = 0
for text in tqdm(example):
tokens_a = tokenizer.tokenize(text)
if len(tokens_a) > max_seq_length:
tokens_a = tokens_a[:max_seq_length]
longer += 1
one_token = tokenizer.convert_tokens_to_ids(["[CLS]"]+tokens_a+["[SEP]"])+[0] * (max_seq_length - len(tokens_a))
all_tokens.append(one_token)
return np.array(all_tokens)
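# Illustrative usage sketch (commented out; the two review strings are made up):
# sample_ids = convert_lines(pd.Series(["とても良い商品です", "最悪でした"]), MAX_SEQUENCE_LENGTH, tokenizer)
# sample_ids.shape  # -> (2, MAX_SEQUENCE_LENGTH); each row is [CLS] + token ids + [SEP] + zero padding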
def loss_fn(preds, labels):
preds = preds.view(-1)
labels = labels.view(-1)
assert(preds.shape == labels.shape)
loss = nn.BCEWithLogitsLoss()(preds, labels)
return loss
def _average_gradients(model):
# Gradient averaging.
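# Each parameter's gradient is summed across all workers with all_reduce and then
# divided by the world size, so every worker steps with the same averaged gradient.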
size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)
param.grad.data /= size
def train(train_loader, model, optimizer, is_distributed):
model.train()
avg_loss = 0.
avg_accuracy = 0.
tk0 = tqdm(enumerate(train_loader), total=len(train_loader), leave=False)
optimizer.zero_grad()
for i, (x_batch, y_batch) in tk0:
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
loss.backward()
if is_distributed and not use_cuda:
# average gradients manually for multi-machine cpu case only
_average_gradients(model)
optimizer.step()
optimizer.zero_grad()
avg_loss += loss.item() / len(train_loader)
avg_accuracy += torch.mean(
((torch.sigmoid(y_pred[0]) >= 0.5) == (y_batch >= 0.5).to(DEVICE)).to(torch.float)).item() / len(train_loader)
tk0.set_postfix(loss=loss.item(), avg_loss=avg_loss)
log = OrderedDict([('avg_loss', avg_loss), ('avg_acc', avg_accuracy)])
tk0.close()
return log
# Run validation
def evaluate(valid_loader, model):
model.eval()
avg_loss = 0.
valid_preds = []
valid_trues = []
with torch.no_grad():
tk0 = tqdm(valid_loader)
for i, (x_batch, y_batch) in enumerate(tk0):
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
avg_loss += loss.item() / len(valid_loader)
outputs_np = torch.sigmoid(y_pred[0]).cpu().detach().numpy()
targets_np = y_batch.unsqueeze(1).numpy()
valid_preds.append(outputs_np)
valid_trues.append(targets_np)
valid_preds = np.vstack(valid_preds)
valid_trues = np.vstack(valid_trues)
acc = accuracy_score((valid_trues >= 0.5), (valid_preds >= 0.5))
val_log = OrderedDict([('val_loss', avg_loss), ('val_acc', acc)])
tk0.close()
return val_log
if __name__ == '__main__':
# Receive hyperparameters passed via create-training-job API
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--learning-rate', type=float, default=5e-6)
parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
parser.add_argument('--backend', type=str, default=None,
help='backend for distributed training (tcp, gloo on cpu and gloo, nccl on gpu)')
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--val', type=str, default=os.environ.get('SM_CHANNEL_VAL'))
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
args = parser.parse_args()
# Set hyperparameters after parsing the arguments
batch_size = args.batch_size
lr = args.learning_rate
num_epochs = args.epochs
current_host = args.current_host
hosts = args.hosts
model_dir = args.model_dir
training_dir = args.train
val_dir = args.val
#is_distributed = len(args.hosts) > 1 and args.backend is not None
is_distributed = len(args.hosts) > 1 and args.backend is not None
if is_distributed:
# Initialize the distributed environment.
world_size = len(args.hosts)
os.environ['WORLD_SIZE'] = str(world_size)
host_rank = args.hosts.index(args.current_host)
os.environ['RANK'] = str(host_rank)
dist.init_process_group(backend=args.backend, rank=host_rank, world_size=world_size)
logger.info('Initialized the distributed environment: \'{}\' backend on {} nodes. '.format(
args.backend, dist.get_world_size()) + 'Current host rank is {}. Number of gpus: {}'.format(
dist.get_rank(), args.num_gpus))
# fix seed
seed_torch()
# Data loading
train_df = pd.read_csv(os.path.join(training_dir, 'train.tsv'), sep ='\t')
valid_df = pd.read_csv(os.path.join(val_dir, 'valid.tsv'), sep ='\t')
# convert BERT dataset
tr_sequences = convert_lines(train_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH, tokenizer)
train_dataset = torch.utils.data.TensorDataset(torch.tensor(tr_sequences, dtype=torch.long),
torch.tensor(train_df['star_rating'].values, dtype=torch.float))
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True)
val_sequences = convert_lines(valid_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH,
tokenizer)
valid_dataset = torch.utils.data.TensorDataset(torch.tensor(val_sequences, dtype=torch.long),
torch.tensor(valid_df['star_rating'].values, dtype=torch.float))
valid_loader = torch.utils.data.DataLoader(valid_dataset,
batch_size=batch_size,
shuffle=False)
# Load pre-trained bert model
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model.zero_grad()
model = model.to(DEVICE)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=lr, eps=1e-8)
if is_distributed and DEVICE != 'cpu':
# multi-machine multi-gpu case
model = nn.parallel.DistributedDataParallel(model)
else:
# single-machine multi-gpu case or single-machine or multi-machine cpu case
model = nn.DataParallel(model)
es = EarlyStopping(patience=5, mode="max")
path = os.path.join(args.model_dir, 'model.pth')
for epoch in range(num_epochs):
log = train(train_loader, model, optimizer, is_distributed)
val_log = evaluate(valid_loader, model)
es(val_log["val_acc"], model, model_path=path)
if es.early_stop:
logger.info("Early stopping")
break
def model_fn(model_dir):
"""
Load the PyTorch model. Called once when the hosting service starts.
:param: model_dir The directory where model files are stored.
:return: a dict holding the loaded BERT model and its tokenizer
"""
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model = torch.nn.DataParallel(model)
with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
model.load_state_dict(torch.load(f))
return {"net": model, "tokenizer": tokenizer}
def transform_fn(net, data, input_content_type, output_content_type):
"""
	Transform a request using the PyTorch model. Called once per request.
	:param net: dict holding the model ("net") and the tokenizer, as returned by model_fn.
:param data: The request payload.
:param input_content_type: The request content type.
:param output_content_type: The (desired) response content type.
:return: response payload and content type.
"""
# we can use content types to vary input/output handling, but
# here we just assume json for both
model = net["net"]
tokenizer = net["tokenizer"]
model.to(DEVICE)
# Assume one line of text
parsed = json.loads(data)
logging.info("Received_data: {}".format(parsed))
parsed = tokenizer.tokenize(parsed)
#added by manome
if len(parsed) > MAX_SEQUENCE_LENGTH:
parsed = parsed[:MAX_SEQUENCE_LENGTH-2]
logging.info("Tokens: {}".format(parsed))
#x_batch = tokenizer.convert_tokens_to_ids(["[CLS]"]+parsed+["[SEP]"])+[0] * (MAX_SEQUENCE_LENGTH - len(parsed) - 2)
x_batch = tokenizer.convert_tokens_to_ids(["[CLS]"]+parsed+["[SEP]"]) # do not zero padding
x_batch = torch.LongTensor(x_batch).unsqueeze(0)
model.eval()
with torch.no_grad():
output = model(x_batch.to(DEVICE),
attention_mask=(x_batch>0).to(DEVICE),
labels=None)
response_body = json.dumps(torch.sigmoid(output[0]).cpu().detach().numpy().tolist()[0])
return response_body, output_content_type
| torch.cuda.manual_seed(seed) | conditional_block |
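# --- Illustrative sketch (not part of the original script) ---
# Shows how the SageMaker serving contract above (model_fn + transform_fn)
# could be exercised locally before deploying. "./model_artifacts" is a
# hypothetical directory that must already contain the model.pth written by
# the training loop, and the local EarlyStopping helper must be importable
# for train_and_deploy to import cleanly.
import json

from train_and_deploy import model_fn, transform_fn

def local_smoke_test(model_dir="./model_artifacts"):
    # model_fn returns {"net": model, "tokenizer": tokenizer}
    net = model_fn(model_dir)
    # transform_fn expects a JSON-encoded string payload and returns (body, content_type)
    payload = json.dumps("この商品はとても良かったです")
    body, content_type = transform_fn(net, payload, "application/json", "application/json")
    print(content_type, body)
# Call local_smoke_test() from a separate script or notebook; importing this
# module already downloads the pretrained tokenizer, so keep it off the hot path.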
train_and_deploy.py | import argparse
from collections import OrderedDict
import json
import numpy as np
import sys
import os
import pandas as pd
import random
from sklearn.metrics import accuracy_score
from tqdm import tqdm
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import transformers
from transformers import BertJapaneseTokenizer
from transformers import BertForSequenceClassification
from transformers import AdamW, get_linear_schedule_with_warmup
from EarlyStopping import EarlyStopping
import logging
logging.basicConfig(level=logging.INFO)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MAX_SEQUENCE_LENGTH=128
tokenizer = BertJapaneseTokenizer.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking')
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count() > 1:
torch.cuda.manual_seed_all(seed)
else:
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
# Converting the lines to BERT format
# Thanks to https://www.kaggle.com/httpwwwfszyc/bert-in-keras-taming
def convert_lines(example, max_seq_length, tokenizer):
max_seq_length -= 2
all_tokens = []
longer = 0
for text in tqdm(example):
tokens_a = tokenizer.tokenize(text)
if len(tokens_a) > max_seq_length:
tokens_a = tokens_a[:max_seq_length]
longer += 1
one_token = tokenizer.convert_tokens_to_ids(["[CLS]"]+tokens_a+["[SEP]"])+[0] * (max_seq_length - len(tokens_a))
all_tokens.append(one_token)
return np.array(all_tokens)
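# --- Illustrative sketch (not part of the original script) ---
# convert_lines above reserves two positions for [CLS]/[SEP], truncates longer
# inputs, and right-pads shorter ones with 0 so every row has exactly
# max_seq_length ids. The stub tokenizer below is a stand-in used only to make
# that fixed-width layout visible; the real script uses BertJapaneseTokenizer.
class _StubTokenizer:
    def tokenize(self, text):
        return text.split()
    def convert_tokens_to_ids(self, tokens):
        special = {"[CLS]": 2, "[SEP]": 3}  # fake vocabulary ids
        return [special.get(tok, 100 + i) for i, tok in enumerate(tokens)]

_rows = convert_lines(["short text", "a much longer sentence that will surely be truncated here"],
                      max_seq_length=8, tokenizer=_StubTokenizer())
assert _rows.shape == (2, 8)                 # every row padded/truncated to max_seq_length
assert list(_rows[0][-4:]) == [0, 0, 0, 0]   # the short input is zero-padded on the right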
def loss_fn(preds, labels):
preds = preds.view(-1)
labels = labels.view(-1)
assert(preds.shape == labels.shape)
loss = nn.BCEWithLogitsLoss()(preds, labels)
return loss
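# --- Illustrative sketch (not part of the original script) ---
# loss_fn above feeds the raw logit straight into BCEWithLogitsLoss, which is
# the numerically safer equivalent of sigmoid followed by BCELoss. Quick check
# on toy values (targets must lie in [0, 1]):
import torch
import torch.nn as nn

_logits = torch.tensor([0.3, -1.2, 2.0])
_targets = torch.tensor([1.0, 0.0, 1.0])
assert torch.allclose(nn.BCEWithLogitsLoss()(_logits, _targets),
                      nn.BCELoss()(torch.sigmoid(_logits), _targets), atol=1e-6)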
def _average_gradients(model):
# Gradient averaging.
|
def train(train_loader, model, optimizer, is_distributed):
model.train()
avg_loss = 0.
avg_accuracy = 0.
tk0 = tqdm(enumerate(train_loader), total=len(train_loader), leave=False)
optimizer.zero_grad()
for i, (x_batch, y_batch) in tk0:
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
loss.backward()
		if is_distributed and not torch.cuda.is_available():
# average gradients manually for multi-machine cpu case only
_average_gradients(model)
optimizer.step()
optimizer.zero_grad()
avg_loss += loss.item() / len(train_loader)
avg_accuracy += torch.mean(
((torch.sigmoid(y_pred[0]) >= 0.5) == (y_batch >= 0.5).to(DEVICE)).to(torch.float)).item() / len(train_loader)
tk0.set_postfix(loss=loss.item(), avg_loss=avg_loss)
log = OrderedDict([('avg_loss', avg_loss), ('avg_acc', avg_accuracy)])
tk0.close()
return log
# Run validation
def evaluate(valid_loader, model):
model.eval()
avg_loss = 0.
valid_preds = []
valid_trues = []
with torch.no_grad():
tk0 = tqdm(valid_loader)
for i, (x_batch, y_batch) in enumerate(tk0):
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
avg_loss += loss.item() / len(valid_loader)
outputs_np = torch.sigmoid(y_pred[0]).cpu().detach().numpy()
targets_np = y_batch.unsqueeze(1).numpy()
valid_preds.append(outputs_np)
valid_trues.append(targets_np)
valid_preds = np.vstack(valid_preds)
valid_trues = np.vstack(valid_trues)
acc = accuracy_score((valid_trues >= 0.5), (valid_preds >= 0.5))
val_log = OrderedDict([('val_loss', avg_loss), ('val_acc', acc)])
tk0.close()
return val_log
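# --- Illustrative sketch (not part of the original script) ---
# evaluate() thresholds both predictions and targets at 0.5 before calling
# accuracy_score, so val_acc is plain binary accuracy. The same computation on
# toy 1-D arrays (the function itself passes stacked (N, 1) columns, which
# score identically):
import numpy as np
from sklearn.metrics import accuracy_score

_probs = np.array([0.9, 0.2, 0.7, 0.4])
_labels = np.array([1.0, 0.0, 0.0, 0.0])
assert accuracy_score(_labels >= 0.5, _probs >= 0.5) == 0.75  # 3 of 4 match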
if __name__ == '__main__':
# Receive hyperparameters passed via create-training-job API
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--learning-rate', type=float, default=5e-6)
parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
parser.add_argument('--backend', type=str, default=None,
help='backend for distributed training (tcp, gloo on cpu and gloo, nccl on gpu)')
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--val', type=str, default=os.environ.get('SM_CHANNEL_VAL'))
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
args = parser.parse_args()
# Set hyperparameters after parsing the arguments
batch_size = args.batch_size
lr = args.learning_rate
num_epochs = args.epochs
current_host = args.current_host
hosts = args.hosts
model_dir = args.model_dir
training_dir = args.train
val_dir = args.val
#is_distributed = len(args.hosts) > 1 and args.backend is not None
is_distributed = len(args.hosts) > 1 and args.backend is not None
if is_distributed:
# Initialize the distributed environment.
world_size = len(args.hosts)
os.environ['WORLD_SIZE'] = str(world_size)
host_rank = args.hosts.index(args.current_host)
os.environ['RANK'] = str(host_rank)
dist.init_process_group(backend=args.backend, rank=host_rank, world_size=world_size)
		logging.info('Initialized the distributed environment: \'{}\' backend on {} nodes. '.format(
args.backend, dist.get_world_size()) + 'Current host rank is {}. Number of gpus: {}'.format(
dist.get_rank(), args.num_gpus))
# fix seed
seed_torch()
# Data loading
train_df = pd.read_csv(os.path.join(training_dir, 'train.tsv'), sep ='\t')
valid_df = pd.read_csv(os.path.join(val_dir, 'valid.tsv'), sep ='\t')
# convert BERT dataset
tr_sequences = convert_lines(train_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH, tokenizer)
train_dataset = torch.utils.data.TensorDataset(torch.tensor(tr_sequences, dtype=torch.long),
torch.tensor(train_df['star_rating'].values, dtype=torch.float))
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True)
val_sequences = convert_lines(valid_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH,
tokenizer)
valid_dataset = torch.utils.data.TensorDataset(torch.tensor(val_sequences, dtype=torch.long),
torch.tensor(valid_df['star_rating'].values, dtype=torch.float))
valid_loader = torch.utils.data.DataLoader(valid_dataset,
batch_size=batch_size,
shuffle=False)
# Load pre-trained bert model
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model.zero_grad()
model = model.to(DEVICE)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=lr, eps=1e-8)
if is_distributed and DEVICE != 'cpu':
# multi-machine multi-gpu case
model = nn.parallel.DistributedDataParallel(model)
else:
# single-machine multi-gpu case or single-machine or multi-machine cpu case
model = nn.DataParallel(model)
es = EarlyStopping(patience=5, mode="max")
path = os.path.join(args.model_dir, 'model.pth')
for epoch in range(num_epochs):
log = train(train_loader, model, optimizer, is_distributed)
val_log = evaluate(valid_loader, model)
es(val_log["val_acc"], model, model_path=path)
if es.early_stop:
			logging.info("Early stopping")
break
def model_fn(model_dir):
"""
	Load the fine-tuned PyTorch model. Called once when the hosting service starts.
	:param: model_dir The directory where model files are stored.
	:return: a dict holding the model ("net") and the tokenizer
"""
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model = torch.nn.DataParallel(model)
with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
model.load_state_dict(torch.load(f))
return {"net": model, "tokenizer": tokenizer}
def transform_fn(net, data, input_content_type, output_content_type):
"""
	Transform a request using the PyTorch model. Called once per request.
	:param net: dict holding the model ("net") and the tokenizer, as returned by model_fn.
:param data: The request payload.
:param input_content_type: The request content type.
:param output_content_type: The (desired) response content type.
:return: response payload and content type.
"""
# we can use content types to vary input/output handling, but
# here we just assume json for both
model = net["net"]
tokenizer = net["tokenizer"]
model.to(DEVICE)
# Assume one line of text
parsed = json.loads(data)
logging.info("Received_data: {}".format(parsed))
parsed = tokenizer.tokenize(parsed)
#added by manome
if len(parsed) > MAX_SEQUENCE_LENGTH:
parsed = parsed[:MAX_SEQUENCE_LENGTH-2]
logging.info("Tokens: {}".format(parsed))
#x_batch = tokenizer.convert_tokens_to_ids(["[CLS]"]+parsed+["[SEP]"])+[0] * (MAX_SEQUENCE_LENGTH - len(parsed) - 2)
x_batch = tokenizer.convert_tokens_to_ids(["[CLS]"]+parsed+["[SEP]"]) # do not zero padding
x_batch = torch.LongTensor(x_batch).unsqueeze(0)
model.eval()
with torch.no_grad():
output = model(x_batch.to(DEVICE),
attention_mask=(x_batch>0).to(DEVICE),
labels=None)
response_body = json.dumps(torch.sigmoid(output[0]).cpu().detach().numpy().tolist()[0])
return response_body, output_content_type
| size = float(dist.get_world_size())
for param in model.parameters():
		dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
param.grad.data /= size | identifier_body |
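# --- Illustrative sketch (not part of the original script) ---
# _average_gradients above sums each gradient across workers with all_reduce
# and divides by the world size, i.e. a manual DDP-style average used on the
# multi-machine CPU path. A minimal single-process demonstration of the same
# call pattern with the gloo backend (world_size=1, so the average is a no-op):
import os
import torch
import torch.distributed as dist

def demo_average(tensor):
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")
    dist.init_process_group("gloo", rank=0, world_size=1)
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    tensor /= dist.get_world_size()
    dist.destroy_process_group()
    return tensor
# demo_average(torch.ones(3)) -> tensor([1., 1., 1.])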
train_and_deploy.py | import argparse
from collections import OrderedDict
import json
import numpy as np
import sys
import os
import pandas as pd
import random
from sklearn.metrics import accuracy_score
from tqdm import tqdm
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import transformers
from transformers import BertJapaneseTokenizer
from transformers import BertForSequenceClassification
from transformers import AdamW, get_linear_schedule_with_warmup
from EarlyStopping import EarlyStopping
import logging
logging.basicConfig(level=logging.INFO)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MAX_SEQUENCE_LENGTH=128
tokenizer = BertJapaneseTokenizer.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking')
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count() > 1:
torch.cuda.manual_seed_all(seed)
else:
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
# Converting the lines to BERT format
# Thanks to https://www.kaggle.com/httpwwwfszyc/bert-in-keras-taming
def convert_lines(example, max_seq_length, tokenizer):
max_seq_length -= 2
all_tokens = []
longer = 0
for text in tqdm(example):
tokens_a = tokenizer.tokenize(text)
if len(tokens_a) > max_seq_length:
tokens_a = tokens_a[:max_seq_length]
longer += 1
one_token = tokenizer.convert_tokens_to_ids(["[CLS]"]+tokens_a+["[SEP]"])+[0] * (max_seq_length - len(tokens_a))
all_tokens.append(one_token)
return np.array(all_tokens)
def loss_fn(preds, labels):
preds = preds.view(-1)
labels = labels.view(-1)
assert(preds.shape == labels.shape)
loss = nn.BCEWithLogitsLoss()(preds, labels)
return loss
def _average_gradients(model):
# Gradient averaging.
size = float(dist.get_world_size())
for param in model.parameters():
		dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
param.grad.data /= size
def train(train_loader, model, optimizer, is_distributed):
model.train()
avg_loss = 0.
avg_accuracy = 0.
tk0 = tqdm(enumerate(train_loader), total=len(train_loader), leave=False)
optimizer.zero_grad()
for i, (x_batch, y_batch) in tk0:
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
loss.backward()
		if is_distributed and not torch.cuda.is_available():
# average gradients manually for multi-machine cpu case only
_average_gradients(model)
optimizer.step()
optimizer.zero_grad()
avg_loss += loss.item() / len(train_loader)
avg_accuracy += torch.mean(
((torch.sigmoid(y_pred[0]) >= 0.5) == (y_batch >= 0.5).to(DEVICE)).to(torch.float)).item() / len(train_loader)
tk0.set_postfix(loss=loss.item(), avg_loss=avg_loss)
log = OrderedDict([('avg_loss', avg_loss), ('avg_acc', avg_accuracy)])
tk0.close()
return log
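# --- Illustrative sketch (not part of the original script) ---
# get_linear_schedule_with_warmup is imported at the top of this file but never
# instantiated. If a warmup + linear-decay schedule were wanted, one plausible
# wiring looks like the helper below (the 10% warmup fraction is an assumption,
# not something the original code specifies); the returned scheduler would then
# need a scheduler.step() right after optimizer.step() inside train().
from transformers import get_linear_schedule_with_warmup

def build_scheduler(optimizer, train_loader, num_epochs, warmup_frac=0.1):
    total_steps = len(train_loader) * num_epochs
    return get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=int(total_steps * warmup_frac),
        num_training_steps=total_steps,
    )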
# Run validation
def evaluate(valid_loader, model):
model.eval()
avg_loss = 0.
valid_preds = []
valid_trues = []
with torch.no_grad():
tk0 = tqdm(valid_loader)
for i, (x_batch, y_batch) in enumerate(tk0):
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
avg_loss += loss.item() / len(valid_loader)
outputs_np = torch.sigmoid(y_pred[0]).cpu().detach().numpy()
targets_np = y_batch.unsqueeze(1).numpy()
valid_preds.append(outputs_np)
valid_trues.append(targets_np)
valid_preds = np.vstack(valid_preds)
valid_trues = np.vstack(valid_trues)
acc = accuracy_score((valid_trues >= 0.5), (valid_preds >= 0.5))
val_log = OrderedDict([('val_loss', avg_loss), ('val_acc', acc)])
tk0.close()
return val_log
if __name__ == '__main__':
# Receive hyperparameters passed via create-training-job API
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--learning-rate', type=float, default=5e-6)
parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
parser.add_argument('--backend', type=str, default=None,
help='backend for distributed training (tcp, gloo on cpu and gloo, nccl on gpu)')
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--val', type=str, default=os.environ.get('SM_CHANNEL_VAL'))
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
args = parser.parse_args()
# Set hyperparameters after parsing the arguments
batch_size = args.batch_size
lr = args.learning_rate
num_epochs = args.epochs
current_host = args.current_host
hosts = args.hosts
model_dir = args.model_dir
training_dir = args.train
val_dir = args.val
#is_distributed = len(args.hosts) > 1 and args.backend is not None
is_distributed = len(args.hosts) > 1 and args.backend is not None
if is_distributed:
# Initialize the distributed environment.
world_size = len(args.hosts)
os.environ['WORLD_SIZE'] = str(world_size)
host_rank = args.hosts.index(args.current_host)
os.environ['RANK'] = str(host_rank)
dist.init_process_group(backend=args.backend, rank=host_rank, world_size=world_size)
		logging.info('Initialized the distributed environment: \'{}\' backend on {} nodes. '.format(
args.backend, dist.get_world_size()) + 'Current host rank is {}. Number of gpus: {}'.format(
dist.get_rank(), args.num_gpus))
# fix seed
seed_torch()
# Data loading
train_df = pd.read_csv(os.path.join(training_dir, 'train.tsv'), sep ='\t')
valid_df = pd.read_csv(os.path.join(val_dir, 'valid.tsv'), sep ='\t')
# convert BERT dataset
tr_sequences = convert_lines(train_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH, tokenizer)
train_dataset = torch.utils.data.TensorDataset(torch.tensor(tr_sequences, dtype=torch.long),
torch.tensor(train_df['star_rating'].values, dtype=torch.float))
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True)
val_sequences = convert_lines(valid_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH,
tokenizer)
valid_dataset = torch.utils.data.TensorDataset(torch.tensor(val_sequences, dtype=torch.long),
torch.tensor(valid_df['star_rating'].values, dtype=torch.float))
valid_loader = torch.utils.data.DataLoader(valid_dataset,
batch_size=batch_size,
shuffle=False) | # Load pre-trained bert model
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model.zero_grad()
model = model.to(DEVICE)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=lr, eps=1e-8)
if is_distributed and DEVICE != 'cpu':
# multi-machine multi-gpu case
model = nn.parallel.DistributedDataParallel(model)
else:
# single-machine multi-gpu case or single-machine or multi-machine cpu case
model = nn.DataParallel(model)
es = EarlyStopping(patience=5, mode="max")
path = os.path.join(args.model_dir, 'model.pth')
for epoch in range(num_epochs):
log = train(train_loader, model, optimizer, is_distributed)
val_log = evaluate(valid_loader, model)
es(val_log["val_acc"], model, model_path=path)
if es.early_stop:
			logging.info("Early stopping")
break
def model_fn(model_dir):
"""
	Load the fine-tuned PyTorch model. Called once when the hosting service starts.
	:param: model_dir The directory where model files are stored.
	:return: a dict holding the model ("net") and the tokenizer
"""
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model = torch.nn.DataParallel(model)
with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
model.load_state_dict(torch.load(f))
return {"net": model, "tokenizer": tokenizer}
def transform_fn(net, data, input_content_type, output_content_type):
"""
	Transform a request using the PyTorch model. Called once per request.
	:param net: dict holding the model ("net") and the tokenizer, as returned by model_fn.
:param data: The request payload.
:param input_content_type: The request content type.
:param output_content_type: The (desired) response content type.
:return: response payload and content type.
"""
# we can use content types to vary input/output handling, but
# here we just assume json for both
model = net["net"]
tokenizer = net["tokenizer"]
model.to(DEVICE)
# Assume one line of text
parsed = json.loads(data)
logging.info("Received_data: {}".format(parsed))
parsed = tokenizer.tokenize(parsed)
#added by manome
if len(parsed) > MAX_SEQUENCE_LENGTH:
parsed = parsed[:MAX_SEQUENCE_LENGTH-2]
logging.info("Tokens: {}".format(parsed))
#x_batch = tokenizer.convert_tokens_to_ids(["[CLS]"]+parsed+["[SEP]"])+[0] * (MAX_SEQUENCE_LENGTH - len(parsed) - 2)
x_batch = tokenizer.convert_tokens_to_ids(["[CLS]"]+parsed+["[SEP]"]) # do not zero padding
x_batch = torch.LongTensor(x_batch).unsqueeze(0)
model.eval()
with torch.no_grad():
output = model(x_batch.to(DEVICE),
attention_mask=(x_batch>0).to(DEVICE),
labels=None)
response_body = json.dumps(torch.sigmoid(output[0]).cpu().detach().numpy().tolist()[0])
return response_body, output_content_type | random_line_split |
|
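# --- Illustrative sketch (not part of the original script) ---
# EarlyStopping is imported from a local EarlyStopping.py that is not shown in
# this file. The class below is a *hypothetical* minimal implementation that
# matches how the training loop uses it: constructed with patience and
# mode="max", called as es(score, model, model_path=...), exposing .early_stop,
# and checkpointing the best model to model_path.
import torch

class EarlyStopping:
    def __init__(self, patience=5, mode="max", delta=0.0):
        self.patience = patience
        self.mode = mode
        self.delta = delta
        self.counter = 0
        self.best_score = None
        self.early_stop = False

    def __call__(self, metric, model, model_path):
        score = metric if self.mode == "max" else -metric
        if self.best_score is None or score > self.best_score + self.delta:
            self.best_score = score
            torch.save(model.state_dict(), model_path)  # keep only the best weights
            self.counter = 0
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True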
train_and_deploy.py | import argparse
from collections import OrderedDict
import json
import numpy as np
import sys
import os
import pandas as pd
import random
from sklearn.metrics import accuracy_score
from tqdm import tqdm
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import transformers
from transformers import BertJapaneseTokenizer
from transformers import BertForSequenceClassification
from transformers import AdamW, get_linear_schedule_with_warmup
from EarlyStopping import EarlyStopping
import logging
logging.basicConfig(level=logging.INFO)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MAX_SEQUENCE_LENGTH=128
tokenizer = BertJapaneseTokenizer.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking')
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count() > 1:
torch.cuda.manual_seed_all(seed)
else:
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
# Converting the lines to BERT format
# Thanks to https://www.kaggle.com/httpwwwfszyc/bert-in-keras-taming
def convert_lines(example, max_seq_length, tokenizer):
max_seq_length -= 2
all_tokens = []
longer = 0
for text in tqdm(example):
tokens_a = tokenizer.tokenize(text)
if len(tokens_a) > max_seq_length:
tokens_a = tokens_a[:max_seq_length]
longer += 1
one_token = tokenizer.convert_tokens_to_ids(["[CLS]"]+tokens_a+["[SEP]"])+[0] * (max_seq_length - len(tokens_a))
all_tokens.append(one_token)
return np.array(all_tokens)
def loss_fn(preds, labels):
preds = preds.view(-1)
labels = labels.view(-1)
assert(preds.shape == labels.shape)
loss = nn.BCEWithLogitsLoss()(preds, labels)
return loss
def _average_gradients(model):
# Gradient averaging.
size = float(dist.get_world_size())
for param in model.parameters():
		dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
param.grad.data /= size
def | (train_loader, model, optimizer, is_distributed):
model.train()
avg_loss = 0.
avg_accuracy = 0.
tk0 = tqdm(enumerate(train_loader), total=len(train_loader), leave=False)
optimizer.zero_grad()
for i, (x_batch, y_batch) in tk0:
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
loss.backward()
		if is_distributed and not torch.cuda.is_available():
# average gradients manually for multi-machine cpu case only
_average_gradients(model)
optimizer.step()
optimizer.zero_grad()
avg_loss += loss.item() / len(train_loader)
avg_accuracy += torch.mean(
((torch.sigmoid(y_pred[0]) >= 0.5) == (y_batch >= 0.5).to(DEVICE)).to(torch.float)).item() / len(train_loader)
tk0.set_postfix(loss=loss.item(), avg_loss=avg_loss)
log = OrderedDict([('avg_loss', avg_loss), ('avg_acc', avg_accuracy)])
tk0.close()
return log
# Run validation
def evaluate(valid_loader, model):
model.eval()
avg_loss = 0.
valid_preds = []
valid_trues = []
with torch.no_grad():
tk0 = tqdm(valid_loader)
for i, (x_batch, y_batch) in enumerate(tk0):
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
avg_loss += loss.item() / len(valid_loader)
outputs_np = torch.sigmoid(y_pred[0]).cpu().detach().numpy()
targets_np = y_batch.unsqueeze(1).numpy()
valid_preds.append(outputs_np)
valid_trues.append(targets_np)
valid_preds = np.vstack(valid_preds)
valid_trues = np.vstack(valid_trues)
acc = accuracy_score((valid_trues >= 0.5), (valid_preds >= 0.5))
val_log = OrderedDict([('val_loss', avg_loss), ('val_acc', acc)])
tk0.close()
return val_log
if __name__ == '__main__':
# Receive hyperparameters passed via create-training-job API
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--learning-rate', type=float, default=5e-6)
parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
parser.add_argument('--backend', type=str, default=None,
help='backend for distributed training (tcp, gloo on cpu and gloo, nccl on gpu)')
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--val', type=str, default=os.environ.get('SM_CHANNEL_VAL'))
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
args = parser.parse_args()
# Set hyperparameters after parsing the arguments
batch_size = args.batch_size
lr = args.learning_rate
num_epochs = args.epochs
current_host = args.current_host
hosts = args.hosts
model_dir = args.model_dir
training_dir = args.train
val_dir = args.val
#is_distributed = len(args.hosts) > 1 and args.backend is not None
is_distributed = len(args.hosts) > 1 and args.backend is not None
if is_distributed:
# Initialize the distributed environment.
world_size = len(args.hosts)
os.environ['WORLD_SIZE'] = str(world_size)
host_rank = args.hosts.index(args.current_host)
os.environ['RANK'] = str(host_rank)
dist.init_process_group(backend=args.backend, rank=host_rank, world_size=world_size)
		logging.info('Initialized the distributed environment: \'{}\' backend on {} nodes. '.format(
args.backend, dist.get_world_size()) + 'Current host rank is {}. Number of gpus: {}'.format(
dist.get_rank(), args.num_gpus))
# fix seed
seed_torch()
# Data loading
train_df = pd.read_csv(os.path.join(training_dir, 'train.tsv'), sep ='\t')
valid_df = pd.read_csv(os.path.join(val_dir, 'valid.tsv'), sep ='\t')
# convert BERT dataset
tr_sequences = convert_lines(train_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH, tokenizer)
train_dataset = torch.utils.data.TensorDataset(torch.tensor(tr_sequences, dtype=torch.long),
torch.tensor(train_df['star_rating'].values, dtype=torch.float))
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True)
val_sequences = convert_lines(valid_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH,
tokenizer)
valid_dataset = torch.utils.data.TensorDataset(torch.tensor(val_sequences, dtype=torch.long),
torch.tensor(valid_df['star_rating'].values, dtype=torch.float))
valid_loader = torch.utils.data.DataLoader(valid_dataset,
batch_size=batch_size,
shuffle=False)
# Load pre-trained bert model
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model.zero_grad()
model = model.to(DEVICE)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=lr, eps=1e-8)
if is_distributed and DEVICE != 'cpu':
# multi-machine multi-gpu case
model = nn.parallel.DistributedDataParallel(model)
else:
# single-machine multi-gpu case or single-machine or multi-machine cpu case
model = nn.DataParallel(model)
es = EarlyStopping(patience=5, mode="max")
path = os.path.join(args.model_dir, 'model.pth')
for epoch in range(num_epochs):
log = train(train_loader, model, optimizer, is_distributed)
val_log = evaluate(valid_loader, model)
es(val_log["val_acc"], model, model_path=path)
if es.early_stop:
			logging.info("Early stopping")
break
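# --- Illustrative sketch (not part of the original script) ---
# Every argparse default above reads a SageMaker SM_* environment variable, so
# running this file outside a training job fails unless those variables are
# set. A hypothetical launcher (paths and values below are placeholders) that
# would live in a separate helper script:
def run_locally():
    import json
    import os
    import subprocess
    local_env = {
        "SM_NUM_GPUS": "0",
        "SM_MODEL_DIR": "./local_model",
        "SM_CHANNEL_TRAIN": "./data/train",
        "SM_CHANNEL_VAL": "./data/val",
        "SM_CURRENT_HOST": "algo-1",
        "SM_HOSTS": json.dumps(["algo-1"]),
    }
    subprocess.run(["python", "train_and_deploy.py", "--epochs", "1"],
                   env={**os.environ, **local_env}, check=True)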
def model_fn(model_dir):
"""
	Load the fine-tuned PyTorch model. Called once when the hosting service starts.
	:param: model_dir The directory where model files are stored.
	:return: a dict holding the model ("net") and the tokenizer
"""
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model = torch.nn.DataParallel(model)
with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
model.load_state_dict(torch.load(f))
return {"net": model, "tokenizer": tokenizer}
def transform_fn(net, data, input_content_type, output_content_type):
"""
	Transform a request using the PyTorch model. Called once per request.
	:param net: dict holding the model ("net") and the tokenizer, as returned by model_fn.
:param data: The request payload.
:param input_content_type: The request content type.
:param output_content_type: The (desired) response content type.
:return: response payload and content type.
"""
# we can use content types to vary input/output handling, but
# here we just assume json for both
model = net["net"]
tokenizer = net["tokenizer"]
model.to(DEVICE)
# Assume one line of text
parsed = json.loads(data)
logging.info("Received_data: {}".format(parsed))
parsed = tokenizer.tokenize(parsed)
#added by manome
if len(parsed) > MAX_SEQUENCE_LENGTH:
parsed = parsed[:MAX_SEQUENCE_LENGTH-2]
logging.info("Tokens: {}".format(parsed))
#x_batch = tokenizer.convert_tokens_to_ids(["[CLS]"]+parsed+["[SEP]"])+[0] * (MAX_SEQUENCE_LENGTH - len(parsed) - 2)
x_batch = tokenizer.convert_tokens_to_ids(["[CLS]"]+parsed+["[SEP]"]) # do not zero padding
x_batch = torch.LongTensor(x_batch).unsqueeze(0)
model.eval()
with torch.no_grad():
output = model(x_batch.to(DEVICE),
attention_mask=(x_batch>0).to(DEVICE),
labels=None)
response_body = json.dumps(torch.sigmoid(output[0]).cpu().detach().numpy().tolist()[0])
return response_body, output_content_type
| train | identifier_name |
windows_aligned_file_reader.rs | /*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT license.
*/
use std::sync::Arc;
use std::time::Duration;
use std::{ptr, thread};
use crossbeam::sync::ShardedLock;
use hashbrown::HashMap;
use once_cell::sync::Lazy;
use platform::file_handle::{AccessMode, ShareMode};
use platform::{
file_handle::FileHandle,
file_io::{get_queued_completion_status, read_file_to_slice},
io_completion_port::IOCompletionPort,
};
use winapi::{
shared::{basetsd::ULONG_PTR, minwindef::DWORD},
um::minwinbase::OVERLAPPED,
};
use crate::common::{ANNError, ANNResult};
use crate::model::IOContext;
pub const MAX_IO_CONCURRENCY: usize = 128; // To do: explore the optimal value for this. The current value is taken from C++ code.
pub const FILE_ATTRIBUTE_READONLY: DWORD = 0x00000001;
pub const IO_COMPLETION_TIMEOUT: DWORD = u32::MAX; // Infinite timeout.
pub const DISK_IO_ALIGNMENT: usize = 512;
pub const ASYNC_IO_COMPLETION_CHECK_INTERVAL: Duration = Duration::from_micros(5);
/// Aligned read request for disk IO. It borrows an aligned mutable slice (typically carved out of an AlignedBoxWithSlice) as the read destination and exposes the data immutably via `aligned_buf()`.
pub struct AlignedRead<'a, T> {
/// where to read from
/// offset needs to be aligned with DISK_IO_ALIGNMENT
offset: u64,
/// where to read into
/// aligned_buf and its len need to be aligned with DISK_IO_ALIGNMENT
aligned_buf: &'a mut [T],
}
impl<'a, T> AlignedRead<'a, T> {
pub fn new(offset: u64, aligned_buf: &'a mut [T]) -> ANNResult<Self> {
Self::assert_is_aligned(offset as usize)?;
Self::assert_is_aligned(std::mem::size_of_val(aligned_buf))?;
Ok(Self {
offset,
aligned_buf,
})
}
fn assert_is_aligned(val: usize) -> ANNResult<()> {
match val % DISK_IO_ALIGNMENT {
0 => Ok(()),
_ => Err(ANNError::log_disk_io_request_alignment_error(format!(
"The offset or length of AlignedRead request is not {} bytes aligned",
DISK_IO_ALIGNMENT
))),
}
}
pub fn aligned_buf(&self) -> &[T] {
self.aligned_buf
}
}
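// --- Illustrative sketch (not part of the original source) ---
// AlignedRead enforces DISK_IO_ALIGNMENT on both the file offset and the byte
// length of the destination buffer; pointer alignment itself comes from using
// AlignedBoxWithSlice, as the tests at the bottom of this file do. A minimal
// check of that contract:
#[cfg(test)]
mod aligned_read_contract_sketch {
    use super::*;

    #[test]
    fn offset_and_length_must_be_512_byte_aligned() {
        let mut buf = vec![0u8; DISK_IO_ALIGNMENT]; // 512-byte destination
        assert!(AlignedRead::new(0, &mut buf).is_ok()); // aligned offset and length
        assert!(AlignedRead::new(100, &mut buf).is_err()); // 100 % 512 != 0
        let mut short = vec![0u8; 100];
        assert!(AlignedRead::new(0, &mut short).is_err()); // unaligned length
    }
}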
pub struct WindowsAlignedFileReader {
file_name: String,
// ctx_map is the mapping from thread id to io context. It is hashmap behind a sharded lock to allow concurrent access from multiple threads.
// ShardedLock: shardedlock provides an implementation of a reader-writer lock that offers concurrent read access to the shared data while allowing exclusive write access.
// It achieves better scalability by dividing the shared data into multiple shards, and each with its own internal lock.
// Multiple threads can read from different shards simultaneously, reducing contention.
// https://docs.rs/crossbeam/0.8.2/crossbeam/sync/struct.ShardedLock.html
	// Compared to RwLock, ShardedLock provides higher concurrency for read operations and is suitable for read-heavy workloads.
// The value of the hashmap is an Arc<IOContext> to allow immutable access to IOContext with automatic reference counting.
ctx_map: Lazy<ShardedLock<HashMap<thread::ThreadId, Arc<IOContext>>>>,
}
impl WindowsAlignedFileReader {
pub fn new(fname: &str) -> ANNResult<Self> {
let reader: WindowsAlignedFileReader = WindowsAlignedFileReader {
file_name: fname.to_string(),
ctx_map: Lazy::new(|| ShardedLock::new(HashMap::new())),
};
reader.register_thread()?;
Ok(reader)
}
// Register the io context for a thread if it hasn't been registered.
pub fn register_thread(&self) -> ANNResult<()> {
let mut ctx_map = self.ctx_map.write().map_err(|_| {
			ANNError::log_lock_poison_error("unable to acquire write lock on ctx_map".to_string())
})?;
let id = thread::current().id();
if ctx_map.contains_key(&id) {
println!(
"Warning:: Duplicate registration for thread_id : {:?}. Directly call get_ctx to get the thread context data.",
id);
return Ok(());
}
let mut ctx = IOContext::new();
match unsafe { FileHandle::new(&self.file_name, AccessMode::Read, ShareMode::Read) } {
Ok(file_handle) => ctx.file_handle = file_handle,
Err(err) => {
return Err(ANNError::log_io_error(err));
}
}
// Create a io completion port for the file handle, later it will be used to get the completion status.
match IOCompletionPort::new(&ctx.file_handle, None, 0, 0) {
Ok(io_completion_port) => ctx.io_completion_port = io_completion_port,
Err(err) => {
return Err(ANNError::log_io_error(err));
}
}
ctx_map.insert(id, Arc::new(ctx));
Ok(())
}
// Get the reference counted io context for the current thread.
pub fn get_ctx(&self) -> ANNResult<Arc<IOContext>> {
let ctx_map = self.ctx_map.read().map_err(|_| {
ANNError::log_lock_poison_error("unable to acquire read lock on ctx_map".to_string())
})?;
let id = thread::current().id();
match ctx_map.get(&id) {
Some(ctx) => Ok(Arc::clone(ctx)),
None => Err(ANNError::log_index_error(format!(
"unable to find IOContext for thread_id {:?}",
id
))),
}
}
// Read the data from the file by sending concurrent io requests in batches.
pub fn read<T>(&self, read_requests: &mut [AlignedRead<T>], ctx: &IOContext) -> ANNResult<()> {
let n_requests = read_requests.len();
let n_batches = (n_requests + MAX_IO_CONCURRENCY - 1) / MAX_IO_CONCURRENCY;
let mut overlapped_in_out =
vec![unsafe { std::mem::zeroed::<OVERLAPPED>() }; MAX_IO_CONCURRENCY];
for batch_idx in 0..n_batches {
let batch_start = MAX_IO_CONCURRENCY * batch_idx;
let batch_size = std::cmp::min(n_requests - batch_start, MAX_IO_CONCURRENCY);
for j in 0..batch_size {
let req = &mut read_requests[batch_start + j];
let os = &mut overlapped_in_out[j];
match unsafe {
read_file_to_slice(&ctx.file_handle, req.aligned_buf, os, req.offset)
} {
Ok(_) => {}
Err(error) => {
return Err(ANNError::IOError { err: (error) });
}
}
}
let mut n_read: DWORD = 0;
let mut n_complete: u64 = 0;
let mut completion_key: ULONG_PTR = 0;
let mut lp_os: *mut OVERLAPPED = ptr::null_mut();
while n_complete < batch_size as u64 {
match unsafe {
get_queued_completion_status(
&ctx.io_completion_port,
&mut n_read,
&mut completion_key,
&mut lp_os,
IO_COMPLETION_TIMEOUT,
)
} {
// An IO request completed.
Ok(true) => n_complete += 1,
// No IO request completed, continue to wait.
Ok(false) => {
thread::sleep(ASYNC_IO_COMPLETION_CHECK_INTERVAL);
}
					// An error occurred.
Err(error) => return Err(ANNError::IOError { err: (error) }),
}
}
}
Ok(())
}
}
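// --- Illustrative sketch (not part of the original source) ---
// Intended calling pattern: each worker thread calls register_thread() once so
// it owns a dedicated IOContext (file handle + IO completion port), then fetches
// it back with get_ctx() before issuing reads. "example.index" is a placeholder
// path, not a file shipped with this crate.
#[allow(dead_code)]
fn sketch_parallel_reads() -> ANNResult<()> {
    let reader = Arc::new(WindowsAlignedFileReader::new("example.index")?);
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let reader = Arc::clone(&reader);
            thread::spawn(move || -> ANNResult<()> {
                reader.register_thread()?; // one IOContext per thread
                let ctx = reader.get_ctx()?;
                let mut requests = Vec::<AlignedRead<u8>>::new(); // fill with aligned sector reads
                reader.read(&mut requests, &ctx)
            })
        })
        .collect();
    for handle in handles {
        handle.join().expect("reader thread panicked")?;
    }
    Ok(())
}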
#[cfg(test)]
mod tests {
use std::{fs::File, io::BufReader};
use bincode::deserialize_from;
use serde::{Deserialize, Serialize};
use crate::{common::AlignedBoxWithSlice, model::SECTOR_LEN};
use super::*;
pub const TEST_INDEX_PATH: &str =
"./tests/data/disk_index_siftsmall_learn_256pts_R4_L50_A1.2_alligned_reader_test.index";
pub const TRUTH_NODE_DATA_PATH: &str =
"./tests/data/disk_index_node_data_aligned_reader_truth.bin";
#[derive(Debug, Serialize, Deserialize)]
struct NodeData {
num_neighbors: u32,
coordinates: Vec<f32>,
neighbors: Vec<u32>,
}
impl PartialEq for NodeData {
fn eq(&self, other: &Self) -> bool {
self.num_neighbors == other.num_neighbors
&& self.coordinates == other.coordinates
&& self.neighbors == other.neighbors
}
}
#[test]
fn test_new_aligned_file_reader() {
// Replace "test_file_path" with actual file path
let result = WindowsAlignedFileReader::new(TEST_INDEX_PATH);
assert!(result.is_ok());
let reader = result.unwrap();
assert_eq!(reader.file_name, TEST_INDEX_PATH);
}
#[test]
fn test_read() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let read_length = 512; // adjust according to your logic
let num_read = 10;
let mut aligned_mem = AlignedBoxWithSlice::<u8>::new(read_length * num_read, 512).unwrap();
// create and add AlignedReads to the vector
let mut mem_slices = aligned_mem
.split_into_nonoverlapping_mut_slices(0..aligned_mem.len(), read_length)
.unwrap();
let mut aligned_reads: Vec<AlignedRead<'_, u8>> = mem_slices
.iter_mut()
.enumerate()
.map(|(i, slice)| {
let offset = (i * read_length) as u64;
AlignedRead::new(offset, slice).unwrap()
})
.collect();
let result = reader.read(&mut aligned_reads, &ctx);
assert!(result.is_ok());
}
#[test]
fn test_read_disk_index_by_sector() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let read_length = SECTOR_LEN; // adjust according to your logic
let num_sector = 10;
let mut aligned_mem =
AlignedBoxWithSlice::<u8>::new(read_length * num_sector, 512).unwrap();
// Each slice will be used as the buffer for a read request of a sector.
let mut mem_slices = aligned_mem
.split_into_nonoverlapping_mut_slices(0..aligned_mem.len(), read_length)
.unwrap();
let mut aligned_reads: Vec<AlignedRead<'_, u8>> = mem_slices
.iter_mut()
.enumerate()
.map(|(sector_id, slice)| {
let offset = (sector_id * read_length) as u64;
AlignedRead::new(offset, slice).unwrap()
})
.collect();
let result = reader.read(&mut aligned_reads, &ctx);
assert!(result.is_ok());
aligned_reads.iter().for_each(|read| {
assert_eq!(read.aligned_buf.len(), SECTOR_LEN);
});
let disk_layout_meta = reconstruct_disk_meta(aligned_reads[0].aligned_buf);
assert!(disk_layout_meta.len() > 9);
let dims = disk_layout_meta[1];
let num_pts = disk_layout_meta[0];
let max_node_len = disk_layout_meta[3];
let max_num_nodes_per_sector = disk_layout_meta[4];
assert!(max_node_len * max_num_nodes_per_sector < SECTOR_LEN as u64);
let num_nbrs_start = (dims as usize) * std::mem::size_of::<f32>();
let nbrs_buf_start = num_nbrs_start + std::mem::size_of::<u32>();
let mut node_data_array = Vec::with_capacity(max_num_nodes_per_sector as usize * 9);
		// Only validate sectors 1..=8, the first sectors holding graph nodes (sector 0 stores the disk-layout metadata).
(1..9).for_each(|sector_id| {
let sector_data = &mem_slices[sector_id];
for node_data in sector_data.chunks_exact(max_node_len as usize) {
// Extract coordinates data from the start of the node_data
let coordinates_end = (dims as usize) * std::mem::size_of::<f32>();
let coordinates = node_data[0..coordinates_end] |
// Extract number of neighbors from the node_data
let neighbors_num = u32::from_le_bytes(
node_data[num_nbrs_start..nbrs_buf_start]
.try_into()
.unwrap(),
);
let nbors_buf_end =
nbrs_buf_start + (neighbors_num as usize) * std::mem::size_of::<u32>();
// Extract neighbors from the node data.
let mut neighbors = Vec::new();
for nbors_data in node_data[nbrs_buf_start..nbors_buf_end]
.chunks_exact(std::mem::size_of::<u32>())
{
let nbors_id = u32::from_le_bytes(nbors_data.try_into().unwrap());
assert!(nbors_id < num_pts as u32);
neighbors.push(nbors_id);
}
// Create NodeData struct and push it to the node_data_array
node_data_array.push(NodeData {
num_neighbors: neighbors_num,
coordinates,
neighbors,
});
}
});
// Compare that each node read from the disk index are expected.
let node_data_truth_file = File::open(TRUTH_NODE_DATA_PATH).unwrap();
let reader = BufReader::new(node_data_truth_file);
let node_data_vec: Vec<NodeData> = deserialize_from(reader).unwrap();
for (node_from_node_data_file, node_from_disk_index) in
node_data_vec.iter().zip(node_data_array.iter())
{
// Verify that the NodeData from the file is equal to the NodeData in node_data_array
assert_eq!(node_from_node_data_file, node_from_disk_index);
}
}
#[test]
fn test_read_fail_invalid_file() {
let reader = WindowsAlignedFileReader::new("/invalid_path");
assert!(reader.is_err());
}
#[test]
fn test_read_no_requests() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let mut read_requests = Vec::<AlignedRead<u8>>::new();
let result = reader.read(&mut read_requests, &ctx);
assert!(result.is_ok());
}
#[test]
fn test_get_ctx() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let result = reader.get_ctx();
assert!(result.is_ok());
}
#[test]
fn test_register_thread() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let result = reader.register_thread();
assert!(result.is_ok());
}
fn reconstruct_disk_meta(buffer: &[u8]) -> Vec<u64> {
let size_of_u64 = std::mem::size_of::<u64>();
let num_values = buffer.len() / size_of_u64;
let mut disk_layout_meta = Vec::with_capacity(num_values);
let meta_data = &buffer[8..];
for chunk in meta_data.chunks_exact(size_of_u64) {
let value = u64::from_le_bytes(chunk.try_into().unwrap());
disk_layout_meta.push(value);
}
disk_layout_meta
}
} | .chunks_exact(std::mem::size_of::<f32>())
.map(|chunk| f32::from_le_bytes(chunk.try_into().unwrap()))
.collect(); | random_line_split |
windows_aligned_file_reader.rs | /*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT license.
*/
use std::sync::Arc;
use std::time::Duration;
use std::{ptr, thread};
use crossbeam::sync::ShardedLock;
use hashbrown::HashMap;
use once_cell::sync::Lazy;
use platform::file_handle::{AccessMode, ShareMode};
use platform::{
file_handle::FileHandle,
file_io::{get_queued_completion_status, read_file_to_slice},
io_completion_port::IOCompletionPort,
};
use winapi::{
shared::{basetsd::ULONG_PTR, minwindef::DWORD},
um::minwinbase::OVERLAPPED,
};
use crate::common::{ANNError, ANNResult};
use crate::model::IOContext;
pub const MAX_IO_CONCURRENCY: usize = 128; // To do: explore the optimal value for this. The current value is taken from C++ code.
pub const FILE_ATTRIBUTE_READONLY: DWORD = 0x00000001;
pub const IO_COMPLETION_TIMEOUT: DWORD = u32::MAX; // Infinite timeout.
pub const DISK_IO_ALIGNMENT: usize = 512;
pub const ASYNC_IO_COMPLETION_CHECK_INTERVAL: Duration = Duration::from_micros(5);
/// Aligned read request for disk IO. It borrows an aligned mutable slice (typically carved out of an AlignedBoxWithSlice) as the read destination and exposes the data immutably via `aligned_buf()`.
pub struct AlignedRead<'a, T> {
/// where to read from
/// offset needs to be aligned with DISK_IO_ALIGNMENT
offset: u64,
/// where to read into
/// aligned_buf and its len need to be aligned with DISK_IO_ALIGNMENT
aligned_buf: &'a mut [T],
}
impl<'a, T> AlignedRead<'a, T> {
pub fn new(offset: u64, aligned_buf: &'a mut [T]) -> ANNResult<Self> {
Self::assert_is_aligned(offset as usize)?;
Self::assert_is_aligned(std::mem::size_of_val(aligned_buf))?;
Ok(Self {
offset,
aligned_buf,
})
}
fn assert_is_aligned(val: usize) -> ANNResult<()> {
match val % DISK_IO_ALIGNMENT {
0 => Ok(()),
_ => Err(ANNError::log_disk_io_request_alignment_error(format!(
"The offset or length of AlignedRead request is not {} bytes aligned",
DISK_IO_ALIGNMENT
))),
}
}
pub fn aligned_buf(&self) -> &[T] {
self.aligned_buf
}
}
pub struct WindowsAlignedFileReader {
file_name: String,
// ctx_map is the mapping from thread id to io context. It is hashmap behind a sharded lock to allow concurrent access from multiple threads.
// ShardedLock: shardedlock provides an implementation of a reader-writer lock that offers concurrent read access to the shared data while allowing exclusive write access.
// It achieves better scalability by dividing the shared data into multiple shards, and each with its own internal lock.
// Multiple threads can read from different shards simultaneously, reducing contention.
// https://docs.rs/crossbeam/0.8.2/crossbeam/sync/struct.ShardedLock.html
	// Compared to RwLock, ShardedLock provides higher concurrency for read operations and is suitable for read-heavy workloads.
// The value of the hashmap is an Arc<IOContext> to allow immutable access to IOContext with automatic reference counting.
ctx_map: Lazy<ShardedLock<HashMap<thread::ThreadId, Arc<IOContext>>>>,
}
impl WindowsAlignedFileReader {
pub fn new(fname: &str) -> ANNResult<Self> {
let reader: WindowsAlignedFileReader = WindowsAlignedFileReader {
file_name: fname.to_string(),
ctx_map: Lazy::new(|| ShardedLock::new(HashMap::new())),
};
reader.register_thread()?;
Ok(reader)
}
// Register the io context for a thread if it hasn't been registered.
pub fn register_thread(&self) -> ANNResult<()> {
let mut ctx_map = self.ctx_map.write().map_err(|_| {
			ANNError::log_lock_poison_error("unable to acquire write lock on ctx_map".to_string())
})?;
let id = thread::current().id();
if ctx_map.contains_key(&id) {
println!(
"Warning:: Duplicate registration for thread_id : {:?}. Directly call get_ctx to get the thread context data.",
id);
return Ok(());
}
let mut ctx = IOContext::new();
match unsafe { FileHandle::new(&self.file_name, AccessMode::Read, ShareMode::Read) } {
Ok(file_handle) => ctx.file_handle = file_handle,
Err(err) => {
return Err(ANNError::log_io_error(err));
}
}
// Create a io completion port for the file handle, later it will be used to get the completion status.
match IOCompletionPort::new(&ctx.file_handle, None, 0, 0) {
Ok(io_completion_port) => ctx.io_completion_port = io_completion_port,
Err(err) => {
return Err(ANNError::log_io_error(err));
}
}
ctx_map.insert(id, Arc::new(ctx));
Ok(())
}
// Get the reference counted io context for the current thread.
pub fn get_ctx(&self) -> ANNResult<Arc<IOContext>> {
let ctx_map = self.ctx_map.read().map_err(|_| {
ANNError::log_lock_poison_error("unable to acquire read lock on ctx_map".to_string())
})?;
let id = thread::current().id();
match ctx_map.get(&id) {
Some(ctx) => Ok(Arc::clone(ctx)),
None => Err(ANNError::log_index_error(format!(
"unable to find IOContext for thread_id {:?}",
id
))),
}
}
// Read the data from the file by sending concurrent io requests in batches.
pub fn read<T>(&self, read_requests: &mut [AlignedRead<T>], ctx: &IOContext) -> ANNResult<()> {
let n_requests = read_requests.len();
let n_batches = (n_requests + MAX_IO_CONCURRENCY - 1) / MAX_IO_CONCURRENCY;
let mut overlapped_in_out =
vec![unsafe { std::mem::zeroed::<OVERLAPPED>() }; MAX_IO_CONCURRENCY];
for batch_idx in 0..n_batches {
let batch_start = MAX_IO_CONCURRENCY * batch_idx;
let batch_size = std::cmp::min(n_requests - batch_start, MAX_IO_CONCURRENCY);
for j in 0..batch_size {
let req = &mut read_requests[batch_start + j];
let os = &mut overlapped_in_out[j];
match unsafe {
read_file_to_slice(&ctx.file_handle, req.aligned_buf, os, req.offset)
} {
Ok(_) => {}
Err(error) => {
return Err(ANNError::IOError { err: (error) });
}
}
}
let mut n_read: DWORD = 0;
let mut n_complete: u64 = 0;
let mut completion_key: ULONG_PTR = 0;
let mut lp_os: *mut OVERLAPPED = ptr::null_mut();
while n_complete < batch_size as u64 {
match unsafe {
get_queued_completion_status(
&ctx.io_completion_port,
&mut n_read,
&mut completion_key,
&mut lp_os,
IO_COMPLETION_TIMEOUT,
)
} {
// An IO request completed.
Ok(true) => n_complete += 1,
// No IO request completed, continue to wait.
Ok(false) => {
thread::sleep(ASYNC_IO_COMPLETION_CHECK_INTERVAL);
}
					// An error occurred.
Err(error) => return Err(ANNError::IOError { err: (error) }),
}
}
}
Ok(())
}
}
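// --- Illustrative sketch (not part of the original source) ---
// read() issues at most MAX_IO_CONCURRENCY overlapped requests per batch and
// drains the completion port before starting the next batch. The batch_start /
// batch_size arithmetic used above is the usual ceiling-division pattern:
#[allow(dead_code)]
fn sketch_batch_bounds(n_requests: usize) -> Vec<(usize, usize)> {
    let n_batches = (n_requests + MAX_IO_CONCURRENCY - 1) / MAX_IO_CONCURRENCY; // ceiling division
    (0..n_batches)
        .map(|batch_idx| {
            let start = MAX_IO_CONCURRENCY * batch_idx;
            let size = std::cmp::min(n_requests - start, MAX_IO_CONCURRENCY);
            (start, size)
        })
        .collect()
}
// e.g. sketch_batch_bounds(300) == [(0, 128), (128, 128), (256, 44)]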
#[cfg(test)]
mod tests {
use std::{fs::File, io::BufReader};
use bincode::deserialize_from;
use serde::{Deserialize, Serialize};
use crate::{common::AlignedBoxWithSlice, model::SECTOR_LEN};
use super::*;
pub const TEST_INDEX_PATH: &str =
"./tests/data/disk_index_siftsmall_learn_256pts_R4_L50_A1.2_alligned_reader_test.index";
pub const TRUTH_NODE_DATA_PATH: &str =
"./tests/data/disk_index_node_data_aligned_reader_truth.bin";
#[derive(Debug, Serialize, Deserialize)]
struct NodeData {
num_neighbors: u32,
coordinates: Vec<f32>,
neighbors: Vec<u32>,
}
impl PartialEq for NodeData {
fn eq(&self, other: &Self) -> bool {
self.num_neighbors == other.num_neighbors
&& self.coordinates == other.coordinates
&& self.neighbors == other.neighbors
}
}
#[test]
fn test_new_aligned_file_reader() {
// Replace "test_file_path" with actual file path
let result = WindowsAlignedFileReader::new(TEST_INDEX_PATH);
assert!(result.is_ok());
let reader = result.unwrap();
assert_eq!(reader.file_name, TEST_INDEX_PATH);
}
#[test]
fn test_read() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let read_length = 512; // adjust according to your logic
let num_read = 10;
let mut aligned_mem = AlignedBoxWithSlice::<u8>::new(read_length * num_read, 512).unwrap();
// create and add AlignedReads to the vector
let mut mem_slices = aligned_mem
.split_into_nonoverlapping_mut_slices(0..aligned_mem.len(), read_length)
.unwrap();
let mut aligned_reads: Vec<AlignedRead<'_, u8>> = mem_slices
.iter_mut()
.enumerate()
.map(|(i, slice)| {
let offset = (i * read_length) as u64;
AlignedRead::new(offset, slice).unwrap()
})
.collect();
let result = reader.read(&mut aligned_reads, &ctx);
assert!(result.is_ok());
}
#[test]
fn test_read_disk_index_by_sector() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let read_length = SECTOR_LEN; // adjust according to your logic
let num_sector = 10;
let mut aligned_mem =
AlignedBoxWithSlice::<u8>::new(read_length * num_sector, 512).unwrap();
// Each slice will be used as the buffer for a read request of a sector.
let mut mem_slices = aligned_mem
.split_into_nonoverlapping_mut_slices(0..aligned_mem.len(), read_length)
.unwrap();
let mut aligned_reads: Vec<AlignedRead<'_, u8>> = mem_slices
.iter_mut()
.enumerate()
.map(|(sector_id, slice)| {
let offset = (sector_id * read_length) as u64;
AlignedRead::new(offset, slice).unwrap()
})
.collect();
let result = reader.read(&mut aligned_reads, &ctx);
assert!(result.is_ok());
aligned_reads.iter().for_each(|read| {
assert_eq!(read.aligned_buf.len(), SECTOR_LEN);
});
let disk_layout_meta = reconstruct_disk_meta(aligned_reads[0].aligned_buf);
assert!(disk_layout_meta.len() > 9);
let dims = disk_layout_meta[1];
let num_pts = disk_layout_meta[0];
let max_node_len = disk_layout_meta[3];
let max_num_nodes_per_sector = disk_layout_meta[4];
assert!(max_node_len * max_num_nodes_per_sector < SECTOR_LEN as u64);
let num_nbrs_start = (dims as usize) * std::mem::size_of::<f32>();
let nbrs_buf_start = num_nbrs_start + std::mem::size_of::<u32>();
let mut node_data_array = Vec::with_capacity(max_num_nodes_per_sector as usize * 9);
		// Only validate sectors 1..=8, the first sectors holding graph nodes (sector 0 stores the disk-layout metadata).
(1..9).for_each(|sector_id| {
let sector_data = &mem_slices[sector_id];
for node_data in sector_data.chunks_exact(max_node_len as usize) {
// Extract coordinates data from the start of the node_data
let coordinates_end = (dims as usize) * std::mem::size_of::<f32>();
let coordinates = node_data[0..coordinates_end]
.chunks_exact(std::mem::size_of::<f32>())
.map(|chunk| f32::from_le_bytes(chunk.try_into().unwrap()))
.collect();
// Extract number of neighbors from the node_data
let neighbors_num = u32::from_le_bytes(
node_data[num_nbrs_start..nbrs_buf_start]
.try_into()
.unwrap(),
);
let nbors_buf_end =
nbrs_buf_start + (neighbors_num as usize) * std::mem::size_of::<u32>();
// Extract neighbors from the node data.
let mut neighbors = Vec::new();
for nbors_data in node_data[nbrs_buf_start..nbors_buf_end]
.chunks_exact(std::mem::size_of::<u32>())
{
let nbors_id = u32::from_le_bytes(nbors_data.try_into().unwrap());
assert!(nbors_id < num_pts as u32);
neighbors.push(nbors_id);
}
// Create NodeData struct and push it to the node_data_array
node_data_array.push(NodeData {
num_neighbors: neighbors_num,
coordinates,
neighbors,
});
}
});
// Compare that each node read from the disk index are expected.
let node_data_truth_file = File::open(TRUTH_NODE_DATA_PATH).unwrap();
let reader = BufReader::new(node_data_truth_file);
let node_data_vec: Vec<NodeData> = deserialize_from(reader).unwrap();
for (node_from_node_data_file, node_from_disk_index) in
node_data_vec.iter().zip(node_data_array.iter())
{
// Verify that the NodeData from the file is equal to the NodeData in node_data_array
assert_eq!(node_from_node_data_file, node_from_disk_index);
}
}
#[test]
fn test_read_fail_invalid_file() {
let reader = WindowsAlignedFileReader::new("/invalid_path");
assert!(reader.is_err());
}
#[test]
fn test_read_no_requests() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let mut read_requests = Vec::<AlignedRead<u8>>::new();
let result = reader.read(&mut read_requests, &ctx);
assert!(result.is_ok());
}
#[test]
fn | () {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let result = reader.get_ctx();
assert!(result.is_ok());
}
#[test]
fn test_register_thread() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let result = reader.register_thread();
assert!(result.is_ok());
}
fn reconstruct_disk_meta(buffer: &[u8]) -> Vec<u64> {
let size_of_u64 = std::mem::size_of::<u64>();
let num_values = buffer.len() / size_of_u64;
let mut disk_layout_meta = Vec::with_capacity(num_values);
let meta_data = &buffer[8..];
for chunk in meta_data.chunks_exact(size_of_u64) {
let value = u64::from_le_bytes(chunk.try_into().unwrap());
disk_layout_meta.push(value);
}
disk_layout_meta
}
}
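// --- Illustrative sketch (not part of the original source) ---
// reconstruct_disk_meta in the tests above skips an 8-byte header and decodes
// the rest of sector 0 as consecutive little-endian u64 values (num_pts, dims,
// ..., max_node_len, max_num_nodes_per_sector, ...). A self-contained
// round-trip over a hand-built buffer with arbitrary values:
#[cfg(test)]
mod disk_meta_layout_sketch {
    #[test]
    fn little_endian_u64_roundtrip() {
        let meta = [256u64, 128, 1, 4096, 5];
        let mut sector = vec![0u8; 8]; // the 8-byte header that gets skipped
        for value in meta.iter() {
            sector.extend_from_slice(&value.to_le_bytes());
        }
        let decoded: Vec<u64> = sector[8..]
            .chunks_exact(std::mem::size_of::<u64>())
            .map(|chunk| u64::from_le_bytes(chunk.try_into().unwrap()))
            .collect();
        assert_eq!(decoded, meta);
    }
}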
| test_get_ctx | identifier_name |
windows_aligned_file_reader.rs | /*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT license.
*/
use std::sync::Arc;
use std::time::Duration;
use std::{ptr, thread};
use crossbeam::sync::ShardedLock;
use hashbrown::HashMap;
use once_cell::sync::Lazy;
use platform::file_handle::{AccessMode, ShareMode};
use platform::{
file_handle::FileHandle,
file_io::{get_queued_completion_status, read_file_to_slice},
io_completion_port::IOCompletionPort,
};
use winapi::{
shared::{basetsd::ULONG_PTR, minwindef::DWORD},
um::minwinbase::OVERLAPPED,
};
use crate::common::{ANNError, ANNResult};
use crate::model::IOContext;
pub const MAX_IO_CONCURRENCY: usize = 128; // To do: explore the optimal value for this. The current value is taken from C++ code.
pub const FILE_ATTRIBUTE_READONLY: DWORD = 0x00000001;
pub const IO_COMPLETION_TIMEOUT: DWORD = u32::MAX; // Infinite timeout.
pub const DISK_IO_ALIGNMENT: usize = 512;
pub const ASYNC_IO_COMPLETION_CHECK_INTERVAL: Duration = Duration::from_micros(5);
/// Aligned read request for disk IO. It borrows an aligned mutable slice (typically carved out of an AlignedBoxWithSlice) as the read destination and exposes the data immutably via `aligned_buf()`.
pub struct AlignedRead<'a, T> {
/// where to read from
/// offset needs to be aligned with DISK_IO_ALIGNMENT
offset: u64,
/// where to read into
/// aligned_buf and its len need to be aligned with DISK_IO_ALIGNMENT
aligned_buf: &'a mut [T],
}
impl<'a, T> AlignedRead<'a, T> {
pub fn new(offset: u64, aligned_buf: &'a mut [T]) -> ANNResult<Self> {
Self::assert_is_aligned(offset as usize)?;
Self::assert_is_aligned(std::mem::size_of_val(aligned_buf))?;
Ok(Self {
offset,
aligned_buf,
})
}
fn assert_is_aligned(val: usize) -> ANNResult<()> {
match val % DISK_IO_ALIGNMENT {
0 => Ok(()),
_ => Err(ANNError::log_disk_io_request_alignment_error(format!(
"The offset or length of AlignedRead request is not {} bytes aligned",
DISK_IO_ALIGNMENT
))),
}
}
pub fn aligned_buf(&self) -> &[T] |
}
pub struct WindowsAlignedFileReader {
file_name: String,
// ctx_map is the mapping from thread id to io context. It is hashmap behind a sharded lock to allow concurrent access from multiple threads.
// ShardedLock: shardedlock provides an implementation of a reader-writer lock that offers concurrent read access to the shared data while allowing exclusive write access.
// It achieves better scalability by dividing the shared data into multiple shards, and each with its own internal lock.
// Multiple threads can read from different shards simultaneously, reducing contention.
// https://docs.rs/crossbeam/0.8.2/crossbeam/sync/struct.ShardedLock.html
	// Compared to RwLock, ShardedLock provides higher concurrency for read operations and is suitable for read-heavy workloads.
// The value of the hashmap is an Arc<IOContext> to allow immutable access to IOContext with automatic reference counting.
ctx_map: Lazy<ShardedLock<HashMap<thread::ThreadId, Arc<IOContext>>>>,
}
impl WindowsAlignedFileReader {
pub fn new(fname: &str) -> ANNResult<Self> {
let reader: WindowsAlignedFileReader = WindowsAlignedFileReader {
file_name: fname.to_string(),
ctx_map: Lazy::new(|| ShardedLock::new(HashMap::new())),
};
reader.register_thread()?;
Ok(reader)
}
// Register the io context for a thread if it hasn't been registered.
pub fn register_thread(&self) -> ANNResult<()> {
let mut ctx_map = self.ctx_map.write().map_err(|_| {
ANNError::log_lock_poison_error("unable to acquire write lock on ctx_map".to_string())
})?;
let id = thread::current().id();
if ctx_map.contains_key(&id) {
println!(
"Warning:: Duplicate registration for thread_id : {:?}. Directly call get_ctx to get the thread context data.",
id);
return Ok(());
}
let mut ctx = IOContext::new();
match unsafe { FileHandle::new(&self.file_name, AccessMode::Read, ShareMode::Read) } {
Ok(file_handle) => ctx.file_handle = file_handle,
Err(err) => {
return Err(ANNError::log_io_error(err));
}
}
// Create a io completion port for the file handle, later it will be used to get the completion status.
match IOCompletionPort::new(&ctx.file_handle, None, 0, 0) {
Ok(io_completion_port) => ctx.io_completion_port = io_completion_port,
Err(err) => {
return Err(ANNError::log_io_error(err));
}
}
ctx_map.insert(id, Arc::new(ctx));
Ok(())
}
// Get the reference counted io context for the current thread.
pub fn get_ctx(&self) -> ANNResult<Arc<IOContext>> {
let ctx_map = self.ctx_map.read().map_err(|_| {
ANNError::log_lock_poison_error("unable to acquire read lock on ctx_map".to_string())
})?;
let id = thread::current().id();
match ctx_map.get(&id) {
Some(ctx) => Ok(Arc::clone(ctx)),
None => Err(ANNError::log_index_error(format!(
"unable to find IOContext for thread_id {:?}",
id
))),
}
}
// Read the data from the file by sending concurrent io requests in batches.
pub fn read<T>(&self, read_requests: &mut [AlignedRead<T>], ctx: &IOContext) -> ANNResult<()> {
let n_requests = read_requests.len();
let n_batches = (n_requests + MAX_IO_CONCURRENCY - 1) / MAX_IO_CONCURRENCY;
let mut overlapped_in_out =
vec![unsafe { std::mem::zeroed::<OVERLAPPED>() }; MAX_IO_CONCURRENCY];
for batch_idx in 0..n_batches {
let batch_start = MAX_IO_CONCURRENCY * batch_idx;
let batch_size = std::cmp::min(n_requests - batch_start, MAX_IO_CONCURRENCY);
for j in 0..batch_size {
let req = &mut read_requests[batch_start + j];
let os = &mut overlapped_in_out[j];
match unsafe {
read_file_to_slice(&ctx.file_handle, req.aligned_buf, os, req.offset)
} {
Ok(_) => {}
Err(error) => {
return Err(ANNError::IOError { err: (error) });
}
}
}
let mut n_read: DWORD = 0;
let mut n_complete: u64 = 0;
let mut completion_key: ULONG_PTR = 0;
let mut lp_os: *mut OVERLAPPED = ptr::null_mut();
while n_complete < batch_size as u64 {
match unsafe {
get_queued_completion_status(
&ctx.io_completion_port,
&mut n_read,
&mut completion_key,
&mut lp_os,
IO_COMPLETION_TIMEOUT,
)
} {
// An IO request completed.
Ok(true) => n_complete += 1,
// No IO request completed, continue to wait.
Ok(false) => {
thread::sleep(ASYNC_IO_COMPLETION_CHECK_INTERVAL);
}
// An error occurred.
Err(error) => return Err(ANNError::IOError { err: (error) }),
}
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::{fs::File, io::BufReader};
use bincode::deserialize_from;
use serde::{Deserialize, Serialize};
use crate::{common::AlignedBoxWithSlice, model::SECTOR_LEN};
use super::*;
pub const TEST_INDEX_PATH: &str =
"./tests/data/disk_index_siftsmall_learn_256pts_R4_L50_A1.2_alligned_reader_test.index";
pub const TRUTH_NODE_DATA_PATH: &str =
"./tests/data/disk_index_node_data_aligned_reader_truth.bin";
#[derive(Debug, Serialize, Deserialize)]
struct NodeData {
num_neighbors: u32,
coordinates: Vec<f32>,
neighbors: Vec<u32>,
}
impl PartialEq for NodeData {
fn eq(&self, other: &Self) -> bool {
self.num_neighbors == other.num_neighbors
&& self.coordinates == other.coordinates
&& self.neighbors == other.neighbors
}
}
#[test]
fn test_new_aligned_file_reader() {
// Uses the prebuilt test index under tests/data (TEST_INDEX_PATH).
let result = WindowsAlignedFileReader::new(TEST_INDEX_PATH);
assert!(result.is_ok());
let reader = result.unwrap();
assert_eq!(reader.file_name, TEST_INDEX_PATH);
}
#[test]
fn test_read() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let read_length = 512; // adjust according to your logic
let num_read = 10;
let mut aligned_mem = AlignedBoxWithSlice::<u8>::new(read_length * num_read, 512).unwrap();
// create and add AlignedReads to the vector
let mut mem_slices = aligned_mem
.split_into_nonoverlapping_mut_slices(0..aligned_mem.len(), read_length)
.unwrap();
let mut aligned_reads: Vec<AlignedRead<'_, u8>> = mem_slices
.iter_mut()
.enumerate()
.map(|(i, slice)| {
let offset = (i * read_length) as u64;
AlignedRead::new(offset, slice).unwrap()
})
.collect();
let result = reader.read(&mut aligned_reads, &ctx);
assert!(result.is_ok());
}
#[test]
fn test_read_disk_index_by_sector() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let read_length = SECTOR_LEN; // adjust according to your logic
let num_sector = 10;
let mut aligned_mem =
AlignedBoxWithSlice::<u8>::new(read_length * num_sector, 512).unwrap();
// Each slice will be used as the buffer for a read request of a sector.
let mut mem_slices = aligned_mem
.split_into_nonoverlapping_mut_slices(0..aligned_mem.len(), read_length)
.unwrap();
let mut aligned_reads: Vec<AlignedRead<'_, u8>> = mem_slices
.iter_mut()
.enumerate()
.map(|(sector_id, slice)| {
let offset = (sector_id * read_length) as u64;
AlignedRead::new(offset, slice).unwrap()
})
.collect();
let result = reader.read(&mut aligned_reads, &ctx);
assert!(result.is_ok());
aligned_reads.iter().for_each(|read| {
assert_eq!(read.aligned_buf.len(), SECTOR_LEN);
});
let disk_layout_meta = reconstruct_disk_meta(aligned_reads[0].aligned_buf);
assert!(disk_layout_meta.len() > 9);
let dims = disk_layout_meta[1];
let num_pts = disk_layout_meta[0];
let max_node_len = disk_layout_meta[3];
let max_num_nodes_per_sector = disk_layout_meta[4];
assert!(max_node_len * max_num_nodes_per_sector < SECTOR_LEN as u64);
let num_nbrs_start = (dims as usize) * std::mem::size_of::<f32>();
let nbrs_buf_start = num_nbrs_start + std::mem::size_of::<u32>();
let mut node_data_array = Vec::with_capacity(max_num_nodes_per_sector as usize * 9);
// Only validate sectors 1..9, i.e. the first 8 data sectors that contain graph nodes (sector 0 is the metadata sector).
(1..9).for_each(|sector_id| {
let sector_data = &mem_slices[sector_id];
for node_data in sector_data.chunks_exact(max_node_len as usize) {
// Extract coordinates data from the start of the node_data
let coordinates_end = (dims as usize) * std::mem::size_of::<f32>();
let coordinates = node_data[0..coordinates_end]
.chunks_exact(std::mem::size_of::<f32>())
.map(|chunk| f32::from_le_bytes(chunk.try_into().unwrap()))
.collect();
// Extract number of neighbors from the node_data
let neighbors_num = u32::from_le_bytes(
node_data[num_nbrs_start..nbrs_buf_start]
.try_into()
.unwrap(),
);
let nbors_buf_end =
nbrs_buf_start + (neighbors_num as usize) * std::mem::size_of::<u32>();
// Extract neighbors from the node data.
let mut neighbors = Vec::new();
for nbors_data in node_data[nbrs_buf_start..nbors_buf_end]
.chunks_exact(std::mem::size_of::<u32>())
{
let nbors_id = u32::from_le_bytes(nbors_data.try_into().unwrap());
assert!(nbors_id < num_pts as u32);
neighbors.push(nbors_id);
}
// Create NodeData struct and push it to the node_data_array
node_data_array.push(NodeData {
num_neighbors: neighbors_num,
coordinates,
neighbors,
});
}
});
// Compare that each node read from the disk index are expected.
let node_data_truth_file = File::open(TRUTH_NODE_DATA_PATH).unwrap();
let reader = BufReader::new(node_data_truth_file);
let node_data_vec: Vec<NodeData> = deserialize_from(reader).unwrap();
for (node_from_node_data_file, node_from_disk_index) in
node_data_vec.iter().zip(node_data_array.iter())
{
// Verify that the NodeData from the file is equal to the NodeData in node_data_array
assert_eq!(node_from_node_data_file, node_from_disk_index);
}
}
#[test]
fn test_read_fail_invalid_file() {
let reader = WindowsAlignedFileReader::new("/invalid_path");
assert!(reader.is_err());
}
#[test]
fn test_read_no_requests() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let mut read_requests = Vec::<AlignedRead<u8>>::new();
let result = reader.read(&mut read_requests, &ctx);
assert!(result.is_ok());
}
#[test]
fn test_get_ctx() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let result = reader.get_ctx();
assert!(result.is_ok());
}
#[test]
fn test_register_thread() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let result = reader.register_thread();
assert!(result.is_ok());
}
fn reconstruct_disk_meta(buffer: &[u8]) -> Vec<u64> {
let size_of_u64 = std::mem::size_of::<u64>();
let num_values = buffer.len() / size_of_u64;
let mut disk_layout_meta = Vec::with_capacity(num_values);
let meta_data = &buffer[8..];
for chunk in meta_data.chunks_exact(size_of_u64) {
let value = u64::from_le_bytes(chunk.try_into().unwrap());
disk_layout_meta.push(value);
}
disk_layout_meta
}
}
| {
self.aligned_buf
} | identifier_body |
sm2.go | /*
Author: Yan Zhiwei
Written: 2018/08/01
Company: China Search Information Technology Co., Ltd. (Chinaso)
*/
/*
Technical background on elliptic-curve encryption, decryption and signature algorithms, and a Go
implementation: http://www.jeepyurongfu.net/blog/45309.html
*/
package sm2
import "C"
import (
"bytes"
"crypto"
"crypto/elliptic"
"crypto/rand"
"encoding/asn1"
"encoding/binary"
"errors"
"fmt"
"github.com/chinaso/fabricGM/cryptopkg/golangGM/sm3"
"io"
"math/big"
)
const (
aesIV = "IV for <SM2> CTR"
)
// -------------------------------------------------- //
// PublicKey represents an SM2 public key.
type PublicKey struct {
elliptic.Curve
//SM2P256Curve
//sm2p256Curve
X, Y *big.Int
}
// PrivateKey represents an Sm2 private key.
type PrivateKey struct {
PublicKey
D *big.Int
}
type sm2Signature struct {
R, S *big.Int
}
// -------------------------------------------------- //
var errNoOneParam = errors.New("zero parameter")
var ONE = new(big.Int).SetInt64(1)
var (
default_IDA = []byte{0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38}
)
// Public returns the public key corresponding to priv.
func (priv *PrivateKey) Public() crypto.PublicKey {
return &priv.PublicKey
}
// GenerateKey generates a public and private key pair.
func GenerateKey(rand io.Reader) (*PrivateKey, error) {
c := SM2P256()
k, err := randFieldElement(c, rand)
fmt.Println(k)
if err != nil {
return nil, err
}
priv := new(PrivateKey)
priv.PublicKey.Curve= c
priv.D = k
priv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes())
return priv, nil
}
func randFieldElement(c elliptic.Curve, rand io.Reader) (k *big.Int, err error) {
//params := c.Curve.Params()
params := c.Params()
b := make([]byte, params.BitSize/8+8)
_, err = io.ReadFull(rand, b)
if err != nil {
return
}
k = new(big.Int).SetBytes(b)
n := new(big.Int).Sub(params.N, ONE)
k.Mod(k, n)
k.Add(k, ONE)
return
}
// -------------------------------------------------- //
// SM2 algorithm overview: https://blog.csdn.net/samsho2/article/details/80772228
// Chinese national cryptography (GM/T) standard: http://c.gb688.cn/bzgk/gb/showGb?type=online&hcno=370AF152CB5CA4A377EB4D1B21DECAE0
// ZA=H256(ENTLA || IDA || a || b || xG || yG|| xA || yA)
func ZA(pub *PublicKey, IDA []byte) ([]byte, error) {
if len(IDA) <= 0 {
IDA = default_IDA
}
entlenA := len(IDA)
if entlenA >= 8192 {
return []byte{}, errors.New("SM2: uid too large")
}
sm2util :=sm2P256Util{}
ENTLA := uint16(8*entlenA)
ZA := sm3.New()
ZA.Write([]byte{byte((ENTLA >> 8) & 0xFF)})
ZA.Write([]byte{byte(ENTLA & 0xFF)})
ZA.Write(IDA)
ZA.Write(sm2util.p256ToBig(&sm2p256Params.a).Bytes())
//ZA.Write(sm2p256Params.A.Bytes())
ZA.Write(sm2p256Params.B.Bytes())
ZA.Write(sm2p256Params.Gx.Bytes())
ZA.Write(sm2p256Params.Gy.Bytes())
xBuf := pub.X.Bytes()
yBuf := pub.Y.Bytes()
if n := len(xBuf); n < 32 {
xBuf = append(zeroByteSlice()[:32-n], xBuf...)
}
ZA.Write(xBuf)
ZA.Write(yBuf)
return ZA.Sum(nil)[:32], nil
}
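// Illustrative sketch, not part of the original file: computing the user hash ZA for a freshly
// generated key pair. Passing a nil/empty IDA falls back to default_IDA ("1234567812345678"),
// and the result is a 32-byte SM3 digest. The helper name exampleZA is an assumption for
// illustration only.
func exampleZA() ([]byte, error) {
	priv, err := GenerateKey(rand.Reader)
	if err != nil {
		return nil, err
	}
	// Same call the signing and verification paths use internally.
	return ZA(&priv.PublicKey, nil)
}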
// 32byte
func zeroByteSlice() []byte {
return []byte{
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
}
}
// sign format = 30 + len(z) + 02 + len(r) + r + 02 + len(s) + s, z being what follows its size, ie 02+len(r)+r+02+len(s)+s
func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) {
// r, s, err := Sign(priv, msg)
r, s, err := SM2Sign(priv, msg, nil)
fmt.Println("msg:",msg)
if err != nil {
return nil, err
}
return asn1.Marshal(sm2Signature{r, s})
}
// ---------------------------------------------------------------- //
func (pub *PublicKey) Verify(msg []byte, sign []byte) bool {
var sm2Sign sm2Signature
_, err := asn1.Unmarshal(sign, &sm2Sign)
if err != nil {
return false
}
return SM2Verify(pub, msg, nil, sm2Sign.R, sm2Sign.S)
}
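// Usage sketch, not part of the original file: a sign/verify round trip through the method-level
// API above. Sign ignores the opts argument, so nil is passed here. The helper name
// exampleSignVerify is an assumption for illustration only.
func exampleSignVerify(msg []byte) (bool, error) {
	priv, err := GenerateKey(rand.Reader)
	if err != nil {
		return false, err
	}
	sig, err := priv.Sign(rand.Reader, msg, nil) // ASN.1 DER encoding of {R, S}
	if err != nil {
		return false, err
	}
	return priv.PublicKey.Verify(msg, sig), nil
}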
// ---------------------------------------------------------------- //
func (pub *PublicKey) Encrypt(data []byte) ([]byte, error) {
return SM2Encrypt(pub, data)
}
func (priv *PrivateKey) Decrypt(data []byte) ([]byte, error) {
return SM2Decrypt(priv, data)
}
// -------------------------------------------------------------- //
// Reference: https://blog.csdn.net/samsho2/article/details/80772228
func SM2Sign(priv *PrivateKey, msg, IDA []byte) (r, s *big.Int, err error) {
za, err := ZA(&priv.PublicKey, IDA)
if err != nil {
return nil, nil, err
}
e, err := hashMsg(za, msg)
if err != nil {
return nil, nil, err
}
//c := priv.PublicKey.sm2p256Curve
c := priv.PublicKey.Curve
N := c.Params().N
if N.Sign() == 0 {
return nil, nil, errNoOneParam
}
var k *big.Int
for { // Algorithm details adjusted (relative to plain ECDSA) to implement SM2
// r = e + x mod n
for {
k, err = randFieldElement(c, rand.Reader)
if err != nil {
r = nil
return
}
r, _ = priv.Curve.ScalarBaseMult(k.Bytes())
r.Add(r, e)
r.Mod(r, N)
if r.Sign() != 0 {
if t := new(big.Int).Add(r, k); t.Cmp(N) != 0 {
break
}
}
}
//s=(1+d)^(-1) * (k - r*d) mod n
rD := new(big.Int).Mul(priv.D, r)
s = new(big.Int).Sub(k, rD)
d1 := new(big.Int).Add(priv.D, ONE)
d1Inv := new(big.Int).ModInverse(d1, N)
s.Mul(s, d1Inv)
s.Mod(s, N)
if s.Sign() != 0 {
break
}
}
return
}
func hashMsg(za, msg []byte) (*big.Int, error) {
e := sm3.New()
e.Write(za)
e.Write(msg)
return new(big.Int).SetBytes(e.Sum(nil)[:32]), nil
}
// Verify verifies the signature in r, s of hash using the public key, pub. Its
// return value records whether the signature is valid.
func SM2Verify(pub *PublicKey, msg, IDA []byte, r, s *big.Int) bool {
c := pub.Curve
N := c.Params().N
one := new(big.Int).SetInt64(1)
if r.Cmp(one) < 0 || s.Cmp(one) < 0 {
return false
}
if r.Cmp(N) >= 0 || s.Cmp(N) >= 0 {
return false
}
//M=ZA || Msg
ZA,err := ZA(pub,IDA)
if err != nil {
return false
}
// e =H(M)
e,err := hashMsg(ZA,msg)
if err != nil {
return false
}
// t= (r+s) mod n
t := new(big.Int).Add(r, s)
t.Mod(t, N)
if t.Sign() == 0 {
return false
}
// Compute the elliptic-curve point C1 = [k]G = (x1, y1), where G is a base point of the curve
// whose order is prime; k is an integer, [k]G denotes the k-fold point (scalar multiplication),
// and (x1, y1) are the coordinates of the computed point C1.
//(x,y) = [s]G+[t]P
var x *big.Int
x1, y1 := c.ScalarBaseMult(s.Bytes()) //[s]G =p
x2, y2 := c.ScalarMult(pub.X, pub.Y, t.Bytes())//[t]P=t*(px,py)
x, _ = c.Add(x1, y1, x2, y2)
//R=(e+x) modn
x.Add(x, e)
x.Mod(x, N)
//R ?= r
return x.Cmp(r) == 0
}
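// Usage sketch, not part of the original file: the lower-level SM2Sign/SM2Verify pair with an
// explicit user ID. Signer and verifier must agree on IDA, otherwise ZA (and therefore e) differs
// and verification fails. The helper name and the sample ID are assumptions for illustration only.
func exampleSignWithID(priv *PrivateKey, msg []byte) bool {
	ida := []byte("alice@example.com") // hypothetical user ID
	r, s, err := SM2Sign(priv, msg, ida)
	if err != nil {
		return false
	}
	return SM2Verify(&priv.PublicKey, msg, ida, r, s)
}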
// Let the private and public keys be k and K respectively, i.e. K = kG, where G is the base point.
// Public-key encryption: pick a random number r and turn the message M into a ciphertext C.
// The ciphertext is a pair of points: C = {rG, M + rK}, where K is the public key.
func SM2Encrypt(pub *PublicKey, plaintest []byte) ([]byte, error){
length := len(plaintest)
for {
c := []byte{}
//curve := pub.sm2p256Curve
curve := pub.Curve
// Obtain a random number k
k, err := randFieldElement(curve, rand.Reader)
if err != nil {
return nil, err
}
//(x,y) = [k]P
x1, y1 := curve.ScalarBaseMult(k.Bytes())
x2, y2 := curve.ScalarMult(pub.X, pub.Y, k.Bytes())
x1Buf := x1.Bytes()
y1Buf := y1.Bytes()
x2Buf := x2.Bytes()
y2Buf := y2.Bytes()
if n := len(x1Buf); n < 32 {
x1Buf = append(zeroByteSlice()[:32-n], x1Buf...)
}
if n := len(y1Buf); n < 32 {
y1Buf = append(zeroByteSlice()[:32-n], y1Buf...)
}
if n := len(x2Buf); n < 32 {
x2Buf = append(zeroByteSlice()[:32-n], x2Buf...)
}
if n := len(y2Buf); n < 32 {
y2Buf = append(zeroByteSlice()[:32-n], y2Buf...)
}
//c1
c = append(c, x1Buf...) // x component
c = append(c, y1Buf...) // y component
//hash(x || M || y)
tm := []byte{}
tm = append(tm, x2Buf...)
tm = append(tm, plaintest...)
tm = append(tm, y2Buf...)
c3 := sm3.Sum(tm)
c = append(c, c3...)
ct, ok := kdf(x2Buf, y2Buf, length) // key stream used to form the ciphertext block C2
if !ok {
continue
}
c = append(c, ct...)
for i := 0; i < length; i++ {
c[96+i] ^= plaintest[i] //c2
}
//C = C1 || C2 || C3
return append([]byte{0x04}, c...), nil
}
}
/*
Obtain a random number k
C1 = (x1, y1) = [k]G
S = [h]P                  // h is the cofactor
(x2, y2) = [k]P
t = KDF(x2 || y2, klen)   // klen is the length of M; KDF is SM2's key derivation function
C2 = M XOR t
C3 = Hash(x2 || M || y2)
C = C1 || C2 || C3
*/
// Implementation reference for the SM2 key derivation function (KDF): https://blog.csdn.net/Heidlyn/article/details/53993002
// Its purpose is to derive keying material from a shared bit string.
func kdf(x, y []byte, length int) ([]byte, bool) {
var c []byte
//ct := intToBytes(1)//ct=0x00000001
ct := 1
h := sm3.New()
x = append(x, y...) //Z
for i, j := 0, (length+31)/32; i < j; i++ { // ct runs from 1 to ceil(klen/v)
// Hash(Z || ct )
h.Reset()
h.Write(x)
h.Write(intToBytes(ct))
hash := h.Sum(nil)
if i+1 == j && length%32 != 0 {
c = append(c, hash[:length%32]...)
} else {
c = append(c, hash...)
}
ct++
}
for i := 0; i < length; i++ {
if c[i] != 0 {
return c, true
}
}
return c, false
}
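// Sketch, not part of the original file: kdf derives exactly `length` bytes from x2 || y2 by
// hashing with an incrementing 32-bit counter; the boolean is false only when every derived byte
// is zero (an unusable key stream). The helper name exampleKDF is an assumption for illustration only.
func exampleKDF(x2Buf, y2Buf []byte, plainLen int) ([]byte, error) {
	t, ok := kdf(x2Buf, y2Buf, plainLen)
	if !ok {
		return nil, errors.New("kdf produced an all-zero key stream")
	}
	return t, nil // len(t) == plainLen
}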
func intToBytes(x int) []byte {
var buf = make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(x))
return buf
}
/*
C1 is taken from C; verify that C1 satisfies the curve equation. // The length of C2 is known, so C1 can be extracted.
S = [h]C1; if S is the point at infinity, abort.
(x2, y2) = [d]C1
t = KDF(x2 || y2, klen)
M' = C2 XOR t
u = Hash(x2 || M' || y2), check u ?= C3
M' is the recovered plaintext
*/
// SM2 decryption
func SM2Decrypt(priv *PrivateKey, ciphertext []byte) ([]byte, error) {
ciphertext = ciphertext[1:]
length := len(ciphertext) - 96
curve := priv.Curve
x := new(big.Int).SetBytes(ciphertext[:32])
y := new(big.Int).SetBytes(ciphertext[32:64])
// (x2,y2) = [dB]C1 C1=(x,y)
x2, y2 := curve.ScalarMult(x, y, priv.D.Bytes())
x2Buf := x2.Bytes()
y2Buf := y2.Bytes()
if n := len(x2Buf); n < 32 {
x2Buf = append(zeroByteSlice()[:32-n], x2Buf...)
}
if n := len(y2Buf); n < 32 {
y2Buf = append(zeroByteSlice()[:32-n], y2Buf...)
}
// t = KDF(x2 || y2 ,klen)
t, ok := kdf(x2Buf, y2Buf, length)
if !ok {
return nil, errors.New("Decrypt: failed to decrypt")
}
for i := 0; i < length; i++ {
t[i] ^= ciphertext[i+96]
}
//U = Hash(x2 || M || y)
tm := []byte{}
tm = append(tm, x2Buf...)
tm = append(tm, t...)
tm = append(tm, y2Buf...)
h := sm3.Sum(tm)
if bytes.Compare(h, ciphertext[64:96]) != 0 {
return t, errors.New("Decrypt: failed to decrypt")
}
return t, nil
}
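// Usage sketch, not part of the original file: an encrypt/decrypt round trip. With the layout
// produced above, the ciphertext is 0x04 || C1 (64 bytes) || C3 (32 bytes) || C2 (len(plaintext) bytes).
// The helper name exampleEncryptDecrypt is an assumption for illustration only.
func exampleEncryptDecrypt(pub *PublicKey, priv *PrivateKey, plaintext []byte) error {
	ct, err := SM2Encrypt(pub, plaintext)
	if err != nil {
		return err
	}
	pt, err := SM2Decrypt(priv, ct)
	if err != nil {
		return err
	}
	if !bytes.Equal(pt, plaintext) {
		return errors.New("SM2 round trip mismatch")
	}
	return nil
}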
type zr struct {
io.Reader
}
func (z *zr) Read(dst []byte) (n int, err error) {
for i := range dst {
dst[i] = 0
}
return len(dst), nil
}
var zeroReader = &zr{}
func getLastBit(a *big.Int) uint {
return a.Bit(0)
}
func Compress(a *PublicKey) []byte {
buf := []byte{}
yp := getLastBit(a.Y)
buf = append(buf, a.X.Bytes()...)
if n := len(a.X.Bytes()); n < 32 {
buf = append(zeroByteSlice()[:(32-n)], buf...)
}
buf = append([]byte{byte(yp)}, buf...)
return buf
}
func Decompress(a []byte) *PublicKey {
var aa, xx, xx3 sm2P256FieldElement
SM2P256()
x := new(big.Int).SetBytes(a[1:])
curve := sm2p256Params
sm2util :=sm2P256Util{}
sm2util.p256FromBig(&xx, x)
sm2util.p256Square(&xx3, &xx) // x3 = x ^ 2
sm2util.p256Mul(&xx3, &xx3, &xx) // x3 = x ^ 2 * x
sm2util.p256Mul(&aa, &curve.a, &xx) // a = a * x
sm2util.p256Add(&xx3, &xx3, &aa)
sm2util.p256Add(&xx3, &xx3, &curve.b)
y2 := sm2util.p256ToBig(&xx3)
y := new(big.Int).ModSqrt(y2, sm2p256Params.P)
if getLastBit(y) != uint(a[0]) {
y.Sub(sm2p256Params.P, y)
}
return &PublicKey{
Curve: SM2P256(),
X: x,
Y: y,
}
}
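// Sketch, not part of the original file: point compression keeps only the x coordinate plus one
// leading byte carrying the parity of y; Decompress solves the curve equation to recover y and
// flips it when the parity does not match. The helper name is an assumption for illustration only.
func exampleCompressRoundTrip(pub *PublicKey) bool {
	buf := Compress(pub)    // 33 bytes: parity byte followed by the 32-byte x coordinate
	back := Decompress(buf) // recomputes y from x and the parity byte
	return back.X.Cmp(pub.X) == 0 && back.Y.Cmp(pub.Y) == 0
}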
// ------------------------------------ //
const (
BitSize = 256
KeyBytes = (BitSize + 7) / 8
UnCompress = 0x04
)
func (pub *PublicKey) GetUnCompressBytes() []byte {
xBytes := pub.X.Bytes()
yBytes := pub.Y.Bytes()
xl := len(xBytes)
yl := len(yBytes)
raw := make([]byte, 1+KeyBytes*2)
raw[0] = UnCompress
if xl > KeyBytes {
copy(raw[1:1+KeyBytes], xBytes[xl-KeyBytes:])
} else if xl < KeyBytes {
copy(raw[1+(KeyBytes-xl):1+KeyBytes], xBytes)
} else {
copy(raw[1:1+KeyBytes], xBytes)
}
if yl > KeyBytes {
copy(raw[1+KeyBytes:], yBytes[yl-KeyBytes:])
} else if yl < KeyBytes {
copy(raw[1+KeyBytes+(KeyBytes-yl):], yBytes)
} else {
copy(raw[1+KeyBytes:], yBytes)
}
return raw
}
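// Sketch, not part of the original file: the uncompressed encoding is 1+2*KeyBytes = 65 bytes,
// an UnCompress (0x04) tag followed by the 32-byte big-endian X and Y coordinates, left-padded
// with zeros when they are short. The helper name is an assumption for illustration only.
func exampleUncompressedLen(pub *PublicKey) bool {
	raw := pub.GetUnCompressBytes()
	return len(raw) == 1+KeyBytes*2 && raw[0] == UnCompress
}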
func (pub *PublicKey) GetRawBytes() []byte {
raw := pub.GetUnCompressBytes()
return raw[1:]
} | identifier_name |
varausohjelmajs.js | // All JavaScript related to the admin page and to redirecting
// Array in which all registered users are stored
var userArray = [];
userArray[0] = "@ default default"
// sessionStorage stores a string:
// the array is first turned into a string with JSON.stringify, and when read back
// JSON.parse is used to turn the stored string back into a modifiable array
// sessionStorage.setItem("mySharedLoginData", JSON.stringify(userArray));
var error = document.getElementById("RegisterAlertArea");
var loginError = document.getElementById("LoginAlertArea");
var RegisterErrorText = document.getElementById("errorText");
function registerUser(){
var check = document.getElementById("adminCheckbox").checked;
var x = document.forms["registerForm"]["givenUsername"].value;
var y = document.forms["registerForm"]["givenPassword"].value;
// Check that values were given and that they do not collide with an already registered user
if (x==null || x == "" || y==null || y==""){
error.style.display = 'block';
return;
} else if (userArray.includes("@ " + x + " " + y) || userArray.includes(x + " " + y)) {
error.style.display = 'block';
RegisterErrorText.textContent = "ERROR: User already exists within current instance";
return;
} else {
error.style.display = 'none';
if (check == true){
userArray.push("@ " + x + " " + y);
alert("1");
} else {
userArray.push(x + " " + y);
alert("2");
alert(JSON.stringify(userArray));
}
}
// "\n" acts as the line break between entries in the textarea
(document.getElementById("addedUsersArea")).value = userArray.join("\n")
sessionStorage.setItem("mySharedLoginData", JSON.stringify(userArray));
// Push the just-registered user to the database
var elokuvaPOST = {
username: x,
password: y,
elokuva: [],
sali: [],
aika: [],
paikka: []
};
$.ajax({
type: 'POST',
url: 'http://rest.learncode.academy/api/elokuvaData/kauttajatTesti',
data: elokuvaPOST,
success: function(dataPOST){
sessionStorage.setItem("mySharedLoginUsername", x);
sessionStorage.setItem("mySharedLoginPassword", y);
console.log(sessionStorage.getItem("mySharedLoginUsername") + " & " + sessionStorage.getItem("mySharedLoginPassword"));
console.log("user registered to database:");
console.log(dataPOST);
}
});
}
// loginArray holds the users registered during this session (read from sessionStorage) and is
// compared against the username and password typed into the input boxes; it is not connected
// to the REST API database in any way, the data lives only in sessionStorage.
function loginUser(){
var loginArray = [];
// sessionStorage returns a string; JSON.parse is used to turn it back into an array
loginArray = JSON.parse(sessionStorage.getItem("mySharedLoginData"));
// check whether any users have been registered during this session
if (loginArray == null || loginArray == undefined){
loginError.style.display = 'block';
RegisterErrorText.innerHTML = "ERROR: No readable users registered";
return;
}
var x = document.forms["loginForm"]["loginUsername"].value;
var y = document.forms["loginForm"]["loginPassword"].value;
// could be made a two-level if statement, separately for an empty field and for a field with wrong values
if (x==null || x == "" || y==null || y=="" || (!loginArray.includes("@ " + x + " " + y) && !loginArray.includes(x + " " + y))){
loginError.style.display = 'block';
RegisterErrorText.innerHTML = "ERROR: Incorrect username or password";
alert(loginArray.includes("@ " + x + " " + y) + " " + loginArray.includes(x + " " + y));
return;
}
alert(loginArray.includes("@ " + x + " " + y) + " " + loginArray.includes(x + " " + y));
// this if statement sets the most recently logged-in user as the instance's login; it is used later for reservations
var currentUserImput = "@ " + x + " " + y
if (loginArray.includes("@ " + x + " " + y) && searchStringInArray("@ " + x + " " + y, loginArray) != -1){
alert("adminuser");
sessionStorage.setItem("currentLoginIndex", searchStringInArray("@ " + x + " " + y, loginArray));
sessionStorage.setItem("currentLoginUser", loginArray[searchStringInArray("@ " + x + " " + y, loginArray)]);
//redirect loginin jalkeen
window.location.href="index.html";
} else if (loginArray.includes(x + " " + y) && searchStringInArray(x + " " + y, loginArray) != -1){
alert("normaluser");
sessionStorage.setItem("currentLoginIndex", searchStringInArray(x + " " + y, loginArray));
sessionStorage.setItem("currentLoginUser", loginArray[searchStringInArray(x + " " + y, loginArray)]);
//redirect loginin jalkeen
window.location.href="elokuvaAsiakassivu.html";
} else alert("Unknown login error has occurred");
}
// Looks up the current login attempt from the list of registered users;
// returns the index at which the user sits if the given parameter is in the list (otherwise -1)
function searchStringInArray (searchString, searchStringArray) {
for ( var i = 0 ; i < searchStringArray.length ; i++ ) {
if (searchStringArray[i] == searchString)
return i;
}
return -1;
}
// function that updates the HTML sections that depend on the given user
function paivitaKayttajaElementit(kayttajaElementti){
kayttajaElementti.innerHTML = sessionStorage.getItem("currentLoginUser");
alert(sessionStorage.getItem("currentLoginUser"));
}
stringElementti1 = '<td class=" has-events" rowspan="';
rowspanElementti = ""+ listanPituusCounter +"";
stringElementti2 = '"><div class="row-fluid elokuvaElementti1" style="width: 99%; height: 100%;"><span class="title">' ;
stringElementti3 = '</span> <span class="sali"><a>' ;
stringElementti4 = '</a></span><span class="aika">' ;
stringElementti5 = '</span></div></td>' ;
perusStringElementti = '<td class=" no-events" rowspan="1"></td>';
var elokuvaBlokki = stringElementti1 + rowspanElementti + stringElementti2 + elokuvanNimi + stringElementti3 + sali + stringElementti4 + aika + stringElementti5;
var elokuvanNimi = "asda";
var sali = "sali 1*";
var aika = changeFunc() || "00:31";
jQuery(".list-group-item").click(function (e) {
jQuery(this).addClass('active').siblings().removeClass('active');
});
jQuery("#sel1").click(function (ee) {
jQuery(this).addClass('selected').siblings().removeClass('active');
});
// Event listeners shelved, since the check can be placed at the start of the submit method
var elokuvanNimiElementti = document.getElementById("ElokuvaNimiImput");
var selectBox = document.getElementById("sel2");
var selectedValue = selectBox.options[selectBox.selectedIndex].value;
var selectedTimeIndex = selectBox.options[selectBox.selectedIndex].index;
var selectedTimeIndex2 = selectedTimeIndex;
elokuvanNimiElementti.addEventListener("", changeFunc)
selectBox.addEventListener("", changeFunc)
function changeFunc() {
var selectBox = document.getElementById("sel2");
selectedValue = selectBox.options[selectBox.selectedIndex].innerHTML;
selectedTimeIndex = selectBox.options[selectBox.selectedIndex].index;
selectedTimeIndex2 = selectedTimeIndex;
aika = selectedValue;
elokuvanNimi = document.getElementById("usrInputNimi").value;
updateHTMLBlock();
}
$("td").change(function() {
alert($(this).find("option:selected").text()+' clicked!');
});
$("#myElement").click(function() {
$(this).siblings(":last").hide();
});
$("a").click(function(event){
event.preventDefault();
});
// updates all the clickevents for the td tags of the calendar
var checklist = document.getElementsByTagName("td");
function updateCalendarEvents(){
var checklist = document.getElementsByTagName("td");
for (var i = 0; i < checklist.length; i++) {
checklist[i].addEventListener("click", updateSelectedtd)
}
}
// var tdlista = checklist.querySelectorAll("td");
for (var i = 0; i < checklist.length; i++) {
checklist[i].addEventListener("click", updateSelectedtd)
}
function updateSelectedtd(){
for (var k = 0; k < checklist.length; k++) {
checklist[k].style.backgroundColor = "#492079";
if ($(checklist[k]).hasClass('selected')){
checklist[k].classList.remove("selected");
}
else if ($(checklist[k]).hasClass('selectedForAnnihilation')){
checklist[k].classList.remove("selectedForAnnihilation");
}
}
if($(this).hasClass('noselect')){
return;
} else if($(this).hasClass('has-events')){
$(this).addClass('selectedForAnnihilation')
this.style.backgroundColor = "#e20a0a";
}
else {
this.classList.add("selected");
$(this).addClass('selected')
this.style.backgroundColor = "green";
}
}
function updateHTMLBlock(){
elokuvaBlokki = stringElementti1 + rowspanElementti + stringElementti2 + elokuvanNimi + stringElementti3 + sali + stringElementti4 + aika + stringElementti5;
}
var asda = document.getElementsByName
function tarkistaKoko(){
if (parseInt(rowspanKokoSallija, 10) < (selectedTimeIndex +1)) {
return false;
} else {
return true;
}
}
function tarkistaKokoKonfliktit(){
while(selectedTimeIndex2 > 0) {
if (!$(seuraavanLapsitdt2[rowspanKohta]).hasClass('no-events')){
alert("Asettamasi aika on konfliktissa toisen ajan kanssa");
return false;
}
seuraavanLapsitdt2 = seuraavanLapsitdt2[0].parentElement.nextElementSibling.children;
selectedTimeIndex2--;
}
}
var rowspanKokoSallija = 0;
var seuraavaIsantatd;
var seuraavanLapsitdt;
var tdosoitin;
var listanPituusCounter;
// Used by the admin to place a new movie block into the calendar.
// Calls the conflict check and removes the empty blocks from underneath the movie block.
function asetaAika(){
changeFunc();
rowspanElementti = selectedTimeIndex +1;
updateHTMLBlock();
if (elokuvanNimi == ""){
alert("Et antanut nimea");
return;
}
for (var u = 0; u < checklist.length; u++) {
if ($(checklist[u]).hasClass('selected')){
var whileLoopControlElement = checklist[u];
seuraavanLapsitdt = whileLoopControlElement.parentElement.nextElementSibling.children;
seuraavanLapsitdt2 = whileLoopControlElement.parentElement.nextElementSibling.children;
rowspanKohta = $(checklist[u]).index();
rowspanKokoSallija = parseInt(checklist[u].parentElement.children[0].className.split(' ')[1]);
if (tarkistaKoko() == false){
alert("Elokuvan aika yli aukiolajan");
return;
}
if (tarkistaKokoKonfliktit() != false){
while(selectedTimeIndex > 0) {
//FIXME: the index value rowspanKohta is wrong, because the code does not take into account
// the grid elements that are missing underneath existing movie blocks;
// the code should be rewritten so that movie blocks
// carry placeholder blocks with them.
if ($(seuraavanLapsitdt[rowspanKohta]).hasClass('no-events')){
$(seuraavanLapsitdt[rowspanKohta]).remove();
}
seuraavanLapsitdt = seuraavanLapsitdt[0].parentElement.nextElementSibling.children;
selectedTimeIndex--;
}
} else return;
$(checklist[u]).replaceWith( elokuvaBlokki );
// while(selectedTimeIndex > 0) {
// $(seuraavanLapsitdt).last("td").remove();
/**
* the leftover undefined value could be removed here; undefined is automatically at the end of the list
*/
// listanPituusCounter = seuraavanLapsitdt.length;
// seuraavanLapsitdt = seuraavanLapsitdt[0].parentElement.nextElementSibling.children;
// selectedTimeIndex--;
// }
}
}
updateCalendarEvents();
tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
}
var para = document.createElement("td");
function poistaAika(){
for (var u = 0; u < checklist.length; u++) {
// if ($(checklist[u]).hasClass('selectedForAnnihilation') && $(checklist[u]).hasClass('no-events')) {
// $(checklist[u]).replaceWith( perusStringElementti );
// }
if ($(checklist[u]).hasClass('selectedForAnnihilation')){
var lisaajaCounterArvo = checklist[u].rowSpan;
var whileLoopControlElement = checklist[u];
seuraavanLapsitdt = whileLoopControlElement.parentElement.children;
var seuraavanParenttd = whileLoopControlElement.parentElement.nextElementSibling;
rowspanKohta = $(checklist[u]).index();
while(lisaajaCounterArvo > 1) {
// if ($(seuraavanLapsitdt[rowspanKohta]).hasClass('no-events')){
// seuraavanParenttd.insertBefore(para, lapsitdtNode[rowspanKohta]);
seuraavanParenttd.children[rowspanKohta].insertAdjacentHTML("beforebegin", perusStringElementti);
alert("used");
// }
// var seuraavanParenttd = seuraavanLapsitdt[0].parentElement.nextElementSibling;
// $( seuraavanParenttd.previousElementSibling ).append( perusStringElementti );
// listanPituusCounter = seuraavanLapsitdt.length;
seuraavanParenttd = seuraavanParenttd.children[0].parentElement.nextElementSibling;
lisaajaCounterArvo--;
}
alert(checklist[u].rowSpan);
$(checklist[u]).replaceWith( perusStringElementti );
updateCalendarEvents();
}
tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
}
}
var modiviedtime = document.lastModified;
var ataglist = document.getElementsByClassName("saliTogglet");
//ataglist = fillArray(ataglist, 3);
//ataglist[0] = document.getElementById("original");
var tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
//var tableArray = document.getElementsByClassName("tableTogglet");
//tableArray = fillArray(tableArray, 4);
//tableArray[0] = document.getElementById("theTable");
//var nappilist = document.getElementsByClassName("nappiSetit");
var nappilist = $(".nappiSetit");
//nappilist = fillArray(nappilist, 3);
//nappilist[0] = document.getElementById("nappiSetti");
var teatteriNappilist = document.getElementsByClassName("teatteriTogglet");
function toggleTeatteri(sali){
// for (var i = 0; i < ataglist.length; i++){
// $(ataglist[i]).removeClass("active");
// }
$( nappilist[sali] ).toggle(true);
$(nappilist[sali]).siblings().toggle( false );
}
toggleTeatteri("0")
$(ataglist[0]).addClass("active");
function toggleShowRoom(room){ |
toggleShowRoom(0);
function toggleTable(indexsi) {
for (var i = 0; i < ataglist.length; i++) {
var x = ataglist[i];
if (x.style.display === 'none') {
x.style.display = 'block';
} else {
x.style.display = 'none';
}
}
}
//var lTable = document.getElementById(""+table1+"");
//lTable.style.display = (lTable.style.display == "table1") ? "none" : "table1";
// Switch the auditorium when the movie theater buttons are clicked
jQuery(".list-group-item").click(function (e) {
jQuery(this).addClass('active').siblings().removeClass('active');
});
//for (var i = 0; i < teatteriNappilist.length; i++) {
// teatteriNappilist[i].addEventListener("click", toggleShowRoom())
//}
//arraykokeilu----------------------------------------------------------------------------------------------------------------------------
function fillArray(elementti, arvo) {
var arr = [];
for (var i = 0; i < arvo; i++) {
arr.push(elementti);
}
return arr;
}
//function getIndexAndWorkFromThere(){
//for (var i = 0, neljanNappiSetit = aNapit.children.length; i < len; i++){
// var aNapit = document.getElementById('nappiSetti');
// for (var i = 0, len = aNapit.children.length; i < len; i++){
//
// (function(index){
// aNapit.children[i].onclick = function(){
// alert(index) ;
// toggleTable(index);
//
// }
//
// })(i); //TAAVAYTBRHTHEFUCKISTHISMARKINGANDWHYDOESREMOVINGITBREAKTHECODE
//
// }
//}
//var Hienotapatehdapuuttuvialukuja = clickOnButtonList || 0;
// because clickOnButtonList cannot be stored in the variable yet (it does not exist at this point), 0 is stored in it instead.
//var clickOnButtonList = document.getElementsByClassName("list-group-item");
//for (i = 0; i < clickOnButtonList.length; i++ ){
// clickOnButtonList[i].addEventListener("click", clickOnButton)
//}
//function clickOnButton(){
// //Looppi toimii!
//}
//piece of code that does actions
//minimize repeated use of functions
//function go(name, age) {
// "use strict";
// if (age < 20) {
// return name + "!";
// } else {
// return name;
// }
// alert(go("will",34));
//}
|
for (var i = 0; i < ataglist.length; i++){
$( ataglist[i] ).removeClass("active");
}
$( tablelist[room] ).toggle(true);
$(tablelist[room]).siblings().toggle( false );
}; | identifier_body |
varausohjelmajs.js | // Kaikki javascripti joka liittyy adminsivuun ja redirectaamiseen
// array johon kaikki lisatyt kaytajat tallennetaan
var userArray = [];
userArray[0] = "@ default default"
// sessionstorage asettaa stringin,
// aluksi muokataan array string-muotoon kayttamalla JSON.stringify jonka jalkeen
// kaytetaan JSON:ia muokkaamaan asetettavaa stringia takaisin arrayksi muokattavaan muotoon (JSON.parse)
// sessionStorage.setItem("mySharedLoginData", JSON.stringify(userArray));
var error = document.getElementById("RegisterAlertArea");
var loginError = document.getElementById("LoginAlertArea");
var RegisterErrorText = document.getElementById("errorText");
function registerUser(){
var check = document.getElementById("adminCheckbox").checked;
var x = document.forms["registerForm"]["givenUsername"].value;
var y = document.forms["registerForm"]["givenPassword"].value;
// Tarkistaa onko arvoja annettu ja onko annetuissa arvoissa paallekkaisyyksia
if (x==null || x == "" || y==null || y==""){
error.style.display = 'block';
return;
} else if (userArray.includes("@ " + x + " " + y) || userArray.includes(x + " " + y)) {
error.style.display = 'block';
RegisterErrorText.textContent = "ERROR: User already exists within current instance";
return;
} else {
error.style.display = 'none';
if (check == true){
userArray.push("@ " + x + " " + y);
alert("1");
} else {
userArray.push(x + " " + y);
alert("2");
alert(JSON.stringify(userArray));
}
}
// /n siis toimii rivinvaihtona inputtien valilla
(document.getElementById("addedUsersArea")).value = userArray.join("\n")
sessionStorage.setItem("mySharedLoginData", JSON.stringify(userArray));
// Paivitetaan tietokantaan juuri lisatty kayttaja
var elokuvaPOST = {
username: x,
password: y,
elokuva: [],
sali: [],
aika: [],
paikka: []
};
$.ajax({
type: 'POST',
url: 'http://rest.learncode.academy/api/elokuvaData/kauttajatTesti',
data: elokuvaPOST,
success: function(dataPOST){
sessionStorage.setItem("mySharedLoginUsername", x);
sessionStorage.setItem("mySharedLoginPassword", y);
console.log(sessionStorage.getItem("mySharedLoginUsername") + " & " + sessionStorage.getItem("mySharedLoginPassword"));
console.log("user registered to database:");
console.log(dataPOST);
}
});
}
// loginarray viittaa siis tamanhetkisiin syoteboxeihin laitettuun kayttajanimeen ja salasanaan
// loginarray ei ole millaan tavalla liitoksissa REST-ohjelmointirajapinnan tietokantaan vaan
// tieto on vain sessionstoragessa.
function loginUser(){
var loginArray = [];
// Sessionstorage palauttaa Stringin, kaytetaan JSON:in parse-metodia muuttamaan listan string-muotoon
loginArray = JSON.parse(sessionStorage.getItem("mySharedLoginData"));
// tarkistetaan onko yhtaan kayttajaa kirjattu sisaan tassa sessiossa
if (loginArray == null || loginArray == undefined){
loginError.style.display = 'block';
RegisterErrorText.innerHTML = "ERROR: No readable users registered";
return;
}
var x = document.forms["loginForm"]["loginUsername"].value;
var y = document.forms["loginForm"]["loginPassword"].value;
// voisi tehda kaksitasosen if-lausekkeen, erikseen tyhjalle kentalle ja kentalle jossa arvot ovat vaaria
if (x==null || x == "" || y==null || y=="" || (!loginArray.includes("@ " + x + " " + y) && !loginArray.includes(x + " " + y))){
loginError.style.display = 'block';
RegisterErrorText.innerHTML = "ERROR: Incorrect username or password";
alert(loginArray.includes("@ " + x + " " + y) + " " + loginArray.includes(x + " " + y));
return;
}
alert(loginArray.includes("@ " + x + " " + y) + " " + loginArray.includes(x + " " + y));
// tassa if-lauseessa asetetaan uusin kirjautunut kayttaja instanssin loginiksi, tata kaytetaan jatkossa varauksiin
var currentUserImput = "@ " + x + " " + y
if (loginArray.includes("@ " + x + " " + y) && searchStringInArray("@ " + x + " " + y, loginArray) != -1){
alert("adminuser");
sessionStorage.setItem("currentLoginIndex", searchStringInArray("@ " + x + " " + y, loginArray));
sessionStorage.setItem("currentLoginUser", loginArray[searchStringInArray("@ " + x + " " + y, loginArray)]);
//redirect loginin jalkeen
window.location.href="index.html";
} else if (loginArray.includes(x + " " + y) && searchStringInArray(x + " " + y, loginArray) != -1){
alert("normaluser");
sessionStorage.setItem("currentLoginIndex", searchStringInArray(x + " " + y, loginArray));
sessionStorage.setItem("currentLoginUser", loginArray[searchStringInArray(x + " " + y, loginArray)]);
//redirect loginin jalkeen
window.location.href="elokuvaAsiakassivu.html";
} else alert("Unknown login error has occurred");
}
// loginuserin tamanhetkisen kirjautujan etsiminen rekisteroityjen kayttajien listasta,
// palauttaa indeksin jossa kauttaja on mikali annettu parametri on listassa
function searchStringInArray (searchString, searchStringArray) {
for ( var i = 0 ; i < searchStringArray.length ; i++ ) {
if (searchStringArray[i] == searchString)
return i;
}
return -1;
}
// funktio jolla paivitetaan html-osiot jotka ovat riippuvaisia annetusta kauttajasta
function paivitaKayttajaElementit(kayttajaElementti){
kayttajaElementti.innerHTML = sessionStorage.getItem("currentLoginUser");
alert(sessionStorage.getItem("currentLoginUser"));
}
stringElementti1 = '<td class=" has-events" rowspan="';
rowspanElementti = ""+ listanPituusCounter +"";
stringElementti2 = '"><div class="row-fluid elokuvaElementti1" style="width: 99%; height: 100%;"><span class="title">' ;
stringElementti3 = '</span> <span class="sali"><a>' ;
stringElementti4 = '</a></span><span class="aika">' ;
stringElementti5 = '</span></div></td>' ;
perusStringElementti = '<td class=" no-events" rowspan="1"></td>';
var elokuvaBlokki = stringElementti1 + rowspanElementti + stringElementti2 + elokuvanNimi + stringElementti3 + sali + stringElementti4 + aika + stringElementti5;
var elokuvanNimi = "asda";
var sali = "sali 1*";
var aika = changeFunc() || "00:31";
jQuery(".list-group-item").click(function (e) {
jQuery(this).addClass('active').siblings().removeClass('active');
});
jQuery("#sel1").click(function (ee) {
jQuery(this).addClass('selected').siblings().removeClass('active');
});
// Eventlistenerit hyllytetty silla tarkistuksen voi asettaa submittausmetodin alkuun
var elokuvanNimiElementti = document.getElementById("ElokuvaNimiImput");
var selectBox = document.getElementById("sel2");
var selectedValue = selectBox.options[selectBox.selectedIndex].value;
var selectedTimeIndex = selectBox.options[selectBox.selectedIndex].index;
var selectedTimeIndex2 = selectedTimeIndex;
elokuvanNimiElementti.addEventListener("", changeFunc)
selectBox.addEventListener("", changeFunc)
function changeFunc() {
var selectBox = document.getElementById("sel2");
selectedValue = selectBox.options[selectBox.selectedIndex].innerHTML;
selectedTimeIndex = selectBox.options[selectBox.selectedIndex].index;
selectedTimeIndex2 = selectedTimeIndex;
aika = selectedValue;
elokuvanNimi = document.getElementById("usrInputNimi").value;
updateHTMLBlock();
}
$("td").change(function() {
alert($(this).find("option:selected").text()+' clicked!');
});
$("#myElement").click(function() {
$(this).siblings(":last").hide();
});
$("a").click(function(event){
event.preventDefault();
});
// updates all the clickevents for the td tags of the calendar
var checklist = document.getElementsByTagName("td");
function updateCalendarEvents(){
var checklist = document.getElementsByTagName("td");
for (var i = 0; i < checklist.length; i++) {
checklist[i].addEventListener("click", updateSelectedtd)
}
}
// var tdlista = checklist.querySelectorAll("td");
for (var i = 0; i < checklist.length; i++) {
checklist[i].addEventListener("click", updateSelectedtd)
}
function updateSelectedtd(){
for (var k = 0; k < checklist.length; k++) {
checklist[k].style.backgroundColor = "#492079";
if ($(checklist[k]).hasClass('selected')){
checklist[k].classList.remove("selected");
}
else if ($(checklist[k]).hasClass('selectedForAnnihilation')){
checklist[k].classList.remove("selectedForAnnihilation");
}
}
if($(this).hasClass('noselect')){
return;
} else if($(this).hasClass('has-events')){
$(this).addClass('selectedForAnnihilation')
this.style.backgroundColor = "#e20a0a";
}
else |
}
function updateHTMLBlock(){
elokuvaBlokki = stringElementti1 + rowspanElementti + stringElementti2 + elokuvanNimi + stringElementti3 + sali + stringElementti4 + aika + stringElementti5;
}
var asda = document.getElementsByName
function tarkistaKoko(){
if (parseInt(rowspanKokoSallija, 10) < (selectedTimeIndex +1)) {
return false;
} else {
return true;
}
}
function tarkistaKokoKonfliktit(){
while(selectedTimeIndex2 > 0) {
if (!$(seuraavanLapsitdt2[rowspanKohta]).hasClass('no-events')){
alert("Asettamasi aika on konfliktissa toisen ajan kanssa");
return false;
}
seuraavanLapsitdt2 = seuraavanLapsitdt2[0].parentElement.nextElementSibling.children;
selectedTimeIndex2--;
}
}
var rowspanKokoSallija = 0;
var seuraavaIsantatd;
var seuraavanLapsitdt;
var tdosoitin;
var listanPituusCounter;
// Used by the admin to place a new movie block into the calendar.
// Calls the conflict check and removes the empty blocks from underneath the movie block.
function asetaAika(){
changeFunc();
rowspanElementti = selectedTimeIndex +1;
updateHTMLBlock();
if (elokuvanNimi == ""){
alert("Et antanut nimea");
return;
}
for (var u = 0; u < checklist.length; u++) {
if ($(checklist[u]).hasClass('selected')){
var whileLoopControlElement = checklist[u];
seuraavanLapsitdt = whileLoopControlElement.parentElement.nextElementSibling.children;
seuraavanLapsitdt2 = whileLoopControlElement.parentElement.nextElementSibling.children;
rowspanKohta = $(checklist[u]).index();
rowspanKokoSallija = parseInt(checklist[u].parentElement.children[0].className.split(' ')[1]);
if (tarkistaKoko() == false){
alert("Elokuvan aika yli aukiolajan");
return;
}
if (tarkistaKokoKonfliktit() != false){
while(selectedTimeIndex > 0) {
//HAIKKAAA Indeksi arvo rowspankohta on vaarin silla koodi ei ota huomioon
// elokuvablokkien alla olevia puuttuvia gridin elementteja,
// koodi pitaisi uudelleenkirjoittaa niin etta elokuvablokeissa olisi
// mukana placeholderblokkeja.
if ($(seuraavanLapsitdt[rowspanKohta]).hasClass('no-events')){
$(seuraavanLapsitdt[rowspanKohta]).remove();
}
seuraavanLapsitdt = seuraavanLapsitdt[0].parentElement.nextElementSibling.children;
selectedTimeIndex--;
}
} else return;
$(checklist[u]).replaceWith( elokuvaBlokki );
// while(selectedTimeIndex > 0) {
// $(seuraavanLapsitdt).last("td").remove();
/**
* the leftover undefined value could be removed here; undefined is automatically at the end of the list
*/
// listanPituusCounter = seuraavanLapsitdt.length;
// seuraavanLapsitdt = seuraavanLapsitdt[0].parentElement.nextElementSibling.children;
// selectedTimeIndex--;
// }
}
}
updateCalendarEvents();
tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
}
var para = document.createElement("td");
function poistaAika(){
for (var u = 0; u < checklist.length; u++) {
// if ($(checklist[u]).hasClass('selectedForAnnihilation') && $(checklist[u]).hasClass('no-events')) {
// $(checklist[u]).replaceWith( perusStringElementti );
// }
if ($(checklist[u]).hasClass('selectedForAnnihilation')){
var lisaajaCounterArvo = checklist[u].rowSpan;
var whileLoopControlElement = checklist[u];
seuraavanLapsitdt = whileLoopControlElement.parentElement.children;
var seuraavanParenttd = whileLoopControlElement.parentElement.nextElementSibling;
rowspanKohta = $(checklist[u]).index();
while(lisaajaCounterArvo > 1) {
// if ($(seuraavanLapsitdt[rowspanKohta]).hasClass('no-events')){
// seuraavanParenttd.insertBefore(para, lapsitdtNode[rowspanKohta]);
seuraavanParenttd.children[rowspanKohta].insertAdjacentHTML("beforebegin", perusStringElementti);
alert("used");
// }
// var seuraavanParenttd = seuraavanLapsitdt[0].parentElement.nextElementSibling;
// $( seuraavanParenttd.previousElementSibling ).append( perusStringElementti );
// listanPituusCounter = seuraavanLapsitdt.length;
seuraavanParenttd = seuraavanParenttd.children[0].parentElement.nextElementSibling;
lisaajaCounterArvo--;
}
alert(checklist[u].rowSpan);
$(checklist[u]).replaceWith( perusStringElementti );
updateCalendarEvents();
}
tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
}
}
var modiviedtime = document.lastModified;
var ataglist = document.getElementsByClassName("saliTogglet");
//ataglist = fillArray(ataglist, 3);
//ataglist[0] = document.getElementById("original");
var tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
//var tableArray = document.getElementsByClassName("tableTogglet");
//tableArray = fillArray(tableArray, 4);
//tableArray[0] = document.getElementById("theTable");
//var nappilist = document.getElementsByClassName("nappiSetit");
var nappilist = $(".nappiSetit");
//nappilist = fillArray(nappilist, 3);
//nappilist[0] = document.getElementById("nappiSetti");
var teatteriNappilist = document.getElementsByClassName("teatteriTogglet");
function toggleTeatteri(sali){
// for (var i = 0; i < ataglist.length; i++){
// $(ataglist[i]).removeClass("active");
// }
$( nappilist[sali] ).toggle(true);
$(nappilist[sali]).siblings().toggle( false );
}
toggleTeatteri("0")
$(ataglist[0]).addClass("active");
function toggleShowRoom(room){
for (var i = 0; i < ataglist.length; i++){
$( ataglist[i] ).removeClass("active");
}
$( tablelist[room] ).toggle(true);
$(tablelist[room]).siblings().toggle( false );
};
toggleShowRoom(0);
function toggleTable(indexsi) {
for (var i = 0; i < ataglist.length; i++) {
var x = ataglist[i];
if (x.style.display === 'none') {
x.style.display = 'block';
} else {
x.style.display = 'none';
}
}
}
//var lTable = document.getElementById(""+table1+"");
//lTable.style.display = (lTable.style.display == "table1") ? "none" : "table1";
// Elokuvateatterinappien salinvaihto klikattaessa elokuvateatterinappeja
jQuery(".list-group-item").click(function (e) {
jQuery(this).addClass('active').siblings().removeClass('active');
});
//for (var i = 0; i < teatteriNappilist.length; i++) {
// teatteriNappilist[i].addEventListener("click", toggleShowRoom())
//}
//arraykokeilu----------------------------------------------------------------------------------------------------------------------------
function fillArray(elementti, arvo) {
var arr = [];
for (var i = 0; i < arvo; i++) {
arr.push(elementti);
}
return arr;
}
//function getIndexAndWorkFromThere(){
//for (var i = 0, neljanNappiSetit = aNapit.children.length; i < len; i++){
// var aNapit = document.getElementById('nappiSetti');
// for (var i = 0, len = aNapit.children.length; i < len; i++){
//
// (function(index){
// aNapit.children[i].onclick = function(){
// alert(index) ;
// toggleTable(index);
//
// }
//
// })(i); //TAAVAYTBRHTHEFUCKISTHISMARKINGANDWHYDOESREMOVINGITBREAKTHECODE
//
// }
//}
//var Hienotapatehdapuuttuvialukuja = clickOnButtonList || 0;
// koska variableen ei voi tallentaa clickonbuttonlistia koska sita ei viela ole olemassa, siihen tallennetaan 0.
//var clickOnButtonList = document.getElementsByClassName("list-group-item");
//for (i = 0; i < clickOnButtonList.length; i++ ){
// clickOnButtonList[i].addEventListener("click", clickOnButton)
//}
//function clickOnButton(){
// //Looppi toimii!
//}
//piece of code that does actions
//minimize repeated use of functions
//function go(name, age) {
// "use strict";
// if (age < 20) {
// return name + "!";
// } else {
// return name;
// }
// alert(go("will",34));
//}
| {
this.classList.add("selected");
$(this).addClass('selected')
this.style.backgroundColor = "green";
} | conditional_block |
varausohjelmajs.js | // Kaikki javascripti joka liittyy adminsivuun ja redirectaamiseen
// array johon kaikki lisatyt kaytajat tallennetaan
var userArray = [];
userArray[0] = "@ default default"
// sessionstorage asettaa stringin,
// aluksi muokataan array string-muotoon kayttamalla JSON.stringify jonka jalkeen
// kaytetaan JSON:ia muokkaamaan asetettavaa stringia takaisin arrayksi muokattavaan muotoon (JSON.parse)
// sessionStorage.setItem("mySharedLoginData", JSON.stringify(userArray));
var error = document.getElementById("RegisterAlertArea");
var loginError = document.getElementById("LoginAlertArea");
var RegisterErrorText = document.getElementById("errorText");
function registerUser(){
var check = document.getElementById("adminCheckbox").checked;
var x = document.forms["registerForm"]["givenUsername"].value;
var y = document.forms["registerForm"]["givenPassword"].value;
// Tarkistaa onko arvoja annettu ja onko annetuissa arvoissa paallekkaisyyksia
if (x==null || x == "" || y==null || y==""){
error.style.display = 'block';
return;
} else if (userArray.includes("@ " + x + " " + y) || userArray.includes(x + " " + y)) {
error.style.display = 'block';
RegisterErrorText.textContent = "ERROR: User already exists within current instance";
return;
} else {
error.style.display = 'none';
if (check == true){
userArray.push("@ " + x + " " + y);
alert("1");
} else {
userArray.push(x + " " + y);
alert("2");
alert(JSON.stringify(userArray));
}
}
// /n siis toimii rivinvaihtona inputtien valilla
(document.getElementById("addedUsersArea")).value = userArray.join("\n")
sessionStorage.setItem("mySharedLoginData", JSON.stringify(userArray));
// Paivitetaan tietokantaan juuri lisatty kayttaja
var elokuvaPOST = {
username: x,
password: y,
elokuva: [],
sali: [],
aika: [],
paikka: []
};
$.ajax({
type: 'POST',
url: 'http://rest.learncode.academy/api/elokuvaData/kauttajatTesti',
data: elokuvaPOST,
success: function(dataPOST){
sessionStorage.setItem("mySharedLoginUsername", x);
sessionStorage.setItem("mySharedLoginPassword", y);
console.log(sessionStorage.getItem("mySharedLoginUsername") + " & " + sessionStorage.getItem("mySharedLoginPassword"));
console.log("user registered to database:");
console.log(dataPOST);
}
});
}
// loginarray viittaa siis tamanhetkisiin syoteboxeihin laitettuun kayttajanimeen ja salasanaan
// loginarray ei ole millaan tavalla liitoksissa REST-ohjelmointirajapinnan tietokantaan vaan
// tieto on vain sessionstoragessa.
function loginUser(){
var loginArray = [];
// Sessionstorage palauttaa Stringin, kaytetaan JSON:in parse-metodia muuttamaan listan string-muotoon
loginArray = JSON.parse(sessionStorage.getItem("mySharedLoginData"));
// tarkistetaan onko yhtaan kayttajaa kirjattu sisaan tassa sessiossa
if (loginArray == null || loginArray == undefined){
loginError.style.display = 'block';
RegisterErrorText.innerHTML = "ERROR: No readable users registered";
return;
}
var x = document.forms["loginForm"]["loginUsername"].value;
var y = document.forms["loginForm"]["loginPassword"].value;
// voisi tehda kaksitasosen if-lausekkeen, erikseen tyhjalle kentalle ja kentalle jossa arvot ovat vaaria
if (x==null || x == "" || y==null || y=="" || (!loginArray.includes("@ " + x + " " + y) && !loginArray.includes(x + " " + y))){
loginError.style.display = 'block';
RegisterErrorText.innerHTML = "ERROR: Incorrect username or password";
alert(loginArray.includes("@ " + x + " " + y) + " " + loginArray.includes(x + " " + y));
return;
}
alert(loginArray.includes("@ " + x + " " + y) + " " + loginArray.includes(x + " " + y));
// tassa if-lauseessa asetetaan uusin kirjautunut kayttaja instanssin loginiksi, tata kaytetaan jatkossa varauksiin
var currentUserImput = "@ " + x + " " + y
if (loginArray.includes("@ " + x + " " + y) && searchStringInArray("@ " + x + " " + y, loginArray) != -1){
alert("adminuser");
sessionStorage.setItem("currentLoginIndex", searchStringInArray("@ " + x + " " + y, loginArray));
sessionStorage.setItem("currentLoginUser", loginArray[searchStringInArray("@ " + x + " " + y, loginArray)]);
//redirect loginin jalkeen
window.location.href="index.html";
} else if (loginArray.includes(x + " " + y) && searchStringInArray(x + " " + y, loginArray) != -1){
alert("normaluser");
sessionStorage.setItem("currentLoginIndex", searchStringInArray(x + " " + y, loginArray));
sessionStorage.setItem("currentLoginUser", loginArray[searchStringInArray(x + " " + y, loginArray)]);
//redirect loginin jalkeen
window.location.href="elokuvaAsiakassivu.html";
} else alert("Unknown login error has occurred");
}
// loginuserin tamanhetkisen kirjautujan etsiminen rekisteroityjen kayttajien listasta,
// palauttaa indeksin jossa kauttaja on mikali annettu parametri on listassa
function searchStringInArray (searchString, searchStringArray) {
for ( var i = 0 ; i < searchStringArray.length ; i++ ) {
if (searchStringArray[i] == searchString)
return i;
}
return -1;
}
// funktio jolla paivitetaan html-osiot jotka ovat riippuvaisia annetusta kauttajasta
function paivitaKayttajaElementit(kayttajaElementti){
kayttajaElementti.innerHTML = sessionStorage.getItem("currentLoginUser");
alert(sessionStorage.getItem("currentLoginUser"));
}
stringElementti1 = '<td class=" has-events" rowspan="';
rowspanElementti = ""+ listanPituusCounter +"";
stringElementti2 = '"><div class="row-fluid elokuvaElementti1" style="width: 99%; height: 100%;"><span class="title">' ;
stringElementti3 = '</span> <span class="sali"><a>' ;
stringElementti4 = '</a></span><span class="aika">' ;
stringElementti5 = '</span></div></td>' ;
perusStringElementti = '<td class=" no-events" rowspan="1"></td>';
var elokuvaBlokki = stringElementti1 + rowspanElementti + stringElementti2 + elokuvanNimi + stringElementti3 + sali + stringElementti4 + aika + stringElementti5;
var elokuvanNimi = "asda";
var sali = "sali 1*";
var aika = changeFunc() || "00:31";
jQuery(".list-group-item").click(function (e) {
jQuery(this).addClass('active').siblings().removeClass('active');
});
jQuery("#sel1").click(function (ee) {
jQuery(this).addClass('selected').siblings().removeClass('active');
});
// Eventlistenerit hyllytetty silla tarkistuksen voi asettaa submittausmetodin alkuun
var elokuvanNimiElementti = document.getElementById("ElokuvaNimiImput");
var selectBox = document.getElementById("sel2");
var selectedValue = selectBox.options[selectBox.selectedIndex].value;
var selectedTimeIndex = selectBox.options[selectBox.selectedIndex].index;
var selectedTimeIndex2 = selectedTimeIndex;
elokuvanNimiElementti.addEventListener("", changeFunc)
selectBox.addEventListener("", changeFunc)
function changeFunc() {
var selectBox = document.getElementById("sel2");
selectedValue = selectBox.options[selectBox.selectedIndex].innerHTML;
selectedTimeIndex = selectBox.options[selectBox.selectedIndex].index;
selectedTimeIndex2 = selectedTimeIndex;
aika = selectedValue;
elokuvanNimi = document.getElementById("usrInputNimi").value;
updateHTMLBlock();
}
$("td").change(function() {
alert($(this).find("option:selected").text()+' clicked!');
});
$("#myElement").click(function() {
$(this).siblings(":last").hide();
});
$("a").click(function(event){
event.preventDefault();
});
// updates all the clickevents for the td tags of the calendar
var checklist = document.getElementsByTagName("td");
function updateCalendarEvents(){
var checklist = document.getElementsByTagName("td");
for (var i = 0; i < checklist.length; i++) {
checklist[i].addEventListener("click", updateSelectedtd)
}
}
// var tdlista = checklist.querySelectorAll("td");
for (var i = 0; i < checklist.length; i++) {
checklist[i].addEventListener("click", updateSelectedtd)
}
function updateSelectedtd(){
for (var k = 0; k < checklist.length; k++) {
checklist[k].style.backgroundColor = "#492079";
if ($(checklist[k]).hasClass('selected')){
checklist[k].classList.remove("selected");
}
else if ($(checklist[k]).hasClass('selectedForAnnihilation')){
checklist[k].classList.remove("selectedForAnnihilation");
}
}
if($(this).hasClass('noselect')){
return;
} else if($(this).hasClass('has-events')){
$(this).addClass('selectedForAnnihilation')
this.style.backgroundColor = "#e20a0a";
}
else {
this.classList.add("selected");
$(this).addClass('selected')
this.style.backgroundColor = "green";
}
}
function updateHTMLBlock(){
elokuvaBlokki = stringElementti1 + rowspanElementti + stringElementti2 + elokuvanNimi + stringElementti3 + sali + stringElementti4 + aika + stringElementti5;
}
var asda = document.getElementsByName
function tarkistaKoko(){
if (parseInt(rowspanKokoSallija, 10) < (selectedTimeIndex +1)) {
return false;
} else {
return true;
}
}
function tarkistaKokoKonfliktit(){
while(selectedTimeIndex2 > 0) {
if (!$(seuraavanLapsitdt2[rowspanKohta]).hasClass('no-events')){
alert("Asettamasi aika on konfliktissa toisen ajan kanssa");
return false;
}
seuraavanLapsitdt2 = seuraavanLapsitdt2[0].parentElement.nextElementSibling.children;
selectedTimeIndex2--;
}
}
var rowspanKokoSallija = 0;
var seuraavaIsantatd;
var seuraavanLapsitdt;
var tdosoitin;
var listanPituusCounter;
// Used by the admin to place a new movie block into the calendar.
// Calls the conflict check and removes the empty blocks from underneath the movie block.
function asetaAika(){
changeFunc();
rowspanElementti = selectedTimeIndex +1;
updateHTMLBlock();
if (elokuvanNimi == ""){
alert("Et antanut nimea");
return;
}
for (var u = 0; u < checklist.length; u++) {
if ($(checklist[u]).hasClass('selected')){
var whileLoopControlElement = checklist[u];
seuraavanLapsitdt = whileLoopControlElement.parentElement.nextElementSibling.children;
seuraavanLapsitdt2 = whileLoopControlElement.parentElement.nextElementSibling.children;
rowspanKohta = $(checklist[u]).index();
rowspanKokoSallija = parseInt(checklist[u].parentElement.children[0].className.split(' ')[1]);
if (tarkistaKoko() == false){
alert("Elokuvan aika yli aukiolajan");
return;
}
if (tarkistaKokoKonfliktit() != false){
while(selectedTimeIndex > 0) {
//HAIKKAAA Indeksi arvo rowspankohta on vaarin silla koodi ei ota huomioon
// elokuvablokkien alla olevia puuttuvia gridin elementteja,
// koodi pitaisi uudelleenkirjoittaa niin etta elokuvablokeissa olisi
// mukana placeholderblokkeja.
if ($(seuraavanLapsitdt[rowspanKohta]).hasClass('no-events')){
$(seuraavanLapsitdt[rowspanKohta]).remove();
}
seuraavanLapsitdt = seuraavanLapsitdt[0].parentElement.nextElementSibling.children;
selectedTimeIndex--;
}
} else return;
$(checklist[u]).replaceWith( elokuvaBlokki );
// while(selectedTimeIndex > 0) {
// $(seuraavanLapsitdt).last("td").remove();
/**
* the leftover undefined value could be removed here; undefined is automatically at the end of the list
*/
// listanPituusCounter = seuraavanLapsitdt.length;
// seuraavanLapsitdt = seuraavanLapsitdt[0].parentElement.nextElementSibling.children;
// selectedTimeIndex--;
// }
}
}
updateCalendarEvents();
tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
}
var para = document.createElement("td");
function poistaAika(){
for (var u = 0; u < checklist.length; u++) {
// if ($(checklist[u]).hasClass('selectedForAnnihilation') && $(checklist[u]).hasClass('no-events')) {
// $(checklist[u]).replaceWith( perusStringElementti );
// }
if ($(checklist[u]).hasClass('selectedForAnnihilation')){
var lisaajaCounterArvo = checklist[u].rowSpan;
var whileLoopControlElement = checklist[u];
seuraavanLapsitdt = whileLoopControlElement.parentElement.children;
var seuraavanParenttd = whileLoopControlElement.parentElement.nextElementSibling;
rowspanKohta = $(checklist[u]).index();
while(lisaajaCounterArvo > 1) {
// if ($(seuraavanLapsitdt[rowspanKohta]).hasClass('no-events')){
// seuraavanParenttd.insertBefore(para, lapsitdtNode[rowspanKohta]);
seuraavanParenttd.children[rowspanKohta].insertAdjacentHTML("beforebegin", perusStringElementti);
alert("used");
// }
// var seuraavanParenttd = seuraavanLapsitdt[0].parentElement.nextElementSibling;
// $( seuraavanParenttd.previousElementSibling ).append( perusStringElementti );
// listanPituusCounter = seuraavanLapsitdt.length;
seuraavanParenttd = seuraavanParenttd.children[0].parentElement.nextElementSibling;
lisaajaCounterArvo--;
}
alert(checklist[u].rowSpan);
$(checklist[u]).replaceWith( perusStringElementti );
updateCalendarEvents();
}
tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
}
}
var modiviedtime = document.lastModified;
var ataglist = document.getElementsByClassName("saliTogglet");
//ataglist = fillArray(ataglist, 3);
//ataglist[0] = document.getElementById("original");
var tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
//var tableArray = document.getElementsByClassName("tableTogglet");
//tableArray = fillArray(tableArray, 4);
//tableArray[0] = document.getElementById("theTable");
//var nappilist = document.getElementsByClassName("nappiSetit");
var nappilist = $(".nappiSetit");
//nappilist = fillArray(nappilist, 3);
//nappilist[0] = document.getElementById("nappiSetti");
var teatteriNappilist = document.getElementsByClassName("teatteriTogglet");
function toggleTeatteri(sali){
// for (var i = 0; i < ataglist.length; i++){
// $(ataglist[i]).removeClass("active");
// }
$( nappilist[sali] ).toggle(true);
$(nappilist[sali]).siblings().toggle( false );
}
toggleTeatteri("0")
$(ataglist[0]).addClass("active");
function toggleShowRoom(room){
for (var i = 0; i < ataglist.length; i++){
$( ataglist[i] ).removeClass("active");
}
$( tablelist[room] ).toggle(true);
$(tablelist[room]).siblings().toggle( false );
};
toggleShowRoom(0);
function toggleTable(indexsi) {
for (var i = 0; i < ataglist.length; i++) {
var x = ataglist[i];
if (x.style.display === 'none') {
x.style.display = 'block';
} else {
x.style.display = 'none';
}
}
}
//var lTable = document.getElementById(""+table1+"");
//lTable.style.display = (lTable.style.display == "table1") ? "none" : "table1";
// Elokuvateatterinappien salinvaihto klikattaessa elokuvateatterinappeja
jQuery(".list-group-item").click(function (e) {
jQuery(this).addClass('active').siblings().removeClass('active');
});
//for (var i = 0; i < teatteriNappilist.length; i++) {
// teatteriNappilist[i].addEventListener("click", toggleShowRoom())
//}
//arraykokeilu----------------------------------------------------------------------------------------------------------------------------
function f | elementti, arvo) {
var arr = [];
for (var i = 0; i < arvo; i++) {
arr.push(elementti);
}
return arr;
}
//function getIndexAndWorkFromThere(){
//for (var i = 0, neljanNappiSetit = aNapit.children.length; i < len; i++){
// var aNapit = document.getElementById('nappiSetti');
// for (var i = 0, len = aNapit.children.length; i < len; i++){
//
// (function(index){
// aNapit.children[i].onclick = function(){
// alert(index) ;
// toggleTable(index);
//
// }
//
// })(i); //TAAVAYTBRHTHEFUCKISTHISMARKINGANDWHYDOESREMOVINGITBREAKTHECODE
//
// }
//}
//var Hienotapatehdapuuttuvialukuja = clickOnButtonList || 0;
// koska variableen ei voi tallentaa clickonbuttonlistia koska sita ei viela ole olemassa, siihen tallennetaan 0.
//var clickOnButtonList = document.getElementsByClassName("list-group-item");
//for (i = 0; i < clickOnButtonList.length; i++ ){
// clickOnButtonList[i].addEventListener("click", clickOnButton)
//}
//function clickOnButton(){
// //Looppi toimii!
//}
//piece of code that does actions
//minimize repeated use of functions
//function go(name, age) {
// "use strict";
// if (age < 20) {
// return name + "!";
// } else {
// return name;
// }
// alert(go("will",34));
//}
| illArray( | identifier_name |
varausohjelmajs.js | // Kaikki javascripti joka liittyy adminsivuun ja redirectaamiseen
// array johon kaikki lisatyt kaytajat tallennetaan
var userArray = [];
userArray[0] = "@ default default"
// sessionstorage asettaa stringin,
// aluksi muokataan array string-muotoon kayttamalla JSON.stringify jonka jalkeen
// kaytetaan JSON:ia muokkaamaan asetettavaa stringia takaisin arrayksi muokattavaan muotoon (JSON.parse)
// sessionStorage.setItem("mySharedLoginData", JSON.stringify(userArray));
var error = document.getElementById("RegisterAlertArea");
var loginError = document.getElementById("LoginAlertArea");
var RegisterErrorText = document.getElementById("errorText");
function registerUser(){
var check = document.getElementById("adminCheckbox").checked;
var x = document.forms["registerForm"]["givenUsername"].value;
var y = document.forms["registerForm"]["givenPassword"].value;
// Tarkistaa onko arvoja annettu ja onko annetuissa arvoissa paallekkaisyyksia
if (x==null || x == "" || y==null || y==""){
error.style.display = 'block';
return;
} else if (userArray.includes("@ " + x + " " + y) || userArray.includes(x + " " + y)) {
error.style.display = 'block';
RegisterErrorText.textContent = "ERROR: User already exists within current instance";
return;
} else {
error.style.display = 'none';
if (check == true){
userArray.push("@ " + x + " " + y);
alert("1");
} else {
userArray.push(x + " " + y);
alert("2");
alert(JSON.stringify(userArray));
}
}
// /n siis toimii rivinvaihtona inputtien valilla
(document.getElementById("addedUsersArea")).value = userArray.join("\n")
sessionStorage.setItem("mySharedLoginData", JSON.stringify(userArray));
// Paivitetaan tietokantaan juuri lisatty kayttaja
var elokuvaPOST = {
username: x,
password: y,
elokuva: [],
sali: [],
aika: [],
paikka: []
};
$.ajax({
type: 'POST',
url: 'http://rest.learncode.academy/api/elokuvaData/kauttajatTesti',
data: elokuvaPOST,
success: function(dataPOST){
sessionStorage.setItem("mySharedLoginUsername", x);
sessionStorage.setItem("mySharedLoginPassword", y);
console.log(sessionStorage.getItem("mySharedLoginUsername") + " & " + sessionStorage.getItem("mySharedLoginPassword"));
console.log("user registered to database:");
console.log(dataPOST);
}
});
}
// loginarray viittaa siis tamanhetkisiin syoteboxeihin laitettuun kayttajanimeen ja salasanaan
// loginarray ei ole millaan tavalla liitoksissa REST-ohjelmointirajapinnan tietokantaan vaan
// tieto on vain sessionstoragessa.
function loginUser(){
var loginArray = [];
// Sessionstorage palauttaa Stringin, kaytetaan JSON:in parse-metodia muuttamaan listan string-muotoon
loginArray = JSON.parse(sessionStorage.getItem("mySharedLoginData"));
// tarkistetaan onko yhtaan kayttajaa kirjattu sisaan tassa sessiossa
if (loginArray == null || loginArray == undefined){
loginError.style.display = 'block';
RegisterErrorText.innerHTML = "ERROR: No readable users registered";
return;
}
var x = document.forms["loginForm"]["loginUsername"].value;
var y = document.forms["loginForm"]["loginPassword"].value;
// voisi tehda kaksitasosen if-lausekkeen, erikseen tyhjalle kentalle ja kentalle jossa arvot ovat vaaria
if (x==null || x == "" || y==null || y=="" || (!loginArray.includes("@ " + x + " " + y) && !loginArray.includes(x + " " + y))){
loginError.style.display = 'block';
RegisterErrorText.innerHTML = "ERROR: Incorrect username or password";
alert(loginArray.includes("@ " + x + " " + y) + " " + loginArray.includes(x + " " + y));
return;
}
alert(loginArray.includes("@ " + x + " " + y) + " " + loginArray.includes(x + " " + y));
// tassa if-lauseessa asetetaan uusin kirjautunut kayttaja instanssin loginiksi, tata kaytetaan jatkossa varauksiin
var currentUserImput = "@ " + x + " " + y
if (loginArray.includes("@ " + x + " " + y) && searchStringInArray("@ " + x + " " + y, loginArray) != -1){
alert("adminuser");
sessionStorage.setItem("currentLoginIndex", searchStringInArray("@ " + x + " " + y, loginArray));
sessionStorage.setItem("currentLoginUser", loginArray[searchStringInArray("@ " + x + " " + y, loginArray)]);
//redirect loginin jalkeen
window.location.href="index.html";
} else if (loginArray.includes(x + " " + y) && searchStringInArray(x + " " + y, loginArray) != -1){
alert("normaluser");
sessionStorage.setItem("currentLoginIndex", searchStringInArray(x + " " + y, loginArray));
sessionStorage.setItem("currentLoginUser", loginArray[searchStringInArray(x + " " + y, loginArray)]);
//redirect loginin jalkeen
window.location.href="elokuvaAsiakassivu.html";
} else alert("Unknown login error has occurred");
}
// loginuserin tamanhetkisen kirjautujan etsiminen rekisteroityjen kayttajien listasta,
// palauttaa indeksin jossa kauttaja on mikali annettu parametri on listassa
function searchStringInArray (searchString, searchStringArray) {
for ( var i = 0 ; i < searchStringArray.length ; i++ ) {
if (searchStringArray[i] == searchString)
return i;
}
return -1;
}
// funktio jolla paivitetaan html-osiot jotka ovat riippuvaisia annetusta kauttajasta
function paivitaKayttajaElementit(kayttajaElementti){
kayttajaElementti.innerHTML = sessionStorage.getItem("currentLoginUser");
alert(sessionStorage.getItem("currentLoginUser"));
}
stringElementti1 = '<td class=" has-events" rowspan="';
rowspanElementti = ""+ listanPituusCounter +"";
stringElementti2 = '"><div class="row-fluid elokuvaElementti1" style="width: 99%; height: 100%;"><span class="title">' ;
stringElementti3 = '</span> <span class="sali"><a>' ;
stringElementti4 = '</a></span><span class="aika">' ;
stringElementti5 = '</span></div></td>' ;
perusStringElementti = '<td class=" no-events" rowspan="1"></td>';
var elokuvaBlokki = stringElementti1 + rowspanElementti + stringElementti2 + elokuvanNimi + stringElementti3 + sali + stringElementti4 + aika + stringElementti5;
var elokuvanNimi = "asda";
var sali = "sali 1*";
var aika = changeFunc() || "00:31";
jQuery(".list-group-item").click(function (e) {
jQuery(this).addClass('active').siblings().removeClass('active');
});
jQuery("#sel1").click(function (ee) {
jQuery(this).addClass('selected').siblings().removeClass('active');
});
// Eventlistenerit hyllytetty silla tarkistuksen voi asettaa submittausmetodin alkuun
var elokuvanNimiElementti = document.getElementById("ElokuvaNimiImput");
var selectBox = document.getElementById("sel2");
var selectedValue = selectBox.options[selectBox.selectedIndex].value;
var selectedTimeIndex = selectBox.options[selectBox.selectedIndex].index;
var selectedTimeIndex2 = selectedTimeIndex;
elokuvanNimiElementti.addEventListener("", changeFunc)
selectBox.addEventListener("", changeFunc)
function changeFunc() {
var selectBox = document.getElementById("sel2");
selectedValue = selectBox.options[selectBox.selectedIndex].innerHTML;
selectedTimeIndex = selectBox.options[selectBox.selectedIndex].index;
selectedTimeIndex2 = selectedTimeIndex;
aika = selectedValue;
elokuvanNimi = document.getElementById("usrInputNimi").value;
updateHTMLBlock();
}
$("td").change(function() {
alert($(this).find("option:selected").text()+' clicked!');
});
$("#myElement").click(function() {
$(this).siblings(":last").hide();
});
$("a").click(function(event){
event.preventDefault();
});
// updates all the clickevents for the td tags of the calendar
var checklist = document.getElementsByTagName("td");
function updateCalendarEvents(){
var checklist = document.getElementsByTagName("td");
for (var i = 0; i < checklist.length; i++) {
checklist[i].addEventListener("click", updateSelectedtd)
}
}
// var tdlista = checklist.querySelectorAll("td");
for (var i = 0; i < checklist.length; i++) {
checklist[i].addEventListener("click", updateSelectedtd)
}
function updateSelectedtd(){
for (var k = 0; k < checklist.length; k++) {
checklist[k].style.backgroundColor = "#492079";
if ($(checklist[k]).hasClass('selected')){
checklist[k].classList.remove("selected");
}
else if ($(checklist[k]).hasClass('selectedForAnnihilation')){
checklist[k].classList.remove("selectedForAnnihilation");
}
}
if($(this).hasClass('noselect')){
return;
} else if($(this).hasClass('has-events')){
$(this).addClass('selectedForAnnihilation')
this.style.backgroundColor = "#e20a0a";
}
else {
this.classList.add("selected");
$(this).addClass('selected')
this.style.backgroundColor = "green";
}
}
function updateHTMLBlock(){
elokuvaBlokki = stringElementti1 + rowspanElementti + stringElementti2 + elokuvanNimi + stringElementti3 + sali + stringElementti4 + aika + stringElementti5;
}
var asda = document.getElementsByName
| return true;
}
}
function tarkistaKokoKonfliktit(){
while(selectedTimeIndex2 > 0) {
if (!$(seuraavanLapsitdt2[rowspanKohta]).hasClass('no-events')){
alert("Asettamasi aika on konfliktissa toisen ajan kanssa");
return false;
}
seuraavanLapsitdt2 = seuraavanLapsitdt2[0].parentElement.nextElementSibling.children;
selectedTimeIndex2--;
}
}
var rowspanKokoSallija = 0;
var seuraavaIsantatd;
var seuraavanLapsitdt;
var tdosoitin;
var listanPituusCounter;
// Used by the admin to place a new movie block into the calendar.
// Calls the conflict check and removes the empty blocks from underneath the movie block.
function asetaAika(){
changeFunc();
rowspanElementti = selectedTimeIndex +1;
updateHTMLBlock();
if (elokuvanNimi == ""){
alert("Et antanut nimea");
return;
}
for (var u = 0; u < checklist.length; u++) {
if ($(checklist[u]).hasClass('selected')){
var whileLoopControlElement = checklist[u];
seuraavanLapsitdt = whileLoopControlElement.parentElement.nextElementSibling.children;
seuraavanLapsitdt2 = whileLoopControlElement.parentElement.nextElementSibling.children;
rowspanKohta = $(checklist[u]).index();
rowspanKokoSallija = parseInt(checklist[u].parentElement.children[0].className.split(' ')[1]);
if (tarkistaKoko() == false){
alert("Elokuvan aika yli aukiolajan");
return;
}
if (tarkistaKokoKonfliktit() != false){
while(selectedTimeIndex > 0) {
//HAIKKAAA Indeksi arvo rowspankohta on vaarin silla koodi ei ota huomioon
// elokuvablokkien alla olevia puuttuvia gridin elementteja,
// koodi pitaisi uudelleenkirjoittaa niin etta elokuvablokeissa olisi
// mukana placeholderblokkeja.
if ($(seuraavanLapsitdt[rowspanKohta]).hasClass('no-events')){
$(seuraavanLapsitdt[rowspanKohta]).remove();
}
seuraavanLapsitdt = seuraavanLapsitdt[0].parentElement.nextElementSibling.children;
selectedTimeIndex--;
}
} else return;
$(checklist[u]).replaceWith( elokuvaBlokki );
// while(selectedTimeIndex > 0) {
// $(seuraavanLapsitdt).last("td").remove();
/**
* the leftover undefined value could be removed here; undefined is automatically at the end of the list
*/
// listanPituusCounter = seuraavanLapsitdt.length;
// seuraavanLapsitdt = seuraavanLapsitdt[0].parentElement.nextElementSibling.children;
// selectedTimeIndex--;
// }
}
}
updateCalendarEvents();
tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
}
var para = document.createElement("td");
function poistaAika(){
for (var u = 0; u < checklist.length; u++) {
// if ($(checklist[u]).hasClass('selectedForAnnihilation') && $(checklist[u]).hasClass('no-events')) {
// $(checklist[u]).replaceWith( perusStringElementti );
// }
if ($(checklist[u]).hasClass('selectedForAnnihilation')){
var lisaajaCounterArvo = checklist[u].rowSpan;
var whileLoopControlElement = checklist[u];
seuraavanLapsitdt = whileLoopControlElement.parentElement.children;
var seuraavanParenttd = whileLoopControlElement.parentElement.nextElementSibling;
rowspanKohta = $(checklist[u]).index();
while(lisaajaCounterArvo > 1) {
// if ($(seuraavanLapsitdt[rowspanKohta]).hasClass('no-events')){
// seuraavanParenttd.insertBefore(para, lapsitdtNode[rowspanKohta]);
seuraavanParenttd.children[rowspanKohta].insertAdjacentHTML("beforebegin", perusStringElementti);
alert("used");
// }
// var seuraavanParenttd = seuraavanLapsitdt[0].parentElement.nextElementSibling;
// $( seuraavanParenttd.previousElementSibling ).append( perusStringElementti );
// listanPituusCounter = seuraavanLapsitdt.length;
seuraavanParenttd = seuraavanParenttd.children[0].parentElement.nextElementSibling;
lisaajaCounterArvo--;
}
alert(checklist[u].rowSpan);
$(checklist[u]).replaceWith( perusStringElementti );
updateCalendarEvents();
}
tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
}
}
var modiviedtime = document.lastModified;
var ataglist = document.getElementsByClassName("saliTogglet");
//ataglist = fillArray(ataglist, 3);
//ataglist[0] = document.getElementById("original");
var tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
//var tableArray = document.getElementsByClassName("tableTogglet");
//tableArray = fillArray(tableArray, 4);
//tableArray[0] = document.getElementById("theTable");
//var nappilist = document.getElementsByClassName("nappiSetit");
var nappilist = $(".nappiSetit");
//nappilist = fillArray(nappilist, 3);
//nappilist[0] = document.getElementById("nappiSetti");
var teatteriNappilist = document.getElementsByClassName("teatteriTogglet");
function toggleTeatteri(sali){
// for (var i = 0; i < ataglist.length; i++){
// $(ataglist[i]).removeClass("active");
// }
$( nappilist[sali] ).toggle(true);
$(nappilist[sali]).siblings().toggle( false );
}
toggleTeatteri("0")
$(ataglist[0]).addClass("active");
function toggleShowRoom(room){
for (var i = 0; i < ataglist.length; i++){
$( ataglist[i] ).removeClass("active");
}
$( tablelist[room] ).toggle(true);
$(tablelist[room]).siblings().toggle( false );
};
toggleShowRoom(0);
function toggleTable(indexsi) {
for (var i = 0; i < ataglist.length; i++) {
var x = ataglist[i];
if (x.style.display === 'none') {
x.style.display = 'block';
} else {
x.style.display = 'none';
}
}
}
//var lTable = document.getElementById(""+table1+"");
//lTable.style.display = (lTable.style.display == "table1") ? "none" : "table1";
// Switch the screening room when the movie theater buttons are clicked
jQuery(".list-group-item").click(function (e) {
jQuery(this).addClass('active').siblings().removeClass('active');
});
//for (var i = 0; i < teatteriNappilist.length; i++) {
// teatteriNappilist[i].addEventListener("click", toggleShowRoom())
//}
//array experiment----------------------------------------------------------------------------------------------------------------------------
function fillArray(elementti, arvo) {
var arr = [];
for (var i = 0; i < arvo; i++) {
arr.push(elementti);
}
return arr;
}
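// Usage sketch (assumption, not present in the original code): fillArray pushes the
// same element reference "arvo" times, so fillArray(someNode, 3) yields
// [someNode, someNode, someNode], i.e. three references to one node rather than copies.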
//function getIndexAndWorkFromThere(){
//for (var i = 0, neljanNappiSetit = aNapit.children.length; i < len; i++){
// var aNapit = document.getElementById('nappiSetti');
// for (var i = 0, len = aNapit.children.length; i < len; i++){
//
// (function(index){
// aNapit.children[i].onclick = function(){
// alert(index) ;
// toggleTable(index);
//
// }
//
// })(i); // note: this immediately-invoked wrapper captures the loop index i; removing it breaks the per-button index (closure-in-loop issue)
//
// }
//}
//var Hienotapatehdapuuttuvialukuja = clickOnButtonList || 0;
// because the variable cannot store clickOnButtonList yet (it does not exist at this point), 0 is stored in it instead.
//var clickOnButtonList = document.getElementsByClassName("list-group-item");
//for (i = 0; i < clickOnButtonList.length; i++ ){
// clickOnButtonList[i].addEventListener("click", clickOnButton)
//}
//function clickOnButton(){
// //The loop works!
//}
//piece of code that does actions
//minimize repeated use of functions
//function go(name, age) {
// "use strict";
// if (age < 20) {
// return name + "!";
// } else {
// return name;
// }
// alert(go("will",34));
//} | function tarkistaKoko(){
if (parseInt(rowspanKokoSallija, 10) < (selectedTimeIndex +1)) {
return false;
} else {
| random_line_split |
openfoodfacts.py | """
This module retrieves data from the OpenFoodFacts website ("https://fr.openfoodfacts.org/").
It contains a data-scraping function that collects various pieces of information on every product listed on the site.
It also comes with a computation-time measurement telling you how long the operation took.
Be careful: depending on the number of web pages to scrape, the computation time can grow very quickly.
"""
def scrap_openfoodfacts(nb_pages = 50) :
""" Il s'agit de la fonction principale du module.
Cette derniรจre crรฉe dans votre espace de travail un DataFrame Pandas contenant les informations scrapรฉes sur le site OpenFoodFacts.
L'argument "nb_pages" permet de rรฉgler le nombre de page ร scraper.
Veuillez ne pas trop l'augmenter afin que l'opรฉration prenne un temps raisonnable.
Il faut compter environ 30 secondes pour scraper une page (25 minutes pour les 50 pages par dรฉfaut).
27 variables sont scrapรฉes pour chaque nouvelle donnรฉe.
"""
# Import the required modules
import time
from time import sleep
import numpy as np
import requests
import re
from bs4 import BeautifulSoup
# Time measurement
start_time = time.time()
# Initialize the records list that collects our data
records = []
# Initialize the value used for errors in the dataset
error = np.NaN
# Retrieve the URL of every product over the desired number of pages
for i in range(1,nb_pages+1) :
r = requests.get(('https://fr.openfoodfacts.org/' + str(i)))
soup = BeautifulSoup(r.text, 'html.parser')
products = soup.find_all('ul', {'class' : "products"})
products = products[0].find_all('a')
liste_url = ['https://fr.openfoodfacts.org/' + elt['href'] for elt in products]
# For each product, store the data we want to scrape in variables
for url in liste_url :
s = requests.get(url)
soup = BeautifulSoup(s.text, 'html.parser')
# If the value can be retrieved, store it in our variable; otherwise replace it with the error value
try :
name = soup.title.text[:-2]
except :
name = error
try :
code_barre = soup.find('span', attrs = {'style' : "speak-as:digits;"}).text
except :
code_barre = error
try:
nutri_score = soup.find('div', attrs = {'id' : 'nutriscore_drop'}).contents[-2].text[-1]
except :
nutri_score = error
try :
nova = soup.find(style = "margin-bottom:1rem;max-width:100%")['alt'][0]
nova = [float(elt) for elt in nova.split() if elt.replace('.', '').isdigit()].pop()
except :
nova = error
try :
caractéristiques = soup.find(itemprop="description").text
except :
caractéristiques = error
try :
ingrédients = soup.find(property="food:ingredientListAsText").text
except :
ingrédients = error
try :
palme = soup.find('span', {'class' : "alert round label ingredients_analysis green"}).contents[-1][:-3]
except :
palme = error
try :
palme2 = soup.find(href="/ingredients-issus-de-l-huile-de-palme/huile-de-palme").text
except :
palme2 = error
try :
repères_nutritionnels = soup.find_all('div', {'class' : "small-12 xlarge-6 columns"})[1].text.split("\n")[-5:-1]
except :
repères_nutritionnels = error
# Split the nutrition facts into 4 separate variables (fat, fatty acids, sugar and salt)
# Then convert them to float to make analysis easier
liste_repères_nutri = repères_nutritionnels
try :
matière_grasse = liste_repères_nutri[0]
matière_grasse = [float(elt) for elt in matière_grasse.split() if elt.replace('.', '').isdigit()].pop()
except :
matière_grasse = error
try :
acide_gras = liste_repères_nutri[1]
acide_gras = [float(elt) for elt in acide_gras.split() if elt.replace('.', '').isdigit()].pop()
except :
acide_gras = error
try :
sucre = liste_repères_nutri[2]
sucre = [float(elt) for elt in sucre.split() if elt.replace('.', '').isdigit()].pop()
except :
sucre = error
try :
sel = liste_repères_nutri[3]
sel = [float(elt) for elt in sel.split() if elt.replace('.', '').isdigit()].pop()
except :
sel = error
# Use the same method on the kJ and kcal values to convert them to float
try :
kj = soup.find(id="nutriment_energy-kj_tr").find('td', {'class' : 'nutriment_value'}).text[9: 13]
kj = [float(elt) for elt in kj.split() if elt.replace('.', '').isdigit()].pop()
except :
kj = error
try :
kcal = soup.find(id="nutriment_energy-kcal_tr").find('td', {'class' : 'nutriment_value'}).text[9:15]
kcal = [float(elt) for elt in kcal.split() if elt.replace('.', '').isdigit()].pop()
except :
kcal = error
try :
eco_score = soup.find(id="ecoscore_drop").contents[-2].text[-1]
except :
eco_score = error
# For all the following variables, regexes are used to extract the value
info = soup.find('div',{ 'class':'medium-12 large-8 xlarge-8 xxlarge-8 columns'})
infos = []
for el in info:
try:
infos.append(el.text)
| r = re.compile('^Quan.*$')
quantity = list(filter(r.match, infos))[0].split(':')[-1]
except :
quantity = error
try :
r = re.compile('^Conditionnement.*$')
conditionnement = list(filter(r.match, infos))[0].split(':')[-1]
except :
conditionnement = error
try :
r = re.compile('^Marques.*$')
marques = list(filter(r.match, infos))[0].split(':')[-1]
except :
marques = error
try :
r = re.compile('^Catégories.*$')
catégories = list(filter(r.match, infos))[0].split(':')[-1]
except :
catégories = error
try :
r = re.compile('^Labels.*$')
labels = list(filter(r.match, infos))[0].split(':')[-1]
except :
labels = error
try :
r = re.compile('^Lieux.*$')
lieux = list(filter(r.match, infos))[0].split(':')[-1]
except :
lieux = error
try :
r = re.compile('^Code.*$')
code = list(filter(r.match, infos))[0].split(':')[-1]
except :
code = error
try :
r = re.compile('^Lien.*$')
lien = list(filter(r.match, infos))[0].split(':')[-1]
except :
lien = error
try :
r = re.compile('^Magasins.*$')
magasins = list(filter(r.match, infos))[0].split(':')[-1]
except :
magasins = error
try :
r = re.compile('^Origine.*$')
origine = list(filter(r.match, infos))[0].split(':')[-1]
except :
origine = error
try :
r = re.compile('^Pays.*$')
pays = list(filter(r.match, infos))[0].split(',')[1:]
except :
pays = error
nb_pays = len(pays)
# Append our variables to the records list
records.append((name, code_barre, nutri_score, nova, caractéristiques, ingrédients, palme, palme2,
kj, kcal, eco_score, quantity, conditionnement, marques, catégories, labels, lieux, code, lien, magasins,
origine, pays, nb_pays, matière_grasse, acide_gras, sucre, sel))
i+=1
# Wait between iterations to avoid errors caused by sending too many requests
# to OpenFoodFacts
sleep(1)
# Build the DataFrame, then export it to the working directory
import pandas as pd
df = pd.DataFrame(records, columns = ['Produit', 'CodeBarre', 'NutriScore', 'Nova', 'Caractéristiques', 'Ingrédients',
'NoPalme','Palme', 'KJ', 'KCAL', 'Eco-Score', 'Quantité', 'Conditionnement',
'Marque', 'Catégorie', 'Label', 'Lieux', 'Code', 'Lien', 'Magasin',
'Origine', 'Pays', 'NbPays', 'MatGrasse', 'AcideGras', 'Sucre', 'Sel'])
df.to_csv('openfoodfacts.csv', index=False, encoding='utf-8')
# Print the function's execution time
print("Temps d'exécution : "+"--- %s seconds ---" % (time.time() - start_time))
print("Merci pour votre patience !") | except:
pass
try :
| conditional_block |
hexformat.go | package anticipation
import (
"bytes"
"errors"
"fmt"
"log"
)
//needs to be all 1s on right, can't be larger than 255
const FileXFerDataLineSize = uint16(0xff)
type EncodeDecodeError struct {
s string
}
func NewEncodeDecodeError(s string) error {
return &EncodeDecodeError{s}
}
func (d *EncodeDecodeError) Error() string {
return d.s
}
//
// We implement all the hexlinetypes except 3, which is some ancient
// X86 thing involving memory segments...
//
type HexLineType int
const (
DataLine HexLineType = 0
EndOfFile HexLineType = 1
ExtendedSegmentAddress HexLineType = 2
ExtendedLinearAddress HexLineType = 4
StartLinearAddress HexLineType = 5
ExtensionSetParameters HexLineType = 0x80
ExtensionBigLinearAddress HexLineType = 0x81
ExtensionBigEntryPoint HexLineType = 0x82
)
func (hlt HexLineType) String() string {
switch hlt {
case DataLine:
return "DataLine"
case EndOfFile:
return "EndOfFile"
case ExtendedSegmentAddress:
return "ExtendedSegmentAddress"
case ExtendedLinearAddress:
return "ExtendedLinearAddress"
case StartLinearAddress:
return "StartLinearAddress"
case ExtensionSetParameters:
return "ExtensionSetParametersTime"
case ExtensionBigLinearAddress:
return "ExtensionBigLinear"
case ExtensionBigEntryPoint:
return "ExtensionBigEntryPoint"
}
return "unknown"
}
func hexLineTypeFromInt(i int) HexLineType {
switch i {
case 0:
return DataLine
case 1:
return EndOfFile
case 2:
return ExtendedSegmentAddress
case 4:
return ExtendedLinearAddress
case 5:
return StartLinearAddress
case 0x80:
return ExtensionSetParameters
case 0x81:
return ExtensionBigLinearAddress
case 0x82:
return ExtensionBigEntryPoint
}
panic("!unable to understand line type\n")
}
///////////////////////////////////////////////////////////////////////////////////
// DECODE
///////////////////////////////////////////////////////////////////////////////////
var fart = uint64(0xf) // mask for the low 4 bits; ^fart clears them in the debug-address check inside ProcessLine
// deal with a received hex line and return (error?,done?)
func ProcessLine(t HexLineType, converted []byte, bb byteBuster) (bool, bool) {
switch t {
case DataLine:
l := converted[0]
offset := (uint64(converted[1]) * 256) + (uint64(converted[2]))
//baseAddr + value in the line => basePtr
baseAddr := bb.BaseAddress() + offset
var val uint8
for i := uint64(0); i < uint64(l); i++ {
addr := baseAddr + i
val = converted[4+i]
if addr&(^fart) == 0xfffffc0030000000 {
log.Printf("processing line %x -> %x %+v", addr, val, converted)
}
if !bb.Write(addr, val) {
return true, false
}
}
return false, false
case EndOfFile:
return false, true
case ExtendedSegmentAddress: //16 bit addr
length := converted[0]
if length != 2 {
print("!ESA value has too many bytes:", length, "\n")
return true, false
}
esaAddr := uint32(converted[4])*256 + uint32(converted[5])
esaAddr = esaAddr << 4 //it's assumed to be a multiple of 16
bb.SetBaseAddr(esaAddr)
return false, false
case ExtendedLinearAddress: //32 bit addr but only top 16 passed
length := converted[0]
if length != 2 {
print("!ELA value has too many bytes:", length, "\n")
return true, false
}
elaAddr := uint32(converted[4])*256 + uint32(converted[5])
elaAddr = elaAddr << 16 //data supplied is high order 16 of 32
bb.SetBaseAddr(elaAddr) //but this sets the lower order 32 of 64
return false, false
case ExtensionSetParameters: //4 64 bit integers
length := converted[0]
if length != 32 {
print("!extension parameters must be exactly 32 bytes, but was :", length, "\n")
return true, false
}
for i := 0; i < 4; i++ {
value := uint64(0)
for p := 7; p >= 0; p-- {
placeValue := uint64(1 << (8 * p))
//4 is because of four constant values at left of converted[]
//i*8 is which param
//7-p is byte
value += (placeValue * uint64(converted[(4)+(i*8)+(7-p)]))
}
bb.SetParameter(i, value)
}
return false, false
case ExtensionBigLinearAddress: //32 bit int which is the HIGH order of 64bit addr
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigBaseAddr(t)
return false, false
case ExtensionBigEntryPoint: //32 bit int which is the HIGH order of 64bit pointer
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigEntryPoint(t)
return false, false
case StartLinearAddress: //32 bit addr
length := converted[0]
if length != 4 {
print("!SLA value has too many bytes:", length, "\n")
return true, false
}
slaAddr := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetEntryPoint(slaAddr)
return false, false
}
print("!unable to understand line type [processLine]\n")
return false, true
}
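// Usage sketch (assumption, not part of this file): a loader feeding lines from a
// serial link would typically do, per received line:
//   converted, lt, _, err := DecodeAndCheckStringToBytes(line)
//   if err != nil { /* report the error and ask for a resend */ }
//   failed, done := ProcessLine(lt, converted, bb) // bb implements byteBuster
// and stop the transfer once done is true, or abort when failed is true.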
// take in a string and return either an exception or a well formed value
func DecodeAndCheckStringToBytes(s string) ([]byte, HexLineType, uint32, error) {
lenAs16 := uint16(len(s))
converted := ConvertBuffer(lenAs16, []byte(s))
if converted == nil {
return nil, HexLineType(0), 0, errors.New("convert buffer failed")
}
var addr uint32
lt, ok := ExtractLineType(converted)
if !ok {
return nil, DataLine, 0, NewEncodeDecodeError(fmt.Sprintf("unable to extract line type from: %s", s))
}
if lt == DataLine {
addr = (uint32(converted[1]) * 256) + (uint32(converted[2]))
}
if ok := ValidBufferLength(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected buffer length to be ok, but wasn't: %s", s))
}
if ok := CheckChecksum(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected checksum to be ok, but wasn't:%s", s))
}
return converted, lt, addr, nil
}
// received a line, check that it has a hope of being syntactically correct
func ValidBufferLength(l uint16, converted []byte) bool {
total := uint16(11) //size of just framing in characters (colon, 2 len chars, 4 addr chars, 2 type chars, 2 checksum chars)
if uint16(l) < total {
print("!bad buffer length, can't be smaller than", total, ":", l, "\n")
return false
}
total += uint16(converted[0]) * 2
if l != total {
print("!bad buffer length, expected ", total, " but got", l, " based on ", total*2, "\n")
return false
}
return true
}
// verify line's checksum
func CheckChecksum(l uint16, converted []byte) bool {
sum := uint64(0)
limit := (l - 1) / 2
for i := uint16(0); i < limit; i++ {
sum += uint64(converted[i])
}
complement := ^sum
complement++
checksum := uint8(complement & 0xff)
if checksum != 0 {
print("!bad checksum! expected 0 and got ", checksum,
" from declared checksum of ", converted[limit-1], "\n")
return false
}
return true
}
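// Worked example (sketch, values computed here rather than taken from the source):
// for the line ":01001000559A" the converted bytes are 01 00 10 00 55 9A; their sum is
// 0x100, so the low byte of the two's complement is zero and CheckChecksum returns true.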
// extract the line type, 00 (data), 01 (eof), or 02 (esa) and (ok?)
func ExtractLineType(converted []byte) (HexLineType, bool) {
switch converted[3] {
case 0:
return DataLine, true
case 1:
return EndOfFile, true
case 2:
return ExtendedSegmentAddress, true
case 4:
return ExtendedLinearAddress, true
case 5:
return StartLinearAddress, true
case 0x80:
return ExtensionSetParameters, true
case 0x81:
return ExtensionBigLinearAddress, true
case 0x82:
return ExtensionBigEntryPoint, true
case 3:
print("!unimplemented line type in hex transmission [StartSegmentAddress] ")
return DataLine, false
default:
print("!bad buffer type:", converted[3], "\n")
return DataLine, false
}
}
// change buffer of ascii->converted bytes by taking the ascii values (2 per byte) and making them proper bytes
func ConvertBuffer(l uint16, raw []byte) []byte {
//l-1 because the : is skipped so the remaining number of characters must be even
if (l-1)%2 == 1 {
print("!bad payload, expected even number of hex bytes but got:", l-1, "\n")
return nil
}
converted := make([]byte, (l-1)/2)
//skip first colon
for i := uint16(1); i < l; i += 2 {
v, ok := bufferValue(i, raw)
if !ok {
return nil // they already sent the error to the other side
}
converted[(i-1)/2] = v
}
return converted
}
// this hits buffer[i] and buffer[i+1] to convert an ascii byte
// returns false to mean you had a bad character in the input
func bufferValue(index uint16, buffer []byte) (uint8, bool) |
///////////////////////////////////////////////////////////////////////////////////
// ENCODING
///////////////////////////////////////////////////////////////////////////////////
func EncodeDataBytes(raw []byte, offset uint16) string {
if len(raw) > 255 {
log.Fatalf("intel hex format only allows 2 hex characters for the size\n"+
"of a data buffer, it can't be more than 0xff bytes (you have %x)", len(raw))
}
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf(":%02X%04X%02X", len(raw), offset, int(DataLine)))
for _, b := range raw {
buf.WriteString(fmt.Sprintf("%02x", b))
}
cs := createChecksum(raw, offset, DataLine)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
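// Worked example (sketch, checksum computed here rather than taken from the source):
// EncodeDataBytes([]byte{0xDE, 0xAD}, 0x0010) frames two data bytes at offset 0x0010 as
// ":02001000dead63", i.e. length 02, offset 0010, type 00, payload DE AD, and checksum
// 0x63, the two's complement of (0x02+0x10+0xDE+0xAD)&0xff as produced by createChecksum.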
func EncodeBigEntry(entry uint32) string {
buf := bytes.Buffer{}
raw := []byte{byte(entry & 0xff000000 >> 24), byte(entry & 0x00ff0000 >> 16),
byte(entry & 0x0000ff00 >> 8), byte(entry & 0x000000ff)}
buf.WriteString(fmt.Sprintf(":040000%02X%08X", int(ExtensionBigEntryPoint), entry))
cs := createChecksum(raw, 0, ExtensionBigEntryPoint)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
func EncodeSLA(addr uint32) string {
buf := bytes.Buffer{}
raw := []byte{byte(addr & 0xff000000 >> 24), byte(addr & 0x00ff0000 >> 16),
byte(addr & 0x0000ff00 >> 8), byte(addr & 0x000000ff)}
buf.WriteString(fmt.Sprintf(":040000%02X%08X", int(StartLinearAddress), addr))
cs := createChecksum(raw, 0, StartLinearAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
func EncodeBigAddr(addr uint32) string {
buf := bytes.Buffer{}
raw := []byte{byte(addr & 0xff000000 >> 24), byte(addr & 0x00ff0000 >> 16),
byte(addr & 0x0000ff00 >> 8), byte(addr & 0x000000ff)}
buf.WriteString(fmt.Sprintf(":040000%02X%08X", int(ExtensionBigLinearAddress), addr))
cs := createChecksum(raw, 0, ExtensionBigLinearAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// only pass the most significant 16 bits of the 32 bit base
func EncodeELA(base uint16) string {
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf(":020000%02X%04X", int(ExtendedLinearAddress), base))
raw := []byte{byte(base & 0xff00 >> 8), byte(base & 0x00ff)}
cs := createChecksum(raw, 0, ExtendedLinearAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// only pass the top 16 bits of 24 bit base
func EncodeESA(base uint16) string {
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf(":020000%02X%04X", int(ExtendedSegmentAddress), base))
raw := []byte{byte(base & 0xff00 >> 8), byte(base & 0x00ff)}
cs := createChecksum(raw, 0, ExtendedSegmentAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// this takes 4 64 bit integers (32 bytes)
func EncodeExtensionSetParameters(v [4]uint64) string {
buf := bytes.Buffer{}
valueBuffer := bytes.Buffer{} //for checksum ease
buf.WriteString(fmt.Sprintf(":200000%02X", int(ExtensionSetParameters)))
for i := 0; i < 4; i++ {
value := v[i]
for p := 7; p >= 0; p-- {
b := byte((value >> (p * 8)) & 0xff)
valueBuffer.WriteByte(b)
}
buf.WriteString(fmt.Sprintf("%016X", value))
}
cs := createChecksum(valueBuffer.Bytes(), 0, ExtensionSetParameters)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// tricky: offset only used by the data packet since everything else has 0 offset (not used)
func createChecksum(raw []byte, offset uint16, hlt HexLineType) uint8 {
sum := len(raw)
sum += int(offset & 0xff)
sum += int(offset>>8) & 0xff
sum += int(hlt)
for _, v := range raw {
sum += int(v)
}
sum = ^sum
sum += 1
sum = sum & 0xff
return uint8(sum)
}
| {
i := int(index)
total := uint8(0)
switch buffer[i] {
case '0':
case '1':
total += 16 * 1
case '2':
total += 16 * 2
case '3':
total += 16 * 3
case '4':
total += 16 * 4
case '5':
total += 16 * 5
case '6':
total += 16 * 6
case '7':
total += 16 * 7
case '8':
total += 16 * 8
case '9':
total += 16 * 9
case 'a', 'A':
total += 16 * 10
case 'b', 'B':
total += 16 * 11
case 'c', 'C':
total += 16 * 12
case 'd', 'D':
total += 16 * 13
case 'e', 'E':
total += 16 * 14
case 'f', 'F':
total += 16 * 15
default:
print("!bad character in payload hi byte(number #", i, "):", buffer[i], "\n")
return 0xff, false
}
switch buffer[i+1] {
case '0':
case '1':
total++
case '2':
total += 2
case '3':
total += 3
case '4':
total += 4
case '5':
total += 5
case '6':
total += 6
case '7':
total += 7
case '8':
total += 8
case '9':
total += 9
case 'a', 'A':
total += 10
case 'b', 'B':
total += 11
case 'c', 'C':
total += 12
case 'd', 'D':
total += 13
case 'e', 'E':
total += 14
case 'f', 'F':
total += 15
default:
print("!bad character in payload low byte (number #", i+1, "):", buffer[i+1], "\n")
return 0xff, false
}
return total, true
} | identifier_body |
hexformat.go | package anticipation
import (
"bytes"
"errors"
"fmt"
"log"
)
//needs to be all 1s on right, can't be larger than 255
const FileXFerDataLineSize = uint16(0xff)
type EncodeDecodeError struct {
s string
}
func NewEncodeDecodeError(s string) error {
return &EncodeDecodeError{s}
}
func (d *EncodeDecodeError) Error() string {
return d.s
}
//
// We implement all the hexlinetypes except 3, which is some ancient
// X86 thing involving memory segments...
//
type HexLineType int
const (
DataLine HexLineType = 0
EndOfFile HexLineType = 1
ExtendedSegmentAddress HexLineType = 2
ExtendedLinearAddress HexLineType = 4
StartLinearAddress HexLineType = 5
ExtensionSetParameters HexLineType = 0x80
ExtensionBigLinearAddress HexLineType = 0x81
ExtensionBigEntryPoint HexLineType = 0x82
)
func (hlt HexLineType) String() string {
switch hlt {
case DataLine:
return "DataLine"
case EndOfFile:
return "EndOfFile"
case ExtendedSegmentAddress:
return "ExtendedSegmentAddress"
case ExtendedLinearAddress:
return "ExtendedLinearAddress"
case StartLinearAddress:
return "StartLinearAddress"
case ExtensionSetParameters:
return "ExtensionSetParametersTime"
case ExtensionBigLinearAddress:
return "ExtensionBigLinear"
case ExtensionBigEntryPoint:
return "ExtensionBigEntryPoint"
}
return "unknown"
}
func hexLineTypeFromInt(i int) HexLineType {
switch i {
case 0:
return DataLine
case 1:
return EndOfFile
case 2:
return ExtendedSegmentAddress
case 4:
return ExtendedLinearAddress
case 5:
return StartLinearAddress
case 0x80:
return ExtensionSetParameters
case 0x81:
return ExtensionBigLinearAddress
case 0x82:
return ExtensionBigEntryPoint
}
panic("!unable to understand line type\n")
}
///////////////////////////////////////////////////////////////////////////////////
// DECODE
///////////////////////////////////////////////////////////////////////////////////
var fart = uint64(0xf)
// deal with a received hex line and return (error?,done?)
func ProcessLine(t HexLineType, converted []byte, bb byteBuster) (bool, bool) {
switch t {
case DataLine:
l := converted[0]
offset := (uint64(converted[1]) * 256) + (uint64(converted[2]))
//baseAddr + value in the line => basePtr
baseAddr := bb.BaseAddress() + offset
var val uint8
for i := uint64(0); i < uint64(l); i++ {
addr := baseAddr + i
val = converted[4+i]
if addr&(^fart) == 0xfffffc0030000000 {
log.Printf("processing line %x -> %x %+v", addr, val, converted)
}
if !bb.Write(addr, val) {
return true, false
}
}
return false, false
case EndOfFile:
return false, true
case ExtendedSegmentAddress: //16 bit addr
length := converted[0]
if length != 2 {
print("!ESA value has too many bytes:", length, "\n")
return true, false
}
esaAddr := uint32(converted[4])*256 + uint32(converted[5])
esaAddr = esaAddr << 4 //it's assumed to be a multiple of 16
bb.SetBaseAddr(esaAddr)
return false, false
case ExtendedLinearAddress: //32 bit addr but only top 16 passed
length := converted[0]
if length != 2 {
print("!ELA value has too many bytes:", length, "\n")
return true, false
}
elaAddr := uint32(converted[4])*256 + uint32(converted[5])
elaAddr = elaAddr << 16 //data supplied is high order 16 of 32
bb.SetBaseAddr(elaAddr) //but this sets the lower order 32 of 64
return false, false
case ExtensionSetParameters: //4 64 bit integers
length := converted[0]
if length != 32 {
print("!extension parameters must be exactly 32 bytes, but was :", length, "\n")
return true, false
}
for i := 0; i < 4; i++ {
value := uint64(0)
for p := 7; p >= 0; p-- {
placeValue := uint64(1 << (8 * p))
//4 is because of four constant valuesat left of converted[]
//i*8 is which param
//7-p is byte
value += (placeValue * uint64(converted[(4)+(i*8)+(7-p)]))
}
bb.SetParameter(i, value)
}
return false, false
case ExtensionBigLinearAddress: //32 bit int which is the HIGH order of 64bit addr
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigBaseAddr(t)
return false, false
case ExtensionBigEntryPoint: //32 bit int which is the HIGH order of 64bit pointer
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigEntryPoint(t)
return false, false
case StartLinearAddress: //32 bit addr
length := converted[0]
if length != 4 {
print("!SLA value has too many bytes:", length, "\n")
return true, false
}
slaAddr := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetEntryPoint(slaAddr)
return false, false
}
print("!unable to understand line type [processLine]\n")
return false, true
}
// take in a string and return either an exception or a well formed value
func DecodeAndCheckStringToBytes(s string) ([]byte, HexLineType, uint32, error) {
lenAs16 := uint16(len(s))
converted := ConvertBuffer(lenAs16, []byte(s))
if converted == nil {
return nil, HexLineType(0), 0, errors.New("convert buffer failed")
}
var addr uint32
lt, ok := ExtractLineType(converted)
if !ok {
return nil, DataLine, 0, NewEncodeDecodeError(fmt.Sprintf("unable to extract line type from: %s", s))
}
if lt == DataLine {
addr = (uint32(converted[1]) * 256) + (uint32(converted[2]))
}
if ok := ValidBufferLength(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected buffer length to be ok, but wasn't: %s", s))
}
if ok := CheckChecksum(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected checksum to be ok, but wasn't:%s", s))
}
return converted, lt, addr, nil
}
// received a line, check that it has a hope of being syntactically correct
func ValidBufferLength(l uint16, converted []byte) bool {
total := uint16(11) //size of just framing in characters (colon, 2 len chars, 4 addr chars, 2 type chars, 2 checksum chars)
if uint16(l) < total {
print("!bad buffer length, can't be smaller than", total, ":", l, "\n")
return false
}
total += uint16(converted[0]) * 2
if l != total {
print("!bad buffer length, expected ", total, " but got", l, " based on ", total*2, "\n")
return false
}
return true
}
// verify line's checksum
func CheckChecksum(l uint16, converted []byte) bool {
sum := uint64(0)
limit := (l - 1) / 2
for i := uint16(0); i < limit; i++ {
sum += uint64(converted[i])
}
complement := ^sum
complement++
checksum := uint8(complement & 0xff)
if checksum != 0 {
print("!bad checksum! expected 0 and got ", checksum,
" from declared checksum of ", converted[limit-1], "\n")
return false
}
return true
}
// extract the line type, 00 (data), 01 (eof), or 02 (esa) and (ok?)
func ExtractLineType(converted []byte) (HexLineType, bool) {
switch converted[3] {
case 0:
return DataLine, true
case 1:
return EndOfFile, true
case 2:
return ExtendedSegmentAddress, true
case 4:
return ExtendedLinearAddress, true
case 5:
return StartLinearAddress, true
case 0x80:
return ExtensionSetParameters, true
case 0x81:
return ExtensionBigLinearAddress, true
case 0x82:
return ExtensionBigEntryPoint, true
case 3:
print("!unimplemented line type in hex transmission [StartSegmentAddress] ")
return DataLine, false
default:
print("!bad buffer type:", converted[3], "\n")
return DataLine, false
}
}
// change buffer of ascii->converted bytes by taking the ascii values (2 per byte) and making them proper bytes
func ConvertBuffer(l uint16, raw []byte) []byte {
//l-1 because the : is skipped so the remaining number of characters must be even
if (l-1)%2 == 1 {
print("!bad payload, expected even number of hex bytes but got:", l-1, "\n")
return nil
}
converted := make([]byte, (l-1)/2)
//skip first colon
for i := uint16(1); i < l; i += 2 {
v, ok := bufferValue(i, raw)
if !ok {
return nil // they already sent the error to the other side
}
converted[(i-1)/2] = v
}
return converted
}
// this hits buffer[i] and buffer[i+1] to convert an ascii byte
// returns false to mean you had a bad character in the input
func | (index uint16, buffer []byte) (uint8, bool) {
i := int(index)
total := uint8(0)
switch buffer[i] {
case '0':
case '1':
total += 16 * 1
case '2':
total += 16 * 2
case '3':
total += 16 * 3
case '4':
total += 16 * 4
case '5':
total += 16 * 5
case '6':
total += 16 * 6
case '7':
total += 16 * 7
case '8':
total += 16 * 8
case '9':
total += 16 * 9
case 'a', 'A':
total += 16 * 10
case 'b', 'B':
total += 16 * 11
case 'c', 'C':
total += 16 * 12
case 'd', 'D':
total += 16 * 13
case 'e', 'E':
total += 16 * 14
case 'f', 'F':
total += 16 * 15
default:
print("!bad character in payload hi byte(number #", i, "):", buffer[i], "\n")
return 0xff, false
}
switch buffer[i+1] {
case '0':
case '1':
total++
case '2':
total += 2
case '3':
total += 3
case '4':
total += 4
case '5':
total += 5
case '6':
total += 6
case '7':
total += 7
case '8':
total += 8
case '9':
total += 9
case 'a', 'A':
total += 10
case 'b', 'B':
total += 11
case 'c', 'C':
total += 12
case 'd', 'D':
total += 13
case 'e', 'E':
total += 14
case 'f', 'F':
total += 15
default:
print("!bad character in payload low byte (number #", i+1, "):", buffer[i+1], "\n")
return 0xff, false
}
return total, true
}
///////////////////////////////////////////////////////////////////////////////////
// ENCODING
///////////////////////////////////////////////////////////////////////////////////
func EncodeDataBytes(raw []byte, offset uint16) string {
if len(raw) > 255 {
log.Fatalf("intel hex format only allows 2 hex characters for the size\n"+
"of a data buffer, it can't be more than 0xff bytes (you have %x)", len(raw))
}
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf(":%02X%04X%02X", len(raw), offset, int(DataLine)))
for _, b := range raw {
buf.WriteString(fmt.Sprintf("%02x", b))
}
cs := createChecksum(raw, offset, DataLine)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
func EncodeBigEntry(entry uint32) string {
buf := bytes.Buffer{}
raw := []byte{byte(entry & 0xff000000 >> 24), byte(entry & 0x00ff0000 >> 16),
byte(entry & 0x0000ff00 >> 8), byte(entry & 0x000000ff)}
buf.WriteString(fmt.Sprintf(":040000%02X%08X", int(ExtensionBigEntryPoint), entry))
cs := createChecksum(raw, 0, ExtensionBigEntryPoint)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
func EncodeSLA(addr uint32) string {
buf := bytes.Buffer{}
raw := []byte{byte(addr & 0xff000000 >> 24), byte(addr & 0x00ff0000 >> 16),
byte(addr & 0x0000ff00 >> 8), byte(addr & 0x000000ff)}
buf.WriteString(fmt.Sprintf(":040000%02X%08X", int(StartLinearAddress), addr))
cs := createChecksum(raw, 0, StartLinearAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
func EncodeBigAddr(addr uint32) string {
buf := bytes.Buffer{}
raw := []byte{byte(addr & 0xff000000 >> 24), byte(addr & 0x00ff0000 >> 16),
byte(addr & 0x0000ff00 >> 8), byte(addr & 0x000000ff)}
buf.WriteString(fmt.Sprintf(":040000%02X%08X", int(ExtensionBigLinearAddress), addr))
cs := createChecksum(raw, 0, ExtensionBigLinearAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// only pass the most significant 16 bits of the 32 bit base
func EncodeELA(base uint16) string {
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf(":020000%02X%04X", int(ExtendedLinearAddress), base))
raw := []byte{byte(base & 0xff00 >> 8), byte(base & 0x00ff)}
cs := createChecksum(raw, 0, ExtendedLinearAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// only pass the top 16 bits of 24 bit base
func EncodeESA(base uint16) string {
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf(":020000%02X%04X", int(ExtendedSegmentAddress), base))
raw := []byte{byte(base & 0xff00 >> 8), byte(base & 0x00ff)}
cs := createChecksum(raw, 0, ExtendedSegmentAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// this takes 4 64 bit integers (32 bytes)
func EncodeExtensionSetParameters(v [4]uint64) string {
buf := bytes.Buffer{}
valueBuffer := bytes.Buffer{} //for checksum ease
buf.WriteString(fmt.Sprintf(":200000%02X", int(ExtensionSetParameters)))
for i := 0; i < 4; i++ {
value := v[i]
for p := 7; p >= 0; p-- {
b := byte((value >> (p * 8)) & 0xff)
valueBuffer.WriteByte(b)
}
buf.WriteString(fmt.Sprintf("%016X", value))
}
cs := createChecksum(valueBuffer.Bytes(), 0, ExtensionSetParameters)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// tricky: offset only used by the data packet since everything else has 0 offset (not used)
func createChecksum(raw []byte, offset uint16, hlt HexLineType) uint8 {
sum := len(raw)
sum += int(offset & 0xff)
sum += int(offset>>8) & 0xff
sum += int(hlt)
for _, v := range raw {
sum += int(v)
}
sum = ^sum
sum += 1
sum = sum & 0xff
return uint8(sum)
}
| bufferValue | identifier_name |
hexformat.go | package anticipation
import (
"bytes"
"errors"
"fmt"
"log"
)
//needs to be all 1s on right, can't be larger than 255
const FileXFerDataLineSize = uint16(0xff)
type EncodeDecodeError struct {
s string
}
func NewEncodeDecodeError(s string) error {
return &EncodeDecodeError{s}
}
func (d *EncodeDecodeError) Error() string {
return d.s
}
//
// We implement all the hexlinetypes except 3, which is some ancient
// X86 thing involving memory segments...
//
type HexLineType int
const (
DataLine HexLineType = 0
EndOfFile HexLineType = 1
ExtendedSegmentAddress HexLineType = 2
ExtendedLinearAddress HexLineType = 4
StartLinearAddress HexLineType = 5
ExtensionSetParameters HexLineType = 0x80
ExtensionBigLinearAddress HexLineType = 0x81
ExtensionBigEntryPoint HexLineType = 0x82
)
func (hlt HexLineType) String() string {
switch hlt {
case DataLine:
return "DataLine"
case EndOfFile:
return "EndOfFile"
case ExtendedSegmentAddress:
return "ExtendedSegmentAddress"
case ExtendedLinearAddress:
return "ExtendedLinearAddress"
case StartLinearAddress:
return "StartLinearAddress"
case ExtensionSetParameters:
return "ExtensionSetParametersTime"
case ExtensionBigLinearAddress:
return "ExtensionBigLinear"
case ExtensionBigEntryPoint:
return "ExtensionBigEntryPoint"
}
return "unknown"
}
func hexLineTypeFromInt(i int) HexLineType {
switch i {
case 0:
return DataLine
case 1:
return EndOfFile
case 2:
return ExtendedSegmentAddress
case 4:
return ExtendedLinearAddress
case 5:
return StartLinearAddress
case 0x80:
return ExtensionSetParameters
case 0x81:
return ExtensionBigLinearAddress
case 0x82:
return ExtensionBigEntryPoint
}
panic("!unable to understand line type\n")
}
///////////////////////////////////////////////////////////////////////////////////
// DECODE
///////////////////////////////////////////////////////////////////////////////////
var fart = uint64(0xf)
// deal with a received hex line and return (error?,done?)
func ProcessLine(t HexLineType, converted []byte, bb byteBuster) (bool, bool) {
switch t {
case DataLine:
l := converted[0]
offset := (uint64(converted[1]) * 256) + (uint64(converted[2]))
//baseAddr + value in the line => basePtr
baseAddr := bb.BaseAddress() + offset
var val uint8
for i := uint64(0); i < uint64(l); i++ {
addr := baseAddr + i
val = converted[4+i]
if addr&(^fart) == 0xfffffc0030000000 {
log.Printf("processing line %x -> %x %+v", addr, val, converted)
}
if !bb.Write(addr, val) {
return true, false
}
}
return false, false
case EndOfFile:
return false, true
case ExtendedSegmentAddress: //16 bit addr
length := converted[0]
if length != 2 {
print("!ESA value has too many bytes:", length, "\n")
return true, false
}
esaAddr := uint32(converted[4])*256 + uint32(converted[5])
esaAddr = esaAddr << 4 //it's assumed to be a multiple of 16
bb.SetBaseAddr(esaAddr)
return false, false
case ExtendedLinearAddress: //32 bit addr but only top 16 passed
length := converted[0]
if length != 2 {
print("!ELA value has too many bytes:", length, "\n")
return true, false
}
elaAddr := uint32(converted[4])*256 + uint32(converted[5])
elaAddr = elaAddr << 16 //data supplied is high order 16 of 32
bb.SetBaseAddr(elaAddr) //but this sets the lower order 32 of 64
return false, false
case ExtensionSetParameters: //4 64 bit integers
length := converted[0]
if length != 32 {
print("!extension parameters must be exactly 32 bytes, but was :", length, "\n")
return true, false
}
for i := 0; i < 4; i++ {
value := uint64(0)
for p := 7; p >= 0; p-- {
placeValue := uint64(1 << (8 * p))
//4 is because of the four constant values at the left of converted[]
//i*8 is which param
//7-p is byte
value += (placeValue * uint64(converted[(4)+(i*8)+(7-p)]))
}
bb.SetParameter(i, value)
}
return false, false
case ExtensionBigLinearAddress: //32 bit int which is the HIGH order of 64bit addr
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigBaseAddr(t)
return false, false
case ExtensionBigEntryPoint: //32 bit int which is the HIGH order of 64bit pointer
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigEntryPoint(t)
return false, false
case StartLinearAddress: //32 bit addr
length := converted[0]
if length != 4 {
print("!SLA value has too many bytes:", length, "\n")
return true, false
}
slaAddr := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetEntryPoint(slaAddr)
return false, false
}
print("!unable to understand line type [processLine]\n")
return false, true
}
// take in a string and return either an exception or a well formed value
func DecodeAndCheckStringToBytes(s string) ([]byte, HexLineType, uint32, error) {
lenAs16 := uint16(len(s))
converted := ConvertBuffer(lenAs16, []byte(s))
if converted == nil {
return nil, HexLineType(0), 0, errors.New("convert buffer failed")
}
var addr uint32
lt, ok := ExtractLineType(converted)
if !ok {
return nil, DataLine, 0, NewEncodeDecodeError(fmt.Sprintf("unable to extract line type from: %s", s))
}
if lt == DataLine {
addr = (uint32(converted[1]) * 256) + (uint32(converted[2]))
}
if ok := ValidBufferLength(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected buffer length to be ok, but wasn't: %s", s))
}
if ok := CheckChecksum(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected checksum to be ok, but wasn't:%s", s))
}
return converted, lt, addr, nil
}
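// Illustrative example (not in the original file): decoding the standard Intel HEX
// end-of-file record with this function:
//
//   converted, lt, addr, err := DecodeAndCheckStringToBytes(":00000001FF")
//   // err == nil, lt == EndOfFile, addr == 0,
//   // converted == []byte{0x00, 0x00, 0x00, 0x01, 0xFF}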
// received a line, check that it has a hope of being syntactically correct
func ValidBufferLength(l uint16, converted []byte) bool {
total := uint16(11) //size of just framing in characters (colon, 2 len chars, 4 addr chars, 2 type chars, 2 checksum chars)
if uint16(l) < total {
print("!bad buffer length, can't be smaller than", total, ":", l, "\n")
return false
}
total += uint16(converted[0]) * 2
if l != total {
print("!bad buffer length, expected ", total, " but got", l, " based on ", total*2, "\n")
return false
}
return true
}
// verify line's checksum
func CheckChecksum(l uint16, converted []byte) bool {
sum := uint64(0)
limit := (l - 1) / 2
for i := uint16(0); i < limit; i++ {
sum += uint64(converted[i])
}
complement := ^sum
complement++
checksum := uint8(complement & 0xff)
if checksum != 0 {
print("!bad checksum! expected 0 and got ", checksum,
" from declared checksum of ", converted[limit-1], "\n")
return false
}
return true
}
// extract the line type, 00 (data), 01 (eof), or 02 (esa) and (ok?)
func ExtractLineType(converted []byte) (HexLineType, bool) {
switch converted[3] {
case 0:
return DataLine, true
case 1:
return EndOfFile, true
case 2:
return ExtendedSegmentAddress, true
case 4:
return ExtendedLinearAddress, true
case 5:
return StartLinearAddress, true
case 0x80:
return ExtensionSetParameters, true
case 0x81:
return ExtensionBigLinearAddress, true
case 0x82:
return ExtensionBigEntryPoint, true
case 3:
print("!unimplemented line type in hex transmission [StartSegmentAddress] ")
return DataLine, false
default:
print("!bad buffer type:", converted[3], "\n")
return DataLine, false
}
}
// change buffer of ascii->converted bytes by taking the ascii values (2 per byte) and making them proper bytes
func ConvertBuffer(l uint16, raw []byte) []byte {
//l-1 because the : is skipped so the remaining number of characters must be even
if (l-1)%2 == 1 {
print("!bad payload, expected even number of hex bytes but got:", l-1, "\n")
return nil
}
converted := make([]byte, (l-1)/2)
//skip first colon
for i := uint16(1); i < l; i += 2 {
v, ok := bufferValue(i, raw)
if !ok {
return nil // they already sent the error to the other side
}
converted[(i-1)/2] = v
}
return converted
}
// this hits buffer[i] and buffer[i+1] to convert an ascii byte
// returns false to mean you had a bad character in the input
func bufferValue(index uint16, buffer []byte) (uint8, bool) {
i := int(index)
total := uint8(0)
switch buffer[i] {
case '0':
case '1':
total += 16 * 1
case '2':
total += 16 * 2
case '3':
total += 16 * 3
case '4':
total += 16 * 4
case '5':
total += 16 * 5
case '6':
total += 16 * 6
case '7':
total += 16 * 7
case '8':
total += 16 * 8
case '9':
total += 16 * 9
case 'a', 'A':
total += 16 * 10
case 'b', 'B':
total += 16 * 11
case 'c', 'C':
total += 16 * 12
case 'd', 'D':
total += 16 * 13
case 'e', 'E':
total += 16 * 14
case 'f', 'F':
total += 16 * 15
default:
print("!bad character in payload hi byte(number #", i, "):", buffer[i], "\n")
return 0xff, false
}
switch buffer[i+1] {
case '0':
case '1':
total++
case '2':
total += 2
case '3':
total += 3
case '4':
total += 4
case '5':
total += 5
case '6':
total += 6
case '7':
total += 7
case '8':
total += 8
case '9':
total += 9
case 'a', 'A':
total += 10
case 'b', 'B':
total += 11
case 'c', 'C':
total += 12
case 'd', 'D':
total += 13
case 'e', 'E':
total += 14
case 'f', 'F':
total += 15
default:
print("!bad character in payload low byte (number #", i+1, "):", buffer[i+1], "\n")
return 0xff, false
}
return total, true
}
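// Illustrative example (not in the original file): bufferValue(1, []byte(":a5"))
// reads the two ASCII hex digits at buffer[1] and buffer[2] and returns (0xA5, true);
// a non-hex character at either position returns (0xff, false) after printing an error.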
///////////////////////////////////////////////////////////////////////////////////
// ENCODING
///////////////////////////////////////////////////////////////////////////////////
func EncodeDataBytes(raw []byte, offset uint16) string {
if len(raw) > 255 {
log.Fatalf("intel hex format only allows 2 hex characters for the size\n"+
"of a data buffer, it can't be more than 0xff bytes (you have %x)", len(raw))
}
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf(":%02X%04X%02X", len(raw), offset, int(DataLine)))
for _, b := range raw {
buf.WriteString(fmt.Sprintf("%02x", b))
}
cs := createChecksum(raw, offset, DataLine)
buf.WriteString(fmt.Sprintf("%02X", cs)) | raw := []byte{byte(entry & 0xff000000 >> 24), byte(entry & 0x00ff0000 >> 16),
byte(entry & 0x0000ff00 >> 8), byte(entry & 0x000000ff)}
buf.WriteString(fmt.Sprintf(":040000%02X%08X", int(ExtensionBigEntryPoint), entry))
cs := createChecksum(raw, 0, ExtensionBigEntryPoint)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
func EncodeSLA(addr uint32) string {
buf := bytes.Buffer{}
raw := []byte{byte(addr & 0xff000000 >> 24), byte(addr & 0x00ff0000 >> 16),
byte(addr & 0x0000ff00 >> 8), byte(addr & 0x000000ff)}
buf.WriteString(fmt.Sprintf(":040000%02X%08X", int(StartLinearAddress), addr))
cs := createChecksum(raw, 0, StartLinearAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
func EncodeBigAddr(addr uint32) string {
buf := bytes.Buffer{}
raw := []byte{byte(addr & 0xff000000 >> 24), byte(addr & 0x00ff0000 >> 16),
byte(addr & 0x0000ff00 >> 8), byte(addr & 0x000000ff)}
buf.WriteString(fmt.Sprintf(":040000%02X%08X", int(ExtensionBigLinearAddress), addr))
cs := createChecksum(raw, 0, ExtensionBigLinearAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// only pass the most significant 16 bits of the 32 bit base
func EncodeELA(base uint16) string {
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf(":020000%02X%04X", int(ExtendedLinearAddress), base))
raw := []byte{byte(base & 0xff00 >> 8), byte(base & 0x00ff)}
cs := createChecksum(raw, 0, ExtendedLinearAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// only pass the top 16 bits of 24 bit base
func EncodeESA(base uint16) string {
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf(":020000%02X%04X", int(ExtendedSegmentAddress), base))
raw := []byte{byte(base & 0xff00 >> 8), byte(base & 0x00ff)}
cs := createChecksum(raw, 0, ExtendedSegmentAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// this takes 4 64 bit integers (32 bytes)
func EncodeExtensionSetParameters(v [4]uint64) string {
buf := bytes.Buffer{}
valueBuffer := bytes.Buffer{} //for checksum ease
buf.WriteString(fmt.Sprintf(":200000%02X", int(ExtensionSetParameters)))
for i := 0; i < 4; i++ {
value := v[i]
for p := 7; p >= 0; p-- {
b := byte((value >> (p * 8)) & 0xff)
valueBuffer.WriteByte(b)
}
buf.WriteString(fmt.Sprintf("%016X", value))
}
cs := createChecksum(valueBuffer.Bytes(), 0, ExtensionSetParameters)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// tricky: offset only used by the data packet since everything else has 0 offset (not used)
func createChecksum(raw []byte, offset uint16, hlt HexLineType) uint8 {
sum := len(raw)
sum += int(offset & 0xff)
sum += int(offset>>8) & 0xff
sum += int(hlt)
for _, v := range raw {
sum += int(v)
}
sum = ^sum
sum += 1
sum = sum & 0xff
return uint8(sum)
} | return buf.String()
}
func EncodeBigEntry(entry uint32) string {
buf := bytes.Buffer{} | random_line_split |
hexformat.go | package anticipation
import (
"bytes"
"errors"
"fmt"
"log"
)
//needs to be all 1s on right, can't be larger than 255
const FileXFerDataLineSize = uint16(0xff)
type EncodeDecodeError struct {
s string
}
func NewEncodeDecodeError(s string) error {
return &EncodeDecodeError{s}
}
func (d *EncodeDecodeError) Error() string {
return d.s
}
//
// We implement all the hexlinetypes except 3, which is some ancient
// X86 thing involving memory segments...
//
type HexLineType int
const (
DataLine HexLineType = 0
EndOfFile HexLineType = 1
ExtendedSegmentAddress HexLineType = 2
ExtendedLinearAddress HexLineType = 4
StartLinearAddress HexLineType = 5
ExtensionSetParameters HexLineType = 0x80
ExtensionBigLinearAddress HexLineType = 0x81
ExtensionBigEntryPoint HexLineType = 0x82
)
func (hlt HexLineType) String() string {
switch hlt {
case DataLine:
return "DataLine"
case EndOfFile:
return "EndOfFile"
case ExtendedSegmentAddress:
return "ExtendedSegmentAddress"
case ExtendedLinearAddress:
return "ExtendedLinearAddress"
case StartLinearAddress:
return "StartLinearAddress"
case ExtensionSetParameters:
return "ExtensionSetParametersTime"
case ExtensionBigLinearAddress:
return "ExtensionBigLinear"
case ExtensionBigEntryPoint:
return "ExtensionBigEntryPoint"
}
return "unknown"
}
func hexLineTypeFromInt(i int) HexLineType {
switch i {
case 0:
return DataLine
case 1:
return EndOfFile
case 2:
return ExtendedSegmentAddress
case 4:
return ExtendedLinearAddress
case 5:
return StartLinearAddress
case 0x80:
return ExtensionSetParameters
case 0x81:
return ExtensionBigLinearAddress
case 0x82:
return ExtensionBigEntryPoint
}
panic("!unable to understand line type\n")
}
///////////////////////////////////////////////////////////////////////////////////
// DECODE
///////////////////////////////////////////////////////////////////////////////////
var fart = uint64(0xf)
// deal with a received hex line and return (error?,done?)
func ProcessLine(t HexLineType, converted []byte, bb byteBuster) (bool, bool) {
switch t {
case DataLine:
l := converted[0]
offset := (uint64(converted[1]) * 256) + (uint64(converted[2]))
//baseAddr + value in the line => basePtr
baseAddr := bb.BaseAddress() + offset
var val uint8
for i := uint64(0); i < uint64(l); i++ {
addr := baseAddr + i
val = converted[4+i]
if addr&(^fart) == 0xfffffc0030000000 {
log.Printf("processing line %x -> %x %+v", addr, val, converted)
}
if !bb.Write(addr, val) {
return true, false
}
}
return false, false
case EndOfFile:
return false, true
case ExtendedSegmentAddress: //16 bit addr
length := converted[0]
if length != 2 {
print("!ESA value has too many bytes:", length, "\n")
return true, false
}
esaAddr := uint32(converted[4])*256 + uint32(converted[5])
esaAddr = esaAddr << 4 //it's assumed to be a multiple of 16
bb.SetBaseAddr(esaAddr)
return false, false
case ExtendedLinearAddress: //32 bit addr but only top 16 passed
length := converted[0]
if length != 2 {
print("!ELA value has too many bytes:", length, "\n")
return true, false
}
elaAddr := uint32(converted[4])*256 + uint32(converted[5])
elaAddr = elaAddr << 16 //data supplied is high order 16 of 32
bb.SetBaseAddr(elaAddr) //but this sets the lower order 32 of 64
return false, false
case ExtensionSetParameters: //4 64 bit integers
length := converted[0]
if length != 32 {
print("!extension parameters must be exactly 32 bytes, but was :", length, "\n")
return true, false
}
for i := 0; i < 4; i++ {
value := uint64(0)
for p := 7; p >= 0; p-- {
placeValue := uint64(1 << (8 * p))
//4 is because of the four constant values at the left of converted[]
//i*8 is which param
//7-p is byte
value += (placeValue * uint64(converted[(4)+(i*8)+(7-p)]))
}
bb.SetParameter(i, value)
}
return false, false
case ExtensionBigLinearAddress: //32 bit int which is the HIGH order of 64bit addr
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigBaseAddr(t)
return false, false
case ExtensionBigEntryPoint: //32 bit int which is the HIGH order of 64bit pointer
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigEntryPoint(t)
return false, false
case StartLinearAddress: //32 bit addr
length := converted[0]
if length != 4 {
print("!SLA value has too many bytes:", length, "\n")
return true, false
}
slaAddr := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetEntryPoint(slaAddr)
return false, false
}
print("!unable to understand line type [processLine]\n")
return false, true
}
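// Illustrative note (not in the original file): the two bools from ProcessLine are
// (hadError, done). A well-formed DataLine returns (false, false), EndOfFile returns
// (false, true), and a record the byteBuster refuses to write returns (true, false).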
// take in a string and return either an exception or a well formed value
func DecodeAndCheckStringToBytes(s string) ([]byte, HexLineType, uint32, error) {
lenAs16 := uint16(len(s))
converted := ConvertBuffer(lenAs16, []byte(s))
if converted == nil {
return nil, HexLineType(0), 0, errors.New("convert buffer failed")
}
var addr uint32
lt, ok := ExtractLineType(converted)
if !ok {
return nil, DataLine, 0, NewEncodeDecodeError(fmt.Sprintf("unable to extract line type from: %s", s))
}
if lt == DataLine {
addr = (uint32(converted[1]) * 256) + (uint32(converted[2]))
}
if ok := ValidBufferLength(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected buffer length to be ok, but wasn't: %s", s))
}
if ok := CheckChecksum(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected checksum to be ok, but wasn't:%s", s))
}
return converted, lt, addr, nil
}
// received a line, check that it has a hope of being syntactically correct
func ValidBufferLength(l uint16, converted []byte) bool {
total := uint16(11) //size of just framing in characters (colon, 2 len chars, 4 addr chars, 2 type chars, 2 checksum chars)
if uint16(l) < total {
print("!bad buffer length, can't be smaller than", total, ":", l, "\n")
return false
}
total += uint16(converted[0]) * 2
if l != total {
print("!bad buffer length, expected ", total, " but got", l, " based on ", total*2, "\n")
return false
}
return true
}
// verify line's checksum
func CheckChecksum(l uint16, converted []byte) bool {
sum := uint64(0)
limit := (l - 1) / 2
for i := uint16(0); i < limit; i++ {
sum += uint64(converted[i])
}
complement := ^sum
complement++
checksum := uint8(complement & 0xff)
if checksum != 0 {
print("!bad checksum! expected 0 and got ", checksum,
" from declared checksum of ", converted[limit-1], "\n")
return false
}
return true
}
// extract the line type, 00 (data), 01 (eof), or 02 (esa) and (ok?)
func ExtractLineType(converted []byte) (HexLineType, bool) {
switch converted[3] {
case 0:
return DataLine, true
case 1:
return EndOfFile, true
case 2:
return ExtendedSegmentAddress, true
case 4:
return ExtendedLinearAddress, true
case 5:
return StartLinearAddress, true
case 0x80:
return ExtensionSetParameters, true
case 0x81:
return ExtensionBigLinearAddress, true
case 0x82:
return ExtensionBigEntryPoint, true
case 3:
print("!unimplemented line type in hex transmission [StartSegmentAddress] ")
return DataLine, false
default:
print("!bad buffer type:", converted[3], "\n")
return DataLine, false
}
}
// change buffer of ascii->converted bytes by taking the ascii values (2 per byte) and making them proper bytes
func ConvertBuffer(l uint16, raw []byte) []byte {
//l-1 because the : is skipped so the remaining number of characters must be even
if (l-1)%2 == 1 |
converted := make([]byte, (l-1)/2)
//skip first colon
for i := uint16(1); i < l; i += 2 {
v, ok := bufferValue(i, raw)
if !ok {
return nil // they already sent the error to the other side
}
converted[(i-1)/2] = v
}
return converted
}
// this hits buffer[i] and buffer[i+1] to convert an ascii byte
// returns false to mean you had a bad character in the input
func bufferValue(index uint16, buffer []byte) (uint8, bool) {
i := int(index)
total := uint8(0)
switch buffer[i] {
case '0':
case '1':
total += 16 * 1
case '2':
total += 16 * 2
case '3':
total += 16 * 3
case '4':
total += 16 * 4
case '5':
total += 16 * 5
case '6':
total += 16 * 6
case '7':
total += 16 * 7
case '8':
total += 16 * 8
case '9':
total += 16 * 9
case 'a', 'A':
total += 16 * 10
case 'b', 'B':
total += 16 * 11
case 'c', 'C':
total += 16 * 12
case 'd', 'D':
total += 16 * 13
case 'e', 'E':
total += 16 * 14
case 'f', 'F':
total += 16 * 15
default:
print("!bad character in payload hi byte(number #", i, "):", buffer[i], "\n")
return 0xff, false
}
switch buffer[i+1] {
case '0':
case '1':
total++
case '2':
total += 2
case '3':
total += 3
case '4':
total += 4
case '5':
total += 5
case '6':
total += 6
case '7':
total += 7
case '8':
total += 8
case '9':
total += 9
case 'a', 'A':
total += 10
case 'b', 'B':
total += 11
case 'c', 'C':
total += 12
case 'd', 'D':
total += 13
case 'e', 'E':
total += 14
case 'f', 'F':
total += 15
default:
print("!bad character in payload low byte (number #", i+1, "):", buffer[i+1], "\n")
return 0xff, false
}
return total, true
}
///////////////////////////////////////////////////////////////////////////////////
// ENCODING
///////////////////////////////////////////////////////////////////////////////////
func EncodeDataBytes(raw []byte, offset uint16) string {
if len(raw) > 255 {
log.Fatalf("intel hex format only allows 2 hex characters for the size\n"+
"of a data buffer, it can't be more than 0xff bytes (you have %x)", len(raw))
}
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf(":%02X%04X%02X", len(raw), offset, int(DataLine)))
for _, b := range raw {
buf.WriteString(fmt.Sprintf("%02x", b))
}
cs := createChecksum(raw, offset, DataLine)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
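// Illustrative example (not in the original file): EncodeDataBytes([]byte{0xAB}, 0x0010)
// yields ":01001000ab44" — one data byte, offset 0x0010, record type 00 (DataLine),
// payload "ab", and checksum 0x44 (the two's complement of 0x01+0x10+0x00+0x00+0xAB).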
func EncodeBigEntry(entry uint32) string {
buf := bytes.Buffer{}
raw := []byte{byte(entry & 0xff000000 >> 24), byte(entry & 0x00ff0000 >> 16),
byte(entry & 0x0000ff00 >> 8), byte(entry & 0x000000ff)}
buf.WriteString(fmt.Sprintf(":040000%02X%08X", int(ExtensionBigEntryPoint), entry))
cs := createChecksum(raw, 0, ExtensionBigEntryPoint)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
func EncodeSLA(addr uint32) string {
buf := bytes.Buffer{}
raw := []byte{byte(addr & 0xff000000 >> 24), byte(addr & 0x00ff0000 >> 16),
byte(addr & 0x0000ff00 >> 8), byte(addr & 0x000000ff)}
buf.WriteString(fmt.Sprintf(":040000%02X%08X", int(StartLinearAddress), addr))
cs := createChecksum(raw, 0, StartLinearAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
func EncodeBigAddr(addr uint32) string {
buf := bytes.Buffer{}
raw := []byte{byte(addr & 0xff000000 >> 24), byte(addr & 0x00ff0000 >> 16),
byte(addr & 0x0000ff00 >> 8), byte(addr & 0x000000ff)}
buf.WriteString(fmt.Sprintf(":040000%02X%08X", int(ExtensionBigLinearAddress), addr))
cs := createChecksum(raw, 0, ExtensionBigLinearAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// only pass the most significant 16 bits of the 32 bit base
func EncodeELA(base uint16) string {
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf(":020000%02X%04X", int(ExtendedLinearAddress), base))
raw := []byte{byte(base & 0xff00 >> 8), byte(base & 0x00ff)}
cs := createChecksum(raw, 0, ExtendedLinearAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
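// Illustrative example (not in the original file): EncodeELA(0x0800) yields
// ":020000040800F2", the standard extended linear address record that sets the
// upper 16 bits of the base address to 0x0800 (i.e. a base of 0x08000000).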
// only pass the top 16 bits of 24 bit base
func EncodeESA(base uint16) string {
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf(":020000%02X%04X", int(ExtendedSegmentAddress), base))
raw := []byte{byte(base & 0xff00 >> 8), byte(base & 0x00ff)}
cs := createChecksum(raw, 0, ExtendedSegmentAddress)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// this takes 4 64 bit integers (32 bytes)
func EncodeExtensionSetParameters(v [4]uint64) string {
buf := bytes.Buffer{}
valueBuffer := bytes.Buffer{} //for checksum ease
buf.WriteString(fmt.Sprintf(":200000%02X", int(ExtensionSetParameters)))
for i := 0; i < 4; i++ {
value := v[i]
for p := 7; p >= 0; p-- {
b := byte((value >> (p * 8)) & 0xff)
valueBuffer.WriteByte(b)
}
buf.WriteString(fmt.Sprintf("%016X", value))
}
cs := createChecksum(valueBuffer.Bytes(), 0, ExtensionSetParameters)
buf.WriteString(fmt.Sprintf("%02X", cs))
return buf.String()
}
// tricky: offset only used by the data packet since everything else has 0 offset (not used)
func createChecksum(raw []byte, offset uint16, hlt HexLineType) uint8 {
sum := len(raw)
sum += int(offset & 0xff)
sum += int(offset>>8) & 0xff
sum += int(hlt)
for _, v := range raw {
sum += int(v)
}
sum = ^sum
sum += 1
sum = sum & 0xff
return uint8(sum)
}
| {
print("!bad payload, expected even number of hex bytes but got:", l-1, "\n")
return nil
} | conditional_block |
columnar.rs | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! A columnar representation of ((Key, Val), Time, i64) data suitable for in-memory
//! reads and persistent storage.
use std::mem::size_of;
use std::{cmp, fmt};
use arrow2::buffer::Buffer;
use arrow2::offset::OffsetsBuffer;
use arrow2::types::Index;
pub mod arrow;
pub mod parquet;
/// The maximum allowed amount of total key data (similarly val data) in a
/// single ColumnarBatch.
///
/// Note that somewhat counter-intuitively, this also includes offsets (counting
/// as 4 bytes each) in the definition of "key/val data".
///
/// TODO: The limit on the amount of {key,val} data is because we use i32
/// offsets in parquet; this won't change. However, we include the offsets in
/// the size because the parquet library we use currently maps each Array 1:1
/// with a parquet "page" (so for a "binary" column this is both the offsets and
/// the data). The parquet format internally stores the size of a page in an
/// i32, so if this gets too big, our library overflows it and writes bad data.
/// There's no reason it needs to map an Array 1:1 to a page (it could instead
/// be 1:1 with a "column chunk", which contains 1 or more pages). For now, we
/// work around it.
// TODO(benesch): find a way to express this without `as`.
#[allow(clippy::as_conversions)]
pub const KEY_VAL_DATA_MAX_LEN: usize = i32::MAX as usize;
const BYTES_PER_KEY_VAL_OFFSET: usize = 4;
/// A set of ((Key, Val), Time, Diff) records stored in a columnar
/// representation.
///
/// Note that the data are unsorted, and unconsolidated (so there may be
/// multiple instances of the same ((Key, Val), Time), and some Diffs might be
/// zero, or add up to zero).
///
/// Both Time and Diff are presented externally to persist users as a type
/// parameter that implements [mz_persist_types::Codec64]. Our columnar format
/// intentionally stores them both as i64 columns (as opposed to something like
/// a fixed width binary column) because this allows us additional compression
/// options.
///
/// Also note that we intentionally use an i64 over a u64 for Time. Over the
/// range `[0, i64::MAX]`, the bytes are the same and we've talked at various
/// times about changing Time in mz to an i64. Both millis since unix epoch and
/// nanos since unix epoch easily fit into this range (the latter until some
/// time after year 2200). Using an i64 might be a pessimization for a
/// non-realtime mz source with u64 timestamps in the range `(i64::MAX,
/// u64::MAX]`, but realtime sources are overwhelmingly the common case.
///
/// The i'th key's data is stored in
/// `key_data[key_offsets[i]..key_offsets[i+1]]`. Similarly for val.
///
/// Invariants:
/// - len < usize::MAX (so len+1 can fit in a usize)
/// - key_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + key_data.len() <= KEY_VAL_DATA_MAX_LEN
/// - key_offsets.len() == len + 1
/// - key_offsets are non-decreasing
/// - Each key_offset is <= key_data.len()
/// - key_offsets.first().unwrap() == 0
/// - key_offsets.last().unwrap() == key_data.len()
/// - val_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + val_data.len() <= KEY_VAL_DATA_MAX_LEN
/// - val_offsets.len() == len + 1
/// - val_offsets are non-decreasing
/// - Each val_offset is <= val_data.len()
/// - val_offsets.first().unwrap() == 0
/// - val_offsets.last().unwrap() == val_data.len()
/// - timestamps.len() == len
/// - diffs.len() == len
#[derive(Clone, PartialEq)]
pub struct ColumnarRecords {
len: usize,
key_data: Buffer<u8>,
key_offsets: OffsetsBuffer<i32>,
val_data: Buffer<u8>,
val_offsets: OffsetsBuffer<i32>,
timestamps: Buffer<i64>,
diffs: Buffer<i64>,
}
impl fmt::Debug for ColumnarRecords {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.borrow(), fmt)
}
}
impl ColumnarRecords {
/// The number of (potentially duplicated) ((Key, Val), Time, i64) records
/// stored in Self.
pub fn len(&self) -> usize {
self.len
}
/// The number of logical bytes in the represented data, excluding offsets
/// and lengths.
pub fn goodbytes(&self) -> usize {
self.key_data.len() + self.val_data.len() + 8 * self.timestamps.len() + 8 * self.diffs.len()
}
/// Read the record at `idx`, if there is one.
///
/// Returns None if `idx >= self.len()`.
pub fn get<'a>(&'a self, idx: usize) -> Option<((&'a [u8], &'a [u8]), [u8; 8], [u8; 8])> {
self.borrow().get(idx)
}
/// Borrow Self as a [ColumnarRecordsRef].
fn borrow<'a>(&'a self) -> ColumnarRecordsRef<'a> {
// The ColumnarRecords constructor already validates, so don't bother
// doing it again.
//
// TODO: Forcing everything through a `fn new` would make this more
// obvious.
ColumnarRecordsRef {
len: self.len,
key_data: self.key_data.as_slice(),
key_offsets: self.key_offsets.as_slice(),
val_data: self.val_data.as_slice(),
val_offsets: self.val_offsets.as_slice(),
timestamps: self.timestamps.as_slice(),
diffs: self.diffs.as_slice(),
}
}
/// Iterate through the records in Self.
pub fn iter<'a>(&'a self) -> ColumnarRecordsIter<'a> {
self.borrow().iter()
}
}
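// Illustrative usage sketch (not in the original file); records are normally built
// with the `ColumnarRecordsBuilder` defined below and then read back here:
//
//     let mut builder = ColumnarRecordsBuilder::default();
//     assert!(builder.push(((&b"key"[..], &b"val"[..]), [0u8; 8], [0u8; 8])));
//     let records = builder.finish();
//     assert_eq!(records.len(), 1);
//     assert_eq!(records.get(0), Some(((&b"key"[..], &b"val"[..]), [0u8; 8], [0u8; 8])));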
/// A reference to a [ColumnarRecords].
#[derive(Clone)]
struct ColumnarRecordsRef<'a> {
len: usize,
key_data: &'a [u8],
key_offsets: &'a [i32],
val_data: &'a [u8],
val_offsets: &'a [i32],
timestamps: &'a [i64],
diffs: &'a [i64],
}
impl<'a> fmt::Debug for ColumnarRecordsRef<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_list().entries(self.iter()).finish()
}
}
impl<'a> ColumnarRecordsRef<'a> {
fn validate(&self) -> Result<(), String> {
let key_data_size = self.key_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + self.key_data.len();
if key_data_size > KEY_VAL_DATA_MAX_LEN {
return Err(format!(
"expected encoded key offsets and data size to be less than or equal to {} got {}",
KEY_VAL_DATA_MAX_LEN, key_data_size
));
}
if self.key_offsets.len() != self.len + 1 {
return Err(format!(
"expected {} key_offsets got {}",
self.len + 1,
self.key_offsets.len()
));
}
if let Some(first_key_offset) = self.key_offsets.first() {
if first_key_offset.to_usize() != 0 {
return Err(format!(
"expected first key offset to be 0 got {}",
first_key_offset.to_usize()
));
}
}
if let Some(last_key_offset) = self.key_offsets.last() {
if last_key_offset.to_usize() != self.key_data.len() {
return Err(format!(
"expected {} bytes of key data got {}",
last_key_offset,
self.key_data.len()
));
}
}
let val_data_size = self.val_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + self.val_data.len();
if val_data_size > KEY_VAL_DATA_MAX_LEN {
return Err(format!(
"expected encoded val offsets and data size to be less than or equal to {} got {}",
KEY_VAL_DATA_MAX_LEN, val_data_size
));
}
if self.val_offsets.len() != self.len + 1 {
return Err(format!(
"expected {} val_offsets got {}",
self.len + 1,
self.val_offsets.len()
));
}
if let Some(first_val_offset) = self.val_offsets.first() {
if first_val_offset.to_usize() != 0 {
return Err(format!(
"expected first val offset to be 0 got {}",
first_val_offset.to_usize()
));
}
}
if let Some(last_val_offset) = self.val_offsets.last() {
if last_val_offset.to_usize() != self.val_data.len() {
return Err(format!(
"expected {} bytes of val data got {}",
last_val_offset,
self.val_data.len()
));
}
}
if self.diffs.len() != self.len {
return Err(format!(
"expected {} diffs got {}",
self.len,
self.diffs.len()
));
}
if self.timestamps.len() != self.len {
return Err(format!(
"expected {} timestamps got {}",
self.len,
self.timestamps.len()
));
}
// Unlike most of our Validate methods, this one is called in a
// production code path: when decoding a columnar batch. Only check the
// more expensive assertions in debug.
#[cfg(debug_assertions)]
{
let (mut prev_key, mut prev_val) = (0, 0);
for i in 0..=self.len {
let (key, val) = (self.key_offsets[i], self.val_offsets[i]);
if key < prev_key {
return Err(format!(
"expected non-decreasing key offsets got {} followed by {}",
prev_key, key
));
}
if val < prev_val {
return Err(format!(
"expected non-decreasing val offsets got {} followed by {}",
prev_val, val
));
}
prev_key = key;
prev_val = val;
}
}
Ok(())
}
/// Read the record at `idx`, if there is one.
///
/// Returns None if `idx >= self.len()`.
fn get(&self, idx: usize) -> Option<((&'a [u8], &'a [u8]), [u8; 8], [u8; 8])> {
if idx >= self.len {
return None;
}
// There used to be `debug_assert_eq!(self.validate(), Ok(()))`, but it
// resulted in accidentally O(n^2) behavior in debug mode. Instead, we
// push that responsibility to the ColumnarRecordsRef constructor.
let key_range = self.key_offsets[idx].to_usize()..self.key_offsets[idx + 1].to_usize();
let val_range = self.val_offsets[idx].to_usize()..self.val_offsets[idx + 1].to_usize();
let key = &self.key_data[key_range];
let val = &self.val_data[val_range];
let ts = i64::to_le_bytes(self.timestamps[idx]);
let diff = i64::to_le_bytes(self.diffs[idx]);
Some(((key, val), ts, diff))
}
/// Iterate through the records in Self.
fn iter(&self) -> ColumnarRecordsIter<'a> {
ColumnarRecordsIter {
idx: 0,
records: self.clone(),
}
}
}
/// An [Iterator] over the records in a [ColumnarRecords].
#[derive(Clone, Debug)]
pub struct ColumnarRecordsIter<'a> {
idx: usize,
records: ColumnarRecordsRef<'a>,
}
impl<'a> Iterator for ColumnarRecordsIter<'a> {
type Item = ((&'a [u8], &'a [u8]), [u8; 8], [u8; 8]);
fn size_hint(&self) -> (usize, Option<usize>) {
(self.records.len, Some(self.records.len))
}
fn next(&mut self) -> Option<Self::Item> {
let ret = self.records.get(self.idx);
self.idx += 1;
ret
}
}
impl<'a> ExactSizeIterator for ColumnarRecordsIter<'a> {}
/// An abstraction to incrementally add ((Key, Value), Time, i64) records
/// in a columnar representation, and eventually get back a [ColumnarRecords].
pub struct ColumnarRecordsBuilder {
len: usize,
key_data: Vec<u8>,
key_offsets: Vec<i32>,
val_data: Vec<u8>,
val_offsets: Vec<i32>,
timestamps: Vec<i64>,
diffs: Vec<i64>,
}
impl fmt::Debug for ColumnarRecordsBuilder {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.borrow(), fmt)
}
}
impl Default for ColumnarRecordsBuilder {
fn default() -> Self {
let mut ret = ColumnarRecordsBuilder {
len: 0,
key_data: Vec::new(),
key_offsets: Vec::new(),
val_data: Vec::new(),
val_offsets: Vec::new(),
timestamps: Vec::new(),
diffs: Vec::new(),
};
// Push initial 0 offsets to maintain our invariants, even as we build.
ret.key_offsets.push(0);
ret.val_offsets.push(0);
debug_assert_eq!(ret.borrow().validate(), Ok(()));
ret
}
}
impl ColumnarRecordsBuilder {
/// The number of (potentially duplicated) ((Key, Val), Time, i64) records
/// stored in Self.
pub fn len(&self) -> usize {
self.len
}
/// Borrow Self as a [ColumnarRecordsRef].
fn borrow<'a>(&'a self) -> ColumnarRecordsRef<'a> {
let ret = ColumnarRecordsRef {
len: self.len,
key_data: self.key_data.as_slice(),
key_offsets: self.key_offsets.as_slice(),
val_data: self.val_data.as_slice(),
val_offsets: self.val_offsets.as_slice(),
timestamps: self.timestamps.as_slice(),
diffs: self.diffs.as_slice(),
};
debug_assert_eq!(ret.validate(), Ok(()));
ret
}
/// Reserve space for `additional` more records, based on `key_size_guess` and
/// `val_size_guess`.
///
/// The guesses for key and val sizes are best effort, and if they end up being
/// too small, the underlying buffers will be resized.
pub fn reserve(&mut self, additional: usize, key_size_guess: usize, val_size_guess: usize) {
self.key_offsets.reserve(additional);
self.key_data
.reserve(cmp::min(additional * key_size_guess, KEY_VAL_DATA_MAX_LEN));
self.val_offsets.reserve(additional);
self.val_data
.reserve(cmp::min(additional * val_size_guess, KEY_VAL_DATA_MAX_LEN));
self.timestamps.reserve(additional);
self.diffs.reserve(additional);
debug_assert_eq!(self.borrow().validate(), Ok(()));
}
/// Reserve space for `additional` more records, with exact sizes for the key and value data.
pub fn reserve_exact(&mut self, additional: usize, key_bytes: usize, val_bytes: usize) {
self.key_offsets.reserve(additional);
self.key_data
.reserve(cmp::min(key_bytes, KEY_VAL_DATA_MAX_LEN));
self.val_offsets.reserve(additional);
self.val_data
.reserve(cmp::min(val_bytes, KEY_VAL_DATA_MAX_LEN));
self.timestamps.reserve(additional);
self.diffs.reserve(additional);
debug_assert_eq!(self.borrow().validate(), Ok(()));
}
/// Returns if the given key_offsets+key_data or val_offsets+val_data fits
/// in the limits imposed by ColumnarRecords.
///
/// Note that limit is always [KEY_VAL_DATA_MAX_LEN] in production. It's
/// only override-able here for testing.
pub fn can_fit(&self, key: &[u8], val: &[u8], limit: usize) -> bool {
let key_data_size = (self.key_offsets.len() + 1) * BYTES_PER_KEY_VAL_OFFSET
+ self.key_data.len()
+ key.len();
let val_data_size = (self.val_offsets.len() + 1) * BYTES_PER_KEY_VAL_OFFSET
+ self.val_data.len()
+ val.len();
key_data_size <= limit && val_data_size <= limit
}
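// Illustrative sizing note (not in the original file): a fresh builder already holds
// one zero offset per column, so can_fit(b"abc", b"", limit) compares
// (1 + 1) * 4 + 0 + 3 = 11 bytes of key offsets-plus-data (and 8 bytes on the val
// side) against `limit`.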
/// Add a record to Self.
///
/// Returns whether the record was successfully added. A record will not be
/// added if it exceeds the size limitations of ColumnarBatch. This method
/// is atomic: if it fails, no partial data will have been added.
#[must_use]
pub fn push(&mut self, record: ((&[u8], &[u8]), [u8; 8], [u8; 8])) -> bool |
/// Finalize constructing a [ColumnarRecords].
pub fn finish(self) -> ColumnarRecords {
let ret = ColumnarRecords {
len: self.len,
key_data: Buffer::from(self.key_data),
key_offsets: OffsetsBuffer::try_from(self.key_offsets)
.expect("constructed valid offsets"),
val_data: Buffer::from(self.val_data),
val_offsets: OffsetsBuffer::try_from(self.val_offsets)
.expect("constructed valid offsets"),
timestamps: Buffer::from(self.timestamps),
diffs: Buffer::from(self.diffs),
};
debug_assert_eq!(ret.borrow().validate(), Ok(()));
ret
}
/// Size of an update record as stored in the columnar representation
pub fn columnar_record_size(key_bytes_len: usize, value_bytes_len: usize) -> usize {
(key_bytes_len + BYTES_PER_KEY_VAL_OFFSET)
+ (value_bytes_len + BYTES_PER_KEY_VAL_OFFSET)
+ (2 * size_of::<u64>()) // T and D
}
}
#[cfg(test)]
mod tests {
use mz_persist_types::Codec64;
use super::*;
/// Smoke test some edge cases around empty sets of records and empty keys/vals
///
/// Most of this functionality is also well-exercised in other unit tests as well.
#[mz_ore::test]
fn columnar_records() {
let builder = ColumnarRecordsBuilder::default();
// Empty builder.
let records = builder.finish();
let reads: Vec<_> = records.iter().collect();
assert_eq!(reads, vec![]);
// Empty key and val.
let updates: Vec<((Vec<u8>, Vec<u8>), u64, i64)> = vec![
(("".into(), "".into()), 0, 0),
(("".into(), "".into()), 1, 1),
];
let mut builder = ColumnarRecordsBuilder::default();
for ((key, val), time, diff) in updates.iter() {
assert!(builder.push(((key, val), u64::encode(time), i64::encode(diff))));
}
let records = builder.finish();
let reads: Vec<_> = records
.iter()
.map(|((k, v), t, d)| ((k.to_vec(), v.to_vec()), u64::decode(t), i64::decode(d)))
.collect();
assert_eq!(reads, updates);
}
}
| {
let ((key, val), ts, diff) = record;
// Check size invariants ahead of time so we stay atomic when we can't
// add the record.
if !self.can_fit(key, val, KEY_VAL_DATA_MAX_LEN) {
return false;
}
// NB: We should never hit the following expects because we check them
// above.
self.key_data.extend_from_slice(key);
self.key_offsets
.push(i32::try_from(self.key_data.len()).expect("key_data is smaller than 2GB"));
self.val_data.extend_from_slice(val);
self.val_offsets
.push(i32::try_from(self.val_data.len()).expect("val_data is smaller than 2GB"));
self.timestamps.push(i64::from_le_bytes(ts));
self.diffs.push(i64::from_le_bytes(diff));
self.len += 1;
true
} | identifier_body |
columnar.rs | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! A columnar representation of ((Key, Val), Time, i64) data suitable for in-memory
//! reads and persistent storage.
use std::mem::size_of;
use std::{cmp, fmt};
use arrow2::buffer::Buffer;
use arrow2::offset::OffsetsBuffer;
use arrow2::types::Index;
pub mod arrow;
pub mod parquet;
/// The maximum allowed amount of total key data (similarly val data) in a
/// single ColumnarBatch.
///
/// Note that somewhat counter-intuitively, this also includes offsets (counting
/// as 4 bytes each) in the definition of "key/val data".
///
/// TODO: The limit on the amount of {key,val} data is because we use i32
/// offsets in parquet; this won't change. However, we include the offsets in
/// the size because the parquet library we use currently maps each Array 1:1
/// with a parquet "page" (so for a "binary" column this is both the offsets and
/// the data). The parquet format internally stores the size of a page in an
/// i32, so if this gets too big, our library overflows it and writes bad data.
/// There's no reason it needs to map an Array 1:1 to a page (it could instead
/// be 1:1 with a "column chunk", which contains 1 or more pages). For now, we
/// work around it.
// TODO(benesch): find a way to express this without `as`.
#[allow(clippy::as_conversions)]
pub const KEY_VAL_DATA_MAX_LEN: usize = i32::MAX as usize;
const BYTES_PER_KEY_VAL_OFFSET: usize = 4;
/// A set of ((Key, Val), Time, Diff) records stored in a columnar
/// representation.
///
/// Note that the data are unsorted, and unconsolidated (so there may be
/// multiple instances of the same ((Key, Val), Time), and some Diffs might be
/// zero, or add up to zero).
///
/// Both Time and Diff are presented externally to persist users as a type
/// parameter that implements [mz_persist_types::Codec64]. Our columnar format
/// intentionally stores them both as i64 columns (as opposed to something like
/// a fixed width binary column) because this allows us additional compression
/// options.
///
/// Also note that we intentionally use an i64 over a u64 for Time. Over the
/// range `[0, i64::MAX]`, the bytes are the same and we've talked at various
/// times about changing Time in mz to an i64. Both millis since unix epoch and
/// nanos since unix epoch easily fit into this range (the latter until some
/// time after year 2200). Using an i64 might be a pessimization for a
/// non-realtime mz source with u64 timestamps in the range `(i64::MAX,
/// u64::MAX]`, but realtime sources are overwhelmingly the common case.
///
/// The i'th key's data is stored in
/// `key_data[key_offsets[i]..key_offsets[i+1]]`. Similarly for val.
///
/// Invariants:
/// - len < usize::MAX (so len+1 can fit in a usize)
/// - key_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + key_data.len() <= KEY_VAL_DATA_MAX_LEN
/// - key_offsets.len() == len + 1
/// - key_offsets are non-decreasing
/// - Each key_offset is <= key_data.len()
/// - key_offsets.first().unwrap() == 0
/// - key_offsets.last().unwrap() == key_data.len()
/// - val_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + val_data.len() <= KEY_VAL_DATA_MAX_LEN
/// - val_offsets.len() == len + 1
/// - val_offsets are non-decreasing
/// - Each val_offset is <= val_data.len()
/// - val_offsets.first().unwrap() == 0
/// - val_offsets.last().unwrap() == val_data.len()
/// - timestamps.len() == len
/// - diffs.len() == len
#[derive(Clone, PartialEq)]
pub struct ColumnarRecords {
len: usize,
key_data: Buffer<u8>,
key_offsets: OffsetsBuffer<i32>,
val_data: Buffer<u8>,
val_offsets: OffsetsBuffer<i32>,
timestamps: Buffer<i64>,
diffs: Buffer<i64>,
}
impl fmt::Debug for ColumnarRecords {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.borrow(), fmt)
}
}
impl ColumnarRecords {
/// The number of (potentially duplicated) ((Key, Val), Time, i64) records
/// stored in Self.
pub fn len(&self) -> usize {
self.len
}
/// The number of logical bytes in the represented data, excluding offsets
/// and lengths.
pub fn goodbytes(&self) -> usize {
self.key_data.len() + self.val_data.len() + 8 * self.timestamps.len() + 8 * self.diffs.len()
}
/// Read the record at `idx`, if there is one.
///
/// Returns None if `idx >= self.len()`.
pub fn get<'a>(&'a self, idx: usize) -> Option<((&'a [u8], &'a [u8]), [u8; 8], [u8; 8])> {
self.borrow().get(idx)
}
/// Borrow Self as a [ColumnarRecordsRef].
fn borrow<'a>(&'a self) -> ColumnarRecordsRef<'a> {
// The ColumnarRecords constructor already validates, so don't bother
// doing it again.
//
// TODO: Forcing everything through a `fn new` would make this more
// obvious.
ColumnarRecordsRef {
len: self.len,
key_data: self.key_data.as_slice(),
key_offsets: self.key_offsets.as_slice(),
val_data: self.val_data.as_slice(),
val_offsets: self.val_offsets.as_slice(),
timestamps: self.timestamps.as_slice(),
diffs: self.diffs.as_slice(),
}
}
/// Iterate through the records in Self.
pub fn iter<'a>(&'a self) -> ColumnarRecordsIter<'a> {
self.borrow().iter()
}
}
/// A reference to a [ColumnarRecords].
#[derive(Clone)]
struct ColumnarRecordsRef<'a> {
len: usize,
key_data: &'a [u8],
key_offsets: &'a [i32],
val_data: &'a [u8],
val_offsets: &'a [i32],
timestamps: &'a [i64],
diffs: &'a [i64],
}
impl<'a> fmt::Debug for ColumnarRecordsRef<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_list().entries(self.iter()).finish()
}
}
impl<'a> ColumnarRecordsRef<'a> {
fn validate(&self) -> Result<(), String> {
let key_data_size = self.key_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + self.key_data.len();
if key_data_size > KEY_VAL_DATA_MAX_LEN {
return Err(format!(
"expected encoded key offsets and data size to be less than or equal to {} got {}",
KEY_VAL_DATA_MAX_LEN, key_data_size
));
}
if self.key_offsets.len() != self.len + 1 {
return Err(format!(
"expected {} key_offsets got {}",
self.len + 1,
self.key_offsets.len()
));
}
if let Some(first_key_offset) = self.key_offsets.first() {
if first_key_offset.to_usize() != 0 {
return Err(format!(
"expected first key offset to be 0 got {}",
first_key_offset.to_usize()
));
}
}
if let Some(last_key_offset) = self.key_offsets.last() {
if last_key_offset.to_usize() != self.key_data.len() {
return Err(format!(
"expected {} bytes of key data got {}",
last_key_offset,
self.key_data.len()
));
}
}
let val_data_size = self.val_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + self.val_data.len();
if val_data_size > KEY_VAL_DATA_MAX_LEN {
return Err(format!(
"expected encoded val offsets and data size to be less than or equal to {} got {}",
KEY_VAL_DATA_MAX_LEN, val_data_size
));
}
if self.val_offsets.len() != self.len + 1 {
return Err(format!(
"expected {} val_offsets got {}",
self.len + 1,
self.val_offsets.len()
));
}
if let Some(first_val_offset) = self.val_offsets.first() {
if first_val_offset.to_usize() != 0 {
return Err(format!(
"expected first val offset to be 0 got {}",
first_val_offset.to_usize()
));
}
}
if let Some(last_val_offset) = self.val_offsets.last() {
if last_val_offset.to_usize() != self.val_data.len() {
return Err(format!(
"expected {} bytes of val data got {}",
last_val_offset,
self.val_data.len()
));
}
}
if self.diffs.len() != self.len {
return Err(format!(
"expected {} diffs got {}",
self.len,
self.diffs.len()
));
}
if self.timestamps.len() != self.len {
return Err(format!(
"expected {} timestamps got {}",
self.len,
self.timestamps.len()
));
}
// Unlike most of our Validate methods, this one is called in a
// production code path: when decoding a columnar batch. Only check the
// more expensive assertions in debug.
#[cfg(debug_assertions)]
{
let (mut prev_key, mut prev_val) = (0, 0);
for i in 0..=self.len {
let (key, val) = (self.key_offsets[i], self.val_offsets[i]);
if key < prev_key {
return Err(format!(
"expected non-decreasing key offsets got {} followed by {}",
prev_key, key
));
}
if val < prev_val {
return Err(format!(
"expected non-decreasing val offsets got {} followed by {}",
prev_val, val
));
}
prev_key = key;
prev_val = val;
}
}
Ok(())
}
/// Read the record at `idx`, if there is one.
///
/// Returns None if `idx >= self.len()`.
fn get(&self, idx: usize) -> Option<((&'a [u8], &'a [u8]), [u8; 8], [u8; 8])> {
if idx >= self.len {
return None;
}
// There used to be `debug_assert_eq!(self.validate(), Ok(()))`, but it
// resulted in accidentally O(n^2) behavior in debug mode. Instead, we
// push that responsibility to the ColumnarRecordsRef constructor.
let key_range = self.key_offsets[idx].to_usize()..self.key_offsets[idx + 1].to_usize();
let val_range = self.val_offsets[idx].to_usize()..self.val_offsets[idx + 1].to_usize();
let key = &self.key_data[key_range];
let val = &self.val_data[val_range];
let ts = i64::to_le_bytes(self.timestamps[idx]);
let diff = i64::to_le_bytes(self.diffs[idx]);
Some(((key, val), ts, diff))
}
/// Iterate through the records in Self.
fn iter(&self) -> ColumnarRecordsIter<'a> {
ColumnarRecordsIter {
idx: 0,
records: self.clone(),
}
}
}
/// An [Iterator] over the records in a [ColumnarRecords].
#[derive(Clone, Debug)]
pub struct ColumnarRecordsIter<'a> {
idx: usize,
records: ColumnarRecordsRef<'a>,
}
impl<'a> Iterator for ColumnarRecordsIter<'a> {
type Item = ((&'a [u8], &'a [u8]), [u8; 8], [u8; 8]);
fn size_hint(&self) -> (usize, Option<usize>) {
(self.records.len, Some(self.records.len))
}
fn next(&mut self) -> Option<Self::Item> {
let ret = self.records.get(self.idx);
self.idx += 1;
ret
}
}
impl<'a> ExactSizeIterator for ColumnarRecordsIter<'a> {}
/// An abstraction to incrementally add ((Key, Value), Time, i64) records
/// in a columnar representation, and eventually get back a [ColumnarRecords].
pub struct ColumnarRecordsBuilder {
len: usize,
key_data: Vec<u8>,
key_offsets: Vec<i32>,
val_data: Vec<u8>,
val_offsets: Vec<i32>,
timestamps: Vec<i64>,
diffs: Vec<i64>,
}
impl fmt::Debug for ColumnarRecordsBuilder {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.borrow(), fmt)
}
}
impl Default for ColumnarRecordsBuilder {
fn default() -> Self {
let mut ret = ColumnarRecordsBuilder {
len: 0,
key_data: Vec::new(),
key_offsets: Vec::new(),
val_data: Vec::new(),
val_offsets: Vec::new(),
timestamps: Vec::new(),
diffs: Vec::new(),
};
// Push initial 0 offsets to maintain our invariants, even as we build.
ret.key_offsets.push(0);
ret.val_offsets.push(0);
debug_assert_eq!(ret.borrow().validate(), Ok(()));
ret
}
}
impl ColumnarRecordsBuilder {
/// The number of (potentially duplicated) ((Key, Val), Time, i64) records
/// stored in Self.
pub fn len(&self) -> usize {
self.len
}
| key_data: self.key_data.as_slice(),
key_offsets: self.key_offsets.as_slice(),
val_data: self.val_data.as_slice(),
val_offsets: self.val_offsets.as_slice(),
timestamps: self.timestamps.as_slice(),
diffs: self.diffs.as_slice(),
};
debug_assert_eq!(ret.validate(), Ok(()));
ret
}
/// Reserve space for `additional` more records, based on `key_size_guess` and
/// `val_size_guess`.
///
/// The guesses for key and val sizes are best effort, and if they end up being
/// too small, the underlying buffers will be resized.
pub fn reserve(&mut self, additional: usize, key_size_guess: usize, val_size_guess: usize) {
self.key_offsets.reserve(additional);
self.key_data
.reserve(cmp::min(additional * key_size_guess, KEY_VAL_DATA_MAX_LEN));
self.val_offsets.reserve(additional);
self.val_data
.reserve(cmp::min(additional * val_size_guess, KEY_VAL_DATA_MAX_LEN));
self.timestamps.reserve(additional);
self.diffs.reserve(additional);
debug_assert_eq!(self.borrow().validate(), Ok(()));
}
/// Reserve space for `additional` more records, with exact sizes for the key and value data.
pub fn reserve_exact(&mut self, additional: usize, key_bytes: usize, val_bytes: usize) {
self.key_offsets.reserve(additional);
self.key_data
.reserve(cmp::min(key_bytes, KEY_VAL_DATA_MAX_LEN));
self.val_offsets.reserve(additional);
self.val_data
.reserve(cmp::min(val_bytes, KEY_VAL_DATA_MAX_LEN));
self.timestamps.reserve(additional);
self.diffs.reserve(additional);
debug_assert_eq!(self.borrow().validate(), Ok(()));
}
/// Returns if the given key_offsets+key_data or val_offsets+val_data fits
/// in the limits imposed by ColumnarRecords.
///
/// Note that limit is always [KEY_VAL_DATA_MAX_LEN] in production. It's
/// only override-able here for testing.
pub fn can_fit(&self, key: &[u8], val: &[u8], limit: usize) -> bool {
let key_data_size = (self.key_offsets.len() + 1) * BYTES_PER_KEY_VAL_OFFSET
+ self.key_data.len()
+ key.len();
let val_data_size = (self.val_offsets.len() + 1) * BYTES_PER_KEY_VAL_OFFSET
+ self.val_data.len()
+ val.len();
key_data_size <= limit && val_data_size <= limit
}
/// Add a record to Self.
///
/// Returns whether the record was successfully added. A record will not be
/// added if it exceeds the size limitations of ColumnarBatch. This method
/// is atomic: if it fails, no partial data will have been added.
#[must_use]
pub fn push(&mut self, record: ((&[u8], &[u8]), [u8; 8], [u8; 8])) -> bool {
let ((key, val), ts, diff) = record;
// Check size invariants ahead of time so we stay atomic when we can't
// add the record.
if !self.can_fit(key, val, KEY_VAL_DATA_MAX_LEN) {
return false;
}
// NB: We should never hit the following expects because we check them
// above.
self.key_data.extend_from_slice(key);
self.key_offsets
.push(i32::try_from(self.key_data.len()).expect("key_data is smaller than 2GB"));
self.val_data.extend_from_slice(val);
self.val_offsets
.push(i32::try_from(self.val_data.len()).expect("val_data is smaller than 2GB"));
self.timestamps.push(i64::from_le_bytes(ts));
self.diffs.push(i64::from_le_bytes(diff));
self.len += 1;
true
}
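// Illustrative usage sketch (not in the original file), mirroring the tests below,
// where `u64`/`i64` provide `mz_persist_types::Codec64` encodings for time and diff:
//
//     let mut builder = ColumnarRecordsBuilder::default();
//     assert!(builder.push(((&b"k"[..], &b"v"[..]), u64::encode(&1), i64::encode(&1))));
//     let records = builder.finish();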
/// Finalize constructing a [ColumnarRecords].
pub fn finish(self) -> ColumnarRecords {
let ret = ColumnarRecords {
len: self.len,
key_data: Buffer::from(self.key_data),
key_offsets: OffsetsBuffer::try_from(self.key_offsets)
.expect("constructed valid offsets"),
val_data: Buffer::from(self.val_data),
val_offsets: OffsetsBuffer::try_from(self.val_offsets)
.expect("constructed valid offsets"),
timestamps: Buffer::from(self.timestamps),
diffs: Buffer::from(self.diffs),
};
debug_assert_eq!(ret.borrow().validate(), Ok(()));
ret
}
/// Size of an update record as stored in the columnar representation
pub fn columnar_record_size(key_bytes_len: usize, value_bytes_len: usize) -> usize {
(key_bytes_len + BYTES_PER_KEY_VAL_OFFSET)
+ (value_bytes_len + BYTES_PER_KEY_VAL_OFFSET)
+ (2 * size_of::<u64>()) // T and D
}
}
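// Illustrative sketch, not part of the original file: how the size formula in
// `columnar_record_size` above plays out for one hypothetical record. Each side
// pays one 4-byte offset on top of its data, plus 8 bytes each for time and diff.
#[cfg(test)]
mod columnar_record_size_sketch {
    use super::*;

    #[test]
    fn size_of_small_record() {
        // 3-byte key, 5-byte val => (3 + 4) + (5 + 4) + 16 = 32 bytes.
        assert_eq!(ColumnarRecordsBuilder::columnar_record_size(3, 5), 32);
    }
}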
#[cfg(test)]
mod tests {
use mz_persist_types::Codec64;
use super::*;
/// Smoke test some edge cases around empty sets of records and empty keys/vals
///
/// Most of this functionality is also well-exercised in other unit tests as well.
#[mz_ore::test]
fn columnar_records() {
let builder = ColumnarRecordsBuilder::default();
// Empty builder.
let records = builder.finish();
let reads: Vec<_> = records.iter().collect();
assert_eq!(reads, vec![]);
// Empty key and val.
let updates: Vec<((Vec<u8>, Vec<u8>), u64, i64)> = vec![
(("".into(), "".into()), 0, 0),
(("".into(), "".into()), 1, 1),
];
let mut builder = ColumnarRecordsBuilder::default();
for ((key, val), time, diff) in updates.iter() {
assert!(builder.push(((key, val), u64::encode(time), i64::encode(diff))));
}
let records = builder.finish();
let reads: Vec<_> = records
.iter()
.map(|((k, v), t, d)| ((k.to_vec(), v.to_vec()), u64::decode(t), i64::decode(d)))
.collect();
assert_eq!(reads, updates);
}
} | /// Borrow Self as a [ColumnarRecordsRef].
fn borrow<'a>(&'a self) -> ColumnarRecordsRef<'a> {
let ret = ColumnarRecordsRef {
len: self.len, | random_line_split |
columnar.rs | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! A columnar representation of ((Key, Val), Time, i64) data suitable for in-memory
//! reads and persistent storage.
use std::mem::size_of;
use std::{cmp, fmt};
use arrow2::buffer::Buffer;
use arrow2::offset::OffsetsBuffer;
use arrow2::types::Index;
pub mod arrow;
pub mod parquet;
/// The maximum allowed amount of total key data (similarly val data) in a
/// single ColumnarBatch.
///
/// Note that somewhat counter-intuitively, this also includes offsets (counting
/// as 4 bytes each) in the definition of "key/val data".
///
/// TODO: The limit on the amount of {key,val} data is because we use i32
/// offsets in parquet; this won't change. However, we include the offsets in
/// the size because the parquet library we use currently maps each Array 1:1
/// with a parquet "page" (so for a "binary" column this is both the offsets and
/// the data). The parquet format internally stores the size of a page in an
/// i32, so if this gets too big, our library overflows it and writes bad data.
/// There's no reason it needs to map an Array 1:1 to a page (it could instead
/// be 1:1 with a "column chunk", which contains 1 or more pages). For now, we
/// work around it.
// TODO(benesch): find a way to express this without `as`.
#[allow(clippy::as_conversions)]
pub const KEY_VAL_DATA_MAX_LEN: usize = i32::MAX as usize;
const BYTES_PER_KEY_VAL_OFFSET: usize = 4;
/// A set of ((Key, Val), Time, Diff) records stored in a columnar
/// representation.
///
/// Note that the data are unsorted, and unconsolidated (so there may be
/// multiple instances of the same ((Key, Val), Time), and some Diffs might be
/// zero, or add up to zero).
///
/// Both Time and Diff are presented externally to persist users as a type
/// parameter that implements [mz_persist_types::Codec64]. Our columnar format
/// intentionally stores them both as i64 columns (as opposed to something like
/// a fixed width binary column) because this allows us additional compression
/// options.
///
/// Also note that we intentionally use an i64 over a u64 for Time. Over the
/// range `[0, i64::MAX]`, the bytes are the same and we've talked at various
/// times about changing Time in mz to an i64. Both millis since unix epoch and
/// nanos since unix epoch easily fit into this range (the latter until some
/// time after year 2200). Using an i64 might be a pessimization for a
/// non-realtime mz source with u64 timestamps in the range `(i64::MAX,
/// u64::MAX]`, but realtime sources are overwhelmingly the common case.
///
/// The i'th key's data is stored in
/// `key_data[key_offsets[i]..key_offsets[i+1]]`. Similarly for val.
///
/// Invariants:
/// - len < usize::MAX (so len+1 can fit in a usize)
/// - key_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + key_data.len() <= KEY_VAL_DATA_MAX_LEN
/// - key_offsets.len() == len + 1
/// - key_offsets are non-decreasing
/// - Each key_offset is <= key_data.len()
/// - key_offsets.first().unwrap() == 0
/// - key_offsets.last().unwrap() == key_data.len()
/// - val_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + val_data.len() <= KEY_VAL_DATA_MAX_LEN
/// - val_offsets.len() == len + 1
/// - val_offsets are non-decreasing
/// - Each val_offset is <= val_data.len()
/// - val_offsets.first().unwrap() == 0
/// - val_offsets.last().unwrap() == val_data.len()
/// - timestamps.len() == len
/// - diffs.len() == len
#[derive(Clone, PartialEq)]
pub struct ColumnarRecords {
len: usize,
key_data: Buffer<u8>,
key_offsets: OffsetsBuffer<i32>,
val_data: Buffer<u8>,
val_offsets: OffsetsBuffer<i32>,
timestamps: Buffer<i64>,
diffs: Buffer<i64>,
}
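// Worked example of the layout described above (illustrative, not part of the
// original file): two records with keys "ab" and "c" are stored as
//   key_data    = [a, b, c]
//   key_offsets = [0, 2, 3]
// so key i lives in key_data[key_offsets[i]..key_offsets[i+1]], the offsets are
// non-decreasing, and the final offset equals key_data.len(). The val, timestamp
// and diff columns follow the same pattern with their own buffers.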
impl fmt::Debug for ColumnarRecords {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.borrow(), fmt)
}
}
impl ColumnarRecords {
/// The number of (potentially duplicated) ((Key, Val), Time, i64) records
/// stored in Self.
pub fn len(&self) -> usize {
self.len
}
/// The number of logical bytes in the represented data, excluding offsets
/// and lengths.
pub fn goodbytes(&self) -> usize {
self.key_data.len() + self.val_data.len() + 8 * self.timestamps.len() + 8 * self.diffs.len()
}
/// Read the record at `idx`, if there is one.
///
/// Returns None if `idx >= self.len()`.
pub fn get<'a>(&'a self, idx: usize) -> Option<((&'a [u8], &'a [u8]), [u8; 8], [u8; 8])> {
self.borrow().get(idx)
}
/// Borrow Self as a [ColumnarRecordsRef].
fn borrow<'a>(&'a self) -> ColumnarRecordsRef<'a> {
// The ColumnarRecords constructor already validates, so don't bother
// doing it again.
//
// TODO: Forcing everything through a `fn new` would make this more
// obvious.
ColumnarRecordsRef {
len: self.len,
key_data: self.key_data.as_slice(),
key_offsets: self.key_offsets.as_slice(),
val_data: self.val_data.as_slice(),
val_offsets: self.val_offsets.as_slice(),
timestamps: self.timestamps.as_slice(),
diffs: self.diffs.as_slice(),
}
}
/// Iterate through the records in Self.
pub fn iter<'a>(&'a self) -> ColumnarRecordsIter<'a> {
self.borrow().iter()
}
}
/// A reference to a [ColumnarRecords].
#[derive(Clone)]
struct ColumnarRecordsRef<'a> {
len: usize,
key_data: &'a [u8],
key_offsets: &'a [i32],
val_data: &'a [u8],
val_offsets: &'a [i32],
timestamps: &'a [i64],
diffs: &'a [i64],
}
impl<'a> fmt::Debug for ColumnarRecordsRef<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_list().entries(self.iter()).finish()
}
}
impl<'a> ColumnarRecordsRef<'a> {
fn validate(&self) -> Result<(), String> {
let key_data_size = self.key_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + self.key_data.len();
if key_data_size > KEY_VAL_DATA_MAX_LEN {
return Err(format!(
"expected encoded key offsets and data size to be less than or equal to {} got {}",
KEY_VAL_DATA_MAX_LEN, key_data_size
));
}
if self.key_offsets.len() != self.len + 1 {
return Err(format!(
"expected {} key_offsets got {}",
self.len + 1,
self.key_offsets.len()
));
}
if let Some(first_key_offset) = self.key_offsets.first() {
if first_key_offset.to_usize() != 0 {
return Err(format!(
"expected first key offset to be 0 got {}",
first_key_offset.to_usize()
));
}
}
if let Some(last_key_offset) = self.key_offsets.last() {
if last_key_offset.to_usize() != self.key_data.len() {
return Err(format!(
"expected {} bytes of key data got {}",
last_key_offset,
self.key_data.len()
));
}
}
let val_data_size = self.val_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + self.val_data.len();
if val_data_size > KEY_VAL_DATA_MAX_LEN {
return Err(format!(
"expected encoded val offsets and data size to be less than or equal to {} got {}",
KEY_VAL_DATA_MAX_LEN, val_data_size
));
}
if self.val_offsets.len() != self.len + 1 {
return Err(format!(
"expected {} val_offsets got {}",
self.len + 1,
self.val_offsets.len()
));
}
if let Some(first_val_offset) = self.val_offsets.first() {
if first_val_offset.to_usize() != 0 {
return Err(format!(
"expected first val offset to be 0 got {}",
first_val_offset.to_usize()
));
}
}
if let Some(last_val_offset) = self.val_offsets.last() {
if last_val_offset.to_usize() != self.val_data.len() {
return Err(format!(
"expected {} bytes of val data got {}",
last_val_offset,
self.val_data.len()
));
}
}
if self.diffs.len() != self.len {
return Err(format!(
"expected {} diffs got {}",
self.len,
self.diffs.len()
));
}
if self.timestamps.len() != self.len {
return Err(format!(
"expected {} timestamps got {}",
self.len,
self.timestamps.len()
));
}
// Unlike most of our Validate methods, this one is called in a
// production code path: when decoding a columnar batch. Only check the
// more expensive assertions in debug.
#[cfg(debug_assertions)]
{
let (mut prev_key, mut prev_val) = (0, 0);
for i in 0..=self.len {
let (key, val) = (self.key_offsets[i], self.val_offsets[i]);
if key < prev_key {
return Err(format!(
"expected non-decreasing key offsets got {} followed by {}",
prev_key, key
));
}
if val < prev_val {
return Err(format!(
"expected non-decreasing val offsets got {} followed by {}",
prev_val, val
));
}
prev_key = key;
prev_val = val;
}
}
Ok(())
}
/// Read the record at `idx`, if there is one.
///
/// Returns None if `idx >= self.len()`.
fn get(&self, idx: usize) -> Option<((&'a [u8], &'a [u8]), [u8; 8], [u8; 8])> {
if idx >= self.len {
return None;
}
// There used to be `debug_assert_eq!(self.validate(), Ok(()))`, but it
// resulted in accidentally O(n^2) behavior in debug mode. Instead, we
// push that responsibility to the ColumnarRecordsRef constructor.
let key_range = self.key_offsets[idx].to_usize()..self.key_offsets[idx + 1].to_usize();
let val_range = self.val_offsets[idx].to_usize()..self.val_offsets[idx + 1].to_usize();
let key = &self.key_data[key_range];
let val = &self.val_data[val_range];
let ts = i64::to_le_bytes(self.timestamps[idx]);
let diff = i64::to_le_bytes(self.diffs[idx]);
Some(((key, val), ts, diff))
}
/// Iterate through the records in Self.
fn iter(&self) -> ColumnarRecordsIter<'a> {
ColumnarRecordsIter {
idx: 0,
records: self.clone(),
}
}
}
/// An [Iterator] over the records in a [ColumnarRecords].
#[derive(Clone, Debug)]
pub struct ColumnarRecordsIter<'a> {
idx: usize,
records: ColumnarRecordsRef<'a>,
}
impl<'a> Iterator for ColumnarRecordsIter<'a> {
type Item = ((&'a [u8], &'a [u8]), [u8; 8], [u8; 8]);
fn size_hint(&self) -> (usize, Option<usize>) {
(self.records.len, Some(self.records.len))
}
fn next(&mut self) -> Option<Self::Item> {
let ret = self.records.get(self.idx);
self.idx += 1;
ret
}
}
impl<'a> ExactSizeIterator for ColumnarRecordsIter<'a> {}
/// An abstraction to incrementally add ((Key, Value), Time, i64) records
/// in a columnar representation, and eventually get back a [ColumnarRecords].
pub struct ColumnarRecordsBuilder {
len: usize,
key_data: Vec<u8>,
key_offsets: Vec<i32>,
val_data: Vec<u8>,
val_offsets: Vec<i32>,
timestamps: Vec<i64>,
diffs: Vec<i64>,
}
impl fmt::Debug for ColumnarRecordsBuilder {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.borrow(), fmt)
}
}
impl Default for ColumnarRecordsBuilder {
fn default() -> Self {
let mut ret = ColumnarRecordsBuilder {
len: 0,
key_data: Vec::new(),
key_offsets: Vec::new(),
val_data: Vec::new(),
val_offsets: Vec::new(),
timestamps: Vec::new(),
diffs: Vec::new(),
};
// Push initial 0 offsets to maintain our invariants, even as we build.
ret.key_offsets.push(0);
ret.val_offsets.push(0);
debug_assert_eq!(ret.borrow().validate(), Ok(()));
ret
}
}
impl ColumnarRecordsBuilder {
/// The number of (potentially duplicated) ((Key, Val), Time, i64) records
/// stored in Self.
pub fn len(&self) -> usize {
self.len
}
/// Borrow Self as a [ColumnarRecordsRef].
fn borrow<'a>(&'a self) -> ColumnarRecordsRef<'a> {
let ret = ColumnarRecordsRef {
len: self.len,
key_data: self.key_data.as_slice(),
key_offsets: self.key_offsets.as_slice(),
val_data: self.val_data.as_slice(),
val_offsets: self.val_offsets.as_slice(),
timestamps: self.timestamps.as_slice(),
diffs: self.diffs.as_slice(),
};
debug_assert_eq!(ret.validate(), Ok(()));
ret
}
/// Reserve space for `additional` more records, based on `key_size_guess` and
/// `val_size_guess`.
///
/// The guesses for key and val sizes are best effort, and if they end up being
/// too small, the underlying buffers will be resized.
pub fn reserve(&mut self, additional: usize, key_size_guess: usize, val_size_guess: usize) {
self.key_offsets.reserve(additional);
self.key_data
.reserve(cmp::min(additional * key_size_guess, KEY_VAL_DATA_MAX_LEN));
self.val_offsets.reserve(additional);
self.val_data
.reserve(cmp::min(additional * val_size_guess, KEY_VAL_DATA_MAX_LEN));
self.timestamps.reserve(additional);
self.diffs.reserve(additional);
debug_assert_eq!(self.borrow().validate(), Ok(()));
}
/// Reserve space for `additional` more records, with exact sizes for the key and value data.
pub fn reserve_exact(&mut self, additional: usize, key_bytes: usize, val_bytes: usize) {
self.key_offsets.reserve(additional);
self.key_data
.reserve(cmp::min(key_bytes, KEY_VAL_DATA_MAX_LEN));
self.val_offsets.reserve(additional);
self.val_data
.reserve(cmp::min(val_bytes, KEY_VAL_DATA_MAX_LEN));
self.timestamps.reserve(additional);
self.diffs.reserve(additional);
debug_assert_eq!(self.borrow().validate(), Ok(()));
}
/// Returns if the given key_offsets+key_data or val_offsets+val_data fits
/// in the limits imposed by ColumnarRecords.
///
/// Note that limit is always [KEY_VAL_DATA_MAX_LEN] in production. It's
/// only override-able here for testing.
pub fn can_fit(&self, key: &[u8], val: &[u8], limit: usize) -> bool {
let key_data_size = (self.key_offsets.len() + 1) * BYTES_PER_KEY_VAL_OFFSET
+ self.key_data.len()
+ key.len();
let val_data_size = (self.val_offsets.len() + 1) * BYTES_PER_KEY_VAL_OFFSET
+ self.val_data.len()
+ val.len();
key_data_size <= limit && val_data_size <= limit
}
/// Add a record to Self.
///
/// Returns whether the record was successfully added. A record will not be
/// added if it exceeds the size limitations of ColumnarBatch. This method
/// is atomic: if it fails, no partial data will have been added.
#[must_use]
pub fn push(&mut self, record: ((&[u8], &[u8]), [u8; 8], [u8; 8])) -> bool {
let ((key, val), ts, diff) = record;
// Check size invariants ahead of time so we stay atomic when we can't
// add the record.
if !self.can_fit(key, val, KEY_VAL_DATA_MAX_LEN) {
return false;
}
// NB: We should never hit the following expects because we check them
// above.
self.key_data.extend_from_slice(key);
self.key_offsets
.push(i32::try_from(self.key_data.len()).expect("key_data is smaller than 2GB"));
self.val_data.extend_from_slice(val);
self.val_offsets
.push(i32::try_from(self.val_data.len()).expect("val_data is smaller than 2GB"));
self.timestamps.push(i64::from_le_bytes(ts));
self.diffs.push(i64::from_le_bytes(diff));
self.len += 1;
true
}
/// Finalize constructing a [ColumnarRecords].
pub fn | (self) -> ColumnarRecords {
let ret = ColumnarRecords {
len: self.len,
key_data: Buffer::from(self.key_data),
key_offsets: OffsetsBuffer::try_from(self.key_offsets)
.expect("constructed valid offsets"),
val_data: Buffer::from(self.val_data),
val_offsets: OffsetsBuffer::try_from(self.val_offsets)
.expect("constructed valid offsets"),
timestamps: Buffer::from(self.timestamps),
diffs: Buffer::from(self.diffs),
};
debug_assert_eq!(ret.borrow().validate(), Ok(()));
ret
}
/// Size of an update record as stored in the columnar representation
pub fn columnar_record_size(key_bytes_len: usize, value_bytes_len: usize) -> usize {
(key_bytes_len + BYTES_PER_KEY_VAL_OFFSET)
+ (value_bytes_len + BYTES_PER_KEY_VAL_OFFSET)
+ (2 * size_of::<u64>()) // T and D
}
}
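// Minimal usage sketch of the builder API above (not part of the original file);
// it assumes the masked method name `finish`, which the unit test below also uses.
#[cfg(test)]
#[allow(dead_code)]
fn builder_usage_sketch() -> ColumnarRecords {
    let mut builder = ColumnarRecordsBuilder::default();
    builder.reserve(2, 2, 2);
    // `push` returns false (and adds nothing) once KEY_VAL_DATA_MAX_LEN would be exceeded.
    assert!(builder.push((("k1".as_bytes(), "v1".as_bytes()), 1u64.to_le_bytes(), 1i64.to_le_bytes())));
    assert!(builder.push((("k2".as_bytes(), "v2".as_bytes()), 2u64.to_le_bytes(), (-1i64).to_le_bytes())));
    builder.finish()
}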
#[cfg(test)]
mod tests {
use mz_persist_types::Codec64;
use super::*;
/// Smoke test some edge cases around empty sets of records and empty keys/vals
///
/// Most of this functionality is also well-exercised in other unit tests as well.
#[mz_ore::test]
fn columnar_records() {
let builder = ColumnarRecordsBuilder::default();
// Empty builder.
let records = builder.finish();
let reads: Vec<_> = records.iter().collect();
assert_eq!(reads, vec![]);
// Empty key and val.
let updates: Vec<((Vec<u8>, Vec<u8>), u64, i64)> = vec![
(("".into(), "".into()), 0, 0),
(("".into(), "".into()), 1, 1),
];
let mut builder = ColumnarRecordsBuilder::default();
for ((key, val), time, diff) in updates.iter() {
assert!(builder.push(((key, val), u64::encode(time), i64::encode(diff))));
}
let records = builder.finish();
let reads: Vec<_> = records
.iter()
.map(|((k, v), t, d)| ((k.to_vec(), v.to_vec()), u64::decode(t), i64::decode(d)))
.collect();
assert_eq!(reads, updates);
}
}
| finish | identifier_name |
columnar.rs | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! A columnar representation of ((Key, Val), Time, i64) data suitable for in-memory
//! reads and persistent storage.
use std::mem::size_of;
use std::{cmp, fmt};
use arrow2::buffer::Buffer;
use arrow2::offset::OffsetsBuffer;
use arrow2::types::Index;
pub mod arrow;
pub mod parquet;
/// The maximum allowed amount of total key data (similarly val data) in a
/// single ColumnarBatch.
///
/// Note that somewhat counter-intuitively, this also includes offsets (counting
/// as 4 bytes each) in the definition of "key/val data".
///
/// TODO: The limit on the amount of {key,val} data is because we use i32
/// offsets in parquet; this won't change. However, we include the offsets in
/// the size because the parquet library we use currently maps each Array 1:1
/// with a parquet "page" (so for a "binary" column this is both the offsets and
/// the data). The parquet format internally stores the size of a page in an
/// i32, so if this gets too big, our library overflows it and writes bad data.
/// There's no reason it needs to map an Array 1:1 to a page (it could instead
/// be 1:1 with a "column chunk", which contains 1 or more pages). For now, we
/// work around it.
// TODO(benesch): find a way to express this without `as`.
#[allow(clippy::as_conversions)]
pub const KEY_VAL_DATA_MAX_LEN: usize = i32::MAX as usize;
const BYTES_PER_KEY_VAL_OFFSET: usize = 4;
/// A set of ((Key, Val), Time, Diff) records stored in a columnar
/// representation.
///
/// Note that the data are unsorted, and unconsolidated (so there may be
/// multiple instances of the same ((Key, Val), Time), and some Diffs might be
/// zero, or add up to zero).
///
/// Both Time and Diff are presented externally to persist users as a type
/// parameter that implements [mz_persist_types::Codec64]. Our columnar format
/// intentionally stores them both as i64 columns (as opposed to something like
/// a fixed width binary column) because this allows us additional compression
/// options.
///
/// Also note that we intentionally use an i64 over a u64 for Time. Over the
/// range `[0, i64::MAX]`, the bytes are the same and we've talked at various
/// times about changing Time in mz to an i64. Both millis since unix epoch and
/// nanos since unix epoch easily fit into this range (the latter until some
/// time after year 2200). Using an i64 might be a pessimization for a
/// non-realtime mz source with u64 timestamps in the range `(i64::MAX,
/// u64::MAX]`, but realtime sources are overwhelmingly the common case.
///
/// The i'th key's data is stored in
/// `key_data[key_offsets[i]..key_offsets[i+1]]`. Similarly for val.
///
/// Invariants:
/// - len < usize::MAX (so len+1 can fit in a usize)
/// - key_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + key_data.len() <= KEY_VAL_DATA_MAX_LEN
/// - key_offsets.len() == len + 1
/// - key_offsets are non-decreasing
/// - Each key_offset is <= key_data.len()
/// - key_offsets.first().unwrap() == 0
/// - key_offsets.last().unwrap() == key_data.len()
/// - val_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + val_data.len() <= KEY_VAL_DATA_MAX_LEN
/// - val_offsets.len() == len + 1
/// - val_offsets are non-decreasing
/// - Each val_offset is <= val_data.len()
/// - val_offsets.first().unwrap() == 0
/// - val_offsets.last().unwrap() == val_data.len()
/// - timestamps.len() == len
/// - diffs.len() == len
#[derive(Clone, PartialEq)]
pub struct ColumnarRecords {
len: usize,
key_data: Buffer<u8>,
key_offsets: OffsetsBuffer<i32>,
val_data: Buffer<u8>,
val_offsets: OffsetsBuffer<i32>,
timestamps: Buffer<i64>,
diffs: Buffer<i64>,
}
impl fmt::Debug for ColumnarRecords {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.borrow(), fmt)
}
}
impl ColumnarRecords {
/// The number of (potentially duplicated) ((Key, Val), Time, i64) records
/// stored in Self.
pub fn len(&self) -> usize {
self.len
}
/// The number of logical bytes in the represented data, excluding offsets
/// and lengths.
pub fn goodbytes(&self) -> usize {
self.key_data.len() + self.val_data.len() + 8 * self.timestamps.len() + 8 * self.diffs.len()
}
/// Read the record at `idx`, if there is one.
///
/// Returns None if `idx >= self.len()`.
pub fn get<'a>(&'a self, idx: usize) -> Option<((&'a [u8], &'a [u8]), [u8; 8], [u8; 8])> {
self.borrow().get(idx)
}
/// Borrow Self as a [ColumnarRecordsRef].
fn borrow<'a>(&'a self) -> ColumnarRecordsRef<'a> {
// The ColumnarRecords constructor already validates, so don't bother
// doing it again.
//
// TODO: Forcing everything through a `fn new` would make this more
// obvious.
ColumnarRecordsRef {
len: self.len,
key_data: self.key_data.as_slice(),
key_offsets: self.key_offsets.as_slice(),
val_data: self.val_data.as_slice(),
val_offsets: self.val_offsets.as_slice(),
timestamps: self.timestamps.as_slice(),
diffs: self.diffs.as_slice(),
}
}
/// Iterate through the records in Self.
pub fn iter<'a>(&'a self) -> ColumnarRecordsIter<'a> {
self.borrow().iter()
}
}
/// A reference to a [ColumnarRecords].
#[derive(Clone)]
struct ColumnarRecordsRef<'a> {
len: usize,
key_data: &'a [u8],
key_offsets: &'a [i32],
val_data: &'a [u8],
val_offsets: &'a [i32],
timestamps: &'a [i64],
diffs: &'a [i64],
}
impl<'a> fmt::Debug for ColumnarRecordsRef<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_list().entries(self.iter()).finish()
}
}
impl<'a> ColumnarRecordsRef<'a> {
fn validate(&self) -> Result<(), String> {
let key_data_size = self.key_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + self.key_data.len();
if key_data_size > KEY_VAL_DATA_MAX_LEN {
return Err(format!(
"expected encoded key offsets and data size to be less than or equal to {} got {}",
KEY_VAL_DATA_MAX_LEN, key_data_size
));
}
if self.key_offsets.len() != self.len + 1 {
return Err(format!(
"expected {} key_offsets got {}",
self.len + 1,
self.key_offsets.len()
));
}
if let Some(first_key_offset) = self.key_offsets.first() {
if first_key_offset.to_usize() != 0 {
return Err(format!(
"expected first key offset to be 0 got {}",
first_key_offset.to_usize()
));
}
}
if let Some(last_key_offset) = self.key_offsets.last() {
if last_key_offset.to_usize() != self.key_data.len() {
return Err(format!(
"expected {} bytes of key data got {}",
last_key_offset,
self.key_data.len()
));
}
}
let val_data_size = self.val_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + self.val_data.len();
if val_data_size > KEY_VAL_DATA_MAX_LEN {
return Err(format!(
"expected encoded val offsets and data size to be less than or equal to {} got {}",
KEY_VAL_DATA_MAX_LEN, val_data_size
));
}
if self.val_offsets.len() != self.len + 1 |
if let Some(first_val_offset) = self.val_offsets.first() {
if first_val_offset.to_usize() != 0 {
return Err(format!(
"expected first val offset to be 0 got {}",
first_val_offset.to_usize()
));
}
}
if let Some(last_val_offset) = self.val_offsets.last() {
if last_val_offset.to_usize() != self.val_data.len() {
return Err(format!(
"expected {} bytes of val data got {}",
last_val_offset,
self.val_data.len()
));
}
}
if self.diffs.len() != self.len {
return Err(format!(
"expected {} diffs got {}",
self.len,
self.diffs.len()
));
}
if self.timestamps.len() != self.len {
return Err(format!(
"expected {} timestamps got {}",
self.len,
self.timestamps.len()
));
}
// Unlike most of our Validate methods, this one is called in a
// production code path: when decoding a columnar batch. Only check the
// more expensive assertions in debug.
#[cfg(debug_assertions)]
{
let (mut prev_key, mut prev_val) = (0, 0);
for i in 0..=self.len {
let (key, val) = (self.key_offsets[i], self.val_offsets[i]);
if key < prev_key {
return Err(format!(
"expected non-decreasing key offsets got {} followed by {}",
prev_key, key
));
}
if val < prev_val {
return Err(format!(
"expected non-decreasing val offsets got {} followed by {}",
prev_val, val
));
}
prev_key = key;
prev_val = val;
}
}
Ok(())
}
/// Read the record at `idx`, if there is one.
///
/// Returns None if `idx >= self.len()`.
fn get(&self, idx: usize) -> Option<((&'a [u8], &'a [u8]), [u8; 8], [u8; 8])> {
if idx >= self.len {
return None;
}
// There used to be `debug_assert_eq!(self.validate(), Ok(()))`, but it
// resulted in accidentally O(n^2) behavior in debug mode. Instead, we
// push that responsibility to the ColumnarRecordsRef constructor.
let key_range = self.key_offsets[idx].to_usize()..self.key_offsets[idx + 1].to_usize();
let val_range = self.val_offsets[idx].to_usize()..self.val_offsets[idx + 1].to_usize();
let key = &self.key_data[key_range];
let val = &self.val_data[val_range];
let ts = i64::to_le_bytes(self.timestamps[idx]);
let diff = i64::to_le_bytes(self.diffs[idx]);
Some(((key, val), ts, diff))
}
/// Iterate through the records in Self.
fn iter(&self) -> ColumnarRecordsIter<'a> {
ColumnarRecordsIter {
idx: 0,
records: self.clone(),
}
}
}
/// An [Iterator] over the records in a [ColumnarRecords].
#[derive(Clone, Debug)]
pub struct ColumnarRecordsIter<'a> {
idx: usize,
records: ColumnarRecordsRef<'a>,
}
impl<'a> Iterator for ColumnarRecordsIter<'a> {
type Item = ((&'a [u8], &'a [u8]), [u8; 8], [u8; 8]);
fn size_hint(&self) -> (usize, Option<usize>) {
(self.records.len, Some(self.records.len))
}
fn next(&mut self) -> Option<Self::Item> {
let ret = self.records.get(self.idx);
self.idx += 1;
ret
}
}
impl<'a> ExactSizeIterator for ColumnarRecordsIter<'a> {}
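// Illustrative sketch, not part of the original file: consuming a ColumnarRecords
// by iterating borrowed records and decoding the fixed 8-byte time/diff columns.
#[cfg(test)]
#[allow(dead_code)]
fn iterate_records_sketch(records: &ColumnarRecords) {
    for ((key, val), ts, diff) in records.iter() {
        // Time and diff are stored as little-endian i64s, as described above.
        let ts = i64::from_le_bytes(ts);
        let diff = i64::from_le_bytes(diff);
        println!("key={:?} val={:?} time={} diff={}", key, val, ts, diff);
    }
}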
/// An abstraction to incrementally add ((Key, Value), Time, i64) records
/// in a columnar representation, and eventually get back a [ColumnarRecords].
pub struct ColumnarRecordsBuilder {
len: usize,
key_data: Vec<u8>,
key_offsets: Vec<i32>,
val_data: Vec<u8>,
val_offsets: Vec<i32>,
timestamps: Vec<i64>,
diffs: Vec<i64>,
}
impl fmt::Debug for ColumnarRecordsBuilder {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.borrow(), fmt)
}
}
impl Default for ColumnarRecordsBuilder {
fn default() -> Self {
let mut ret = ColumnarRecordsBuilder {
len: 0,
key_data: Vec::new(),
key_offsets: Vec::new(),
val_data: Vec::new(),
val_offsets: Vec::new(),
timestamps: Vec::new(),
diffs: Vec::new(),
};
// Push initial 0 offsets to maintain our invariants, even as we build.
ret.key_offsets.push(0);
ret.val_offsets.push(0);
debug_assert_eq!(ret.borrow().validate(), Ok(()));
ret
}
}
impl ColumnarRecordsBuilder {
/// The number of (potentially duplicated) ((Key, Val), Time, i64) records
/// stored in Self.
pub fn len(&self) -> usize {
self.len
}
/// Borrow Self as a [ColumnarRecordsRef].
fn borrow<'a>(&'a self) -> ColumnarRecordsRef<'a> {
let ret = ColumnarRecordsRef {
len: self.len,
key_data: self.key_data.as_slice(),
key_offsets: self.key_offsets.as_slice(),
val_data: self.val_data.as_slice(),
val_offsets: self.val_offsets.as_slice(),
timestamps: self.timestamps.as_slice(),
diffs: self.diffs.as_slice(),
};
debug_assert_eq!(ret.validate(), Ok(()));
ret
}
/// Reserve space for `additional` more records, based on `key_size_guess` and
/// `val_size_guess`.
///
/// The guesses for key and val sizes are best effort, and if they end up being
/// too small, the underlying buffers will be resized.
pub fn reserve(&mut self, additional: usize, key_size_guess: usize, val_size_guess: usize) {
self.key_offsets.reserve(additional);
self.key_data
.reserve(cmp::min(additional * key_size_guess, KEY_VAL_DATA_MAX_LEN));
self.val_offsets.reserve(additional);
self.val_data
.reserve(cmp::min(additional * val_size_guess, KEY_VAL_DATA_MAX_LEN));
self.timestamps.reserve(additional);
self.diffs.reserve(additional);
debug_assert_eq!(self.borrow().validate(), Ok(()));
}
/// Reserve space for `additional` more records, with exact sizes for the key and value data.
pub fn reserve_exact(&mut self, additional: usize, key_bytes: usize, val_bytes: usize) {
self.key_offsets.reserve(additional);
self.key_data
.reserve(cmp::min(key_bytes, KEY_VAL_DATA_MAX_LEN));
self.val_offsets.reserve(additional);
self.val_data
.reserve(cmp::min(val_bytes, KEY_VAL_DATA_MAX_LEN));
self.timestamps.reserve(additional);
self.diffs.reserve(additional);
debug_assert_eq!(self.borrow().validate(), Ok(()));
}
/// Returns if the given key_offsets+key_data or val_offsets+val_data fits
/// in the limits imposed by ColumnarRecords.
///
/// Note that limit is always [KEY_VAL_DATA_MAX_LEN] in production. It's
/// only override-able here for testing.
pub fn can_fit(&self, key: &[u8], val: &[u8], limit: usize) -> bool {
let key_data_size = (self.key_offsets.len() + 1) * BYTES_PER_KEY_VAL_OFFSET
+ self.key_data.len()
+ key.len();
let val_data_size = (self.val_offsets.len() + 1) * BYTES_PER_KEY_VAL_OFFSET
+ self.val_data.len()
+ val.len();
key_data_size <= limit && val_data_size <= limit
}
/// Add a record to Self.
///
/// Returns whether the record was successfully added. A record will not be
/// added if it exceeds the size limitations of ColumnarBatch. This method
/// is atomic: if it fails, no partial data will have been added.
#[must_use]
pub fn push(&mut self, record: ((&[u8], &[u8]), [u8; 8], [u8; 8])) -> bool {
let ((key, val), ts, diff) = record;
// Check size invariants ahead of time so we stay atomic when we can't
// add the record.
if !self.can_fit(key, val, KEY_VAL_DATA_MAX_LEN) {
return false;
}
// NB: We should never hit the following expects because we check them
// above.
self.key_data.extend_from_slice(key);
self.key_offsets
.push(i32::try_from(self.key_data.len()).expect("key_data is smaller than 2GB"));
self.val_data.extend_from_slice(val);
self.val_offsets
.push(i32::try_from(self.val_data.len()).expect("val_data is smaller than 2GB"));
self.timestamps.push(i64::from_le_bytes(ts));
self.diffs.push(i64::from_le_bytes(diff));
self.len += 1;
true
}
/// Finalize constructing a [ColumnarRecords].
pub fn finish(self) -> ColumnarRecords {
let ret = ColumnarRecords {
len: self.len,
key_data: Buffer::from(self.key_data),
key_offsets: OffsetsBuffer::try_from(self.key_offsets)
.expect("constructed valid offsets"),
val_data: Buffer::from(self.val_data),
val_offsets: OffsetsBuffer::try_from(self.val_offsets)
.expect("constructed valid offsets"),
timestamps: Buffer::from(self.timestamps),
diffs: Buffer::from(self.diffs),
};
debug_assert_eq!(ret.borrow().validate(), Ok(()));
ret
}
/// Size of an update record as stored in the columnar representation
pub fn columnar_record_size(key_bytes_len: usize, value_bytes_len: usize) -> usize {
(key_bytes_len + BYTES_PER_KEY_VAL_OFFSET)
+ (value_bytes_len + BYTES_PER_KEY_VAL_OFFSET)
+ (2 * size_of::<u64>()) // T and D
}
}
#[cfg(test)]
mod tests {
use mz_persist_types::Codec64;
use super::*;
/// Smoke test some edge cases around empty sets of records and empty keys/vals
///
/// Most of this functionality is also well-exercised in other unit tests as well.
#[mz_ore::test]
fn columnar_records() {
let builder = ColumnarRecordsBuilder::default();
// Empty builder.
let records = builder.finish();
let reads: Vec<_> = records.iter().collect();
assert_eq!(reads, vec![]);
// Empty key and val.
let updates: Vec<((Vec<u8>, Vec<u8>), u64, i64)> = vec![
(("".into(), "".into()), 0, 0),
(("".into(), "".into()), 1, 1),
];
let mut builder = ColumnarRecordsBuilder::default();
for ((key, val), time, diff) in updates.iter() {
assert!(builder.push(((key, val), u64::encode(time), i64::encode(diff))));
}
let records = builder.finish();
let reads: Vec<_> = records
.iter()
.map(|((k, v), t, d)| ((k.to_vec(), v.to_vec()), u64::decode(t), i64::decode(d)))
.collect();
assert_eq!(reads, updates);
}
}
| {
return Err(format!(
"expected {} val_offsets got {}",
self.len + 1,
self.val_offsets.len()
));
} | conditional_block |
Transformer_prac.py | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
dtype = torch.FloatTensor
sentence =['ich mochte ein bier P', 'S i want a beer','i want a beer E']
# S : Symbol that shows starting point of decoding input
# E : Symbol that shows starting of decoding output
# P : Symbol that will fill in blank sequence if current batch data size is short than time steps
# Transformer parameter
src_voca = {'P':0, 'ich':1, 'mochte':2, 'ein':3, 'bier':4} # dictionary; P is the padding token (index 0)
src_voca_size = len(src_voca)
tgt_voca = {'P':0, 'i':1, 'want':2, 'a':3, 'beer':4, 'S':5, 'E':6}
number_dict = {i:w for i, w in enumerate(tgt_voca)}
tgt_voca_size = len(tgt_voca)
src_len = 5
tgt_len = 5
d_model = 512 # embedding size
d_ff = 2048 # feedforward dimension
n_layers = 6 # number of encoder/decoder layers
n_head = 8 # number of heads in multi-head attention
d_k = d_v = 64 # dimension of K (must equal Q's dimension) and V
def make_batch(sentence):
input_batch = [[src_voca[n] for n in sentence[0].split()]] # build as a list
output_batch = [[tgt_voca[n] for n in sentence[1].split()]]
target_batch = [[tgt_voca[n] for n in sentence[2].split()]]
return Variable(torch.LongTensor(input_batch)), Variable(torch.LongTensor(output_batch)), Variable(torch.LongTensor(target_batch))
# Variable = autograd: requires_grad defaults to False; it supports every API defined for tensors
# e.g. x = Variable(torch.ones(2,2), requires_grad=True)
# a Variable is used to store the loss computed by the loss function so that the model parameter x can be trained
# calling loss.backward(), which computes ∂loss/∂x, makes PyTorch store the gradient of the variable x
# requires_grad marks whether the variable x is trainable; frozen (pretrained) weights are not trained
def get_sinusoid_encoding_table(n_position, d_model): # positional encoding
def cal_angle(position, hid_idx):
return position/np.power(10000, 2*(hid_idx // 2)/d_model) # 10000^(2i/d_model)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_model)] # hid_j runs from 0 to d_model-1
sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:,0::2] = np.sin(sinusoid_table[:,0::2])
# x[start:end:skip] takes elements starting at start, stepping by skip
# ex) l = range(20)
# l[1::3] = [1,4,7,10,13,16,19]
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:,1::2])
return torch.FloatTensor(sinusoid_table)
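# Illustrative check, not part of the original script: the table returned above has
# shape (n_position, d_model), with sin terms in even columns and cos terms in odd ones.
def _sinusoid_table_sketch():
    pe = get_sinusoid_encoding_table(src_len + 1, d_model)
    print(pe.shape)     # torch.Size([6, 512]) with the parameters defined above
    print(pe[0, :4])    # tensor([0., 1., 0., 1.]) -> sin(0)=0, cos(0)=1 at position 0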
def get_attn_pad_mask(seq_q, seq_k):
batch_size, len_q = seq_q.size()
batch_size, len_k = seq_k.size()
pad_attn_mask = seq_k.data.eq(0).unsqueeze(1) # eq : element-wise equality
# x = torch.tensor([1,2,3,4]) # dim = 1
# torch.unsqueeze(x,0) = tensor([[1,2,3,4]])
# torch.unsqueeze(x,1) = tensor([[1],
# [2],
# [3],
# [4]])
return pad_attn_mask.expand(batch_size, len_q, len_k)
# x = torch.tensor([[1],[2],[3]])
# x.size() = torch.size([3,1])
# x.expand(3,4) = tensor([[1,1,1,1],[2,2,2,2],[3,3,3,3]])
# x.expand(-1,4) = tensor([[1,1,1,1],[2,2,2,2],[3,3,3,3]]) # -1 means that dimension keeps its original size
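# Quick illustration, not in the original script: get_attn_pad_mask marks positions
# holding the padding index 0 as True (masked) for the German source sentence above.
def _pad_mask_sketch():
    q = torch.LongTensor([[1, 2, 3, 4, 0]])   # 'ich mochte ein bier P'
    mask = get_attn_pad_mask(q, q)            # shape (1, 5, 5)
    return mask[0, 0]                         # tensor([False, False, False, False, True])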
def get_attn_subsequent_mask(seq):
attn_shape = [seq.size(0), seq.size(1), seq.size(1)]
subsequent_mask = np.triu(np.ones(attn_shape), k=1) # keep ones on and above the k-th diagonal, zeros below it
# e.g. np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) gives
# array([[ 1, 2, 3],
# [ 4, 5, 6],
# [ 0, 8, 9],
# [ 0, 0, 12]])
subsequent_mask = torch.from_numpy(subsequent_mask).byte() # convert the numpy array to a torch tensor
return subsequent_mask
class ScaledDotProduct(nn.Module):
def __init__(self):
super(ScaledDotProduct,self).__init__()
self.softmax = nn.Softmax(dim = -1) # dim of softmax = the dimension along which softmax is computed
# NLLLoss uses LogSoftmax internally
self.const = np.sqrt(d_k) # scaling constant sqrt(d_k), where d_k is the key dimension defined above
def forward(self, Q, K, V, att_mask): # att_mask masks out padded / future positions
score = torch.matmul(Q,K.transpose(-1,-2))/self.const # transpose: swaps the given dim0 and dim1
score.masked_fill_(att_mask, -1e9) # masked!
# masked_fill_(mask, value): mask is boolean; positions where mask is True are filled with value
attn = self.softmax(score) # attn = attention distribution
context = torch.matmul(attn, V)
return context, attn
############################################################
# What is self?
# class Foo:
# def func1(): # defining it without a self argument is not an error by itself
# print("fuckck")
# def func2(self):
# print("fuck!!")
# f = Foo() # create an instance of the class
# f.func2() => func2 prints normally # instance method call -> self is func2's only argument, so no input is needed
# (no input is needed because Python automatically passes the value for the method's self argument)
# f.func1() -> raises an error: even though there is no self parameter, Python still passes the instance automatically
# inside a class, self is the instance that refers to the class itself!
############################################################
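# Minimal usage sketch of ScaledDotProduct, not part of the original script; the
# shapes are hypothetical: (batch, n_head, seq_len, d_k) inputs and a boolean mask.
def _scaled_dot_product_sketch():
    q = k = v = torch.rand(1, n_head, 5, d_k)
    mask = torch.zeros(1, n_head, 5, 5).bool()          # nothing masked out
    context, attn = ScaledDotProduct()(q, k, v, mask)
    return context.shape, attn.shape                    # (1, 8, 5, 64), (1, 8, 5, 5)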
class MultiHeadAttention(nn.Module):
def __init__(self):
super(MultiHeadAttention, self).__init__() # d_v = d_k
self.W_Q = nn.Linear(d_model, d_k * n_head) # run n_head heads in parallel # d_k x n_head because the head outputs are concatenated
self.W_K = nn.Linear(d_model, d_k * n_head) #
self.W_V = nn.Linear(d_model, d_k * n_head)
def forward(self,Q, K, V, att_mask): # in the encoder Q, K, V are all the same; in the decoder K and V are the same but Q differs
residual = Q
batch_size = Q.size(0)
q_s = self.W_Q(Q).view(batch_size, -1, n_head, d_k).transpose(1,2)
k_s = self.W_K(K).view(batch_size, -1, n_head, d_k).transpose(1,2)
v_s = self.W_V(V).view(batch_size, -1, n_head, d_v).transpose(1,2)
att_mask = att_mask.unsqueeze(1).repeat(1, n_head, 1,1) # unsqueeze(1) inserts a new axis so the mask can be repeated per head
context, attn = ScaledDotProduct()(q_s, k_s, v_s, att_mask)
context = context.transpose(1,2).contiguous().view(batch_size, -1, n_head * d_v)
# contiguous(): returns a contiguous tensor holding the same data as self
# (i.e. the data is laid out contiguously in memory again after the transpose)
output = nn.Linear(n_head*d_v, d_model)(context) # pass the concatenated heads through one more weight matrix
return nn.LayerNorm(d_model)(output + residual), attn
class PositionwiseFFNN(nn.Module):
def __init__(self):
super(PositionwiseFFNN, self).__init__() # conv1d with kernel_size=1 acts per position along the sequence, like a linear layer (vs. conv2d over 2-D maps)
# W1 = d_model x d_ff
self.linear1 = nn.Conv1d(in_channels = d_model, out_channels = d_ff, kernel_size=1)
# W2 = d_ff x d_model
self.linear2 = nn.Conv1d(in_channels = d_ff, out_channels = d_model, kernel_size=1)
self.relu = nn.ReLU()
def forward(self, input):
residual = input
output = self.linear1(input.transpose(1,2))
output = self.relu(output)
output = self.linear2(output).transpose(1,2)
return nn.LayerNorm(d_model)(output + residual)
class EncoderLayer(nn.Module):
def __init__(self):
super(EncoderLayer,self).__init__()
self.enc_self_attn = MultiHeadAttention()
self.PWfeedforward = PositionwiseFFNN()
def forward(self, enc_input, enc_self_attn_mask):
enc_output, attn = self.enc_self_attn(enc_input, enc_input, enc_input, enc_self_attn_mask)
enc_output = self.PWfeedforward(enc_output)
return enc_output, attn
class Encoder(nn.Module):
def __init__(self):
super(Encoder,self).__init__()
self.src_emb = nn.Embedding(src_voca_size, d_model)
# Embedding: holds a lookup table used for the embedding
self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(src_len+1, d_model),freeze = True)
self.layer = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])
def forward(self, enc_input):
enc_output = self.src_emb(enc_input)+self.pos_emb(torch.LongTensor([[1,2,3,4,0]]))
enc_self_attn_mask = get_attn_pad_mask(enc_input, enc_input)
enc_self_attns = []
for layer in self.layer:
enc_output, enc_self_attn = layer(enc_output, enc_self_attn_mask)
enc_self_attns.append(enc_self_attn) # append works like concatenating onto the list
return enc_output, enc_self_attns
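# Hypothetical smoke test for the Encoder above, not part of the original script.
def _encoder_sketch():
    enc_input, _, _ = make_batch(sentence)     # (1, 5) source token ids
    enc_output, attns = Encoder()(enc_input)
    return enc_output.shape, len(attns)        # torch.Size([1, 5, 512]), n_layers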
class DecoderLayer(nn.Module):
def __init__(self):
super(DecoderLayer, self).__init__()
self.dec_self_attn = MultiHeadAttention()
self.dec_enc_attn = MultiHeadAttention()
self.PWfeedforward = PositionwiseFFNN()
def forward(self, dec_input, enc_output, dec_self_attn_mask, dec_enc_attn_mask):
dec_output, dec_self_attn = self.dec_self_attn(dec_input, dec_input, dec_input, dec_self_attn_mask)
dec_output, dec_end_attn = self.dec_enc_attn(dec_output, enc_output, enc_output, dec_enc_attn_mask)
dec_output = self.PWfeedforward(dec_output)
return dec_output, dec_self_attn, dec_end_attn
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.tgt_emb = nn.Embedding(tgt_voca_size, d_model)
self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(tgt_len+1, d_model), freeze = True)
self.layers = nn.ModuleList([DecoderLayer() for _ in range(n_layers)])
def forward(self, dec_input, enc_input, enc_output):
dec_output = self.tgt_emb(dec_input)+self.pos_emb(torch.LongTensor([[5,1,2,3,4]]))
dec_self_attn_pad_mask = get_attn_pad_mask(dec_input, dec_input)
dec_self_attn_subsequent_mask = get_attn_subsequent_mask(dec_input)
dec_self_attn_mask = torch.gt((dec_self_attn_pad_mask+dec_self_attn_subsequent_mask),0)
dec_enc_attn_mask = get_attn_pad_mask(dec_input, enc_input)
dec_self_attn, dec_enc_attn = [],[]
for layer in self.layers:
dec_output, dec_self_attn, dec_enc_attn = layer(dec_outp | .contiguous().view(-1))
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
loss.backward()
optimizer.step() | ut, enc_output, dec_self_attn_mask, dec_enc_attn_mask)
dec_self_attn.append(dec_self_attn)
dec_enc_attn.append(dec_enc_attn)
return dec_output, dec_self_attn, dec_enc_attn
class Transformer(nn.Module):
def __init__(self):
super(Transformer, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder()
self.projection = nn.Linear(d_model, tgt_voca_size, bias = False)
self.softmax = nn.Softmax()
def forward(self, enc_input, dec_input):
enc_output, enc_self_attn = self.encoder(enc_input)
dec_output, dec_self_attn, dec_enc_attn = self.decoder(dec_input, enc_input, enc_output)
dec_logit = self.projection(dec_output)
return dec_logit.view(-1, dec_logit.size(-1)), enc_self_attn, dec_self_attn, dec_enc_attn
model = Transformer()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr = 0.001)
for epoch in range(20):
optimizer.zero_grad()
enc_input, dec_input, target_batch = make_batch(sentence)
outputs, enc_self_attns, dec_self_attns, dec_enc_attns = model(enc_input, dec_input)
loss = criterion(outputs, target_batch | identifier_body |
Transformer_prac.py | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
dtype = torch.FloatTensor
sentence =['ich mochte ein bier P', 'S i want a beer','i want a beer E']
# S : Symbol that shows starting point of decoding input
# E : Symbol that shows starting of decoding output
# P : Symbol that will fill in blank sequence if current batch data size is short than time steps
# Transformer parameter
src_voca = {'P':0, 'ich':1, 'mochte':2, 'ein':3, 'bier':4} # dictionary; P is the padding token (index 0)
src_voca_size = len(src_voca)
tgt_voca = {'P':0, 'i':1, 'want':2, 'a':3, 'beer':4, 'S':5, 'E':6}
number_dict = {i:w for i, w in enumerate(tgt_voca)}
tgt_voca_size = len(tgt_voca)
src_len = 5
tgt_len = 5
d_model = 512 # embedding size
d_ff = 2048 # feedforward dimension
n_layers = 6 # number of encoder/decoder layers
n_head = 8 # number of heads in multi-head attention
d_k = d_v = 64 # dimension of K (must equal Q's dimension) and V
def make_batch(sentence):
input_batch = [[src_voca[n] for n in sentence[0].split()]] # build as a list
output_batch = [[tgt_voca[n] for n in sentence[1].split()]]
target_batch = [[tgt_voca[n] for n in sentence[2].split()]]
return Variable(torch.LongTensor(input_batch)), Variable(torch.LongTensor(output_batch)), Variable(torch.LongTensor(target_batch))
# Variable = autograd: requires_grad defaults to False; it supports every API defined for tensors
# e.g. x = Variable(torch.ones(2,2), requires_grad=True)
# a Variable is used to store the loss computed by the loss function so that the model parameter x can be trained
# calling loss.backward(), which computes ∂loss/∂x, makes PyTorch store the gradient of the variable x
# requires_grad marks whether the variable x is trainable; frozen (pretrained) weights are not trained
def get_sinusoid_encoding_table(n_position, d_model): # positional encoding
def cal_angle(position, hid_idx):
return position/np.power(10000, 2*(hid_idx // 2)/d_model) # 10000^(2i/d_model)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_model)] # hid_j runs from 0 to d_model-1
sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:,0::2] = np.sin(sinusoid_table[:,0::2])
# x[start:end:skip] takes elements starting at start, stepping by skip
# ex) l = range(20)
# l[1::3] = [1,4,7,10,13,16,19]
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:,1::2])
return torch.FloatTensor(sinusoid_table)
def get_attn_pad_mask(seq_q, seq_k):
batch_size, len_q = seq_q.size()
batch_size, len_k = seq_k.size()
pad_attn_mask = seq_k.data.eq(0).unsqueeze(1) # eq : element-wise equality
# x = torch.tensor([1,2,3,4]) # dim = 1
# torch.unsqueeze(x,0) = tensor([[1,2,3,4]])
# torch.unsqueeze(x,1) = tensor([[1],
# [2],
# [3],
# [4]])
return pad_attn_mask.expand(batch_size, len_q, len_k)
# x = torch.tensor([[1],[2],[3]])
# x.size() = torch.size([3,1])
# x.expand(3,4) = tensor([[1,1,1,1],[2,2,2,2],[3,3,3,3]])
# x.expand(-1,4) = tensor([[1,1,1,1],[2,2,2,2],[3,3,3,3]]) # -1 means that dimension keeps its original size
def get_attn_subsequent_mask(seq):
attn_shape = [seq.size(0), seq.size(1), seq.size(1)]
subsequent_mask = np.triu(np.ones(attn_shape), k=1) # keep ones on and above the k-th diagonal, zeros below it
# e.g. np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) gives
# array([[ 1, 2, 3],
# [ 4, 5, 6],
# [ 0, 8, 9],
# [ 0, 0, 12]])
subsequent_mask = torch.from_numpy(subsequent_mask).byte() # convert the numpy array to a torch tensor
return subsequent_mask
class ScaledDotProduct(nn.Module):
def __init__(self):
super(ScaledDotProduct,self).__init__()
self.softmax = nn.Softmax(dim = -1) # dim of softmax = the dimension along which softmax is computed
# NLLLoss uses LogSoftmax internally
self.const = np.sqrt(d_k) # scaling constant sqrt(d_k), where d_k is the key dimension defined above
def forward(self, Q, K, V, att_mask): # att_mask masks out padded / future positions
score = torch.matmul(Q,K.transpose(-1,-2))/self.const # tr | ๊ณผ dim1์ด ์๋ก ๋ฐ๊ฟ๋ค
score.masked_fill_(att_mask, -1e9) # masked!
# masked_fill_(mask, value): mask is boolean; positions where mask is True are filled with value
attn = self.softmax(score) # attn = attention distribution
context = torch.matmul(attn, V)
return context, attn
############################################################
# What is self?
# class Foo:
# def func1(): # defining it without a self argument is not an error by itself
# print("fuckck")
# def func2(self):
# print("fuck!!")
# f = Foo() # create an instance of the class
# f.func2() => func2 prints normally # instance method call -> self is func2's only argument, so no input is needed
# (no input is needed because Python automatically passes the value for the method's self argument)
# f.func1() -> raises an error: even though there is no self parameter, Python still passes the instance automatically
# inside a class, self is the instance that refers to the class itself!
############################################################
class MultiHeadAttention(nn.Module):
def __init__(self):
super(MultiHeadAttention, self).__init__() # d_v = d_k
self.W_Q = nn.Linear(d_model, d_k * n_head) # run n_head heads in parallel # d_k x n_head because the head outputs are concatenated
self.W_K = nn.Linear(d_model, d_k * n_head) #
self.W_V = nn.Linear(d_model, d_k * n_head)
def forward(self,Q, K, V, att_mask): # in the encoder Q, K, V are all the same; in the decoder K and V are the same but Q differs
residual = Q
batch_size = Q.size(0)
q_s = self.W_Q(Q).view(batch_size, -1, n_head, d_k).transpose(1,2)
k_s = self.W_K(K).view(batch_size, -1, n_head, d_k).transpose(1,2)
v_s = self.W_V(V).view(batch_size, -1, n_head, d_v).transpose(1,2)
att_mask = att_mask.unsqueeze(1).repeat(1, n_head, 1,1) # unsqueeze(1) inserts a new axis so the mask can be repeated per head
context, attn = ScaledDotProduct()(q_s, k_s, v_s, att_mask)
context = context.transpose(1,2).contiguous().view(batch_size, -1, n_head * d_v)
# contiguous(): returns a contiguous tensor holding the same data as self
# (i.e. the data is laid out contiguously in memory again after the transpose)
output = nn.Linear(n_head*d_v, d_model)(context) # pass the concatenated heads through one more weight matrix
return nn.LayerNorm(d_model)(output + residual), attn
class PositionwiseFFNN(nn.Module):
def __init__(self):
super(PositionwiseFFNN, self).__init__() # conv1d with kernel_size=1 acts per position along the sequence, like a linear layer (vs. conv2d over 2-D maps)
# W1 = d_model x d_ff
self.linear1 = nn.Conv1d(in_channels = d_model, out_channels = d_ff, kernel_size=1)
# W2 = d_ff x d_model
self.linear2 = nn.Conv1d(in_channels = d_ff, out_channels = d_model, kernel_size=1)
self.relu = nn.ReLU()
def forward(self, input):
residual = input
output = self.linear1(input.transpose(1,2))
output = self.relu(output)
output = self.linear2(output).transpose(1,2)
return nn.LayerNorm(d_model)(output + residual)
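# Aside, not in the original script: a Conv1d with kernel_size=1, as used above, is
# equivalent to applying the same Linear layer independently at every sequence position.
def _conv1d_vs_linear_sketch():
    x = torch.rand(1, 5, d_model)                        # (batch, seq_len, d_model)
    conv = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
    lin = nn.Linear(d_model, d_ff)
    lin.weight.data = conv.weight.data.squeeze(-1)       # reuse the same parameters
    lin.bias.data = conv.bias.data
    out_conv = conv(x.transpose(1, 2)).transpose(1, 2)
    out_lin = lin(x)
    return torch.allclose(out_conv, out_lin, atol=1e-6)  # True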
class EncoderLayer(nn.Module):
def __init__(self):
super(EncoderLayer,self).__init__()
self.enc_self_attn = MultiHeadAttention()
self.PWfeedforward = PositionwiseFFNN()
def forward(self, enc_input, enc_self_attn_mask):
enc_output, attn = self.enc_self_attn(enc_input, enc_input, enc_input, enc_self_attn_mask)
enc_output = self.PWfeedforward(enc_output)
return enc_output, attn
class Encoder(nn.Module):
def __init__(self):
super(Encoder,self).__init__()
self.src_emb = nn.Embedding(src_voca_size, d_model)
# Embedding: holds a lookup table used for the embedding
self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(src_len+1, d_model),freeze = True)
self.layer = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])
def forward(self, enc_input):
enc_output = self.src_emb(enc_input)+self.pos_emb(torch.LongTensor([[1,2,3,4,0]]))
enc_self_attn_mask = get_attn_pad_mask(enc_input, enc_input)
enc_self_attns = []
for layer in self.layer:
enc_output, enc_self_attn = layer(enc_output, enc_self_attn_mask)
enc_self_attns.append(enc_self_attn) # append works like concatenating onto the list
return enc_output, enc_self_attns
class DecoderLayer(nn.Module):
def __init__(self):
super(DecoderLayer, self).__init__()
self.dec_self_attn = MultiHeadAttention()
self.dec_enc_attn = MultiHeadAttention()
self.PWfeedforward = PositionwiseFFNN()
def forward(self, dec_input, enc_output, dec_self_attn_mask, dec_enc_attn_mask):
dec_output, dec_self_attn = self.dec_self_attn(dec_input, dec_input, dec_input, dec_self_attn_mask)
dec_output, dec_end_attn = self.dec_enc_attn(dec_output, enc_output, enc_output, dec_enc_attn_mask)
dec_output = self.PWfeedforward(dec_output)
return dec_output, dec_self_attn, dec_end_attn
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.tgt_emb = nn.Embedding(tgt_voca_size, d_model)
self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(tgt_len+1, d_model), freeze = True)
self.layers = nn.ModuleList([DecoderLayer() for _ in range(n_layers)])
def forward(self, dec_input, enc_input, enc_output):
dec_output = self.tgt_emb(dec_input)+self.pos_emb(torch.LongTensor([[5,1,2,3,4]]))
dec_self_attn_pad_mask = get_attn_pad_mask(dec_input, dec_input)
dec_self_attn_subsequent_mask = get_attn_subsequent_mask(dec_input)
dec_self_attn_mask = torch.gt((dec_self_attn_pad_mask+dec_self_attn_subsequent_mask),0)
dec_enc_attn_mask = get_attn_pad_mask(dec_input, enc_input)
dec_self_attns, dec_enc_attns = [],[]
for layer in self.layers:
dec_output, dec_self_attn, dec_enc_attn = layer(dec_output, enc_output, dec_self_attn_mask, dec_enc_attn_mask)
dec_self_attns.append(dec_self_attn)
dec_enc_attns.append(dec_enc_attn)
return dec_output, dec_self_attns, dec_enc_attns
class Transformer(nn.Module):
def __init__(self):
super(Transformer, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder()
self.projection = nn.Linear(d_model, tgt_voca_size, bias = False)
self.softmax = nn.Softmax()
def forward(self, enc_input, dec_input):
enc_output, enc_self_attn = self.encoder(enc_input)
dec_output, dec_self_attn, dec_enc_attn = self.decoder(dec_input, enc_input, enc_output)
dec_logit = self.projection(dec_output)
return dec_logit.view(-1, dec_logit.size(-1)), enc_self_attn, dec_self_attn, dec_enc_attn
model = Transformer()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr = 0.001)
for epoch in range(20):
optimizer.zero_grad()
enc_input, dec_input, target_batch = make_batch(sentence)
outputs, enc_self_attns, dec_self_attns, dec_enc_attns = model(enc_input, dec_input)
loss = criterion(outputs, target_batch.contiguous().view(-1))
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
loss.backward()
optimizer.step() | anpose: ์ฃผ์ด์ง dim0 | identifier_name |
Transformer_prac.py | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
dtype = torch.FloatTensor
sentence =['ich mochte ein bier P', 'S i want a beer','i want a beer E']
# S : Symbol that shows starting point of decoding input
# E : Symbol that shows starting of decoding output
# P : Symbol that will fill in blank sequence if current batch data size is short than time steps
# Transformer parameter
src_voca = {'P':0, 'ich':1, 'mochte':2, 'ein':3, 'bier':4} # dictionary; P is the padding token (index 0)
src_voca_size = len(src_voca)
tgt_voca = {'P':0, 'i':1, 'want':2, 'a':3, 'beer':4, 'S':5, 'E':6}
number_dict = {i:w for i, w in enumerate(tgt_voca)}
tgt_voca_size = len(tgt_voca)
src_len = 5
tgt_len = 5
d_model = 512 # embedding size
d_ff = 2048 # feedforward dimension
n_layers = 6 # number of encoder/decoder layers
n_head = 8 # number of heads in multi-head attention
d_k = d_v = 64 # dimension of K (must equal Q's dimension) and V
def make_batch(sentence):
input_batch = [[src_voca[n] for n in sentence[0].split()]] # build as a list
output_batch = [[tgt_voca[n] for n in sentence[1].split()]]
target_batch = [[tgt_voca[n] for n in sentence[2].split()]]
return Variable(torch.LongTensor(input_batch)), Variable(torch.LongTensor(output_batch)), Variable(torch.LongTensor(target_batch))
# Variable = autograd: requires_grad defaults to False; it supports every API defined for tensors
# e.g. x = Variable(torch.ones(2,2), requires_grad=True)
# a Variable is used to store the loss computed by the loss function so that the model parameter x can be trained
# calling loss.backward(), which computes ∂loss/∂x, makes PyTorch store the gradient of the variable x
# requires_grad marks whether the variable x is trainable; frozen (pretrained) weights are not trained
def get_sinusoid_encoding_table(n_position, d_model): # positional encoding
def cal_angle(position, hid_idx):
return position/np.power(10000, 2*(hid_idx // 2)/d_model) # 10000^(2i/d_model)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_model)] # hid_j runs from 0 to d_model-1
sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:,0::2] = np.sin(sinusoid_table[:,0::2])
# x[start:end:skip] takes elements starting at start, stepping by skip
# ex) l = range(20)
# l[1::3] = [1,4,7,10,13,16,19]
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:,1::2])
return torch.FloatTensor(sinusoid_table)
def get_attn_pad_mask(seq_q, seq_k):
batch_size, len_q = seq_q.size()
batch_size, len_k = seq_k.size()
pad_attn_mask = seq_k.data.eq(0).unsqueeze(1) # eq : element-wise equality
# x = torch.tensor([1,2,3,4]) # dim = 1
# torch.unsqueeze(x,0) = tensor([[1,2,3,4]])
# torch.unsqueeze(x,1) = tensor([[1],
# [2],
# [3],
# [4]])
return pad_attn_mask.expand(batch_size, len_q, len_k)
# x = torch.tensor([[1],[2],[3]])
# x.size() = torch.size([3,1])
# x.expand(3,4) = tensor([[1,1,1,1],[2,2,2,2],[3,3,3,3]])
# x.expand(-1,4) = tensor([[1,1,1,1],[2,2,2,2],[3,3,3,3]]) # -1 means that dimension keeps its original size
def get_attn_subsequent_mask(seq):
attn_shape = [seq.size(0), seq.size(1), seq.size(1)]
subsequent_mask = np.triu(np.ones(attn_shape), k=1) # keep ones on and above the k-th diagonal, zeros below it
# e.g. np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) gives
# array([[ 1, 2, 3],
# [ 4, 5, 6],
# [ 0, 8, 9],
# [ 0, 0, 12]])
subsequent_mask = torch.from_numpy(subsequent_mask).byte() # convert the numpy array to a torch tensor
return subsequent_mask
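# Illustrative check (not part of the original script): for a (1, 5) decoder input the mask is
#   tensor([[[0, 1, 1, 1, 1],
#            [0, 0, 1, 1, 1],
#            [0, 0, 0, 1, 1],
#            [0, 0, 0, 0, 1],
#            [0, 0, 0, 0, 0]]], dtype=torch.uint8)
# so position i may only attend to positions <= i (the 1s are the masked-out future positions).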
class ScaledDotProduct(nn.Module):
    def __init__(self):
        super(ScaledDotProduct, self).__init__()
        self.softmax = nn.Softmax(dim=-1)  # softmax over the last dimension, i.e. over the key positions
        # (NLLLoss pairs with LogSoftmax; here we want plain attention weights, so ordinary Softmax is used.)
        self.const = np.sqrt(d_k)  # scaling constant sqrt(d_k)

    def forward(self, Q, K, V, att_mask):  # att_mask marks the positions that must not be attended to
        score = torch.matmul(Q, K.transpose(-1, -2)) / self.const  # transpose swaps the given dim0 and dim1
        score.masked_fill_(att_mask, -1e9)  # masked_fill_(mask, value): where mask is True, fill with value
        attn = self.softmax(score)  # attn = attention distribution
        context = torch.matmul(attn, V)
        return context, attn
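# Shape walkthrough (illustrative, assuming the head-split inputs produced by MultiHeadAttention below):
#   Q, K, V  : (batch, n_head, seq_len, d_k)  -> score : (batch, n_head, len_q, len_k)
#   att_mask : same shape as score; masked entries get -1e9, so their softmax weight is ~0
#   context  : (batch, n_head, len_q, d_v)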
############################################################
# What is self?
# class Foo:
#     def func1():        # defining a method without self does not raise an error by itself
#         print("func1")
#     def func2(self):
#         print("func2")
# f = Foo()     # create an instance of the class
# f.func2()     # prints normally: it is an instance-method call, and since self is func2's only
#               # parameter, nothing has to be passed explicitly — Python supplies self automatically.
# f.func1()     # raises an error: func1 declares no parameters, yet Python still passes self automatically.
# Inside a class, self refers to the instance of the class itself.
############################################################
class MultiHeadAttention(nn.Module):
    def __init__(self):
        super(MultiHeadAttention, self).__init__()  # d_v = d_k
        self.W_Q = nn.Linear(d_model, d_k * n_head)  # n_head heads in parallel; d_k * n_head because the head outputs are concatenated
        self.W_K = nn.Linear(d_model, d_k * n_head)
        self.W_V = nn.Linear(d_model, d_v * n_head)
        self.linear = nn.Linear(n_head * d_v, d_model)  # final projection of the concatenated heads
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, Q, K, V, att_mask):  # encoder self-attention uses the same tensor for Q, K and V; decoder-encoder attention shares K and V but takes a different Q
        residual = Q
        batch_size = Q.size(0)
        q_s = self.W_Q(Q).view(batch_size, -1, n_head, d_k).transpose(1, 2)
        k_s = self.W_K(K).view(batch_size, -1, n_head, d_k).transpose(1, 2)
        v_s = self.W_V(V).view(batch_size, -1, n_head, d_v).transpose(1, 2)
        att_mask = att_mask.unsqueeze(1).repeat(1, n_head, 1, 1)  # add a head dimension and repeat the mask for every head
        context, attn = ScaledDotProduct()(q_s, k_s, v_s, att_mask)
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, n_head * d_v)
        # contiguous(): returns a tensor with the same data as self laid out contiguously in memory,
        # which view() requires after a transpose
        output = self.linear(context)  # pass the concatenated heads through one more weight matrix
        return self.layer_norm(output + residual), attn
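# Shape walkthrough (illustrative): with the toy batch, Q, K, V enter as (1, 5, 512);
#   W_Q(Q).view(1, -1, 8, 64).transpose(1, 2) -> q_s : (1, 8, 5, 64)
#   attention context                         ->       (1, 8, 5, 64)
#   transpose + view (concatenate the heads)  ->       (1, 5, 8 * 64) = (1, 5, 512)
#   the final Linear maps back to d_model; residual + LayerNorm keep the shape at (1, 5, 512).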
class PositionwiseFFNN(nn.Module):
    def __init__(self):
        super(PositionwiseFFNN, self).__init__()
        # A Conv1d with kernel_size=1 applied along the sequence dimension is equivalent to a
        # position-wise Linear layer: the same weights act independently at every position.
        # W1 : d_model x d_ff
        self.linear1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
        # W2 : d_ff x d_model
        self.linear2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
        self.relu = nn.ReLU()
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, input):
        residual = input
        output = self.linear1(input.transpose(1, 2))  # Conv1d expects (batch, channels, length), so swap (len, d_model) -> (d_model, len)
        output = self.relu(output)
        output = self.linear2(output).transpose(1, 2)
        return self.layer_norm(output + residual)
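# Quick equivalence check (illustrative, not in the original script): a kernel_size-1 Conv1d and a
# Linear layer compute the same position-wise transform, up to weight layout.
#   x = torch.randn(1, 5, d_model)                     # (batch, len, d_model)
#   conv = nn.Conv1d(d_model, d_ff, kernel_size=1)
#   lin = nn.Linear(d_model, d_ff)
#   lin.weight.data = conv.weight.data.squeeze(-1)     # (d_ff, d_model, 1) -> (d_ff, d_model)
#   lin.bias.data = conv.bias.data
#   torch.allclose(conv(x.transpose(1, 2)).transpose(1, 2), lin(x), atol=1e-6)  # -> True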
class EncoderLayer(nn.Module):
    def __init__(self):
        super(EncoderLayer, self).__init__()
        self.enc_self_attn = MultiHeadAttention()
        self.PWfeedforward = PositionwiseFFNN()

    def forward(self, enc_input, enc_self_attn_mask):
        enc_output, attn = self.enc_self_attn(enc_input, enc_input, enc_input, enc_self_attn_mask)
        enc_output = self.PWfeedforward(enc_output)
        return enc_output, attn
class Encoder(nn.Module):
    def __init__(self):
        super(Encoder, self).__init__()
        self.src_emb = nn.Embedding(src_voca_size, d_model)  # Embedding holds a lookup table of token embeddings
        self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(src_len + 1, d_model), freeze=True)
        self.layer = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])

    def forward(self, enc_input):
        enc_output = self.src_emb(enc_input) + self.pos_emb(torch.LongTensor([[1, 2, 3, 4, 0]]))
        enc_self_attn_mask = get_attn_pad_mask(enc_input, enc_input)
        enc_self_attns = []
        for layer in self.layer:
            enc_output, enc_self_attn = layer(enc_output, enc_self_attn_mask)
            enc_self_attns.append(enc_self_attn)  # collect each layer's attention map in a list
        return enc_output, enc_self_attns
class DecoderLayer(nn.Module):
    def __init__(self):
        super(DecoderLayer, self).__init__()
        self.dec_self_attn = MultiHeadAttention()
        self.dec_enc_attn = MultiHeadAttention()
        self.PWfeedforward = PositionwiseFFNN()

    def forward(self, dec_input, enc_output, dec_self_attn_mask, dec_enc_attn_mask):
        dec_output, dec_self_attn = self.dec_self_attn(dec_input, dec_input, dec_input, dec_self_attn_mask)
        dec_output, dec_enc_attn = self.dec_enc_attn(dec_output, enc_output, enc_output, dec_enc_attn_mask)
        dec_output = self.PWfeedforward(dec_output)
        return dec_output, dec_self_attn, dec_enc_attn
class Decoder(nn.Module):
    def __init__(self):
        super(Decoder, self).__init__()
        self.tgt_emb = nn.Embedding(tgt_voca_size, d_model)
        self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(tgt_len + 1, d_model), freeze=True)
        self.layers = nn.ModuleList([DecoderLayer() for _ in range(n_layers)])

    def forward(self, dec_input, enc_input, enc_output):
        dec_output = self.tgt_emb(dec_input) + self.pos_emb(torch.LongTensor([[5, 1, 2, 3, 4]]))
        dec_self_attn_pad_mask = get_attn_pad_mask(dec_input, dec_input)
        dec_self_attn_subsequent_mask = get_attn_subsequent_mask(dec_input)
        dec_self_attn_mask = torch.gt((dec_self_attn_pad_mask + dec_self_attn_subsequent_mask), 0)  # mask a position if either the padding mask or the subsequent mask is set
        dec_enc_attn_mask = get_attn_pad_mask(dec_input, enc_input)
        dec_self_attns, dec_enc_attns = [], []
        for layer in self.layers:
            dec_output, dec_self_attn, dec_enc_attn = layer(dec_output, enc_output, dec_self_attn_mask, dec_enc_attn_mask)
            dec_self_attns.append(dec_self_attn)
            dec_enc_attns.append(dec_enc_attn)
        return dec_output, dec_self_attns, dec_enc_attns
class Transformer(nn.Module):
    def __init__(self):
        super(Transformer, self).__init__()
        self.encoder = Encoder()
        self.decoder = Decoder()
        self.projection = nn.Linear(d_model, tgt_voca_size, bias=False)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, enc_input, dec_input):
        enc_output, enc_self_attns = self.encoder(enc_input)
        dec_output, dec_self_attns, dec_enc_attns = self.decoder(dec_input, enc_input, enc_output)
        dec_logit = self.projection(dec_output)  # (batch, tgt_len, tgt_voca_size)
        return dec_logit.view(-1, dec_logit.size(-1)), enc_self_attns, dec_self_attns, dec_enc_attns
model = Transformer()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

for epoch in range(20):
    optimizer.zero_grad()
    enc_input, dec_input, target_batch = make_batch(sentence)
    outputs, enc_self_attns, dec_self_attns, dec_enc_attns = model(enc_input, dec_input)
    loss = criterion(outputs, target_batch.contiguous().view(-1))
    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss.item()))
    loss.backward()
    optimizer.step()
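# Minimal post-training check (a sketch, not part of the original script): feed the trained model the
# same batch and map the argmax of each output position back to a word via number_dict.
enc_input, dec_input, _ = make_batch(sentence)
predict, _, _, _ = model(enc_input, dec_input)
predict = predict.data.max(1, keepdim=True)[1]
print(sentence[0], '->', [number_dict[n.item()] for n in predict.squeeze()])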