file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
graph.rs | /// Returns the end node.
pub fn get_end_node(&self) -> Index {
self.end_node
}
/// Resets the frame graph by removing all nodes and setting up a new end node.
pub fn reset(&mut self) {
let mut nodes = self.node_arena.write();
let end_node_impl = nodes.get(self.end_node).unwrap().node.clone();
nodes.clear();
self.traversed_node_cache.clear();
self.edges_arena.write().clear();
self.end_node = nodes.insert(ConnectedNode {
name: "EndNode".to_string(),
node: end_node_impl,
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
});
}
/// Adds a new node to the graph.
pub fn add_node<T: FrameGraphNode>(&self, node: T, name: &str) -> Index {
self.node_arena.write().insert(ConnectedNode {
name: name.to_string(),
node: Arc::new(node),
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
})
}
/// Connects one node's output to another node's input.
pub fn connect(
&self,
source: Index,
source_output: usize,
destination: Index,
destination_input: usize,
) -> Result<(), GraphConnectError> {
// Validate connection parameters.
if destination_input >= MAX_INPUT_OUTPUTS_PER_NODE {
return Err(GraphConnectError::MaximumInputsReached);
};
if source_output >= MAX_INPUT_OUTPUTS_PER_NODE {
return Err(GraphConnectError::MaximumInputsReached);
};
let mut edges = self.edges_arena.write();
let mut nodes = self.node_arena.write();
let destination_node = nodes
.get_mut(destination)
.ok_or(GraphConnectError::InvalidDestination)?;
// Target input is already connected.
if destination_node.inputs[destination_input].is_some() {
return Err(GraphConnectError::AlreadyConnected);
}
// Target input is empty so simply create the connection.
let edge = edges.insert(ConnectedEdges {
owner_node_index: source,
output_index: source_output,
});
destination_node.inputs[destination_input] = Some(edge);
Ok(())
}
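// A minimal usage sketch (hypothetical node types and indices, for illustration
// only, not part of this file): create two nodes, feed output 0 of the first
// into input 1 of the second, then wire the second node into the end node.
//
// let shadow = graph.add_node(ShadowPassNode::default(), "ShadowPass");
// let lighting = graph.add_node(LightingPassNode::default(), "LightingPass");
// graph.connect(shadow, 0, lighting, 1)?;
// graph.connect(lighting, 0, graph.get_end_node(), 0)?;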
fn traverse_node(
cache: &mut HashMap<Index, usize>,
levels_map: &mut MultiMap<usize, TraversedGraphNode>,
nodes: &RwLockReadGuard<Arena<ConnectedNode>>,
edges: &RwLockReadGuard<Arena<ConnectedEdges>>,
node_index: Index,
level: usize,
) {
// Build the traversed node with input/output mapping info.
let mut traversed_node = TraversedGraphNode {
index: node_index,
inputs: [None; MAX_INPUT_OUTPUTS_PER_NODE],
};
// Remove this node from all shallower levels so only its deepest occurrence is kept.
let mut has_retained = false;
for l in 0..level {
// Remove previous traversed node from level.
let vec = levels_map.get_vec_mut(&l).unwrap();
let before_len = vec.len();
vec.retain(|x| x.index != node_index);
if before_len != vec.len() {
has_retained = true;
}
}
// Update all inputs that still reference the kicked-out node.
if has_retained {
for l in 0..level {
let vec = levels_map.get_vec_mut(&l).unwrap();
for node in vec {
for input in &mut node.inputs {
if let Some((nlevel, _, index)) = input {
if index == &node_index {
*nlevel = level;
}
}
}
}
}
}
// Loop through all inputs
let next_level = level + 1;
let node = nodes.get(node_index).unwrap();
for (input_index, input) in node.inputs.iter().enumerate() {
if let Some(input) = input {
let edge = edges.get(*input).unwrap();
let inner_node = edge.owner_node_index;
traversed_node.inputs[input_index] = Some((next_level, edge.output_index, inner_node));
Self::traverse_node(cache, levels_map, nodes, edges, inner_node, next_level);
}
}
// Store traversed node at level.
//let traversed_index = levels_map.get_vec(&level).map(|x| x.len()).unwrap_or(0);
//cache.insert(node_index, traversed_index);
// TODO: Due to retaining this index breaks currently :'(
levels_map.insert(level, traversed_node);
}
/// Executes the graph using the given thread pool.
pub fn execute<T: DeviceHost>(
&mut self,
sc_frame: Arc<wgpu::SwapChainFrame>,
device_host: &'static T,
pool: &ThreadPool,
) {
{
{
optick::event!("FrameGraph::traverse");
// Gain read access to nodes and connections.
let nodes = self.node_arena.read();
let edges = self.edges_arena.read();
// Start traversing from end.
self.levels_map.clear();
Self::traverse_node(
&mut self.traversed_node_cache,
&mut self.levels_map,
&nodes,
&edges,
self.end_node,
0,
);
}
let cache = &mut self.traversed_node_cache;
// Create the async executor.
let mut local_pool = futures::executor::LocalPool::new();
let local_spawner = local_pool.spawner();
// Execute in levels order
let mut all_levels = self.levels_map.keys().cloned().collect::<Vec<_>>();
all_levels.sort_unstable();
let max_levels = all_levels.len();
for level in all_levels.into_iter().rev() {
optick::event!("FrameGraph::execute_level");
optick::tag!("level", level as u32);
// Get rid of duplicated nodes.
let mut nodes_in_level = self.levels_map.get_vec_mut(&level).unwrap().clone();
nodes_in_level.sort_unstable_by_key(|x| x.index);
nodes_in_level.dedup_by_key(|x| x.index);
// Build cache for this level
for (index, node) in nodes_in_level.iter().enumerate() {
cache.insert(node.index, index);
}
// Get chunks
let nodes = self.node_arena.read();
let read_nodes = nodes_in_level
.iter()
.map(|node| (nodes.get(node.index).unwrap(), node.inputs))
.collect::<Vec<_>>();
let mut empty = [Vec::with_capacity(0)];
#[allow(clippy::type_complexity)]
let (outputs, previous_outputs): (
&mut [Vec<Option<FrameNodeValue>>],
&mut [Vec<Option<FrameNodeValue>>],
) = if level == (max_levels - 1) {
(&mut self.output_map, &mut empty)
} else {
self.output_map.split_at_mut(level + 1)
};
let outputs_per_node = outputs[outputs.len() - 1]
.chunks_mut(MAX_INPUT_OUTPUTS_PER_NODE)
.enumerate()
.collect::<Vec<_>>();
// Execute
let encoder_outputs = pool.install(|| {
read_nodes
.par_iter()
.zip(outputs_per_node)
.enumerate()
.map(|(_i, ((node, inputs), (_oi, outputs)))| {
optick::event!("FrameGraph::node");
// Prepare node execution
optick::tag!("name", node.name);
let node_trait = node.node.clone();
let label = format!("NodeCommandEncoder_{}", node.name);
// Map outputs -> inputs.
/*
for (idx, input) in inputs.iter().enumerate() {
if let Some((target_level, output_index, node_index)) = input {
let i = cache.get(&node_index).unwrap();
println!(
"Mapping input #{} to level = {} ({}) and index = {} ({}, {})",
idx,
target_level,
previous_outputs.len() - (target_level - level),
i * MAX_INPUT_OUTPUTS_PER_NODE + output_index,
i,
output_index
);
} else {
println!("Mapping input #{} to None", i);
}
}
*/
let inputs = inputs
.iter()
.map(|input| {
input.map(|(target_level, output_index, node_index)| {
let i = cache.get(&node_index).unwrap();
&previous_outputs[previous_outputs.len() - (target_level - level)]
[i * MAX_INPUT_OUTPUTS_PER_NODE + output_index]
})
})
.map(|input| match input {
Some(Some(rf)) => Some(rf.clone()),
_ => None,
})
.collect::<Vec<_>>();
let sc_cloned = sc_frame.clone();
let out = {
optick::event!("FrameGraph::record_commands");
optick::tag!("name", label);
// Execute the node asynchronously.
node_trait.execute_raw(
&inputs,
outputs,
device_host.get_device(),
device_host.get_queue(),
&*sc_cloned,
)
};
out
})
.collect::<Vec<_>>()
});
{
optick::event!("FrameGraph::submit_level");
optick::tag!("level", level as u32);
let mut buffers = Vec::with_capacity(encoder_outputs.len());
for out in encoder_outputs {
if let Some(buffer) = out.command_buffer | {
buffers.push(buffer);
} | conditional_block |
|
signature_format.py | _48000 = 6
class FrequencyBand(IntEnum): # Enum keys are frequency ranges in Hz
_0_250 = -1 # Nothing below 250 Hz is actually stored
_250_520 = 0
_520_1450 = 1
_1450_3500 = 2
_3500_5500 = 3 # This one (3.5 KHz - 5.5 KHz) should not be used in legacy mode
class RawSignatureHeader(LittleEndianStructure):
_pack = True
_fields_ = [
('magic1', c_uint32), # Fixed 0xcafe2580 - 80 25 fe ca
('crc32', c_uint32), # CRC-32 for all of the following (so excluding these first 8 bytes)
('size_minus_header', c_uint32), # Total size of the message, minus the size of the current header (which is 48 bytes)
('magic2', c_uint32), # Fixed 0x94119c00 - 00 9c 11 94
('void1', c_uint32 * 3), # Void
('shifted_sample_rate_id', c_uint32), # A member of SampleRate (usually 3 for 16000 Hz), left-shifted by 27 (usually giving 0x18000000 - 00 00 00 18)
('void2', c_uint32 * 2), # Void, or maybe used only in "rolling window" mode?
('number_samples_plus_divided_sample_rate', c_uint32), # int(number_of_samples + sample_rate * 0.24) - As the sample rate is known thanks to the field above, it can be inferred and subtracted so that we obtain the number of samples, and from the number of samples and sample rate we can obtain the length of the recording
('fixed_value', c_uint32) # Calculated as ((15 << 19) + 0x40000) - 0x7c0000 or 00 00 7c 00 - seems pretty constant, may be different in the "SigType.STREAMING" mode
]
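# Illustrative header example (values assumed, not taken from a real signature):
# at sample_rate_hz = 16000 the shifted_sample_rate_id is 3 << 27 = 0x18000000,
# and a 4.0 s recording (64000 samples) stores
# number_samples_plus_divided_sample_rate = int(64000 + 16000 * 0.24) = 67840,
# from which the decoder recovers 67840 - 3840 = 64000 samples.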
class FrequencyPeak:
fft_pass_number : int = None
peak_magnitude : int = None
corrected_peak_frequency_bin : int = None
sample_rate_hz : int = None
def __init__(self, fft_pass_number : int, peak_magnitude : int, corrected_peak_frequency_bin : int, sample_rate_hz : int):
self.fft_pass_number = fft_pass_number
self.peak_magnitude = peak_magnitude
self.corrected_peak_frequency_bin = corrected_peak_frequency_bin
self.sample_rate_hz = sample_rate_hz
def get_frequency_hz(self) -> float:
return self.corrected_peak_frequency_bin * (self.sample_rate_hz / 2 / 1024 / 64)
# ^ Convert an FFT bin back to a frequency, given a 16 KHz sample
# rate, 1024 useful bins and the multiplication by 64 made before
# storing the information
def get_amplitude_pcm(self) -> float:
return sqrt(exp((self.peak_magnitude - 6144) / 1477.3) * (1 << 17) / 2) / 1024
# ^ Not sure about this calculation but gives small enough numbers
def get_seconds(self) -> float:
return (self.fft_pass_number * 128) / self.sample_rate_hz
# ^ Assume that new FFT bins are emitted every 128 samples, on a
# standard 16 KHz sample rate basis.
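# Worked example for the three conversions above (illustrative values only):
# with sample_rate_hz = 16000, a peak whose corrected_peak_frequency_bin is 8192
# maps to 8192 * (16000 / 2 / 1024 / 64) = 1000.0 Hz, and fft_pass_number = 250
# corresponds to (250 * 128) / 16000 = 2.0 seconds into the recording.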
class DecodedMessage:
sample_rate_hz : int = None
number_samples : int = None
frequency_band_to_sound_peaks : Dict[FrequencyBand, List[FrequencyPeak]] = None
@classmethod
def decode_from_binary(cls, data : bytes):
self = cls()
buf = BytesIO(data)
buf.seek(8)
checksummable_data = buf.read()
buf.seek(0)
# Read and check the header
header = RawSignatureHeader()
buf.readinto(header)
assert header.magic1 == 0xcafe2580
assert header.size_minus_header == len(data) - 48
assert crc32(checksummable_data) & 0xffffffff == header.crc32
assert header.magic2 == 0x94119c00
self.sample_rate_hz = int(SampleRate(header.shifted_sample_rate_id >> 27).name.strip('_'))
self.number_samples = int(header.number_samples_plus_divided_sample_rate - self.sample_rate_hz * 0.24)
# Read the type-length-value sequence that follows the header
# The first chunk is fixed and has no value, but instead just repeats
# the length of the message size minus the header:
assert int.from_bytes(buf.read(4), 'little') == 0x40000000
assert int.from_bytes(buf.read(4), 'little') == len(data) - 48
# Then, lists of frequency peaks for respective bands follow
self.frequency_band_to_sound_peaks = {}
while True:
tlv_header = buf.read(8)
if not tlv_header:
break
frequency_band_id = int.from_bytes(tlv_header[:4], 'little')
frequency_peaks_size = int.from_bytes(tlv_header[4:], 'little')
frequency_peaks_padding = -frequency_peaks_size % 4
frequency_peaks_buf = BytesIO(buf.read(frequency_peaks_size))
buf.read(frequency_peaks_padding)
# Decode frequency peaks
frequency_band = FrequencyBand(frequency_band_id - 0x60030040)
fft_pass_number = 0
self.frequency_band_to_sound_peaks[frequency_band] = []
while True:
raw_fft_pass : bytes = frequency_peaks_buf.read(1)
if not raw_fft_pass:
break
fft_pass_offset : int = raw_fft_pass[0]
if fft_pass_offset == 0xff:
fft_pass_number = int.from_bytes(frequency_peaks_buf.read(4), 'little')
continue
else:
fft_pass_number += fft_pass_offset
peak_magnitude = int.from_bytes(frequency_peaks_buf.read(2), 'little')
corrected_peak_frequency_bin = int.from_bytes(frequency_peaks_buf.read(2), 'little')
self.frequency_band_to_sound_peaks[frequency_band].append(
FrequencyPeak(fft_pass_number, peak_magnitude, corrected_peak_frequency_bin, self.sample_rate_hz)
)
return self
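# Sketch of the TLV layout parsed above (byte values are illustrative):
#   00 00 00 40                              -> fixed marker 0x40000000, no payload
#   <len(data) - 48 as little-endian uint32> -> repeats the post-header size
#   then, per band: <band id uint32> <payload size uint32> <payload, padded to 4 bytes>,
#   e.g. band id 0x60030041 decodes to FrequencyBand(0x60030041 - 0x60030040) == _520_1450.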
@classmethod
def decode_from_uri(cls, uri : str):
assert uri.startswith(DATA_URI_PREFIX)
return cls.decode_from_binary(b64decode(uri.replace(DATA_URI_PREFIX, '', 1)))
"""
Encode the current object to a readable JSON format, for debugging
purposes.
"""
def encode_to_json(self) -> dict:
return {
"sample_rate_hz": self.sample_rate_hz,
"number_samples": self.number_samples,
"_seconds": self.number_samples / self.sample_rate_hz,
"frequency_band_to_peaks": {
frequency_band.name.strip('_'): [
{ | "_seconds": frequency_peak.get_seconds()
}
for frequency_peak in frequency_peaks
]
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items())
}
}
def encode_to_binary(self) -> bytes:
header = RawSignatureHeader()
header.magic1 = 0xcafe2580
header.magic2 = 0x94119c00
header.shifted_sample_rate_id = int(getattr(SampleRate, '_%s' % self.sample_rate_hz)) << 27
header.fixed_value = ((15 << 19) + 0x40000)
header.number_samples_plus_divided_sample_rate = int(self.number_samples + self.sample_rate_hz * 0.24)
contents_buf = BytesIO()
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items()):
peaks_buf = BytesIO()
fft_pass_number = 0
# NOTE: Correctly filtering and sorting the peaks within the members
# of "self.frequency_band_to_sound_peaks" is the responsability of the
# caller
for frequency_peak in frequency_peaks:
assert frequency_peak.fft_pass_number >= fft_pass_number
if | "fft_pass_number": frequency_peak.fft_pass_number,
"peak_magnitude": frequency_peak.peak_magnitude,
"corrected_peak_frequency_bin": frequency_peak.corrected_peak_frequency_bin,
"_frequency_hz": frequency_peak.get_frequency_hz(),
"_amplitude_pcm": frequency_peak.get_amplitude_pcm(), | random_line_split |
signature_format.py | 48000 = 6
class FrequencyBand(IntEnum): # Enum keys are frequency ranges in Hz
_0_250 = -1 # Nothing below 250 Hz is actually stored
_250_520 = 0
_520_1450 = 1
_1450_3500 = 2
_3500_5500 = 3 # This one (3.5 KHz - 5.5 KHz) should not be used in legacy mode
class RawSignatureHeader(LittleEndianStructure):
_pack = True
_fields_ = [
('magic1', c_uint32), # Fixed 0xcafe2580 - 80 25 fe ca
('crc32', c_uint32), # CRC-32 for all of the following (so excluding these first 8 bytes)
('size_minus_header', c_uint32), # Total size of the message, minus the size of the current header (which is 48 bytes)
('magic2', c_uint32), # Fixed 0x94119c00 - 00 9c 11 94
('void1', c_uint32 * 3), # Void
('shifted_sample_rate_id', c_uint32), # A member of SampleRate (usually 3 for 16000 Hz), left-shifted by 27 (usually giving 0x18000000 - 00 00 00 18)
('void2', c_uint32 * 2), # Void, or maybe used only in "rolling window" mode?
('number_samples_plus_divided_sample_rate', c_uint32), # int(number_of_samples + sample_rate * 0.24) - As the sample rate is known thanks to the field above, it can be inferred and subtracted so that we obtain the number of samples, and from the number of samples and sample rate we can obtain the length of the recording
('fixed_value', c_uint32) # Calculated as ((15 << 19) + 0x40000) - 0x7c0000 or 00 00 7c 00 - seems pretty constant, may be different in the "SigType.STREAMING" mode
]
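# Illustrative consistency check (assumed length): for a 200-byte signature,
# size_minus_header must equal 200 - 48 = 152, and crc32 is computed over
# data[8:], i.e. everything after the magic1/crc32 pair, exactly as asserted
# in decode_from_binary() below.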
class FrequencyPeak:
fft_pass_number : int = None
peak_magnitude : int = None
corrected_peak_frequency_bin : int = None
sample_rate_hz : int = None
def __init__(self, fft_pass_number : int, peak_magnitude : int, corrected_peak_frequency_bin : int, sample_rate_hz : int):
self.fft_pass_number = fft_pass_number
self.peak_magnitude = peak_magnitude
self.corrected_peak_frequency_bin = corrected_peak_frequency_bin
self.sample_rate_hz = sample_rate_hz
def get_frequency_hz(self) -> float:
return self.corrected_peak_frequency_bin * (self.sample_rate_hz / 2 / 1024 / 64)
# ^ Convert an FFT bin back to a frequency, given a 16 KHz sample
# rate, 1024 useful bins and the multiplication by 64 made before
# storing the information
def get_amplitude_pcm(self) -> float:
return sqrt(exp((self.peak_magnitude - 6144) / 1477.3) * (1 << 17) / 2) / 1024
# ^ Not sure about this calculation but gives small enough numbers
def get_seconds(self) -> float:
return (self.fft_pass_number * 128) / self.sample_rate_hz
# ^ Assume that new FFT bins are emitted every 128 samples, on a
# standard 16 KHz sample rate basis.
class DecodedMessage:
sample_rate_hz : int = None
number_samples : int = None
frequency_band_to_sound_peaks : Dict[FrequencyBand, List[FrequencyPeak]] = None
@classmethod
def decode_from_binary(cls, data : bytes):
| self.number_samples = int(header.number_samples_plus_divided_sample_rate - self.sample_rate_hz * 0.24)
# Read the type-length-value sequence that follows the header
# The first chunk is fixed and has no value, but instead just repeats
# the length of the message size minus the header:
assert int.from_bytes(buf.read(4), 'little') == 0x40000000
assert int.from_bytes(buf.read(4), 'little') == len(data) - 48
# Then, lists of frequency peaks for respective bands follow
self.frequency_band_to_sound_peaks = {}
while True:
tlv_header = buf.read(8)
if not tlv_header:
break
frequency_band_id = int.from_bytes(tlv_header[:4], 'little')
frequency_peaks_size = int.from_bytes(tlv_header[4:], 'little')
frequency_peaks_padding = -frequency_peaks_size % 4
frequency_peaks_buf = BytesIO(buf.read(frequency_peaks_size))
buf.read(frequency_peaks_padding)
# Decode frequency peaks
frequency_band = FrequencyBand(frequency_band_id - 0x60030040)
fft_pass_number = 0
self.frequency_band_to_sound_peaks[frequency_band] = []
while True:
raw_fft_pass : bytes = frequency_peaks_buf.read(1)
if not raw_fft_pass:
break
fft_pass_offset : int = raw_fft_pass[0]
if fft_pass_offset == 0xff:
fft_pass_number = int.from_bytes(frequency_peaks_buf.read(4), 'little')
continue
else:
fft_pass_number += fft_pass_offset
peak_magnitude = int.from_bytes(frequency_peaks_buf.read(2), 'little')
corrected_peak_frequency_bin = int.from_bytes(frequency_peaks_buf.read(2), 'little')
self.frequency_band_to_sound_peaks[frequency_band].append(
FrequencyPeak(fft_pass_number, peak_magnitude, corrected_peak_frequency_bin, self.sample_rate_hz)
)
return self
@classmethod
def decode_from_uri(cls, uri : str):
assert uri.startswith(DATA_URI_PREFIX)
return cls.decode_from_binary(b64decode(uri.replace(DATA_URI_PREFIX, '', 1)))
"""
Encode the current object to a readable JSON format, for debugging
purposes.
"""
def encode_to_json(self) -> dict:
return {
"sample_rate_hz": self.sample_rate_hz,
"number_samples": self.number_samples,
"_seconds": self.number_samples / self.sample_rate_hz,
"frequency_band_to_peaks": {
frequency_band.name.strip('_'): [
{
"fft_pass_number": frequency_peak.fft_pass_number,
"peak_magnitude": frequency_peak.peak_magnitude,
"corrected_peak_frequency_bin": frequency_peak.corrected_peak_frequency_bin,
"_frequency_hz": frequency_peak.get_frequency_hz(),
"_amplitude_pcm": frequency_peak.get_amplitude_pcm(),
"_seconds": frequency_peak.get_seconds()
}
for frequency_peak in frequency_peaks
]
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items())
}
}
def encode_to_binary(self) -> bytes:
header = RawSignatureHeader()
header.magic1 = 0xcafe2580
header.magic2 = 0x94119c00
header.shifted_sample_rate_id = int(getattr(SampleRate, '_%s' % self.sample_rate_hz)) << 27
header.fixed_value = ((15 << 19) + 0x40000)
header.number_samples_plus_divided_sample_rate = int(self.number_samples + self.sample_rate_hz * 0.24)
contents_buf = BytesIO()
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items()):
peaks_buf = BytesIO()
fft_pass_number = 0
# NOTE: Correctly filtering and sorting the peaks within the members
# of "self.frequency_band_to_sound_peaks" is the responsability of the
# caller
for frequency_peak in frequency_peaks:
assert frequency_peak.fft_pass_number >= fft_pass_number
if frequency | self = cls()
buf = BytesIO(data)
buf.seek(8)
checksummable_data = buf.read()
buf.seek(0)
# Read and check the header
header = RawSignatureHeader()
buf.readinto(header)
assert header.magic1 == 0xcafe2580
assert header.size_minus_header == len(data) - 48
assert crc32(checksummable_data) & 0xffffffff == header.crc32
assert header.magic2 == 0x94119c00
self.sample_rate_hz = int(SampleRate(header.shifted_sample_rate_id >> 27).name.strip('_'))
| identifier_body |
signature_format.py | 48000 = 6
class FrequencyBand(IntEnum): # Enum keys are frequency ranges in Hz
_0_250 = -1 # Nothing below 250 Hz is actually stored
_250_520 = 0
_520_1450 = 1
_1450_3500 = 2
_3500_5500 = 3 # This one (3.5 KHz - 5.5 KHz) should not be used in legacy mode
class RawSignatureHeader(LittleEndianStructure):
_pack = True
_fields_ = [
('magic1', c_uint32), # Fixed 0xcafe2580 - 80 25 fe ca
('crc32', c_uint32), # CRC-32 for all of the following (so excluding these first 8 bytes)
('size_minus_header', c_uint32), # Total size of the message, minus the size of the current header (which is 48 bytes)
('magic2', c_uint32), # Fixed 0x94119c00 - 00 9c 11 94
('void1', c_uint32 * 3), # Void
('shifted_sample_rate_id', c_uint32), # A member of SampleRate (usually 3 for 16000 Hz), left-shifted by 27 (usually giving 0x18000000 - 00 00 00 18)
('void2', c_uint32 * 2), # Void, or maybe used only in "rolling window" mode?
('number_samples_plus_divided_sample_rate', c_uint32), # int(number_of_samples + sample_rate * 0.24) - As the sample rate is known thanks to the field above, it can be inferred and subtracted so that we obtain the number of samples, and from the number of samples and sample rate we can obtain the length of the recording
('fixed_value', c_uint32) # Calculated as ((15 << 19) + 0x40000) - 0x7c0000 or 00 00 7c 00 - seems pretty constant, may be different in the "SigType.STREAMING" mode
]
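# Worked arithmetic for the fixed_value constant above: 15 << 19 = 0x780000,
# plus 0x40000 gives 0x7c0000, whose little-endian bytes are 00 00 7c 00,
# matching the value written back in encode_to_binary().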
class FrequencyPeak:
fft_pass_number : int = None
peak_magnitude : int = None
corrected_peak_frequency_bin : int = None
sample_rate_hz : int = None
def __init__(self, fft_pass_number : int, peak_magnitude : int, corrected_peak_frequency_bin : int, sample_rate_hz : int):
self.fft_pass_number = fft_pass_number
self.peak_magnitude = peak_magnitude
self.corrected_peak_frequency_bin = corrected_peak_frequency_bin
self.sample_rate_hz = sample_rate_hz
def get_frequency_hz(self) -> float:
return self.corrected_peak_frequency_bin * (self.sample_rate_hz / 2 / 1024 / 64)
# ^ Convert an FFT bin back to a frequency, given a 16 KHz sample
# rate, 1024 useful bins and the multiplication by 64 made before
# storing the information
def get_amplitude_pcm(self) -> float:
return sqrt(exp((self.peak_magnitude - 6144) / 1477.3) * (1 << 17) / 2) / 1024
# ^ Not sure about this calculation but gives small enough numbers
def | (self) -> float:
return (self.fft_pass_number * 128) / self.sample_rate_hz
# ^ Assume that new FFT bins are emitted every 128 samples, on a
# standard 16 KHz sample rate basis.
class DecodedMessage:
sample_rate_hz : int = None
number_samples : int = None
frequency_band_to_sound_peaks : Dict[FrequencyBand, List[FrequencyPeak]] = None
@classmethod
def decode_from_binary(cls, data : bytes):
self = cls()
buf = BytesIO(data)
buf.seek(8)
checksummable_data = buf.read()
buf.seek(0)
# Read and check the header
header = RawSignatureHeader()
buf.readinto(header)
assert header.magic1 == 0xcafe2580
assert header.size_minus_header == len(data) - 48
assert crc32(checksummable_data) & 0xffffffff == header.crc32
assert header.magic2 == 0x94119c00
self.sample_rate_hz = int(SampleRate(header.shifted_sample_rate_id >> 27).name.strip('_'))
self.number_samples = int(header.number_samples_plus_divided_sample_rate - self.sample_rate_hz * 0.24)
# Read the type-length-value sequence that follows the header
# The first chunk is fixed and has no value, but instead just repeats
# the length of the message size minus the header:
assert int.from_bytes(buf.read(4), 'little') == 0x40000000
assert int.from_bytes(buf.read(4), 'little') == len(data) - 48
# Then, lists of frequency peaks for respective bands follow
self.frequency_band_to_sound_peaks = {}
while True:
tlv_header = buf.read(8)
if not tlv_header:
break
frequency_band_id = int.from_bytes(tlv_header[:4], 'little')
frequency_peaks_size = int.from_bytes(tlv_header[4:], 'little')
frequency_peaks_padding = -frequency_peaks_size % 4
frequency_peaks_buf = BytesIO(buf.read(frequency_peaks_size))
buf.read(frequency_peaks_padding)
# Decode frequency peaks
frequency_band = FrequencyBand(frequency_band_id - 0x60030040)
fft_pass_number = 0
self.frequency_band_to_sound_peaks[frequency_band] = []
while True:
raw_fft_pass : bytes = frequency_peaks_buf.read(1)
if not raw_fft_pass:
break
fft_pass_offset : int = raw_fft_pass[0]
if fft_pass_offset == 0xff:
fft_pass_number = int.from_bytes(frequency_peaks_buf.read(4), 'little')
continue
else:
fft_pass_number += fft_pass_offset
peak_magnitude = int.from_bytes(frequency_peaks_buf.read(2), 'little')
corrected_peak_frequency_bin = int.from_bytes(frequency_peaks_buf.read(2), 'little')
self.frequency_band_to_sound_peaks[frequency_band].append(
FrequencyPeak(fft_pass_number, peak_magnitude, corrected_peak_frequency_bin, self.sample_rate_hz)
)
return self
@classmethod
def decode_from_uri(cls, uri : str):
assert uri.startswith(DATA_URI_PREFIX)
return cls.decode_from_binary(b64decode(uri.replace(DATA_URI_PREFIX, '', 1)))
"""
Encode the current object to a readable JSON format, for debugging
purposes.
"""
def encode_to_json(self) -> dict:
return {
"sample_rate_hz": self.sample_rate_hz,
"number_samples": self.number_samples,
"_seconds": self.number_samples / self.sample_rate_hz,
"frequency_band_to_peaks": {
frequency_band.name.strip('_'): [
{
"fft_pass_number": frequency_peak.fft_pass_number,
"peak_magnitude": frequency_peak.peak_magnitude,
"corrected_peak_frequency_bin": frequency_peak.corrected_peak_frequency_bin,
"_frequency_hz": frequency_peak.get_frequency_hz(),
"_amplitude_pcm": frequency_peak.get_amplitude_pcm(),
"_seconds": frequency_peak.get_seconds()
}
for frequency_peak in frequency_peaks
]
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items())
}
}
def encode_to_binary(self) -> bytes:
header = RawSignatureHeader()
header.magic1 = 0xcafe2580
header.magic2 = 0x94119c00
header.shifted_sample_rate_id = int(getattr(SampleRate, '_%s' % self.sample_rate_hz)) << 27
header.fixed_value = ((15 << 19) + 0x40000)
header.number_samples_plus_divided_sample_rate = int(self.number_samples + self.sample_rate_hz * 0.24)
contents_buf = BytesIO()
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items()):
peaks_buf = BytesIO()
fft_pass_number = 0
# NOTE: Correctly filtering and sorting the peaks within the members
# of "self.frequency_band_to_sound_peaks" is the responsability of the
# caller
for frequency_peak in frequency_peaks:
assert frequency_peak.fft_pass_number >= fft_pass_number
if | get_seconds | identifier_name |
signature_format.py | 1
_1450_3500 = 2
_3500_5500 = 3 # This one (3.5 KHz - 5.5 KHz) should not be used in legacy mode
class RawSignatureHeader(LittleEndianStructure):
_pack = True
_fields_ = [
('magic1', c_uint32), # Fixed 0xcafe2580 - 80 25 fe ca
('crc32', c_uint32), # CRC-32 for all of the following (so excluding these first 8 bytes)
('size_minus_header', c_uint32), # Total size of the message, minus the size of the current header (which is 48 bytes)
('magic2', c_uint32), # Fixed 0x94119c00 - 00 9c 11 94
('void1', c_uint32 * 3), # Void
('shifted_sample_rate_id', c_uint32), # A member of SampleRate (usually 3 for 16000 Hz), left-shifted by 27 (usually giving 0x18000000 - 00 00 00 18)
('void2', c_uint32 * 2), # Void, or maybe used only in "rolling window" mode?
('number_samples_plus_divided_sample_rate', c_uint32), # int(number_of_samples + sample_rate * 0.24) - As the sample rate is known thanks to the field above, it can be inferred and subtracted so that we obtain the number of samples, and from the number of samples and sample rate we can obtain the length of the recording
('fixed_value', c_uint32) # Calculated as ((15 << 19) + 0x40000) - 0x7c0000 or 00 00 7c 00 - seems pretty constant, may be different in the "SigType.STREAMING" mode
]
class FrequencyPeak:
fft_pass_number : int = None
peak_magnitude : int = None
corrected_peak_frequency_bin : int = None
sample_rate_hz : int = None
def __init__(self, fft_pass_number : int, peak_magnitude : int, corrected_peak_frequency_bin : int, sample_rate_hz : int):
self.fft_pass_number = fft_pass_number
self.peak_magnitude = peak_magnitude
self.corrected_peak_frequency_bin = corrected_peak_frequency_bin
self.sample_rate_hz = sample_rate_hz
def get_frequency_hz(self) -> float:
return self.corrected_peak_frequency_bin * (self.sample_rate_hz / 2 / 1024 / 64)
# ^ Convert an FFT bin back to a frequency, given a 16 KHz sample
# rate, 1024 useful bins and the multiplication by 64 made before
# storing the information
def get_amplitude_pcm(self) -> float:
return sqrt(exp((self.peak_magnitude - 6144) / 1477.3) * (1 << 17) / 2) / 1024
# ^ Not sure about this calculation but gives small enough numbers
def get_seconds(self) -> float:
return (self.fft_pass_number * 128) / self.sample_rate_hz
# ^ Assume that new FFT bins are emitted every 128 samples, on a
# standard 16 KHz sample rate basis.
class DecodedMessage:
sample_rate_hz : int = None
number_samples : int = None
frequency_band_to_sound_peaks : Dict[FrequencyBand, List[FrequencyPeak]] = None
@classmethod
def decode_from_binary(cls, data : bytes):
self = cls()
buf = BytesIO(data)
buf.seek(8)
checksummable_data = buf.read()
buf.seek(0)
# Read and check the header
header = RawSignatureHeader()
buf.readinto(header)
assert header.magic1 == 0xcafe2580
assert header.size_minus_header == len(data) - 48
assert crc32(checksummable_data) & 0xffffffff == header.crc32
assert header.magic2 == 0x94119c00
self.sample_rate_hz = int(SampleRate(header.shifted_sample_rate_id >> 27).name.strip('_'))
self.number_samples = int(header.number_samples_plus_divided_sample_rate - self.sample_rate_hz * 0.24)
# Read the type-length-value sequence that follows the header
# The first chunk is fixed and has no value, but instead just repeats
# the length of the message size minus the header:
assert int.from_bytes(buf.read(4), 'little') == 0x40000000
assert int.from_bytes(buf.read(4), 'little') == len(data) - 48
# Then, lists of frequency peaks for respective bands follow
self.frequency_band_to_sound_peaks = {}
while True:
tlv_header = buf.read(8)
if not tlv_header:
break
frequency_band_id = int.from_bytes(tlv_header[:4], 'little')
frequency_peaks_size = int.from_bytes(tlv_header[4:], 'little')
frequency_peaks_padding = -frequency_peaks_size % 4
frequency_peaks_buf = BytesIO(buf.read(frequency_peaks_size))
buf.read(frequency_peaks_padding)
# Decode frequency peaks
frequency_band = FrequencyBand(frequency_band_id - 0x60030040)
fft_pass_number = 0
self.frequency_band_to_sound_peaks[frequency_band] = []
while True:
raw_fft_pass : bytes = frequency_peaks_buf.read(1)
if not raw_fft_pass:
break
fft_pass_offset : int = raw_fft_pass[0]
if fft_pass_offset == 0xff:
fft_pass_number = int.from_bytes(frequency_peaks_buf.read(4), 'little')
continue
else:
fft_pass_number += fft_pass_offset
peak_magnitude = int.from_bytes(frequency_peaks_buf.read(2), 'little')
corrected_peak_frequency_bin = int.from_bytes(frequency_peaks_buf.read(2), 'little')
self.frequency_band_to_sound_peaks[frequency_band].append(
FrequencyPeak(fft_pass_number, peak_magnitude, corrected_peak_frequency_bin, self.sample_rate_hz)
)
return self
@classmethod
def decode_from_uri(cls, uri : str):
assert uri.startswith(DATA_URI_PREFIX)
return cls.decode_from_binary(b64decode(uri.replace(DATA_URI_PREFIX, '', 1)))
"""
Encode the current object to a readable JSON format, for debugging
purposes.
"""
def encode_to_json(self) -> dict:
return {
"sample_rate_hz": self.sample_rate_hz,
"number_samples": self.number_samples,
"_seconds": self.number_samples / self.sample_rate_hz,
"frequency_band_to_peaks": {
frequency_band.name.strip('_'): [
{
"fft_pass_number": frequency_peak.fft_pass_number,
"peak_magnitude": frequency_peak.peak_magnitude,
"corrected_peak_frequency_bin": frequency_peak.corrected_peak_frequency_bin,
"_frequency_hz": frequency_peak.get_frequency_hz(),
"_amplitude_pcm": frequency_peak.get_amplitude_pcm(),
"_seconds": frequency_peak.get_seconds()
}
for frequency_peak in frequency_peaks
]
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items())
}
}
def encode_to_binary(self) -> bytes:
header = RawSignatureHeader()
header.magic1 = 0xcafe2580
header.magic2 = 0x94119c00
header.shifted_sample_rate_id = int(getattr(SampleRate, '_%s' % self.sample_rate_hz)) << 27
header.fixed_value = ((15 << 19) + 0x40000)
header.number_samples_plus_divided_sample_rate = int(self.number_samples + self.sample_rate_hz * 0.24)
contents_buf = BytesIO()
for frequency_band, frequency_peaks in sorted(self.frequency_band_to_sound_peaks.items()):
| peaks_buf = BytesIO()
fft_pass_number = 0
# NOTE: Correctly filtering and sorting the peaks within the members
# of "self.frequency_band_to_sound_peaks" is the responsability of the
# caller
for frequency_peak in frequency_peaks:
assert frequency_peak.fft_pass_number >= fft_pass_number
if frequency_peak.fft_pass_number - fft_pass_number >= 255:
peaks_buf.write(b'\xff')
peaks_buf.write((frequency_peak.fft_pass_number).to_bytes(4, 'little'))
fft_pass_number = frequency_peak.fft_pass_number
peaks_buf.write(bytes([frequency_peak.fft_pass_number - fft_pass_number])) | conditional_block |
|
mp3.py | None,
'deselected_color': background}
def __init__(self, **kwargs):
global RootApp
super(Mp3PiAppLayout, self).__init__(**kwargs)
RootApp = self
self.ids['search_results_list'].adapter.bind(on_selection_change=self.change_selection)
self.ids.volume_slider.value = Alsa.get_mixer("", {})
# XXX validate!!
#self.ids.volume_slider.value = 0# int(subprocess.check_output(["pulseaudio-ctl", "full-status"]).split(" ")[0])
self.statusthread = threading.Thread(target=self.status_thread)
self.statusthread.daemon = True
self.statusthread.start()
def change_volume(self, args):
#os.system("amixer set Master %s%%" % int(args))
#os.system("pactl set-sink-volume bluez_sink.0C_A6_94_E3_76_DA %s%%" % int(args))
Alsa.set_mixer("", int(args), {})
#os.system("pulseaudio-ctl set %s%%" % int(args))
def change_selection(self, args):
if args.selection:
self.change_image(args.selection[0].text)
self.stop_second_thread()
self.start_second_thread(Stations.getStreamURLbyName(args.selection[0].text))
else:
self.stop_second_thread()
def stop_second_thread(self):
if self.isPlaying == True: # stop playing
if self.proc is not None:
if self.mythread.isAlive():
print("set stop")
self.stop.set()
#self.proc.kill() ??
Logger.info("mpg123: killing %s" % self.proc.pid)
os.kill(self.proc.pid, SIGTERM)
self.proc = None
self.isPlaying = False
def start_second_thread(self, l_text):
if self.isPlaying == False:
Logger.info("Player: starting player " + l_text)
self.isPlaying = True
self.mythread = threading.Thread(target=self.infinite_loop, args=(l_text,))
self.mythread.daemon = True
self.mythread.start()
else:
|
def infinite_loop(self, url):
iteration = 0
self.proc = subprocess.Popen(["mpg123","-o", "alsa", "-@", url], stderr=subprocess.PIPE, bufsize = 0)
line = []
while True:
if self.stop.is_set():
Logger.info("Player: stopping thread")
self.stop.clear()
return
while (select.select([self.proc.stderr], [], [], 0)[0]):
# check if mpg123 has died
#print(self.proc.returncode)
#print(self.proc.pid)
if self.proc.returncode is not None:
print("died")
return
if self.stop.is_set():
Logger.info("Player: stopping thread")
self.stop.clear()
return
char = self.proc.stderr.read(1)
if char != '\n':
line.append(char)
else:
line_joined = "".join(line)
Logger.info("MPG123: says %s " % line_joined)
if "ICY-META: StreamTitle=" in line_joined:
pairs = {}
elements = line_joined.split(";")
for element in elements:
if element:
res = re.search(r"([A-Za-z]*)='(.*)'", element)
pairs[res.group(1)] = res.group(2)
self.ids.icytags.text = pairs['StreamTitle']
if "ICY-NAME: " in line_joined:
Logger.debug("ICYTAGS: ICY name found: %s " % line_joined.replace("ICY-NAME: ", ""))
if "ICY-URL: " in line_joined:
Logger.debug("ICYTAGS: ICY url found: %s " % line_joined.replace("ICY-URL: ", ""))
if "ICY-META: StreamTitle=" in line_joined:
Logger.debug("ICYTAGS: ICY StreamTitle found: %s " % line_joined.replace("ICY-META: StreamTitle=", ""))
line = []
iteration += 1
#print('Infinite loop, iteration {}.'.format(iteration))
time.sleep(.1)
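# Example of the metadata parsing above (illustrative stream tags, not real
# mpg123 output): a stderr line such as
#   ICY-META: StreamTitle='Some Artist - Some Title';StreamUrl='';
# is split on ';' and each element is matched against r"([A-Za-z]*)='(.*)'",
# so pairs['StreamTitle'] becomes 'Some Artist - Some Title' and is displayed
# in self.ids.icytags.text.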
def status_thread(self):
global ConfigObject
connection = NMCLI.current_connection()
while True:
if self.statusthread_stop.is_set():
self.statusthread_stop.clear()
return
if not int(time.time()) % 5:
connection = NMCLI.current_connection()
ip = NMCLI.get_ip()
if ip is None:
self.ids.wlanstatus.text = "No network connection"
else:
self.ids.wlanstatus.text = "%s %s%%\n%s\n%s" % (connection.get('SSID', None), connection.get('SIGNAL', None), ip, time.strftime("%H:%M", time.localtime()))
#self.ids.wlanstatus.text = "%s %s%%\n%s" % ("myNetwork", Network.strength, "192.168.47.11")
# wlan symbol
lines = []
for i in self.ids.wlanstatus.canvas.get_group(None)[1:]:
if type(i) is Color:
lines.append(i)
i.a = 1
if connection is not None:
if connection['SIGNAL'] < 50:
for i in lines[0:3]:
i.a = .5
if connection['SIGNAL'] < 60:
for i in lines[0:2]:
i.a = .5
if connection['SIGNAL'] < 70:
for i in lines[0:1]:
i.a = .5
if Stations.no_data == True:
print("no data")
if ConfigObject.get('General', 'playlist') == "radio.de":
Stations.update()
if Stations.no_data == False:
del self.search_results.adapter.data[:]
self.search_results.adapter.data.extend((Stations.data))
if ConfigObject.get('General', 'playlist') == "custom":
Stations.load_playlist("custom")
if Stations.no_data == False:
del self.search_results.adapter.data[:]
self.search_results.adapter.data.extend((Stations.data))
# screensaver
timeout = ConfigObject.get('General', 'screensaver')
if timeout < 60:
timeout = 60
if (time.time() - last_activity_time) > int(timeout):
if ScreenSaver.display_state is True:
Logger.info("ScreenSaver: enabling screensaver")
ScreenSaver.display_off()
else:
if ScreenSaver.display_state is False:
Logger.info("ScreenSaver: disabling screensaver")
ScreenSaver.display_on()
time.sleep(.5)
def change_image(self, station_name):
imageUrl = Stations.getImageUrl(Stations.getIdByName(station_name))
Logger.info("ImageLoader: Loading Image from %s" % (imageUrl))
self.ids.imageid.source = imageUrl
def pause(self):
self.stop.set()
self.search_results.adapter.deselect_list(self.search_results.adapter.selection)
def next(self):
self.stop.set()
#browse(self.search_results.adapter)
if self.search_results.adapter.selection:
index = self.search_results.adapter.selection[0].index
if index < len(self.search_results.adapter.data):
self.search_results.adapter.get_view(index+1).trigger_action(duration=0)
def prev(self):
self.stop.set()
if self.search_results.adapter.selection:
index = self.search_results.adapter.selection[0].index
if index >= 1:
self.search_results.adapter.get_view(index-1).trigger_action(duration=0)
def poweroff(self):
print("poweroff")
os.system("poweroff")
def reboot(self):
print("reboot")
os.system("reboot")
class Mp3PiApp(App):
global last_activity_time, ConfigObject
# initialize GPIO stuff
GPIO.setmode(GPIO.BOARD)
GPIO_PIR = 7
GPIO.setup(GPIO_PIR,GPIO.IN)
def my_callback(channel):
Logger.debug("Presence detector triggered!")
global last_activity_time
last_activity_time = time.time()
GPIO.add_event_detect(GPIO_PIR, GPIO.RISING, callback=my_callback, bouncetime=300)
def build_config(self, config):
config.setdefaults('General', {'screensaver': "60"})
config.setdefaults('General', {'name': "name"})
config.setdefaults('General', {'playlist': "radio.de"})
def build_settings(self, settings):
settings.add_json_panel("General", self.config, data="""
[
{"type": "numeric",
"title": "Screensaver Timeout",
"section": "General",
"key": "screensaver"
},
{"type": "string",
"title": "String",
"section": "General",
"key": "name"
},
{"type": "options",
"title": "Playlist",
"section": "General",
"options": ["radio.de", "custom"],
"key": "playlist"
}
]"""
)
def on_stop(self):
# The Kivy event loop is about to stop, set a stop signal;
# otherwise the | Logger.info("Player: already playing") | conditional_block |
mp3.py | == True: # stop playing
if self.proc is not None:
if self.mythread.isAlive():
print("set stop")
self.stop.set()
#self.proc.kill() ??
Logger.info("mpg123: killing %s" % self.proc.pid)
os.kill(self.proc.pid, SIGTERM)
self.proc = None
self.isPlaying = False
def start_second_thread(self, l_text):
if self.isPlaying == False:
Logger.info("Player: starting player " + l_text)
self.isPlaying = True
self.mythread = threading.Thread(target=self.infinite_loop, args=(l_text,))
self.mythread.daemon = True
self.mythread.start()
else:
Logger.info("Player: already playing")
def infinite_loop(self, url):
iteration = 0
self.proc = subprocess.Popen(["mpg123","-o", "alsa", "-@", url], stderr=subprocess.PIPE, bufsize = 0)
line = []
while True:
if self.stop.is_set():
Logger.info("Player: stopping thread")
self.stop.clear()
return
while (select.select([self.proc.stderr], [], [], 0)[0]):
# check if mpg123 has died
#print(self.proc.returncode)
#print(self.proc.pid)
if self.proc.returncode is not None:
print("died")
return
if self.stop.is_set():
Logger.info("Player: stopping thread")
self.stop.clear()
return
char = self.proc.stderr.read(1)
if char != '\n':
line.append(char)
else:
line_joined = "".join(line)
Logger.info("MPG123: says %s " % line_joined)
if "ICY-META: StreamTitle=" in line_joined:
pairs = {}
elements = line_joined.split(";")
for element in elements:
if element:
res = re.search(r"([A-Za-z]*)='(.*)'", element)
pairs[res.group(1)] = res.group(2)
self.ids.icytags.text = pairs['StreamTitle']
if "ICY-NAME: " in line_joined:
Logger.debug("ICYTAGS: ICY name found: %s " % line_joined.replace("ICY-NAME: ", ""))
if "ICY-URL: " in line_joined:
Logger.debug("ICYTAGS: ICY url found: %s " % line_joined.replace("ICY-URL: ", ""))
if "ICY-META: StreamTitle=" in line_joined:
Logger.debug("ICYTAGS: ICY StreamTitle found: %s " % line_joined.replace("ICY-META: StreamTitle=", ""))
line = []
iteration += 1
#print('Infinite loop, iteration {}.'.format(iteration))
time.sleep(.1)
def status_thread(self):
global ConfigObject
connection = NMCLI.current_connection()
while True:
if self.statusthread_stop.is_set():
self.statusthread_stop.clear()
return
if not int(time.time()) % 5:
connection = NMCLI.current_connection()
ip = NMCLI.get_ip()
if ip is None:
self.ids.wlanstatus.text = "No network connection"
else:
self.ids.wlanstatus.text = "%s %s%%\n%s\n%s" % (connection.get('SSID', None), connection.get('SIGNAL', None), ip, time.strftime("%H:%M", time.localtime()))
#self.ids.wlanstatus.text = "%s %s%%\n%s" % ("myNetwork", Network.strength, "192.168.47.11")
# wlan symbol
lines = []
for i in self.ids.wlanstatus.canvas.get_group(None)[1:]:
if type(i) is Color:
lines.append(i)
i.a = 1
if connection is not None:
if connection['SIGNAL'] < 50:
for i in lines[0:3]:
i.a = .5
if connection['SIGNAL'] < 60:
for i in lines[0:2]:
i.a = .5
if connection['SIGNAL'] < 70:
for i in lines[0:1]:
i.a = .5
if Stations.no_data == True:
print("no data")
if ConfigObject.get('General', 'playlist') == "radio.de":
Stations.update()
if Stations.no_data == False:
del self.search_results.adapter.data[:]
self.search_results.adapter.data.extend((Stations.data))
if ConfigObject.get('General', 'playlist') == "custom":
Stations.load_playlist("custom")
if Stations.no_data == False:
del self.search_results.adapter.data[:]
self.search_results.adapter.data.extend((Stations.data))
# screensaver
timeout = ConfigObject.get('General', 'screensaver')
if timeout < 60:
timeout = 60
if (time.time() - last_activity_time) > int(timeout):
if ScreenSaver.display_state is True:
Logger.info("ScreenSaver: enabling screensaver")
ScreenSaver.display_off()
else:
if ScreenSaver.display_state is False:
Logger.info("ScreenSaver: disabling screensaver")
ScreenSaver.display_on()
time.sleep(.5)
def change_image(self, station_name):
imageUrl = Stations.getImageUrl(Stations.getIdByName(station_name))
Logger.info("ImageLoader: Loading Image from %s" % (imageUrl))
self.ids.imageid.source = imageUrl
def pause(self):
self.stop.set()
self.search_results.adapter.deselect_list(self.search_results.adapter.selection)
def next(self):
self.stop.set()
#browse(self.search_results.adapter)
if self.search_results.adapter.selection:
index = self.search_results.adapter.selection[0].index
if index < len(self.search_results.adapter.data):
self.search_results.adapter.get_view(index+1).trigger_action(duration=0)
def prev(self):
self.stop.set()
if self.search_results.adapter.selection:
index = self.search_results.adapter.selection[0].index
if index >= 1:
self.search_results.adapter.get_view(index-1).trigger_action(duration=0)
def poweroff(self):
print("poweroff")
os.system("poweroff")
def reboot(self):
print("reboot")
os.system("reboot")
class Mp3PiApp(App):
global last_activity_time, ConfigObject
# initialize GPIO stuff
GPIO.setmode(GPIO.BOARD)
GPIO_PIR = 7
GPIO.setup(GPIO_PIR,GPIO.IN)
def my_callback(channel):
Logger.debug("Presence detector triggered!")
global last_activity_time
last_activity_time = time.time()
GPIO.add_event_detect(GPIO_PIR, GPIO.RISING, callback=my_callback, bouncetime=300)
def build_config(self, config):
config.setdefaults('General', {'screensaver': "60"})
config.setdefaults('General', {'name': "name"})
config.setdefaults('General', {'playlist': "radio.de"})
def build_settings(self, settings):
settings.add_json_panel("General", self.config, data="""
[
{"type": "numeric",
"title": "Screensaver Timeout",
"section": "General",
"key": "screensaver"
},
{"type": "string",
"title": "String",
"section": "General",
"key": "name"
},
{"type": "options",
"title": "Playlist",
"section": "General",
"options": ["radio.de", "custom"],
"key": "playlist"
}
]"""
)
def on_stop(self):
# The Kivy event loop is about to stop, set a stop signal;
# otherwise the app window will close, but the Python process will
# keep running until all secondary threads exit.
#layout.clear_widgets()
#browse(self)
True
#main = self.root.manager.get_screen('main').layout
#main.stop.set()
#self.root.stop.set()
#self.root.statusthread_stop.set()
def build(self):
global last_activity_time, ConfigObject
#sm = ScreenManager(transition=FadeTransition())
self.settings_cls = MySettingsWithTabbedPanel
from kivy.core.window import Window
# Window.size = (800, 480)
def on_motion(self, etype, motionevent):
global last_activity_time
last_activity_time = time.time()
Window.bind(on_motion=on_motion)
ConfigObject = self.config
sm = ScreenManager()
sm.add_widget(Mp3PiAppLayout())
sm.add_widget(SettingsScreen())
return sm
#return Mp3PiAppLayout()
class SettingsScreen(Screen):
def __init__(self, **kwargs):
super(SettingsScreen, self).__init__(**kwargs)
networklist = []
# for net in Network.visible_aps:
# networklist.append(net['ssid']) | # if net['ssid'] is Network.ssid:
# self.ids['wlan_list'].text = net[Network.ssid]
# self.ids['wlan_list'].values = networklist | random_line_split |
|
mp3.py | : stopping thread")
self.stop.clear()
return
while (select.select([self.proc.stderr], [], [], 0)[0]):
# check if mpg123 has died
#print(self.proc.returncode)
#print(self.proc.pid)
if self.proc.returncode is not None:
print("died")
return
if self.stop.is_set():
Logger.info("Player: stopping thread")
self.stop.clear()
return
char = self.proc.stderr.read(1)
if char != '\n':
line.append(char)
else:
line_joined = "".join(line)
Logger.info("MPG123: says %s " % line_joined)
if "ICY-META: StreamTitle=" in line_joined:
pairs = {}
elements = line_joined.split(";")
for element in elements:
if element:
res = re.search(r"([A-Za-z]*)='(.*)'", element)
pairs[res.group(1)] = res.group(2)
self.ids.icytags.text = pairs['StreamTitle']
if "ICY-NAME: " in line_joined:
Logger.debug("ICYTAGS: ICY name found: %s " % line_joined.replace("ICY-NAME: ", ""))
if "ICY-URL: " in line_joined:
Logger.debug("ICYTAGS: ICY url found: %s " % line_joined.replace("ICY-URL: ", ""))
if "ICY-META: StreamTitle=" in line_joined:
Logger.debug("ICYTAGS: ICY StreamTitle found: %s " % line_joined.replace("ICY-META: StreamTitle=", ""))
line = []
iteration += 1
#print('Infinite loop, iteration {}.'.format(iteration))
time.sleep(.1)
def status_thread(self):
global ConfigObject
connection = NMCLI.current_connection()
while True:
if self.statusthread_stop.is_set():
self.statusthread_stop.clear()
return
if not int(time.time()) % 5:
connection = NMCLI.current_connection()
ip = NMCLI.get_ip()
if ip is None:
self.ids.wlanstatus.text = "No network connection"
else:
self.ids.wlanstatus.text = "%s %s%%\n%s\n%s" % (connection.get('SSID', None), connection.get('SIGNAL', None), ip, time.strftime("%H:%M", time.localtime()))
#self.ids.wlanstatus.text = "%s %s%%\n%s" % ("myNetwork", Network.strength, "192.168.47.11")
# wlan symbol
lines = []
for i in self.ids.wlanstatus.canvas.get_group(None)[1:]:
if type(i) is Color:
lines.append(i)
i.a = 1
if connection is not None:
if connection['SIGNAL'] < 50:
for i in lines[0:3]:
i.a = .5
if connection['SIGNAL'] < 60:
for i in lines[0:2]:
i.a = .5
if connection['SIGNAL'] < 70:
for i in lines[0:1]:
i.a = .5
if Stations.no_data == True:
print("no data")
if ConfigObject.get('General', 'playlist') == "radio.de":
Stations.update()
if Stations.no_data == False:
del self.search_results.adapter.data[:]
self.search_results.adapter.data.extend((Stations.data))
if ConfigObject.get('General', 'playlist') == "custom":
Stations.load_playlist("custom")
if Stations.no_data == False:
del self.search_results.adapter.data[:]
self.search_results.adapter.data.extend((Stations.data))
# screensaver
timeout = ConfigObject.get('General', 'screensaver')
if timeout < 60:
timeout = 60
if (time.time() - last_activity_time) > int(timeout):
if ScreenSaver.display_state is True:
Logger.info("ScreenSaver: enabling screensaver")
ScreenSaver.display_off()
else:
if ScreenSaver.display_state is False:
Logger.info("ScreenSaver: disabling screensaver")
ScreenSaver.display_on()
time.sleep(.5)
def change_image(self, station_name):
imageUrl = Stations.getImageUrl(Stations.getIdByName(station_name))
Logger.info("ImageLoader: Loading Image from %s" % (imageUrl))
self.ids.imageid.source = imageUrl
def pause(self):
self.stop.set()
self.search_results.adapter.deselect_list(self.search_results.adapter.selection)
def next(self):
self.stop.set()
#browse(self.search_results.adapter)
if self.search_results.adapter.selection:
index = self.search_results.adapter.selection[0].index
if index < len(self.search_results.adapter.data):
self.search_results.adapter.get_view(index+1).trigger_action(duration=0)
def prev(self):
self.stop.set()
if self.search_results.adapter.selection:
index = self.search_results.adapter.selection[0].index
if index >= 1:
self.search_results.adapter.get_view(index-1).trigger_action(duration=0)
def poweroff(self):
print("poweroff")
os.system("poweroff")
def reboot(self):
print("reboot")
os.system("reboot")
class Mp3PiApp(App):
global last_activity_time, ConfigObject
# initialize GPIO stuff
GPIO.setmode(GPIO.BOARD)
GPIO_PIR = 7
GPIO.setup(GPIO_PIR,GPIO.IN)
def my_callback(channel):
Logger.debug("Presence detector triggered!")
global last_activity_time
last_activity_time = time.time()
GPIO.add_event_detect(GPIO_PIR, GPIO.RISING, callback=my_callback, bouncetime=300)
def build_config(self, config):
config.setdefaults('General', {'screensaver': "60"})
config.setdefaults('General', {'name': "name"})
config.setdefaults('General', {'playlist': "radio.de"})
def build_settings(self, settings):
settings.add_json_panel("General", self.config, data="""
[
{"type": "numeric",
"title": "Screensaver Timeout",
"section": "General",
"key": "screensaver"
},
{"type": "string",
"title": "String",
"section": "General",
"key": "name"
},
{"type": "options",
"title": "Playlist",
"section": "General",
"options": ["radio.de", "custom"],
"key": "playlist"
}
]"""
)
def on_stop(self):
# The Kivy event loop is about to stop, set a stop signal;
# otherwise the app window will close, but the Python process will
# keep running until all secondary threads exit.
#layout.clear_widgets()
#browse(self)
True
#main = self.root.manager.get_screen('main').layout
#main.stop.set()
#self.root.stop.set()
#self.root.statusthread_stop.set()
def build(self):
global last_activity_time, ConfigObject
#sm = ScreenManager(transition=FadeTransition())
self.settings_cls = MySettingsWithTabbedPanel
from kivy.core.window import Window
# Window.size = (800, 480)
def on_motion(self, etype, motionevent):
global last_activity_time
last_activity_time = time.time()
Window.bind(on_motion=on_motion)
ConfigObject = self.config
sm = ScreenManager()
sm.add_widget(Mp3PiAppLayout())
sm.add_widget(SettingsScreen())
return sm
#return Mp3PiAppLayout()
class SettingsScreen(Screen):
def __init__(self, **kwargs):
super(SettingsScreen, self).__init__(**kwargs)
networklist = []
# for net in Network.visible_aps:
# networklist.append(net['ssid'])
# if net['ssid'] is Network.ssid:
# self.ids['wlan_list'].text = net[Network.ssid]
# self.ids['wlan_list'].values = networklist
# self.ids['wlan_list'].bind(text=self.change_wlan_selection)
def change_wlan_selection(self, spinner, args):
Logger.info("WLAN: user selection %s" % args)
# Logger.info("WLAN: current WLAN %s" % Network.ssid)
# if args != Network.ssid:
# Logger.info("WLAN: changing WLAN to %s" % args)
# Network.activate([args])
def signal_handler(signal, frame):
print("exit");
sys.exit(0);
class HTTPHandler(BaseHTTPRequestHandler):
| global RootApp
#print(Mp3PiAppClass)
def do_GET(self):
if self.path == "/":
self.page = markup.page()
self.page.init(title="Title")
self.page.table(border="true")
firstline = True
for row in RootApp.search_results.adapter.data:
if firstline is True:
self.page.tr()
for column in row:
#pdb.set_trace()
string1 = column
if isinstance(column, float):
string1 = str(column) | identifier_body |
|
mp3.py | None,
'deselected_color': background}
def __init__(self, **kwargs):
global RootApp
super(Mp3PiAppLayout, self).__init__(**kwargs)
RootApp = self
self.ids['search_results_list'].adapter.bind(on_selection_change=self.change_selection)
self.ids.volume_slider.value = Alsa.get_mixer("", {})
# XXX validate!!
#self.ids.volume_slider.value = 0# int(subprocess.check_output(["pulseaudio-ctl", "full-status"]).split(" ")[0])
self.statusthread = threading.Thread(target=self.status_thread)
self.statusthread.daemon = True
self.statusthread.start()
def change_volume(self, args):
#os.system("amixer set Master %s%%" % int(args))
#os.system("pactl set-sink-volume bluez_sink.0C_A6_94_E3_76_DA %s%%" % int(args))
Alsa.set_mixer("", int(args), {})
#os.system("pulseaudio-ctl set %s%%" % int(args))
def change_selection(self, args):
if args.selection:
self.change_image(args.selection[0].text)
self.stop_second_thread()
self.start_second_thread(Stations.getStreamURLbyName(args.selection[0].text))
else:
self.stop_second_thread()
def stop_second_thread(self):
if self.isPlaying == True: # stop playing
if self.proc is not None:
if self.mythread.isAlive():
print("set stop")
self.stop.set()
#self.proc.kill() ??
Logger.info("mpg123: killing %s" % self.proc.pid)
os.kill(self.proc.pid, SIGTERM)
self.proc = None
self.isPlaying = False
def start_second_thread(self, l_text):
if self.isPlaying == False:
Logger.info("Player: starting player " + l_text)
self.isPlaying = True
self.mythread = threading.Thread(target=self.infinite_loop, args=(l_text,))
self.mythread.daemon = True
self.mythread.start()
else:
Logger.info("Player: already playing")
def infinite_loop(self, url):
iteration = 0
self.proc = subprocess.Popen(["mpg123","-o", "alsa", "-@", url], stderr=subprocess.PIPE, bufsize = 0)
line = []
while True:
if self.stop.is_set():
Logger.info("Player: stopping thread")
self.stop.clear()
return
while (select.select([self.proc.stderr], [], [], 0)[0]):
# check if mpg123 has died
#print(self.proc.returncode)
#print(self.proc.pid)
if self.proc.returncode is not None:
print("died")
return
if self.stop.is_set():
Logger.info("Player: stopping thread")
self.stop.clear()
return
char = self.proc.stderr.read(1)
if char != '\n':
line.append(char)
else:
line_joined = "".join(line)
Logger.info("MPG123: says %s " % line_joined)
if "ICY-META: StreamTitle=" in line_joined:
pairs = {}
elements = line_joined.split(";")
for element in elements:
if element:
res = re.search(r"([A-Za-z]*)='(.*)'", element)
pairs[res.group(1)] = res.group(2)
self.ids.icytags.text = pairs['StreamTitle']
if "ICY-NAME: " in line_joined:
Logger.debug("ICYTAGS: ICY name found: %s " % line_joined.replace("ICY-NAME: ", ""))
if "ICY-URL: " in line_joined:
Logger.debug("ICYTAGS: ICY url found: %s " % line_joined.replace("ICY-URL: ", ""))
if "ICY-META: StreamTitle=" in line_joined:
Logger.debug("ICYTAGS: ICY StreamTitle found: %s " % line_joined.replace("ICY-META: StreamTitle=", ""))
line = []
iteration += 1
#print('Infinite loop, iteration {}.'.format(iteration))
time.sleep(.1)
def | (self):
global ConfigObject
connection = NMCLI.current_connection()
while True:
if self.statusthread_stop.is_set():
self.statusthread_stop.clear()
return
if not int(time.time()) % 5:
connection = NMCLI.current_connection()
ip = NMCLI.get_ip()
if ip is None:
self.ids.wlanstatus.text = "No network connection"
else:
self.ids.wlanstatus.text = "%s %s%%\n%s\n%s" % (connection.get('SSID', None), connection.get('SIGNAL', None), ip, time.strftime("%H:%M", time.localtime()))
#self.ids.wlanstatus.text = "%s %s%%\n%s" % ("myNetwork", Network.strength, "192.168.47.11")
# wlan symbol
lines = []
for i in self.ids.wlanstatus.canvas.get_group(None)[1:]:
if type(i) is Color:
lines.append(i)
i.a = 1
if connection is not None:
if connection['SIGNAL'] < 50:
for i in lines[0:3]:
i.a = .5
if connection['SIGNAL'] < 60:
for i in lines[0:2]:
i.a = .5
if connection['SIGNAL'] < 70:
for i in lines[0:1]:
i.a = .5
if Stations.no_data == True:
print("no data")
if ConfigObject.get('General', 'playlist') == "radio.de":
Stations.update()
if Stations.no_data == False:
del self.search_results.adapter.data[:]
self.search_results.adapter.data.extend((Stations.data))
if ConfigObject.get('General', 'playlist') == "custom":
Stations.load_playlist("custom")
if Stations.no_data == False:
del self.search_results.adapter.data[:]
self.search_results.adapter.data.extend((Stations.data))
# screensaver
timeout = ConfigObject.get('General', 'screensaver')
if timeout < 60:
timeout = 60
if (time.time() - last_activity_time) > int(timeout):
if ScreenSaver.display_state is True:
Logger.info("ScreenSaver: enabling screensaver")
ScreenSaver.display_off()
else:
if ScreenSaver.display_state is False:
Logger.info("ScreenSaver: disabling screensaver")
ScreenSaver.display_on()
time.sleep(.5)
def change_image(self, station_name):
imageUrl = Stations.getImageUrl(Stations.getIdByName(station_name))
Logger.info("ImageLoader: Loading Image from %s" % (imageUrl))
self.ids.imageid.source = imageUrl
def pause(self):
self.stop.set()
self.search_results.adapter.deselect_list(self.search_results.adapter.selection)
def next(self):
self.stop.set()
#browse(self.search_results.adapter)
if self.search_results.adapter.selection:
index = self.search_results.adapter.selection[0].index
if index < len(self.search_results.adapter.data):
self.search_results.adapter.get_view(index+1).trigger_action(duration=0)
def prev(self):
self.stop.set()
if self.search_results.adapter.selection:
index = self.search_results.adapter.selection[0].index
if index >= 1:
self.search_results.adapter.get_view(index-1).trigger_action(duration=0)
def poweroff(self):
print("poweroff")
os.system("poweroff")
def reboot(self):
print("reboot")
os.system("reboot")
class Mp3PiApp(App):
global last_activity_time, ConfigObject
# initialize GPIO stuff
GPIO.setmode(GPIO.BOARD)
GPIO_PIR = 7
GPIO.setup(GPIO_PIR,GPIO.IN)
def my_callback(channel):
Logger.debug("Presence detector triggered!")
global last_activity_time
last_activity_time = time.time()
GPIO.add_event_detect(GPIO_PIR, GPIO.RISING, callback=my_callback, bouncetime=300)
def build_config(self, config):
config.setdefaults('General', {'screensaver': "60"})
config.setdefaults('General', {'name': "name"})
config.setdefaults('General', {'playlist': "radio.de"})
def build_settings(self, settings):
settings.add_json_panel("General", self.config, data="""
[
{"type": "numeric",
"title": "Screensaver Timeout",
"section": "General",
"key": "screensaver"
},
{"type": "string",
"title": "String",
"section": "General",
"key": "name"
},
{"type": "options",
"title": "Playlist",
"section": "General",
"options": ["radio.de", "custom"],
"key": "playlist"
}
]"""
)
def on_stop(self):
# The Kivy event loop is about to stop, set a stop signal;
# otherwise | status_thread | identifier_name |
base.py | ElementSymbolError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return ('Expecting an atomic symbol (e.g. Fe) - supplied {0}').format(
self.malformed_element_symbol)
class MalformedQuantityError(MalformedError):
def __init__(self, malformed_quantity_string):
self.malformed_quantity_string = malformed_quantity_string
def __str__(self):
return ('Expecting a quantity string(e.g. "5 km/s") for keyword '
'- supplied {0}').format(self.malformed_quantity_string)
def int_to_roman(i):
"""
Convert an integer into its roman numeral representation.
Parameters
----------
i : int
Integer to be converted into roman numerals
Returns
-------
str
Returns roman numeral representation of i in str format.
"""
result = []
for integer, numeral in NUMERAL_MAP:
count = i // integer
result.append(numeral * count)
i -= integer * count
return ''.join(result)
def roman_to_int(roman_string):
"""
Convert a roman numeral into its corresponding integer.
Parameters
----------
roman_string : str
Roman numeral to be converted into an integer
Returns
-------
int
Returns integer representation of roman_string
"""
NUMERALS_SET = set(list(zip(*NUMERAL_MAP))[1])
roman_string = roman_string.upper()
if len(set(list(roman_string.upper())) - NUMERALS_SET) != 0:
raise ValueError('{0} does not seem to be a roman numeral'.format(
roman_string))
i = result = 0
for integer, numeral in NUMERAL_MAP:
while roman_string[i:i + len(numeral)] == numeral:
result += integer
i += len(numeral)
if result < 1:
raise ValueError('Can not interpret Roman Numeral {0}'.format(roman_string))
return result
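The two converters above depend on a module-level NUMERAL_MAP that is not shown in this excerpt. The short sketch below is illustrative only: it assumes NUMERAL_MAP is a value-descending list of (integer, numeral) pairs and checks one round trip.

NUMERAL_MAP_EXAMPLE = [
    (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
    (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
    (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
]

def _int_to_roman_sketch(i):
    # Same greedy algorithm as int_to_roman above, using the assumed map.
    result = []
    for integer, numeral in NUMERAL_MAP_EXAMPLE:
        count = i // integer
        result.append(numeral * count)
        i -= integer * count
    return ''.join(result)

def _roman_to_int_sketch(roman_string):
    # Greedy scan mirroring roman_to_int above (validation omitted).
    i = result = 0
    for integer, numeral in NUMERAL_MAP_EXAMPLE:
        while roman_string[i:i + len(numeral)] == numeral:
            result += integer
            i += len(numeral)
    return result

assert _int_to_roman_sketch(1987) == 'MCMLXXXVII'
assert _roman_to_int_sketch('MCMLXXXVII') == 1987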
def calculate_luminosity(
spec_fname, distance, wavelength_column=0,
wavelength_unit=u.angstrom, flux_column=1,
flux_unit=u.Unit('erg / (Angstrom cm2 s)')):
"""
Calculates luminosity of star.
Parameters
----------
spec_fname : file or str
File or file name to be read
distance : float
Distance to star
wavelength_column : int, optional(default = 0)
Column index in which the wavelength is stored
wavelength_unit : float, optional(default = u.angstrom)
Dictates units used for calculating wavelength.
flux_column : int, optional(default = 1)
Column index in which the flux is stored
flux_unit : str, optional(default = u.Unit('erg / (Angstrom cm2 s)')
Dictates units used for flux
Returns
-------
luminosity.value : float
Returned luminosity value of star.
wavelength.min() : float
Minimum value of wavelength of light
wavelength.max() : float
Maximum value of wavelength of light
"""
#BAD STYLE change to parse quantity
distance = u.Unit(distance)
wavelength, flux = np.loadtxt(spec_fname, usecols=(wavelength_column, flux_column), unpack=True)
flux_density = np.trapz(flux, wavelength) * (flux_unit * wavelength_unit)
luminosity = (flux_density * 4 * np.pi * distance**2).to('erg/s')
return luminosity.value, wavelength.min(), wavelength.max()
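A minimal sketch of the integration step in calculate_luminosity, run on synthetic arrays instead of a spectrum file; the unit strings mirror the defaults above and the 10 Mpc distance is purely illustrative.

import numpy as np
from astropy import units as u

# Synthetic flat spectrum: 1e-13 erg / (Angstrom cm2 s) from 4000 to 7000 Angstrom.
wavelength = np.linspace(4000.0, 7000.0, 100)
flux = np.full_like(wavelength, 1e-13)
flux_unit = u.Unit('erg / (Angstrom cm2 s)')
distance = 10 * u.Mpc  # illustrative value only

flux_density = np.trapz(flux, wavelength) * (flux_unit * u.angstrom)
luminosity = (flux_density * 4 * np.pi * distance ** 2).to('erg/s')
# For these synthetic values the result is on the order of 1e42-1e43 erg/s.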
def create_synpp_yaml(radial1d_mdl, fname, shell_no=0, lines_db=None):
"""
Create a yaml file that is readable from syn++
Parameters
----------
radial1d_mdl : Radial1DModel
Inputted object that will be read into YAML file
fname : str
File name for the synpp yaml
shell_no : int, optional(default = 0)
Number of shells
lines_db : file, optional(default = None)
Raises
------
ValueError
If the current dataset does not contain necessary reference files
"""
logger.warning('Currently only works with Si and a special setup')
if radial1d_mdl.atom_data.synpp_refs is None:
raise ValueError(
'The current atom dataset does not contain the '
'necessary reference files (please contact the authors)')
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] = -99.0
for key, value in radial1d_mdl.atom_data.synpp_refs.iterrows():
try:
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'].loc[key] = np.log10(
radial1d_mdl.plasma.tau_sobolevs[0].loc[value['line_id']])
except KeyError:
pass
relevant_synpp_refs = radial1d_mdl.atom_data.synpp_refs[
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] > -50]
with open(synpp_default_yaml_fname) as stream:
yaml_reference = yaml.load(stream, Loader=yaml.CLoader)
if lines_db is not None:
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'lines')
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'refs.dat')
yaml_reference['output']['min_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.min())
yaml_reference['output']['max_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.max())
#raise Exception("there's a problem here with units what units does synpp expect?")
yaml_reference['opacity']['v_ref'] = float(
(radial1d_mdl.tardis_config.structure.v_inner[0].to('km/s') /
(1000. * u.km / u.s)).value)
yaml_reference['grid']['v_outer_max'] = float(
(radial1d_mdl.tardis_config.structure.v_outer[-1].to('km/s') /
(1000. * u.km / u.s)).value)
#pdb.set_trace()
yaml_setup = yaml_reference['setups'][0]
yaml_setup['ions'] = []
yaml_setup['log_tau'] = []
yaml_setup['active'] = []
yaml_setup['temp'] = []
yaml_setup['v_min'] = []
yaml_setup['v_max'] = []
yaml_setup['aux'] = []
for species, synpp_ref in relevant_synpp_refs.iterrows():
yaml_setup['ions'].append(100 * species[0] + species[1])
yaml_setup['log_tau'].append(float(synpp_ref['ref_log_tau']))
yaml_setup['active'].append(True)
yaml_setup['temp'].append(yaml_setup['t_phot'])
yaml_setup['v_min'].append(yaml_reference['opacity']['v_ref'])
yaml_setup['v_max'].append(yaml_reference['grid']['v_outer_max'])
yaml_setup['aux'].append(1e200)
with open(fname, 'w') as f:
yaml.dump(yaml_reference, stream=f, explicit_start=True)
def | (nu, T):
"""
Calculate the intensity of a black-body according to the following formula
.. math::
I(\\nu, T) = \\frac{2h\\nu^3}{c^2}\\frac{1}
{e^{h\\nu \\beta_\\textrm{rad}} - 1}
Parameters
----------
nu : float
Frequency of light
T : float
Temperature in kelvin
Returns
-------
Intensity : float
Returns the intensity of the black body
"""
beta_rad = 1 / (k_B_cgs * T)
coefficient = 2 * h_cgs / c_cgs ** 2
intensity = ne.evaluate('coefficient * nu**3 / '
'(exp(h_cgs * nu * beta_rad) -1 )')
return intensity
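A small usage sketch for intensity_black_body; it assumes k_B_cgs, h_cgs and c_cgs are module-level CGS constants and ne is numexpr, since the imports are outside this excerpt.

import numexpr as ne
from astropy.constants import c, h, k_B

c_cgs = c.cgs.value      # speed of light in cm/s
h_cgs = h.cgs.value      # Planck constant in erg*s
k_B_cgs = k_B.cgs.value  # Boltzmann constant in erg/K

nu = 6.0e14   # Hz, roughly 500 nm
T = 10000.0   # K
beta_rad = 1 / (k_B_cgs * T)
coefficient = 2 * h_cgs / c_cgs ** 2
intensity = ne.evaluate('coefficient * nu**3 / (exp(h_cgs * nu * beta_rad) - 1)')
# intensity is the specific intensity in cgs units (erg s^-1 cm^-2 Hz^-1 sr^-1).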
def species_tuple_to_string(species_tuple, roman_numerals=True):
"""
Convert a species tuple to its corresponding string representation.
Parameters
----------
species_tuple : tuple
Tuple of 2 values indicating atomic number and number of
electrons missing
roman_numerals : bool, optional(default = TRUE)
Indicates whether the returned ion number is in roman numerals
Returns
-------
element_symbol, roman_ion_number : str
Returns corresponding string representation of given tuple
"""
atomic_number, ion_number = species_tuple
element_symbol = ATOMIC_NUMBER2SYMBOL[atomic_number]
if roman_numerals:
roman_ion_number = int_to_roman(ion_number+1)
return '{0} {1}'.format(str(element_symbol), roman_ion_number)
else:
return '{0} {1:d}'.format(element_symbol, ion_number)
def species_string_to_tuple(species_string):
"""
Convert a species string to its corresponding tuple representation
Parameters
----------
species_string : str
String containing species symbol (e.g. Si II, Fe III)
Returns
-------
atomic_number, ion_number : tuple
Returns tuple of length 2 indicating atomic number and ion number
Raises
| intensity_black_body | identifier_name |
base.py | str
File name for the synpp yaml
shell_no : int, optional(default = 0)
Number of shells
lines_db : file, optional(default = None)
Raises
------
ValueError
If the current dataset does not contain necessary reference files
"""
logger.warning('Currently only works with Si and a special setup')
if radial1d_mdl.atom_data.synpp_refs is None:
raise ValueError(
'The current atom dataset does not contain the '
'necessary reference files (please contact the authors)')
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] = -99.0
for key, value in radial1d_mdl.atom_data.synpp_refs.iterrows():
try:
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'].loc[key] = np.log10(
radial1d_mdl.plasma.tau_sobolevs[0].loc[value['line_id']])
except KeyError:
pass
relevant_synpp_refs = radial1d_mdl.atom_data.synpp_refs[
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] > -50]
with open(synpp_default_yaml_fname) as stream:
yaml_reference = yaml.load(stream, Loader=yaml.CLoader)
if lines_db is not None:
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'lines')
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'refs.dat')
yaml_reference['output']['min_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.min())
yaml_reference['output']['max_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.max())
#raise Exception("there's a problem here with units what units does synpp expect?")
yaml_reference['opacity']['v_ref'] = float(
(radial1d_mdl.tardis_config.structure.v_inner[0].to('km/s') /
(1000. * u.km / u.s)).value)
yaml_reference['grid']['v_outer_max'] = float(
(radial1d_mdl.tardis_config.structure.v_outer[-1].to('km/s') /
(1000. * u.km / u.s)).value)
#pdb.set_trace()
yaml_setup = yaml_reference['setups'][0]
yaml_setup['ions'] = []
yaml_setup['log_tau'] = []
yaml_setup['active'] = []
yaml_setup['temp'] = []
yaml_setup['v_min'] = []
yaml_setup['v_max'] = []
yaml_setup['aux'] = []
for species, synpp_ref in relevant_synpp_refs.iterrows():
yaml_setup['ions'].append(100 * species[0] + species[1])
yaml_setup['log_tau'].append(float(synpp_ref['ref_log_tau']))
yaml_setup['active'].append(True)
yaml_setup['temp'].append(yaml_setup['t_phot'])
yaml_setup['v_min'].append(yaml_reference['opacity']['v_ref'])
yaml_setup['v_max'].append(yaml_reference['grid']['v_outer_max'])
yaml_setup['aux'].append(1e200)
with open(fname, 'w') as f:
yaml.dump(yaml_reference, stream=f, explicit_start=True)
def intensity_black_body(nu, T):
"""
Calculate the intensity of a black-body according to the following formula
.. math::
I(\\nu, T) = \\frac{2h\\nu^3}{c^2}\\frac{1}
{e^{h\\nu \\beta_\\textrm{rad}} - 1}
Parameters
----------
nu : float
Frequency of light
T : float
Temperature in kelvin
Returns
-------
Intensity : float
Returns the intensity of the black body
"""
beta_rad = 1 / (k_B_cgs * T)
coefficient = 2 * h_cgs / c_cgs ** 2
intensity = ne.evaluate('coefficient * nu**3 / '
'(exp(h_cgs * nu * beta_rad) -1 )')
return intensity
def species_tuple_to_string(species_tuple, roman_numerals=True):
"""
Convert a species tuple to its corresponding string representation.
Parameters
----------
species_tuple : tuple
Tuple of 2 values indicating atomic number and number of
electrons missing
roman_numerals : bool, optional(default = TRUE)
Indicates whether the returned ion number is in roman numerals
Returns
-------
element_symbol, roman_ion_number : str
Returns corresponding string representation of given tuple
"""
atomic_number, ion_number = species_tuple
element_symbol = ATOMIC_NUMBER2SYMBOL[atomic_number]
if roman_numerals:
roman_ion_number = int_to_roman(ion_number+1)
return '{0} {1}'.format(str(element_symbol), roman_ion_number)
else:
return '{0} {1:d}'.format(element_symbol, ion_number)
def species_string_to_tuple(species_string):
"""
Convert a species string to its corresponding tuple representation
Parameters
----------
species_string : str
String containing species symbol (e.g. Si II, Fe III)
Returns
-------
atomic_number, ion_number : tuple
Returns tuple of length 2 indicating atomic number and ion number
Raises
------
MalformedSpeciesError
If the inputted string does not match the species format
"""
try:
element_symbol, ion_number_string = re.match(r'^(\w+)\s*(\d+)',
species_string).groups()
except AttributeError:
try:
element_symbol, ion_number_string = species_string.split()
except ValueError:
raise MalformedSpeciesError(
'Species string "{0}" is not of format <element_symbol><number>'
' (e.g. Fe 2, Fe2, ..)'.format(species_string))
atomic_number = element_symbol2atomic_number(element_symbol)
try:
ion_number = roman_to_int(ion_number_string)
except ValueError:
try:
ion_number = int(ion_number_string)
except ValueError:
raise MalformedSpeciesError(
"Given ion number ('{}') could not be parsed".format(
ion_number_string))
if ion_number > atomic_number:
raise ValueError(
'Species given does not exist: ion number > atomic number')
return atomic_number, ion_number - 1
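A standalone sketch of just the parsing step used above: the regex covers 'Fe2' / 'Fe 3' style inputs and the split() fallback covers Roman-numeral forms such as 'Si II'.

import re

for s in ('Si II', 'Fe 3', 'Fe2'):
    m = re.match(r'^(\w+)\s*(\d+)', s)
    # 'Si II' has no trailing digits, so the regex fails and split() is used.
    symbol, ion = m.groups() if m else s.split()
    print((symbol, ion))   # ('Si', 'II'), ('Fe', '3'), ('Fe', '2')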
def parse_quantity(quantity_string):
"""
Changes a string into its corresponding astropy.Quantity object.
Parameters
----------
quantity_string : str
String to be converted into astropy.Quantity
Returns
-------
q : ~u.Quantity
Corresponding astropy.Quantity object for passed string
Raises
------
MalformedQuantityError
If string is not properly formatted for Astropy Quantity
"""
if not isinstance(quantity_string, str):
raise MalformedQuantityError(quantity_string)
try:
value_string, unit_string = quantity_string.split()
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
value = float(value_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
q = u.Quantity(value, unit_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
return q
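A short usage sketch for the '<value> <unit>' format that parse_quantity expects, using astropy directly.

from astropy import units as u

value_string, unit_string = '5 km/s'.split()
q = u.Quantity(float(value_string), unit_string)
print(q)  # 5.0 km / s; strings without a space, e.g. '5km/s', would raise above.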
def element_symbol2atomic_number(element_string):
"""
Takes an element symbol and returns its corresponding atomic number
Parameters
----------
element_string : str
Inputted element symbol
Returns
-------
int
Returned atomic number
"""
reformatted_element_string = reformat_element_symbol(element_string)
if reformatted_element_string not in SYMBOL2ATOMIC_NUMBER:
raise MalformedElementSymbolError(element_string)
return SYMBOL2ATOMIC_NUMBER[reformatted_element_string]
def atomic_number2element_symbol(atomic_number):
"""
Convert atomic number to string
Parameters
----------
atomic_number : int
Inputted atomic number
Returns
-------
str
Returned corresponding element symbol
"""
return ATOMIC_NUMBER2SYMBOL[atomic_number]
def reformat_element_symbol(element_string):
"""
Reformat the string so the first letter is uppercase and all subsequent
letters lowercase.
Parameters
----------
element_string : str
Inputted element symbol
Returns
-------
str
Returned reformatted element symbol
"""
return element_string[0].upper() + element_string[1:].lower()
def quantity_linspace(start, stop, num, **kwargs):
"""
Essentially the same input parameters as linspace, but
calculated for an astropy quantity start and stop.
Parameters
----------
start : ~astropy.Quantity
Starting value of the sequence
stop : ~astropy.Quantity
End value of the sequence
num : int
Number of samples to generate
Returns
-------
~astropy.Quantity
Returns num evenly spaced values of type astropy.Quantity
Raises
------
ValueError
If start and stop values have no unit attribute.
"""
if not (hasattr(start, 'unit') and hasattr(stop, 'unit')): | raise ValueError('Both start and stop need to be quantities with a '
'unit attribute') | random_line_split |
|
base.py | light
"""
#BAD STYLE change to parse quantity
distance = u.Unit(distance)
wavelength, flux = np.loadtxt(spec_fname, usecols=(wavelength_column, flux_column), unpack=True)
flux_density = np.trapz(flux, wavelength) * (flux_unit * wavelength_unit)
luminosity = (flux_density * 4 * np.pi * distance**2).to('erg/s')
return luminosity.value, wavelength.min(), wavelength.max()
def create_synpp_yaml(radial1d_mdl, fname, shell_no=0, lines_db=None):
"""
Create a yaml file that is readable from syn++
Parameters
----------
radial1d_mdl : Radial1DModel
Inputted object that will be read into YAML file
fname : str
File name for the synpp yaml
shell_no : int, optional(default = 0)
Number of shells
lines_db : file, optional(default = None)
Raises
------
ValueError
If the current dataset does not contain necessary reference files
"""
logger.warning('Currently only works with Si and a special setup')
if radial1d_mdl.atom_data.synpp_refs is None:
raise ValueError(
'The current atom dataset does not contain the '
'necessary reference files (please contact the authors)')
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] = -99.0
for key, value in radial1d_mdl.atom_data.synpp_refs.iterrows():
try:
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'].loc[key] = np.log10(
radial1d_mdl.plasma.tau_sobolevs[0].loc[value['line_id']])
except KeyError:
pass
relevant_synpp_refs = radial1d_mdl.atom_data.synpp_refs[
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] > -50]
with open(synpp_default_yaml_fname) as stream:
yaml_reference = yaml.load(stream, Loader=yaml.CLoader)
if lines_db is not None:
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'lines')
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'refs.dat')
yaml_reference['output']['min_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.min())
yaml_reference['output']['max_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.max())
#raise Exception("there's a problem here with units what units does synpp expect?")
yaml_reference['opacity']['v_ref'] = float(
(radial1d_mdl.tardis_config.structure.v_inner[0].to('km/s') /
(1000. * u.km / u.s)).value)
yaml_reference['grid']['v_outer_max'] = float(
(radial1d_mdl.tardis_config.structure.v_outer[-1].to('km/s') /
(1000. * u.km / u.s)).value)
#pdb.set_trace()
yaml_setup = yaml_reference['setups'][0]
yaml_setup['ions'] = []
yaml_setup['log_tau'] = []
yaml_setup['active'] = []
yaml_setup['temp'] = []
yaml_setup['v_min'] = []
yaml_setup['v_max'] = []
yaml_setup['aux'] = []
for species, synpp_ref in relevant_synpp_refs.iterrows():
yaml_setup['ions'].append(100 * species[0] + species[1])
yaml_setup['log_tau'].append(float(synpp_ref['ref_log_tau']))
yaml_setup['active'].append(True)
yaml_setup['temp'].append(yaml_setup['t_phot'])
yaml_setup['v_min'].append(yaml_reference['opacity']['v_ref'])
yaml_setup['v_max'].append(yaml_reference['grid']['v_outer_max'])
yaml_setup['aux'].append(1e200)
with open(fname, 'w') as f:
yaml.dump(yaml_reference, stream=f, explicit_start=True)
def intensity_black_body(nu, T):
"""
Calculate the intensity of a black-body according to the following formula
.. math::
I(\\nu, T) = \\frac{2h\\nu^3}{c^2}\\frac{1}
{e^{h\\nu \\beta_\\textrm{rad}} - 1}
Parameters
----------
nu : float
Frequency of light
T : float
Temperature in kelvin
Returns
-------
Intensity : float
Returns the intensity of the black body
"""
beta_rad = 1 / (k_B_cgs * T)
coefficient = 2 * h_cgs / c_cgs ** 2
intensity = ne.evaluate('coefficient * nu**3 / '
'(exp(h_cgs * nu * beta_rad) -1 )')
return intensity
def species_tuple_to_string(species_tuple, roman_numerals=True):
"""
Convert a species tuple to its corresponding string representation.
Parameters
----------
species_tuple : tuple
Tuple of 2 values indicating atomic number and number of
electrons missing
roman_numerals : bool, optional(default = TRUE)
Indicates whether the returned ion number is in roman numerals
Returns
-------
element_symbol, roman_ion_number : str
Returns corresponding string representation of given tuple
"""
atomic_number, ion_number = species_tuple
element_symbol = ATOMIC_NUMBER2SYMBOL[atomic_number]
if roman_numerals:
roman_ion_number = int_to_roman(ion_number+1)
return '{0} {1}'.format(str(element_symbol), roman_ion_number)
else:
return '{0} {1:d}'.format(element_symbol, ion_number)
def species_string_to_tuple(species_string):
"""
Convert a species string to its corresponding tuple representation
Parameters
----------
species_string : str
String containing species symbol (e.g. Si II, Fe III)
Returns
-------
atomic_number, ion_number : tuple
Returns tuple of length 2 indicating atomic number and ion number
Raises
------
MalformedSpeciesError
If the inputted string does not match the species format
"""
try:
element_symbol, ion_number_string = re.match(r'^(\w+)\s*(\d+)',
species_string).groups()
except AttributeError:
try:
element_symbol, ion_number_string = species_string.split()
except ValueError:
raise MalformedSpeciesError(
'Species string "{0}" is not of format <element_symbol><number>'
' (e.g. Fe 2, Fe2, ..)'.format(species_string))
atomic_number = element_symbol2atomic_number(element_symbol)
try:
ion_number = roman_to_int(ion_number_string)
except ValueError:
try:
ion_number = int(ion_number_string)
except ValueError:
raise MalformedSpeciesError(
"Given ion number ('{}') could not be parsed".format(
ion_number_string))
if ion_number > atomic_number:
raise ValueError(
'Species given does not exist: ion number > atomic number')
return atomic_number, ion_number - 1
def parse_quantity(quantity_string):
"""
Changes a string into its corresponding astropy.Quantity object.
Parameters
----------
quantity_string : str
String to be converted into astropy.Quantity
Returns
-------
q : ~u.Quantity
Corresponding astropy.Quantity object for passed string
Raises
------
MalformedQuantityError
If string is not properly formatted for Astropy Quantity
"""
if not isinstance(quantity_string, str):
raise MalformedQuantityError(quantity_string)
try:
value_string, unit_string = quantity_string.split()
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
value = float(value_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
try:
q = u.Quantity(value, unit_string)
except ValueError:
raise MalformedQuantityError(quantity_string)
return q
def element_symbol2atomic_number(element_string):
"""
Takes an element symbol and returns its corresponding atomic number
Parameters
----------
element_string : str
Inputted element symbol
Returns
-------
int
Returned atomic number
"""
reformatted_element_string = reformat_element_symbol(element_string)
if reformatted_element_string not in SYMBOL2ATOMIC_NUMBER:
raise MalformedElementSymbolError(element_string)
return SYMBOL2ATOMIC_NUMBER[reformatted_element_string]
def atomic_number2element_symbol(atomic_number):
"""
Convert atomic number to string
Parameters
----------
atomic_number : int
Inputted atomic number
Returns
-------
str
Returned corresponding element symbol
"""
return ATOMIC_NUMBER2SYMBOL[atomic_number]
def reformat_element_symbol(element_string):
| """
Reformat the string so the first letter is uppercase and all subsequent
letters lowercase.
Parameters
----------
element_string : str
Inputted element symbol
Returns
-------
str
Returned reformatted element symbol
"""
return element_string[0].upper() + element_string[1:].lower() | identifier_body |
|
base.py | formedElementSymbolError(MalformedError):
def __init__(self, malformed_element_symbol):
self.malformed_element_symbol = malformed_element_symbol
def __str__(self):
return ('Expecting an atomic symbol (e.g. Fe) - supplied {0}').format(
self.malformed_element_symbol)
class MalformedQuantityError(MalformedError):
def __init__(self, malformed_quantity_string):
self.malformed_quantity_string = malformed_quantity_string
def __str__(self):
return ('Expecting a quantity string(e.g. "5 km/s") for keyword '
'- supplied {0}').format(self.malformed_quantity_string)
def int_to_roman(i):
"""
Convert an integer into its roman numeral representation.
Parameters
----------
i : int
Integer to be converted into roman numerals
Returns
-------
str
Returns roman numeral representation of i in str format.
"""
result = []
for integer, numeral in NUMERAL_MAP:
count = i // integer
result.append(numeral * count)
i -= integer * count
return ''.join(result)
def roman_to_int(roman_string):
"""
Convert a roman numeral into its corresponding integer.
Parameters
----------
roman_string : str
Roman numeral to be converted into an integer
Returns
-------
int
Returns integer representation of roman_string
"""
NUMERALS_SET = set(list(zip(*NUMERAL_MAP))[1])
roman_string = roman_string.upper()
if len(set(list(roman_string.upper())) - NUMERALS_SET) != 0:
raise ValueError('{0} does not seem to be a roman numeral'.format(
roman_string))
i = result = 0
for integer, numeral in NUMERAL_MAP:
while roman_string[i:i + len(numeral)] == numeral:
result += integer
i += len(numeral)
if result < 1:
raise ValueError('Can not interpret Roman Numeral {0}'.format(roman_string))
return result
def calculate_luminosity(
spec_fname, distance, wavelength_column=0,
wavelength_unit=u.angstrom, flux_column=1,
flux_unit=u.Unit('erg / (Angstrom cm2 s)')):
"""
Calculates luminosity of star.
Parameters
----------
spec_fname : file or str
File or file name to be read
distance : float
Distance to star
wavelength_column : int, optional(default = 0)
Column index in which the wavelength is stored
wavelength_unit : float, optional(default = u.angstrom)
Dictates units used for calculating wavelength.
flux_column : int, optional(default = 1)
Column index in which the flux is stored
flux_unit : str, optional(default = u.Unit('erg / (Angstrom cm2 s)')
Dictates units used for flux
Returns
-------
luminosity.value : float
Returned luminosity value of star.
wavelength.min() : float
Minimum value of wavelength of light
wavelength.max() : float
Maximum value of wavelength of light
"""
#BAD STYLE change to parse quantity
distance = u.Unit(distance)
wavelength, flux = np.loadtxt(spec_fname, usecols=(wavelength_column, flux_column), unpack=True)
flux_density = np.trapz(flux, wavelength) * (flux_unit * wavelength_unit)
luminosity = (flux_density * 4 * np.pi * distance**2).to('erg/s')
return luminosity.value, wavelength.min(), wavelength.max()
def create_synpp_yaml(radial1d_mdl, fname, shell_no=0, lines_db=None):
"""
Create a yaml file that is readable from syn++
Parameters
----------
radial1d_mdl : Radial1DModel
Inputted object that will be read into YAML file
fname : str
File name for the synpp yaml
shell_no : int, optional(default = 0)
Number of shells
lines_db : file, optional(default = None)
Raises
------
ValueError
If the current dataset does not contain necessary reference files
"""
logger.warning('Currently only works with Si and a special setup')
if radial1d_mdl.atom_data.synpp_refs is None:
raise ValueError(
'The current atom dataset does not contain the '
'necessary reference files (please contact the authors)')
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] = -99.0
for key, value in radial1d_mdl.atom_data.synpp_refs.iterrows():
|
relevant_synpp_refs = radial1d_mdl.atom_data.synpp_refs[
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'] > -50]
with open(synpp_default_yaml_fname) as stream:
yaml_reference = yaml.load(stream, Loader=yaml.CLoader)
if lines_db is not None:
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'lines')
yaml_reference['opacity']['line_dir'] = os.path.join(lines_db, 'refs.dat')
yaml_reference['output']['min_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.min())
yaml_reference['output']['max_wl'] = float(
radial1d_mdl.runner.spectrum.wavelength.to('angstrom').value.max())
#raise Exception("there's a problem here with units what units does synpp expect?")
yaml_reference['opacity']['v_ref'] = float(
(radial1d_mdl.tardis_config.structure.v_inner[0].to('km/s') /
(1000. * u.km / u.s)).value)
yaml_reference['grid']['v_outer_max'] = float(
(radial1d_mdl.tardis_config.structure.v_outer[-1].to('km/s') /
(1000. * u.km / u.s)).value)
#pdb.set_trace()
yaml_setup = yaml_reference['setups'][0]
yaml_setup['ions'] = []
yaml_setup['log_tau'] = []
yaml_setup['active'] = []
yaml_setup['temp'] = []
yaml_setup['v_min'] = []
yaml_setup['v_max'] = []
yaml_setup['aux'] = []
for species, synpp_ref in relevant_synpp_refs.iterrows():
yaml_setup['ions'].append(100 * species[0] + species[1])
yaml_setup['log_tau'].append(float(synpp_ref['ref_log_tau']))
yaml_setup['active'].append(True)
yaml_setup['temp'].append(yaml_setup['t_phot'])
yaml_setup['v_min'].append(yaml_reference['opacity']['v_ref'])
yaml_setup['v_max'].append(yaml_reference['grid']['v_outer_max'])
yaml_setup['aux'].append(1e200)
with open(fname, 'w') as f:
yaml.dump(yaml_reference, stream=f, explicit_start=True)
def intensity_black_body(nu, T):
"""
Calculate the intensity of a black-body according to the following formula
.. math::
I(\\nu, T) = \\frac{2h\\nu^3}{c^2}\\frac{1}
{e^{h\\nu \\beta_\\textrm{rad}} - 1}
Parameters
----------
nu : float
Frequency of light
T : float
Temperature in kelvin
Returns
-------
Intensity : float
Returns the intensity of the black body
"""
beta_rad = 1 / (k_B_cgs * T)
coefficient = 2 * h_cgs / c_cgs ** 2
intensity = ne.evaluate('coefficient * nu**3 / '
'(exp(h_cgs * nu * beta_rad) -1 )')
return intensity
def species_tuple_to_string(species_tuple, roman_numerals=True):
"""
Convert a species tuple to its corresponding string representation.
Parameters
----------
species_tuple : tuple
Tuple of 2 values indicating atomic number and number of
electrons missing
roman_numerals : bool, optional(default = TRUE)
Indicates whether the returned ion number is in roman numerals
Returns
-------
element_symbol, roman_ion_number : str
Returns corresponding string representation of given tuple
"""
atomic_number, ion_number = species_tuple
element_symbol = ATOMIC_NUMBER2SYMBOL[atomic_number]
if roman_numerals:
roman_ion_number = int_to_roman(ion_number+1)
return '{0} {1}'.format(str(element_symbol), roman_ion_number)
else:
return '{0} {1:d}'.format(element_symbol, ion_number)
def species_string_to_tuple(species_string):
"""
Convert a species string to its corresponding tuple representation
Parameters
----------
species_string : str
String containing species symbol (e.g. Si II, Fe III)
Returns
-------
atomic_number, ion_number : tuple
Returns tuple of length 2 indicating atomic number and ion number
Raises
| try:
radial1d_mdl.atom_data.synpp_refs['ref_log_tau'].loc[key] = np.log10(
radial1d_mdl.plasma.tau_sobolevs[0].loc[value['line_id']])
except KeyError:
pass | conditional_block |
profiling_data.go | .GetColumns()
numSliceRows := slicesQueryResult.GetNumRecords()
slices := make([]*service.ProfilingData_GpuSlices_Slice, numSliceRows)
groupParentLookup := map[api.CmdSubmissionKey]*service.ProfilingData_GpuSlices_Group{}
groups := []*service.ProfilingData_GpuSlices_Group{}
groupIds := make([]int32, numSliceRows)
var tracks []*service.ProfilingData_GpuSlices_Track
// Grab all the column values. Depends on the order of columns selected in slicesQuery
contextIds := slicesColumns[0].GetLongValues()
fixContextIds(contextIds)
extractTraceHandles(ctx, &contextIds, "VkDevice", handleMapping)
renderTargets := slicesColumns[1].GetLongValues()
extractTraceHandles(ctx, &renderTargets, "VkFramebuffer", handleMapping)
commandBuffers := slicesColumns[5].GetLongValues()
extractTraceHandles(ctx, &commandBuffers, "VkCommandBuffer", handleMapping)
renderPasses := slicesColumns[6].GetLongValues()
extractTraceHandles(ctx, &renderPasses, "VkRenderPass", handleMapping)
frameIds := slicesColumns[2].GetLongValues()
submissionIds := slicesColumns[3].GetLongValues()
hwQueueIds := slicesColumns[4].GetLongValues()
timestamps := slicesColumns[7].GetLongValues()
durations := slicesColumns[8].GetLongValues()
ids := slicesColumns[9].GetLongValues()
names := slicesColumns[10].GetStringValues()
depths := slicesColumns[11].GetLongValues()
argSetIds := slicesColumns[12].GetLongValues()
trackIds := slicesColumns[13].GetLongValues()
trackNames := slicesColumns[14].GetStringValues()
subCommandGroupMap := make(map[api.CmdSubmissionKey]int)
for i, v := range submissionIds {
subOrder, ok := submissionOrdering[v]
if ok {
cb := uint64(commandBuffers[i])
key := api.CmdSubmissionKey{subOrder, cb, uint64(renderPasses[i]), uint64(renderTargets[i])}
// Create a new group for each main renderPass slice.
if indices, ok := syncData.SubmissionIndices[key]; ok && names[i] == renderPassSliceName {
var idx []uint64
if c, ok := subCommandGroupMap[key]; ok { // Sometimes multiple renderPass slices shares the same renderPass and renderTarget.
idx = indices[c]
} else {
idx = indices[0]
subCommandGroupMap[key] = 0
}
names[i] = fmt.Sprintf("%v", idx)
parent := utils.FindParentGroup(ctx, subOrder, cb, groupParentLookup, &groups, syncData.SubmissionIndices, capture)
group := &service.ProfilingData_GpuSlices_Group{
Id: int32(len(groups)),
Name: fmt.Sprintf("RenderPass %v, RenderTarget %v", uint64(renderPasses[i]), uint64(renderTargets[i])),
Parent: parent,
Link: &path.Command{Capture: capture, Indices: idx},
}
groups = append(groups, group)
subCommandGroupMap[key]++
}
} else {
log.W(ctx, "Encountered submission ID mismatch %v", v)
}
// Find the group that the current slice belongs to and mark down group id.
if len(groups) > 0 {
groupIds[i] = groups[len(groups)-1].Id // Slices were time sorted and main renderPass slice comes first.
} else {
log.W(ctx, "Group missing for slice %v at submission %v, commandBuffer %v, renderPass %v, renderTarget %v", names[i], submissionIds[i], commandBuffers[i], renderPasses[i], renderTargets[i])
groupIds[i] = -1
}
}
for i := uint64(0); i < numSliceRows; i++ {
var argsQueryResult *perfetto_service.QueryResult
var ok bool
if argsQueryResult, ok = argsQueryCache[argSetIds[i]]; !ok {
argsQuery := fmt.Sprintf(argsQueryFmt, argSetIds[i])
argsQueryResult, err = processor.Query(argsQuery)
if err != nil {
log.W(ctx, "SQL query failed: %v", argsQuery)
}
argsQueryCache[argSetIds[i]] = argsQueryResult
}
argsColumns := argsQueryResult.GetColumns()
numArgsRows := argsQueryResult.GetNumRecords()
var extras []*service.ProfilingData_GpuSlices_Slice_Extra
for j := uint64(0); j < numArgsRows; j++ {
keys := argsColumns[0].GetStringValues()
values := argsColumns[1].GetStringValues()
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: keys[j],
Value: &service.ProfilingData_GpuSlices_Slice_Extra_StringValue{StringValue: values[j]},
})
}
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "contextId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(contextIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "renderTarget",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(renderTargets[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "commandBuffer",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(commandBuffers[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "renderPass",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(renderPasses[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "frameId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(frameIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "submissionId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(submissionIds[i])},
})
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: "hwQueueId",
Value: &service.ProfilingData_GpuSlices_Slice_Extra_IntValue{IntValue: uint64(hwQueueIds[i])},
})
slices[i] = &service.ProfilingData_GpuSlices_Slice{
Ts: uint64(timestamps[i]),
Dur: uint64(durations[i]),
Id: uint64(ids[i]),
Label: names[i],
Depth: int32(depths[i]),
Extras: extras,
TrackId: int32(trackIds[i]),
GroupId: groupIds[i],
}
if _, ok := trackIdCache[trackIds[i]]; !ok {
trackIdCache[trackIds[i]] = true
tracks = append(tracks, &service.ProfilingData_GpuSlices_Track{
Id: int32(trackIds[i]),
Name: trackNames[i],
})
}
}
return &service.ProfilingData_GpuSlices{
Slices: slices,
Tracks: tracks,
Groups: groups,
}, nil
}
func processCounters(ctx context.Context, processor *perfetto.Processor, desc *device.GpuCounterDescriptor) ([]*service.ProfilingData_Counter, error) | {
counterTracksQueryResult, err := processor.Query(counterTracksQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", counterTracksQuery)
}
// t.id, name, unit, description, ts, value
tracksColumns := counterTracksQueryResult.GetColumns()
numTracksRows := counterTracksQueryResult.GetNumRecords()
counters := make([]*service.ProfilingData_Counter, numTracksRows)
// Grab all the column values. Depends on the order of columns selected in countersQuery
trackIds := tracksColumns[0].GetLongValues()
names := tracksColumns[1].GetStringValues()
units := tracksColumns[2].GetStringValues()
descriptions := tracksColumns[3].GetStringValues()
nameToSpec := map[string]*device.GpuCounterDescriptor_GpuCounterSpec{}
if desc != nil {
for _, spec := range desc.Specs {
nameToSpec[spec.Name] = spec
} | identifier_body |
|
profiling_data.go | "
)
func ProcessProfilingData(ctx context.Context, processor *perfetto.Processor, capture *path.Capture, desc *device.GpuCounterDescriptor, handleMapping *map[uint64][]service.VulkanHandleMappingItem, syncData *sync.Data) (*service.ProfilingData, error) {
slices, err := processGpuSlices(ctx, processor, capture, handleMapping, syncData)
if err != nil {
log.Err(ctx, err, "Failed to get GPU slices")
}
counters, err := processCounters(ctx, processor, desc)
if err != nil {
log.Err(ctx, err, "Failed to get GPU counters")
}
gpuCounters, err := profile.ComputeCounters(ctx, slices, counters)
if err != nil {
log.Err(ctx, err, "Failed to calculate performance data based on GPU slices and counters")
}
return &service.ProfilingData{
Slices: slices,
Counters: counters,
GpuCounters: gpuCounters,
}, nil
}
func | (ctx context.Context, replayHandles *[]int64, replayHandleType string, handleMapping *map[uint64][]service.VulkanHandleMappingItem) {
for i, v := range *replayHandles {
handles, ok := (*handleMapping)[uint64(v)]
if !ok {
log.E(ctx, "%v not found in replay: %v", replayHandleType, v)
continue
}
found := false
for _, handle := range handles {
if handle.HandleType == replayHandleType {
(*replayHandles)[i] = int64(handle.TraceValue)
found = true
break
}
}
if !found {
log.E(ctx, "Incorrect Handle type for %v: %v", replayHandleType, v)
}
}
}
func fixContextIds(contextIDs []int64) {
// This is a workaround for a QC bug (b/192546534)
// that causes the first deviceID to be zero after a
// renderpass change in the same queue submit.
// So, we fill the zero devices with the existing
// device id, where there is only one device id.
zeroIndices := make([]int, 0)
contextID := int64(0)
for i, v := range contextIDs {
if v == 0 {
zeroIndices = append(zeroIndices, i)
continue
}
if contextID == 0 {
contextID = v
continue
}
if contextID != v {
// There are multiple devices
// We cannot know which one to fill
return
}
}
for _, v := range zeroIndices {
// If there is only one device in entire trace
// We can assume that we possibly have only one device
contextIDs[v] = contextID
}
}
func processGpuSlices(ctx context.Context, processor *perfetto.Processor, capture *path.Capture, handleMapping *map[uint64][]service.VulkanHandleMappingItem, syncData *sync.Data) (*service.ProfilingData_GpuSlices, error) {
slicesQueryResult, err := processor.Query(slicesQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", slicesQuery)
}
queueSubmitQueryResult, err := processor.Query(queueSubmitQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", queueSubmitQuery)
}
queueSubmitColumns := queueSubmitQueryResult.GetColumns()
queueSubmitIds := queueSubmitColumns[0].GetLongValues()
submissionOrdering := make(map[int64]uint64)
for i, v := range queueSubmitIds {
submissionOrdering[v] = uint64(i)
}
trackIdCache := make(map[int64]bool)
argsQueryCache := make(map[int64]*perfetto_service.QueryResult)
slicesColumns := slicesQueryResult.GetColumns()
numSliceRows := slicesQueryResult.GetNumRecords()
slices := make([]*service.ProfilingData_GpuSlices_Slice, numSliceRows)
groupParentLookup := map[api.CmdSubmissionKey]*service.ProfilingData_GpuSlices_Group{}
groups := []*service.ProfilingData_GpuSlices_Group{}
groupIds := make([]int32, numSliceRows)
var tracks []*service.ProfilingData_GpuSlices_Track
// Grab all the column values. Depends on the order of columns selected in slicesQuery
contextIds := slicesColumns[0].GetLongValues()
fixContextIds(contextIds)
extractTraceHandles(ctx, &contextIds, "VkDevice", handleMapping)
renderTargets := slicesColumns[1].GetLongValues()
extractTraceHandles(ctx, &renderTargets, "VkFramebuffer", handleMapping)
commandBuffers := slicesColumns[5].GetLongValues()
extractTraceHandles(ctx, &commandBuffers, "VkCommandBuffer", handleMapping)
renderPasses := slicesColumns[6].GetLongValues()
extractTraceHandles(ctx, &renderPasses, "VkRenderPass", handleMapping)
frameIds := slicesColumns[2].GetLongValues()
submissionIds := slicesColumns[3].GetLongValues()
hwQueueIds := slicesColumns[4].GetLongValues()
timestamps := slicesColumns[7].GetLongValues()
durations := slicesColumns[8].GetLongValues()
ids := slicesColumns[9].GetLongValues()
names := slicesColumns[10].GetStringValues()
depths := slicesColumns[11].GetLongValues()
argSetIds := slicesColumns[12].GetLongValues()
trackIds := slicesColumns[13].GetLongValues()
trackNames := slicesColumns[14].GetStringValues()
subCommandGroupMap := make(map[api.CmdSubmissionKey]int)
for i, v := range submissionIds {
subOrder, ok := submissionOrdering[v]
if ok {
cb := uint64(commandBuffers[i])
key := api.CmdSubmissionKey{subOrder, cb, uint64(renderPasses[i]), uint64(renderTargets[i])}
// Create a new group for each main renderPass slice.
if indices, ok := syncData.SubmissionIndices[key]; ok && names[i] == renderPassSliceName {
var idx []uint64
if c, ok := subCommandGroupMap[key]; ok { // Sometimes multiple renderPass slices shares the same renderPass and renderTarget.
idx = indices[c]
} else {
idx = indices[0]
subCommandGroupMap[key] = 0
}
names[i] = fmt.Sprintf("%v", idx)
parent := utils.FindParentGroup(ctx, subOrder, cb, groupParentLookup, &groups, syncData.SubmissionIndices, capture)
group := &service.ProfilingData_GpuSlices_Group{
Id: int32(len(groups)),
Name: fmt.Sprintf("RenderPass %v, RenderTarget %v", uint64(renderPasses[i]), uint64(renderTargets[i])),
Parent: parent,
Link: &path.Command{Capture: capture, Indices: idx},
}
groups = append(groups, group)
subCommandGroupMap[key]++
}
} else {
log.W(ctx, "Encountered submission ID mismatch %v", v)
}
// Find the group that the current slice belongs to and mark down group id.
if len(groups) > 0 {
groupIds[i] = groups[len(groups)-1].Id // Slices were time sorted and main renderPass slice comes first.
} else {
log.W(ctx, "Group missing for slice %v at submission %v, commandBuffer %v, renderPass %v, renderTarget %v", names[i], submissionIds[i], commandBuffers[i], renderPasses[i], renderTargets[i])
groupIds[i] = -1
}
}
for i := uint64(0); i < numSliceRows; i++ {
var argsQueryResult *perfetto_service.QueryResult
var ok bool
if argsQueryResult, ok = argsQueryCache[argSetIds[i]]; !ok {
argsQuery := fmt.Sprintf(argsQueryFmt, argSetIds[i])
argsQueryResult, err = processor.Query(argsQuery)
if err != nil {
log.W(ctx, "SQL query failed: %v", argsQuery)
}
argsQueryCache[argSetIds[i]] = argsQueryResult
}
argsColumns := argsQueryResult.GetColumns()
numArgsRows := argsQueryResult.GetNumRecords()
var extras []*service.ProfilingData_GpuSlices_Slice_Extra
for j := uint64(0); j < numArgsRows; j++ {
keys := argsColumns[0].GetStringValues()
values := argsColumns[1].GetStringValues()
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: keys[j],
Value: &service.ProfilingData_GpuSlices_Slice_Extra_StringValue{StringValue: values[j]},
})
}
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: | extractTraceHandles | identifier_name |
profiling_data.go | )
func ProcessProfilingData(ctx context.Context, processor *perfetto.Processor, capture *path.Capture, desc *device.GpuCounterDescriptor, handleMapping *map[uint64][]service.VulkanHandleMappingItem, syncData *sync.Data) (*service.ProfilingData, error) {
slices, err := processGpuSlices(ctx, processor, capture, handleMapping, syncData)
if err != nil {
log.Err(ctx, err, "Failed to get GPU slices")
}
counters, err := processCounters(ctx, processor, desc)
if err != nil {
log.Err(ctx, err, "Failed to get GPU counters")
}
gpuCounters, err := profile.ComputeCounters(ctx, slices, counters)
if err != nil {
log.Err(ctx, err, "Failed to calculate performance data based on GPU slices and counters")
}
return &service.ProfilingData{
Slices: slices,
Counters: counters,
GpuCounters: gpuCounters,
}, nil
}
func extractTraceHandles(ctx context.Context, replayHandles *[]int64, replayHandleType string, handleMapping *map[uint64][]service.VulkanHandleMappingItem) {
for i, v := range *replayHandles {
handles, ok := (*handleMapping)[uint64(v)]
if !ok {
log.E(ctx, "%v not found in replay: %v", replayHandleType, v)
continue
}
found := false
for _, handle := range handles |
if !found {
log.E(ctx, "Incorrect Handle type for %v: %v", replayHandleType, v)
}
}
}
func fixContextIds(contextIDs []int64) {
// This is a workaround for a QC bug (b/192546534)
// that causes the first deviceID to be zero after a
// renderpass change in the same queue submit.
// So, we fill the zero devices with the existing
// device id, where there is only one device id.
zeroIndices := make([]int, 0)
contextID := int64(0)
for i, v := range contextIDs {
if v == 0 {
zeroIndices = append(zeroIndices, i)
continue
}
if contextID == 0 {
contextID = v
continue
}
if contextID != v {
// There are multiple devices
// We cannot know which one to fill
return
}
}
for _, v := range zeroIndices {
// If there is only one device in entire trace
// We can assume that we possibly have only one device
contextIDs[v] = contextID
}
}
func processGpuSlices(ctx context.Context, processor *perfetto.Processor, capture *path.Capture, handleMapping *map[uint64][]service.VulkanHandleMappingItem, syncData *sync.Data) (*service.ProfilingData_GpuSlices, error) {
slicesQueryResult, err := processor.Query(slicesQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", slicesQuery)
}
queueSubmitQueryResult, err := processor.Query(queueSubmitQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", queueSubmitQuery)
}
queueSubmitColumns := queueSubmitQueryResult.GetColumns()
queueSubmitIds := queueSubmitColumns[0].GetLongValues()
submissionOrdering := make(map[int64]uint64)
for i, v := range queueSubmitIds {
submissionOrdering[v] = uint64(i)
}
trackIdCache := make(map[int64]bool)
argsQueryCache := make(map[int64]*perfetto_service.QueryResult)
slicesColumns := slicesQueryResult.GetColumns()
numSliceRows := slicesQueryResult.GetNumRecords()
slices := make([]*service.ProfilingData_GpuSlices_Slice, numSliceRows)
groupParentLookup := map[api.CmdSubmissionKey]*service.ProfilingData_GpuSlices_Group{}
groups := []*service.ProfilingData_GpuSlices_Group{}
groupIds := make([]int32, numSliceRows)
var tracks []*service.ProfilingData_GpuSlices_Track
// Grab all the column values. Depends on the order of columns selected in slicesQuery
contextIds := slicesColumns[0].GetLongValues()
fixContextIds(contextIds)
extractTraceHandles(ctx, &contextIds, "VkDevice", handleMapping)
renderTargets := slicesColumns[1].GetLongValues()
extractTraceHandles(ctx, &renderTargets, "VkFramebuffer", handleMapping)
commandBuffers := slicesColumns[5].GetLongValues()
extractTraceHandles(ctx, &commandBuffers, "VkCommandBuffer", handleMapping)
renderPasses := slicesColumns[6].GetLongValues()
extractTraceHandles(ctx, &renderPasses, "VkRenderPass", handleMapping)
frameIds := slicesColumns[2].GetLongValues()
submissionIds := slicesColumns[3].GetLongValues()
hwQueueIds := slicesColumns[4].GetLongValues()
timestamps := slicesColumns[7].GetLongValues()
durations := slicesColumns[8].GetLongValues()
ids := slicesColumns[9].GetLongValues()
names := slicesColumns[10].GetStringValues()
depths := slicesColumns[11].GetLongValues()
argSetIds := slicesColumns[12].GetLongValues()
trackIds := slicesColumns[13].GetLongValues()
trackNames := slicesColumns[14].GetStringValues()
subCommandGroupMap := make(map[api.CmdSubmissionKey]int)
for i, v := range submissionIds {
subOrder, ok := submissionOrdering[v]
if ok {
cb := uint64(commandBuffers[i])
key := api.CmdSubmissionKey{subOrder, cb, uint64(renderPasses[i]), uint64(renderTargets[i])}
// Create a new group for each main renderPass slice.
if indices, ok := syncData.SubmissionIndices[key]; ok && names[i] == renderPassSliceName {
var idx []uint64
if c, ok := subCommandGroupMap[key]; ok { // Sometimes multiple renderPass slices shares the same renderPass and renderTarget.
idx = indices[c]
} else {
idx = indices[0]
subCommandGroupMap[key] = 0
}
names[i] = fmt.Sprintf("%v", idx)
parent := utils.FindParentGroup(ctx, subOrder, cb, groupParentLookup, &groups, syncData.SubmissionIndices, capture)
group := &service.ProfilingData_GpuSlices_Group{
Id: int32(len(groups)),
Name: fmt.Sprintf("RenderPass %v, RenderTarget %v", uint64(renderPasses[i]), uint64(renderTargets[i])),
Parent: parent,
Link: &path.Command{Capture: capture, Indices: idx},
}
groups = append(groups, group)
subCommandGroupMap[key]++
}
} else {
log.W(ctx, "Encountered submission ID mismatch %v", v)
}
// Find the group that the current slice belongs to and mark down group id.
if len(groups) > 0 {
groupIds[i] = groups[len(groups)-1].Id // Slices were time sorted and main renderPass slice comes first.
} else {
log.W(ctx, "Group missing for slice %v at submission %v, commandBuffer %v, renderPass %v, renderTarget %v", names[i], submissionIds[i], commandBuffers[i], renderPasses[i], renderTargets[i])
groupIds[i] = -1
}
}
for i := uint64(0); i < numSliceRows; i++ {
var argsQueryResult *perfetto_service.QueryResult
var ok bool
if argsQueryResult, ok = argsQueryCache[argSetIds[i]]; !ok {
argsQuery := fmt.Sprintf(argsQueryFmt, argSetIds[i])
argsQueryResult, err = processor.Query(argsQuery)
if err != nil {
log.W(ctx, "SQL query failed: %v", argsQuery)
}
argsQueryCache[argSetIds[i]] = argsQueryResult
}
argsColumns := argsQueryResult.GetColumns()
numArgsRows := argsQueryResult.GetNumRecords()
var extras []*service.ProfilingData_GpuSlices_Slice_Extra
for j := uint64(0); j < numArgsRows; j++ {
keys := argsColumns[0].GetStringValues()
values := argsColumns[1].GetStringValues()
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: keys[j],
Value: &service.ProfilingData_GpuSlices_Slice_Extra_StringValue{StringValue: values[j]},
})
}
extras = append(extras, &service.ProfilingData_GpuSlices_Slice_Extra{
Name: | {
if handle.HandleType == replayHandleType {
(*replayHandles)[i] = int64(handle.TraceValue)
found = true
break
}
} | conditional_block |
profiling_data.go | "github.com/google/gapid/gapis/perfetto"
perfetto_service "github.com/google/gapid/gapis/perfetto/service"
"github.com/google/gapid/gapis/service"
"github.com/google/gapid/gapis/service/path"
"github.com/google/gapid/gapis/trace/android/profile"
"github.com/google/gapid/gapis/trace/android/utils"
)
var (
slicesQuery = "" +
"SELECT s.context_id, s.render_target, s.frame_id, s.submission_id, s.hw_queue_id, s.command_buffer, s.render_pass, s.ts, s.dur, s.id, s.name, depth, arg_set_id, track_id, t.name " +
"FROM gpu_track t LEFT JOIN gpu_slice s " +
"ON s.track_id = t.id WHERE t.scope = 'gpu_render_stage' ORDER BY s.ts"
argsQueryFmt = "" +
"SELECT key, string_value FROM args WHERE args.arg_set_id = %d"
queueSubmitQuery = "" +
"SELECT submission_id FROM gpu_slice s JOIN track t ON s.track_id = t.id WHERE s.name = 'vkQueueSubmit' AND t.name = 'Vulkan Events' ORDER BY submission_id"
counterTracksQuery = "" +
"SELECT id, name, unit, description FROM gpu_counter_track ORDER BY id"
countersQueryFmt = "" +
"SELECT ts, value FROM counter c WHERE c.track_id = %d ORDER BY ts"
renderPassSliceName = "Surface"
)
func ProcessProfilingData(ctx context.Context, processor *perfetto.Processor, capture *path.Capture, desc *device.GpuCounterDescriptor, handleMapping *map[uint64][]service.VulkanHandleMappingItem, syncData *sync.Data) (*service.ProfilingData, error) {
slices, err := processGpuSlices(ctx, processor, capture, handleMapping, syncData)
if err != nil {
log.Err(ctx, err, "Failed to get GPU slices")
}
counters, err := processCounters(ctx, processor, desc)
if err != nil {
log.Err(ctx, err, "Failed to get GPU counters")
}
gpuCounters, err := profile.ComputeCounters(ctx, slices, counters)
if err != nil {
log.Err(ctx, err, "Failed to calculate performance data based on GPU slices and counters")
}
return &service.ProfilingData{
Slices: slices,
Counters: counters,
GpuCounters: gpuCounters,
}, nil
}
func extractTraceHandles(ctx context.Context, replayHandles *[]int64, replayHandleType string, handleMapping *map[uint64][]service.VulkanHandleMappingItem) {
for i, v := range *replayHandles {
handles, ok := (*handleMapping)[uint64(v)]
if !ok {
log.E(ctx, "%v not found in replay: %v", replayHandleType, v)
continue
}
found := false
for _, handle := range handles {
if handle.HandleType == replayHandleType {
(*replayHandles)[i] = int64(handle.TraceValue)
found = true
break
}
}
if !found {
log.E(ctx, "Incorrect Handle type for %v: %v", replayHandleType, v)
}
}
}
func fixContextIds(contextIDs []int64) {
// This is a workaround for a QC bug (b/192546534)
// that causes the first deviceID to be zero after a
// renderpass change in the same queue submit.
// So, we fill the zero devices with the existing
// device id, where there is only one device id.
zeroIndices := make([]int, 0)
contextID := int64(0)
for i, v := range contextIDs {
if v == 0 {
zeroIndices = append(zeroIndices, i)
continue
}
if contextID == 0 {
contextID = v
continue
}
if contextID != v {
// There are multiple devices
// We cannot know which one to fill
return
}
}
for _, v := range zeroIndices {
// If there is only one device in entire trace
// We can assume that we possibly have only one device
contextIDs[v] = contextID
}
}
func processGpuSlices(ctx context.Context, processor *perfetto.Processor, capture *path.Capture, handleMapping *map[uint64][]service.VulkanHandleMappingItem, syncData *sync.Data) (*service.ProfilingData_GpuSlices, error) {
slicesQueryResult, err := processor.Query(slicesQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", slicesQuery)
}
queueSubmitQueryResult, err := processor.Query(queueSubmitQuery)
if err != nil {
return nil, log.Errf(ctx, err, "SQL query failed: %v", queueSubmitQuery)
}
queueSubmitColumns := queueSubmitQueryResult.GetColumns()
queueSubmitIds := queueSubmitColumns[0].GetLongValues()
submissionOrdering := make(map[int64]uint64)
for i, v := range queueSubmitIds {
submissionOrdering[v] = uint64(i)
}
trackIdCache := make(map[int64]bool)
argsQueryCache := make(map[int64]*perfetto_service.QueryResult)
slicesColumns := slicesQueryResult.GetColumns()
numSliceRows := slicesQueryResult.GetNumRecords()
slices := make([]*service.ProfilingData_GpuSlices_Slice, numSliceRows)
groupParentLookup := map[api.CmdSubmissionKey]*service.ProfilingData_GpuSlices_Group{}
groups := []*service.ProfilingData_GpuSlices_Group{}
groupIds := make([]int32, numSliceRows)
var tracks []*service.ProfilingData_GpuSlices_Track
// Grab all the column values. Depends on the order of columns selected in slicesQuery
contextIds := slicesColumns[0].GetLongValues()
fixContextIds(contextIds)
extractTraceHandles(ctx, &contextIds, "VkDevice", handleMapping)
renderTargets := slicesColumns[1].GetLongValues()
extractTraceHandles(ctx, &renderTargets, "VkFramebuffer", handleMapping)
commandBuffers := slicesColumns[5].GetLongValues()
extractTraceHandles(ctx, &commandBuffers, "VkCommandBuffer", handleMapping)
renderPasses := slicesColumns[6].GetLongValues()
extractTraceHandles(ctx, &renderPasses, "VkRenderPass", handleMapping)
frameIds := slicesColumns[2].GetLongValues()
submissionIds := slicesColumns[3].GetLongValues()
hwQueueIds := slicesColumns[4].GetLongValues()
timestamps := slicesColumns[7].GetLongValues()
durations := slicesColumns[8].GetLongValues()
ids := slicesColumns[9].GetLongValues()
names := slicesColumns[10].GetStringValues()
depths := slicesColumns[11].GetLongValues()
argSetIds := slicesColumns[12].GetLongValues()
trackIds := slicesColumns[13].GetLongValues()
trackNames := slicesColumns[14].GetStringValues()
subCommandGroupMap := make(map[api.CmdSubmissionKey]int)
for i, v := range submissionIds {
subOrder, ok := submissionOrdering[v]
if ok {
cb := uint64(commandBuffers[i])
key := api.CmdSubmissionKey{subOrder, cb, uint64(renderPasses[i]), uint64(renderTargets[i])}
// Create a new group for each main renderPass slice.
if indices, ok := syncData.SubmissionIndices[key]; ok && names[i] == renderPassSliceName {
var idx []uint64
if c, ok := subCommandGroupMap[key]; ok { // Sometimes multiple renderPass slices shares the same renderPass and renderTarget.
idx = indices[c]
} else {
idx = indices[0]
subCommandGroupMap[key] = 0
}
names[i] = fmt.Sprintf("%v", idx)
parent := utils.FindParentGroup(ctx, subOrder, cb, groupParentLookup, &groups, syncData.SubmissionIndices, capture)
group := &service.ProfilingData_GpuSlices_Group{
Id: int32(len(groups)),
Name: fmt.Sprintf("RenderPass %v, RenderTarget %v", uint64(renderPasses[i]), uint64(renderTargets[i])),
Parent: parent,
Link: &path.Command{Capture: capture, Indices: idx},
}
groups = append(groups, group)
subCommandGroupMap[key]++
}
} else {
log.W(ctx, "Encountered submission ID mismatch %v", v)
}
// Find the group that the current slice belongs to and mark down group id.
if len(groups) > 0 {
groupIds[i] = groups[len(groups)-1].Id // Slices were time sorted and main renderPass slice comes first.
} else {
log.W(ctx, "Group missing for slice %v at submission %v, commandBuffer %v, renderPass %v, renderTarget %v", names[i], submissionIds[i], commandBuffers[i | random_line_split |
||
config.js | License.
*/
define([], function () {
return {
// This file contains various configuration settings for esri template
//
// Use this file to perform the following:
//
// 1. Customize application settings here - [ Tag(s) to look for: ApplicationSettings ]
// 2. Specify header widget settings - [ Tag(s) to look for: AppHeaderWidgets ]
// 3. Specify URLs for base maps - [ Tag(s) to look for: BaseMapLayers ]
// 4. Customize address search settings - [ Tag(s) to look for: LocatorSettings]
//------------------------------------------------------------------------------------------------------------------------
// GENERAL SETTINGS
//------------------------------------------------------------------------------------------------------------------------
// group: Set the Group id for the application
// appid: ID of application on ArcGIS.com containing your settings for this template
// applicationName: Set application title
// applicationIcon: Set application icon path
// applicationFavicon: Set application Favicon path
// customLogoUrl: Set custom map logo path
// itemSearchDefaultValue: Set the default value to search
// theme: Set the application theme. If blank, default blue theme will be loaded. Supported theme keys are blueTheme, greenTheme and redTheme.
// showCategoriesTagCloud: Set this variable to enable or disable categories tag cloud
// showGeographiesTagCloud: Set this variable to enable or disable geographies tag cloud
// geographiesTagText: This identifies the tag for geographies tag cloud. If set to blank,
// geographies tag cloud will not be displayed irrespective of the value for showGeographiesTagCloud.
// geographiesPrefixText: Set this variable to trim prefix text (eg. arcgis.) from geographies tag cloud. If set to blank,
// geographies tag cloud will be displayed as is. Case sensitive.
// enableAutoComplete: Set this variable to enable or disable autocomplete on item search
// tagCloudFontMinValue: Set min value of the tag cloud font,
// tagCloudFontMaxValue: set the max value of the tag cloud font,
// tagCloudFontUnits: Set the units for the text in tag cloud. UI will be distorted if font sizes have inappropriate values
// showMaxTopTags: Set this variable to the maximum number of results to be displayed in geographies and categories tag clouds
// displaySharingAttribute: If set to true, display sharing attributes ("ALL", "GRP" or "ORG").
// If set to false, sharing attributes ("ALL", "GRP" or "ORG") should not be displayed in item thumbnail
// useItemPage: If set to true then display Item Info Page
// If set to false and item is of type webmap then load the Item
// If set to false and item is of type other than webmap then download the Item
// portalURL: Set the portal URL
// geometryService: Set the URL for geometry service
// groupDescription: Displayed on the left panel of the index page. Defaults to group description.
// mapTitle: If not specified, the ArcGIS.com map's title is used.
// mapSnippet: If not specified, the ArcGIS.com web map's summary is used
// mapItemDescription: Displayed on item details page. Defaults to map description.
// mapLicenseInfo: Displayed on item details page. Defaults to map licenseInfo.
// defaultLayout: Default layout to use. "grid" or "list".
// sortField: Order to display the group items. Valid fields are: modified, numViews.
// sortOrder: Order to sort the group: "asc" or "desc".
// mapViewer: URL to open the gallery items to. "simple","arcgis".
// searchString: Performs a default search on the group with the set string.
// searchType: Performs a default search on the group for the specified item type. Valid fields are valid item types, eg. web map, feature service, map service, etc.
// showBasemapGallery: Show basemap gallery on map: true or false.
// showMapSearch: Show textbox for address search on map: true or false
// showOverviewMap: Show overview on map: true or false.
// showMoreInfo: Show more info link on item details page: true or false.
// showRatings: Show ratings of items on item details page.
// showViews: Show ratings of items on item details page.
// showLicenseInfo: Show Use Constraints on item details page.
// showAttribution: Show sources on item details page.
// showComments: Show comments on item details page.
// defaultLocatorSymbol: Set the image path for locator symbol. e.g. pushpin.
// markupSymbolWidth: Set the image width in pixels for locator symbol.
// markupSymbolHeight: Set the image height in pixels for locator symbol.
// zoomLevel: Following zoom level will be set for the map upon searching an address
// locatorDefaultAddress: Set the default address to search.
ApplicationSettings: {
group: "801cffe54b004008a8c316469c1e8326",
appid: "",
applicationName: "Map Gallery",
applicationIcon: "/themes/images/logo.png",
applicationFavicon: "/themes/images/favicon.ico",
customLogoUrl: "",
itemSearchDefaultValue: "Web Map",
theme: "",
showCategoriesTagCloud: true,
showGeographiesTagCloud: true,
geographiesTagText: "arcgis.",
geographiesPrefixText: "",
enableAutoComplete: true,
tagCloudFontMinValue: 15,
tagCloudFontMaxValue: 20,
tagCloudFontUnits: "px",
showMaxTopTags: 10,
displaySharingAttribute: false,
useItemPage: false,
portalURL: "http://www.arcgis.com",
geometryService: "http://tasks.arcgisonline.com/ArcGIS/rest/services/Geometry/GeometryServer",
groupDescription: "",
mapTitle: "",
mapSnippet: "",
mapItemDescription: "",
mapLicenseInfo: "",
defaultLayout: "list",
sortField: "numViews",
sortOrder: "desc",
mapViewer: "",
searchString: "",
searchType: "",
showBasemapGallery: true,
showMapSearch: true,
showOverviewMap: false,
showMoreInfo: true,
showRatings: true,
showViews: true,
showLicenseInfo: true,
showAttribution: false,
showComments: true,
defaultLocatorSymbol: "/themes/images/redpushpin.png",
markupSymbolWidth: 35,
markupSymbolHeight: 35,
zoomLevel: 12,
locatorDefaultAddress: "Lake Echo Rd Tracy City TN 37387"
},
//------------------------------------------------------------------------------------------------------------------------
// Header Widget Settings
//------------------------------------------------------------------------------------------------------------------------
// Set widgets settings such as widget title, widgetPath to be displayed in header panel
// Title: Name of the widget, will displayed as title of widget in header panel
// WidgetPath: path of the widget respective to the widgets package.
AppHeaderWidgets: [{
Title: "Settings",
WidgetPath: "widgets/settings/settings"
}, {
Title: "Item Search",
WidgetPath: "widgets/locator/locator"
}, { | }, {
Title: "Layout",
WidgetPath: "widgets/layout/layout"
}, {
Title: "Sign In",
WidgetPath: "widgets/portalSignin/portalSignin"
}],
// ------------------------------------------------------------------------------------------------------------------------
// BASEMAP SETTINGS
// ------------------------------------------------------------------------------------------------------------------------
// Set baseMap layers
// Please note: All base-maps need to use the same spatial reference. By default, the first base-map will be loaded
BaseMapLayers: [{
Key: "topo",
ThumbnailSource: "themes/images/Topographic.jpg",
Name: "Topographic Map",
MapURL: "http://services.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer"
}, {
Key: "streets",
ThumbnailSource: "themes/images/streets.png",
Name: "Street Map",
MapURL: "http://services.arcgisonline.com/ArcGIS/rest/services/World_Street_Map/MapServer"
}, {
Key: "imagery",
ThumbnailSource: "themes/images/imagery.png",
Name: "Imagery Map",
MapURL: "http://services.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer"
}],
// ------------------------------------------------------------------------------------------------------------------------
// ADDRESS SEARCH SETTINGS
// ------------------------------------------------------------------------------------------------------------------------
// Set locator settings such as locator display fields, match score
// LocatorParameters: Required parameters to search the address candidates.
// SearchField: The name of geocode service input field that accepts the search address. e.g. 'SingleLine' or 'Address'.
// SearchBoundaryField: The name of geocode service input field that accepts an extent to search an input address within. e.g."searchExtent".
// LocatorURL: Specify URL for geocode service.
// LocatorOutFields: The list of outfields to be included in the result set | Title: "Info",
WidgetPath: "widgets/info/info"
}, {
Title: "Sort By",
WidgetPath: "widgets/sortby/sortby" | random_line_split |
manager.go | )
// create election directory if it does not exist
c.client.Set(ctx, key, c.address, &client.SetOptions{
Dir: false,
TTL: ServiceTTL,
})
// create watcher
watcher := c.client.Watcher(c.dir.Election, &client.WatcherOptions{
AfterIndex: 0,
Recursive: true,
})
go func() {
for {
select {
case <-c.cancel:
if !isCancelled {
cancel()
isCancelled = true
}
return
}
}
}()
// observe election changes
for {
resp, err := watcher.Next(ctx)
if err != nil {
panic(err)
}
if resp.Node.Dir {
continue
}
if c.Leader() == nil {
continue
}
switch resp.Action {
case "set", "update":
case "delete":
if leader := c.Leader(); leader.Key == resp.Node.Key {
c.events <- &models.Event{Type: EventElection, Group: GroupWorker}
go c.LeaderDiscovery()
}
}
}
}
// RegisterNode - register node to etcd
func (c *Client) RegisterNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
Dir: false,
TTL: ServiceTTL,
})
}
// UnsetNode - unregister node and extend ttl
func (c *Client) UnsetNode(dir string) {
c.client.Delete(context.Background(), dir, nil)
}
// RenewNode - renew node and extend ttl
func (c *Client) RenewNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
PrevExist: client.PrevExist,
TTL: ServiceTTL,
})
}
// RunApplication - run application
func (c *Client) RunApplication(entrypoint *models.ApplicationEntryPoint) {
c.RLock()
if c.started {
return
}
c.RUnlock()
c.Lock()
c.started = true
c.Unlock()
receive := make(chan string)
// generate scope
scope := c.GenerateScope()
// launcher start daemon
go launcher.Start(scope, entrypoint)
// health check
if entrypoint.Health != nil && entrypoint.Health.Ports != nil {
go health.Check(receive, entrypoint.Health.Ports...)
}
for {
select {
case event := <-receive:
switch event {
case health.Pass:
c.Lock()
if !c.running {
fmt.Println("service is now running")
}
c.running = true
c.Unlock()
c.RegisterNode(c.dir.RunningNode(c.address))
if c.IsLeader() {
c.RegisterNode(c.dir.MasterNode(c.address))
}
case health.Fail:
c.Lock()
if c.running {
fmt.Println("service is now stopped")
}
c.running = false
c.Unlock()
c.UnsetNode(c.dir.RunningNode(c.address))
}
}
}
}
// IsLeader - is current node a leader
func (c *Client) IsLeader() bool {
// self node key
self := c.dir.ElectionNode(c.address)
if c.leader != nil && c.leader.Key == self {
return true
}
return false
}
// LeaderDiscovery - get leader/master node information
func (c *Client) LeaderDiscovery() {
dir := c.dir.Election
// self node key
self := fmt.Sprintf("%v/%v", dir, c.address)
// get a list of election nodes
resp, err := c.client.Get(context.Background(), dir, &client.GetOptions{Sort: true})
if err != nil {
log.Fatal(err)
}
// leader key and address
var key, addr string
// current lowest node index
var idx uint64
if len(resp.Node.Nodes) > 0 {
for _, v := range resp.Node.Nodes {
if v.Dir {
continue
}
if idx == 0 || v.CreatedIndex < idx {
key = v.Key
addr = v.Value
idx = v.CreatedIndex
}
}
}
if key == "" || addr == "" {
fmt.Println("# no nodes were found")
c.Lock()
c.leader = nil
c.Unlock()
} else {
leader := &models.Leader{Key: key, Address: addr}
if c.leader == nil {
if leader.Key == self {
fmt.Println("# elected as leader")
c.events <- &models.Event{Type: EventElected, Group: GroupLeader}
} else {
fmt.Println("# elected as worker")
// do not send any event until leader node is ready
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
go c.WaitForLeader()
}
}
} else if c.leader != nil && leader.Key != c.leader.Key {
if leader.Key == self {
fmt.Println("# re-elected as leader")
c.events <- &models.Event{Type: EventReElected, Group: GroupLeader}
}
}
c.Lock()
c.leader = leader
c.Unlock()
}
}
// WaitForLeader - wait for leader node is ready
func (c *Client) WaitForLeader() {
defer func() {
c.Lock()
c.locked = false
c.Unlock()
}()
c.RLock()
var locked = c.locked
c.RUnlock()
if !locked {
fmt.Println("# waiting for leader node")
c.Lock()
c.locked = true
c.Unlock()
interval := time.NewTicker(ServiceTTL)
defer interval.Stop()
for {
select {
case <-interval.C:
if c.IsLeader() {
return
}
fmt.Println("# scanning running nodes...")
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
fmt.Println("# no nodes are ready yet")
}
}
}
}
}
// GenerateScope - generate scope base
func (c *Client) GenerateScope() *models.Scope {
return models.SetupEnvironment(
c.GetServiceHostname(),
c.GetServiceIP(),
c.GetRunningNodes(),
)
}
// GetRunningNodes to get existed nodes
func (c *Client) GetRunningNodes() []models.Node {
dir := c.dir.Running
res := []models.Node{}
if c.client == nil {
return res
}
resp, err := c.client.Get(context.Background(), dir, nil)
if err != nil {
return res
}
if !resp.Node.Dir {
return res
}
for _, node := range resp.Node.Nodes {
res = append(res, models.Node(node.Value))
}
return res
}
// GetEnvEndPoint - to extract etcd endpoint environment from shell
func (c *Client) GetEnvEndPoint() string {
whitelist := []string{"ETCD_ENDPOINT", "ETCDCTL_ENDPOINT", "ETCD_HOST", "COREOS_PRIVATE_IPV4", "COREOS_PUBLIC_IPV4"}
for _, i := range whitelist {
if v := os.Getenv(i); v != "" {
return v
}
}
return ""
}
// GetEndPoint - to get endpoint from config, env or docker host
func (c *Client) GetEndPoint() []string {
for i := 0; i < 3; i++ {
switch i {
case 0:
if c.endpoints != nil && len(c.endpoints) > 0 {
return c.endpoints
}
case 1:
env := c.GetEnvEndPoint()
if strings.TrimSpace(env) == "" {
continue
}
if arr := strings.Split(env, ","); len(arr) > 0 {
return arr
}
case 2:
addr := c.GetServiceHostIP()
return []string{
fmt.Sprintf("http://%v:2379", addr),
fmt.Sprintf("http://%v:4001", addr),
}
}
}
return []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001"}
}
// GetServiceHostname - extract FQDN hostname from kernel
func (c *Client) GetServiceHostname() string {
hostname, err := os.Hostname()
if err != nil {
return ""
}
return hostname
}
// GetServiceHostIP - return service host ip (container host)
func (c *Client) GetServiceHostIP() string {
output, err := network.IP("route")
if err != nil {
log.Fatal(err)
}
for _, line := range strings.Split(output, "\n") {
if !strings.Contains(line, "default") {
continue
}
parts := strings.Split(line, " ")
for _, part := range parts {
if ip := net.ParseIP(part); ip != nil {
return part
}
}
}
return ""
}
// GetServiceIP - get service ip address
func (c *Client) GetServiceIP() string {
ifaces, err := net.Interfaces()
if err != nil | {
return ""
} | conditional_block |
|
manager.go | return leader
}
// SetupDirectory - setup directory for service
func (c *Client) SetupDirectory() {
v := reflect.ValueOf(c.dir)
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if v.Kind() != reflect.Struct {
log.Fatal("only accepts structs")
}
for i := 0; i < v.NumField(); i++ {
key := v.Field(i).String()
c.client.Set(context.Background(), key, "", &client.SetOptions{
Dir: true,
PrevExist: client.PrevNoExist,
})
}
}
// SetDir - set discovery directory
func (c *Client) SetDir(prefix, name string) {
c.Lock()
c.dir = &models.Directory{
Base: fmt.Sprintf("%v/%v", prefix, name),
Election: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryElection),
Running: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryRunning),
Queue: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryQueue),
Nodes: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryNodes),
Masters: fmt.Sprintf("%v/%v/%v", prefix, name, DirectoryMasters),
}
c.Unlock()
}
// Observe - observe directory
func (c *Client) Observe() {
// register service
c.SetupDirectory()
c.RegisterNode(c.dir.Node(c.address))
c.RegisterNode(c.dir.QueueNode(c.address))
c.RegisterNode(c.dir.ElectionNode(c.address))
// create a interval timer to monitor service nodes
interval := time.NewTicker(ServiceTTL / 2)
defer interval.Stop()
for {
select {
case <-interval.C:
go func() {
// read running state
c.RLock()
var running = c.running
c.RUnlock()
// renew nodes
c.RenewNode(c.dir.Node(c.address))
c.RenewNode(c.dir.ElectionNode(c.address))
if running {
c.RenewNode(c.dir.RunningNode(c.address))
if c.IsLeader() {
c.RenewNode(c.dir.MasterNode(c.address))
}
} else {
c.RenewNode(c.dir.QueueNode(c.address))
}
c.LeaderDiscovery()
}()
}
}
}
// Election - to start leader election task
func (c *Client) Election() {
defer func() {
// recover if panic
if r := recover(); r != nil {
c.Election()
}
}()
// determine if context is already cancelled
isCancelled := false
// create context with cancel
ctx, cancel := context.WithCancel(context.Background())
defer func() {
if !isCancelled {
cancel()
isCancelled = true
}
}()
// generate election key
key := c.dir.ElectionNode(c.address)
// create election directory if it does not exist
c.client.Set(ctx, key, c.address, &client.SetOptions{
Dir: false,
TTL: ServiceTTL,
})
// create watcher
watcher := c.client.Watcher(c.dir.Election, &client.WatcherOptions{
AfterIndex: 0,
Recursive: true,
})
go func() {
for {
select {
case <-c.cancel:
if !isCancelled {
cancel()
isCancelled = true
}
return
}
}
}()
// observe election changes
for {
resp, err := watcher.Next(ctx)
if err != nil {
panic(err)
}
if resp.Node.Dir {
continue
}
if c.Leader() == nil {
continue
}
switch resp.Action {
case "set", "update":
case "delete":
if leader := c.Leader(); leader.Key == resp.Node.Key {
c.events <- &models.Event{Type: EventElection, Group: GroupWorker}
go c.LeaderDiscovery()
}
}
}
}
// RegisterNode - register node to etcd
func (c *Client) RegisterNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
Dir: false,
TTL: ServiceTTL,
})
}
// UnsetNode - unregister node and extend ttl
func (c *Client) UnsetNode(dir string) {
c.client.Delete(context.Background(), dir, nil)
}
// RenewNode - renew node and extend ttl
func (c *Client) RenewNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
PrevExist: client.PrevExist,
TTL: ServiceTTL,
})
}
// RunApplication - run application
func (c *Client) RunApplication(entrypoint *models.ApplicationEntryPoint) {
c.RLock()
if c.started {
return
}
c.RUnlock()
c.Lock()
c.started = true
c.Unlock()
receive := make(chan string)
// generate scope
scope := c.GenerateScope()
// launcher start daemon
go launcher.Start(scope, entrypoint)
// health check
if entrypoint.Health != nil && entrypoint.Health.Ports != nil {
go health.Check(receive, entrypoint.Health.Ports...)
}
for {
select {
case event := <-receive:
switch event {
case health.Pass:
c.Lock()
if !c.running {
fmt.Println("service is now running")
}
c.running = true
c.Unlock()
c.RegisterNode(c.dir.RunningNode(c.address))
if c.IsLeader() {
c.RegisterNode(c.dir.MasterNode(c.address))
}
case health.Fail:
c.Lock()
if c.running {
fmt.Println("service is now stopped")
}
c.running = false
c.Unlock()
c.UnsetNode(c.dir.RunningNode(c.address))
}
}
}
}
// IsLeader - is current node a leader
func (c *Client) IsLeader() bool {
// self node key
self := c.dir.ElectionNode(c.address)
if c.leader != nil && c.leader.Key == self {
return true
}
return false
}
// LeaderDiscovery - get leader/master node information
func (c *Client) LeaderDiscovery() {
dir := c.dir.Election
// self node key
self := fmt.Sprintf("%v/%v", dir, c.address)
// get a list of election nodes
resp, err := c.client.Get(context.Background(), dir, &client.GetOptions{Sort: true})
if err != nil {
log.Fatal(err)
}
// leader key and address
var key, addr string
// current lowest node index
var idx uint64
if len(resp.Node.Nodes) > 0 {
for _, v := range resp.Node.Nodes {
if v.Dir {
continue
}
if idx == 0 || v.CreatedIndex < idx {
key = v.Key
addr = v.Value
idx = v.CreatedIndex
}
}
}
if key == "" || addr == "" {
fmt.Println("# no nodes were found")
c.Lock()
c.leader = nil
c.Unlock()
} else {
leader := &models.Leader{Key: key, Address: addr}
if c.leader == nil {
if leader.Key == self {
fmt.Println("# elected as leader")
c.events <- &models.Event{Type: EventElected, Group: GroupLeader}
} else {
fmt.Println("# elected as worker")
// do not send any event until leader node is ready
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
go c.WaitForLeader()
}
}
} else if c.leader != nil && leader.Key != c.leader.Key {
if leader.Key == self {
fmt.Println("# re-elected as leader")
c.events <- &models.Event{Type: EventReElected, Group: GroupLeader}
}
}
c.Lock()
c.leader = leader
c.Unlock()
}
}
// WaitForLeader - wait for leader node is ready
func (c *Client) WaitForLeader() {
defer func() {
c.Lock()
c.locked = false
c.Unlock()
}()
c.RLock()
var locked = c.locked
c.RUnlock()
if !locked {
fmt.Println("# waiting for leader node")
c.Lock()
c.locked = true
c.Unlock()
interval := time.NewTicker(ServiceTTL)
defer interval.Stop()
for {
select {
case <-interval.C:
if c.IsLeader() {
return
}
fmt.Println("# scanning running nodes...")
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
fmt.Println("# no nodes are ready yet")
}
}
}
}
}
// GenerateScope - generate scope base
func (c *Client) GenerateScope() *models.Scope | {
return models.SetupEnvironment(
c.GetServiceHostname(),
c.GetServiceIP(),
c.GetRunningNodes(),
)
} | identifier_body |
|
manager.go | case "delete":
if leader := c.Leader(); leader.Key == resp.Node.Key {
c.events <- &models.Event{Type: EventElection, Group: GroupWorker}
go c.LeaderDiscovery()
}
}
}
}
// RegisterNode - register node to etcd
func (c *Client) RegisterNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
Dir: false,
TTL: ServiceTTL,
})
}
// UnsetNode - unregister node and extend ttl
func (c *Client) UnsetNode(dir string) {
c.client.Delete(context.Background(), dir, nil)
}
// RenewNode - renew node and extend ttl
func (c *Client) RenewNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
PrevExist: client.PrevExist,
TTL: ServiceTTL,
})
}
// RunApplication - run application
func (c *Client) RunApplication(entrypoint *models.ApplicationEntryPoint) {
c.RLock()
if c.started {
return
}
c.RUnlock()
c.Lock()
c.started = true
c.Unlock()
receive := make(chan string)
// generate scope
scope := c.GenerateScope()
// launcher start daemon
go launcher.Start(scope, entrypoint)
// health check
if entrypoint.Health != nil && entrypoint.Health.Ports != nil {
go health.Check(receive, entrypoint.Health.Ports...)
}
for {
select {
case event := <-receive:
switch event {
case health.Pass:
c.Lock()
if !c.running {
fmt.Println("service is now running")
}
c.running = true
c.Unlock()
c.RegisterNode(c.dir.RunningNode(c.address))
if c.IsLeader() {
c.RegisterNode(c.dir.MasterNode(c.address))
}
case health.Fail:
c.Lock()
if c.running {
fmt.Println("service is now stopped")
}
c.running = false
c.Unlock()
c.UnsetNode(c.dir.RunningNode(c.address))
}
}
}
}
// IsLeader - is current node a leader
func (c *Client) IsLeader() bool {
// self node key
self := c.dir.ElectionNode(c.address)
if c.leader != nil && c.leader.Key == self {
return true
}
return false
}
// LeaderDiscovery - get leader/master node information
func (c *Client) LeaderDiscovery() {
dir := c.dir.Election
// self node key
self := fmt.Sprintf("%v/%v", dir, c.address)
// get a list of election nodes
resp, err := c.client.Get(context.Background(), dir, &client.GetOptions{Sort: true})
if err != nil {
log.Fatal(err)
}
// leader key and address
var key, addr string
// current lowest node index
var idx uint64
if len(resp.Node.Nodes) > 0 {
for _, v := range resp.Node.Nodes {
if v.Dir {
continue
}
if idx == 0 || v.CreatedIndex < idx {
key = v.Key
addr = v.Value
idx = v.CreatedIndex
}
}
}
if key == "" || addr == "" {
fmt.Println("# no nodes were found")
c.Lock()
c.leader = nil
c.Unlock()
} else {
leader := &models.Leader{Key: key, Address: addr}
if c.leader == nil {
if leader.Key == self {
fmt.Println("# elected as leader")
c.events <- &models.Event{Type: EventElected, Group: GroupLeader}
} else {
fmt.Println("# elected as worker")
// do not send any event until leader node is ready
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
go c.WaitForLeader()
}
}
} else if c.leader != nil && leader.Key != c.leader.Key {
if leader.Key == self {
fmt.Println("# re-elected as leader")
c.events <- &models.Event{Type: EventReElected, Group: GroupLeader}
}
}
c.Lock()
c.leader = leader
c.Unlock()
}
}
// WaitForLeader - wait for leader node is ready
func (c *Client) WaitForLeader() {
defer func() {
c.Lock()
c.locked = false
c.Unlock()
}()
c.RLock()
var locked = c.locked
c.RUnlock()
if !locked {
fmt.Println("# waiting for leader node")
c.Lock()
c.locked = true
c.Unlock()
interval := time.NewTicker(ServiceTTL)
defer interval.Stop()
for {
select {
case <-interval.C:
if c.IsLeader() {
return
}
fmt.Println("# scanning running nodes...")
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
fmt.Println("# no nodes are ready yet")
}
}
}
}
}
// GenerateScope - generate scope base
func (c *Client) GenerateScope() *models.Scope {
return models.SetupEnvironment(
c.GetServiceHostname(),
c.GetServiceIP(),
c.GetRunningNodes(),
)
}
// GetRunningNodes to get existed nodes
func (c *Client) GetRunningNodes() []models.Node {
dir := c.dir.Running
res := []models.Node{}
if c.client == nil {
return res
}
resp, err := c.client.Get(context.Background(), dir, nil)
if err != nil {
return res
}
if !resp.Node.Dir {
return res
}
for _, node := range resp.Node.Nodes {
res = append(res, models.Node(node.Value))
}
return res
}
// GetEnvEndPoint - to extract etcd endpoint environment from shell
func (c *Client) GetEnvEndPoint() string {
whitelist := []string{"ETCD_ENDPOINT", "ETCDCTL_ENDPOINT", "ETCD_HOST", "COREOS_PRIVATE_IPV4", "COREOS_PUBLIC_IPV4"}
for _, i := range whitelist {
if v := os.Getenv(i); v != "" {
return v
}
}
return ""
}
// GetEndPoint - to get endpoint from config, env or docker host
func (c *Client) GetEndPoint() []string {
for i := 0; i < 3; i++ {
switch i {
case 0:
if c.endpoints != nil && len(c.endpoints) > 0 {
return c.endpoints
}
case 1:
env := c.GetEnvEndPoint()
if strings.TrimSpace(env) == "" {
continue
}
if arr := strings.Split(env, ","); len(arr) > 0 {
return arr
}
case 2:
addr := c.GetServiceHostIP()
return []string{
fmt.Sprintf("http://%v:2379", addr),
fmt.Sprintf("http://%v:4001", addr),
}
}
}
return []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001"}
}
// GetServiceHostname - extract FQDN hostname from kernel
func (c *Client) GetServiceHostname() string {
hostname, err := os.Hostname()
if err != nil {
return ""
}
return hostname
}
// GetServiceHostIP - return service host ip (container host)
func (c *Client) GetServiceHostIP() string {
output, err := network.IP("route")
if err != nil {
log.Fatal(err)
}
for _, line := range strings.Split(output, "\n") {
if !strings.Contains(line, "default") {
continue
}
parts := strings.Split(line, " ")
for _, part := range parts {
if ip := net.ParseIP(part); ip != nil {
return part
}
}
}
return ""
}
// GetServiceIP - get service ip address
func (c *Client) GetServiceIP() string {
ifaces, err := net.Interfaces()
if err != nil {
return ""
}
for _, iface := range ifaces {
if iface.Flags&net.FlagUp == 0 {
continue // interface down
}
if iface.Flags&net.FlagLoopback != 0 {
continue // loopback interface
}
addrs, err := iface.Addrs()
if err != nil {
return ""
}
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip == nil || ip.IsLoopback() {
continue
}
ip = ip.To4()
if ip == nil {
continue // not an ipv4 address
}
return ip.String()
}
}
return ""
}
// Connect to connect etcd client
func (c *Client) | Connect | identifier_name |
|
manager.go | AfterIndex: 0,
Recursive: true,
})
go func() {
for {
select {
case <-c.cancel:
if !isCancelled {
cancel()
isCancelled = true
}
return
}
}
}()
// observe election changes
for {
resp, err := watcher.Next(ctx)
if err != nil {
panic(err)
}
if resp.Node.Dir {
continue
}
if c.Leader() == nil {
continue
}
switch resp.Action {
case "set", "update":
case "delete":
if leader := c.Leader(); leader.Key == resp.Node.Key {
c.events <- &models.Event{Type: EventElection, Group: GroupWorker}
go c.LeaderDiscovery()
}
}
}
}
// RegisterNode - register node to etcd
func (c *Client) RegisterNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
Dir: false,
TTL: ServiceTTL,
})
}
// UnsetNode - unregister node and extend ttl
func (c *Client) UnsetNode(dir string) {
c.client.Delete(context.Background(), dir, nil)
}
// RenewNode - renew node and extend ttl
func (c *Client) RenewNode(dir string) {
c.client.Set(context.Background(), dir, c.address, &client.SetOptions{
PrevExist: client.PrevExist,
TTL: ServiceTTL,
})
}
// RunApplication - run application
func (c *Client) RunApplication(entrypoint *models.ApplicationEntryPoint) {
c.RLock()
if c.started {
return
}
c.RUnlock()
c.Lock()
c.started = true
c.Unlock()
receive := make(chan string)
// generate scope
scope := c.GenerateScope()
// launcher start daemon
go launcher.Start(scope, entrypoint)
// health check
if entrypoint.Health != nil && entrypoint.Health.Ports != nil {
go health.Check(receive, entrypoint.Health.Ports...)
}
for {
select {
case event := <-receive:
switch event {
case health.Pass:
c.Lock()
if !c.running {
fmt.Println("service is now running")
}
c.running = true
c.Unlock()
c.RegisterNode(c.dir.RunningNode(c.address))
if c.IsLeader() {
c.RegisterNode(c.dir.MasterNode(c.address))
}
case health.Fail:
c.Lock()
if c.running {
fmt.Println("service is now stopped")
}
c.running = false
c.Unlock()
c.UnsetNode(c.dir.RunningNode(c.address))
}
}
}
}
// IsLeader - is current node a leader
func (c *Client) IsLeader() bool {
// self node key
self := c.dir.ElectionNode(c.address)
if c.leader != nil && c.leader.Key == self {
return true
}
return false
}
// LeaderDiscovery - get leader/master node information
func (c *Client) LeaderDiscovery() {
dir := c.dir.Election
// self node key
self := fmt.Sprintf("%v/%v", dir, c.address)
// get a list of election nodes
resp, err := c.client.Get(context.Background(), dir, &client.GetOptions{Sort: true})
if err != nil {
log.Fatal(err)
}
// leader key and address
var key, addr string
// current lowest node index
var idx uint64
if len(resp.Node.Nodes) > 0 {
for _, v := range resp.Node.Nodes {
if v.Dir {
continue
}
if idx == 0 || v.CreatedIndex < idx {
key = v.Key
addr = v.Value
idx = v.CreatedIndex
}
}
}
if key == "" || addr == "" {
fmt.Println("# no nodes were found")
c.Lock()
c.leader = nil
c.Unlock()
} else {
leader := &models.Leader{Key: key, Address: addr}
if c.leader == nil {
if leader.Key == self {
fmt.Println("# elected as leader")
c.events <- &models.Event{Type: EventElected, Group: GroupLeader}
} else {
fmt.Println("# elected as worker")
// do not send any event until leader node is ready
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
go c.WaitForLeader()
}
}
} else if c.leader != nil && leader.Key != c.leader.Key {
if leader.Key == self {
fmt.Println("# re-elected as leader")
c.events <- &models.Event{Type: EventReElected, Group: GroupLeader}
}
}
c.Lock()
c.leader = leader
c.Unlock()
}
}
// WaitForLeader - wait for leader node is ready
func (c *Client) WaitForLeader() {
defer func() {
c.Lock()
c.locked = false
c.Unlock()
}()
c.RLock()
var locked = c.locked
c.RUnlock()
if !locked {
fmt.Println("# waiting for leader node")
c.Lock()
c.locked = true
c.Unlock()
interval := time.NewTicker(ServiceTTL)
defer interval.Stop()
for {
select {
case <-interval.C:
if c.IsLeader() {
return
}
fmt.Println("# scanning running nodes...")
if nodes := c.GetRunningNodes(); len(nodes) > 0 {
c.events <- &models.Event{Type: EventElected, Group: GroupWorker}
} else {
fmt.Println("# no nodes are ready yet")
}
}
}
}
}
// GenerateScope - generate scope base
func (c *Client) GenerateScope() *models.Scope {
return models.SetupEnvironment(
c.GetServiceHostname(),
c.GetServiceIP(),
c.GetRunningNodes(),
)
}
// GetRunningNodes to get existed nodes
func (c *Client) GetRunningNodes() []models.Node {
dir := c.dir.Running
res := []models.Node{}
if c.client == nil {
return res
}
resp, err := c.client.Get(context.Background(), dir, nil)
if err != nil {
return res
}
if !resp.Node.Dir {
return res
}
for _, node := range resp.Node.Nodes {
res = append(res, models.Node(node.Value))
}
return res
}
// GetEnvEndPoint - to extract etcd endpoint environment from shell
func (c *Client) GetEnvEndPoint() string {
whitelist := []string{"ETCD_ENDPOINT", "ETCDCTL_ENDPOINT", "ETCD_HOST", "COREOS_PRIVATE_IPV4", "COREOS_PUBLIC_IPV4"}
for _, i := range whitelist {
if v := os.Getenv(i); v != "" {
return v
}
}
return ""
}
// GetEndPoint - to get endpoint from config, env or docker host
func (c *Client) GetEndPoint() []string {
for i := 0; i < 3; i++ {
switch i {
case 0:
if c.endpoints != nil && len(c.endpoints) > 0 {
return c.endpoints
}
case 1:
env := c.GetEnvEndPoint()
if strings.TrimSpace(env) == "" {
continue
}
if arr := strings.Split(env, ","); len(arr) > 0 {
return arr
}
case 2:
addr := c.GetServiceHostIP()
return []string{
fmt.Sprintf("http://%v:2379", addr),
fmt.Sprintf("http://%v:4001", addr),
}
}
}
return []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001"}
}
// GetServiceHostname - extract FQDN hostname from kernel
func (c *Client) GetServiceHostname() string {
hostname, err := os.Hostname()
if err != nil {
return ""
}
return hostname
}
// GetServiceHostIP - return service host ip (container host)
func (c *Client) GetServiceHostIP() string {
output, err := network.IP("route")
if err != nil {
log.Fatal(err)
}
for _, line := range strings.Split(output, "\n") {
if !strings.Contains(line, "default") {
continue
}
parts := strings.Split(line, " ")
for _, part := range parts {
if ip := net.ParseIP(part); ip != nil {
return part
}
}
}
return ""
}
// GetServiceIP - get service ip address
func (c *Client) GetServiceIP() string {
ifaces, err := net.Interfaces()
if err != nil {
return ""
}
for _, iface := range ifaces {
if iface.Flags&net.FlagUp == 0 {
continue // interface down
}
if iface.Flags&net.FlagLoopback != 0 { | continue // loopback interface
}
addrs, err := iface.Addrs()
if err != nil { | random_line_split |
|
main.rs | : u16,
boost: u16,
initiative: i8,
attack: AttackTypes,
immunity: AttackTypes,
weakness: AttackTypes,
}
impl Group {
fn effective_power(&self) -> u32 {
self.units * (self.damages as u32 + self.boost as u32)
} |
fn calc_hit(&self, enemy: &Group) -> u32 {
match (
self.immunity.to(enemy.attack),
self.weakness.to(enemy.attack),
) {
(false, false) => enemy.effective_power(),
(true, false) => 0,
(false, true) => enemy.effective_power() * 2,
(true, true) => unreachable!(),
}
}
fn hit(&mut self, points: u32) -> u32 {
let org_units = self.units;
let units_kill = points / self.hits;
self.units = self.units.saturating_sub(units_kill);
let units_lost = org_units - self.units;
dbg_print!("Units lost: {}\n", units_lost);
units_lost
}
}
#[derive(Default, Clone)]
struct Army<'a> {
groups: Vec<Group>,
name: &'a str,
}
impl Army<'_> {
fn sort_for_attack(&self) -> Vec<u16> {
let mut ids: Vec<u16> = (0..self.groups.len() as u16).collect();
ids.sort_by_key(|i|
// descending sort
(
!self.groups[*i as usize].is_alive(),
-(self.groups[*i as usize].effective_power() as i32),
-self.groups[*i as usize].initiative,
));
ids
}
fn choose_enemy(&self, order: &Vec<u16>, enemy: &Army) -> Vec<Option<u16>> {
let mut chosen = vec![false; enemy.groups.len()];
order
.iter()
.map(|idx| {
let i = *idx as usize;
if !self.groups[i].is_alive() {
return None;
}
let mut enemy_ids: Vec<_> = (0..enemy.groups.len()).collect();
enemy_ids.sort_by_cached_key(|&j| {
(
!enemy.groups[j].is_alive(),
chosen[j],
-(enemy.groups[j].calc_hit(&self.groups[i]) as i32),
-(enemy.groups[j].effective_power() as i32),
-enemy.groups[j].initiative,
)
});
// If chosen[j] wasnโt a field in sorting, weโve to use |filter|, not
// |take_while| as top results mightโve been already chosen.
match enemy_ids
.iter()
.take_while(|&&j| {
// Although not explicitly stated in puzzle, if this unit canโt deal
// any damage to any enemy unit, then donโt mark chosen.
enemy.groups[j].is_alive()
&& !chosen[j]
&& enemy.groups[j].calc_hit(&self.groups[i]) > 0
})
.next()
{
Some(&c) => {
chosen[c] = true;
Some(c as u16)
}
None => None,
}
})
.collect()
}
fn is_alive(&self) -> bool {
self.groups.iter().any(|g| g.is_alive())
}
fn boost(&mut self, points: u16) {
for g in &mut self.groups {
g.boost = points;
}
}
}
// PrimInt is yet to get the BITS member; make a new trait.
// https://stackoverflow.com/q/73711297/183120
trait Bits {
const BITS: usize;
}
macro_rules! impl_bits {
( $($ty:ident)* ) => {
$(
impl Bits for $ty {
const BITS: usize = Self::BITS as usize;
}
)*
};
}
impl_bits!(u8 u16 u32 u64 u128);
fn to_flag<'a, T: Bits + PrimInt>(
attack: &'a str,
attack_to_flag: &mut HashMap<&'a str, T>,
) -> Result<T, Box<dyn Error>> {
let n = attack_to_flag.len();
let mask = T::one() << n;
match n < T::BITS {
true => Ok(*attack_to_flag.entry(attack).or_insert(mask)),
false => Err(Box::<dyn Error>::from(
"More than {T::BITS} distinct attacks; insufficient bit-width.",
)),
}
}
struct Attack {
army: usize,
group: usize,
enemy: usize,
}
impl Attack {
fn enemy_army(&self) -> usize {
// make a bool and convert to integral as !1u8 = 254
(self.army == 0) as usize
}
}
// Army ID and remaining units
struct Victor(Option<u8>, u32);
fn fight(mut armies: [Army; 2]) -> Victor {
while armies.iter().all(|a| a.is_alive()) {
let ids = [armies[0].sort_for_attack(), armies[1].sort_for_attack()];
let choices = [
armies[0].choose_enemy(&ids[0], &armies[1]),
armies[1].choose_enemy(&ids[1], &armies[0]),
];
// Excessive debugging; turn on if needed.
// for (i, _) in armies.iter().enumerate() {
// dbg_print!("Army {}\n", i);
// for (idx, &j) in ids[i].iter().enumerate() {
// dbg_print!(
// " Group {}: {} --> {:?}\n",
// j,
// armies[i].groups[j as usize].units,
// choices[i][idx]
// );
// }
// }
// collect all alive groups with respective army ID
let mut fight: Vec<Attack> = ids[0]
.iter()
.zip(choices[0].iter())
.filter_map(|(&i, &choice)| {
match (armies[0].groups[i as usize].is_alive(), choice) {
(true, Some(enemy)) => Some(Attack {
army: 0,
group: i as usize,
enemy: enemy.into(),
}),
_ => None,
}
})
.chain(ids[1].iter().zip(choices[1].iter()).filter_map(
|(&j, &choice)| match (armies[1].groups[j as usize].is_alive(), choice)
{
(true, Some(enemy)) => Some(Attack {
army: 1,
group: j as usize,
enemy: enemy.into(),
}),
_ => None,
},
))
.collect::<Vec<Attack>>();
// Attacks in this fight are only b/w alive groups from here on.
fight.sort_by_key(|a| -armies[a.army].groups[a.group].initiative);
let mut total_units_lost = 0;
for attack in &fight {
dbg_print!(
"{}'s Group {} --> {}'s Group {}; ",
armies[attack.army].name,
attack.group,
armies[attack.enemy_army()].name,
attack.enemy
);
let attacker = &armies[attack.army].groups[attack.group];
let defender = &armies[attack.enemy_army()].groups[attack.enemy];
let damage = defender.calc_hit(attacker);
let defender_mut = &mut armies[attack.enemy_army()].groups[attack.enemy];
total_units_lost += defender_mut.hit(damage);
}
if total_units_lost == 0 {
return Victor(None, 0);
}
dbg_print!("--------------\n");
}
match armies[0].is_alive() {
true => Victor(
Some(0),
armies[0].groups.iter().fold(0, |units, g| units + g.units),
),
false => Victor(
Some(1),
armies[1].groups.iter().fold(0, |units, g| units + g.units),
),
}
}
fn main() -> Result<(), Box<dyn Error>> {
let mut input_str = String::new();
let mut stdin = io::stdin();
stdin.read_to_string(&mut input_str)?;
let input = InputParser::parse(Rule::file, &input_str)
.expect("Invalid input")
.next()
.unwrap();
let mut armies = [Army::default(), Army::default()];
let mut next_army: u8 = 0;
let mut attack_to_flag: HashMap<&str, u8> = HashMap::new();
for line in input.into_inner() {
match line.as_rule() {
Rule::army_name => {
armies[next_army as usize].name = line.as_str();
next_army += 1;
}
Rule::group => {
let mut counts = [0u32; 4];
let mut idx = 0;
let mut attack = AttackTypes(0);
let mut immunities = 0u8;
let mut weaknesses = 0u8;
|
fn is_alive(&self) -> bool {
self.units > 0
} | random_line_split |
main.rs | : u16,
boost: u16,
initiative: i8,
attack: AttackTypes,
immunity: AttackTypes,
weakness: AttackTypes,
}
impl Group {
fn effective_power(&self) -> u32 {
self.units * (self.damages as u32 + self.boost as u32)
}
fn is_alive(&self) -> bool {
self.units > 0
}
fn calc_hit(&self, enemy: &Group) -> u32 {
match (
self.immunity.to(enemy.attack),
self.weakness.to(enemy.attack),
) {
(false, false) => enemy.effective_power(),
(true, false) => 0,
(false, true) => enemy.effective_power() * 2,
(true, true) => unreachable!(),
}
}
fn hit(&mut self, points: u32) -> u32 |
}
#[derive(Default, Clone)]
struct Army<'a> {
groups: Vec<Group>,
name: &'a str,
}
impl Army<'_> {
fn sort_for_attack(&self) -> Vec<u16> {
let mut ids: Vec<u16> = (0..self.groups.len() as u16).collect();
ids.sort_by_key(|i|
// descending sort
(
!self.groups[*i as usize].is_alive(),
-(self.groups[*i as usize].effective_power() as i32),
-self.groups[*i as usize].initiative,
));
ids
}
fn choose_enemy(&self, order: &Vec<u16>, enemy: &Army) -> Vec<Option<u16>> {
let mut chosen = vec![false; enemy.groups.len()];
order
.iter()
.map(|idx| {
let i = *idx as usize;
if !self.groups[i].is_alive() {
return None;
}
let mut enemy_ids: Vec<_> = (0..enemy.groups.len()).collect();
enemy_ids.sort_by_cached_key(|&j| {
(
!enemy.groups[j].is_alive(),
chosen[j],
-(enemy.groups[j].calc_hit(&self.groups[i]) as i32),
-(enemy.groups[j].effective_power() as i32),
-enemy.groups[j].initiative,
)
});
// If chosen[j] wasnโt a field in sorting, weโve to use |filter|, not
// |take_while| as top results mightโve been already chosen.
match enemy_ids
.iter()
.take_while(|&&j| {
// Although not explicitly stated in puzzle, if this unit canโt deal
// any damage to any enemy unit, then donโt mark chosen.
enemy.groups[j].is_alive()
&& !chosen[j]
&& enemy.groups[j].calc_hit(&self.groups[i]) > 0
})
.next()
{
Some(&c) => {
chosen[c] = true;
Some(c as u16)
}
None => None,
}
})
.collect()
}
fn is_alive(&self) -> bool {
self.groups.iter().any(|g| g.is_alive())
}
fn boost(&mut self, points: u16) {
for g in &mut self.groups {
g.boost = points;
}
}
}
// PrimInt is yet to get the BITS member; make a new trait.
// https://stackoverflow.com/q/73711297/183120
trait Bits {
const BITS: usize;
}
macro_rules! impl_bits {
( $($ty:ident)* ) => {
$(
impl Bits for $ty {
const BITS: usize = Self::BITS as usize;
}
)*
};
}
impl_bits!(u8 u16 u32 u64 u128);
fn to_flag<'a, T: Bits + PrimInt>(
attack: &'a str,
attack_to_flag: &mut HashMap<&'a str, T>,
) -> Result<T, Box<dyn Error>> {
let n = attack_to_flag.len();
let mask = T::one() << n;
match n < T::BITS {
true => Ok(*attack_to_flag.entry(attack).or_insert(mask)),
false => Err(Box::<dyn Error>::from(
"More than {T::BITS} distinct attacks; insufficient bit-width.",
)),
}
}
struct Attack {
army: usize,
group: usize,
enemy: usize,
}
impl Attack {
fn enemy_army(&self) -> usize {
// make a bool and convert to integral as !1u8 = 254
(self.army == 0) as usize
}
}
// Army ID and remaining units
struct Victor(Option<u8>, u32);
fn fight(mut armies: [Army; 2]) -> Victor {
while armies.iter().all(|a| a.is_alive()) {
let ids = [armies[0].sort_for_attack(), armies[1].sort_for_attack()];
let choices = [
armies[0].choose_enemy(&ids[0], &armies[1]),
armies[1].choose_enemy(&ids[1], &armies[0]),
];
// Excessive debugging; turn on if needed.
// for (i, _) in armies.iter().enumerate() {
// dbg_print!("Army {}\n", i);
// for (idx, &j) in ids[i].iter().enumerate() {
// dbg_print!(
// " Group {}: {} --> {:?}\n",
// j,
// armies[i].groups[j as usize].units,
// choices[i][idx]
// );
// }
// }
// collect all alive groups with respective army ID
let mut fight: Vec<Attack> = ids[0]
.iter()
.zip(choices[0].iter())
.filter_map(|(&i, &choice)| {
match (armies[0].groups[i as usize].is_alive(), choice) {
(true, Some(enemy)) => Some(Attack {
army: 0,
group: i as usize,
enemy: enemy.into(),
}),
_ => None,
}
})
.chain(ids[1].iter().zip(choices[1].iter()).filter_map(
|(&j, &choice)| match (armies[1].groups[j as usize].is_alive(), choice)
{
(true, Some(enemy)) => Some(Attack {
army: 1,
group: j as usize,
enemy: enemy.into(),
}),
_ => None,
},
))
.collect::<Vec<Attack>>();
// Attacks in this fight are only b/w alive groups from here on.
fight.sort_by_key(|a| -armies[a.army].groups[a.group].initiative);
let mut total_units_lost = 0;
for attack in &fight {
dbg_print!(
"{}'s Group {} --> {}'s Group {}; ",
armies[attack.army].name,
attack.group,
armies[attack.enemy_army()].name,
attack.enemy
);
let attacker = &armies[attack.army].groups[attack.group];
let defender = &armies[attack.enemy_army()].groups[attack.enemy];
let damage = defender.calc_hit(attacker);
let defender_mut = &mut armies[attack.enemy_army()].groups[attack.enemy];
total_units_lost += defender_mut.hit(damage);
}
if total_units_lost == 0 {
return Victor(None, 0);
}
dbg_print!("--------------\n");
}
match armies[0].is_alive() {
true => Victor(
Some(0),
armies[0].groups.iter().fold(0, |units, g| units + g.units),
),
false => Victor(
Some(1),
armies[1].groups.iter().fold(0, |units, g| units + g.units),
),
}
}
fn main() -> Result<(), Box<dyn Error>> {
let mut input_str = String::new();
let mut stdin = io::stdin();
stdin.read_to_string(&mut input_str)?;
let input = InputParser::parse(Rule::file, &input_str)
.expect("Invalid input")
.next()
.unwrap();
let mut armies = [Army::default(), Army::default()];
let mut next_army: u8 = 0;
let mut attack_to_flag: HashMap<&str, u8> = HashMap::new();
for line in input.into_inner() {
match line.as_rule() {
Rule::army_name => {
armies[next_army as usize].name = line.as_str();
next_army += 1;
}
Rule::group => {
let mut counts = [0u32; 4];
let mut idx = 0;
let mut attack = AttackTypes(0);
let mut immunities = 0u8;
let mut weaknesses = 0u8;
| {
let org_units = self.units;
let units_kill = points / self.hits;
self.units = self.units.saturating_sub(units_kill);
let units_lost = org_units - self.units;
dbg_print!("Units lost: {}\n", units_lost);
units_lost
} | identifier_body |
main.rs | damages: u16,
boost: u16,
initiative: i8,
attack: AttackTypes,
immunity: AttackTypes,
weakness: AttackTypes,
}
impl Group {
fn | (&self) -> u32 {
self.units * (self.damages as u32 + self.boost as u32)
}
fn is_alive(&self) -> bool {
self.units > 0
}
fn calc_hit(&self, enemy: &Group) -> u32 {
match (
self.immunity.to(enemy.attack),
self.weakness.to(enemy.attack),
) {
(false, false) => enemy.effective_power(),
(true, false) => 0,
(false, true) => enemy.effective_power() * 2,
(true, true) => unreachable!(),
}
}
fn hit(&mut self, points: u32) -> u32 {
let org_units = self.units;
let units_kill = points / self.hits;
self.units = self.units.saturating_sub(units_kill);
let units_lost = org_units - self.units;
dbg_print!("Units lost: {}\n", units_lost);
units_lost
}
}
#[derive(Default, Clone)]
struct Army<'a> {
groups: Vec<Group>,
name: &'a str,
}
impl Army<'_> {
fn sort_for_attack(&self) -> Vec<u16> {
let mut ids: Vec<u16> = (0..self.groups.len() as u16).collect();
ids.sort_by_key(|i|
// descending sort
(
!self.groups[*i as usize].is_alive(),
-(self.groups[*i as usize].effective_power() as i32),
-self.groups[*i as usize].initiative,
));
ids
}
fn choose_enemy(&self, order: &Vec<u16>, enemy: &Army) -> Vec<Option<u16>> {
let mut chosen = vec![false; enemy.groups.len()];
order
.iter()
.map(|idx| {
let i = *idx as usize;
if !self.groups[i].is_alive() {
return None;
}
let mut enemy_ids: Vec<_> = (0..enemy.groups.len()).collect();
enemy_ids.sort_by_cached_key(|&j| {
(
!enemy.groups[j].is_alive(),
chosen[j],
-(enemy.groups[j].calc_hit(&self.groups[i]) as i32),
-(enemy.groups[j].effective_power() as i32),
-enemy.groups[j].initiative,
)
});
// If chosen[j] wasnโt a field in sorting, weโve to use |filter|, not
// |take_while| as top results mightโve been already chosen.
match enemy_ids
.iter()
.take_while(|&&j| {
// Although not explicitly stated in puzzle, if this unit canโt deal
// any damage to any enemy unit, then donโt mark chosen.
enemy.groups[j].is_alive()
&& !chosen[j]
&& enemy.groups[j].calc_hit(&self.groups[i]) > 0
})
.next()
{
Some(&c) => {
chosen[c] = true;
Some(c as u16)
}
None => None,
}
})
.collect()
}
fn is_alive(&self) -> bool {
self.groups.iter().any(|g| g.is_alive())
}
fn boost(&mut self, points: u16) {
for g in &mut self.groups {
g.boost = points;
}
}
}
// PrimInt is yet to get the BITS member; make a new trait.
// https://stackoverflow.com/q/73711297/183120
trait Bits {
const BITS: usize;
}
macro_rules! impl_bits {
( $($ty:ident)* ) => {
$(
impl Bits for $ty {
const BITS: usize = Self::BITS as usize;
}
)*
};
}
impl_bits!(u8 u16 u32 u64 u128);
fn to_flag<'a, T: Bits + PrimInt>(
attack: &'a str,
attack_to_flag: &mut HashMap<&'a str, T>,
) -> Result<T, Box<dyn Error>> {
let n = attack_to_flag.len();
let mask = T::one() << n;
match n < T::BITS {
true => Ok(*attack_to_flag.entry(attack).or_insert(mask)),
false => Err(Box::<dyn Error>::from(
"More than {T::BITS} distinct attacks; insufficient bit-width.",
)),
}
}
struct Attack {
army: usize,
group: usize,
enemy: usize,
}
impl Attack {
fn enemy_army(&self) -> usize {
// make a bool and convert to integral as !1u8 = 254
(self.army == 0) as usize
}
}
// Army ID and remaining units
struct Victor(Option<u8>, u32);
fn fight(mut armies: [Army; 2]) -> Victor {
while armies.iter().all(|a| a.is_alive()) {
let ids = [armies[0].sort_for_attack(), armies[1].sort_for_attack()];
let choices = [
armies[0].choose_enemy(&ids[0], &armies[1]),
armies[1].choose_enemy(&ids[1], &armies[0]),
];
// Excessive debugging; turn on if needed.
// for (i, _) in armies.iter().enumerate() {
// dbg_print!("Army {}\n", i);
// for (idx, &j) in ids[i].iter().enumerate() {
// dbg_print!(
// " Group {}: {} --> {:?}\n",
// j,
// armies[i].groups[j as usize].units,
// choices[i][idx]
// );
// }
// }
// collect all alive groups with respective army ID
let mut fight: Vec<Attack> = ids[0]
.iter()
.zip(choices[0].iter())
.filter_map(|(&i, &choice)| {
match (armies[0].groups[i as usize].is_alive(), choice) {
(true, Some(enemy)) => Some(Attack {
army: 0,
group: i as usize,
enemy: enemy.into(),
}),
_ => None,
}
})
.chain(ids[1].iter().zip(choices[1].iter()).filter_map(
|(&j, &choice)| match (armies[1].groups[j as usize].is_alive(), choice)
{
(true, Some(enemy)) => Some(Attack {
army: 1,
group: j as usize,
enemy: enemy.into(),
}),
_ => None,
},
))
.collect::<Vec<Attack>>();
// Attacks in this fight are only b/w alive groups from here on.
fight.sort_by_key(|a| -armies[a.army].groups[a.group].initiative);
let mut total_units_lost = 0;
for attack in &fight {
dbg_print!(
"{}'s Group {} --> {}'s Group {}; ",
armies[attack.army].name,
attack.group,
armies[attack.enemy_army()].name,
attack.enemy
);
let attacker = &armies[attack.army].groups[attack.group];
let defender = &armies[attack.enemy_army()].groups[attack.enemy];
let damage = defender.calc_hit(attacker);
let defender_mut = &mut armies[attack.enemy_army()].groups[attack.enemy];
total_units_lost += defender_mut.hit(damage);
}
if total_units_lost == 0 {
return Victor(None, 0);
}
dbg_print!("--------------\n");
}
match armies[0].is_alive() {
true => Victor(
Some(0),
armies[0].groups.iter().fold(0, |units, g| units + g.units),
),
false => Victor(
Some(1),
armies[1].groups.iter().fold(0, |units, g| units + g.units),
),
}
}
fn main() -> Result<(), Box<dyn Error>> {
let mut input_str = String::new();
let mut stdin = io::stdin();
stdin.read_to_string(&mut input_str)?;
let input = InputParser::parse(Rule::file, &input_str)
.expect("Invalid input")
.next()
.unwrap();
let mut armies = [Army::default(), Army::default()];
let mut next_army: u8 = 0;
let mut attack_to_flag: HashMap<&str, u8> = HashMap::new();
for line in input.into_inner() {
match line.as_rule() {
Rule::army_name => {
armies[next_army as usize].name = line.as_str();
next_army += 1;
}
Rule::group => {
let mut counts = [0u32; 4];
let mut idx = 0;
let mut attack = AttackTypes(0);
let mut immunities = 0u8;
let mut weaknesses = 0u8;
| effective_power | identifier_name |
types.ts | 5_password_reset: string;
};
website_status: { mt5_status: TMt5StatusServer; dx_trade_status: TDXTraderStatusServerType };
email: string;
setVerificationCode: (code: string, action: string) => void;
updateAccountStatus: () => Promise<void>;
is_authentication_needed: boolean;
authentication_status: TAuthenticationStatus;
mt5_login_list: DetailsOfEachMT5Loginid[];
logout: () => Promise<LogOutResponse>;
should_allow_authentication: boolean;
isEligibleForMoreDemoMt5Svg: (market_type: 'synthetic' | 'financial' | 'gaming' | 'all') => boolean;
isEligibleForMoreRealMt5: (market_type: 'synthetic' | 'financial' | 'gaming' | 'all') => boolean;
fetchResidenceList?: () => Promise<void>;
account_settings: GetSettings & {
upload_file?: string;
poi_state?: string;
};
residence_list: ResidenceList;
is_high_risk: boolean;
should_restrict_bvi_account_creation: boolean;
should_restrict_vanuatu_account_creation: boolean;
updateMT5Status: () => Promise<void>;
fetchAccountSettings: () => Promise<void>;
setAccountSettings: (get_settings_response: GetSettings) => void;
upgradeable_landing_companies: unknown[];
is_populating_mt5_account_list: boolean;
landing_companies: LandingCompany;
getChangeableFields: () => string[];
landing_company: LandingCompany;
isAccountOfTypeDisabled: (account: Record<string, DetailsOfEachMT5Loginid>) => boolean;
is_mt5_allowed: boolean;
mt5_disabled_signup_types: {
real: boolean;
demo: boolean;
};
dxtrade_disabled_signup_types: {
real: boolean;
demo: boolean;
};
dxtrade_accounts_list_error: null;
has_account_error_in_mt5_real_list: boolean;
has_account_error_in_mt5_demo_list: boolean;
has_account_error_in_dxtrade_real_list: boolean;
has_account_error_in_dxtrade_demo_list: boolean;
is_fully_authenticated: boolean;
states_list: StatesList;
/** @deprecated Use `useCurrencyConfig` or `useCurrentCurrencyConfig` from `@deriv/hooks` package instead. */
is_crypto: (currency?: string) => boolean;
dxtrade_accounts_list: DetailsOfEachMT5Loginid[];
derivez_accounts_list: DetailsOfEachMT5Loginid[];
default_currency: string;
resetVirtualBalance: () => Promise<void>;
has_enabled_two_fa: boolean;
setTwoFAStatus: (status: boolean) => void;
has_changed_two_fa: boolean;
setTwoFAChangedStatus: (status: boolean) => void;
is_svg: boolean;
real_account_creation_unlock_date: string;
setPrevAccountType: (account_type: string) => void;
setFinancialAndTradingAssessment: (
payload: SetFinancialAssessmentRequest
) => Promise<SetFinancialAssessmentResponse>;
prev_account_type: string;
};
type TCommonStoreError = {
app_routing_history: TAppRoutingHistory[];
header: string | JSX.Element;
message: string | JSX.Element;
redirect_label: string;
redirect_to: string;
redirectOnClick: (() => void) | null;
setError: (has_error: boolean, error: React.ReactNode | null) => void;
should_clear_error_on_click: boolean;
should_show_refresh: boolean;
type?: string;
};
type TCommonStore = {
isCurrentLanguage(language_code: string): boolean;
error: TCommonStoreError;
services_error: { code: string; message: string; type: string } | Record<string, never>;
has_error: boolean;
is_from_derivgo: boolean;
is_network_online: boolean;
platform: 'dxtrade' | 'derivez' | 'mt5' | 'ctrader' | '';
routeBackInApp: (history: Pick<RouteComponentProps, 'history'>, additional_platform_path?: string[]) => void;
routeTo: (pathname: string) => void;
server_time?: Moment;
changeCurrentLanguage: (new_language: string) => void;
changeSelectedLanguage: (key: string) => void;
current_language: string;
is_language_changing: boolean;
is_socket_opened: boolean;
setAppstorePlatform: (value: string) => void;
app_routing_history: TAppRoutingHistory[];
getExchangeRate: (from_currency: string, to_currency: string) => Promise<number>;
network_status: Record<string, never> | { [key: string]: string };
};
type TUiStore = {
addToast: (toast_config: TAddToastProps) => void;
app_contents_scroll_ref: React.MutableRefObject<null | HTMLDivElement>;
current_focus: string | null;
disableApp: () => void;
enableApp: () => void;
has_real_account_signup_ended: boolean;
is_loading: boolean;
is_cashier_visible: boolean;
is_closing_create_real_account_modal: boolean;
is_unsupported_contract_modal_visible: boolean;
has_only_forward_starting_contracts: boolean;
is_dark_mode_on: boolean;
is_reports_visible: boolean;
is_language_settings_modal_on: boolean;
is_app_disabled: boolean;
is_link_expired_modal_visible: boolean;
is_mobile: boolean;
is_positions_drawer_on: boolean;
is_services_error_visible: boolean;
openRealAccountSignup: (
value: 'maltainvest' | 'svg' | 'add_crypto' | 'choose' | 'add_fiat' | 'set_currency' | 'manage'
) => void;
notification_messages_ui: React.ElementType;
setCurrentFocus: (value: string) => void;
setDarkMode: (is_dark_mode_on: boolean) => boolean;
setReportsTabIndex: (value: number) => void;
setIsClosingCreateRealAccountModal: (value: boolean) => void;
setRealAccountSignupEnd: (status: boolean) => void;
setHasOnlyForwardingContracts: (has_only_forward_starting_contracts: boolean) => void;
sub_section_index: number;
setSubSectionIndex: (index: number) => void;
shouldNavigateAfterChooseCrypto: (value: Omit<string, TRoutes> | TRoutes) => void;
toggleAccountsDialog: () => void;
toggleCashier: () => void;
toggleLanguageSettingsModal: () => void;
toggleLinkExpiredModal: (state_change: boolean) => void;
togglePositionsDrawer: () => void;
toggleReadyToDepositModal: () => void;
toggleSetCurrencyModal: () => void;
toggleShouldShowRealAccountsList: (value: boolean) => void;
toggleServicesErrorModal: () => void;
is_tablet: boolean;
removeToast: (key: string) => void;
is_ready_to_deposit_modal_visible: boolean;
reports_route_tab_index: number;
should_show_cancellation_warning: boolean;
toggleCancellationWarning: (state_change: boolean) => void;
toggleUnsupportedContractModal: (state_change: boolean) => void;
toggleReports: (is_visible: boolean) => void;
is_real_acc_signup_on: boolean;
is_need_real_account_for_cashier_modal_visible: boolean;
is_chart_layout_default: boolean;
toggleNeedRealAccountForCashierModal: () => void;
setIsAcuityModalOpen: (value: boolean) => void;
is_switch_to_deriv_account_modal_visible: boolean;
openSwitchToRealAccountModal: () => void;
openDerivRealAccountNeededModal: () => void;
is_top_up_virtual_open: boolean;
is_top_up_virtual_in_progress: boolean;
is_top_up_virtual_success: boolean;
closeSuccessTopUpModal: () => void;
closeTopUpModal: () => void;
is_cfd_reset_password_modal_enabled: boolean;
setCFDPasswordResetModal: (value: boolean) => void;
openAccountNeededModal: () => void;
is_accounts_switcher_on: boolean;
openTopUpModal: () => void;
is_reset_trading_password_modal_visible: boolean;
setResetTradingPasswordModalOpen: () => void;
populateHeaderExtensions: (header_items: JSX.Element | null) => void;
populateSettingsExtensions: (menu_items: Array<TPopulateSettingsExtensionsMenuItem> | null) => void;
setShouldShowCooldownModal: (value: boolean) => void;
setAppContentsScrollRef: (ref: React.MutableRefObject<null | HTMLDivElement>) => void;
populateFooterExtensions: (
footer_extensions:
| [
{
position?: string;
Component?: React.FunctionComponent;
has_right_separator?: boolean;
}
]
| []
) => void;
};
type TPortfolioStore = {
active_positions: TPortfolioPosition[];
error: string;
getPositionById: (id: number) => TPortfolioPosition;
is_accumulator: boolean;
is_loading: boolean;
is_multiplier: boolean;
is_turbos: boolean; | onClickCancel: (contract_id?: number) => void;
onClickSell: (contract_id?: number) => void;
onMount: () => void;
positions: TPortfolioPosition[];
removePositionById: (id: number) => void; | random_line_split |
|
socket.rs | (&self, non_blocking: bool) -> Result<()> {
let mut non_blocking = non_blocking as libc::c_int;
let res = unsafe { libc::ioctl(self.0, libc::FIONBIO, &mut non_blocking) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
/// Connect the socket to the given address. Netlink is a connection-less protocol, so a socket can communicate with
/// multiple peers with the [`Socket::send_to`] and [`Socket::recv_from`] methods. However, if the socket only needs
/// to communicate with one peer, it is convenient not to have to bother with the peer address. This is what
/// `connect` is for. After calling `connect`, [`Socket::send`] and [`Socket::recv`] respectively send and receive
/// datagrams to and from `remote_addr`.
///
/// # Examples
///
/// In this example we:
///
/// 1. open a socket
/// 2. connect it to the kernel with [`Socket::connect`]
/// 3. send a request to the kernel with [`Socket::send`]
/// 4. read the response (which can span over several messages) [`Socket::recv`]
///
/// ```rust
/// use netlink_sys::{protocols::NETLINK_ROUTE, Socket, SocketAddr};
/// use std::process;
///
/// let mut socket = Socket::new(NETLINK_ROUTE).unwrap();
/// let _ = socket.bind_auto().unwrap();
/// let kernel_addr = SocketAddr::new(0, 0);
/// socket.connect(&kernel_addr).unwrap();
/// // This is a valid message for listing the network links on the system
/// let msg = vec![
/// 0x14, 0x00, 0x00, 0x00, 0x12, 0x00, 0x01, 0x03, 0xfd, 0xfe, 0x38, 0x5c, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00,
/// ];
/// let n_sent = socket.send(&msg[..], 0).unwrap();
/// assert_eq!(n_sent, msg.len());
/// // buffer for receiving the response
/// let mut buf = vec![0; 4096];
/// loop {
/// let mut n_received = socket.recv(&mut &mut buf[..], 0).unwrap();
/// println!("received {:?}", &buf[..n_received]);
/// if buf[4] == 2 && buf[5] == 0 {
/// println!("the kernel responded with an error");
/// return;
/// }
/// if buf[4] == 3 && buf[5] == 0 {
/// println!("end of dump");
/// return;
/// }
/// }
/// ```
pub fn connect(&self, remote_addr: &SocketAddr) -> Result<()> {
// FIXME:
//
// Even though for SOCK_DGRAM sockets there's no IO, if our socket is non-blocking,
// connect() might return EINPROGRESS. In theory, the right way to treat EINPROGRESS would
// be to ignore the error, and let the user poll the socket to check when it becomes
// writable, indicating that the connection succeeded. The code already exists in mio for
// TcpStream:
//
// > pub fn connect(stream: net::TcpStream, addr: &SocketAddr) -> io::Result<TcpStream> {
// > set_non_block(stream.as_raw_fd())?;
// > match stream.connect(addr) {
// > Ok(..) => {}
// > Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {}
// > Err(e) => return Err(e),
// > }
// > Ok(TcpStream { inner: stream })
// > }
//
// In practice, since the connection does not require any IO for SOCK_DGRAM sockets, it
// almost never returns EINPROGRESS and so for now, we just return whatever libc::connect
// returns. If it returns EINPROGRESS, the caller will have to handle the error themselves
//
// Refs:
//
// - https://stackoverflow.com/a/14046386/1836144
// - https://lists.isc.org/pipermail/bind-users/2009-August/077527.html
let (addr, addr_len) = remote_addr.as_raw();
let res = unsafe { libc::connect(self.0, addr, addr_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
// Most of the comments in this method come from a discussion on rust users forum.
// [thread]: https://users.rust-lang.org/t/help-understanding-libc-call/17308/9
//
/// Read a datagram from the socket and return the number of bytes that have been read and the address of the
/// sender. The data being read is copied into `buf`. If `buf` is too small, the datagram is truncated. The
/// supported flags are the `MSG_*` described in `man 2 recvmsg`
///
/// # Warning
///
/// In datagram oriented protocols, `recv` and `recvfrom` receive normally only ONE datagram, but this seems not to
/// be always true for netlink sockets: with some protocols like `NETLINK_AUDIT`, multiple netlink packets can be
/// read with a single call.
pub fn recv_from<B>(&self, buf: &mut B, flags: libc::c_int) -> Result<(usize, SocketAddr)>
where
B: bytes::BufMut,
{
// Create an empty storage for the address. Note that the Rust standard library creates a
// sockaddr_storage so that it works for any address family, but here, we already know that
// we'll have a Netlink address, so we can create the appropriate storage.
let mut addr = unsafe { mem::zeroed::<libc::sockaddr_nl>() };
// recvfrom takes a *sockaddr as parameter so that it can accept any kind of address
// storage, so we need to create such a pointer for the sockaddr_nl we just initialized.
//
// Create a raw pointer to Cast our raw pointer to a
// our storage. We cannot generic pointer to *sockaddr
// pass it to recvfrom yet. that recvfrom can use
// ^ ^
// | |
// +--------------+---------------+ +---------+--------+
// / \ / \
let addr_ptr = &mut addr as *mut libc::sockaddr_nl as *mut libc::sockaddr;
// Why do we need to pass the address length? We're passing a generic *sockaddr to
// recvfrom. Somehow recvfrom needs to make sure that the address of the received packet
// would fit into the actual type that is behind *sockaddr: it could be a sockaddr_nl but
// also a sockaddr_in, a sockaddr_in6, or even the generic sockaddr_storage that can store
// any address.
let mut addrlen = mem::size_of_val(&addr);
// recvfrom does not take the address length by value (see [thread]), so we need to create
// a pointer to it.
let addrlen_ptr = &mut addrlen as *mut usize as *mut libc::socklen_t;
let chunk = buf.chunk_mut();
// Cast the *mut u8 into *mut void.
// This is equivalent to casting a *char into *void
// See [thread]
// ^
// Create a *mut u8 |
// ^ |
// | |
// +------+-------+ +--------+-------+
// / \ / \
let buf_ptr = chunk.as_mut_ptr() as *mut libc::c_void;
let buf_len = chunk.len() as libc::size_t;
let res = unsafe { libc::recvfrom(self.0, buf_ptr, buf_len, flags, addr_ptr, addrlen_ptr) };
if res < 0 {
return Err(Error::last_os_error());
} else {
// with `MSG_TRUNC` `res` might exceed `buf_len`
let written = std::cmp::min(buf_len, res as usize);
unsafe {
buf.advance_mut(written);
}
}
Ok((res as usize, SocketAddr(addr)))
}
/// For a connected socket, `recv` reads a datagram from the socket. The sender is the remote peer the socket is
/// connected to (see [`Socket::connect`]). See also [`Socket::recv_from`]
pub fn recv<B>(&self, buf: &mut B, flags: libc::c_int) -> | set_non_blocking | identifier_name |
|
socket.rs | _blocking as libc::c_int;
let res = unsafe { libc::ioctl(self.0, libc::FIONBIO, &mut non_blocking) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
/// Connect the socket to the given address. Netlink is a connection-less protocol, so a socket can communicate with
/// multiple peers with the [`Socket::send_to`] and [`Socket::recv_from`] methods. However, if the socket only needs
/// to communicate with one peer, it is convenient not to have to bother with the peer address. This is what
/// `connect` is for. After calling `connect`, [`Socket::send`] and [`Socket::recv`] respectively send and receive
/// datagrams to and from `remote_addr`.
///
/// # Examples
///
/// In this example we:
///
/// 1. open a socket
/// 2. connect it to the kernel with [`Socket::connect`]
/// 3. send a request to the kernel with [`Socket::send`]
/// 4. read the response (which can span over several messages) [`Socket::recv`]
///
/// ```rust
/// use netlink_sys::{protocols::NETLINK_ROUTE, Socket, SocketAddr};
/// use std::process;
///
/// let mut socket = Socket::new(NETLINK_ROUTE).unwrap();
/// let _ = socket.bind_auto().unwrap();
/// let kernel_addr = SocketAddr::new(0, 0);
/// socket.connect(&kernel_addr).unwrap();
/// // This is a valid message for listing the network links on the system
/// let msg = vec![
/// 0x14, 0x00, 0x00, 0x00, 0x12, 0x00, 0x01, 0x03, 0xfd, 0xfe, 0x38, 0x5c, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00,
/// ];
/// let n_sent = socket.send(&msg[..], 0).unwrap();
/// assert_eq!(n_sent, msg.len());
/// // buffer for receiving the response
/// let mut buf = vec![0; 4096];
/// loop {
/// let mut n_received = socket.recv(&mut &mut buf[..], 0).unwrap();
/// println!("received {:?}", &buf[..n_received]);
/// if buf[4] == 2 && buf[5] == 0 {
/// println!("the kernel responded with an error");
/// return;
/// }
/// if buf[4] == 3 && buf[5] == 0 {
/// println!("end of dump");
/// return;
/// }
/// }
/// ```
pub fn connect(&self, remote_addr: &SocketAddr) -> Result<()> {
// FIXME:
//
// Even though for SOCK_DGRAM sockets there's no IO, if our socket is non-blocking,
// connect() might return EINPROGRESS. In theory, the right way to treat EINPROGRESS would
// be to ignore the error, and let the user poll the socket to check when it becomes
// writable, indicating that the connection succeeded. The code already exists in mio for
// TcpStream:
//
// > pub fn connect(stream: net::TcpStream, addr: &SocketAddr) -> io::Result<TcpStream> {
// > set_non_block(stream.as_raw_fd())?;
// > match stream.connect(addr) {
// > Ok(..) => {}
// > Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {}
// > Err(e) => return Err(e),
// > }
// > Ok(TcpStream { inner: stream })
// > }
//
// In practice, since the connection does not require any IO for SOCK_DGRAM sockets, it
// almost never returns EINPROGRESS and so for now, we just return whatever libc::connect
// returns. If it returns EINPROGRESS, the caller will have to handle the error themselves
//
// Refs:
//
// - https://stackoverflow.com/a/14046386/1836144
// - https://lists.isc.org/pipermail/bind-users/2009-August/077527.html
let (addr, addr_len) = remote_addr.as_raw();
let res = unsafe { libc::connect(self.0, addr, addr_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
// Most of the comments in this method come from a discussion on rust users forum.
// [thread]: https://users.rust-lang.org/t/help-understanding-libc-call/17308/9
//
/// Read a datagram from the socket and return the number of bytes that have been read and the address of the
/// sender. The data being read is copied into `buf`. If `buf` is too small, the datagram is truncated. The
/// supported flags are the `MSG_*` described in `man 2 recvmsg`
///
/// # Warning
///
/// In datagram oriented protocols, `recv` and `recvfrom` receive normally only ONE datagram, but this seems not to
/// be always true for netlink sockets: with some protocols like `NETLINK_AUDIT`, multiple netlink packets can be
/// read with a single call.
pub fn recv_from<B>(&self, buf: &mut B, flags: libc::c_int) -> Result<(usize, SocketAddr)>
where
B: bytes::BufMut,
{
// Create an empty storage for the address. Note that the Rust standard library creates a
// sockaddr_storage so that it works for any address family, but here, we already know that
// we'll have a Netlink address, so we can create the appropriate storage.
let mut addr = unsafe { mem::zeroed::<libc::sockaddr_nl>() };
// recvfrom takes a *sockaddr as parameter so that it can accept any kind of address
// storage, so we need to create such a pointer for the sockaddr_nl we just initialized.
//
// Create a raw pointer to Cast our raw pointer to a
// our storage. We cannot generic pointer to *sockaddr
// pass it to recvfrom yet. that recvfrom can use
// ^ ^
// | |
// +--------------+---------------+ +---------+--------+
// / \ / \
let addr_ptr = &mut addr as *mut libc::sockaddr_nl as *mut libc::sockaddr;
// Why do we need to pass the address length? We're passing a generic *sockaddr to
// recvfrom. Somehow recvfrom needs to make sure that the address of the received packet
// would fit into the actual type that is behind *sockaddr: it could be a sockaddr_nl but | // a pointer to it.
let addrlen_ptr = &mut addrlen as *mut usize as *mut libc::socklen_t;
let chunk = buf.chunk_mut();
// Cast the *mut u8 into *mut void.
// This is equivalent to casting a *char into *void
// See [thread]
// ^
// Create a *mut u8 |
// ^ |
// | |
// +------+-------+ +--------+-------+
// / \ / \
let buf_ptr = chunk.as_mut_ptr() as *mut libc::c_void;
let buf_len = chunk.len() as libc::size_t;
let res = unsafe { libc::recvfrom(self.0, buf_ptr, buf_len, flags, addr_ptr, addrlen_ptr) };
if res < 0 {
return Err(Error::last_os_error());
} else {
// with `MSG_TRUNC` `res` might exceed `buf_len`
let written = std::cmp::min(buf_len, res as usize);
unsafe {
buf.advance_mut(written);
}
}
Ok((res as usize, SocketAddr(addr)))
}
/// For a connected socket, `recv` reads a datagram from the socket. The sender is the remote peer the socket is
/// connected to (see [`Socket::connect`]). See also [`Socket::recv_from`]
pub fn recv<B>(&self, buf: &mut B, flags: libc::c_int) -> Result<usize>
where
B: bytes::BufMut,
{
let chunk = buf.chunk_mut();
| // also a sockaddr_in, a sockaddr_in6, or even the generic sockaddr_storage that can store
// any address.
let mut addrlen = mem::size_of_val(&addr);
// recvfrom does not take the address length by value (see [thread]), so we need to create | random_line_split |
socket.rs | as libc::c_int;
let res = unsafe { libc::ioctl(self.0, libc::FIONBIO, &mut non_blocking) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
/// Connect the socket to the given address. Netlink is a connection-less protocol, so a socket can communicate with
/// multiple peers with the [`Socket::send_to`] and [`Socket::recv_from`] methods. However, if the socket only needs
/// to communicate with one peer, it is convenient not to have to bother with the peer address. This is what
/// `connect` is for. After calling `connect`, [`Socket::send`] and [`Socket::recv`] respectively send and receive
/// datagrams to and from `remote_addr`.
///
/// # Examples
///
/// In this example we:
///
/// 1. open a socket
/// 2. connect it to the kernel with [`Socket::connect`]
/// 3. send a request to the kernel with [`Socket::send`]
/// 4. read the response (which can span over several messages) [`Socket::recv`]
///
/// ```rust
/// use netlink_sys::{protocols::NETLINK_ROUTE, Socket, SocketAddr};
/// use std::process;
///
/// let mut socket = Socket::new(NETLINK_ROUTE).unwrap();
/// let _ = socket.bind_auto().unwrap();
/// let kernel_addr = SocketAddr::new(0, 0);
/// socket.connect(&kernel_addr).unwrap();
/// // This is a valid message for listing the network links on the system
/// let msg = vec![
/// 0x14, 0x00, 0x00, 0x00, 0x12, 0x00, 0x01, 0x03, 0xfd, 0xfe, 0x38, 0x5c, 0x00, 0x00, 0x00,
/// 0x00, 0x00, 0x00, 0x00, 0x00,
/// ];
/// let n_sent = socket.send(&msg[..], 0).unwrap();
/// assert_eq!(n_sent, msg.len());
/// // buffer for receiving the response
/// let mut buf = vec![0; 4096];
/// loop {
/// let mut n_received = socket.recv(&mut &mut buf[..], 0).unwrap();
/// println!("received {:?}", &buf[..n_received]);
/// if buf[4] == 2 && buf[5] == 0 {
/// println!("the kernel responded with an error");
/// return;
/// }
/// if buf[4] == 3 && buf[5] == 0 {
/// println!("end of dump");
/// return;
/// }
/// }
/// ```
pub fn connect(&self, remote_addr: &SocketAddr) -> Result<()> | // almost never returns EINPROGRESS and so for now, we just return whatever libc::connect
// returns. If it returns EINPROGRESS, the caller will have to handle the error themself
//
// Refs:
//
// - https://stackoverflow.com/a/14046386/1836144
// - https://lists.isc.org/pipermail/bind-users/2009-August/077527.html
let (addr, addr_len) = remote_addr.as_raw();
let res = unsafe { libc::connect(self.0, addr, addr_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
// Most of the comments in this method come from a discussion on rust users forum.
// [thread]: https://users.rust-lang.org/t/help-understanding-libc-call/17308/9
//
/// Read a datagram from the socket and return the number of bytes that have been read and the address of the
/// sender. The data being read is copied into `buf`. If `buf` is too small, the datagram is truncated. The
/// supported flags are the `MSG_*` described in `man 2 recvmsg`
///
/// # Warning
///
/// In datagram oriented protocols, `recv` and `recvfrom` receive normally only ONE datagram, but this seems not to
/// be always true for netlink sockets: with some protocols like `NETLINK_AUDIT`, multiple netlink packets can be
/// read with a single call.
pub fn recv_from<B>(&self, buf: &mut B, flags: libc::c_int) -> Result<(usize, SocketAddr)>
where
B: bytes::BufMut,
{
// Create an empty storage for the address. Note that the Rust standard library creates a
// sockaddr_storage so that it works for any address family, but here, we already know that
// we'll have a Netlink address, so we can create the appropriate storage.
let mut addr = unsafe { mem::zeroed::<libc::sockaddr_nl>() };
// recvfrom takes a *sockaddr as parameter so that it can accept any kind of address
// storage, so we need to create such a pointer for the sockaddr_nl we just initialized.
//
// Create a raw pointer to Cast our raw pointer to a
// our storage. We cannot generic pointer to *sockaddr
// pass it to recvfrom yet. that recvfrom can use
// ^ ^
// | |
// +--------------+---------------+ +---------+--------+
// / \ / \
let addr_ptr = &mut addr as *mut libc::sockaddr_nl as *mut libc::sockaddr;
// Why do we need to pass the address length? We're passing a generic *sockaddr to
// recvfrom. Somehow recvfrom needs to make sure that the address of the received packet
// would fit into the actual type that is behind *sockaddr: it could be a sockaddr_nl but
// also a sockaddr_in, a sockaddr_in6, or even the generic sockaddr_storage that can store
// any address.
let mut addrlen = mem::size_of_val(&addr);
// recvfrom does not take the address length by value (see [thread]), so we need to create
// a pointer to it.
let addrlen_ptr = &mut addrlen as *mut usize as *mut libc::socklen_t;
let chunk = buf.chunk_mut();
// Cast the *mut u8 into *mut void.
// This is equivalent to casting a *char into *void
// See [thread]
// ^
// Create a *mut u8 |
// ^ |
// | |
// +------+-------+ +--------+-------+
// / \ / \
let buf_ptr = chunk.as_mut_ptr() as *mut libc::c_void;
let buf_len = chunk.len() as libc::size_t;
let res = unsafe { libc::recvfrom(self.0, buf_ptr, buf_len, flags, addr_ptr, addrlen_ptr) };
if res < 0 {
return Err(Error::last_os_error());
} else {
// with `MSG_TRUNC` `res` might exceed `buf_len`
let written = std::cmp::min(buf_len, res as usize);
unsafe {
buf.advance_mut(written);
}
}
Ok((res as usize, SocketAddr(addr)))
}
/// For a connected socket, `recv` reads a datagram from the socket. The sender is the remote peer the socket is
/// connected to (see [`Socket::connect`]). See also [`Socket::recv_from`]
pub fn recv<B>(&self, buf: &mut B, flags: libc::c_int) -> Result<usize>
where
B: bytes::BufMut,
{
let chunk = buf.chunk_mut | {
// FIXME:
//
// Even though for SOCK_DGRAM sockets there's no IO, if our socket is non-blocking,
// connect() might return EINPROGRESS. In theory, the right way to treat EINPROGRESS would
// be to ignore the error, and let the user poll the socket to check when it becomes
// writable, indicating that the connection succeeded. The code already exists in mio for
// TcpStream:
//
// > pub fn connect(stream: net::TcpStream, addr: &SocketAddr) -> io::Result<TcpStream> {
// > set_non_block(stream.as_raw_fd())?;
// > match stream.connect(addr) {
// > Ok(..) => {}
// > Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {}
// > Err(e) => return Err(e),
// > }
// > Ok(TcpStream { inner: stream })
// > }
//
// In practice, since the connection does not require any IO for SOCK_DGRAM sockets, it | identifier_body |
socket.rs | is copied into `buf`. If `buf` is too small, the datagram is truncated. The
/// supported flags are the `MSG_*` described in `man 2 recvmsg`
///
/// # Warning
///
/// In datagram oriented protocols, `recv` and `recvfrom` receive normally only ONE datagram, but this seems not to
/// be always true for netlink sockets: with some protocols like `NETLINK_AUDIT`, multiple netlink packets can be
/// read with a single call.
pub fn recv_from<B>(&self, buf: &mut B, flags: libc::c_int) -> Result<(usize, SocketAddr)>
where
B: bytes::BufMut,
{
// Create an empty storage for the address. Note that the Rust standard library creates a
// sockaddr_storage so that it works for any address family, but here, we already know that
// we'll have a Netlink address, so we can create the appropriate storage.
let mut addr = unsafe { mem::zeroed::<libc::sockaddr_nl>() };
// recvfrom takes a *sockaddr as parameter so that it can accept any kind of address
// storage, so we need to create such a pointer for the sockaddr_nl we just initialized.
//
// Create a raw pointer to Cast our raw pointer to a
// our storage. We cannot generic pointer to *sockaddr
// pass it to recvfrom yet. that recvfrom can use
// ^ ^
// | |
// +--------------+---------------+ +---------+--------+
// / \ / \
let addr_ptr = &mut addr as *mut libc::sockaddr_nl as *mut libc::sockaddr;
// Why do we need to pass the address length? We're passing a generic *sockaddr to
// recvfrom. Somehow recvfrom needs to make sure that the address of the received packet
// would fit into the actual type that is behind *sockaddr: it could be a sockaddr_nl but
// also a sockaddr_in, a sockaddr_in6, or even the generic sockaddr_storage that can store
// any address.
let mut addrlen = mem::size_of_val(&addr);
// recvfrom does not take the address length by value (see [thread]), so we need to create
// a pointer to it.
let addrlen_ptr = &mut addrlen as *mut usize as *mut libc::socklen_t;
let chunk = buf.chunk_mut();
// Cast the *mut u8 into *mut void.
// This is equivalent to casting a *char into *void
// See [thread]
// ^
// Create a *mut u8 |
// ^ |
// | |
// +------+-------+ +--------+-------+
// / \ / \
let buf_ptr = chunk.as_mut_ptr() as *mut libc::c_void;
let buf_len = chunk.len() as libc::size_t;
let res = unsafe { libc::recvfrom(self.0, buf_ptr, buf_len, flags, addr_ptr, addrlen_ptr) };
if res < 0 {
return Err(Error::last_os_error());
} else {
// with `MSG_TRUNC` `res` might exceed `buf_len`
let written = std::cmp::min(buf_len, res as usize);
unsafe {
buf.advance_mut(written);
}
}
Ok((res as usize, SocketAddr(addr)))
}
/// For a connected socket, `recv` reads a datagram from the socket. The sender is the remote peer the socket is
/// connected to (see [`Socket::connect`]). See also [`Socket::recv_from`]
pub fn recv<B>(&self, buf: &mut B, flags: libc::c_int) -> Result<usize>
where
B: bytes::BufMut,
{
let chunk = buf.chunk_mut();
let buf_ptr = chunk.as_mut_ptr() as *mut libc::c_void;
let buf_len = chunk.len() as libc::size_t;
let res = unsafe { libc::recv(self.0, buf_ptr, buf_len, flags) };
if res < 0 {
return Err(Error::last_os_error());
} else {
// with `MSG_TRUNC` `res` might exceed `buf_len`
let written = std::cmp::min(buf_len, res as usize);
unsafe {
buf.advance_mut(written);
}
}
Ok(res as usize)
}
/// Receive a full message. Unlike [`Socket::recv_from`], which truncates messages that exceed the length of the
/// buffer passed as argument, this method always reads a whole message, no matter its size.
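///
/// A minimal sketch (mirroring the [`Socket::connect`] example above; `no_run` since it
/// assumes a request has already been sent to the kernel):
///
/// ```no_run
/// use netlink_sys::{protocols::NETLINK_ROUTE, Socket, SocketAddr};
///
/// let mut socket = Socket::new(NETLINK_ROUTE).unwrap();
/// let _ = socket.bind_auto().unwrap();
/// socket.connect(&SocketAddr::new(0, 0)).unwrap();
/// // ... send a dump request as in the `connect` example, then read one full reply:
/// let (msg, _addr) = socket.recv_from_full().unwrap();
/// println!("received a {}-byte netlink message", msg.len());
/// ```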
pub fn recv_from_full(&self) -> Result<(Vec<u8>, SocketAddr)> {
// Peek
let mut buf: Vec<u8> = Vec::new();
let (peek_len, _) = self.recv_from(&mut buf, libc::MSG_PEEK | libc::MSG_TRUNC)?;
// Receive
buf.clear();
buf.reserve(peek_len);
let (rlen, addr) = self.recv_from(&mut buf, 0)?;
assert_eq!(rlen, peek_len);
Ok((buf, addr))
}
/// Send the given buffer `buf` to the remote peer with address `addr`. The supported flags are the `MSG_*` values
/// documented in `man 2 send`.
pub fn send_to(&self, buf: &[u8], addr: &SocketAddr, flags: libc::c_int) -> Result<usize> {
let (addr_ptr, addr_len) = addr.as_raw();
let buf_ptr = buf.as_ptr() as *const libc::c_void;
let buf_len = buf.len() as libc::size_t;
let res = unsafe { libc::sendto(self.0, buf_ptr, buf_len, flags, addr_ptr, addr_len) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(res as usize)
}
/// For a connected socket, `send` sends the given buffer `buf` to the remote peer the socket is connected to. See
/// also [`Socket::connect`] and [`Socket::send_to`].
pub fn send(&self, buf: &[u8], flags: libc::c_int) -> Result<usize> {
let buf_ptr = buf.as_ptr() as *const libc::c_void;
let buf_len = buf.len() as libc::size_t;
let res = unsafe { libc::send(self.0, buf_ptr, buf_len, flags) };
if res < 0 {
return Err(Error::last_os_error());
}
Ok(res as usize)
}
pub fn set_pktinfo(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(self.0, libc::SOL_NETLINK, libc::NETLINK_PKTINFO, value)
}
pub fn get_pktinfo(&self) -> Result<bool> {
let res = getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_PKTINFO)?;
Ok(res == 1)
}
pub fn add_membership(&mut self, group: u32) -> Result<()> {
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_ADD_MEMBERSHIP,
group,
)
}
pub fn drop_membership(&mut self, group: u32) -> Result<()> {
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_DROP_MEMBERSHIP,
group,
)
}
// pub fn list_membership(&self) -> Vec<u32> {
// unimplemented!();
// // getsockopt won't be enough here, because we may need to perform 2 calls, and because the
// // length of the list returned by libc::getsockopt is returned by mutating the length
// // argument, which our implementation of getsockopt forbids.
// }
/// `NETLINK_BROADCAST_ERROR` (since Linux 2.6.30). When not set, `netlink_broadcast()` only
/// reports `ESRCH` errors and silently ignores `NOBUFS` errors.
pub fn set_broadcast_error(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value { 1 } else { 0 };
setsockopt(
self.0,
libc::SOL_NETLINK,
libc::NETLINK_BROADCAST_ERROR,
value,
)
}
pub fn get_broadcast_error(&self) -> Result<bool> {
let res =
getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_BROADCAST_ERROR)?;
Ok(res == 1)
}
/// `NETLINK_NO_ENOBUFS` (since Linux 2.6.30). This flag can be used by unicast and broadcast
/// listeners to avoid receiving `ENOBUFS` errors.
pub fn set_no_enobufs(&mut self, value: bool) -> Result<()> {
let value: libc::c_int = if value | { 1 } | conditional_block |
|
context.go | s)
}
providers, err = resourceProviderFactories(opts.ProviderResolver, reqd)
if err != nil {
return nil, err
}
} else {
providers = make(map[string]ResourceProviderFactory)
}
diff := opts.Diff
if diff == nil {
diff = &Diff{}
}
return &Context{
components: &basicComponentFactory{
providers: providers,
provisioners: opts.Provisioners,
},
destroy: opts.Destroy,
diff: diff,
hooks: hooks,
meta: opts.Meta,
module: opts.Module,
shadow: opts.Shadow,
state: state,
targets: opts.Targets,
uiInput: opts.UIInput,
variables: variables,
parallelSem: NewSemaphore(par),
providerInputConfig: make(map[string]map[string]interface{}),
providerSHA256s: opts.ProviderSHA256s,
sh: sh,
}, nil
}
type ContextGraphOpts struct {
// If true, validates the graph structure (checks for cycles).
Validate bool
// Legacy graphs only: won't prune the graph
Verbose bool
}
// Graph returns the graph used for the given operation type.
//
// The most extensive or complex graph type is GraphTypePlan.
func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) | // The input graph is just a slightly modified plan graph
fallthrough
case GraphTypeValidate:
// The validate graph is just a slightly modified plan graph
fallthrough
case GraphTypePlan:
// Create the plan graph builder
p := &PlanGraphBuilder{
Module: c.module,
State: c.state,
Providers: c.components.ResourceProviders(),
Targets: c.targets,
Validate: opts.Validate,
}
// Some special cases for other graph types shared with plan currently
var b GraphBuilder = p
switch typ {
case GraphTypeInput:
b = InputGraphBuilder(p)
case GraphTypeValidate:
// We need to set the provisioners so those can be validated
p.Provisioners = c.components.ResourceProvisioners()
b = ValidateGraphBuilder(p)
}
return b.Build(RootModulePath)
case GraphTypePlanDestroy:
return (&DestroyPlanGraphBuilder{
Module: c.module,
State: c.state,
Targets: c.targets,
Validate: opts.Validate,
}).Build(RootModulePath)
case GraphTypeRefresh:
return (&RefreshGraphBuilder{
Module: c.module,
State: c.state,
Providers: c.components.ResourceProviders(),
Targets: c.targets,
Validate: opts.Validate,
}).Build(RootModulePath)
}
return nil, fmt.Errorf("unknown graph type: %s", typ)
}
// ShadowError returns any errors caught during a shadow operation.
//
// A shadow operation is an operation run in parallel to a real operation
// that performs the same tasks using new logic on copied state. The results
// are compared to ensure that the new logic works the same as the old logic.
// The shadow never affects the real operation or return values.
//
// The results of the shadow operation are only available through this function
// call after a real operation is complete.
//
// For API consumers of Context, you can safely ignore this function
// completely if you have no interest in helping report experimental feature
// errors to Terraform maintainers. Otherwise, please call this function
// after every operation and report this to the user.
//
// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
// the real state or result of a real operation. They are purely informational
// to assist in future Terraform versions being more stable. Please message
// this effectively to the end user.
//
// This must be called only when no other operation is running (refresh,
// plan, etc.). The result can be used in parallel to any other operation
// running.
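//
// Illustrative call pattern (not from the original source):
//
//	state, err := ctx.Apply()
//	// handle err / use state as usual, then surface any shadow error:
//	if shadowErr := ctx.ShadowError(); shadowErr != nil {
//		log.Printf("[WARN] experimental feature error (does not affect your state): %s", shadowErr)
//	}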
func (c *Context) ShadowError() error {
return c.shadowErr
}
// State returns a copy of the current state associated with this context.
//
// This cannot safely be called in parallel with any other Context function.
func (c *Context) State() *State {
return c.state.DeepCopy()
}
// Interpolater returns an Interpolater built on a copy of the state
// that can be used to test interpolation values.
func (c *Context) Interpolater() *Interpolater {
var varLock sync.Mutex
var stateLock sync.RWMutex
return &Interpolater{
Operation: walkApply,
Meta: c.meta,
Module: c.module,
State: c.state.DeepCopy(),
StateLock: &stateLock,
VariableValues: c.variables,
VariableValuesLock: &varLock,
}
}
// Input asks for input to fill variables and provider configurations.
// This modifies the configuration in-place, so asking for Input twice
// may result in different UI output showing different current values.
func (c *Context) Input(mode InputMode) error {
defer c.acquireRun("input")()
if mode&InputModeVar != 0 {
// Walk the variables first for the root module. We walk them in
// alphabetical order for UX reasons.
rootConf := c.module.Config()
names := make([]string, len(rootConf.Variables))
m := make(map[string]*config.Variable)
for i, v := range rootConf.Variables {
names[i] = v.Name
m[v.Name] = v
}
sort.Strings(names)
for _, n := range names {
// If we only care about unset variables, then if the variable
// is set, continue on.
if mode&InputModeVarUnset != 0 {
if _, ok := c.variables[n]; ok {
continue
}
}
var valueType config.VariableType
v := m[n]
switch valueType = v.Type(); valueType {
case config.VariableTypeUnknown:
continue
case config.VariableTypeMap:
// OK
case config.VariableTypeList:
// OK
case config.VariableTypeString:
// OK
default:
panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
}
// If the variable is not already set, and the variable defines a
// default, use that for the value.
if _, ok := c.variables[n]; !ok {
if v.Default != nil {
c.variables[n] = v.Default.(string)
continue
}
}
// this should only happen during tests
if c.uiInput == nil {
log.Println("[WARN] Content.uiInput is nil")
continue
}
// Ask the user for a value for this variable
var value string
retry := 0
for {
var err error
value, err = c.uiInput.Input(&InputOpts{
Id: fmt.Sprintf("var.%s", n),
Query: fmt.Sprintf("var.%s", n),
Description: v.Description,
})
if err != nil {
return fmt.Errorf(
"Error asking for %s: %s", n, err)
}
if value == "" && v.Required() {
// Redo if it is required, but abort if we keep getting
// blank entries
if retry > 2 {
return fmt.Errorf("missing required value for %q", n)
}
retry++
continue
}
break
}
// no value provided, so don't set the variable at all
if value == "" {
continue
}
decoded, err := parseVariableAsHCL(n, value, valueType)
if err != nil {
return err
}
if decoded != nil {
c.variables[n] = decoded
}
}
}
if mode&InputModeProvider != 0 {
// Build the graph
graph, err := c.Graph(GraphTypeInput, nil)
if err != nil {
return err
}
// Do the walk
if _, err := c.walk(graph, walkInput); err != nil {
return err
}
}
return nil
}
// Apply applies the changes represented by this context and returns
// the resulting state.
//
// Even in the case an error is returned, the state may be returned and will
// potentially be partially updated. In addition to returning the resulting
// state, this context | {
if opts == nil {
opts = &ContextGraphOpts{Validate: true}
}
log.Printf("[INFO] terraform: building graph: %s", typ)
switch typ {
case GraphTypeApply:
return (&ApplyGraphBuilder{
Module: c.module,
Diff: c.diff,
State: c.state,
Providers: c.components.ResourceProviders(),
Provisioners: c.components.ResourceProvisioners(),
Targets: c.targets,
Destroy: c.destroy,
Validate: opts.Validate,
}).Build(RootModulePath)
case GraphTypeInput: | identifier_body |
context.go | Error asking for %s: %s", n, err)
}
if value == "" && v.Required() {
// Redo if it is required, but abort if we keep getting
// blank entries
if retry > 2 {
return fmt.Errorf("missing required value for %q", n)
}
retry++
continue
}
break
}
// no value provided, so don't set the variable at all
if value == "" {
continue
}
decoded, err := parseVariableAsHCL(n, value, valueType)
if err != nil {
return err
}
if decoded != nil {
c.variables[n] = decoded
}
}
}
if mode&InputModeProvider != 0 {
// Build the graph
graph, err := c.Graph(GraphTypeInput, nil)
if err != nil {
return err
}
// Do the walk
if _, err := c.walk(graph, walkInput); err != nil {
return err
}
}
return nil
}
// Apply applies the changes represented by this context and returns
// the resulting state.
//
// Even in the case an error is returned, the state may be returned and will
// potentially be partially updated. In addition to returning the resulting
// state, this context is updated with the latest state.
//
// If the state is required after an error, the caller should call
// Context.State, rather than rely on the return value.
//
// TODO: Apply and Refresh should either always return a state, or rely on the
// State() method. Currently the helper/resource testing framework relies
// on the absence of a returned state to determine if Destroy can be
// called, so that will need to be refactored before this can be changed.
func (c *Context) Apply() (*State, error) {
defer c.acquireRun("apply")()
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
graph, err := c.Graph(GraphTypeApply, nil)
if err != nil {
return nil, err
}
// Determine the operation
operation := walkApply
if c.destroy {
operation = walkDestroy
}
// Walk the graph
walker, err := c.walk(graph, operation)
if len(walker.ValidationErrors) > 0 {
err = multierror.Append(err, walker.ValidationErrors...)
}
// Clean out any unused things
c.state.prune()
return c.state, err
}
// Plan generates an execution plan for the given context.
//
// The execution plan encapsulates the context and can be stored
// in order to reinstantiate a context later for Apply.
//
// Plan also updates the diff of this context to be the diff generated
// by the plan, so Apply can be called after.
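//
// Illustrative call sequence (not from the original source):
//
//	plan, err := ctx.Plan()
//	if err != nil {
//		return err
//	}
//	_ = plan // inspect plan.Diff or persist the plan, then:
//	state, err := ctx.Apply()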
func (c *Context) Plan() (*Plan, error) {
defer c.acquireRun("plan")()
p := &Plan{
Module: c.module,
Vars: c.variables,
State: c.state,
Targets: c.targets,
TerraformVersion: version.String(),
ProviderSHA256s: c.providerSHA256s,
}
var operation walkOperation
if c.destroy {
operation = walkPlanDestroy
p.Destroy = true
} else {
// Set our state to be something temporary. We do this so that
// the plan can update a fake state so that variables work, then
// we replace it back with our old state.
old := c.state
if old == nil {
c.state = &State{}
c.state.init()
} else {
c.state = old.DeepCopy()
}
defer func() {
c.state = old
}()
operation = walkPlan
}
// Setup our diff
c.diffLock.Lock()
c.diff = new(Diff)
c.diff.init()
c.diffLock.Unlock()
// Build the graph.
graphType := GraphTypePlan
if c.destroy {
graphType = GraphTypePlanDestroy
}
graph, err := c.Graph(graphType, nil)
if err != nil {
return nil, err
}
// Do the walk
walker, err := c.walk(graph, operation)
if err != nil {
return nil, err
}
p.Diff = c.diff
// If this is true, it means we're running unit tests. In this case,
// we perform a deep copy just to ensure that all context tests also
// test that a diff is copy-able. This will panic if it fails. This
// is enabled during unit tests.
//
// This should never be true during production usage, but even if it is,
// it can't do any real harm.
if contextTestDeepCopyOnPlan {
p.Diff.DeepCopy()
}
/*
// We don't do the reverification during the new destroy plan because
// it will use a different apply process.
if X_legacyGraph {
// Now that we have a diff, we can build the exact graph that Apply will use
// and catch any possible cycles during the Plan phase.
if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
return nil, err
}
}
*/
var errs error
if len(walker.ValidationErrors) > 0 {
errs = multierror.Append(errs, walker.ValidationErrors...)
}
return p, errs
}
// Refresh goes through all the resources in the state and refreshes them
// to their latest state. This will update the state that this context
// works with, along with returning it.
//
// Even in the case an error is returned, the state may be returned and
// will potentially be partially updated.
func (c *Context) Refresh() (*State, error) {
defer c.acquireRun("refresh")()
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
graph, err := c.Graph(GraphTypeRefresh, nil)
if err != nil {
return nil, err
}
// Do the walk
if _, err := c.walk(graph, walkRefresh); err != nil {
return nil, err
}
// Clean out any unused things
c.state.prune()
return c.state, nil
}
// Stop stops the running task.
//
// Stop will block until the task completes.
func (c *Context) Stop() {
log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")
c.l.Lock()
defer c.l.Unlock()
// If we're running, then stop
if c.runContextCancel != nil {
log.Printf("[WARN] terraform: run context exists, stopping")
// Tell the hook we want to stop
c.sh.Stop()
// Stop the context
c.runContextCancel()
c.runContextCancel = nil
}
// Grab the condition var before we exit
if cond := c.runCond; cond != nil {
cond.Wait()
}
log.Printf("[WARN] terraform: stop complete")
}
// Validate validates the configuration and returns any warnings or errors.
func (c *Context) Validate() tfdiags.Diagnostics {
defer c.acquireRun("validate")()
var diags tfdiags.Diagnostics
// Validate the configuration itself
diags = diags.Append(c.module.Validate())
// This only needs to be done for the root module, since inter-module
// variables are validated in the module tree.
if config := c.module.Config(); config != nil {
// Validate the user variables
for _, err := range smcUserVariables(config, c.variables) {
diags = diags.Append(err)
}
}
// If we have errors at this point, the graphing has no chance,
// so just bail early.
if diags.HasErrors() {
return diags
}
// Build the graph so we can walk it and run Validate on nodes.
// We also validate the graph generated here, but this graph doesn't
// necessarily match the graph that Plan will generate, so we'll validate the
// graph again later after Planning.
graph, err := c.Graph(GraphTypeValidate, nil)
if err != nil {
diags = diags.Append(err)
return diags
}
// Walk
walker, err := c.walk(graph, walkValidate)
if err != nil {
diags = diags.Append(err)
}
sort.Strings(walker.ValidationWarnings)
sort.Slice(walker.ValidationErrors, func(i, j int) bool {
return walker.ValidationErrors[i].Error() < walker.ValidationErrors[j].Error()
})
for _, warn := range walker.ValidationWarnings {
diags = diags.Append(tfdiags.SimpleWarning(warn))
}
for _, err := range walker.ValidationErrors {
diags = diags.Append(err)
}
return diags
}
// Module returns the module tree associated with this context.
func (c *Context) Module() *module.Tree {
return c.module
}
// Variables will return the mapping of variables that were defined
// for this Context. If Input was called, this mapping may be different
// than what was given.
func (c *Context) Variables() map[string]interface{} {
return c.variables
}
// SetVariable sets a variable after a context has already been built.
func (c *Context) | SetVariable | identifier_name |
|
context.go | s)
}
providers, err = resourceProviderFactories(opts.ProviderResolver, reqd)
if err != nil {
return nil, err
}
} else {
providers = make(map[string]ResourceProviderFactory)
}
diff := opts.Diff
if diff == nil {
diff = &Diff{}
}
return &Context{
components: &basicComponentFactory{
providers: providers,
provisioners: opts.Provisioners,
},
destroy: opts.Destroy,
diff: diff,
hooks: hooks,
meta: opts.Meta,
module: opts.Module,
shadow: opts.Shadow,
state: state,
targets: opts.Targets,
uiInput: opts.UIInput,
variables: variables,
parallelSem: NewSemaphore(par),
providerInputConfig: make(map[string]map[string]interface{}),
providerSHA256s: opts.ProviderSHA256s,
sh: sh,
}, nil
}
type ContextGraphOpts struct {
// If true, validates the graph structure (checks for cycles).
Validate bool
// Legacy graphs only: won't prune the graph
Verbose bool
}
// Graph returns the graph used for the given operation type.
//
// The most extensive or complex graph type is GraphTypePlan.
func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
if opts == nil {
opts = &ContextGraphOpts{Validate: true}
}
log.Printf("[INFO] terraform: building graph: %s", typ)
switch typ {
case GraphTypeApply:
return (&ApplyGraphBuilder{
Module: c.module,
Diff: c.diff,
State: c.state,
Providers: c.components.ResourceProviders(),
Provisioners: c.components.ResourceProvisioners(),
Targets: c.targets,
Destroy: c.destroy,
Validate: opts.Validate,
}).Build(RootModulePath)
case GraphTypeInput:
// The input graph is just a slightly modified plan graph
fallthrough
case GraphTypeValidate:
// The validate graph is just a slightly modified plan graph
fallthrough
case GraphTypePlan:
// Create the plan graph builder
p := &PlanGraphBuilder{
Module: c.module,
State: c.state,
Providers: c.components.ResourceProviders(),
Targets: c.targets,
Validate: opts.Validate,
}
// Some special cases for other graph types shared with plan currently
var b GraphBuilder = p
switch typ {
case GraphTypeInput:
b = InputGraphBuilder(p)
case GraphTypeValidate:
// We need to set the provisioners so those can be validated
p.Provisioners = c.components.ResourceProvisioners()
b = ValidateGraphBuilder(p)
}
return b.Build(RootModulePath)
case GraphTypePlanDestroy:
return (&DestroyPlanGraphBuilder{
Module: c.module,
State: c.state,
Targets: c.targets,
Validate: opts.Validate,
}).Build(RootModulePath)
case GraphTypeRefresh:
return (&RefreshGraphBuilder{
Module: c.module,
State: c.state,
Providers: c.components.ResourceProviders(),
Targets: c.targets,
Validate: opts.Validate,
}).Build(RootModulePath)
}
return nil, fmt.Errorf("unknown graph type: %s", typ)
}
// ShadowError returns any errors caught during a shadow operation.
//
// A shadow operation is an operation run in parallel to a real operation
// that performs the same tasks using new logic on copied state. The results
// are compared to ensure that the new logic works the same as the old logic.
// The shadow never affects the real operation or return values.
//
// The results of the shadow operation are only available through this function
// call after a real operation is complete.
//
// For API consumers of Context, you can safely ignore this function
// completely if you have no interest in helping report experimental feature
// errors to Terraform maintainers. Otherwise, please call this function
// after every operation and report this to the user.
//
// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
// the real state or result of a real operation. They are purely informational
// to assist in future Terraform versions being more stable. Please message
// this effectively to the end user.
//
// This must be called only when no other operation is running (refresh,
// plan, etc.). The result can be used in parallel to any other operation
// running.
func (c *Context) ShadowError() error {
return c.shadowErr
}
// State returns a copy of the current state associated with this context.
//
// This cannot safely be called in parallel with any other Context function.
func (c *Context) State() *State {
return c.state.DeepCopy()
}
// Interpolater returns an Interpolater built on a copy of the state
// that can be used to test interpolation values.
func (c *Context) Interpolater() *Interpolater {
var varLock sync.Mutex
var stateLock sync.RWMutex
return &Interpolater{
Operation: walkApply,
Meta: c.meta,
Module: c.module,
State: c.state.DeepCopy(),
StateLock: &stateLock,
VariableValues: c.variables,
VariableValuesLock: &varLock,
}
}
// Input asks for input to fill variables and provider configurations.
// This modifies the configuration in-place, so asking for Input twice
// may result in different UI output showing different current values.
func (c *Context) Input(mode InputMode) error {
defer c.acquireRun("input")()
if mode&InputModeVar != 0 {
// Walk the variables first for the root module. We walk them in
// alphabetical order for UX reasons.
rootConf := c.module.Config()
names := make([]string, len(rootConf.Variables))
m := make(map[string]*config.Variable)
for i, v := range rootConf.Variables {
names[i] = v.Name
m[v.Name] = v
}
sort.Strings(names)
for _, n := range names {
// If we only care about unset variables, then if the variable
// is set, continue on.
if mode&InputModeVarUnset != 0 {
if _, ok := c.variables[n]; ok {
continue
}
}
var valueType config.VariableType
v := m[n]
switch valueType = v.Type(); valueType {
case config.VariableTypeUnknown:
continue
case config.VariableTypeMap:
// OK
case config.VariableTypeList:
// OK
case config.VariableTypeString:
// OK
default:
panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
}
// If the variable is not already set, and the variable defines a
// default, use that for the value.
if _, ok := c.variables[n]; !ok {
if v.Default != nil {
c.variables[n] = v.Default.(string)
continue
}
}
// this should only happen during tests
if c.uiInput == nil {
log.Println("[WARN] Content.uiInput is nil")
continue
}
// Ask the user for a value for this variable
var value string
retry := 0
for | }
break
}
// no value provided, so don't set the variable at all
if value == "" {
continue
}
decoded, err := parseVariableAsHCL(n, value, valueType)
if err != nil {
return err
}
if decoded != nil {
c.variables[n] = decoded
}
}
}
if mode&InputModeProvider != 0 {
// Build the graph
graph, err := c.Graph(GraphTypeInput, nil)
if err != nil {
return err
}
// Do the walk
if _, err := c.walk(graph, walkInput); err != nil {
return err
}
}
return nil
}
// Apply applies the changes represented by this context and returns
// the resulting state.
//
// Even in the case an error is returned, the state may be returned and will
// potentially be partially updated. In addition to returning the resulting
// state, this context is | {
var err error
value, err = c.uiInput.Input(&InputOpts{
Id: fmt.Sprintf("var.%s", n),
Query: fmt.Sprintf("var.%s", n),
Description: v.Description,
})
if err != nil {
return fmt.Errorf(
"Error asking for %s: %s", n, err)
}
if value == "" && v.Required() {
// Redo if it is required, but abort if we keep getting
// blank entries
if retry > 2 {
return fmt.Errorf("missing required value for %q", n)
}
retry++
continue | conditional_block |
context.go | Redo if it is required, but abort if we keep getting
// blank entries
if retry > 2 {
return fmt.Errorf("missing required value for %q", n)
}
retry++
continue
}
break
}
// no value provided, so don't set the variable at all
if value == "" {
continue
}
decoded, err := parseVariableAsHCL(n, value, valueType)
if err != nil {
return err
}
if decoded != nil {
c.variables[n] = decoded
}
}
}
if mode&InputModeProvider != 0 {
// Build the graph
graph, err := c.Graph(GraphTypeInput, nil)
if err != nil {
return err
}
// Do the walk
if _, err := c.walk(graph, walkInput); err != nil {
return err
}
}
return nil
}
// Apply applies the changes represented by this context and returns
// the resulting state.
//
// Even in the case an error is returned, the state may be returned and will
// potentially be partially updated. In addition to returning the resulting
// state, this context is updated with the latest state.
//
// If the state is required after an error, the caller should call
// Context.State, rather than rely on the return value.
//
// TODO: Apply and Refresh should either always return a state, or rely on the
// State() method. Currently the helper/resource testing framework relies
// on the absence of a returned state to determine if Destroy can be
// called, so that will need to be refactored before this can be changed.
func (c *Context) Apply() (*State, error) {
defer c.acquireRun("apply")()
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
graph, err := c.Graph(GraphTypeApply, nil)
if err != nil {
return nil, err
}
// Determine the operation
operation := walkApply
if c.destroy {
operation = walkDestroy
}
// Walk the graph
walker, err := c.walk(graph, operation)
if len(walker.ValidationErrors) > 0 {
err = multierror.Append(err, walker.ValidationErrors...)
}
// Clean out any unused things
c.state.prune()
return c.state, err
}
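// exampleApply is a hedged sketch, not from the original source: per the
// comment above, the returned state can be non-nil and partially updated even
// when Apply also returns an error, so both values are inspected here.
func exampleApply(c *Context) *State {
	state, err := c.Apply()
	if err != nil {
		log.Printf("[WARN] apply returned an error: %s", err)
	}
	return state
}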
// Plan generates an execution plan for the given context.
//
// The execution plan encapsulates the context and can be stored
// in order to reinstantiate a context later for Apply.
//
// Plan also updates the diff of this context to be the diff generated
// by the plan, so Apply can be called after.
func (c *Context) Plan() (*Plan, error) {
defer c.acquireRun("plan")()
p := &Plan{
Module: c.module,
Vars: c.variables,
State: c.state,
Targets: c.targets,
TerraformVersion: version.String(),
ProviderSHA256s: c.providerSHA256s,
}
var operation walkOperation
if c.destroy {
operation = walkPlanDestroy
p.Destroy = true
} else {
// Set our state to be something temporary. We do this so that
// the plan can update a fake state so that variables work, then
// we replace it back with our old state.
old := c.state
if old == nil {
c.state = &State{}
c.state.init()
} else {
c.state = old.DeepCopy()
}
defer func() {
c.state = old
}()
operation = walkPlan
}
// Setup our diff
c.diffLock.Lock()
c.diff = new(Diff)
c.diff.init()
c.diffLock.Unlock()
// Build the graph.
graphType := GraphTypePlan
if c.destroy {
graphType = GraphTypePlanDestroy
}
graph, err := c.Graph(graphType, nil)
if err != nil {
return nil, err
}
// Do the walk
walker, err := c.walk(graph, operation)
if err != nil {
return nil, err
}
p.Diff = c.diff
// If this is true, it means we're running unit tests. In this case,
// we perform a deep copy just to ensure that all context tests also
// test that a diff is copy-able. This will panic if it fails. This
// is enabled during unit tests.
//
// This should never be true during production usage, but even if it is,
// it can't do any real harm.
if contextTestDeepCopyOnPlan {
p.Diff.DeepCopy()
}
/*
// We don't do the reverification during the new destroy plan because
// it will use a different apply process.
if X_legacyGraph {
// Now that we have a diff, we can build the exact graph that Apply will use
// and catch any possible cycles during the Plan phase.
if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
return nil, err
}
}
*/
var errs error
if len(walker.ValidationErrors) > 0 {
errs = multierror.Append(errs, walker.ValidationErrors...)
}
return p, errs
}
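// examplePlanThenApply is a hedged sketch, not part of the original file: the
// Plan documentation above notes that Plan leaves its diff on the context, so
// Apply can be called immediately afterwards.
func examplePlanThenApply(c *Context) (*State, error) {
	if _, err := c.Plan(); err != nil {
		return nil, err
	}
	return c.Apply()
}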
// Refresh goes through all the resources in the state and refreshes them
// to their latest state. This will update the state that this context
// works with, along with returning it.
//
// Even in the case an error is returned, the state may be returned and
// will potentially be partially updated.
func (c *Context) Refresh() (*State, error) {
defer c.acquireRun("refresh")()
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
graph, err := c.Graph(GraphTypeRefresh, nil)
if err != nil {
return nil, err
}
// Do the walk
if _, err := c.walk(graph, walkRefresh); err != nil {
return nil, err
}
// Clean out any unused things
c.state.prune()
return c.state, nil
}
// Stop stops the running task.
//
// Stop will block until the task completes.
func (c *Context) Stop() {
log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")
c.l.Lock()
defer c.l.Unlock()
// If we're running, then stop
if c.runContextCancel != nil {
log.Printf("[WARN] terraform: run context exists, stopping")
// Tell the hook we want to stop
c.sh.Stop()
// Stop the context
c.runContextCancel()
c.runContextCancel = nil
}
// Grab the condition var before we exit
if cond := c.runCond; cond != nil {
cond.Wait()
}
log.Printf("[WARN] terraform: stop complete")
}
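// exampleStopOnInterrupt is a hedged sketch, not from the original source:
// because Stop blocks until the in-flight walk completes, it is typically
// invoked from a separate goroutine, for example when an interrupt arrives.
func exampleStopOnInterrupt(c *Context, interrupt <-chan struct{}) {
	go func() {
		<-interrupt
		c.Stop()
	}()
}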
// Validate validates the configuration and returns any warnings or errors.
func (c *Context) Validate() tfdiags.Diagnostics {
defer c.acquireRun("validate")()
var diags tfdiags.Diagnostics
// Validate the configuration itself
diags = diags.Append(c.module.Validate())
// This only needs to be done for the root module, since inter-module
// variables are validated in the module tree.
if config := c.module.Config(); config != nil {
// Validate the user variables
for _, err := range smcUserVariables(config, c.variables) {
diags = diags.Append(err)
}
}
// If we have errors at this point, the graphing has no chance,
// so just bail early.
if diags.HasErrors() {
return diags
}
// Build the graph so we can walk it and run Validate on nodes.
// We also validate the graph generated here, but this graph doesn't
// necessarily match the graph that Plan will generate, so we'll validate the
// graph again later after Planning.
graph, err := c.Graph(GraphTypeValidate, nil)
if err != nil {
diags = diags.Append(err)
return diags
}
// Walk
walker, err := c.walk(graph, walkValidate)
if err != nil {
diags = diags.Append(err)
}
sort.Strings(walker.ValidationWarnings)
sort.Slice(walker.ValidationErrors, func(i, j int) bool {
return walker.ValidationErrors[i].Error() < walker.ValidationErrors[j].Error()
})
for _, warn := range walker.ValidationWarnings {
diags = diags.Append(tfdiags.SimpleWarning(warn))
}
for _, err := range walker.ValidationErrors {
diags = diags.Append(err)
}
return diags
}
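// exampleValidate is a hedged sketch, not part of the original file: it only
// relies on HasErrors, the same check Validate itself uses above, to decide
// whether it is worth proceeding to Plan.
func exampleValidate(c *Context) bool {
	return !c.Validate().HasErrors()
}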
// Module returns the module tree associated with this context.
func (c *Context) Module() *module.Tree {
return c.module
}
// Variables will return the mapping of variables that were defined
// for this Context. If Input was called, this mapping may be different
// than what was given.
func (c *Context) Variables() map[string]interface{} {
return c.variables
}
// SetVariable sets a variable after a context has already been built.
func (c *Context) SetVariable(k string, v interface{}) {
c.variables[k] = v
} |
func (c *Context) acquireRun(phase string) func() { | random_line_split |
|
workshopSteps.py | ), 1)]
#lets spawn a food by passing in the food list and snake coordinates
spawnSingleFood(food, snake.x, snake.y)
while end != 1:
#Lets comment these out now and start from the beginning now for the loop that will run endlessly
#keyPressed = getPressedKey()
#screen.fill(background_color)
#pygame.display.update()
#Going to have the pygame clock tick as fast as the FPS
clock.tick(FPS)
#Create the keypressed variable again and call the function getPressedKey
keyPressed = getPressedKey()
#We check if the keypressed value is equal to exit, and if it is we make end equal to 1
if(keyPressed == "exit"):
end = 1
#lets check if the snake is crashing into itself and if it is end the game
if(snake.checkCrash() == True):
gameEnd()
#lets check if the snake is crashing into the edges and if its true end the game
if(crashing(snake.bodyStack[0], SNAKE_BLOCK_SIZE) == True):
gameEnd()
#we're going to check for all the food and if the food is not eaten then check for the snake colliding with food
#if the snake does collide then we make the snake grow make sure the food is set to state 0 for eaten
#and we add to the score and make eaten food equal to true
for f in food:
if(f.state == 1):
if(checkCollision(snake.bodyStack[0],SNAKE_BLOCK_SIZE, f, FOOD_SIZE) == True):
snake.grow()
f.state = 0
score += 5
eaten_food = True
#if the snake has eaten then we spawn another food and changed eaten food to false
if(eaten_food == True):
spawnSingleFood(food, snake.bodyStack[0].x, snake.bodyStack[0].y)
eaten_food = False
#If a key was pressed we try to change the direction of the snake then we move again
if(keyPressed):
snake.changeDirection(keyPressed)
snake.move()
#We fill the screen in again with the color
screen.fill(background_color)
#we check for all the food and if the food has not been eaten then we draw it on the screen
for f in food:
if(f.state == 1):
f.draw(screen)
#lets draw the snake on the screen
snake.draw(screen)
#lets draw the score
drawScore(score)
#We call pygame.display.flip to layer the screen
pygame.display.flip()
#we update the display
pygame.display.update()
#STEP 2
#lets create a function to access these events
#we're going to write a for loop to check all the events that can happen in the window
#We're going to create a bunch of if statements and else if statements
#We're going to check the event type and if that event type is equal to either
#key up, key down, key right, key left, key escape, key y, key n, or quit then we are going to return
#either the value of the respective key we defined earlier, a string of what to do, or exit the system
def getPressedKey():
for event in pygame.event.get():
if(event.type == pygame.KEYDOWN):
if(event.key == pygame.K_UP):
return KEY["UP"]
elif(event.key == pygame.K_DOWN):
return KEY["DOWN"]
elif(event.key == pygame.K_RIGHT):
return KEY["RIGHT"]
elif(event.key == pygame.K_LEFT):
return KEY["LEFT"]
elif(event.key == pygame.K_ESCAPE):
return "exit"
elif(event.key == pygame.K_y):
return "yes"
elif(event.key == pygame.K_n):
return "no"
if(event.type == pygame.QUIT):
sys.exit()
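#(Editor's note, not from the original workshop file: KEY is defined earlier in
#the elided top of the script, as mentioned in STEP 2 above. Its exact values are
#not shown here; any mapping with four distinct values would behave the same,
#since the game only ever compares these entries for equality, e.g. something
#like KEY = {"UP": 1, "DOWN": 2, "RIGHT": 3, "LEFT": 4} -- an assumption, not the
#original definition.)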
#STEP 4
#Alright lets create the class for the basic snake cell which the snake will be composed of
#We have to create an init function as this will be called when we want to create a new cell.
#It has two underscores on each side and this is necessary.
#The parameters are going to be self, an x value, and a y value
#Then we set the self.x and self.y to the respective x and y values.
#Then we are going to set the direction to up by using the dictionary value of up
#Then we can set the color of the cell, this doesn't matter because we will be changing the color later
#But we do need to create a color for it so go wild with whatever color you want.
class Cell:
def __init__(self,x,y):
self.x = x
self.y = y
self.direction = KEY["UP"]
self.color = "white"
#STEP 5
#Now lets create the food class so we can create food for the snake to eat
#Again the parameters will be the self, x value, y value, and a state which is just an integer
#We're going to set the values like we did in the cell class, but now we don't have a direction
#We have a state and we need a color, you can choose any color you want your food to be, but this color will not change later
class Food:
def __init__(self,x,y,state):
self.x = x
self.y = y
self.state = state
self.color = pygame.Color("red")
#STEP 14
#We're going to write a function to draw the food
#parameters are going to be self and a screen
#lets draw a rect using the screen the self.color and the x, y coordinates and the food size and a 0 width
def draw(self,screen):
pygame.draw.rect(screen, self.color, (self.x, self.y, FOOD_SIZE, FOOD_SIZE), 0)
#STEP 6
#Lets create the snake class
#Remember to initialize it we have to create this init function.
#We have to have the parameters of self, an x value, and a y value
class Snake:
def __init__(self,x,y):
#So we initialize the snake's x and y location to the x and y passed in and
#set the direction of the snake to right
|
#STEP 7
#Lets create a function to move the snake
def move(self):
#So we're going to calculate the length of the snake - 1, since arrays start from 0 the last index will be
#the length of the snake - 1
lastCell = len(self.bodyStack) - 1
#We're going to write a while loop to iterate through all the snakes body cells from the end to the front of the snake.
#While we are not at the head of the snake lets go through every cell and make sure it is going in the direction of the
#previous cells and move that cell to the location of the cell in front of it. Then we decrease the cell index we are on.
while(lastCell != 0):
self.bodyStack[lastCell].direction = self.bodyStack[lastCell - 1].direction
self.bodyStack[lastCell].x = self.bodyStack[lastCell - 1].x
self.bodyStack[lastCell].y = self.bodyStack[lastCell - 1].y
lastCell -= 1
#we check if the body is less than 2 so we know its just the head, but if not
#then we pop out the head of the list
if(len(self.bodyStack) < 2):
headCell = self
else:
headCell = self.bodyStack.pop(lastCell)
#Now lets create some if-else statements to check if a new direction was inputted.
#We have to access the snakes bodystack and check the new direction
#If the direction is up then we set the y coordinate of the head above the second cell
#If the direction is right then we set the x coordinate of the head to the right of the second cell
#If the direction is down then we set the y coordinate of the head below the second cell
#If the direction is left then we set the x coordinate of the head to the left of the second cell
#I used fps * movement speed to make it relative to how fast you want your snake to go
if(self.bodyStack[0].direction == KEY["UP"]):
headCell.y = self.bodyStack[0].y - (FPS * MOVEMENT_SPEED)
elif(self.bodyStack[0].direction == KEY["RIGHT"]):
headCell.x = self.bodyStack[0 | self.x = x
self.y = y
self.direction = KEY["RIGHT"]
#We're going to create a list to hold all the snakes body
self.bodyStack = []
#adding the first snake cell to the list of cells
self.bodyStack.append(self)
#We're going to create an end cell to separate the body cells, we create the end cell to the left of the head
endCell = Cell(x - SEPERATION,y)
#We don't want a color, so we don't show it on the screen
endCell.color = "NULL"
#Make the end cell the same direction as the head.
endCell.direction = KEY["RIGHT"]
#Then we add the cell to body list
self.bodyStack.append(endCell) | identifier_body |
workshopSteps.py | ), 1)]
#lets spawn a food by passing in the food list and snake coordinates
spawnSingleFood(food, snake.x, snake.y)
while end != 1:
#Lets comment these out now and start from the beginning now for the loop that will run endlessly
#keyPressed = getPressedKey()
#screen.fill(background_color)
#pygame.display.update()
#Going to have the pygame clock tick as fast as the FPS
clock.tick(FPS)
#Create the keypressed variable again and call the function getPressedKey
keyPressed = getPressedKey()
#We check if the keypressed value is equal to exit, and if it is we make end equal to 1
if(keyPressed == "exit"):
end = 1
#lets check if the snake is crashing into itself and if it is end the game
if(snake.checkCrash() == True):
gameEnd()
#lets check if the snake is crashing into the edges and if its true end the game
if(crashing(snake.bodyStack[0], SNAKE_BLOCK_SIZE) == True):
gameEnd()
#we're going to check for all the food and if the food is not eaten then check for the snake colliding with food
#if the snake does collide then we make the snake grow make sure the food is set to state 0 for eaten
#and we add to the score and make eaten food equal to true
for f in food:
if(f.state == 1):
if(checkCollision(snake.bodyStack[0],SNAKE_BLOCK_SIZE, f, FOOD_SIZE) == True):
snake.grow()
f.state = 0
score += 5
eaten_food = True
#if the snake has eaten then we spawn another food and changed eaten food to false
if(eaten_food == True):
spawnSingleFood(food, snake.bodyStack[0].x, snake.bodyStack[0].y)
eaten_food = False
#If a key was pressed we try to change the direction of the snake then we move again
if(keyPressed):
snake.changeDirection(keyPressed)
snake.move()
#We fill the screen in again with the color
screen.fill(background_color)
#we check for all the food and if the food has not been eaten then we draw it on the screen
for f in food:
if(f.state == 1):
f.draw(screen)
#lets draw the snake on the screen
snake.draw(screen)
#lets draw the score
drawScore(score)
#We call pygame.display.flip to layer the screen
pygame.display.flip()
#we update the display
pygame.display.update()
#STEP 2
#lets create a function to access these events
#we're going to write a for loop to check all the events that can happen in the window
#We're going to create a bunch of if statements and else if statements
#We're going to check the event type and if that event type is equal to either
#key up, key down, key right, key left, key escape, key y, key n, or quit then we are going to return
#either the value of the respective key we defined earlier, a string of what to do, or exit the system
def getPressedKey():
for event in pygame.event.get():
if(event.type == pygame.KEYDOWN):
if(event.key == pygame.K_UP):
return KEY["UP"]
elif(event.key == pygame.K_DOWN):
return KEY["DOWN"]
elif(event.key == pygame.K_RIGHT):
return KEY["RIGHT"]
elif(event.key == pygame.K_LEFT):
return KEY["LEFT"]
elif(event.key == pygame.K_ESCAPE):
return "exit"
elif(event.key == pygame.K_y):
return "yes"
elif(event.key == pygame.K_n):
return "no"
if(event.type == pygame.QUIT):
sys.exit()
#STEP 4
#Alright lets create the class for the basic snake cell which the snake will be composed of
#We have to create an init function as this will be called when we want to create a new cell.
#It has two underscores on each side and this is necessary.
#The parameters are going to be self, an x value, and a y value
#Then we set the self.x and self.y to the respective x and y values.
#Then we are going to set the direction to up by using the dictionary value of up
#Then we can set the color of the cell, this doesn't matter because we will be changing the color later
#But we do need to create a color for it so go wild with whatever color you want.
class Cell:
def __init__(self,x,y):
self.x = x
self.y = y
self.direction = KEY["UP"]
self.color = "white"
#STEP 5
#Now lets create the food class so we can create food for the snake to eat
#Again the parameters will be the self, x value, y value, and a state which is just an integer
#We're going to set the values like we did in the cell class, but now we don't have a direction
#We have a state and we need a color, you can choose any color you want your food to be, but this color will not change later
class Food:
def __init__(self,x,y,state):
self.x = x
self.y = y
self.state = state
self.color = pygame.Color("red")
#STEP 14
#We're going to write a function to draw the food
#parameters are going to be self and a screen
#lets draw a rect using the screen the self.color and the x, y coordinates and the food size and a 0 width
def draw(self,screen):
pygame.draw.rect(screen, self.color, (self.x, self.y, FOOD_SIZE, FOOD_SIZE), 0)
#STEP 6
#Lets create the snake class
#Remember to initialize it we have to create this init function.
#We have to have the parameters of self, an x value, and a y value
class | :
def __init__(self,x,y):
#So we initialize the snake's x and y location to the x and y passed in and
#set the direction of the snake to right
self.x = x
self.y = y
self.direction = KEY["RIGHT"]
#We're going to create a list to hold all the snakes body
self.bodyStack = []
#adding the first snake cell to the list of cells
self.bodyStack.append(self)
#We're going to create an end cell to separate the body cells, we create the end cell to the left of the head
endCell = Cell(x - SEPERATION,y)
#We don't want a color, so we don't show it on the screen
endCell.color = "NULL"
#Make the end cell the same direction as the head.
endCell.direction = KEY["RIGHT"]
#Then we add the cell to body list
self.bodyStack.append(endCell)
#STEP 7
#Lets create a function to move the snake
def move(self):
#So we're going to calculate the length of the snake - 1, since arrays start from 0 the last index will be
#the length of the snake - 1
lastCell = len(self.bodyStack) - 1
#We're going to write a while loop to iterate through all the snakes body cells from the end to the front of the snake.
#While we are not at the head of the snake lets go through every cell and make sure it is going in the direction of the
#previous cells and move that cell to the location of the cell in front of it. Then we decrease the cell index we are on.
while(lastCell != 0):
self.bodyStack[lastCell].direction = self.bodyStack[lastCell - 1].direction
self.bodyStack[lastCell].x = self.bodyStack[lastCell - 1].x
self.bodyStack[lastCell].y = self.bodyStack[lastCell - 1].y
lastCell -= 1
#we check if the body is less than 2 so we know its just the head, but if not
#then we pop out the head of the list
if(len(self.bodyStack) < 2):
headCell = self
else:
headCell = self.bodyStack.pop(lastCell)
#Now lets create some if-else statements to check if a new direction was inputted.
#We have to access the snakes bodystack and check the new direction
#If the direction is up then we set the y coordinate of the head above the second cell
#If the direction is right then we set the x coordinate of the head to the right of the second cell
#If the direction is down then we set the y coordinate of the head below the second cell
#If the direction is left then we set the x coordinate of the head to the left of the second cell
#I used fps * movement speed to make it relative to how fast you want your snake to go
if(self.bodyStack[0].direction == KEY["UP"]):
headCell.y = self.bodyStack[0].y - (FPS * MOVEMENT_SPEED)
elif(self.bodyStack[0].direction == KEY["RIGHT"]):
headCell.x = self.bodyStack[0 | Snake | identifier_name |
workshopSteps.py | ), 1)]
#lets spawn a food by passing in the food list and snake coordinates
spawnSingleFood(food, snake.x, snake.y)
while end != 1:
#Lets comment these out now and start from the beginning now for the loop that will run endlessly
#keyPressed = getPressedKey()
#screen.fill(background_color)
#pygame.display.update()
#Going to have the pygame clock tick as fast as the FPS
clock.tick(FPS)
#Create the keypressed variable again and call the function getPressedKey
keyPressed = getPressedKey()
#We check if the keypressed value is equal to exit, and if it is we make end equal to 1
if(keyPressed == "exit"):
end = 1
#lets check if the snake is crashing into itself and if it is end the game
if(snake.checkCrash() == True):
gameEnd()
#lets check if the snake is crashing into the edges and if its true end the game
if(crashing(snake.bodyStack[0], SNAKE_BLOCK_SIZE) == True):
gameEnd()
#we're going to check for all the food and if the food is not eaten then check for the snake colliding with food
#if the snake does collide then we make the snake grow make sure the food is set to state 0 for eaten
#and we add to the score and make eaten food equal to true
for f in food:
if(f.state == 1):
if(checkCollision(snake.bodyStack[0],SNAKE_BLOCK_SIZE, f, FOOD_SIZE) == True):
snake.grow()
f.state = 0
score += 5
eaten_food = True
#if the snake has eaten then we spawn another food and changed eaten food to false
if(eaten_food == True):
spawnSingleFood(food, snake.bodyStack[0].x, snake.bodyStack[0].y)
eaten_food = False
#If a key was pressed we try to change the direction of the snake then we move again
if(keyPressed):
snake.changeDirection(keyPressed)
snake.move()
#We fill the screen in again with the color
screen.fill(background_color)
#we check for all the food and if the food has not been eaten then we draw it on the screen
for f in food:
if(f.state == 1):
f.draw(screen)
#lets draw the snake on the screen
snake.draw(screen)
#lets draw the score
drawScore(score)
#We call pygame.display.flip to layer the screen
pygame.display.flip()
#we update the display
pygame.display.update()
#STEP 2
#lets create a function to access these events
#we're going to write a for loop to check all the events that can happen in the window
#We're going to create a bunch of if statements and else if statements
#We're going to check the event type and if that event type is equal to either
#key up, key down, key right, key left, key escape, key y, key n, or quit then we are going to return
#either the value of the respective key we defined earlier, a string of what to do, or exit the system
def getPressedKey():
for event in pygame.event.get():
if(event.type == pygame.KEYDOWN):
if(event.key == pygame.K_UP):
return KEY["UP"]
elif(event.key == pygame.K_DOWN):
return KEY["DOWN"]
elif(event.key == pygame.K_RIGHT):
return KEY["RIGHT"]
elif(event.key == pygame.K_LEFT):
return KEY["LEFT"]
elif(event.key == pygame.K_ESCAPE):
return "exit"
elif(event.key == pygame.K_y):
return "yes"
elif(event.key == pygame.K_n):
return "no"
if(event.type == pygame.QUIT):
sys.exit()
#STEP 4
#Alright lets create the class for the basic snake cell which the snake will be composed of
#We have to create an init function as this will be called when we want to create a new cell.
#It has two underscores on each side and this is necessary.
#The parameters are going to be self, an x value, and a y value
#Then we set the self.x and self.y to the respective x and y values.
#Then we are going to set the direction to up by using the dictionary value of up
#Then we can set the color of the cell, this doesn't matter because we will be changing the color later
#But we do need to create a color for it so go wild with whatever color you want.
class Cell:
def __init__(self,x,y):
self.x = x
self.y = y
self.direction = KEY["UP"]
self.color = "white"
#STEP 5
#Now lets create the food class so we can create food for the snake to eat
#Again the parameters will be the self, x value, y value, and a state which is just an integer
#We're going to set the values like we did in the cell class, but now we don't have a direction
#We have a state and we need a color, you can choose any color you want your food to be, but this color will not change later
class Food:
def __init__(self,x,y,state):
self.x = x
self.y = y
self.state = state
self.color = pygame.Color("red")
#STEP 14 | #We're going to write a function to draw the food
#parameters are going to be self and a screen
#lets draw a rect using the screen the self.color and the x, y coordinates and the food size and a 0 width
def draw(self,screen):
pygame.draw.rect(screen, self.color, (self.x, self.y, FOOD_SIZE, FOOD_SIZE), 0)
#STEP 6
#Lets create the snake class
#Remember to initialize it we have to create this init function.
#We have to have the parameters of self, an x value, and a y value
class Snake:
def __init__(self,x,y):
#So we initialize the snake's x and y location to the x and y passed in and
#set the direction of the snake to right
self.x = x
self.y = y
self.direction = KEY["RIGHT"]
#We're going to create a list to hold all the snakes body
self.bodyStack = []
#adding the first snake cell to the list of cells
self.bodyStack.append(self)
#We're going to create an end cell to separate the body cells, we create the end cell to the left of the head
endCell = Cell(x - SEPERATION,y)
#We dont want a color so we don't show it on the graph
endCell.color = "NULL"
#Make the end cell the same direction as the head.
endCell.direction = KEY["RIGHT"]
#Then we add the cell to body list
self.bodyStack.append(endCell)
#STEP 7
#Lets create a function to move the snake
def move(self):
#So we're going to calculate the length of the snake - 1, since arrays start from 0 the last index will be
#the length of the snake - 1
lastCell = len(self.bodyStack) - 1
#We're going to write a while loop to iterate through all the snakes body cells from the end to the front of the snake.
#While we are not at the head of the snake lets go through every cell and make sure it is going in the direction of the
#previous cells and move that cell to the location of the cell in front of it. Then we decrease the cell index we are on.
while(lastCell != 0):
self.bodyStack[lastCell].direction = self.bodyStack[lastCell - 1].direction
self.bodyStack[lastCell].x = self.bodyStack[lastCell - 1].x
self.bodyStack[lastCell].y = self.bodyStack[lastCell - 1].y
lastCell -= 1
#we check if the body is less than 2 so we know its just the head, but if not
#then we pop out the head of the list
if(len(self.bodyStack) < 2):
headCell = self
else:
headCell = self.bodyStack.pop(lastCell)
#Now lets create some if-else statements to check if a new direction was inputted.
#We have to access the snakes bodystack and check the new direction
#If the direction is up then we set the y coordinate of the head above the second cell
#If the direction is right then we set the x coordinate of the head to the right of the second cell
#If the direction is down then we set the y coordinate of the head below the second cell
#If the direction is left then we set the x coordinate of the head to the left of the second cell
#I used fps * movement speed to make it relative to how fast you want your snake to go
if(self.bodyStack[0].direction == KEY["UP"]):
headCell.y = self.bodyStack[0].y - (FPS * MOVEMENT_SPEED)
elif(self.bodyStack[0].direction == KEY["RIGHT"]):
headCell.x = self.bodyStack[0].x | random_line_split |
|
workshopSteps.py | ), 1)]
#lets spawn a food by passing in the food list and snake coordinates
spawnSingleFood(food, snake.x, snake.y)
while end != 1:
#Lets comment these out now and start from the beginning now for the loop that will run endlessly
#keyPressed = getPressedKey()
#screen.fill(background_color)
#pygame.display.update()
#Going to have the pygame clock tick as fast as the FPS
clock.tick(FPS)
#Create the keypressed variable again and call the function getPressedKey
keyPressed = getPressedKey()
#We check if the keypressed value is equal to exit, and if it is we make end equal to 1
if(keyPressed == "exit"):
end = 1
#lets check if the snake is crashing into itself and if it is end the game
if(snake.checkCrash() == True):
gameEnd()
#lets check if the snake is crashing into the edges and if its true end the game
if(crashing(snake.bodyStack[0], SNAKE_BLOCK_SIZE) == True):
gameEnd()
#we're going to check for all the food and if the food is not eaten then check for the snake colliding with food
#if the snake does collide then we make the snake grow make sure the food is set to state 0 for eaten
#and we add to the score and make eaten food equal to true
for f in food:
if(f.state == 1):
if(checkCollision(snake.bodyStack[0],SNAKE_BLOCK_SIZE, f, FOOD_SIZE) == True):
snake.grow()
f.state = 0
score += 5
eaten_food = True
#if the snake has eaten then we spawn another food and changed eaten food to false
if(eaten_food == True):
|
#If a key was pressed we try to change the direction of the snake then we move again
if(keyPressed):
snake.changeDirection(keyPressed)
snake.move()
#We fill the screen in again with the color
screen.fill(background_color)
#we check for all the food and if the food has not been eaten then we draw it on the screen
for f in food:
if(f.state == 1):
f.draw(screen)
#lets draw the snake on the screen
snake.draw(screen)
#lets draw the score
drawScore(score)
#We call pygame.display.flip to layer the screen
pygame.display.flip()
#we update the display
pygame.display.update()
#STEP 2
#lets create a function to access these events
#we're going to write a for loop to check all the events that can happen in the window
#We're going to create a bunch of if statements and else if statements
#We're going to check the event type and if that event type is equal to either
#key up, key down, key right, key left, key escape, key y, key n, or quit then we are going to return
#either the value of the respective key we defined earlier, a string of what to do, or exit the system
def getPressedKey():
for event in pygame.event.get():
if(event.type == pygame.KEYDOWN):
if(event.key == pygame.K_UP):
return KEY["UP"]
elif(event.key == pygame.K_DOWN):
return KEY["DOWN"]
elif(event.key == pygame.K_RIGHT):
return KEY["RIGHT"]
elif(event.key == pygame.K_LEFT):
return KEY["LEFT"]
elif(event.key == pygame.K_ESCAPE):
return "exit"
elif(event.key == pygame.K_y):
return "yes"
elif(event.key == pygame.K_n):
return "no"
if(event.type == pygame.QUIT):
sys.exit()
#STEP 4
#Alright lets create the class for the basic snake cell which the snake will be composed of
#We have to create an init function as this will be called when we want to create a new cell.
#It has two underscores on each side and this is necessary.
#The parameters are going to be self, an x value, and a y value
#Then we set the self.x and self.y to the respective x and y values.
#Then we are going to set the direction to up by using the dictionary value of up
#Then we can set the color of the cell, this doesn't matter because we will be changing the color later
#But we do need to create a color for it so go wild with whatever color you want.
class Cell:
def __init__(self,x,y):
self.x = x
self.y = y
self.direction = KEY["UP"]
self.color = "white"
#STEP 5
#Now lets create the food class so we can create food for the snake to eat
#Again the parameters will be the the self, x value, y value, and a state which is just an integer
#We're going to set the values like we did in the cell class, but now we don't have a direction
#We have a state and we need a color, you can choose any color you want your food to be, but this color will not change later
class Food:
def __init__(self,x,y,state):
self.x = x
self.y = y
self.state = state
self.color = pygame.Color("red")
#STEP 14
#We're going to write a function to draw the food
#parameters are going to be self and a screen
#lets draw a rect using the screen the self.color and the x, y coordinates and the food size and a 0 width
def draw(self,screen):
pygame.draw.rect(screen, self.color, (self.x, self.y, FOOD_SIZE, FOOD_SIZE), 0)
#STEP 6
#Lets create the snake class
#Remember to initialize it we have to create this init function.
#We have to have the parameters of self, an x value, and a y value
class Snake:
def __init__(self,x,y):
#So we initalize the snakes x and y location to the x and y passed in and
#set the direction of the snake to right
self.x = x
self.y = y
self.direction = KEY["RIGHT"]
#We're going to create a list to hold all the snakes body
self.bodyStack = []
#adding the first snake cell to the list of cells
self.bodyStack.append(self)
#We're going to create an end cell to separate the body cells, we create the end cell to the left of the head
endCell = Cell(x - SEPERATION,y)
#We dont want a color so we don't show it on the graph
endCell.color = "NULL"
#Make the end cell the same direction as the head.
endCell.direction = KEY["RIGHT"]
#Then we add the cell to body list
self.bodyStack.append(endCell)
#STEP 7
#Lets create a function to move the snake
def move(self):
#So we're going to calculate the length of the snake - 1, since arrays start from 0 the last index will be
#the length of the snake - 1
lastCell = len(self.bodyStack) - 1
#We're going to write a while loop to iterate through all the snakes body cells from the end to the front of the snake.
#While we are not at the head of the snake lets go through every cell and make sure it is going in the direction of the
#previous cells and move that cell to the location of the cell in front of it. Then we decrease the cell index we are on.
while(lastCell != 0):
self.bodyStack[lastCell].direction = self.bodyStack[lastCell - 1].direction
self.bodyStack[lastCell].x = self.bodyStack[lastCell - 1].x
self.bodyStack[lastCell].y = self.bodyStack[lastCell - 1].y
lastCell -= 1
#we check if the body is less than 2 so we know its just the head, but if not
#then we pop out the head of the list
if(len(self.bodyStack) < 2):
headCell = self
else:
headCell = self.bodyStack.pop(lastCell)
#Now lets create some if-else statements to check if a new direction was inputted.
#We have to access the snakes bodystack and check the new direction
#If the direction is up then we set the y coordinate of the head above the second cell
#If the direction is right then we set the x coordinate of the head to the right of the second cell
#If the direction is down then we set the y coordinate of the head below the second cell
#If the direction is left then we set the y coordinate of the head to the left of the second cell
#I used fps * movement speed to make it relative to how fast you want your snake to go
if(self.bodyStack[0].direction == KEY["UP"]):
headCell.y = self.bodyStack[0].y - (FPS * MOVEMENT_SPEED)
elif(self.bodyStack[0].direction == KEY["RIGHT"]):
headCell.x = self.bodyStack[0]. | spawnSingleFood(food, snake.bodyStack[0].x, snake.bodyStack[0].y)
eaten_food = False | conditional_block |
glove.py | for list_ in self._lists:
vocab.update(list_)
self._logger.info("Done building vocab from corpus.")
if top is not None and top < len(vocab):
words = sorted(vocab.items(), key=lambda x: -x[1])[:top]
else:
words = vocab.items()
self._vocabulary = {word: (i, freq) for i, (word, freq) in enumerate(words)}
def build_id2word(self):
self.id2word = dict((id_, word) for word, (id_, _) in self._vocabulary.items())
def _build_cooccur(self):
"""
Build a word co-occurrence list for the given corpus.
This function is a tuple generator, where each element (representing
a cooccurrence pair) is of the form
(i_main, i_context, cooccurrence)
where `i_main` is the ID of the main word in the cooccurrence and
`i_context` is the ID of the context word, and `cooccurrence` is the
`X_{ij}` cooccurrence value as described in Pennington et al.
(2014).
If `min_count` is not `None`, cooccurrence pairs where either word
occurs in the corpus fewer than `min_count` times are ignored.
"""
| # indexing speed; we'll convert into a list later
cooccurrences = sparse.lil_matrix((vocab_size, vocab_size),
dtype=np.float64)
for i, list_ in enumerate(self._lists):
if i % 1000 == 0:
self._logger.info("Building cooccurrence matrix: on line %i", i)
token_ids = [self._vocabulary[word][0] for word in list_ if word in self._vocabulary]
for center_i, center_id in enumerate(token_ids):
# Collect all word IDs in left window of center word
context_ids = token_ids[:]
del context_ids[center_i]
contexts_len = len(context_ids)
for left_i, left_id in enumerate(context_ids):
# Build co-occurrence matrix symmetrically (pretend we
# are calculating right contexts as well)
cooccurrences[center_id, left_id] += 0.5
cooccurrences[left_id, center_id] += 0.5
# Now yield our tuple sequence (dig into the LiL-matrix internals to
# quickly iterate through all nonzero cells)
for i, (row, data) in enumerate(zip(cooccurrences.rows,
cooccurrences.data)):
if self._min_count is not None and self._vocabulary[self.id2word[i]][1] < self._min_count:
continue
for data_idx, j in enumerate(row):
if self._min_count is not None and self._vocabulary[self.id2word[j]][1] < self._min_count:
continue
yield i, j, data[data_idx]
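#(Editor's note, not from the original file: a hedged worked example of the
#counting above. For two identical lists [['a', 'b'], ['a', 'b']], each line
#adds 0.5 to cooccurrences[a, b] twice -- once with 'a' as the center word and
#once with 'b' as the center word -- and likewise for [b, a], so the generator
#ends up yielding (id_a, id_b, 2.0) and (id_b, id_a, 2.0).)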
def _run_iter(self, data):
"""
Run a single iteration of GloVe training using the given
cooccurrence data and the previously computed weight vectors /
biases and accompanying gradient histories.
`data` is a pre-fetched data / weights list where each element is of
the form
(v_main, v_context,
b_main, b_context,
gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context,
cooccurrence)
as produced by the `train_glove` function. Each element in this
tuple is an `ndarray` view into the data structure which contains
it.
See the `train_glove` function for information on the shapes of `W`,
`biases`, `gradient_squared`, `gradient_squared_biases` and how they
should be initialized.
The parameters `x_max`, `alpha` define our weighting function when
computing the cost for two word pairs; see the GloVe paper for more
details.
Returns the cost associated with the given weight assignments and
updates the weights by online AdaGrad in place.
"""
global_cost = 0
# We want to iterate over data randomly so as not to unintentionally
# bias the word vector contents
shuffle(data)
for (v_main, v_context, b_main, b_context, gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context, cooccurrence) in data:
weight = (cooccurrence / self._x_max) ** self._alpha if cooccurrence < self._x_max else 1
# Compute inner component of cost function, which is used in
# both overall cost calculation and in gradient calculation
#
# $$ J' = w_i^Tw_j + b_i + b_j - log(X_{ij}) $$
cost_inner = (v_main.dot(v_context)
+ b_main[0] + b_context[0]
- log(cooccurrence))
# Compute cost
#
# $$ J = f(X_{ij}) (J')^2 $$
cost = weight * (cost_inner ** 2)
# Add weighted cost to the global cost tracker
global_cost += 0.5 * cost
# Compute gradients for word vector terms.
#
# NB: `main_word` is only a view into `W` (not a copy), so our
# modifications here will affect the global weight matrix;
# likewise for context_word, biases, etc.
grad_main = weight * cost_inner * v_context
grad_context = weight * cost_inner * v_main
# Compute gradients for bias terms
grad_bias_main = weight * cost_inner
grad_bias_context = weight * cost_inner
# Now perform adaptive updates
v_main -= (self._learning_rate * grad_main / np.sqrt(gradsq_W_main))
v_context -= (self._learning_rate * grad_context / np.sqrt(gradsq_W_context))
b_main -= (self._learning_rate * grad_bias_main / np.sqrt(gradsq_b_main))
b_context -= (self._learning_rate * grad_bias_context / np.sqrt(
gradsq_b_context))
# Update squared gradient sums
gradsq_W_main += np.square(grad_main)
gradsq_W_context += np.square(grad_context)
gradsq_b_main += grad_bias_main ** 2
gradsq_b_context += grad_bias_context ** 2
return global_cost
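#(Editor's note, not from the original file: a small worked example of the
#weight above, assuming the common defaults x_max=100 and alpha=0.75. A pair
#observed 10 times gets weight (10 / 100) ** 0.75 ~= 0.178, while any pair
#observed at least x_max times is capped at weight 1, so very frequent pairs
#do not dominate the cost.)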
def _train(self, iter_callback=None):
"""
Train GloVe vectors on the given generator `cooccurrences`, where
each element is of the form
(word_i_id, word_j_id, x_ij)
where `x_ij` is a cooccurrence value $X_{ij}$ as presented in the
matrix defined by `build_cooccur` and the Pennington et al. (2014)
paper itself.
If `iter_callback` is not `None`, the provided function will be
called after each iteration with the learned `W` matrix so far.
Keyword arguments are passed on to the iteration step function
`run_iter`.
Returns the computed word vector matrix `W`.
"""
vocab_size = len(self._vocabulary)
# Word vector matrix. This matrix is (2V) * d, where V is the size
# of the corpus vocabulary and d is the dimensionality of the word
# vectors. All elements are initialized randomly in the range (-0.5,
# 0.5]. We build two word vectors for each word: one for the word as
# the main (center) word and one for the word as a context word.
#
# It is up to the client to decide what to do with the resulting two
# vectors. Pennington et al. (2014) suggest adding or averaging the
# two for each word, or discarding the context vectors.
W = (np.random.rand(vocab_size * 2, self._vector_size) - 0.5) / float(self._vector_size + 1)
# Bias terms, each associated with a single vector. An array of size
# $2V$, initialized randomly in the range (-0.5, 0.5].
biases = (np.random.rand(vocab_size * 2) - 0.5) / float(self._vector_size + 1)
# Training is done via adaptive gradient descent (AdaGrad). To make
# this work we need to store the sum of squares of all previous
# gradients.
#
# Like `W`, this matrix is (2V) * d.
#
# Initialize all squared gradient sums to 1 so that our initial
# adaptive learning rate is simply the global learning rate.
gradient_squared = np.ones((vocab_size * 2, self._vector_size),
dtype=np.float64)
# Sum of squared gradients for the bias terms.
gradient_squared_biases = np.ones(vocab_size * 2, dtype=np.float64)
# Build a reusable list from the given cooccurrence generator,
# pre-fetching all necessary data.
#
# NB: These are all views into the actual data matrices, so updates
# to them will pass on to | vocab_size = len(self._vocabulary)
# Collect cooccurrences internally as a sparse matrix for passable | random_line_split |
glove.py | .id2word[i]][1] < self._min_count:
continue
for data_idx, j in enumerate(row):
if self._min_count is not None and self._vocabulary[self.id2word[j]][1] < self._min_count:
continue
yield i, j, data[data_idx]
def _run_iter(self, data):
"""
Run a single iteration of GloVe training using the given
cooccurrence data and the previously computed weight vectors /
biases and accompanying gradient histories.
`data` is a pre-fetched data / weights list where each element is of
the form
(v_main, v_context,
b_main, b_context,
gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context,
cooccurrence)
as produced by the `train_glove` function. Each element in this
tuple is an `ndarray` view into the data structure which contains
it.
See the `train_glove` function for information on the shapes of `W`,
`biases`, `gradient_squared`, `gradient_squared_biases` and how they
should be initialized.
The parameters `x_max`, `alpha` define our weighting function when
computing the cost for two word pairs; see the GloVe paper for more
details.
Returns the cost associated with the given weight assignments and
updates the weights by online AdaGrad in place.
"""
global_cost = 0
# We want to iterate over data randomly so as not to unintentionally
# bias the word vector contents
shuffle(data)
for (v_main, v_context, b_main, b_context, gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context, cooccurrence) in data:
weight = (cooccurrence / self._x_max) ** self._alpha if cooccurrence < self._x_max else 1
# Compute inner component of cost function, which is used in
# both overall cost calculation and in gradient calculation
#
# $$ J' = w_i^Tw_j + b_i + b_j - log(X_{ij}) $$
cost_inner = (v_main.dot(v_context)
+ b_main[0] + b_context[0]
- log(cooccurrence))
# Compute cost
#
# $$ J = f(X_{ij}) (J')^2 $$
cost = weight * (cost_inner ** 2)
# Add weighted cost to the global cost tracker
global_cost += 0.5 * cost
# Compute gradients for word vector terms.
#
# NB: `main_word` is only a view into `W` (not a copy), so our
# modifications here will affect the global weight matrix;
# likewise for context_word, biases, etc.
grad_main = weight * cost_inner * v_context
grad_context = weight * cost_inner * v_main
# Compute gradients for bias terms
grad_bias_main = weight * cost_inner
grad_bias_context = weight * cost_inner
# Now perform adaptive updates
v_main -= (self._learning_rate * grad_main / np.sqrt(gradsq_W_main))
v_context -= (self._learning_rate * grad_context / np.sqrt(gradsq_W_context))
b_main -= (self._learning_rate * grad_bias_main / np.sqrt(gradsq_b_main))
b_context -= (self._learning_rate * grad_bias_context / np.sqrt(
gradsq_b_context))
# Update squared gradient sums
gradsq_W_main += np.square(grad_main)
gradsq_W_context += np.square(grad_context)
gradsq_b_main += grad_bias_main ** 2
gradsq_b_context += grad_bias_context ** 2
return global_cost
def _train(self, iter_callback=None):
"""
Train GloVe vectors on the given generator `cooccurrences`, where
each element is of the form
(word_i_id, word_j_id, x_ij)
where `x_ij` is a cooccurrence value $X_{ij}$ as presented in the
matrix defined by `build_cooccur` and the Pennington et al. (2014)
paper itself.
If `iter_callback` is not `None`, the provided function will be
called after each iteration with the learned `W` matrix so far.
Keyword arguments are passed on to the iteration step function
`run_iter`.
Returns the computed word vector matrix `W`.
"""
vocab_size = len(self._vocabulary)
# Word vector matrix. This matrix is (2V) * d, where V is the size
# of the corpus vocabulary and d is the dimensionality of the word
# vectors. All elements are initialized randomly in the range (-0.5,
# 0.5]. We build two word vectors for each word: one for the word as
# the main (center) word and one for the word as a context word.
#
# It is up to the client to decide what to do with the resulting two
# vectors. Pennington et al. (2014) suggest adding or averaging the
# two for each word, or discarding the context vectors.
W = (np.random.rand(vocab_size * 2, self._vector_size) - 0.5) / float(self._vector_size + 1)
# Bias terms, each associated with a single vector. An array of size
# $2V$, initialized randomly in the range (-0.5, 0.5].
biases = (np.random.rand(vocab_size * 2) - 0.5) / float(self._vector_size + 1)
# Training is done via adaptive gradient descent (AdaGrad). To make
# this work we need to store the sum of squares of all previous
# gradients.
#
# Like `W`, this matrix is (2V) * d.
#
# Initialize all squared gradient sums to 1 so that our initial
# adaptive learning rate is simply the global learning rate.
gradient_squared = np.ones((vocab_size * 2, self._vector_size),
dtype=np.float64)
# Sum of squared gradients for the bias terms.
gradient_squared_biases = np.ones(vocab_size * 2, dtype=np.float64)
# Build a reusable list from the given cooccurrence generator,
# pre-fetching all necessary data.
#
# NB: These are all views into the actual data matrices, so updates
# to them will pass on to the real data structures
#
# (We even extract the single-element biases as slices so that we
# can use them as views)
data = [(W[i_main], W[i_context + vocab_size],
biases[i_main: i_main + 1],
biases[i_context + vocab_size: i_context + vocab_size + 1],
gradient_squared[i_main], gradient_squared[i_context + vocab_size],
gradient_squared_biases[i_main: i_main + 1],
gradient_squared_biases[i_context + vocab_size
: i_context + vocab_size + 1],
cooccurrence)
for i_main, i_context, cooccurrence in self._build_cooccur()]
for i in range(self._iterations):
if (i + 1) % 500 == 0:
self._logger.info("\tBeginning iteration %i..", i + 1)
cost = self._run_iter(data)
if (i + 1) % 500 == 0:
self._logger.info("\t\tDone (cost %f)", cost)
if iter_callback is not None:
iter_callback(W)
return W
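#(Editor's note, not from the original file: a hedged illustration of the
#shapes described above. With a vocabulary of V = 1000 words and
#vector_size = 50, W has shape (2000, 50): rows 0..999 hold the main-word
#vectors and rows 1000..1999 the context-word vectors, and the same split
#applies to `biases` and the two squared-gradient accumulators.)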
def _merge_main_context(self, W, merge_fun=lambda m, c: np.mean([m, c], axis=0),
normalize=True):
"""
Merge the main-word and context-word vectors for a weight matrix
using the provided merge function (which accepts a main-word and
context-word vector and returns a merged version).
By default, `merge_fun` returns the mean of the two vectors.
"""
vocab_size = int(len(W) / 2)
for i, row in enumerate(W[:vocab_size]):
merged = merge_fun(row, W[i + vocab_size])
if normalize:
merged /= np.linalg.norm(merged)
W[i, :] = merged
return W[:vocab_size]
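#(Editor's note, not from the original file: a hedged sketch of what the merge
#above does. If the vocabulary has 3 words, W comes in with 6 rows; row i is
#averaged with row i + 3 by the default merge_fun, optionally L2-normalised,
#written back into row i, and only the first 3 rows are returned -- one merged
#vector per word.)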
if __name__ == '__main__':
| test_lists = [
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['owoce', 'szynka'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['woda', 'chleb'],
['kawa', 'ciastka'],
['kawa', 'ciastka'],
['kawa', 'ciastka'], | conditional_block |
|
glove.py |
def fit(self):
space = self._train()
merged_space = self._merge_main_context(space)
return merged_space
def setup_logger(self, name_='GloVe'):
# TODO redirect to file
self._logger = logging.getLogger(name_)
stream_logger = logging.StreamHandler()
stream_logger.setLevel(self._logging_level)
self._logger.addHandler(stream_logger)
def build_vocab(self, top=None):
"""
Build a vocabulary with word frequencies for an entire corpus.
:param top: If not None, only the first <top> words, based on frequency, will be preserved
Stores `self._vocabulary`, a dictionary `w -> (i, f)` mapping word strings to pairs of
word ID and word corpus frequency.
"""
self._logger.info("Building vocab from corpus")
vocab = Counter()
for list_ in self._lists:
vocab.update(list_)
self._logger.info("Done building vocab from corpus.")
if top is not None and top < len(vocab):
words = sorted(vocab.items(), key=lambda x: -x[1])[:top]
else:
words = vocab.items()
self._vocabulary = {word: (i, freq) for i, (word, freq) in enumerate(words)}
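#(Editor's note, not from the original file: a hedged worked example of the
#mapping built above. For lists [['coffee', 'cookies'], ['coffee', 'bread']]
#the Counter sees coffee twice and the other words once, so _vocabulary ends
#up as something like {'coffee': (0, 2), 'cookies': (1, 1), 'bread': (2, 1)};
#the exact IDs depend on iteration order when `top` is not given.)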
def build_id2word(self):
self.id2word = dict((id_, word) for word, (id_, _) in self._vocabulary.items())
def _build_cooccur(self):
"""
Build a word co-occurrence list for the given corpus.
This function is a tuple generator, where each element (representing
a cooccurrence pair) is of the form
(i_main, i_context, cooccurrence)
where `i_main` is the ID of the main word in the cooccurrence and
`i_context` is the ID of the context word, and `cooccurrence` is the
`X_{ij}` cooccurrence value as described in Pennington et al.
(2014).
If `min_count` is not `None`, cooccurrence pairs where either word
occurs in the corpus fewer than `min_count` times are ignored.
"""
vocab_size = len(self._vocabulary)
# Collect cooccurrences internally as a sparse matrix for passable
# indexing speed; we'll convert into a list later
cooccurrences = sparse.lil_matrix((vocab_size, vocab_size),
dtype=np.float64)
for i, list_ in enumerate(self._lists):
if i % 1000 == 0:
self._logger.info("Building cooccurrence matrix: on line %i", i)
token_ids = [self._vocabulary[word][0] for word in list_ if word in self._vocabulary]
for center_i, center_id in enumerate(token_ids):
# Collect all word IDs in left window of center word
context_ids = token_ids[:]
del context_ids[center_i]
contexts_len = len(context_ids)
for left_i, left_id in enumerate(context_ids):
# Build co-occurrence matrix symmetrically (pretend we
# are calculating right contexts as well)
cooccurrences[center_id, left_id] += 0.5
cooccurrences[left_id, center_id] += 0.5
# Now yield our tuple sequence (dig into the LiL-matrix internals to
# quickly iterate through all nonzero cells)
for i, (row, data) in enumerate(zip(cooccurrences.rows,
cooccurrences.data)):
if self._min_count is not None and self._vocabulary[self.id2word[i]][1] < self._min_count:
continue
for data_idx, j in enumerate(row):
if self._min_count is not None and self._vocabulary[self.id2word[j]][1] < self._min_count:
continue
yield i, j, data[data_idx]
def _run_iter(self, data):
"""
Run a single iteration of GloVe training using the given
cooccurrence data and the previously computed weight vectors /
biases and accompanying gradient histories.
`data` is a pre-fetched data / weights list where each element is of
the form
(v_main, v_context,
b_main, b_context,
gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context,
cooccurrence)
as produced by the `train_glove` function. Each element in this
tuple is an `ndarray` view into the data structure which contains
it.
See the `train_glove` function for information on the shapes of `W`,
`biases`, `gradient_squared`, `gradient_squared_biases` and how they
should be initialized.
The parameters `x_max`, `alpha` define our weighting function when
computing the cost for two word pairs; see the GloVe paper for more
details.
Returns the cost associated with the given weight assignments and
updates the weights by online AdaGrad in place.
"""
global_cost = 0
# We want to iterate over data randomly so as not to unintentionally
# bias the word vector contents
shuffle(data)
for (v_main, v_context, b_main, b_context, gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context, cooccurrence) in data:
weight = (cooccurrence / self._x_max) ** self._alpha if cooccurrence < self._x_max else 1
# Compute inner component of cost function, which is used in
# both overall cost calculation and in gradient calculation
#
# $$ J' = w_i^Tw_j + b_i + b_j - log(X_{ij}) $$
cost_inner = (v_main.dot(v_context)
+ b_main[0] + b_context[0]
- log(cooccurrence))
# Compute cost
#
# $$ J = f(X_{ij}) (J')^2 $$
cost = weight * (cost_inner ** 2)
# Add weighted cost to the global cost tracker
global_cost += 0.5 * cost
# Compute gradients for word vector terms.
#
# NB: `main_word` is only a view into `W` (not a copy), so our
# modifications here will affect the global weight matrix;
# likewise for context_word, biases, etc.
grad_main = weight * cost_inner * v_context
grad_context = weight * cost_inner * v_main
# Compute gradients for bias terms
grad_bias_main = weight * cost_inner
grad_bias_context = weight * cost_inner
# Now perform adaptive updates
v_main -= (self._learning_rate * grad_main / np.sqrt(gradsq_W_main))
v_context -= (self._learning_rate * grad_context / np.sqrt(gradsq_W_context))
b_main -= (self._learning_rate * grad_bias_main / np.sqrt(gradsq_b_main))
b_context -= (self._learning_rate * grad_bias_context / np.sqrt(
gradsq_b_context))
# Update squared gradient sums
gradsq_W_main += np.square(grad_main)
gradsq_W_context += np.square(grad_context)
gradsq_b_main += grad_bias_main ** 2
gradsq_b_context += grad_bias_context ** 2
return global_cost
def _train(self, iter_callback=None):
"""
Train GloVe vectors on the given generator `cooccurrences`, where
each element is of the form
(word_i_id, word_j_id, x_ij)
where `x_ij` is a cooccurrence value $X_{ij}$ as presented in the
matrix defined by `build_cooccur` and the Pennington et al. (2014)
paper itself.
If `iter_callback` is not `None`, the provided function will be
called after each iteration with the learned `W` matrix so far.
Keyword arguments are passed on to the iteration step function
`run_iter`.
Returns the computed word vector matrix `W`.
"""
vocab_size = len(self._vocabulary)
# Word vector matrix. This matrix is (2V) * d, where V is the size
# of the corpus vocabulary and d is the dimensionality of the word
# vectors. All elements are initialized randomly in the range (-0.5,
# 0.5]. We build two word vectors for each word: one for the word as
# the main (center) word and one for the word as a context word.
#
# It is up to the client to decide what to do with the resulting two
# vectors. Pennington et al. (2014) suggest adding or averaging the
# two for each word, or discarding the context vectors.
W = (np.random.rand(vocab_size * 2, self._vector_size) - 0.5) / float(self._vector_size + 1)
# Bias terms, each associated with a single vector. An array of size
# $2V$, initialized randomly in the range (-0.5, 0.5].
biases = (np.random.rand(vocab_size * 2) - 0.5) / float(self._vector_size + | self.setup_logger()
self.build_vocab()
self.build_id2word() | identifier_body |
|
glove.py | (self):
self.setup_logger()
self.build_vocab()
self.build_id2word()
def fit(self):
space = self._train()
merged_space = self._merge_main_context(space)
return merged_space
def setup_logger(self, name_='GloVe'):
# TODO redirect to file
self._logger = logging.getLogger(name_)
stream_logger = logging.StreamHandler()
stream_logger.setLevel(self._logging_level)
self._logger.addHandler(stream_logger)
def build_vocab(self, top=None):
"""
Build a vocabulary with word frequencies for an entire corpus.
:param top: If not None, only the first <top> words, based on frequency, will be preserved
Returns a dictionary `w -> (i, f)`, mapping word strings to pairs of
word ID and word corpus frequency.
"""
self._logger.info("Building vocab from corpus")
vocab = Counter()
for list_ in self._lists:
vocab.update(list_)
self._logger.info("Done building vocab from corpus.")
if top is not None and top < len(vocab):
words = sorted(vocab.items(), key=lambda x: -x[1])[:top]
else:
words = vocab.items()
self._vocabulary = {word: (i, freq) for i, (word, freq) in enumerate(words)}
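# --- Illustrative sketch (not part of the original class): the same
# Counter-based vocabulary construction on a tiny toy corpus, so the
# resulting `word -> (id, frequency)` mapping is easy to inspect.
from collections import Counter

toy_lists = [["the", "cat", "sat"], ["the", "dog", "sat"]]
toy_counts = Counter()
for toy_line in toy_lists:
    toy_counts.update(toy_line)
toy_words = sorted(toy_counts.items(), key=lambda x: -x[1])
toy_vocab = {word: (i, freq) for i, (word, freq) in enumerate(toy_words)}
# toy_vocab might look like {'the': (0, 2), 'sat': (1, 2), 'cat': (2, 1), 'dog': (3, 1)}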
def build_id2word(self):
self.id2word = dict((id_, word) for word, (id_, _) in self._vocabulary.items())
def _build_cooccur(self):
"""
Build a word co-occurrence list for the given corpus.
This function is a tuple generator, where each element (representing
a cooccurrence pair) is of the form
(i_main, i_context, cooccurrence)
where `i_main` is the ID of the main word in the cooccurrence and
`i_context` is the ID of the context word, and `cooccurrence` is the
`X_{ij}` cooccurrence value as described in Pennington et al.
(2014).
If `min_count` is not `None`, cooccurrence pairs where either word
occurs in the corpus fewer than `min_count` times are ignored.
"""
vocab_size = len(self._vocabulary)
# Collect cooccurrences internally as a sparse matrix for passable
# indexing speed; we'll convert into a list later
cooccurrences = sparse.lil_matrix((vocab_size, vocab_size),
dtype=np.float64)
for i, list_ in enumerate(self._lists):
if i % 1000 == 0:
self._logger.info("Building cooccurrence matrix: on line %i", i)
token_ids = [self._vocabulary[word][0] for word in list_ if word in self._vocabulary]
for center_i, center_id in enumerate(token_ids):
# Collect all other word IDs in the list as context for the center word
context_ids = token_ids[:]
del context_ids[center_i]
contexts_len = len(context_ids)
for left_i, left_id in enumerate(context_ids):
# Build co-occurrence matrix symmetrically (pretend we
# are calculating right contexts as well)
cooccurrences[center_id, left_id] += 0.5
cooccurrences[left_id, center_id] += 0.5
# Now yield our tuple sequence (dig into the LiL-matrix internals to
# quickly iterate through all nonzero cells)
for i, (row, data) in enumerate(zip(cooccurrences.rows,
cooccurrences.data)):
if self._min_count is not None and self._vocabulary[self.id2word[i]][1] < self._min_count:
continue
for data_idx, j in enumerate(row):
if self._min_count is not None and self._vocabulary[self.id2word[j]][1] < self._min_count:
continue
yield i, j, data[data_idx]
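# --- Illustrative sketch (not part of the original class): the symmetric
# 0.5 + 0.5 accumulation used above, reproduced for a single token list with
# a plain dict instead of a scipy LiL matrix.
def toy_cooccurrences(token_ids):
    counts = {}
    for center_i, center_id in enumerate(token_ids):
        context_ids = token_ids[:center_i] + token_ids[center_i + 1:]
        for context_id in context_ids:
            counts[(center_id, context_id)] = counts.get((center_id, context_id), 0.0) + 0.5
            counts[(context_id, center_id)] = counts.get((context_id, center_id), 0.0) + 0.5
    return counts
# toy_cooccurrences([0, 1, 2]) gives every ordered pair a count of 1.0,
# because each pair is visited once from each side.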
def _run_iter(self, data):
"""
Run a single iteration of GloVe training using the given
cooccurrence data and the previously computed weight vectors /
biases and accompanying gradient histories.
`data` is a pre-fetched data / weights list where each element is of
the form
(v_main, v_context,
b_main, b_context,
gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context,
cooccurrence)
as produced by the `_train` method. Each element in this
tuple is an `ndarray` view into the data structure which contains
it.
See the `_train` method for information on the shapes of `W`,
`biases`, `gradient_squared`, `gradient_squared_biases` and how they
should be initialized.
The parameters `x_max`, `alpha` define our weighting function when
computing the cost for two word pairs; see the GloVe paper for more
details.
Returns the cost associated with the given weight assignments and
updates the weights by online AdaGrad in place.
"""
global_cost = 0
# We want to iterate over data randomly so as not to unintentionally
# bias the word vector contents
shuffle(data)
for (v_main, v_context, b_main, b_context, gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context, cooccurrence) in data:
weight = (cooccurrence / self._x_max) ** self._alpha if cooccurrence < self._x_max else 1
# Compute inner component of cost function, which is used in
# both overall cost calculation and in gradient calculation
#
# $$ J' = w_i^Tw_j + b_i + b_j - log(X_{ij}) $$
cost_inner = (v_main.dot(v_context)
+ b_main[0] + b_context[0]
- log(cooccurrence))
# Compute cost
#
# $$ J = f(X_{ij}) (J')^2 $$
cost = weight * (cost_inner ** 2)
# Add weighted cost to the global cost tracker
global_cost += 0.5 * cost
# Compute gradients for word vector terms.
#
# NB: `v_main` is only a view into `W` (not a copy), so our
# modifications here will affect the global weight matrix;
# likewise for `v_context`, the biases, and the gradient histories.
grad_main = weight * cost_inner * v_context
grad_context = weight * cost_inner * v_main
# Compute gradients for bias terms
grad_bias_main = weight * cost_inner
grad_bias_context = weight * cost_inner
# Now perform adaptive updates
v_main -= (self._learning_rate * grad_main / np.sqrt(gradsq_W_main))
v_context -= (self._learning_rate * grad_context / np.sqrt(gradsq_W_context))
b_main -= (self._learning_rate * grad_bias_main / np.sqrt(gradsq_b_main))
b_context -= (self._learning_rate * grad_bias_context / np.sqrt(
gradsq_b_context))
# Update squared gradient sums
gradsq_W_main += np.square(grad_main)
gradsq_W_context += np.square(grad_context)
gradsq_b_main += grad_bias_main ** 2
gradsq_b_context += grad_bias_context ** 2
return global_cost
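# --- Illustrative sketch (not part of the original class): the AdaGrad step
# used above, isolated for a single parameter vector. The function name and
# the default learning rate are assumptions for demonstration only.
import numpy as np

def adagrad_step(param, grad, grad_history, learning_rate=0.05):
    # Scale the step by the root of the accumulated squared gradients,
    # then add the current squared gradient to the history (in place,
    # mirroring how the ndarray views are updated above).
    param -= learning_rate * grad / np.sqrt(grad_history)
    grad_history += np.square(grad)
    return param, grad_history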
def _train(self, iter_callback=None):
"""
Train GloVe vectors on the cooccurrence generator built by
`_build_cooccur`, where each element is of the form
(word_i_id, word_j_id, x_ij)
where `x_ij` is a cooccurrence value $X_{ij}$ as presented in the
matrix defined by `_build_cooccur` and the Pennington et al. (2014)
paper itself.
If `iter_callback` is not `None`, the provided function will be
called after each iteration with the learned `W` matrix so far.
The per-iteration update step itself is implemented by
`_run_iter`.
Returns the computed word vector matrix `W`.
"""
vocab_size = len(self._vocabulary)
# Word vector matrix. This matrix is (2V) * d, where V is the size
# of the corpus vocabulary and d is the dimensionality of the word
# vectors. All elements are initialized randomly in the range (-0.5,
# 0.5]. We build two word vectors for each word: one for the word as
# the main (center) word and one for the word as a context word.
#
# It is up to the client to decide what to do with the resulting two
# vectors. Pennington et al. (2014) suggest adding or averaging the
# two for each word, or discarding the context vectors.
W = (np.random.rand(vocab_size * 2, self._vector_size) - 0.5) / float(self._vector_size + 1)
# Bias terms, each associated with a single vector. An array of size
# $2V$, initialized randomly in the range (-0.5, 0.5].
biases = (np.random.rand(vocab_size * 2) - 0.5) / float(self._ | setup | identifier_name |
|
printer.rs | , "css"),
// In HTTPie part of this behavior is gated behind the --json flag
// But it does JSON formatting even without that flag, so doing
// this check unconditionally is fine
ContentType::Text | ContentType::JavaScript if valid_json(body) => {
self.print_json_text(body, false)
}
ContentType::JavaScript => self.print_syntax_text(body, "js"),
_ => self.buffer.print(body),
}
}
fn print_stream(&mut self, reader: &mut impl Read) -> io::Result<()> {
if !self.buffer.is_terminal() {
return copy_largebuf(reader, &mut self.buffer, true);
}
let mut guard = BinaryGuard::new(reader, true);
while let Some(lines) = guard.read_lines()? {
self.buffer.write_all(lines)?;
self.buffer.flush()?;
}
Ok(())
}
fn print_colorized_stream(
&mut self,
stream: &mut impl Read,
syntax: &'static str,
) -> io::Result<()> {
let mut guard = BinaryGuard::new(stream, self.buffer.is_terminal());
let mut highlighter = self.get_highlighter(syntax);
while let Some(lines) = guard.read_lines()? {
for line in lines.split_inclusive(|&b| b == b'\n') {
highlighter.highlight_bytes(line)?;
}
highlighter.flush()?;
}
Ok(())
}
fn print_syntax_stream(
&mut self,
stream: &mut impl Read,
syntax: &'static str,
) -> io::Result<()> {
if self.color {
self.print_colorized_stream(stream, syntax)
} else {
self.print_stream(stream)
}
}
fn print_json_stream(&mut self, stream: &mut impl Read) -> io::Result<()> {
if !self.indent_json {
// We don't have to do anything specialized, so fall back to the generic version
self.print_syntax_stream(stream, "json")
} else if self.color {
let mut guard = BinaryGuard::new(stream, self.buffer.is_terminal());
let mut formatter = get_json_formatter(&self.format_options);
let mut highlighter = self.get_highlighter("json");
let mut buf = Vec::new();
while let Some(lines) = guard.read_lines()? {
formatter.format_buf(lines, &mut buf)?;
for line in buf.split_inclusive(|&b| b == b'\n') {
highlighter.highlight_bytes(line)?;
}
highlighter.flush()?;
buf.clear();
}
Ok(())
} else {
let mut formatter = get_json_formatter(&self.format_options);
if !self.buffer.is_terminal() {
let mut buf = vec![0; BUFFER_SIZE];
loop {
match stream.read(&mut buf) {
Ok(0) => return Ok(()),
Ok(n) => {
formatter.format_buf(&buf[0..n], &mut self.buffer)?;
self.buffer.flush()?;
}
Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
}
}
}
let mut guard = BinaryGuard::new(stream, true);
while let Some(lines) = guard.read_lines()? {
formatter.format_buf(lines, &mut self.buffer)?;
self.buffer.flush()?;
}
Ok(())
}
}
fn print_body_stream(
&mut self,
content_type: ContentType,
body: &mut impl Read,
) -> io::Result<()> {
match content_type {
ContentType::Json => self.print_json_stream(body),
ContentType::Xml => self.print_syntax_stream(body, "xml"),
ContentType::Html => self.print_syntax_stream(body, "html"),
ContentType::Css => self.print_syntax_stream(body, "css"),
// print_body_text() has fancy JSON detection, but we can't do that here
ContentType::JavaScript => self.print_syntax_stream(body, "js"),
_ => self.print_stream(body),
}
}
fn print_headers(&mut self, text: &str) -> io::Result<()> {
if self.color {
self.print_colorized_text(text, "http")
} else {
self.buffer.print(text)
}
}
fn headers_to_string(&self, headers: &HeaderMap, version: Version) -> String {
let as_titlecase = match version {
Version::HTTP_09 | Version::HTTP_10 | Version::HTTP_11 => true,
Version::HTTP_2 | Version::HTTP_3 => false,
_ => false,
};
let mut headers: Vec<(&HeaderName, &HeaderValue)> = headers.iter().collect();
if self.sort_headers {
headers.sort_by_key(|(name, _)| name.as_str());
}
let mut header_string = String::new();
for (key, value) in headers {
if as_titlecase {
// Ought to be equivalent to how hyper does it
// https://github.com/hyperium/hyper/blob/f46b175bf71b202fbb907c4970b5743881b891e1/src/proto/h1/role.rs#L1332
// Header names are ASCII so it's ok to operate on char instead of u8
let mut prev = '-';
for mut c in key.as_str().chars() {
if prev == '-' {
c.make_ascii_uppercase();
}
header_string.push(c);
prev = c;
}
} else {
header_string.push_str(key.as_str());
}
header_string.push_str(": ");
match value.to_str() {
Ok(value) => header_string.push_str(value),
#[allow(clippy::format_push_string)]
Err(_) => header_string.push_str(&format!("{:?}", value)),
}
header_string.push('\n');
}
header_string.pop();
header_string
}
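// --- Illustrative sketch (not part of the original impl): the title-casing
// rule from the loop above, extracted into a free function. The name
// `titlecase_header` is an assumption for demonstration only.
fn titlecase_header(name: &str) -> String {
    let mut out = String::with_capacity(name.len());
    let mut prev = '-';
    for mut c in name.chars() {
        // Uppercase the first character and any character that follows a '-'.
        if prev == '-' {
            c.make_ascii_uppercase();
        }
        out.push(c);
        prev = c;
    }
    out
}
// titlecase_header("content-type") == "Content-Type"
// titlecase_header("x-request-id") == "X-Request-Id"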
pub fn print_separator(&mut self) -> io::Result<()> {
self.buffer.print("\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_headers<T>(&mut self, request: &Request, cookie_jar: &T) -> io::Result<()>
where
T: CookieStore,
{
let method = request.method();
let url = request.url();
let query_string = url.query().map_or(String::from(""), |q| ["?", q].concat());
let version = request.version();
let mut headers = request.headers().clone();
headers
.entry(ACCEPT)
.or_insert_with(|| HeaderValue::from_static("*/*"));
if let Some(cookie) = cookie_jar.cookies(url) {
headers.insert(COOKIE, cookie);
}
// See https://github.com/seanmonstar/reqwest/issues/1030
// reqwest and hyper add certain headers, but only in the process of
// sending the request, which we haven't done yet
if let Some(body) = request.body().and_then(Body::as_bytes) {
// Added at https://github.com/seanmonstar/reqwest/blob/e56bd160ba/src/blocking/request.rs#L132
headers
.entry(CONTENT_LENGTH)
.or_insert_with(|| body.len().into());
}
if let Some(host) = request.url().host_str() {
// This is incorrect in case of HTTP/2, but we're already assuming
// HTTP/1.1 anyway
headers.entry(HOST).or_insert_with(|| {
// Added at https://github.com/hyperium/hyper/blob/dfa1bb291d/src/client/client.rs#L237
if test_mode() {
HeaderValue::from_str("http.mock")
} else if let Some(port) = request.url().port() {
HeaderValue::from_str(&format!("{}:{}", host, port))
} else {
HeaderValue::from_str(host)
}
.expect("hostname should already be validated/parsed")
});
}
let request_line = format!("{} {}{} {:?}\n", method, url.path(), query_string, version);
let headers = self.headers_to_string(&headers, version);
self.print_headers(&(request_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_response_headers(&mut self, response: &Response) -> io::Result<()> {
let version = response.version();
let status = response.status();
let headers = response.headers();
let status_line = format!("{:?} {}\n", version, status);
let headers = self.headers_to_string(headers, version);
self.print_headers(&(status_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_body(&mut self, request: &mut Request) -> anyhow::Result<()> | {
let content_type = get_content_type(request.headers());
if let Some(body) = request.body_mut() {
let body = body.buffer()?;
if body.contains(&b'\0') {
self.buffer.print(BINARY_SUPPRESSOR)?;
} else {
self.print_body_text(content_type, &String::from_utf8_lossy(body))?;
self.buffer.print("\n")?;
}
// Breathing room between request and response
self.buffer.print("\n")?;
self.buffer.flush()?;
}
Ok(())
} | identifier_body |
|
printer.rs | (url) {
headers.insert(COOKIE, cookie);
}
// See https://github.com/seanmonstar/reqwest/issues/1030
// reqwest and hyper add certain headers, but only in the process of
// sending the request, which we haven't done yet
if let Some(body) = request.body().and_then(Body::as_bytes) {
// Added at https://github.com/seanmonstar/reqwest/blob/e56bd160ba/src/blocking/request.rs#L132
headers
.entry(CONTENT_LENGTH)
.or_insert_with(|| body.len().into());
}
if let Some(host) = request.url().host_str() {
// This is incorrect in case of HTTP/2, but we're already assuming
// HTTP/1.1 anyway
headers.entry(HOST).or_insert_with(|| {
// Added at https://github.com/hyperium/hyper/blob/dfa1bb291d/src/client/client.rs#L237
if test_mode() {
HeaderValue::from_str("http.mock")
} else if let Some(port) = request.url().port() {
HeaderValue::from_str(&format!("{}:{}", host, port))
} else {
HeaderValue::from_str(host)
}
.expect("hostname should already be validated/parsed")
});
}
let request_line = format!("{} {}{} {:?}\n", method, url.path(), query_string, version);
let headers = self.headers_to_string(&headers, version);
self.print_headers(&(request_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_response_headers(&mut self, response: &Response) -> io::Result<()> {
let version = response.version();
let status = response.status();
let headers = response.headers();
let status_line = format!("{:?} {}\n", version, status);
let headers = self.headers_to_string(headers, version);
self.print_headers(&(status_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_body(&mut self, request: &mut Request) -> anyhow::Result<()> {
let content_type = get_content_type(request.headers());
if let Some(body) = request.body_mut() {
let body = body.buffer()?;
if body.contains(&b'\0') {
self.buffer.print(BINARY_SUPPRESSOR)?;
} else {
self.print_body_text(content_type, &String::from_utf8_lossy(body))?;
self.buffer.print("\n")?;
}
// Breathing room between request and response
self.buffer.print("\n")?;
self.buffer.flush()?;
}
Ok(())
}
pub fn print_response_body(
&mut self,
response: &mut Response,
encoding: Option<&'static Encoding>,
mime: Option<&str>,
) -> anyhow::Result<()> {
let starting_time = Instant::now();
let url = response.url().clone();
let content_type =
mime.map_or_else(|| get_content_type(response.headers()), ContentType::from);
let encoding = encoding.or_else(|| get_charset(response));
let compression_type = get_compression_type(response.headers());
let mut body = decompress(response, compression_type);
if !self.buffer.is_terminal() {
if (self.color || self.indent_json) && content_type.is_text() {
// The user explicitly asked for formatting even though this is
// going into a file, and the response is at least supposed to be
// text, so decode it
// TODO: HTTPie re-encodes output in the original encoding, we don't
// encoding_rs::Encoder::encode_from_utf8_to_vec_without_replacement()
// and guess_encoding() may help, but it'll require refactoring
// The current design is a bit unfortunate because there's no way to
// force UTF-8 output without coloring or formatting
// Unconditionally decoding is not an option because the body
// might not be text at all
if self.stream {
self.print_body_stream(
content_type,
&mut decode_stream(&mut body, encoding, &url)?,
)?;
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
let text = decode_blob_unconditional(&buf, encoding, &url);
self.print_body_text(content_type, &text)?;
}
} else if self.stream {
copy_largebuf(&mut body, &mut self.buffer, true)?;
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
self.buffer.print(&buf)?;
}
} else if self.stream {
match self
.print_body_stream(content_type, &mut decode_stream(&mut body, encoding, &url)?)
{
Ok(_) => {
self.buffer.print("\n")?;
}
Err(err) if err.kind() == io::ErrorKind::InvalidData => {
self.buffer.print(BINARY_SUPPRESSOR)?;
}
Err(err) => return Err(err.into()),
}
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
match decode_blob(&buf, encoding, &url) {
None => {
self.buffer.print(BINARY_SUPPRESSOR)?;
}
Some(text) => {
self.print_body_text(content_type, &text)?;
self.buffer.print("\n")?;
}
};
}
self.buffer.flush()?;
drop(body); // silence the borrow checker
response.meta_mut().content_download_duration = Some(starting_time.elapsed());
Ok(())
}
pub fn print_response_meta(&mut self, response: &Response) -> anyhow::Result<()> {
let meta = response.meta();
let mut total_elapsed_time = meta.request_duration.as_secs_f64();
if let Some(content_download_duration) = meta.content_download_duration {
total_elapsed_time += content_download_duration.as_secs_f64();
}
self.buffer
.print(format!("Elapsed time: {:.5}s", total_elapsed_time))?;
self.buffer.print("\n\n")?;
Ok(())
}
}
enum ContentType {
Json,
Html,
Xml,
JavaScript,
Css,
Text,
UrlencodedForm,
Multipart,
Unknown,
}
impl ContentType {
fn is_text(&self) -> bool {
!matches!(
self,
ContentType::Unknown | ContentType::UrlencodedForm | ContentType::Multipart
)
}
}
impl From<&str> for ContentType {
fn from(content_type: &str) -> Self {
if content_type.contains("json") {
ContentType::Json
} else if content_type.contains("html") {
ContentType::Html
} else if content_type.contains("xml") {
ContentType::Xml
} else if content_type.contains("multipart") {
ContentType::Multipart
} else if content_type.contains("x-www-form-urlencoded") {
ContentType::UrlencodedForm
} else if content_type.contains("javascript") {
ContentType::JavaScript
} else if content_type.contains("css") {
ContentType::Css
} else if content_type.contains("text") {
// We later check if this one's JSON
// HTTPie checks for "json", "javascript" and "text" in one place:
// https://github.com/httpie/httpie/blob/a32ad344dd/httpie/output/formatters/json.py#L14
// We have it more spread out but it behaves more or less the same
ContentType::Text
} else {
ContentType::Unknown
}
}
}
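// --- Illustrative sketch (a test module that is not in the source), showing
// how the substring checks above classify a few typical Content-Type values.
#[cfg(test)]
mod content_type_examples {
    use super::ContentType;

    #[test]
    fn classifies_common_mime_types() {
        assert!(matches!(ContentType::from("application/json; charset=utf-8"), ContentType::Json));
        assert!(matches!(ContentType::from("text/html"), ContentType::Html));
        assert!(matches!(ContentType::from("application/octet-stream"), ContentType::Unknown));
    }
}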
fn get_content_type(headers: &HeaderMap) -> ContentType {
headers
.get(CONTENT_TYPE)
.and_then(|value| value.to_str().ok())
.map_or(ContentType::Unknown, ContentType::from)
}
fn valid_json(text: &str) -> bool {
serde_json::from_str::<serde::de::IgnoredAny>(text).is_ok()
}
/// Decode a response, using BOM sniffing or chardet if the encoding is unknown.
///
/// This is different from [`Response::text`], which assumes UTF-8 as a fallback.
///
/// Returns `None` if the decoded text would contain null codepoints (i.e., is binary).
fn decode_blob<'a>(
raw: &'a [u8],
encoding: Option<&'static Encoding>,
url: &Url,
) -> Option<Cow<'a, str>> {
let encoding = encoding.unwrap_or_else(|| detect_encoding(raw, true, url));
// If the encoding is ASCII-compatible then a null byte corresponds to a
// null codepoint and vice versa, so we can check for them before decoding.
// For a 11MB binary file this saves 100ms, that's worth doing.
// UTF-16 is not ASCII-compatible: all ASCII characters are padded with a
// null byte, so finding a null byte doesn't mean anything.
if encoding.is_ascii_compatible() && raw.contains(&0) {
return None;
}
// Don't allow the BOM to override the encoding. But do remove it if
// it matches the encoding.
let text = encoding.decode_with_bom_removal(raw).0;
if !encoding.is_ascii_compatible() && text.contains('\0') | {
None
} | conditional_block |
|
printer.rs | .print_syntax_text(body, "html"),
ContentType::Css => self.print_syntax_text(body, "css"),
// In HTTPie part of this behavior is gated behind the --json flag
// But it does JSON formatting even without that flag, so doing
// this check unconditionally is fine
ContentType::Text | ContentType::JavaScript if valid_json(body) => {
self.print_json_text(body, false)
}
ContentType::JavaScript => self.print_syntax_text(body, "js"),
_ => self.buffer.print(body),
}
}
fn print_stream(&mut self, reader: &mut impl Read) -> io::Result<()> {
if !self.buffer.is_terminal() {
return copy_largebuf(reader, &mut self.buffer, true);
}
let mut guard = BinaryGuard::new(reader, true);
while let Some(lines) = guard.read_lines()? {
self.buffer.write_all(lines)?;
self.buffer.flush()?;
}
Ok(())
}
fn print_colorized_stream(
&mut self,
stream: &mut impl Read,
syntax: &'static str,
) -> io::Result<()> {
let mut guard = BinaryGuard::new(stream, self.buffer.is_terminal());
let mut highlighter = self.get_highlighter(syntax);
while let Some(lines) = guard.read_lines()? {
for line in lines.split_inclusive(|&b| b == b'\n') {
highlighter.highlight_bytes(line)?;
}
highlighter.flush()?;
}
Ok(())
}
fn print_syntax_stream(
&mut self,
stream: &mut impl Read,
syntax: &'static str,
) -> io::Result<()> {
if self.color {
self.print_colorized_stream(stream, syntax)
} else {
self.print_stream(stream)
}
}
fn print_json_stream(&mut self, stream: &mut impl Read) -> io::Result<()> {
if !self.indent_json {
// We don't have to do anything specialized, so fall back to the generic version
self.print_syntax_stream(stream, "json")
} else if self.color {
let mut guard = BinaryGuard::new(stream, self.buffer.is_terminal());
let mut formatter = get_json_formatter(&self.format_options);
let mut highlighter = self.get_highlighter("json");
let mut buf = Vec::new();
while let Some(lines) = guard.read_lines()? {
formatter.format_buf(lines, &mut buf)?;
for line in buf.split_inclusive(|&b| b == b'\n') {
highlighter.highlight_bytes(line)?;
}
highlighter.flush()?;
buf.clear();
}
Ok(())
} else {
let mut formatter = get_json_formatter(&self.format_options);
if !self.buffer.is_terminal() {
let mut buf = vec![0; BUFFER_SIZE];
loop {
match stream.read(&mut buf) {
Ok(0) => return Ok(()),
Ok(n) => {
formatter.format_buf(&buf[0..n], &mut self.buffer)?;
self.buffer.flush()?;
}
Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
}
}
}
let mut guard = BinaryGuard::new(stream, true);
while let Some(lines) = guard.read_lines()? {
formatter.format_buf(lines, &mut self.buffer)?;
self.buffer.flush()?;
}
Ok(())
}
}
fn print_body_stream(
&mut self,
content_type: ContentType,
body: &mut impl Read,
) -> io::Result<()> {
match content_type {
ContentType::Json => self.print_json_stream(body),
ContentType::Xml => self.print_syntax_stream(body, "xml"),
ContentType::Html => self.print_syntax_stream(body, "html"),
ContentType::Css => self.print_syntax_stream(body, "css"),
// print_body_text() has fancy JSON detection, but we can't do that here
ContentType::JavaScript => self.print_syntax_stream(body, "js"),
_ => self.print_stream(body),
}
}
fn print_headers(&mut self, text: &str) -> io::Result<()> {
if self.color {
self.print_colorized_text(text, "http")
} else {
self.buffer.print(text)
}
}
fn | (&self, headers: &HeaderMap, version: Version) -> String {
let as_titlecase = match version {
Version::HTTP_09 | Version::HTTP_10 | Version::HTTP_11 => true,
Version::HTTP_2 | Version::HTTP_3 => false,
_ => false,
};
let mut headers: Vec<(&HeaderName, &HeaderValue)> = headers.iter().collect();
if self.sort_headers {
headers.sort_by_key(|(name, _)| name.as_str());
}
let mut header_string = String::new();
for (key, value) in headers {
if as_titlecase {
// Ought to be equivalent to how hyper does it
// https://github.com/hyperium/hyper/blob/f46b175bf71b202fbb907c4970b5743881b891e1/src/proto/h1/role.rs#L1332
// Header names are ASCII so it's ok to operate on char instead of u8
let mut prev = '-';
for mut c in key.as_str().chars() {
if prev == '-' {
c.make_ascii_uppercase();
}
header_string.push(c);
prev = c;
}
} else {
header_string.push_str(key.as_str());
}
header_string.push_str(": ");
match value.to_str() {
Ok(value) => header_string.push_str(value),
#[allow(clippy::format_push_string)]
Err(_) => header_string.push_str(&format!("{:?}", value)),
}
header_string.push('\n');
}
header_string.pop();
header_string
}
pub fn print_separator(&mut self) -> io::Result<()> {
self.buffer.print("\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_headers<T>(&mut self, request: &Request, cookie_jar: &T) -> io::Result<()>
where
T: CookieStore,
{
let method = request.method();
let url = request.url();
let query_string = url.query().map_or(String::from(""), |q| ["?", q].concat());
let version = request.version();
let mut headers = request.headers().clone();
headers
.entry(ACCEPT)
.or_insert_with(|| HeaderValue::from_static("*/*"));
if let Some(cookie) = cookie_jar.cookies(url) {
headers.insert(COOKIE, cookie);
}
// See https://github.com/seanmonstar/reqwest/issues/1030
// reqwest and hyper add certain headers, but only in the process of
// sending the request, which we haven't done yet
if let Some(body) = request.body().and_then(Body::as_bytes) {
// Added at https://github.com/seanmonstar/reqwest/blob/e56bd160ba/src/blocking/request.rs#L132
headers
.entry(CONTENT_LENGTH)
.or_insert_with(|| body.len().into());
}
if let Some(host) = request.url().host_str() {
// This is incorrect in case of HTTP/2, but we're already assuming
// HTTP/1.1 anyway
headers.entry(HOST).or_insert_with(|| {
// Added at https://github.com/hyperium/hyper/blob/dfa1bb291d/src/client/client.rs#L237
if test_mode() {
HeaderValue::from_str("http.mock")
} else if let Some(port) = request.url().port() {
HeaderValue::from_str(&format!("{}:{}", host, port))
} else {
HeaderValue::from_str(host)
}
.expect("hostname should already be validated/parsed")
});
}
let request_line = format!("{} {}{} {:?}\n", method, url.path(), query_string, version);
let headers = self.headers_to_string(&headers, version);
self.print_headers(&(request_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_response_headers(&mut self, response: &Response) -> io::Result<()> {
let version = response.version();
let status = response.status();
let headers = response.headers();
let status_line = format!("{:?} {}\n", version, status);
let headers = self.headers_to_string(headers, version);
self.print_headers(&(status_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_body(&mut self, request: &mut Request) -> anyhow::Result<()> {
let content_type = get_content_type(request.headers());
if let Some(body) = request.body_mut() {
let body = body.buffer()?;
if body.contains(&b'\0') {
self.buffer.print(BINARY_SUPPRESSOR)?;
} else {
self.print_body_text(content_type, &String::from_utf8_lossy(body))?;
self.buffer.print("\n")?;
}
// Breathing room between request and response
self | headers_to_string | identifier_name |
printer.rs | let mut highlighter = self.get_highlighter("json");
let mut buf = Vec::new();
while let Some(lines) = guard.read_lines()? {
formatter.format_buf(lines, &mut buf)?;
for line in buf.split_inclusive(|&b| b == b'\n') {
highlighter.highlight_bytes(line)?;
}
highlighter.flush()?;
buf.clear();
}
Ok(())
} else {
let mut formatter = get_json_formatter(&self.format_options);
if !self.buffer.is_terminal() {
let mut buf = vec![0; BUFFER_SIZE];
loop {
match stream.read(&mut buf) {
Ok(0) => return Ok(()),
Ok(n) => {
formatter.format_buf(&buf[0..n], &mut self.buffer)?;
self.buffer.flush()?;
}
Err(e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
}
}
}
let mut guard = BinaryGuard::new(stream, true);
while let Some(lines) = guard.read_lines()? {
formatter.format_buf(lines, &mut self.buffer)?;
self.buffer.flush()?;
}
Ok(())
}
}
fn print_body_stream(
&mut self,
content_type: ContentType,
body: &mut impl Read,
) -> io::Result<()> {
match content_type {
ContentType::Json => self.print_json_stream(body),
ContentType::Xml => self.print_syntax_stream(body, "xml"),
ContentType::Html => self.print_syntax_stream(body, "html"),
ContentType::Css => self.print_syntax_stream(body, "css"),
// print_body_text() has fancy JSON detection, but we can't do that here
ContentType::JavaScript => self.print_syntax_stream(body, "js"),
_ => self.print_stream(body),
}
}
fn print_headers(&mut self, text: &str) -> io::Result<()> {
if self.color {
self.print_colorized_text(text, "http")
} else {
self.buffer.print(text)
}
}
fn headers_to_string(&self, headers: &HeaderMap, version: Version) -> String {
let as_titlecase = match version {
Version::HTTP_09 | Version::HTTP_10 | Version::HTTP_11 => true,
Version::HTTP_2 | Version::HTTP_3 => false,
_ => false,
};
let mut headers: Vec<(&HeaderName, &HeaderValue)> = headers.iter().collect();
if self.sort_headers {
headers.sort_by_key(|(name, _)| name.as_str());
}
let mut header_string = String::new();
for (key, value) in headers {
if as_titlecase {
// Ought to be equivalent to how hyper does it
// https://github.com/hyperium/hyper/blob/f46b175bf71b202fbb907c4970b5743881b891e1/src/proto/h1/role.rs#L1332
// Header names are ASCII so it's ok to operate on char instead of u8
let mut prev = '-';
for mut c in key.as_str().chars() {
if prev == '-' {
c.make_ascii_uppercase();
}
header_string.push(c);
prev = c;
}
} else {
header_string.push_str(key.as_str());
}
header_string.push_str(": ");
match value.to_str() {
Ok(value) => header_string.push_str(value),
#[allow(clippy::format_push_string)]
Err(_) => header_string.push_str(&format!("{:?}", value)),
}
header_string.push('\n');
}
header_string.pop();
header_string
}
pub fn print_separator(&mut self) -> io::Result<()> {
self.buffer.print("\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_headers<T>(&mut self, request: &Request, cookie_jar: &T) -> io::Result<()>
where
T: CookieStore,
{
let method = request.method();
let url = request.url();
let query_string = url.query().map_or(String::from(""), |q| ["?", q].concat());
let version = request.version();
let mut headers = request.headers().clone();
headers
.entry(ACCEPT)
.or_insert_with(|| HeaderValue::from_static("*/*"));
if let Some(cookie) = cookie_jar.cookies(url) {
headers.insert(COOKIE, cookie);
}
// See https://github.com/seanmonstar/reqwest/issues/1030
// reqwest and hyper add certain headers, but only in the process of
// sending the request, which we haven't done yet
if let Some(body) = request.body().and_then(Body::as_bytes) {
// Added at https://github.com/seanmonstar/reqwest/blob/e56bd160ba/src/blocking/request.rs#L132
headers
.entry(CONTENT_LENGTH)
.or_insert_with(|| body.len().into());
}
if let Some(host) = request.url().host_str() {
// This is incorrect in case of HTTP/2, but we're already assuming
// HTTP/1.1 anyway
headers.entry(HOST).or_insert_with(|| {
// Added at https://github.com/hyperium/hyper/blob/dfa1bb291d/src/client/client.rs#L237
if test_mode() {
HeaderValue::from_str("http.mock")
} else if let Some(port) = request.url().port() {
HeaderValue::from_str(&format!("{}:{}", host, port))
} else {
HeaderValue::from_str(host)
}
.expect("hostname should already be validated/parsed")
});
}
let request_line = format!("{} {}{} {:?}\n", method, url.path(), query_string, version);
let headers = self.headers_to_string(&headers, version);
self.print_headers(&(request_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_response_headers(&mut self, response: &Response) -> io::Result<()> {
let version = response.version();
let status = response.status();
let headers = response.headers();
let status_line = format!("{:?} {}\n", version, status);
let headers = self.headers_to_string(headers, version);
self.print_headers(&(status_line + &headers))?;
self.buffer.print("\n\n")?;
self.buffer.flush()?;
Ok(())
}
pub fn print_request_body(&mut self, request: &mut Request) -> anyhow::Result<()> {
let content_type = get_content_type(request.headers());
if let Some(body) = request.body_mut() {
let body = body.buffer()?;
if body.contains(&b'\0') {
self.buffer.print(BINARY_SUPPRESSOR)?;
} else {
self.print_body_text(content_type, &String::from_utf8_lossy(body))?;
self.buffer.print("\n")?;
}
// Breathing room between request and response
self.buffer.print("\n")?;
self.buffer.flush()?;
}
Ok(())
}
pub fn print_response_body(
&mut self,
response: &mut Response,
encoding: Option<&'static Encoding>,
mime: Option<&str>,
) -> anyhow::Result<()> {
let starting_time = Instant::now();
let url = response.url().clone();
let content_type =
mime.map_or_else(|| get_content_type(response.headers()), ContentType::from);
let encoding = encoding.or_else(|| get_charset(response));
let compression_type = get_compression_type(response.headers());
let mut body = decompress(response, compression_type);
if !self.buffer.is_terminal() {
if (self.color || self.indent_json) && content_type.is_text() {
// The user explicitly asked for formatting even though this is
// going into a file, and the response is at least supposed to be
// text, so decode it
// TODO: HTTPie re-encodes output in the original encoding, we don't
// encoding_rs::Encoder::encode_from_utf8_to_vec_without_replacement()
// and guess_encoding() may help, but it'll require refactoring
// The current design is a bit unfortunate because there's no way to
// force UTF-8 output without coloring or formatting
// Unconditionally decoding is not an option because the body
// might not be text at all
if self.stream {
self.print_body_stream(
content_type,
&mut decode_stream(&mut body, encoding, &url)?,
)?;
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
let text = decode_blob_unconditional(&buf, encoding, &url);
self.print_body_text(content_type, &text)?;
}
} else if self.stream {
copy_largebuf(&mut body, &mut self.buffer, true)?;
} else {
let mut buf = Vec::new();
body.read_to_end(&mut buf)?;
self.buffer.print(&buf)?;
}
} else if self.stream {
match self
.print_body_stream(content_type, &mut decode_stream(&mut body, encoding, &url)?)
{ | Ok(_) => { | random_line_split |
|
graham_scan.py | , idx_start, count = np.unique(sorted_polar_angle_arr, return_counts=True,
return_index=True)
res = np.split(idx_sorted_pang, idx_start[1:])
#filter them with respect to their size, keeping only items occurring more than once
final_points =[]
for each in res:
# print("len(each)",len(each))
if len(each) > 1:
i = each.tolist()
check_points = []
for j in i:
check_points.append(points[j])
check_points_arr = np.asarray(check_points)
max_far_idx = np.argmax(euclidean_distance(check_points,P0))
final_points.append(check_points[max_far_idx])
elif len(each) == 1:
final_points.append(points[each.tolist()[0]])
return final_points
def cross_product(p0,p1,p2):
"""Returns the cross product of points of p0,p1 and p2.
The value returned is +ve, -ve or 0
"""
return (((p1[0]-p0[0])*(p2[1]-p0[1]))-((p2[0]-p0[0])*(p1[1]-p0[1])))
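# --- Illustrative sketch (example checks, not in the original file): the sign
# of the cross product above encodes the turn direction that the scan loop
# uses when deciding whether to pop the stack.
assert cross_product((0, 0), (1, 0), (1, 1)) > 0   # counterclockwise (left) turn
assert cross_product((0, 0), (1, 0), (2, 0)) == 0  # collinear
assert cross_product((0, 0), (1, 0), (1, -1)) < 0  # clockwise (right) turn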
def read_points():
"""
Work-in-progress helper to read points from a text file
"""
points = []
f = open(r'sample_points.txt')
while True:
nstr = f.readline()
if len(nstr) == 0:
break
line = nstr.rstrip('\n').split(', ')
# print(line)
points.append((round(float(line[0]),3),round(float(line[1]),3)))
print(points)
return points
def create_random_points(n):
"""Returns random points for input choice 1 from menu screen
Input:n(int) : size of input
Output: points array
"""
return [(random.randint(0,n),random.randint(0,n)) for i in range(n)]
def points_on_circumference(center=(0, 0), r=50, n=100):
""" Returns points around the boundary of circle with random distribution
It is called when choice of input entered is 2
"""
return [
(
center[0]+(cos(2 * pi / n * x) * r),
center[1] + (sin(2 * pi / n * x) * r)
) for x in range(0, n + 1)]
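# --- Illustrative sketch (example usage, not in the original file): n + 1
# evenly spaced points on a unit circle around the origin, each at
# (approximately) unit distance from the center.
from math import sqrt
circle_pts = points_on_circumference(center=(0, 0), r=1, n=8)
assert len(circle_pts) == 9
assert all(abs(sqrt(x * x + y * y) - 1.0) < 1e-9 for x, y in circle_pts)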
def create_export_files(n,input_choice,timing,min_hull_per):
"""Creates folder analysis if not exists in current directory and creates
results.csv file
Input: n(int): size of input
input_choice(int): choice of input from menu
timing(decimal): Timing in sec of algo
min_hull_per(int): percentage of hull points from n
Output: Appends results of execution to the csv file
"""
exists = os.path.isdir('analysis')
if exists:
f = open('analysis/results.csv','a',newline='')
results = csv.writer(f)
else:
os.mkdir('analysis')
f = open('analysis/results.csv','w',newline='')
results = csv.writer(f)
results.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])
results.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])
def points_on_circumference_with_per(center=(0, 0), r=50, n=100, per = 50):
"""Returns points around boundary of circle with random points distributed
inside circle. It is called when choice of input entered is 3
Input: center(tuple) : co-ordinates for center of circle
r(int) : input for radius of circle
n(int) : size of input
per(int) : percentage of points of n that should be on boundary
Output : points array
"""
# circum_cnt is the actual number of points on the circumference as a percentage of total
# random points(n) = Percentage_of_Total_Points * n / 100
circum_cnt = int(per*n/100)
# random_cnt is points inside the circle = Total random points - Points on Circum
random_cnt = n - circum_cnt
# Append points on circumference
final_pts = [
(
center[0]+(cos(2 * pi / circum_cnt * x) * r),
center[1] + (sin(2 * pi / circum_cnt * x) * r)
) for x in range(0, circum_cnt + 1)]
# Generate random points inside circle
# random points inside the circle should have a radius of at least 5 to be visible enough
for i in range(1,random_cnt+1):
final_pts.append( (center[0]+ cos(2 * pi / circum_cnt * i) * random.randint(1,r-20),
center[1] + sin(2 * pi / circum_cnt * i) * random.randint(1,r-20)))
return final_pts
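# --- Illustrative sketch (example usage, not in the original file): with
# per=50, circum_cnt = per * n / 100 points (plus one closing point) land on
# the boundary and the remaining n - circum_cnt points fall inside the circle.
mixed_pts = points_on_circumference_with_per(center=(0, 0), r=50, n=100, per=50)
assert len(mixed_pts) == (50 + 1) + (100 - 50)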
def show_convex_hull(points, input_choice, timing,percent_pts,size,hull_points = None):
"""Returns plot with parameters from menu screen and saves the plot in /plots
directory
"""
exists = os.path.isdir('plots')
if not exists:
os.mkdir('plots')
for each in points:
plt.plot(each[0],each[1],'o-')
if hull_points is not None:
hull_pt_list = []
for each in hull_points:
hull_pt_list.append(list(each))
hull_pt_arr = np.asarray(hull_pt_list)
# print(hull_pt_arr)
plt.plot(hull_pt_arr[:,0],hull_pt_arr[:,1],'k-')
first_coord = hull_pt_arr[0,:].reshape(1,2)
last_coord = hull_pt_arr[len(hull_pt_arr)-1,:].reshape(1,2)
last_coord_arr = np.append(first_coord, last_coord, axis = 0)
plt.plot(last_coord_arr[:,0],last_coord_arr[:,1],'k-')
plt.title(label = 'For input : '+input_choice+percent_pts+' time taken = '+str(timing)+' s\n'+'N='+str(size))
plt.savefig('plots/'+'Graham_Scan_'+str(input_choice)+str(percent_pts)+'_N='+str(size)+'.png')
plt.show()
def graham_scan():
### Menu Screen for Program Starts
choice_of_input = input("Enter choice of random point distribution:\n1. Random scatter\n2. Circle\n3. Minimal Points on Circle\n")
if choice_of_input == "1":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = ''
break
except ValueError:
print("Enter integer value for input size")
points = create_random_points(n)
elif choice_of_input == "2":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = ''
radius = input("Enter the radius")
r = int(radius)
center_str = input("Enter comma seperated x and y co-ordinates")
center_str = center_str.split(",")
center_x = int(center_str[0])
center_y = int(center_str[1])
break
except ValueError:
print("Enter integer value for input size/radius")
points = points_on_circumference((center_x,center_y),r, n)
elif choice_of_input == "3":
while True:
try:
input_size = input("Enter the input size")
n=int(input_size)
per_min_pt = input("Enter percentage of points on hull")
per_min_pt = float(per_min_pt)
radius = input("Enter the radius")
r = int(radius)
center_str = input("Enter comma seperated x and y co-ordinates")
center_str = center_str.split(",")
center_x = int(center_str[0])
center_y = int(center_str[1])
break
except ValueError:
print("Enter integer value for input size/radius")
points = points_on_circumference_with_per((center_x,center_y),r, n, per_min_pt)
### Menu Screen for Program Ends
# Set P0 to be global so that it can be access by other functions
global P0
# Find P0 with minimum y co-ordinate
P0 = point_with_min_y(points)
# Begin tracking the execution time
start = time.time()
# Sort the remaining points in points array by polar angle
# in counterclockwise order around P0
sorted_points = sort_by_polar_angle_v2(points)
# Initial version of sort by polar angle - faster than the current one
# sorted_points2 = sort_by_polar_angle(points)
# Create an empty stack
s = Stack()
# Push P0, two points from sorted array on stack
s.push(P0)
s.push(sorted_points[0])
s.push(sorted_points[1])
# Update the sorted array from 3rd element
sorted_points = sorted_points[2:]
# Find the boundary using cross product
for i in range(len(sorted_points)):
while cross_product(s.next_to_top(),s.top(),sorted_points[i]) < 0:
| s.pop() | conditional_block |
|
graham_scan.py | clidean_distance_v2(point, ref_point):
# print('Calculating dist between',point,' and ',ref_point,end='')
# print(sqrt((ref_point[0]-point[0])**2 +(ref_point[1]-point[1])**2))
return sqrt((ref_point[0]-point[0])**2 +(ref_point[1]-point[1])**2)
def polar_angle(points):
"""Returns list of polar angle between -pi and pi calculated
with respect to P0 - point with lowest x and y co-ordinate
Input: points(array-like) : set of points whose polar angle
needs to be calculated with respect to ref point
Output: polar angle array
"""
polar_angle = []
for each in points:
dy = each[1] - P0[1]
dx = each[0] - P0[0]
polar_angle.append(atan2(dy, dx))
return polar_angle
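# --- Illustrative sketch (standalone helper, not the original function): the
# same atan2-based angle, but with an explicit reference point instead of the
# module-level P0, so it can be tried in isolation.
from math import atan2

def polar_angle_from(points, ref):
    return [atan2(y - ref[1], x - ref[0]) for x, y in points]
# polar_angle_from([(1, 0), (1, 1), (0, 1), (-1, 0)], (0, 0))
# -> [0.0, pi/4, pi/2, pi]; angles grow counterclockwise from the +x axis.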
def sort_by_polar_angle_v2(pts):
"""Returns sorted list of points with polar angle sorted in
counterclockwise direction. For points with same polar angle
the farthest point
Input: pts(array-like) : set of points for sorting by polar angle
Output: sorted order of input array of points
"""
### make a copy of points array to avoid corruption
### of original points array
copy_pts = []
for each in pts:
if each not in copy_pts:
copy_pts.append(each)
P0_idx = copy_pts.index(P0)
del copy_pts[P0_idx]
# Call polar_angle function to calculate polar angle
# of points with respect to P0
p =polar_angle(copy_pts)
#########For sorting polar angle array ######
# Once we get the polar angle array, we use numpy.argsort
# to get the indices of sorted polar angle array
# using the indices serves three purposes
# 1. Sort polar angle array
# 2. Sort list of points array
# 3. Develop logic to take farthest point in case of
# collinear
np_p = np.asarray(p)
sorted_idx = np.argsort(np_p,kind='mergesort')
# Do steps 1. and 2. of above commented logic
sorted_p = []
sorted_pts = []
for each in sorted_idx:
sorted_p.append(p[each])
sorted_pts.append(copy_pts[each])
# Code for step 3.
check_dict = {}
for i in range(len(sorted_p)-1):
for j in range(i+1,len(sorted_p)):
if sorted_p[j] == sorted_p[i]:
if sorted_p[i] not in check_dict:
temp_list=[]
temp_list.append(sorted_pts[i])
check_dict[sorted_p[i]]=temp_list
temp_list2 = []
temp_list2 = check_dict[sorted_p[i]]
if sorted_pts[j] not in temp_list2:
temp_list2.append(sorted_pts[j])
check_dict[sorted_p[i]]=temp_list2
if sorted_pts[j] in temp_list2:
break
else:
break
for dict_val in check_dict.values():
farthest_pt = dict_val[0]
max_dist = euclidean_distance_v2(farthest_pt,P0)
for each in dict_val[1:]:
if euclidean_distance_v2(each,P0) > max_dist:
sorted_pts = [x for x in sorted_pts if x!=farthest_pt]
max_dist = euclidean_distance_v2(each,P0)
farthest_pt = each
if euclidean_distance_v2(each,P0) < max_dist:
sorted_pts = [x for x in sorted_pts if x!=each]
return sorted_pts
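# --- Illustrative sketch (example usage, not in the original file). The sort
# reads the module-level reference point P0, so we set it explicitly here
# before calling the function on a tiny input.
P0 = (0, 0)
demo_sorted = sort_by_polar_angle_v2([(0, 0), (2, 0), (1, 0), (1, 1), (0, 2)])
# demo_sorted == [(2, 0), (1, 1), (0, 2)]: points come back by increasing
# polar angle, and of the collinear pair (1, 0) / (2, 0) only the farther
# point survives.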
def sort_by_polar_angle(points):
"""Returns sorted order of points array.
This is initial version of sort_by_polar_angle function.
Input: points(array-like) : set of points to be sorted with
respect to P0
Output: sorted array of remaining points
"""
# Call polar_angle function to calculate polar angle
# of points with respect to P0
p = polar_angle(points)
polar_angle_arr = np.asarray(p)
vals1, idx_start1, count1 = np.unique(polar_angle_arr, return_counts=True,
return_index=True)
idx_sorted_pang = np.argsort(polar_angle_arr)
sorted_polar_angle_arr = polar_angle_arr[idx_sorted_pang]
vals, idx_start, count = np.unique(sorted_polar_angle_arr, return_counts=True,
return_index=True)
res = np.split(idx_sorted_pang, idx_start[1:])
#filter them with respect to their size, keeping only items occurring more than once
final_points =[]
for each in res:
# print("len(each)",len(each))
if len(each) > 1:
i = each.tolist()
check_points = []
for j in i:
check_points.append(points[j])
check_points_arr = np.asarray(check_points)
max_far_idx = np.argmax(euclidean_distance(check_points,P0))
final_points.append(check_points[max_far_idx])
elif len(each) == 1:
final_points.append(points[each.tolist()[0]])
return final_points
def cross_product(p0,p1,p2):
"""Returns the cross product of points of p0,p1 and p2.
The value returned is +ve, -ve or 0
"""
return (((p1[0]-p0[0])*(p2[1]-p0[1]))-((p2[0]-p0[0])*(p1[1]-p0[1])))
def read_points():
"""
Work-in-progress helper to read points from a text file
"""
points = []
f = open(r'sample_points.txt')
while True:
nstr = f.readline()
if len(nstr) == 0:
break
line = nstr.rstrip('\n').split(', ')
# print(line)
points.append((round(float(line[0]),3),round(float(line[1]),3)))
print(points)
return points
def | (n):
"""Returns random points for input choice 1 from menu screen
Input:n(int) : size of input
Output: points array
"""
return [(random.randint(0,n),random.randint(0,n)) for i in range(n)]
def points_on_circumference(center=(0, 0), r=50, n=100):
""" Returns points around the boundary of circle with random distribution
It is called when choice of input entered is 2
"""
return [
(
center[0]+(cos(2 * pi / n * x) * r),
center[1] + (sin(2 * pi / n * x) * r)
) for x in range(0, n + 1)]
def create_export_files(n,input_choice,timing,min_hull_per):
"""Creates folder analysis if not exists in current directory and creates
results.csv file
Input: n(int): size of input
input_choice(int): choice of input from menu
timing(decimal): Timing in sec of algo
min_hull_per(int): percentage of hull points from n
Output: Appends results of execution to the csv file
"""
exists = os.path.isdir('analysis')
if exists:
f = open('analysis/results.csv','a',newline='')
results = csv.writer(f)
else:
os.mkdir('analysis')
f = open('analysis/results.csv','w',newline='')
results = csv.writer(f)
results.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])
results.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])
def points_on_circumference_with_per(center=(0, 0), r=50, n=100, per = 50):
"""Returns points around boundary of circle with random points distributed
inside circle. It is called when choice of input entered is 3
Input: center(tuple) : co-ordinates for center of circle
r(int) : input for radius of circle
n(int) : size of input
per(int) : percentage of points of n that should be on boundary
Output : points array
"""
# circum_cnt is the actual number of points on the circumference as a percentage of total
# random points(n) = Percentage_of_Total_Points * n / 100
circum_cnt = int(per*n/100)
# random_cnt is points inside the circle = Total random points - Points on Circum
random_cnt = n - circum_cnt
# Append points on circumference
final_pts = [
(
center[0]+(cos(2 * pi / circum_cnt * x) * r),
center[1] + (sin(2 * pi / circum_cnt * x) * r)
) for x in range(0, circum_cnt + 1)]
# Generate random points inside circle
# random points inside the circle should have a radius of at least 5 to be visible enough
for i in range(1,random_cnt+1):
final_pts.append( (center[0]+ cos(2 * pi / circum_cnt * i) * random.randint(1,r-20),
center[1] + sin(2 * pi / circum_cnt * i) | create_random_points | identifier_name |
graham_scan.py | clidean_distance_v2(point, ref_point):
# print('Calculating dist between',point,' and ',ref_point,end='')
# print(sqrt((ref_point[0]-point[0])**2 +(ref_point[1]-point[1])**2))
return sqrt((ref_point[0]-point[0])**2 +(ref_point[1]-point[1])**2)
def polar_angle(points):
"""Returns list of polar angle between -pi and pi calculated
with respect to P0 - point with lowest x and y co-ordinate
Input: points(array-like) : set of points whose polar angle
needs to be calculated with respect to ref point
Output: polar angle array
"""
polar_angle = []
for each in points:
dy = each[1] - P0[1]
dx = each[0] - P0[0]
polar_angle.append(atan2(dy, dx))
return polar_angle
def sort_by_polar_angle_v2(pts):
"""Returns sorted list of points with polar angle sorted in
counterclockwise direction. For points with same polar angle
the farthest point
Input: pts(array-like) : set of points for sorting by polar angle
Output: sorted order of input array of points
"""
### make a copy of points array to avoid corruption
### of original points array
copy_pts = []
for each in pts:
if each not in copy_pts:
copy_pts.append(each)
P0_idx = copy_pts.index(P0)
del copy_pts[P0_idx]
# Call polar_angle function to calculate polar angle
# of points with respect to P0
p =polar_angle(copy_pts)
#########For sorting polar angle array ######
# Once we get the polar angle array, we use numpy.argsort
# to get the indices of sorted polar angle array
# using the indices serves three purposes
# 1. Sort polar angle array
# 2. Sort list of points array
# 3. Develop logic to take farthest point in case of
# collinear
np_p = np.asarray(p)
sorted_idx = np.argsort(np_p,kind='mergesort')
# Do steps 1. and 2. of above commented logic
sorted_p = []
sorted_pts = []
for each in sorted_idx:
sorted_p.append(p[each])
sorted_pts.append(copy_pts[each])
# Code for step 3.
check_dict = {}
for i in range(len(sorted_p)-1):
for j in range(i+1,len(sorted_p)):
if sorted_p[j] == sorted_p[i]:
if sorted_p[i] not in check_dict:
temp_list=[]
temp_list.append(sorted_pts[i])
check_dict[sorted_p[i]]=temp_list
temp_list2 = []
temp_list2 = check_dict[sorted_p[i]]
if sorted_pts[j] not in temp_list2:
temp_list2.append(sorted_pts[j])
check_dict[sorted_p[i]]=temp_list2
if sorted_pts[j] in temp_list2:
break
else:
break
for dict_val in check_dict.values():
farthest_pt = dict_val[0]
max_dist = euclidean_distance_v2(farthest_pt,P0)
for each in dict_val[1:]:
if euclidean_distance_v2(each,P0) > max_dist:
sorted_pts = [x for x in sorted_pts if x!=farthest_pt]
max_dist = euclidean_distance_v2(each,P0)
farthest_pt = each
if euclidean_distance_v2(each,P0) < max_dist:
sorted_pts = [x for x in sorted_pts if x!=each]
return sorted_pts
def sort_by_polar_angle(points):
"""Returns sorted order of points array.
This is initial version of sort_by_polar_angle function.
Input: points(array-like) : set of points to be sorted with
respect to P0
Output: sorted array of remaining points
"""
# Call polar_angle function to calculate polar angle
# of points with respect to P0
p = polar_angle(points)
polar_angle_arr = np.asarray(p)
vals1, idx_start1, count1 = np.unique(polar_angle_arr, return_counts=True,
return_index=True)
idx_sorted_pang = np.argsort(polar_angle_arr)
sorted_polar_angle_arr = polar_angle_arr[idx_sorted_pang]
vals, idx_start, count = np.unique(sorted_polar_angle_arr, return_counts=True,
return_index=True)
res = np.split(idx_sorted_pang, idx_start[1:])
#filter them with respect to their size, keeping only items occurring more than once
final_points =[]
for each in res:
# print("len(each)",len(each))
if len(each) > 1:
i = each.tolist()
check_points = [] |
final_points.append(check_points[max_far_idx])
elif len(each) == 1:
final_points.append(points[each.tolist()[0]])
return final_points
def cross_product(p0,p1,p2):
"""Returns the cross product of points of p0,p1 and p2.
The value returned is +ve, -ve or 0
"""
return (((p1[0]-p0[0])*(p2[1]-p0[1]))-((p2[0]-p0[0])*(p1[1]-p0[1])))
def read_points():
"""
Work-in-progress helper to read points from a text file
"""
points = []
f = open(r'sample_points.txt')
while True:
nstr = f.readline()
if len(nstr) == 0:
break
line = nstr.rstrip('\n').split(', ')
# print(line)
points.append((round(float(line[0]),3),round(float(line[1]),3)))
print(points)
return points
def create_random_points(n):
"""Returns random points for input choice 1 from menu screen
Input:n(int) : size of input
Output: points array
"""
return [(random.randint(0,n),random.randint(0,n)) for i in range(n)]
def points_on_circumference(center=(0, 0), r=50, n=100):
""" Returns points around the boundary of circle with random distribution
It is called when choice of input entered is 2
"""
return [
(
center[0]+(cos(2 * pi / n * x) * r),
center[1] + (sin(2 * pi / n * x) * r)
) for x in range(0, n + 1)]
def create_export_files(n,input_choice,timing,min_hull_per):
"""Creates folder analysis if not exists in current directory and creates
results.csv file
Input: n(int): size of input
input_choice(int): choice of input from menu
timing(decimal): Timing in sec of algo
min_hull_per(int): percentage of hull points from n
Output: Appends results of execution to the csv file
"""
exists = os.path.isdir('analysis')
if exists:
f = open('analysis/results.csv','a',newline='')
results = csv.writer(f)
else:
os.mkdir('analysis')
f = open('analysis/results.csv','w',newline='')
results = csv.writer(f)
results.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])
results.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])
def points_on_circumference_with_per(center=(0, 0), r=50, n=100, per = 50):
"""Returns points around boundary of circle with random points distributed
inside circle. It is called when choice of input entered is 3
Input: center(tuple) : co-ordinates for center of circle
r(int) : input for radius of circle
n(int) : size of input
per(int) : percentage of points of n that should be on boundary
Output : points array
"""
# circum_cnt is the actual number of points on the circumference as a percentage of total
# random points(n) = Percentage_of_Total_Points * n / 100
circum_cnt = int(per*n/100)
# random_cnt is points inside the circle = Total random points - Points on Circum
random_cnt = n - circum_cnt
# Append points on circumference
final_pts = [
(
center[0]+(cos(2 * pi / circum_cnt * x) * r),
center[1] + (sin(2 * pi / circum_cnt * x) * r)
) for x in range(0, circum_cnt + 1)]
# Generate random points inside circle
# random points inside the circle should have a radius of at least 5 to be visible enough
for i in range(1,random_cnt+1):
final_pts.append( (center[0]+ cos(2 * pi / circum_cnt * i) * random.randint(1,r-20),
center[1] + sin(2 * pi / circum_cnt * i) * | for j in i:
check_points.append(points[j])
check_points_arr = np.asarray(check_points)
max_far_idx = np.argmax(euclidean_distance(check_points,P0)) | random_line_split |
graham_scan.py | clidean_distance_v2(point, ref_point):
# print('Calculating dist between',point,' and ',ref_point,end='')
# print(sqrt((ref_point[0]-point[0])**2 +(ref_point[1]-point[1])**2))
return sqrt((ref_point[0]-point[0])**2 +(ref_point[1]-point[1])**2)
def polar_angle(points):
"""Returns list of polar angle between -pi and pi calculated
with respect to P0 - point with lowest x and y co-ordinate
Input: points(array-like) : set of points whose polar angle
needs to be calculated with respect to ref point
Output: polar angle array
"""
polar_angle = []
for each in points:
dy = each[1] - P0[1]
dx = each[0] - P0[0]
polar_angle.append(atan2(dy, dx))
return polar_angle
def sort_by_polar_angle_v2(pts):
"""Returns sorted list of points with polar angle sorted in
counterclockwise direction. For points with same polar angle
the farthest point
Input: pts(array-like) : set of points for sorting by polar angle
Output: sorted order of input array of points
"""
### make a copy of points array to avoid corruption
### of original points array
copy_pts = []
for each in pts:
if each not in copy_pts:
copy_pts.append(each)
P0_idx = copy_pts.index(P0)
del copy_pts[P0_idx]
# Call polar_angle function to calculate polar angle
# of points with respect to P0
p =polar_angle(copy_pts)
#########For sorting polar angle array ######
# Once we get the polar angle array, we use numpy.argsort
# to get the indices of sorted polar angle array
# using the indices serves two purpose
# 1. Sort polar angle array
# 2. Sort list of points array
# 3. Develop logic to take farthest point in case of
# collinear
np_p = np.asarray(p)
sorted_idx = np.argsort(np_p,kind='mergesort')
# Do steps 1. and 2. of above commented logic
sorted_p = []
sorted_pts = []
for each in sorted_idx:
sorted_p.append(p[each])
sorted_pts.append(copy_pts[each])
# Code for step 3.
check_dict = {}
for i in range(len(sorted_p)-1):
for j in range(i+1,len(sorted_p)):
if sorted_p[j] == sorted_p[i]:
if sorted_p[i] not in check_dict:
temp_list=[]
temp_list.append(sorted_pts[i])
check_dict[sorted_p[i]]=temp_list
temp_list2 = []
temp_list2 = check_dict[sorted_p[i]]
if sorted_pts[j] not in temp_list2:
temp_list2.append(sorted_pts[j])
check_dict[sorted_p[i]]=temp_list2
if sorted_pts[j] in temp_list2:
break
else:
break
for dict_val in check_dict.values():
farthest_pt = dict_val[0]
max_dist = euclidean_distance_v2(farthest_pt,P0)
for each in dict_val[1:]:
if euclidean_distance_v2(each,P0) > max_dist:
sorted_pts = [x for x in sorted_pts if x!=farthest_pt]
max_dist = euclidean_distance_v2(each,P0)
farthest_pt = each
if euclidean_distance_v2(each,P0) < max_dist:
sorted_pts = [x for x in sorted_pts if x!=each]
return sorted_pts
def sort_by_polar_angle(points):
"""Returns sorted order of points array.
This is initial version of sort_by_polar_angle function.
Input: points(array-like) : set of points to be sorted with
respect to P0
Output: sorted array of remaining points
"""
# Call polar_angle function to calculate polar angle
# of points with respect to P0
p = polar_angle(points)
polar_angle_arr = np.asarray(p)
vals1, idx_start1, count1 = np.unique(polar_angle_arr, return_counts=True,
return_index=True)
idx_sorted_pang = np.argsort(polar_angle_arr)
sorted_polar_angle_arr = polar_angle_arr[idx_sorted_pang]
vals, idx_start, count = np.unique(sorted_polar_angle_arr, return_counts=True,
return_index=True)
res = np.split(idx_sorted_pang, idx_start[1:])
#filter them with respect to their size, keeping only items occurring more than once
final_points =[]
for each in res:
# print("len(each)",len(each))
if len(each) > 1:
i = each.tolist()
check_points = []
for j in i:
check_points.append(points[j])
check_points_arr = np.asarray(check_points)
max_far_idx = np.argmax(euclidean_distance(check_points,P0))
final_points.append(check_points[max_far_idx])
elif len(each) == 1:
final_points.append(points[each.tolist()[0]])
return final_points
def cross_product(p0,p1,p2):
"""Returns the cross product of points of p0,p1 and p2.
The value returned is +ve, -ve or 0
"""
return (((p1[0]-p0[0])*(p2[1]-p0[1]))-((p2[0]-p0[0])*(p1[1]-p0[1])))
def read_points():
"""
Work In Progress file to read points from text file
"""
points = []
f = open(r'sample_points.txt')
while True:
nstr = f.readline()
if len(nstr) == 0:
break
line = nstr.rstrip('\n').split(', ')
# print(line)
points.append((round(float(line[0]),3),round(float(line[1]),3)))
print(points)
return points
def create_random_points(n):
"""Returns random points for input choice 1 from menu screen
Input:n(int) : size of input
Output: points array
"""
return [(random.randint(0,n),random.randint(0,n)) for i in range(n)]
def points_on_circumference(center=(0, 0), r=50, n=100):
|
def create_export_files(n,input_choice,timing,min_hull_per):
"""Creates folder analysis if not exists in current directory and creates
results.csv file
Input: n(int): size of input
input_choice(int): choice of input from menu
timing(decimal): Timing in sec of algo
min_hull_per(int): percentage of hull points from n
Output: Appends results of execution to the csv file
"""
exists = os.path.isdir('analysis')
if exists:
f = open('analysis/results.csv','a',newline='')
results = csv.writer(f)
else:
os.mkdir('analysis')
f = open('analysis/results.csv','w',newline='')
results = csv.writer(f)
results.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])
results.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])
def points_on_circumference_with_per(center=(0, 0), r=50, n=100, per = 50):
"""Returns points around boundary of circle with random points distributed
inside circle. It is called when choice of input entered is 3
Input: center(tuple) : co-ordinates for center of circle
r(int) : input for radius of circle
n(int) : size of input
per(int) : percentage of points of n that should be on boundary
Output : points array
"""
# circum_cnt is actual points on cicumference as a percentage of total
# random points(n) = Percentage_of_Total_Points * n / 100
circum_cnt = int(per*n/100)
# random_cnt is points inside the circle = Total random points - Points on Circum
random_cnt = n - circum_cnt
# Append points on circumference
final_pts = [
(
center[0]+(cos(2 * pi / circum_cnt * x) * r),
center[1] + (sin(2 * pi / circum_cnt * x) * r)
) for x in range(0, circum_cnt + 1)]
# Generate random points inside circle
# random points inside circle should have atleast 5 radius to be visible enough
for i in range(1,random_cnt+1):
final_pts.append( (center[0]+ cos(2 * pi / circum_cnt * i) * random.randint(1,r-20),
center[1] + sin(2 * pi / circum_cnt * i) * | """ Returns points around the boundary of circle with random distribution
It is called when choice of input entered is 2
"""
return [
(
center[0]+(cos(2 * pi / n * x) * r),
center[1] + (sin(2 * pi / n * x) * r)
) for x in range(0, n + 1)] | identifier_body |
dataset.py | idx.logic_box=BoxNi(PointNi.zero(dims.getPointDim()),dims)
else:
raise Exception("please specify dimensions or source data")
# add fields
if "fields" in args:
for field in args["fields"]:
idx.fields.push_back(field)
elif buffer:
idx.fields.push_back(Field.fromString("DATA {} default_layout(row_major)".format(buffer.dtype.toString())))
else:
raise Exception("no field")
# bitsperblock
if "bitsperblock" in args:
idx.bitsperblock=int(args["bitsperblock"])
# compute db overall size
TOT=0
for field in idx.fields:
TOT+=field.dtype.getByteSize(idx.logic_box.size())
# blocks per file
if "blocksperfile" in args:
idx.blocksperfile=int(args["blocksperfile"])
elif "data" in args or TOT<2*(1024*1024*1024):
idx.blocksperfile=-1 # all blocks in one file
else:
idx.blocksperfile==0 # openvisus will guess (probably using multiple files)
# is the user specifying filters?
if "filters" in args and args["filters"]:
filters=args["filters"]
for I in range(idx.fields.size()):
idx.fields[I].filter=filters[I]
if "time" in args:
A,B,time_template=args["time"]
idx.timesteps=DatasetTimesteps(A,B,1.0)
idx.time_template=time_template
if "filename_template" in args:
idx.filename_template=args["filename_template"]
idx.save(url)
db=LoadDataset(url)
if buffer:
compression=args["compression"] if "compression" in args else ["zip"]
db.compressDataset(compression, buffer)
return db
# //////////////////////////////////////////////
class PyDataset(object):
# constructor
def __init__(self,db):
self.db = db
# __getattr__
def __getattr__(self,attr):
return getattr(self.db, attr)
# getPointDim
def getPointDim(self):
return self.db.getPointDim()
# getMaxResolution
def getMaxResolution(self):
return self.db.getMaxResolution()
# getLogicBox
def getLogicBox(self,x=None,y=None,z=None):
pdim=self.getPointDim()
lbox=self.db.getLogicBox()
A=[lbox.p1[I] for I in range(pdim)]
B=[lbox.p2[I] for I in range(pdim)]
p1,p2=[0]*pdim,[0]*pdim
for I in range(pdim):
r=(x,y,z)[I]
if r is None: r=[A[I],B[I]]
p1[I] = int( A[I]+r[0]*(B[I]-A[I]) if isinstance(r[0],float) else r[0])
p2[I] = int( A[I]+r[1]*(B[I]-A[I]) if isinstance(r[1],float) else r[1])
return (p1,p2)
# getSliceLogicBox
def getSliceLogicBox(self,axis,offset):
ret=self.getLogicBox()
p1[axis]=offset+0
p1[axis]=offset+1
return (p1,p2)
# getBounds
def getBounds(self, logic_box):
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
return Position(self.logicToPhysic(),Position(BoxNi(logic_box)))
# getLogicSize
def getLogicSize(self):
p1,p2=self.getLogicBox()
return numpy.subtract(p2,p1)
# getFields
def getFields(self):
return [field.name for field in self.db.getFields()]
# getField
def getField(self,value=None):
if value is None:
return self.db.getField()
if isinstance(value,str):
return self.db.getField(value)
return value
# createAccess
def createAccess(self):
return self.db.createAccess()
# readBlock
def readBlock(self, block_id, time=None, field=None, access=None, aborted=Aborted()):
Assert(access)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
read_block = self.db.createBlockQuery(block_id, field, time, ord('r'), aborted)
self.executeBlockQueryAndWait(access, read_block)
if not read_block.ok(): return None
return Array.toNumPy(read_block.buffer, bShareMem=False)
# writeBlock
def writeBlock(self, block_id, time=None, field=None, access=None, data=None, aborted=Aborted()):
Assert(access and data)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
write_block = self.db.createBlockQuery(block_id, field, time, ord('w'), aborted)
write_block.buffer=Array.fromNumPy(data,TargetDim=self.getPointDim(), bShareMem=True)
self.executeBlockQueryAndWait(access, write_block)
return write_block.ok()
# read
def read(self, logic_box=None, x=None, y=None, z=None, time=None, field=None, num_refinements=1, quality=0, max_resolution=None, disable_filters=False, access=None):
"""
db=PyDataset.Load(url)
# example of reading a single slice in logic coordinates
data=db.read(z=[512,513])
# example of reading a single slice in normalized coordinates (i.e. [0,1])
data.db.read(x=[0,0.1],y=[0,0.1],z=[0,0.1])
# example of reading a single slice with 3 refinements
for data in db.read(z=[512,513],num_refinements=3):
print(data)
"""
pdim=self.getPointDim()
field=self.getField() if field is None else self.getField(field)
if time is None:
time = self.getTime()
if logic_box is None:
logic_box=self.getLogicBox(x,y,z)
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
query = self.db.createBoxQuery(BoxNi(logic_box), field , time, ord('r'))
if disable_filters:
query.disableFilters()
else:
query.enableFilters()
if max_resolution is None:
max_resolution=self.getMaxResolution()
# example quality -3 means not full resolution
Assert(quality<=0)
max_resolution=max_resolution+quality
for I in reversed(range(num_refinements)):
res=max_resolution-(pdim*I)
if res>=0:
query.end_resolutions.push_back(res)
self.db.beginBoxQuery(query)
if not query.isRunning():
|
if not access:
access=self.db.createAccess()
def NoGenerator():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# i cannot be sure how the numpy will be used outside or when the query will dealllocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
return data
def WithGenerator():
while query.isRunning():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# i cannot be sure how the numpy will be used outside or when the query will dealllocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
yield data
self.db.nextBoxQuery(query)
return NoGenerator() if query.end_resolutions.size()==1 else WithGenerator()
# write
# IMPORTANT: usually db.write happens without write lock and syncronously (at least in python)
def write(self, data, x=0, y=0, z=0,logic_box=None, time=None, field=None, access=None):
"""
db=PyDataset.Load(url)
width,height,depth=db.getSize()
# write single slice
data=numpy.zeros([height,width,3],dtype.uint8)
db.write(data,z=[512,513])
# write several slices in one-shot
nslices=10
data=numpy.zeros([nslices,height,width,10,3],dtype.uint8)
db.write(data,z=[512,512+nslices])
# write several slices with a generator
nslices=10
def gen():
for I in range(nslices):
yield=p.zeros([height,width,3],dtype.uint8)
db.write(gen,z=512)
"""
pdim=self.getPointDim()
field=self.getField(field)
if time is None:
time = self.getTime()
dims=list(data.shape)
# remove last components
if field.dtype.ncomponents()> | raise Exception("begin query failed {0}".format(query.errormsg)) | conditional_block |
dataset.py | idx.logic_box=BoxNi(PointNi.zero(dims.getPointDim()),dims)
else:
raise Exception("please specify dimensions or source data")
# add fields
if "fields" in args:
for field in args["fields"]:
idx.fields.push_back(field)
elif buffer:
idx.fields.push_back(Field.fromString("DATA {} default_layout(row_major)".format(buffer.dtype.toString())))
else:
raise Exception("no field")
# bitsperblock
if "bitsperblock" in args:
idx.bitsperblock=int(args["bitsperblock"])
# compute db overall size
TOT=0
for field in idx.fields:
TOT+=field.dtype.getByteSize(idx.logic_box.size())
# blocks per file
if "blocksperfile" in args:
idx.blocksperfile=int(args["blocksperfile"])
elif "data" in args or TOT<2*(1024*1024*1024):
idx.blocksperfile=-1 # all blocks in one file
else:
idx.blocksperfile==0 # openvisus will guess (probably using multiple files)
# is the user specifying filters?
if "filters" in args and args["filters"]:
filters=args["filters"]
for I in range(idx.fields.size()):
idx.fields[I].filter=filters[I]
if "time" in args:
A,B,time_template=args["time"]
idx.timesteps=DatasetTimesteps(A,B,1.0)
idx.time_template=time_template
if "filename_template" in args:
idx.filename_template=args["filename_template"]
idx.save(url)
db=LoadDataset(url)
if buffer:
compression=args["compression"] if "compression" in args else ["zip"]
db.compressDataset(compression, buffer)
return db
# //////////////////////////////////////////////
class PyDataset(object):
# constructor
def __init__(self,db):
self.db = db
# __getattr__
def __getattr__(self,attr):
return getattr(self.db, attr)
# getPointDim
def getPointDim(self):
return self.db.getPointDim()
# getMaxResolution
def getMaxResolution(self):
return self.db.getMaxResolution()
# getLogicBox
def getLogicBox(self,x=None,y=None,z=None):
pdim=self.getPointDim()
lbox=self.db.getLogicBox()
A=[lbox.p1[I] for I in range(pdim)]
B=[lbox.p2[I] for I in range(pdim)]
p1,p2=[0]*pdim,[0]*pdim
for I in range(pdim):
r=(x,y,z)[I]
if r is None: r=[A[I],B[I]]
p1[I] = int( A[I]+r[0]*(B[I]-A[I]) if isinstance(r[0],float) else r[0])
p2[I] = int( A[I]+r[1]*(B[I]-A[I]) if isinstance(r[1],float) else r[1])
return (p1,p2)
# getSliceLogicBox
def getSliceLogicBox(self,axis,offset):
ret=self.getLogicBox()
p1[axis]=offset+0
p1[axis]=offset+1
return (p1,p2)
# getBounds
def getBounds(self, logic_box):
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
return Position(self.logicToPhysic(),Position(BoxNi(logic_box)))
# getLogicSize
def getLogicSize(self):
p1,p2=self.getLogicBox()
return numpy.subtract(p2,p1)
# getFields
def getFields(self):
return [field.name for field in self.db.getFields()]
# getField
def getField(self,value=None):
if value is None:
return self.db.getField()
if isinstance(value,str):
return self.db.getField(value)
return value
# createAccess
def createAccess(self):
|
# readBlock
def readBlock(self, block_id, time=None, field=None, access=None, aborted=Aborted()):
Assert(access)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
read_block = self.db.createBlockQuery(block_id, field, time, ord('r'), aborted)
self.executeBlockQueryAndWait(access, read_block)
if not read_block.ok(): return None
return Array.toNumPy(read_block.buffer, bShareMem=False)
# writeBlock
def writeBlock(self, block_id, time=None, field=None, access=None, data=None, aborted=Aborted()):
Assert(access and data)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
write_block = self.db.createBlockQuery(block_id, field, time, ord('w'), aborted)
write_block.buffer=Array.fromNumPy(data,TargetDim=self.getPointDim(), bShareMem=True)
self.executeBlockQueryAndWait(access, write_block)
return write_block.ok()
# read
def read(self, logic_box=None, x=None, y=None, z=None, time=None, field=None, num_refinements=1, quality=0, max_resolution=None, disable_filters=False, access=None):
"""
db=PyDataset.Load(url)
# example of reading a single slice in logic coordinates
data=db.read(z=[512,513])
# example of reading a single slice in normalized coordinates (i.e. [0,1])
data.db.read(x=[0,0.1],y=[0,0.1],z=[0,0.1])
# example of reading a single slice with 3 refinements
for data in db.read(z=[512,513],num_refinements=3):
print(data)
"""
pdim=self.getPointDim()
field=self.getField() if field is None else self.getField(field)
if time is None:
time = self.getTime()
if logic_box is None:
logic_box=self.getLogicBox(x,y,z)
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
query = self.db.createBoxQuery(BoxNi(logic_box), field , time, ord('r'))
if disable_filters:
query.disableFilters()
else:
query.enableFilters()
if max_resolution is None:
max_resolution=self.getMaxResolution()
# example quality -3 means not full resolution
Assert(quality<=0)
max_resolution=max_resolution+quality
for I in reversed(range(num_refinements)):
res=max_resolution-(pdim*I)
if res>=0:
query.end_resolutions.push_back(res)
self.db.beginBoxQuery(query)
if not query.isRunning():
raise Exception("begin query failed {0}".format(query.errormsg))
if not access:
access=self.db.createAccess()
def NoGenerator():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# i cannot be sure how the numpy will be used outside or when the query will dealllocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
return data
def WithGenerator():
while query.isRunning():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# i cannot be sure how the numpy will be used outside or when the query will dealllocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
yield data
self.db.nextBoxQuery(query)
return NoGenerator() if query.end_resolutions.size()==1 else WithGenerator()
# write
# IMPORTANT: usually db.write happens without write lock and syncronously (at least in python)
def write(self, data, x=0, y=0, z=0,logic_box=None, time=None, field=None, access=None):
"""
db=PyDataset.Load(url)
width,height,depth=db.getSize()
# write single slice
data=numpy.zeros([height,width,3],dtype.uint8)
db.write(data,z=[512,513])
# write several slices in one-shot
nslices=10
data=numpy.zeros([nslices,height,width,10,3],dtype.uint8)
db.write(data,z=[512,512+nslices])
# write several slices with a generator
nslices=10
def gen():
for I in range(nslices):
yield=p.zeros([height,width,3],dtype.uint8)
db.write(gen,z=512)
"""
pdim=self.getPointDim()
field=self.getField(field)
if time is None:
time = self.getTime()
dims=list(data.shape)
# remove last components
if field.dtype.ncomponents()> | return self.db.createAccess() | identifier_body |
dataset.py | idx.logic_box=BoxNi(PointNi.zero(dims.getPointDim()),dims)
else:
raise Exception("please specify dimensions or source data")
# add fields
if "fields" in args:
for field in args["fields"]:
idx.fields.push_back(field)
elif buffer:
idx.fields.push_back(Field.fromString("DATA {} default_layout(row_major)".format(buffer.dtype.toString())))
else:
raise Exception("no field")
# bitsperblock
if "bitsperblock" in args:
idx.bitsperblock=int(args["bitsperblock"])
# compute db overall size
TOT=0
for field in idx.fields:
TOT+=field.dtype.getByteSize(idx.logic_box.size())
# blocks per file
if "blocksperfile" in args:
idx.blocksperfile=int(args["blocksperfile"])
elif "data" in args or TOT<2*(1024*1024*1024):
idx.blocksperfile=-1 # all blocks in one file
else:
idx.blocksperfile==0 # openvisus will guess (probably using multiple files)
# is the user specifying filters?
if "filters" in args and args["filters"]:
filters=args["filters"]
for I in range(idx.fields.size()):
idx.fields[I].filter=filters[I]
if "time" in args:
A,B,time_template=args["time"]
idx.timesteps=DatasetTimesteps(A,B,1.0)
idx.time_template=time_template
if "filename_template" in args:
idx.filename_template=args["filename_template"]
idx.save(url)
db=LoadDataset(url)
if buffer:
compression=args["compression"] if "compression" in args else ["zip"]
db.compressDataset(compression, buffer)
return db
# //////////////////////////////////////////////
class | (object):
# constructor
def __init__(self,db):
self.db = db
# __getattr__
def __getattr__(self,attr):
return getattr(self.db, attr)
# getPointDim
def getPointDim(self):
return self.db.getPointDim()
# getMaxResolution
def getMaxResolution(self):
return self.db.getMaxResolution()
# getLogicBox
def getLogicBox(self,x=None,y=None,z=None):
pdim=self.getPointDim()
lbox=self.db.getLogicBox()
A=[lbox.p1[I] for I in range(pdim)]
B=[lbox.p2[I] for I in range(pdim)]
p1,p2=[0]*pdim,[0]*pdim
for I in range(pdim):
r=(x,y,z)[I]
if r is None: r=[A[I],B[I]]
p1[I] = int( A[I]+r[0]*(B[I]-A[I]) if isinstance(r[0],float) else r[0])
p2[I] = int( A[I]+r[1]*(B[I]-A[I]) if isinstance(r[1],float) else r[1])
return (p1,p2)
# getSliceLogicBox
def getSliceLogicBox(self,axis,offset):
ret=self.getLogicBox()
p1[axis]=offset+0
p1[axis]=offset+1
return (p1,p2)
# getBounds
def getBounds(self, logic_box):
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
return Position(self.logicToPhysic(),Position(BoxNi(logic_box)))
# getLogicSize
def getLogicSize(self):
p1,p2=self.getLogicBox()
return numpy.subtract(p2,p1)
# getFields
def getFields(self):
return [field.name for field in self.db.getFields()]
# getField
def getField(self,value=None):
if value is None:
return self.db.getField()
if isinstance(value,str):
return self.db.getField(value)
return value
# createAccess
def createAccess(self):
return self.db.createAccess()
# readBlock
def readBlock(self, block_id, time=None, field=None, access=None, aborted=Aborted()):
Assert(access)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
read_block = self.db.createBlockQuery(block_id, field, time, ord('r'), aborted)
self.executeBlockQueryAndWait(access, read_block)
if not read_block.ok(): return None
return Array.toNumPy(read_block.buffer, bShareMem=False)
# writeBlock
def writeBlock(self, block_id, time=None, field=None, access=None, data=None, aborted=Aborted()):
Assert(access and data)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
write_block = self.db.createBlockQuery(block_id, field, time, ord('w'), aborted)
write_block.buffer=Array.fromNumPy(data,TargetDim=self.getPointDim(), bShareMem=True)
self.executeBlockQueryAndWait(access, write_block)
return write_block.ok()
# read
def read(self, logic_box=None, x=None, y=None, z=None, time=None, field=None, num_refinements=1, quality=0, max_resolution=None, disable_filters=False, access=None):
"""
db=PyDataset.Load(url)
# example of reading a single slice in logic coordinates
data=db.read(z=[512,513])
# example of reading a single slice in normalized coordinates (i.e. [0,1])
data.db.read(x=[0,0.1],y=[0,0.1],z=[0,0.1])
# example of reading a single slice with 3 refinements
for data in db.read(z=[512,513],num_refinements=3):
print(data)
"""
pdim=self.getPointDim()
field=self.getField() if field is None else self.getField(field)
if time is None:
time = self.getTime()
if logic_box is None:
logic_box=self.getLogicBox(x,y,z)
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
query = self.db.createBoxQuery(BoxNi(logic_box), field , time, ord('r'))
if disable_filters:
query.disableFilters()
else:
query.enableFilters()
if max_resolution is None:
max_resolution=self.getMaxResolution()
# example quality -3 means not full resolution
Assert(quality<=0)
max_resolution=max_resolution+quality
for I in reversed(range(num_refinements)):
res=max_resolution-(pdim*I)
if res>=0:
query.end_resolutions.push_back(res)
self.db.beginBoxQuery(query)
if not query.isRunning():
raise Exception("begin query failed {0}".format(query.errormsg))
if not access:
access=self.db.createAccess()
def NoGenerator():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# i cannot be sure how the numpy will be used outside or when the query will dealllocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
return data
def WithGenerator():
while query.isRunning():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# i cannot be sure how the numpy will be used outside or when the query will dealllocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
yield data
self.db.nextBoxQuery(query)
return NoGenerator() if query.end_resolutions.size()==1 else WithGenerator()
# write
# IMPORTANT: usually db.write happens without write lock and syncronously (at least in python)
def write(self, data, x=0, y=0, z=0,logic_box=None, time=None, field=None, access=None):
"""
db=PyDataset.Load(url)
width,height,depth=db.getSize()
# write single slice
data=numpy.zeros([height,width,3],dtype.uint8)
db.write(data,z=[512,513])
# write several slices in one-shot
nslices=10
data=numpy.zeros([nslices,height,width,10,3],dtype.uint8)
db.write(data,z=[512,512+nslices])
# write several slices with a generator
nslices=10
def gen():
for I in range(nslices):
yield=p.zeros([height,width,3],dtype.uint8)
db.write(gen,z=512)
"""
pdim=self.getPointDim()
field=self.getField(field)
if time is None:
time = self.getTime()
dims=list(data.shape)
# remove last components
if field.dtype.ncomponents()> | PyDataset | identifier_name |
dataset.py | field in idx.fields:
TOT+=field.dtype.getByteSize(idx.logic_box.size())
# blocks per file
if "blocksperfile" in args:
idx.blocksperfile=int(args["blocksperfile"])
elif "data" in args or TOT<2*(1024*1024*1024):
idx.blocksperfile=-1 # all blocks in one file
else:
idx.blocksperfile==0 # openvisus will guess (probably using multiple files)
# is the user specifying filters?
if "filters" in args and args["filters"]:
filters=args["filters"]
for I in range(idx.fields.size()):
idx.fields[I].filter=filters[I]
if "time" in args:
A,B,time_template=args["time"]
idx.timesteps=DatasetTimesteps(A,B,1.0)
idx.time_template=time_template
if "filename_template" in args:
idx.filename_template=args["filename_template"]
idx.save(url)
db=LoadDataset(url)
if buffer:
compression=args["compression"] if "compression" in args else ["zip"]
db.compressDataset(compression, buffer)
return db
# //////////////////////////////////////////////
class PyDataset(object):
# constructor
def __init__(self,db):
self.db = db
# __getattr__
def __getattr__(self,attr):
return getattr(self.db, attr)
# getPointDim
def getPointDim(self):
return self.db.getPointDim()
# getMaxResolution
def getMaxResolution(self):
return self.db.getMaxResolution()
# getLogicBox
def getLogicBox(self,x=None,y=None,z=None):
pdim=self.getPointDim()
lbox=self.db.getLogicBox()
A=[lbox.p1[I] for I in range(pdim)]
B=[lbox.p2[I] for I in range(pdim)]
p1,p2=[0]*pdim,[0]*pdim
for I in range(pdim):
r=(x,y,z)[I]
if r is None: r=[A[I],B[I]]
p1[I] = int( A[I]+r[0]*(B[I]-A[I]) if isinstance(r[0],float) else r[0])
p2[I] = int( A[I]+r[1]*(B[I]-A[I]) if isinstance(r[1],float) else r[1])
return (p1,p2)
# getSliceLogicBox
def getSliceLogicBox(self,axis,offset):
ret=self.getLogicBox()
p1[axis]=offset+0
p1[axis]=offset+1
return (p1,p2)
# getBounds
def getBounds(self, logic_box):
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
return Position(self.logicToPhysic(),Position(BoxNi(logic_box)))
# getLogicSize
def getLogicSize(self):
p1,p2=self.getLogicBox()
return numpy.subtract(p2,p1)
# getFields
def getFields(self):
return [field.name for field in self.db.getFields()]
# getField
def getField(self,value=None):
if value is None:
return self.db.getField()
if isinstance(value,str):
return self.db.getField(value)
return value
# createAccess
def createAccess(self):
return self.db.createAccess()
# readBlock
def readBlock(self, block_id, time=None, field=None, access=None, aborted=Aborted()):
Assert(access)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
read_block = self.db.createBlockQuery(block_id, field, time, ord('r'), aborted)
self.executeBlockQueryAndWait(access, read_block)
if not read_block.ok(): return None
return Array.toNumPy(read_block.buffer, bShareMem=False)
# writeBlock
def writeBlock(self, block_id, time=None, field=None, access=None, data=None, aborted=Aborted()):
Assert(access and data)
field=self.getField() if field is None else self.getField(field)
time = self.getTime() if time is None else time
write_block = self.db.createBlockQuery(block_id, field, time, ord('w'), aborted)
write_block.buffer=Array.fromNumPy(data,TargetDim=self.getPointDim(), bShareMem=True)
self.executeBlockQueryAndWait(access, write_block)
return write_block.ok()
# read
def read(self, logic_box=None, x=None, y=None, z=None, time=None, field=None, num_refinements=1, quality=0, max_resolution=None, disable_filters=False, access=None):
"""
db=PyDataset.Load(url)
# example of reading a single slice in logic coordinates
data=db.read(z=[512,513])
# example of reading a single slice in normalized coordinates (i.e. [0,1])
data.db.read(x=[0,0.1],y=[0,0.1],z=[0,0.1])
# example of reading a single slice with 3 refinements
for data in db.read(z=[512,513],num_refinements=3):
print(data)
"""
pdim=self.getPointDim()
field=self.getField() if field is None else self.getField(field)
if time is None:
time = self.getTime()
if logic_box is None:
logic_box=self.getLogicBox(x,y,z)
if isinstance(logic_box,(tuple,list)):
logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
query = self.db.createBoxQuery(BoxNi(logic_box), field , time, ord('r'))
if disable_filters:
query.disableFilters()
else:
query.enableFilters()
if max_resolution is None:
max_resolution=self.getMaxResolution()
# example quality -3 means not full resolution
Assert(quality<=0)
max_resolution=max_resolution+quality
for I in reversed(range(num_refinements)):
res=max_resolution-(pdim*I)
if res>=0:
query.end_resolutions.push_back(res)
self.db.beginBoxQuery(query)
if not query.isRunning():
raise Exception("begin query failed {0}".format(query.errormsg))
if not access:
access=self.db.createAccess()
def NoGenerator():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# i cannot be sure how the numpy will be used outside or when the query will dealllocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
return data
def WithGenerator():
while query.isRunning():
if not self.db.executeBoxQuery(access, query):
raise Exception("query error {0}".format(query.errormsg))
# i cannot be sure how the numpy will be used outside or when the query will dealllocate the buffer
data=Array.toNumPy(query.buffer, bShareMem=False)
yield data
self.db.nextBoxQuery(query)
return NoGenerator() if query.end_resolutions.size()==1 else WithGenerator()
# write
# IMPORTANT: usually db.write happens without write lock and syncronously (at least in python)
def write(self, data, x=0, y=0, z=0,logic_box=None, time=None, field=None, access=None):
"""
db=PyDataset.Load(url)
width,height,depth=db.getSize()
# write single slice
data=numpy.zeros([height,width,3],dtype.uint8)
db.write(data,z=[512,513])
# write several slices in one-shot
nslices=10
data=numpy.zeros([nslices,height,width,10,3],dtype.uint8)
db.write(data,z=[512,512+nslices])
# write several slices with a generator
nslices=10
def gen():
for I in range(nslices):
yield=p.zeros([height,width,3],dtype.uint8)
db.write(gen,z=512)
"""
pdim=self.getPointDim()
field=self.getField(field)
if time is None:
time = self.getTime()
dims=list(data.shape)
# remove last components
if field.dtype.ncomponents()>1:
dims=dims[:-1]
# could be I'm writing a slice, I need to increment the "dimension"
while len(dims)<pdim:
dims=[1] + dims
dims=list(reversed(dims))
if logic_box is None:
p1=PointNi([x,y,z][0:pdim])
logic_box=BoxNi(p1,p1+PointNi(dims))
if isinstance(logic_box,(tuple,list)):
| logic_box=BoxNi(PointNi(logic_box[0]),PointNi(logic_box[1]))
| random_line_split |
|
imitation_ddpg_model.py | ="relu")(concat)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1)(out)
# Outputs single value for give state-action
model = tf.keras.Model([state_input, action_input], outputs)
return model
def policy(state, noise_object):
sampled_actions = tf.squeeze(actor_model(state))
noise = noise_object()
# Adding noise to action
sampled_actions = sampled_actions.numpy() + noise
np.clip(sampled_actions[3], 0, 1)
sampled_actions[3] = 0 if sampled_actions[3] < 0.5 else 1
legal_action = [np.clip(sampled_actions[0], 0, 1), # brake
np.clip(sampled_actions[1], -1, 1), # steering
np.clip(sampled_actions[2], -1, 1), # throttle
sampled_actions[3]] # direction
return [np.squeeze(legal_action)]
def sim_start(): # ์๋ฎฌ๋ ์ดํฐ ์คํ
# print(pyautogui.position()) # (1125, 455)
pyautogui.click(1125, 455)
# time.sleep(1)
pyautogui.keyDown('altleft')
pyautogui.keyDown('p')
pyautogui.keyUp('altleft')
pyautogui.keyUp('p')
time.sleep(1)
pyautogui.click(1125, 455)
# connect to the AirSim simulator
client = airsim.CarClient()
client.confirmConnection()
client.enableApiControl(api_control)
print("API Control enabled: %s\n" % client.isApiControlEnabled())
car_controls = airsim.CarControls()
time.sleep(1)
return client, car_controls
def sim_stop(): # ์๋ฎฌ๋ ์ดํฐ ์ค์ง
# print(pyautogui.position()) # (1125, 455)
pyautogui.click(1125, 455)
time.sleep(1)
# ์๋ฎฌ๋ ์ดํฐ ์ข
๋ฃ
pyautogui.keyDown('esc')
pyautogui.keyUp('esc')
time.sleep(1)
def capture_goal(): # ๋ชฉํ ์ง์ ์ ์ธ๋ฆฌ์ผ ์ขํ -> ์์ด์ฌ ์ขํ ๋ณํ
# ์ธ๋ฆฌ์ผ์์ ์ถ๋ ฅ๋๋ ๋ชฉํ ์ง์ ์ขํ
unreal_goals = [[600, 2600], [600, 2230], [600, 1800], [600, 1430], [600, 990], [600, 620], # ์ฐ์ธก
[-1200, 2600], [-1200, 2230], [-1200, 1800], [-1200, 1430], [-1200, 990]] # ์ข์ธก
# ์์ด์ฌ API๋ฅผ ํตํด ์ถ๋ ฅ๋๋ ๋ชฉํ ์ง์ ์ขํ
airsim_goals = [[6, -14], [6, -17], [6, -22], [6, -25], [6, -30], [6, -33], # ์ฐ์ธก
[-7, -14], [-7, -17], [-7, -22], [-7, -25], [-7, -30]] # ์ข์ธก
# ์ขํ ์ถ๋ ฅ ๋ถ๋ถ ์คํฌ๋ฆฐ์ท ์บก์ณ
img = pyautogui.screenshot('goal.png', region=(36, 90, 210, 15)) # ์ ์ฒดํ๋ฉด(F11) ๊ธฐ์ค
# ์ขํ ์คํฌ๋ฆฐ์ท ๋ฌธ์์ด๋ก ๋ณํ
goal_pos = pytesseract.image_to_string(Image.open('goal.png'))
# print(goal_pos[:-2])
# x, y ์ขํ ๊ตฌ๋ถ -> ์ขํ ๊ฐ float ๋ณํ
goal_pos = str.split(goal_pos[:-2], ' ')
x = str.split(goal_pos[0], '.')[0]
y = str.split(goal_pos[1], '.')[0]
x = int(float(x[2:]))
if y[0] == 'ยฅ': # ๊ฐ๋ ๋ฌธ์๋ฅผ ์๋ชป ์ธ์ํ๋ ๊ฒฝ์ฐ ๋ฐ์
y = int(float(y[3:]))
else:
y = int(float(y[2:]))
goal_xy = []
for i in range(len(airsim_goals)):
if x == unreal_goals[i][0] and y == unreal_goals[i][1]:
# print('Goal x :', airsim_goals[i][0])
# print('Goal y :', airsim_goals[i][1])
goal_xy = airsim_goals[i]
print('Goal :', airsim_goals[i])
break
return goal_xy
def save_model():
# Save the weights
actor_model.save(
".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_actor_ep" + str(ep_cnt + 1) + ".h5")
critic_model.save(
".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_critic_ep" + str(ep_cnt + 1) + ".h5")
target_actor.save(".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_target_actor_ep" + str(
ep_cnt + 1) + ".h5")
target_critic.save(".\\save_models\\" + str(start_ymd) + '_' + str(start_hm) + "\\parking_target_critic_ep" + str(
ep_cnt + 1) + ".h5")
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'
std_dev = 0.2
ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
actor_model = get_actor()
critic_model = get_critic()
target_actor = get_actor()
target_critic = get_critic()
# Making the weights equal initially
target_actor.set_weights(actor_model.get_weights())
target_critic.set_weights(critic_model.get_weights())
# Learning rate for actor-critic models
critic_lr = 0.002
actor_lr = 0.001
critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
total_episodes = 100
# Discount factor for future rewards
gamma = 0.99
# Used to update target networks
tau = 0.005
buffer = Buffer(50000, 64)
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# ์ฒ์ ์คํ ์ ์ถฉ๋ ๋ฌผ์ฒด ์ธ์ ํ ๋ฆฌ์
๊ด๋ จ ๋ฌธ์ ๋๋ฌธ์ ์ค์ง ํ ๋ค์ ์์
client, car_controls = sim_start()
collision = (client.simGetCollisionInfo().object_name).lower()
while collision.find('pipesmall') < 0 and collision != '':
sim_stop()
client, car_controls = sim_start()
time.sleep(2)
ep_cnt = 0
tracking_img = []
period = 5 # ์ด๋ ๊ฒฝ๋ก ์ด๋ฏธ์ง ์ ์ฅ ์ํผ์๋ ๊ฐ๊ฒฉ
# Takes about 4 min to train
for ep in range(total_episodes):
ep_cnt = ep
# if ep == 0 or ep + 1 % period == 0:
tracking_img = cv.imread('map.png', cv.IMREAD_GRAYSCALE)
# prev_state = env.reset()
prev_state = [client.getCarState().kinematics_estimated.position.x_val, # ์ฐจ๋ ์์น x ์ขํ
client.getCarState().kinematics_estimated.position.y_val, # ์ฐจ๋ ์์น y ์ขํ
client.getCarState().speed, # ์ฐจ๋ ์๋
client.getCarControls().brake, # ๋ธ๋ ์ดํฌ
client.getCarControls().steering, # ํธ๋ค ๋ฐฉํฅ
client.getCarControls().throttle, # ์ฐจ๋ ์ด๋
client.getCarControls().manual_gear, # ํ์ง ๊ธฐ์ด
client.getDistanceSensorData("Distance1").distance, # ์ ๋ฐฉ ๊ฑฐ๋ฆฌ ์ผ์
client.getDistanceSensorData("Distance2").distance, # ์ฐ์ธก ๊ฑฐ๋ฆฌ ์ผ์
client.getDistanceSensorData("Distance3").distance, # ํ๋ฐฉ ๊ฑฐ๋ฆฌ ์ผ์
client.getDistanceSensorData("Distance4").distance] # ์ข์ธก ๊ฑฐ๋ฆฌ ์ผ์
episodic_reward = 0
is_captured = 0
count = 0
start_time = 0
end_time = 0
total_steps = 0
reward = 0
done = False
while True:
total_steps += 1
if is_captured == 0:
goal = capture_goal()
is_captured = 1
tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0) | random_line_split |
||
imitation_ddpg_model.py |
# Takes (s,a,r,s') obervation tuple as input
def record(self, obs_tuple):
# Set index to zero if buffer_capacity is exceeded,
# replacing old records
index = self.buffer_counter % self.buffer_capacity
self.state_buffer[index] = obs_tuple[0]
self.action_buffer[index] = obs_tuple[1]
self.reward_buffer[index] = obs_tuple[2]
self.next_state_buffer[index] = obs_tuple[3]
self.buffer_counter += 1
# Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
# TensorFlow to build a static graph out of the logic and computations in our function.
# This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
@tf.function
def update(
self, state_batch, action_batch, reward_batch, next_state_batch,
):
# Training and updating Actor & Critic networks.
# See Pseudo Code.
with tf.GradientTape() as tape:
target_actions = target_actor(next_state_batch, training=True)
y = reward_batch + gamma * target_critic(
[next_state_batch, target_actions], training=True
)
critic_value = critic_model([state_batch, action_batch], training=True)
critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables)
critic_optimizer.apply_gradients(
zip(critic_grad, critic_model.trainable_variables)
)
with tf.GradientTape() as tape:
actions = actor_model(state_batch, training=True)
critic_value = critic_model([state_batch, actions], training=True)
# Used `-value` as we want to maximize the value given
# by the critic for our actions
actor_loss = -tf.math.reduce_mean(critic_value)
actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables)
actor_optimizer.apply_gradients(
zip(actor_grad, actor_model.trainable_variables)
)
# We compute the loss and update parameters
def learn(self):
# Get sampling range
record_range = min(self.buffer_counter, self.buffer_capacity)
# Randomly sample indices
batch_indices = np.random.choice(record_range, self.batch_size)
# Convert to tensors
state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])
reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
reward_batch = tf.cast(reward_batch, dtype=tf.float32)
next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])
self.update(state_batch, action_batch, reward_batch, next_state_batch)
# This update target parameters slowly
# Based on rate `tau`, which is much less than one.
@tf.function
def update_target(target_weights, weights, tau):
for (a, b) in zip(target_weights, weights):
a.assign(b * tau + a * (1 - tau))
def get_actor():
# Initialize weights between -3e-3 and 3-e3
last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
inputs = layers.Input(shape=(num_states,))
out = layers.Dense(256, activation="relu")(inputs)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(4, activation="tanh", kernel_initializer=last_init)(out)
model = tf.keras.Model(inputs, outputs)
return model
def get_critic():
# State as input
state_input = layers.Input(shape=(num_states))
state_out = layers.Dense(128, activation="relu")(state_input)
state_out = layers.Dense(128, activation="relu")(state_out)
# Action as input
action_input = layers.Input(shape=(num_actions))
action_out = layers.Dense(64, activation="relu")(action_input)
# Both are passed through seperate layer before concatenating
concat = layers.Concatenate()([state_out, action_out])
out = layers.Dense(256, activation="relu")(concat)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1)(out)
# Outputs single value for give state-action
model = tf.keras.Model([state_input, action_input], outputs)
return model
def policy(state, noise_object):
sampled_actions = tf.squeeze(actor_model(state))
noise = noise_object()
# Adding noise to action
sampled_actions = sampled_actions.numpy() + noise
np.clip(sampled_actions[3], 0, 1)
sampled_actions[3] = 0 if sampled_actions[3] < 0.5 else 1
legal_action = [np.clip(sampled_actions[0], 0, 1), # brake
np.clip(sampled_actions[1], -1, 1), # steering
np.clip(sampled_actions[2], -1, 1), # throttle
sampled_actions[3]] # direction
return [np.squeeze(legal_action)]
def sim_start(): # ์๋ฎฌ๋ ์ดํฐ ์คํ
# print(pyautogui.position()) # (1125, 455)
pyautogui.click(1125, 455)
# time.sleep(1)
pyautogui.keyDown('altleft')
pyautogui.keyDown('p')
pyautogui.keyUp('altleft')
pyautogui.keyUp('p')
time.sleep(1)
pyautogui.click(1125, 455)
# connect to the AirSim simulator
client = airsim.CarClient()
client.confirmConnection()
client.enableApiControl(api_control)
print("API Control enabled: %s\n" % client.isApiControlEnabled())
car_controls = airsim.CarControls()
time.sleep(1)
return client, car_controls
def sim_stop(): # ์๋ฎฌ๋ ์ดํฐ ์ค์ง
# print(pyautogui.position()) # (1125, 455)
pyautogui.click(1125, 455)
time.sleep(1)
# ์๋ฎฌ๋ ์ดํฐ ์ข
๋ฃ
pyautogui.keyDown('esc')
pyautogui.keyUp('esc')
time.sleep(1)
def capture_goal(): # ๋ชฉํ ์ง์ ์ ์ธ๋ฆฌ์ผ ์ขํ -> ์์ด์ฌ ์ขํ ๋ณํ
# ์ธ๋ฆฌ์ผ์์ ์ถ๋ ฅ๋๋ ๋ชฉํ ์ง์ ์ขํ
unreal_goals = [[600, 2600], [600, 2230], [600, 1800], [600, 1430], [600, 990], [600, 620], # ์ฐ์ธก
[-1200, 2600], [-1200, 2230], [-1200, 1800], [-1200, 1430], [-1200, 990]] # ์ข์ธก
# ์์ด์ฌ API๋ฅผ ํตํด ์ถ๋ ฅ๋๋ ๋ชฉํ ์ง์ ์ขํ
airsim_goals = [[6, -14], [6, -17], [6, -22], [6, -25], [6, -30], [6, -33], # ์ฐ์ธก
[-7, -14], [-7, -17], [-7, -22], [-7, -25], [-7, -30]] # ์ข์ธก
# ์ขํ ์ถ๋ ฅ ๋ถ๋ถ ์คํฌ๋ฆฐ์ท ์บก์ณ
img = pyautogui.screenshot('goal.png', region=(36, 90, 210, 15)) # ์ ์ฒดํ๋ฉด(F11) ๊ธฐ์ค
# ์ขํ ์คํฌ๋ฆฐ์ท ๋ฌธ์์ด๋ก ๋ณํ
goal_pos = pytesseract.image_to_string(Image.open('goal.png'))
# print(goal_pos[:-2])
# x, y ์ขํ ๊ตฌ๋ถ -> ์ขํ ๊ฐ float ๋ณํ
goal_pos = str.split(goal_pos[:-2], ' ')
x = str.split(goal_pos[0], '.')[0]
y = str.split(goal_pos[1], '.')[0]
x = int(float(x[2:]))
if y[0] == 'ยฅ': # ๊ฐ๋ ๋ฌธ์๋ฅผ ์๋ชป ์ธ์ํ๋ ๊ฒฝ์ฐ ๋ฐ์
y = int(float(y[3:]))
else:
y = int(float(y[ | self.buffer_capacity = buffer_capacity
# Num of tuples to train on.
self.batch_size = batch_size
# Its tells us num of times record() was called.
self.buffer_counter = 0
# Instead of list of tuples as the exp.replay concept go
# We use different np.arrays for each tuple element
self.state_buffer = np.zeros((self.buffer_capacity, num_states))
self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
self.reward_buffer = np.zeros((self.buffer_capacity, 1))
self.next_state_buffer = np.zeros((self.buffer_capacity, num_states)) | identifier_body |
|
imitation_ddpg_model.py | esseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'
std_dev = 0.2
ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
actor_model = get_actor()
critic_model = get_critic()
target_actor = get_actor()
target_critic = get_critic()
# Making the weights equal initially
target_actor.set_weights(actor_model.get_weights())
target_critic.set_weights(critic_model.get_weights())
# Learning rate for actor-critic models
critic_lr = 0.002
actor_lr = 0.001
critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
total_episodes = 100
# Discount factor for future rewards
gamma = 0.99
# Used to update target networks
tau = 0.005
buffer = Buffer(50000, 64)
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# ์ฒ์ ์คํ ์ ์ถฉ๋ ๋ฌผ์ฒด ์ธ์ ํ ๋ฆฌ์
๊ด๋ จ ๋ฌธ์ ๋๋ฌธ์ ์ค์ง ํ ๋ค์ ์์
client, car_controls = sim_start()
collision = (client.simGetCollisionInfo().object_name).lower()
while collision.find('pipesmall') < 0 and collision != '':
sim_stop()
client, car_controls = sim_start()
time.sleep(2)
ep_cnt = 0
tracking_img = []
period = 5 # ์ด๋ ๊ฒฝ๋ก ์ด๋ฏธ์ง ์ ์ฅ ์ํผ์๋ ๊ฐ๊ฒฉ
# Takes about 4 min to train
for ep in range(total_episodes):
ep_cnt = ep
# if ep == 0 or ep + 1 % period == 0:
tracking_img = cv.imread('map.png', cv.IMREAD_GRAYSCALE)
# prev_state = env.reset()
prev_state = [client.getCarState().kinematics_estimated.position.x_val, # ์ฐจ๋ ์์น x ์ขํ
client.getCarState().kinematics_estimated.position.y_val, # ์ฐจ๋ ์์น y ์ขํ
client.getCarState().speed, # ์ฐจ๋ ์๋
client.getCarControls().brake, # ๋ธ๋ ์ดํฌ
client.getCarControls().steering, # ํธ๋ค ๋ฐฉํฅ
client.getCarControls().throttle, # ์ฐจ๋ ์ด๋
client.getCarControls().manual_gear, # ํ์ง ๊ธฐ์ด
client.getDistanceSensorData("Distance1").distance, # ์ ๋ฐฉ ๊ฑฐ๋ฆฌ ์ผ์
client.getDistanceSensorData("Distance2").distance, # ์ฐ์ธก ๊ฑฐ๋ฆฌ ์ผ์
client.getDistanceSensorData("Distance3").distance, # ํ๋ฐฉ ๊ฑฐ๋ฆฌ ์ผ์
client.getDistanceSensorData("Distance4").distance] # ์ข์ธก ๊ฑฐ๋ฆฌ ์ผ์
episodic_reward = 0
is_captured = 0
count = 0
start_time = 0
end_time = 0
total_steps = 0
reward = 0
done = False
while True:
total_steps += 1
if is_captured == 0:
goal = capture_goal()
is_captured = 1
tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
action = policy(tf_prev_state, ou_noise)
action = tf.squeeze(action)
print('episode :', ep + 1, '|',
'brake :', round(float(action[0]), 3), '|', 'steering :', round(float(action[1]), 3), '|',
'throttle :', round(float(abs(action[2])), 3), '|', 'direction :', round(float(action[3]), 3), '|',
'total_reward :', round(episodic_reward, 6))
# car_controls.brake = 1 if float(action[0]) > 0.5 else 0
# car_controls.steering = float(action[1])
# car_controls.throttle = float(abs(action[2]))
# if action[3]:
# car_controls.manual_gear = 0
# car_controls.is_manual_gear = False
# else:
# car_controls.manual_gear = -1
# car_controls.is_manual_gear = True
#
# client.setCarControls(car_controls)
# Recieve state and reward from environment.
# state, reward, done, info = env.step(action)
state = [client.getCarState().kinematics_estimated.position.x_val, # ์ฐจ๋ ์์น x ์ขํ
client.getCarState().kinematics_estimated.position.y_val, # ์ฐจ๋ ์์น y ์ขํ
client.getCarState().speed, # ์ฐจ๋ ์๋
client.getCarControls().brake, # ๋ธ๋ ์ดํฌ
client.getCarControls().steering, # ํธ๋ค ๋ฐฉํฅ
client.getCarControls().throttle, # ์ฐจ๋ ์ด๋
client.getCarControls().manual_gear, # ํ์ง ๊ธฐ์ด
client.getDistanceSensorData("Distance1").distance, # ์ ๋ฐฉ ๊ฑฐ๋ฆฌ ์ผ์
client.getDistanceSensorData("Distance2").distance, # ์ฐ์ธก ๊ฑฐ๋ฆฌ ์ผ์
client.getDistanceSensorData("Distance3").distance, # ํ๋ฐฉ ๊ฑฐ๋ฆฌ ์ผ์
client.getDistanceSensorData("Distance4").distance] # ์ข์ธก ๊ฑฐ๋ฆฌ ์ผ์
# ์ฐจ๋ ์ด๋ ๊ฒฝ๋ก ๊ธฐ๋ก
# if ep == 0 or ep+1 % period == 0:
tracking_img = tracking.tracking(tracking_img, state[0], state[1])
# reward = 1/1000 if ((client.simGetCollisionInfo().object_name).lower()).find('pipesmall') >= 0 else -1
collision = (client.simGetCollisionInfo().object_name).lower()
if collision.find('pipesmall') >= 0 or collision == '':
done = False
else:
print('Episode', ep + 1, ': Crash!!')
# reward += -1
reward = -100
done = True
if (goal[0] > 0):
if (6 < client.getCarState().kinematics_estimated.position.x_val < 8 and
goal[1] - 1 < client.getCarState().kinematics_estimated.position.y_val < goal[1] + 1):
print('Episode', ep + 1, ': Success!!')
# reward += 1
reward = 100
done = True
elif (goal[0] < 0):
if (-9 < client.getCarState().kinematics_estimated.position.x_val < -7 and
goal[1] - 1 < client.getCarState().kinematics_estimated.position.y_val < goal[1] + 1):
print('Episode', ep + 1, ': Success!!')
# reward += 1
reward = 100
done = True
if round(prev_state[0], 2) == round(state[0], 2) and round(prev_state[1], 2) == round(state[1], 2):
reward = -1 / 1000
if count == 0:
count += 1
start_time = time.time()
end_time = time.time()
else:
count += 1
end_time = time.time()
if end_time - start_time >= 10:
print('Episode', ep + 1, ': Don''t just stand there!!')
count = 0
# reward += -1
reward = -200
done = True
else:
reward = 1 / 100000
count = 0
buffer.record((prev_state, action, reward, state))
episodic_reward += reward
buffer.learn()
update_target(target_actor.variables, actor_model.variables, tau)
update_target(target_critic.variables, critic_model.variables, tau)
# End this episode when `done` is True
if done:
print('Final Reward :', episodic_reward)
print('Total Steps :', total_steps)
if ep == 0 or (ep + 1) % period == 0:
cv.imwrite(".\\tracking\\" + str(start_ymd) + '_' + str(start_hm) + "\\ep" + str(ep + 1) + ".png",
tracking_img)
print('tracking image saved')
is_captured = 0
sim_stop()
sim_stop()
if ep + 1 == total_episodes:
break
client, car_controls = sim_start()
sim_stop()
sim_stop()
c | lient, car_controls = sim_start()
break
prev_state = state
ep_reward_list.append(episodic_reward)
# Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
print("Episode * {} * Avg Reward is ==> {}".format(ep + 1, avg_reward))
avg_reward_list.append(avg_reward)
save_model()
print('model weight saved')
sim_stop()
sim_stop()
# Plotting graph | conditional_block |
|
imitation_ddpg_model.py | (self):
if self.x_initial is not None:
self.x_prev = self.x_initial
else:
self.x_prev = np.zeros_like(self.mean)
class Buffer:
def __init__(self, buffer_capacity=100000, batch_size=64):
# Number of "experiences" to store at max
self.buffer_capacity = buffer_capacity
# Num of tuples to train on.
self.batch_size = batch_size
# Its tells us num of times record() was called.
self.buffer_counter = 0
# Instead of list of tuples as the exp.replay concept go
# We use different np.arrays for each tuple element
self.state_buffer = np.zeros((self.buffer_capacity, num_states))
self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
self.reward_buffer = np.zeros((self.buffer_capacity, 1))
self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))
# Takes (s,a,r,s') obervation tuple as input
def record(self, obs_tuple):
# Set index to zero if buffer_capacity is exceeded,
# replacing old records
index = self.buffer_counter % self.buffer_capacity
self.state_buffer[index] = obs_tuple[0]
self.action_buffer[index] = obs_tuple[1]
self.reward_buffer[index] = obs_tuple[2]
self.next_state_buffer[index] = obs_tuple[3]
self.buffer_counter += 1
# Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
# TensorFlow to build a static graph out of the logic and computations in our function.
# This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
@tf.function
def update(
self, state_batch, action_batch, reward_batch, next_state_batch,
):
# Training and updating Actor & Critic networks.
# See Pseudo Code.
with tf.GradientTape() as tape:
target_actions = target_actor(next_state_batch, training=True)
y = reward_batch + gamma * target_critic(
[next_state_batch, target_actions], training=True
)
critic_value = critic_model([state_batch, action_batch], training=True)
critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables)
critic_optimizer.apply_gradients(
zip(critic_grad, critic_model.trainable_variables)
)
with tf.GradientTape() as tape:
actions = actor_model(state_batch, training=True)
critic_value = critic_model([state_batch, actions], training=True)
# Used `-value` as we want to maximize the value given
# by the critic for our actions
actor_loss = -tf.math.reduce_mean(critic_value)
actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables)
actor_optimizer.apply_gradients(
zip(actor_grad, actor_model.trainable_variables)
)
# We compute the loss and update parameters
def learn(self):
# Get sampling range
record_range = min(self.buffer_counter, self.buffer_capacity)
# Randomly sample indices
batch_indices = np.random.choice(record_range, self.batch_size)
# Convert to tensors
state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])
reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
reward_batch = tf.cast(reward_batch, dtype=tf.float32)
next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])
self.update(state_batch, action_batch, reward_batch, next_state_batch)
# This update target parameters slowly
# Based on rate `tau`, which is much less than one.
@tf.function
def update_target(target_weights, weights, tau):
for (a, b) in zip(target_weights, weights):
a.assign(b * tau + a * (1 - tau))
def get_actor():
# Initialize weights between -3e-3 and 3-e3
last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
inputs = layers.Input(shape=(num_states,))
out = layers.Dense(256, activation="relu")(inputs)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(4, activation="tanh", kernel_initializer=last_init)(out)
model = tf.keras.Model(inputs, outputs)
return model
def get_critic():
# State as input
state_input = layers.Input(shape=(num_states))
state_out = layers.Dense(128, activation="relu")(state_input)
state_out = layers.Dense(128, activation="relu")(state_out)
# Action as input
action_input = layers.Input(shape=(num_actions))
action_out = layers.Dense(64, activation="relu")(action_input)
# Both are passed through seperate layer before concatenating
concat = layers.Concatenate()([state_out, action_out])
out = layers.Dense(256, activation="relu")(concat)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1)(out)
# Outputs single value for give state-action
model = tf.keras.Model([state_input, action_input], outputs)
return model
def policy(state, noise_object):
sampled_actions = tf.squeeze(actor_model(state))
noise = noise_object()
# Adding noise to action
sampled_actions = sampled_actions.numpy() + noise
sampled_actions[3] = np.clip(sampled_actions[3], 0, 1)
sampled_actions[3] = 0 if sampled_actions[3] < 0.5 else 1
legal_action = [np.clip(sampled_actions[0], 0, 1), # brake
np.clip(sampled_actions[1], -1, 1), # steering
np.clip(sampled_actions[2], -1, 1), # throttle
sampled_actions[3]] # direction
return [np.squeeze(legal_action)]
def sim_start(): # Start the simulator
# print(pyautogui.position()) # (1125, 455)
pyautogui.click(1125, 455)
# time.sleep(1)
pyautogui.keyDown('altleft')
pyautogui.keyDown('p')
pyautogui.keyUp('altleft')
pyautogui.keyUp('p')
time.sleep(1)
pyautogui.click(1125, 455)
# connect to the AirSim simulator
client = airsim.CarClient()
client.confirmConnection()
client.enableApiControl(api_control)
print("API Control enabled: %s\n" % client.isApiControlEnabled())
car_controls = airsim.CarControls()
time.sleep(1)
return client, car_controls
def sim_stop(): # Stop the simulator
# print(pyautogui.position()) # (1125, 455)
pyautogui.click(1125, 455)
time.sleep(1)
# Terminate the simulator
pyautogui.keyDown('esc')
pyautogui.keyUp('esc')
time.sleep(1)
def capture_goal(): # Convert the goal point's Unreal coordinates to AirSim coordinates
# Goal point coordinates printed by Unreal
unreal_goals = [[600, 2600], [600, 2230], [600, 1800], [600, 1430], [600, 990], [600, 620], # right side
[-1200, 2600], [-1200, 2230], [-1200, 1800], [-1200, 1430], [-1200, 990]] # left side
# Goal point coordinates reported through the AirSim API
airsim_goals = [[6, -14], [6, -17], [6, -22], [6, -25], [6, -30], [6, -33], # right side
[-7, -14], [-7, -17], [-7, -22], [-7, -25], [-7, -30]] # left side
# Capture a screenshot of the coordinate output area
img = pyautogui.screenshot('goal.png', region=(36, 90, 210, 15)) # based on fullscreen (F11)
# Convert the coordinate screenshot to a string
goal_pos = pytesseract.image_to_string(Image.open('goal.png'))
# print(goal_pos[:-2])
# Split the x, y coordinates -> convert coordinate values to float
goal_pos = str.split(goal_pos[:-2], ' ')
x = str.split(goal_pos | reset | identifier_name |
|
reading.rs | , RecordWitness, SBucket, SectorId,
};
use subspace_erasure_coding::ErasureCoding;
use subspace_proof_of_space::{Quality, Table, TableGenerator};
use thiserror::Error;
use tracing::debug;
/// Errors that happen during reading
#[derive(Debug, Error)]
pub enum ReadingError {
/// Wrong sector size
#[error("Wrong sector size: expected {expected}, actual {actual}")]
WrongSectorSize {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to read chunk.
///
/// This is an implementation bug, most likely due to mismatch between sector contents map and
/// other farming parameters.
#[error("Failed to read chunk at location {chunk_location}")]
FailedToReadChunk {
/// Chunk location
chunk_location: usize,
},
/// Invalid chunk, possible disk corruption
#[error(
"Invalid chunk at location {chunk_location} s-bucket {s_bucket} encoded \
{encoded_chunk_used}, possible disk corruption: {error}"
)]
InvalidChunk {
/// S-bucket
s_bucket: SBucket,
/// Indicates whether chunk was encoded
encoded_chunk_used: bool,
/// Chunk location
chunk_location: usize,
/// Lower-level error
error: String,
},
/// Failed to erasure-decode record
#[error("Failed to erasure-decode record at offset {piece_offset}: {error}")]
FailedToErasureDecodeRecord {
/// Piece offset
piece_offset: PieceOffset,
/// Lower-level error
error: String,
},
/// Wrong record size after decoding
#[error("Wrong record size after decoding: expected {expected}, actual {actual}")]
WrongRecordSizeAfterDecoding {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to decode sector contents map
#[error("Failed to decode sector contents map: {0}")]
FailedToDecodeSectorContentsMap(#[from] SectorContentsMapFromBytesError),
/// Checksum mismatch
#[error("Checksum mismatch")]
ChecksumMismatch,
}
/// Record contained in the plot
#[derive(Debug, Clone)]
pub struct PlotRecord {
/// Record scalars
pub scalars: Box<[Scalar; Record::NUM_CHUNKS]>,
/// Record commitment
pub commitment: RecordCommitment,
/// Record witness
pub witness: RecordWitness,
}
/// Read sector record chunks, only plotted s-buckets are returned (in decoded form)
pub fn read_sector_record_chunks<PosTable>(
piece_offset: PieceOffset,
pieces_in_sector: u16,
s_bucket_offsets: &[u32; Record::NUM_S_BUCKETS],
sector_contents_map: &SectorContentsMap,
pos_table: &PosTable,
sector: &[u8],
) -> Result<Box<[Option<Scalar>; Record::NUM_S_BUCKETS]>, ReadingError>
where
PosTable: Table,
{
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let mut record_chunks = vec![None; Record::NUM_S_BUCKETS];
record_chunks
.par_iter_mut()
.zip(sector_contents_map.par_iter_record_chunk_to_plot(piece_offset))
.zip(
(u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
.into_par_iter()
.map(SBucket::from)
.zip(s_bucket_offsets.par_iter()),
)
.try_for_each(
|((maybe_record_chunk, maybe_chunk_details), (s_bucket, &s_bucket_offset))| {
let (chunk_offset, encoded_chunk_used) = match maybe_chunk_details {
Some(chunk_details) => chunk_details,
None => {
return Ok(());
}
};
let chunk_location = chunk_offset + s_bucket_offset as usize;
let mut record_chunk = sector[SectorContentsMap::encoded_size(pieces_in_sector)..]
.array_chunks::<{ Scalar::FULL_BYTES }>()
.nth(chunk_location)
.copied()
.ok_or(ReadingError::FailedToReadChunk { chunk_location })?;
// Decode chunk if necessary
if encoded_chunk_used {
let quality = pos_table
.find_quality(s_bucket.into())
.expect("encoded_chunk_used implies quality exists for this chunk; qed");
record_chunk = Simd::to_array(
Simd::from(record_chunk) ^ Simd::from(quality.create_proof().hash()),
);
}
maybe_record_chunk.replace(Scalar::try_from(record_chunk).map_err(|error| {
ReadingError::InvalidChunk {
s_bucket,
encoded_chunk_used,
chunk_location,
error,
}
})?);
Ok::<_, ReadingError>(())
},
)?;
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, layout is exactly what we need here
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Option<Scalar>; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
/// Given sector record chunks recover extended record chunks (both source and parity)
pub fn recover_extended_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<Box<[Scalar; Record::NUM_S_BUCKETS]>, ReadingError> {
// Restore source record scalars
let record_chunks = erasure_coding
.recover(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_S_BUCKETS {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_S_BUCKETS,
actual: record_chunks.len(),
});
}
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, size of the data checked above
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Scalar; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
/// Given sector record chunks recover source record chunks in form of an iterator.
pub fn recover_source_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<impl ExactSizeIterator<Item = Scalar>, ReadingError> {
// Restore source record scalars
let record_chunks = erasure_coding
.recover_source(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_CHUNKS |
Ok(record_chunks)
}
/// Read metadata (commitment and witness) for record
pub(crate) fn read_record_metadata(
piece_offset: PieceOffset,
pieces_in_sector: u16,
sector: &[u8],
) -> Result<RecordMetadata, ReadingError> {
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_metadata_start = SectorContentsMap::encoded_size(pieces_in_sector)
+ sector_record_chunks_size(pieces_in_sector);
// Move to the beginning of the commitment and witness we care about
let record_metadata_bytes = §or[sector_metadata_start..]
[RecordMetadata::encoded_size() * usize::from(piece_offset)..];
let record_metadata = RecordMetadata::decode(&mut &*record_metadata_bytes).expect(
"Length is correct and checked above, contents doesn't have specific structure to \
it; qed",
);
Ok(record_metadata)
}
/// Read piece from sector
pub fn read_piece<PosTable>(
piece_offset: PieceOffset,
sector_id: &SectorId,
sector_metadata: &SectorMetadataChecksummed,
sector: &[u8],
erasure_coding: &ErasureCoding,
table_generator: &mut PosTable::Generator,
) -> Result<Piece, ReadingError>
where
PosTable: Table,
{
let pieces_in_sector = sector_metadata.pieces_in_sector;
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_contents_map = {
SectorContentsMap::from_bytes(
§or[..SectorContentsMap::encoded_size(pieces_in_sector)],
pieces_in_sector,
)?
};
// Restore source record scalars
let record_chunks = recover_source_record_chunks(
&*read_sector_record_chunks(
piece_offset,
pieces_in_sector,
§or_metadata.s_bucket_offsets(),
| {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_CHUNKS,
actual: record_chunks.len(),
});
} | conditional_block |
reading.rs | , RecordWitness, SBucket, SectorId,
};
use subspace_erasure_coding::ErasureCoding;
use subspace_proof_of_space::{Quality, Table, TableGenerator};
use thiserror::Error;
use tracing::debug;
/// Errors that happen during reading
#[derive(Debug, Error)]
pub enum ReadingError {
/// Wrong sector size
#[error("Wrong sector size: expected {expected}, actual {actual}")]
WrongSectorSize {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to read chunk.
///
/// This is an implementation bug, most likely due to mismatch between sector contents map and
/// other farming parameters.
#[error("Failed to read chunk at location {chunk_location}")]
FailedToReadChunk {
/// Chunk location
chunk_location: usize,
},
/// Invalid chunk, possible disk corruption
#[error(
"Invalid chunk at location {chunk_location} s-bucket {s_bucket} encoded \
{encoded_chunk_used}, possible disk corruption: {error}"
)]
InvalidChunk {
/// S-bucket
s_bucket: SBucket,
/// Indicates whether chunk was encoded
encoded_chunk_used: bool,
/// Chunk location
chunk_location: usize,
/// Lower-level error
error: String,
},
/// Failed to erasure-decode record
#[error("Failed to erasure-decode record at offset {piece_offset}: {error}")]
FailedToErasureDecodeRecord {
/// Piece offset
piece_offset: PieceOffset,
/// Lower-level error
error: String,
},
/// Wrong record size after decoding
#[error("Wrong record size after decoding: expected {expected}, actual {actual}")]
WrongRecordSizeAfterDecoding {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to decode sector contents map
#[error("Failed to decode sector contents map: {0}")]
FailedToDecodeSectorContentsMap(#[from] SectorContentsMapFromBytesError),
/// Checksum mismatch
#[error("Checksum mismatch")]
ChecksumMismatch,
}
/// Record contained in the plot
#[derive(Debug, Clone)]
pub struct PlotRecord {
/// Record scalars
pub scalars: Box<[Scalar; Record::NUM_CHUNKS]>,
/// Record commitment
pub commitment: RecordCommitment,
/// Record witness
pub witness: RecordWitness,
}
/// Read sector record chunks, only plotted s-buckets are returned (in decoded form)
pub fn read_sector_record_chunks<PosTable>(
piece_offset: PieceOffset,
pieces_in_sector: u16,
s_bucket_offsets: &[u32; Record::NUM_S_BUCKETS],
sector_contents_map: &SectorContentsMap,
pos_table: &PosTable,
sector: &[u8],
) -> Result<Box<[Option<Scalar>; Record::NUM_S_BUCKETS]>, ReadingError>
where
PosTable: Table,
{
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let mut record_chunks = vec![None; Record::NUM_S_BUCKETS];
record_chunks
.par_iter_mut()
.zip(sector_contents_map.par_iter_record_chunk_to_plot(piece_offset))
.zip(
(u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
.into_par_iter()
.map(SBucket::from)
.zip(s_bucket_offsets.par_iter()),
)
.try_for_each(
|((maybe_record_chunk, maybe_chunk_details), (s_bucket, &s_bucket_offset))| {
let (chunk_offset, encoded_chunk_used) = match maybe_chunk_details {
Some(chunk_details) => chunk_details,
None => {
return Ok(());
}
};
let chunk_location = chunk_offset + s_bucket_offset as usize;
let mut record_chunk = sector[SectorContentsMap::encoded_size(pieces_in_sector)..]
.array_chunks::<{ Scalar::FULL_BYTES }>()
.nth(chunk_location)
.copied()
.ok_or(ReadingError::FailedToReadChunk { chunk_location })?;
// Decode chunk if necessary
if encoded_chunk_used {
let quality = pos_table
.find_quality(s_bucket.into())
.expect("encoded_chunk_used implies quality exists for this chunk; qed");
record_chunk = Simd::to_array(
Simd::from(record_chunk) ^ Simd::from(quality.create_proof().hash()), | ReadingError::InvalidChunk {
s_bucket,
encoded_chunk_used,
chunk_location,
error,
}
})?);
Ok::<_, ReadingError>(())
},
)?;
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, layout is exactly what we need here
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Option<Scalar>; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
/// Given sector record chunks recover extended record chunks (both source and parity)
pub fn recover_extended_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<Box<[Scalar; Record::NUM_S_BUCKETS]>, ReadingError> {
// Restore source record scalars
let record_chunks = erasure_coding
.recover(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_S_BUCKETS {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_S_BUCKETS,
actual: record_chunks.len(),
});
}
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, size of the data checked above
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Scalar; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
/// Given sector record chunks recover source record chunks in form of an iterator.
pub fn recover_source_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<impl ExactSizeIterator<Item = Scalar>, ReadingError> {
// Restore source record scalars
let record_chunks = erasure_coding
.recover_source(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_CHUNKS {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_CHUNKS,
actual: record_chunks.len(),
});
}
Ok(record_chunks)
}
/// Read metadata (commitment and witness) for record
pub(crate) fn read_record_metadata(
piece_offset: PieceOffset,
pieces_in_sector: u16,
sector: &[u8],
) -> Result<RecordMetadata, ReadingError> {
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_metadata_start = SectorContentsMap::encoded_size(pieces_in_sector)
+ sector_record_chunks_size(pieces_in_sector);
// Move to the beginning of the commitment and witness we care about
let record_metadata_bytes = §or[sector_metadata_start..]
[RecordMetadata::encoded_size() * usize::from(piece_offset)..];
let record_metadata = RecordMetadata::decode(&mut &*record_metadata_bytes).expect(
"Length is correct and checked above, contents doesn't have specific structure to \
it; qed",
);
Ok(record_metadata)
}
/// Read piece from sector
pub fn read_piece<PosTable>(
piece_offset: PieceOffset,
sector_id: &SectorId,
sector_metadata: &SectorMetadataChecksummed,
sector: &[u8],
erasure_coding: &ErasureCoding,
table_generator: &mut PosTable::Generator,
) -> Result<Piece, ReadingError>
where
PosTable: Table,
{
let pieces_in_sector = sector_metadata.pieces_in_sector;
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_contents_map = {
SectorContentsMap::from_bytes(
§or[..SectorContentsMap::encoded_size(pieces_in_sector)],
pieces_in_sector,
)?
};
// Restore source record scalars
let record_chunks = recover_source_record_chunks(
&*read_sector_record_chunks(
piece_offset,
pieces_in_sector,
§or_metadata.s_bucket_offsets(),
§or | );
}
maybe_record_chunk.replace(Scalar::try_from(record_chunk).map_err(|error| { | random_line_split |
reading.rs | , RecordWitness, SBucket, SectorId,
};
use subspace_erasure_coding::ErasureCoding;
use subspace_proof_of_space::{Quality, Table, TableGenerator};
use thiserror::Error;
use tracing::debug;
/// Errors that happen during reading
#[derive(Debug, Error)]
pub enum ReadingError {
/// Wrong sector size
#[error("Wrong sector size: expected {expected}, actual {actual}")]
WrongSectorSize {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to read chunk.
///
/// This is an implementation bug, most likely due to mismatch between sector contents map and
/// other farming parameters.
#[error("Failed to read chunk at location {chunk_location}")]
FailedToReadChunk {
/// Chunk location
chunk_location: usize,
},
/// Invalid chunk, possible disk corruption
#[error(
"Invalid chunk at location {chunk_location} s-bucket {s_bucket} encoded \
{encoded_chunk_used}, possible disk corruption: {error}"
)]
InvalidChunk {
/// S-bucket
s_bucket: SBucket,
/// Indicates whether chunk was encoded
encoded_chunk_used: bool,
/// Chunk location
chunk_location: usize,
/// Lower-level error
error: String,
},
/// Failed to erasure-decode record
#[error("Failed to erasure-decode record at offset {piece_offset}: {error}")]
FailedToErasureDecodeRecord {
/// Piece offset
piece_offset: PieceOffset,
/// Lower-level error
error: String,
},
/// Wrong record size after decoding
#[error("Wrong record size after decoding: expected {expected}, actual {actual}")]
WrongRecordSizeAfterDecoding {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to decode sector contents map
#[error("Failed to decode sector contents map: {0}")]
FailedToDecodeSectorContentsMap(#[from] SectorContentsMapFromBytesError),
/// Checksum mismatch
#[error("Checksum mismatch")]
ChecksumMismatch,
}
/// Record contained in the plot
#[derive(Debug, Clone)]
pub struct PlotRecord {
/// Record scalars
pub scalars: Box<[Scalar; Record::NUM_CHUNKS]>,
/// Record commitment
pub commitment: RecordCommitment,
/// Record witness
pub witness: RecordWitness,
}
/// Read sector record chunks, only plotted s-buckets are returned (in decoded form)
pub fn read_sector_record_chunks<PosTable>(
piece_offset: PieceOffset,
pieces_in_sector: u16,
s_bucket_offsets: &[u32; Record::NUM_S_BUCKETS],
sector_contents_map: &SectorContentsMap,
pos_table: &PosTable,
sector: &[u8],
) -> Result<Box<[Option<Scalar>; Record::NUM_S_BUCKETS]>, ReadingError>
where
PosTable: Table,
{
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let mut record_chunks = vec![None; Record::NUM_S_BUCKETS];
record_chunks
.par_iter_mut()
.zip(sector_contents_map.par_iter_record_chunk_to_plot(piece_offset))
.zip(
(u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
.into_par_iter()
.map(SBucket::from)
.zip(s_bucket_offsets.par_iter()),
)
.try_for_each(
|((maybe_record_chunk, maybe_chunk_details), (s_bucket, &s_bucket_offset))| {
let (chunk_offset, encoded_chunk_used) = match maybe_chunk_details {
Some(chunk_details) => chunk_details,
None => {
return Ok(());
}
};
let chunk_location = chunk_offset + s_bucket_offset as usize;
let mut record_chunk = sector[SectorContentsMap::encoded_size(pieces_in_sector)..]
.array_chunks::<{ Scalar::FULL_BYTES }>()
.nth(chunk_location)
.copied()
.ok_or(ReadingError::FailedToReadChunk { chunk_location })?;
// Decode chunk if necessary
if encoded_chunk_used {
let quality = pos_table
.find_quality(s_bucket.into())
.expect("encoded_chunk_used implies quality exists for this chunk; qed");
record_chunk = Simd::to_array(
Simd::from(record_chunk) ^ Simd::from(quality.create_proof().hash()),
);
}
maybe_record_chunk.replace(Scalar::try_from(record_chunk).map_err(|error| {
ReadingError::InvalidChunk {
s_bucket,
encoded_chunk_used,
chunk_location,
error,
}
})?);
Ok::<_, ReadingError>(())
},
)?;
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, layout is exactly what we need here
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Option<Scalar>; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
/// Given sector record chunks recover extended record chunks (both source and parity)
pub fn recover_extended_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<Box<[Scalar; Record::NUM_S_BUCKETS]>, ReadingError> {
// Restore source record scalars
let record_chunks = erasure_coding
.recover(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_S_BUCKETS {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_S_BUCKETS,
actual: record_chunks.len(),
});
}
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, size of the data checked above
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Scalar; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
/// Given sector record chunks recover source record chunks in form of an iterator.
pub fn recover_source_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<impl ExactSizeIterator<Item = Scalar>, ReadingError> {
// Restore source record scalars
let record_chunks = erasure_coding
.recover_source(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_CHUNKS {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_CHUNKS,
actual: record_chunks.len(),
});
}
Ok(record_chunks)
}
/// Read metadata (commitment and witness) for record
pub(crate) fn read_record_metadata(
piece_offset: PieceOffset,
pieces_in_sector: u16,
sector: &[u8],
) -> Result<RecordMetadata, ReadingError> {
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_metadata_start = SectorContentsMap::encoded_size(pieces_in_sector)
+ sector_record_chunks_size(pieces_in_sector);
// Move to the beginning of the commitment and witness we care about
let record_metadata_bytes = §or[sector_metadata_start..]
[RecordMetadata::encoded_size() * usize::from(piece_offset)..];
let record_metadata = RecordMetadata::decode(&mut &*record_metadata_bytes).expect(
"Length is correct and checked above, contents doesn't have specific structure to \
it; qed",
);
Ok(record_metadata)
}
/// Read piece from sector
pub fn | <PosTable>(
piece_offset: PieceOffset,
sector_id: &SectorId,
sector_metadata: &SectorMetadataChecksummed,
sector: &[u8],
erasure_coding: &ErasureCoding,
table_generator: &mut PosTable::Generator,
) -> Result<Piece, ReadingError>
where
PosTable: Table,
{
let pieces_in_sector = sector_metadata.pieces_in_sector;
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_contents_map = {
SectorContentsMap::from_bytes(
§or[..SectorContentsMap::encoded_size(pieces_in_sector)],
pieces_in_sector,
)?
};
// Restore source record scalars
let record_chunks = recover_source_record_chunks(
&*read_sector_record_chunks(
piece_offset,
pieces_in_sector,
§or_metadata.s_bucket_offsets(),
& | read_piece | identifier_name |
reading.rs | , RecordWitness, SBucket, SectorId,
};
use subspace_erasure_coding::ErasureCoding;
use subspace_proof_of_space::{Quality, Table, TableGenerator};
use thiserror::Error;
use tracing::debug;
/// Errors that happen during reading
#[derive(Debug, Error)]
pub enum ReadingError {
/// Wrong sector size
#[error("Wrong sector size: expected {expected}, actual {actual}")]
WrongSectorSize {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to read chunk.
///
/// This is an implementation bug, most likely due to mismatch between sector contents map and
/// other farming parameters.
#[error("Failed to read chunk at location {chunk_location}")]
FailedToReadChunk {
/// Chunk location
chunk_location: usize,
},
/// Invalid chunk, possible disk corruption
#[error(
"Invalid chunk at location {chunk_location} s-bucket {s_bucket} encoded \
{encoded_chunk_used}, possible disk corruption: {error}"
)]
InvalidChunk {
/// S-bucket
s_bucket: SBucket,
/// Indicates whether chunk was encoded
encoded_chunk_used: bool,
/// Chunk location
chunk_location: usize,
/// Lower-level error
error: String,
},
/// Failed to erasure-decode record
#[error("Failed to erasure-decode record at offset {piece_offset}: {error}")]
FailedToErasureDecodeRecord {
/// Piece offset
piece_offset: PieceOffset,
/// Lower-level error
error: String,
},
/// Wrong record size after decoding
#[error("Wrong record size after decoding: expected {expected}, actual {actual}")]
WrongRecordSizeAfterDecoding {
/// Expected size in bytes
expected: usize,
/// Actual size in bytes
actual: usize,
},
/// Failed to decode sector contents map
#[error("Failed to decode sector contents map: {0}")]
FailedToDecodeSectorContentsMap(#[from] SectorContentsMapFromBytesError),
/// Checksum mismatch
#[error("Checksum mismatch")]
ChecksumMismatch,
}
/// Record contained in the plot
#[derive(Debug, Clone)]
pub struct PlotRecord {
/// Record scalars
pub scalars: Box<[Scalar; Record::NUM_CHUNKS]>,
/// Record commitment
pub commitment: RecordCommitment,
/// Record witness
pub witness: RecordWitness,
}
/// Read sector record chunks, only plotted s-buckets are returned (in decoded form)
pub fn read_sector_record_chunks<PosTable>(
piece_offset: PieceOffset,
pieces_in_sector: u16,
s_bucket_offsets: &[u32; Record::NUM_S_BUCKETS],
sector_contents_map: &SectorContentsMap,
pos_table: &PosTable,
sector: &[u8],
) -> Result<Box<[Option<Scalar>; Record::NUM_S_BUCKETS]>, ReadingError>
where
PosTable: Table,
{
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let mut record_chunks = vec![None; Record::NUM_S_BUCKETS];
record_chunks
.par_iter_mut()
.zip(sector_contents_map.par_iter_record_chunk_to_plot(piece_offset))
.zip(
(u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
.into_par_iter()
.map(SBucket::from)
.zip(s_bucket_offsets.par_iter()),
)
.try_for_each(
|((maybe_record_chunk, maybe_chunk_details), (s_bucket, &s_bucket_offset))| {
let (chunk_offset, encoded_chunk_used) = match maybe_chunk_details {
Some(chunk_details) => chunk_details,
None => {
return Ok(());
}
};
let chunk_location = chunk_offset + s_bucket_offset as usize;
let mut record_chunk = sector[SectorContentsMap::encoded_size(pieces_in_sector)..]
.array_chunks::<{ Scalar::FULL_BYTES }>()
.nth(chunk_location)
.copied()
.ok_or(ReadingError::FailedToReadChunk { chunk_location })?;
// Decode chunk if necessary
if encoded_chunk_used {
let quality = pos_table
.find_quality(s_bucket.into())
.expect("encoded_chunk_used implies quality exists for this chunk; qed");
record_chunk = Simd::to_array(
Simd::from(record_chunk) ^ Simd::from(quality.create_proof().hash()),
);
}
maybe_record_chunk.replace(Scalar::try_from(record_chunk).map_err(|error| {
ReadingError::InvalidChunk {
s_bucket,
encoded_chunk_used,
chunk_location,
error,
}
})?);
Ok::<_, ReadingError>(())
},
)?;
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, layout is exactly what we need here
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Option<Scalar>; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
/// Given sector record chunks recover extended record chunks (both source and parity)
pub fn recover_extended_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<Box<[Scalar; Record::NUM_S_BUCKETS]>, ReadingError> {
// Restore source record scalars
let record_chunks = erasure_coding
.recover(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_S_BUCKETS {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_S_BUCKETS,
actual: record_chunks.len(),
});
}
let mut record_chunks = ManuallyDrop::new(record_chunks);
// SAFETY: Original memory is not dropped, size of the data checked above
let record_chunks = unsafe {
Box::from_raw(record_chunks.as_mut_ptr() as *mut [Scalar; Record::NUM_S_BUCKETS])
};
Ok(record_chunks)
}
/// Given sector record chunks recover source record chunks in form of an iterator.
pub fn recover_source_record_chunks(
sector_record_chunks: &[Option<Scalar>; Record::NUM_S_BUCKETS],
piece_offset: PieceOffset,
erasure_coding: &ErasureCoding,
) -> Result<impl ExactSizeIterator<Item = Scalar>, ReadingError> |
/// Read metadata (commitment and witness) for record
pub(crate) fn read_record_metadata(
piece_offset: PieceOffset,
pieces_in_sector: u16,
sector: &[u8],
) -> Result<RecordMetadata, ReadingError> {
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_metadata_start = SectorContentsMap::encoded_size(pieces_in_sector)
+ sector_record_chunks_size(pieces_in_sector);
// Move to the beginning of the commitment and witness we care about
let record_metadata_bytes = §or[sector_metadata_start..]
[RecordMetadata::encoded_size() * usize::from(piece_offset)..];
let record_metadata = RecordMetadata::decode(&mut &*record_metadata_bytes).expect(
"Length is correct and checked above, contents doesn't have specific structure to \
it; qed",
);
Ok(record_metadata)
}
/// Read piece from sector
pub fn read_piece<PosTable>(
piece_offset: PieceOffset,
sector_id: &SectorId,
sector_metadata: &SectorMetadataChecksummed,
sector: &[u8],
erasure_coding: &ErasureCoding,
table_generator: &mut PosTable::Generator,
) -> Result<Piece, ReadingError>
where
PosTable: Table,
{
let pieces_in_sector = sector_metadata.pieces_in_sector;
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
actual: sector.len(),
});
}
let sector_contents_map = {
SectorContentsMap::from_bytes(
§or[..SectorContentsMap::encoded_size(pieces_in_sector)],
pieces_in_sector,
)?
};
// Restore source record scalars
let record_chunks = recover_source_record_chunks(
&*read_sector_record_chunks(
piece_offset,
pieces_in_sector,
§or_metadata.s_bucket_offsets(),
| {
// Restore source record scalars
let record_chunks = erasure_coding
.recover_source(sector_record_chunks)
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
})?;
// Required for safety invariant below
if record_chunks.len() != Record::NUM_CHUNKS {
return Err(ReadingError::WrongRecordSizeAfterDecoding {
expected: Record::NUM_CHUNKS,
actual: record_chunks.len(),
});
}
Ok(record_chunks)
} | identifier_body |
api_op_UpdateMatchmakingConfiguration.go | the matchmaking configuration to update. You can use
// either the configuration name or ARN value.
//
// This member is required.
Name *string
// A flag that indicates whether a match that was created with this configuration
// must be accepted by the matched players. To require acceptance, set to TRUE.
// With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE
// to indicate when a completed potential match is waiting for player acceptance.
AcceptanceRequired *bool
// The length of time (in seconds) to wait for players to accept a proposed match,
// if acceptance is required.
AcceptanceTimeoutSeconds *int32
// The number of player slots in a match to keep open for future players. For
// example, if the configuration's rule set specifies a match for a single
// 10-person team, and the additional player count is set to 2, 10 players will be
// selected for the match and 2 more player slots will be open for future players.
// This parameter is not used if FlexMatchMode is set to STANDALONE .
AdditionalPlayerCount *int32
// The method that is used to backfill game sessions created with this matchmaking
// configuration. Specify MANUAL when your game manages backfill requests manually
// or does not use the match backfill feature. Specify AUTOMATIC to have GameLift
// create a match backfill request whenever a game session has one or more open
// slots. Learn more about manual and automatic backfill in Backfill Existing
// Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html)
// . Automatic backfill is not available when FlexMatchMode is set to STANDALONE .
BackfillMode types.BackfillMode
// Information to add to all events related to the matchmaking configuration.
CustomEventData *string
// A description for the matchmaking configuration.
Description *string
// Indicates whether this matchmaking configuration is being used with Amazon
// GameLift hosting or as a standalone matchmaking solution.
// - STANDALONE - FlexMatch forms matches and returns match information,
// including players and team assignments, in a MatchmakingSucceeded (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded)
// event.
// - WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift
// queue to start a game session for the match.
FlexMatchMode types.FlexMatchMode
// A set of custom properties for a game session, formatted as key:value pairs.
// These properties are passed to a game server process with a request to start a
// new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the new GameSession object that is created for
// a successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameProperties []types.GameProperty
// A set of custom game session properties, formatted as a single string value.
// This data is passed to a game server process with a request to start a new game
// session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the game session that is created for a
// successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameSessionData *string
// The Amazon Resource Name ( ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)
// ) that is assigned to an Amazon GameLift game session queue resource and uniquely
// identifies it. ARNs are unique across all Regions. Format is
// arn:aws:gamelift:::gamesessionqueue/ . Queues can be located in any Region.
// Queues are used to start new Amazon GameLift-hosted game sessions for matches
// that are created with this matchmaking configuration. If FlexMatchMode is set
// to STANDALONE , do not set this parameter.
GameSessionQueueArns []string
// An SNS topic ARN that is set up to receive matchmaking notifications. See
// Setting up notifications for matchmaking (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html)
// for more information.
NotificationTarget *string
// The maximum duration, in seconds, that a matchmaking ticket can remain in
// process before timing out. Requests that fail due to timing out can be
// resubmitted as needed.
RequestTimeoutSeconds *int32
// A unique identifier for the matchmaking rule set to use with this
// configuration. You can use either the rule set name or ARN value. A matchmaking
// configuration can only use rule sets that are defined in the same Region.
RuleSetName *string
noSmithyDocumentSerde
}
type UpdateMatchmakingConfigurationOutput struct {
// The updated matchmaking configuration.
Configuration *types.MatchmakingConfiguration
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationUpdateMatchmakingConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addUpdateMatchmakingConfigurationResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpUpdateMatchmakingConfigurationValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "gamelift",
OperationName: "UpdateMatchmakingConfiguration",
}
}
type opUpdateMatchmakingConfigurationResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opUpdateMatchmakingConfigurationResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opUpdateMatchmakingConfigurationResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) | {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) | identifier_body |
|
api_op_UpdateMatchmakingConfiguration.go | identifier for the matchmaking configuration to update. You can use
// either the configuration name or ARN value.
//
// This member is required.
Name *string
// A flag that indicates whether a match that was created with this configuration
// must be accepted by the matched players. To require acceptance, set to TRUE.
// With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE
// to indicate when a completed potential match is waiting for player acceptance.
AcceptanceRequired *bool
// The length of time (in seconds) to wait for players to accept a proposed match,
// if acceptance is required.
AcceptanceTimeoutSeconds *int32
// The number of player slots in a match to keep open for future players. For
// example, if the configuration's rule set specifies a match for a single
// 10-person team, and the additional player count is set to 2, 10 players will be
// selected for the match and 2 more player slots will be open for future players.
// This parameter is not used if FlexMatchMode is set to STANDALONE .
AdditionalPlayerCount *int32
// The method that is used to backfill game sessions created with this matchmaking
// configuration. Specify MANUAL when your game manages backfill requests manually
// or does not use the match backfill feature. Specify AUTOMATIC to have GameLift
// create a match backfill request whenever a game session has one or more open
// slots. Learn more about manual and automatic backfill in Backfill Existing
// Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html)
// . Automatic backfill is not available when FlexMatchMode is set to STANDALONE .
BackfillMode types.BackfillMode
// Information to add to all events related to the matchmaking configuration.
CustomEventData *string
// A description for the matchmaking configuration.
Description *string
// Indicates whether this matchmaking configuration is being used with Amazon
// GameLift hosting or as a standalone matchmaking solution.
// - STANDALONE - FlexMatch forms matches and returns match information,
// including players and team assignments, in a MatchmakingSucceeded (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded)
// event.
// - WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift
// queue to start a game session for the match.
FlexMatchMode types.FlexMatchMode
// A set of custom properties for a game session, formatted as key:value pairs.
// These properties are passed to a game server process with a request to start a
// new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the new GameSession object that is created for
// a successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameProperties []types.GameProperty
// A set of custom game session properties, formatted as a single string value.
// This data is passed to a game server process with a request to start a new game
// session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the game session that is created for a
// successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameSessionData *string
// The Amazon Resource Name ( ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)
// ) that is assigned to an Amazon GameLift game session queue resource and uniquely
// identifies it. ARNs are unique across all Regions. Format is
// arn:aws:gamelift:::gamesessionqueue/ . Queues can be located in any Region.
// Queues are used to start new Amazon GameLift-hosted game sessions for matches
// that are created with this matchmaking configuration. If FlexMatchMode is set
// to STANDALONE , do not set this parameter.
GameSessionQueueArns []string
// An SNS topic ARN that is set up to receive matchmaking notifications. See
// Setting up notifications for matchmaking (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html)
// for more information.
NotificationTarget *string
// The maximum duration, in seconds, that a matchmaking ticket can remain in
// process before timing out. Requests that fail due to timing out can be
// resubmitted as needed.
RequestTimeoutSeconds *int32
// A unique identifier for the matchmaking rule set to use with this
// configuration. You can use either the rule set name or ARN value. A matchmaking
// configuration can only use rule sets that are defined in the same Region.
RuleSetName *string
noSmithyDocumentSerde
}
type UpdateMatchmakingConfigurationOutput struct {
// The updated matchmaking configuration.
Configuration *types.MatchmakingConfiguration
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationUpdateMatchmakingConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addUpdateMatchmakingConfigurationResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpUpdateMatchmakingConfigurationValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func | (region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "gamelift",
OperationName: "UpdateMatchmakingConfiguration",
}
}
type opUpdateMatchmakingConfigurationResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opUpdateMatchmakingConfigurationResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opUpdateMatchmakingConfigurationResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, | newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration | identifier_name |
api_op_UpdateMatchmakingConfiguration.go | identifier for the matchmaking configuration to update. You can use
// either the configuration name or ARN value.
//
// This member is required.
Name *string
// A flag that indicates whether a match that was created with this configuration
// must be accepted by the matched players. To require acceptance, set to TRUE.
// With this option enabled, matchmaking tickets use the status REQUIRES_ACCEPTANCE
// to indicate when a completed potential match is waiting for player acceptance.
AcceptanceRequired *bool
// The length of time (in seconds) to wait for players to accept a proposed match,
// if acceptance is required.
AcceptanceTimeoutSeconds *int32
// The number of player slots in a match to keep open for future players. For
// example, if the configuration's rule set specifies a match for a single
// 10-person team, and the additional player count is set to 2, 10 players will be
// selected for the match and 2 more player slots will be open for future players.
// This parameter is not used if FlexMatchMode is set to STANDALONE .
AdditionalPlayerCount *int32
// The method that is used to backfill game sessions created with this matchmaking
// configuration. Specify MANUAL when your game manages backfill requests manually
// or does not use the match backfill feature. Specify AUTOMATIC to have GameLift
// create a match backfill request whenever a game session has one or more open
// slots. Learn more about manual and automatic backfill in Backfill Existing
// Games with FlexMatch (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-backfill.html)
// . Automatic backfill is not available when FlexMatchMode is set to STANDALONE .
BackfillMode types.BackfillMode
// Information to add to all events related to the matchmaking configuration.
CustomEventData *string
// A description for the matchmaking configuration.
Description *string
// Indicates whether this matchmaking configuration is being used with Amazon
// GameLift hosting or as a standalone matchmaking solution.
// - STANDALONE - FlexMatch forms matches and returns match information,
// including players and team assignments, in a MatchmakingSucceeded (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded)
// event.
// - WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift
// queue to start a game session for the match.
FlexMatchMode types.FlexMatchMode
// A set of custom properties for a game session, formatted as key:value pairs.
// These properties are passed to a game server process with a request to start a
// new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the new GameSession object that is created for
// a successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameProperties []types.GameProperty
// A set of custom game session properties, formatted as a single string value.
// This data is passed to a game server process with a request to start a new game
// session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the game session that is created for a
// successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameSessionData *string
// The Amazon Resource Name ( ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)
// ) that is assigned to an Amazon GameLift game session queue resource and uniquely
// identifies it. ARNs are unique across all Regions. Format is
// arn:aws:gamelift:::gamesessionqueue/ . Queues can be located in any Region.
// Queues are used to start new Amazon GameLift-hosted game sessions for matches
// that are created with this matchmaking configuration. If FlexMatchMode is set
// to STANDALONE , do not set this parameter.
GameSessionQueueArns []string
// An SNS topic ARN that is set up to receive matchmaking notifications. See
// Setting up notifications for matchmaking (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html)
// for more information.
NotificationTarget *string
// The maximum duration, in seconds, that a matchmaking ticket can remain in
// process before timing out. Requests that fail due to timing out can be
// resubmitted as needed.
RequestTimeoutSeconds *int32
// A unique identifier for the matchmaking rule set to use with this
// configuration. You can use either the rule set name or ARN value. A matchmaking
// configuration can only use rule sets that are defined in the same Region.
RuleSetName *string
noSmithyDocumentSerde
}
type UpdateMatchmakingConfigurationOutput struct {
// The updated matchmaking configuration.
Configuration *types.MatchmakingConfiguration
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationUpdateMatchmakingConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err | }
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addUpdateMatchmakingConfigurationResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpUpdateMatchmakingConfigurationValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "gamelift",
OperationName: "UpdateMatchmakingConfiguration",
}
}
type opUpdateMatchmakingConfigurationResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opUpdateMatchmakingConfigurationResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opUpdateMatchmakingConfigurationResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(&params)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
| random_line_split |
|
api_op_UpdateMatchmakingConfiguration.go | assignments, in a MatchmakingSucceeded (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-events.html#match-events-matchmakingsucceeded)
// event.
// - WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift
// queue to start a game session for the match.
FlexMatchMode types.FlexMatchMode
// A set of custom properties for a game session, formatted as key:value pairs.
// These properties are passed to a game server process with a request to start a
// new game session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the new GameSession object that is created for
// a successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameProperties []types.GameProperty
// A set of custom game session properties, formatted as a single string value.
// This data is passed to a game server process with a request to start a new game
// session (see Start a Game Session (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-api.html#gamelift-sdk-server-startsession)
// ). This information is added to the game session that is created for a
// successful match. This parameter is not used if FlexMatchMode is set to
// STANDALONE .
GameSessionData *string
// The Amazon Resource Name ( ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)
// ) that is assigned to an Amazon GameLift game session queue resource and uniquely
// identifies it. ARNs are unique across all Regions. Format is
// arn:aws:gamelift:::gamesessionqueue/ . Queues can be located in any Region.
// Queues are used to start new Amazon GameLift-hosted game sessions for matches
// that are created with this matchmaking configuration. If FlexMatchMode is set
// to STANDALONE , do not set this parameter.
GameSessionQueueArns []string
// An SNS topic ARN that is set up to receive matchmaking notifications. See
// Setting up notifications for matchmaking (https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html)
// for more information.
NotificationTarget *string
// The maximum duration, in seconds, that a matchmaking ticket can remain in
// process before timing out. Requests that fail due to timing out can be
// resubmitted as needed.
RequestTimeoutSeconds *int32
// A unique identifier for the matchmaking rule set to use with this
// configuration. You can use either the rule set name or ARN value. A matchmaking
// configuration can only use rule sets that are defined in the same Region.
RuleSetName *string
noSmithyDocumentSerde
}
type UpdateMatchmakingConfigurationOutput struct {
// The updated matchmaking configuration.
Configuration *types.MatchmakingConfiguration
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationUpdateMatchmakingConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateMatchmakingConfiguration{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addUpdateMatchmakingConfigurationResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpUpdateMatchmakingConfigurationValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opUpdateMatchmakingConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "gamelift",
OperationName: "UpdateMatchmakingConfiguration",
}
}
type opUpdateMatchmakingConfigurationResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opUpdateMatchmakingConfigurationResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opUpdateMatchmakingConfigurationResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(&params)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "gamelift"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "gamelift"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil | {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
} | conditional_block |
|
graph.rs | Option<Box<Fn(f64) -> [String; 4]>>,
labels_layout_width: i32,
}
impl Graph {
// If `max` is `None`, the graph will expect values between 0 and 1.
//
// If `keep_max` is set to `true`, then this value will never go down, meaning that graphs
// won't rescale down. It is not taken into account if `max` is `None`.
pub fn new(max: Option<f64>, keep_max: bool) -> Graph {
let g = Graph {
elapsed: Instant::now(),
colors: vec!(),
data: vec!(),
vertical_layout: gtk::Box::new(gtk::Orientation::Vertical, 0),
scroll_layout: gtk::ScrolledWindow::new(None, None),
horizontal_layout: gtk::Box::new(gtk::Orientation::Horizontal, 0),
area: DrawingArea::new(),
max: if let Some(max) = max { Some(RefCell::new(max)) } else { None },
keep_max,
display_labels: RefCell::new(true),
initial_diff: None,
label_callbacks: None,
labels_layout_width: 80,
};
g.scroll_layout.set_min_content_width(g.labels_layout_width);
g.scroll_layout.add(&g.vertical_layout);
g.horizontal_layout.pack_start(&g.area, true, true, 0);
g.horizontal_layout.pack_start(&g.scroll_layout, false, true, 10);
g.horizontal_layout.set_margin_left(5);
g
}
/// Changes the size of the layout containing labels (the one on the right).
pub fn set_labels_width(&mut self, labels_layout_width: u32) {
self.scroll_layout.set_min_content_width(labels_layout_width as i32);
self.labels_layout_width = labels_layout_width as i32;
}
pub fn set_label_callbacks(&mut self, label_callbacks: Option<Box<Fn(f64) -> [String; 4]>>) {
self.label_callbacks = label_callbacks;
}
pub fn set_display_labels(&self, display_labels: bool) {
*self.display_labels.borrow_mut() = display_labels;
if display_labels == true {
self.scroll_layout.show_all();
} else {
self.scroll_layout.hide();
}
self.invalidate();
}
pub fn hide(&self) {
self.horizontal_layout.hide();
}
pub fn show_all(&self) {
self.horizontal_layout.show_all();
if *self.display_labels.borrow() == false {
self.scroll_layout.hide();
}
}
pub fn attach_to(&self, to: &gtk::Box) {
to.add(&self.horizontal_layout);
}
pub fn | (&mut self, d: RotateVec<f64>, s: &str, override_color: Option<usize>) {
let c = if let Some(over) = override_color {
Color::generate(over)
} else {
Color::generate(self.data.len() + 11)
};
let l = gtk::Label::new(Some(s));
l.override_color(StateFlags::from_bits(0).expect("from_bits failed"), &c.to_gdk());
self.vertical_layout.add(&l);
self.colors.push(c);
self.data.push(d);
}
fn draw_labels(&self, c: &cairo::Context, max: f64, height: f64) {
if let Some(ref call) = self.label_callbacks {
let entries = call(max);
let font_size = 8.;
c.set_source_rgb(0., 0., 0.);
c.set_font_size(font_size);
c.move_to(LEFT_WIDTH - 4. - entries[0].len() as f64 * 4., font_size);
c.show_text(entries[0].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[1].len() as f64 * 4., height / 2.);
c.show_text(entries[1].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[2].len() as f64 * 4., height - 2.);
c.show_text(entries[2].as_str());
c.move_to(font_size - 1., height / 2. + 4. * (entries[3].len() >> 1) as f64);
c.rotate(-1.5708);
c.show_text(entries[3].as_str());
}
}
pub fn draw(&self, c: &cairo::Context, width: f64, height: f64) {
let x_start = if self.label_callbacks.is_some() {
LEFT_WIDTH
} else {
1.0
};
c.set_source_rgb(0.95, 0.95, 0.95);
c.rectangle(x_start, 1.0, width - 1.0, height - 2.0);
c.fill();
c.set_source_rgb(0.0, 0.0, 0.0);
c.set_line_width(1.0);
c.move_to(x_start, 0.0);
c.line_to(x_start, height);
c.move_to(width, 0.0);
c.line_to(width, height);
c.move_to(x_start, 0.0);
c.line_to(width, 0.0);
c.move_to(x_start, height);
c.line_to(width, height);
// For now it's always 60 seconds.
let time = 60.;
let elapsed = self.elapsed.elapsed().as_secs() % 5;
let x_step = (width - 2.0 - x_start) * 5.0 / (time as f64);
let mut current = width - elapsed as f64 * (x_step / 5.0) - 1.0;
if x_step < 0.1 {
c.stroke();
return;
}
while current > x_start {
c.move_to(current, 0.0);
c.line_to(current, height);
current -= x_step;
}
let step = height / 10.0;
current = step - 1.0;
while current < height - 1. {
c.move_to(x_start, current);
c.line_to(width - 1.0, current);
current += step;
}
c.stroke();
if let Some(ref self_max) = self.max {
let mut max = if self.keep_max { *self_max.borrow() } else { 1. };
let len = self.data[0].len() - 1;
for x in 0..len {
for entry in &self.data {
if entry[x] > max {
max = entry[x];
}
}
}
if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / len as f64;
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] / max * (height - 1.0));
c.line_to(current, height - entry[index] / max * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
}
if max > *self_max.borrow() || !self.keep_max {
*self_max.borrow_mut() = max;
}
self.draw_labels(c, max, height);
} else if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / (len as f64);
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] * (height - 1.0));
c.line_to(current, height - entry[index] * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
// To be called last to avoid having to restore state (rotation).
self.draw_labels(c, 100., height);
}
}
pub fn invalidate(&self) {
if let Some(t_win) = self.area.get_window() {
let (x, y) = self.area.translate_coordinates(&self.area, 0, 0)
.expect("translate_coordinates failed");
let rect = gdk::Rectangle { x: x, y: y,
width: self.area.get_allocated_width(), height: self.area.get_allocated_height() };
t_win.invalidate_rect(&rect, true);
}
}
pub fn send_size_request(&self, width: Option<i3 | push | identifier_name |
graph.rs | : Option<Box<Fn(f64) -> [String; 4]>>,
labels_layout_width: i32,
}
impl Graph {
// If `max` is `None`, the graph will expect values between 0 and 1.
//
// If `keep_max` is set to `true`, then this value will never go down, meaning that graphs
// won't rescale down. It is not taken into account if `max` is `None`.
pub fn new(max: Option<f64>, keep_max: bool) -> Graph {
let g = Graph {
elapsed: Instant::now(),
colors: vec!(),
data: vec!(),
vertical_layout: gtk::Box::new(gtk::Orientation::Vertical, 0),
scroll_layout: gtk::ScrolledWindow::new(None, None),
horizontal_layout: gtk::Box::new(gtk::Orientation::Horizontal, 0),
area: DrawingArea::new(),
max: if let Some(max) = max { Some(RefCell::new(max)) } else { None },
keep_max,
display_labels: RefCell::new(true),
initial_diff: None,
label_callbacks: None,
labels_layout_width: 80,
};
g.scroll_layout.set_min_content_width(g.labels_layout_width);
g.scroll_layout.add(&g.vertical_layout);
g.horizontal_layout.pack_start(&g.area, true, true, 0);
g.horizontal_layout.pack_start(&g.scroll_layout, false, true, 10);
g.horizontal_layout.set_margin_left(5);
g
}
/// Changes the size of the layout containing labels (the one on the right).
pub fn set_labels_width(&mut self, labels_layout_width: u32) {
self.scroll_layout.set_min_content_width(labels_layout_width as i32);
self.labels_layout_width = labels_layout_width as i32;
}
pub fn set_label_callbacks(&mut self, label_callbacks: Option<Box<Fn(f64) -> [String; 4]>>) {
self.label_callbacks = label_callbacks;
}
pub fn set_display_labels(&self, display_labels: bool) {
*self.display_labels.borrow_mut() = display_labels;
if display_labels == true {
self.scroll_layout.show_all();
} else {
self.scroll_layout.hide();
}
self.invalidate();
}
pub fn hide(&self) {
self.horizontal_layout.hide();
}
pub fn show_all(&self) {
self.horizontal_layout.show_all();
if *self.display_labels.borrow() == false {
self.scroll_layout.hide();
}
}
pub fn attach_to(&self, to: &gtk::Box) {
to.add(&self.horizontal_layout);
}
pub fn push(&mut self, d: RotateVec<f64>, s: &str, override_color: Option<usize>) {
let c = if let Some(over) = override_color {
Color::generate(over)
} else {
Color::generate(self.data.len() + 11)
};
let l = gtk::Label::new(Some(s));
l.override_color(StateFlags::from_bits(0).expect("from_bits failed"), &c.to_gdk());
self.vertical_layout.add(&l);
self.colors.push(c);
self.data.push(d);
}
fn draw_labels(&self, c: &cairo::Context, max: f64, height: f64) {
if let Some(ref call) = self.label_callbacks {
let entries = call(max);
let font_size = 8.;
c.set_source_rgb(0., 0., 0.);
c.set_font_size(font_size);
c.move_to(LEFT_WIDTH - 4. - entries[0].len() as f64 * 4., font_size);
c.show_text(entries[0].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[1].len() as f64 * 4., height / 2.);
c.show_text(entries[1].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[2].len() as f64 * 4., height - 2.);
c.show_text(entries[2].as_str());
c.move_to(font_size - 1., height / 2. + 4. * (entries[3].len() >> 1) as f64);
c.rotate(-1.5708);
c.show_text(entries[3].as_str());
}
}
pub fn draw(&self, c: &cairo::Context, width: f64, height: f64) {
let x_start = if self.label_callbacks.is_some() {
LEFT_WIDTH
} else {
1.0
};
c.set_source_rgb(0.95, 0.95, 0.95);
c.rectangle(x_start, 1.0, width - 1.0, height - 2.0);
c.fill();
c.set_source_rgb(0.0, 0.0, 0.0);
c.set_line_width(1.0);
c.move_to(x_start, 0.0);
c.line_to(x_start, height);
c.move_to(width, 0.0);
c.line_to(width, height);
c.move_to(x_start, 0.0);
c.line_to(width, 0.0);
c.move_to(x_start, height);
c.line_to(width, height);
// For now it's always 60 seconds.
let time = 60.;
let elapsed = self.elapsed.elapsed().as_secs() % 5;
let x_step = (width - 2.0 - x_start) * 5.0 / (time as f64);
let mut current = width - elapsed as f64 * (x_step / 5.0) - 1.0;
if x_step < 0.1 {
c.stroke();
return;
}
while current > x_start {
c.move_to(current, 0.0);
c.line_to(current, height);
current -= x_step;
}
let step = height / 10.0;
current = step - 1.0;
while current < height - 1. {
c.move_to(x_start, current);
c.line_to(width - 1.0, current);
current += step;
}
c.stroke();
if let Some(ref self_max) = self.max {
let mut max = if self.keep_max { *self_max.borrow() } else { 1. };
let len = self.data[0].len() - 1;
for x in 0..len {
for entry in &self.data {
if entry[x] > max {
max = entry[x];
}
}
}
if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / len as f64;
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] / max * (height - 1.0));
c.line_to(current, height - entry[index] / max * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
}
if max > *self_max.borrow() || !self.keep_max {
*self_max.borrow_mut() = max;
}
self.draw_labels(c, max, height);
} else if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / (len as f64);
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] * (height - 1.0));
c.line_to(current, height - entry[index] * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
// To be called last to avoid having to restore state (rotation).
self.draw_labels(c, 100., height);
} | pub fn invalidate(&self) {
if let Some(t_win) = self.area.get_window() {
let (x, y) = self.area.translate_coordinates(&self.area, 0, 0)
.expect("translate_coordinates failed");
let rect = gdk::Rectangle { x: x, y: y,
width: self.area.get_allocated_width(), height: self.area.get_allocated_height() };
t_win.invalidate_rect(&rect, true);
}
}
pub fn send_size_request(&self, width: Option<i3 | }
| random_line_split |
graph.rs | label_callbacks: Option<Box<Fn(f64) -> [String; 4]>>) {
self.label_callbacks = label_callbacks;
}
pub fn set_display_labels(&self, display_labels: bool) {
*self.display_labels.borrow_mut() = display_labels;
if display_labels == true {
self.scroll_layout.show_all();
} else {
self.scroll_layout.hide();
}
self.invalidate();
}
pub fn hide(&self) {
self.horizontal_layout.hide();
}
pub fn show_all(&self) {
self.horizontal_layout.show_all();
if *self.display_labels.borrow() == false {
self.scroll_layout.hide();
}
}
pub fn attach_to(&self, to: &gtk::Box) {
to.add(&self.horizontal_layout);
}
pub fn push(&mut self, d: RotateVec<f64>, s: &str, override_color: Option<usize>) {
let c = if let Some(over) = override_color {
Color::generate(over)
} else {
Color::generate(self.data.len() + 11)
};
let l = gtk::Label::new(Some(s));
l.override_color(StateFlags::from_bits(0).expect("from_bits failed"), &c.to_gdk());
self.vertical_layout.add(&l);
self.colors.push(c);
self.data.push(d);
}
fn draw_labels(&self, c: &cairo::Context, max: f64, height: f64) {
if let Some(ref call) = self.label_callbacks {
let entries = call(max);
let font_size = 8.;
c.set_source_rgb(0., 0., 0.);
c.set_font_size(font_size);
c.move_to(LEFT_WIDTH - 4. - entries[0].len() as f64 * 4., font_size);
c.show_text(entries[0].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[1].len() as f64 * 4., height / 2.);
c.show_text(entries[1].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[2].len() as f64 * 4., height - 2.);
c.show_text(entries[2].as_str());
c.move_to(font_size - 1., height / 2. + 4. * (entries[3].len() >> 1) as f64);
c.rotate(-1.5708);
c.show_text(entries[3].as_str());
}
}
pub fn draw(&self, c: &cairo::Context, width: f64, height: f64) {
let x_start = if self.label_callbacks.is_some() {
LEFT_WIDTH
} else {
1.0
};
c.set_source_rgb(0.95, 0.95, 0.95);
c.rectangle(x_start, 1.0, width - 1.0, height - 2.0);
c.fill();
c.set_source_rgb(0.0, 0.0, 0.0);
c.set_line_width(1.0);
c.move_to(x_start, 0.0);
c.line_to(x_start, height);
c.move_to(width, 0.0);
c.line_to(width, height);
c.move_to(x_start, 0.0);
c.line_to(width, 0.0);
c.move_to(x_start, height);
c.line_to(width, height);
// For now it's always 60 seconds.
let time = 60.;
let elapsed = self.elapsed.elapsed().as_secs() % 5;
let x_step = (width - 2.0 - x_start) * 5.0 / (time as f64);
let mut current = width - elapsed as f64 * (x_step / 5.0) - 1.0;
if x_step < 0.1 {
c.stroke();
return;
}
while current > x_start {
c.move_to(current, 0.0);
c.line_to(current, height);
current -= x_step;
}
let step = height / 10.0;
current = step - 1.0;
while current < height - 1. {
c.move_to(x_start, current);
c.line_to(width - 1.0, current);
current += step;
}
c.stroke();
if let Some(ref self_max) = self.max {
let mut max = if self.keep_max { *self_max.borrow() } else { 1. };
let len = self.data[0].len() - 1;
for x in 0..len {
for entry in &self.data {
if entry[x] > max {
max = entry[x];
}
}
}
if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / len as f64;
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] / max * (height - 1.0));
c.line_to(current, height - entry[index] / max * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
}
if max > *self_max.borrow() || !self.keep_max {
*self_max.borrow_mut() = max;
}
self.draw_labels(c, max, height);
} else if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / (len as f64);
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] * (height - 1.0));
c.line_to(current, height - entry[index] * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
// To be called last to avoid having to restore state (rotation).
self.draw_labels(c, 100., height);
}
}
pub fn invalidate(&self) {
if let Some(t_win) = self.area.get_window() {
let (x, y) = self.area.translate_coordinates(&self.area, 0, 0)
.expect("translate_coordinates failed");
let rect = gdk::Rectangle { x: x, y: y,
width: self.area.get_allocated_width(), height: self.area.get_allocated_height() };
t_win.invalidate_rect(&rect, true);
}
}
pub fn send_size_request(&self, width: Option<i32>) {
let mut width = match width {
Some(w) => w,
None => {
if let Some(parent) = self.area.get_parent() {
parent.get_allocation().width -
parent.get_margin_left() - parent.get_margin_right()
} else {
eprintln!("<Graph::send_size_request> A parent is required if no width is \
provided...");
return;
}
}
};
// This condition is to avoid having a graph with a bigger width than the window.
if let Some(top) = self.area.get_toplevel() {
let max_width = top.get_allocation().width;
if width > max_width {
width = max_width;
}
}
self.area.set_size_request(
if *self.display_labels.borrow() == true {
width - if width >= self.labels_layout_width {
self.labels_layout_width
} else {
width
}
} else {
width
}, 200);
}
}
pub trait Connecter {
fn connect_to_window_events(&self);
}
impl Connecter for Rc<RefCell<Graph>> {
fn connect_to_window_events(&self) | {
let s = self.clone();
if let Some(parent) = self.borrow().horizontal_layout.get_toplevel() {
// TODO: ugly way to resize drawing area, I should find a better way
parent.connect_configure_event(move |w, _| {
let need_diff = s.borrow().initial_diff.is_none();
if need_diff {
let mut s = s.borrow_mut();
let parent_width = if let Some(p) = s.area.get_parent() {
p.get_allocation().width
} else {
0
};
s.initial_diff = Some(w.get_allocation().width - parent_width);
}
s.borrow().send_size_request(None);
false
});
} else {
eprintln!("This method needs to be called *after* it has been put inside a window"); | identifier_body |
|
graph.rs | _layout_width = labels_layout_width as i32;
}
pub fn set_label_callbacks(&mut self, label_callbacks: Option<Box<Fn(f64) -> [String; 4]>>) {
self.label_callbacks = label_callbacks;
}
pub fn set_display_labels(&self, display_labels: bool) {
*self.display_labels.borrow_mut() = display_labels;
if display_labels == true {
self.scroll_layout.show_all();
} else {
self.scroll_layout.hide();
}
self.invalidate();
}
pub fn hide(&self) {
self.horizontal_layout.hide();
}
pub fn show_all(&self) {
self.horizontal_layout.show_all();
if *self.display_labels.borrow() == false {
self.scroll_layout.hide();
}
}
pub fn attach_to(&self, to: &gtk::Box) {
to.add(&self.horizontal_layout);
}
pub fn push(&mut self, d: RotateVec<f64>, s: &str, override_color: Option<usize>) {
let c = if let Some(over) = override_color {
Color::generate(over)
} else {
Color::generate(self.data.len() + 11)
};
let l = gtk::Label::new(Some(s));
l.override_color(StateFlags::from_bits(0).expect("from_bits failed"), &c.to_gdk());
self.vertical_layout.add(&l);
self.colors.push(c);
self.data.push(d);
}
fn draw_labels(&self, c: &cairo::Context, max: f64, height: f64) {
if let Some(ref call) = self.label_callbacks {
let entries = call(max);
let font_size = 8.;
c.set_source_rgb(0., 0., 0.);
c.set_font_size(font_size);
c.move_to(LEFT_WIDTH - 4. - entries[0].len() as f64 * 4., font_size);
c.show_text(entries[0].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[1].len() as f64 * 4., height / 2.);
c.show_text(entries[1].as_str());
c.move_to(LEFT_WIDTH - 4. - entries[2].len() as f64 * 4., height - 2.);
c.show_text(entries[2].as_str());
c.move_to(font_size - 1., height / 2. + 4. * (entries[3].len() >> 1) as f64);
c.rotate(-1.5708);
c.show_text(entries[3].as_str());
}
}
pub fn draw(&self, c: &cairo::Context, width: f64, height: f64) {
let x_start = if self.label_callbacks.is_some() {
LEFT_WIDTH
} else {
1.0
};
c.set_source_rgb(0.95, 0.95, 0.95);
c.rectangle(x_start, 1.0, width - 1.0, height - 2.0);
c.fill();
c.set_source_rgb(0.0, 0.0, 0.0);
c.set_line_width(1.0);
c.move_to(x_start, 0.0);
c.line_to(x_start, height);
c.move_to(width, 0.0);
c.line_to(width, height);
c.move_to(x_start, 0.0);
c.line_to(width, 0.0);
c.move_to(x_start, height);
c.line_to(width, height);
// For now it's always 60 seconds.
let time = 60.;
let elapsed = self.elapsed.elapsed().as_secs() % 5;
let x_step = (width - 2.0 - x_start) * 5.0 / (time as f64);
let mut current = width - elapsed as f64 * (x_step / 5.0) - 1.0;
if x_step < 0.1 {
c.stroke();
return;
}
while current > x_start {
c.move_to(current, 0.0);
c.line_to(current, height);
current -= x_step;
}
let step = height / 10.0;
current = step - 1.0;
while current < height - 1. {
c.move_to(x_start, current);
c.line_to(width - 1.0, current);
current += step;
}
c.stroke();
if let Some(ref self_max) = self.max {
let mut max = if self.keep_max { *self_max.borrow() } else { 1. };
let len = self.data[0].len() - 1;
for x in 0..len {
for entry in &self.data {
if entry[x] > max {
max = entry[x];
}
}
}
if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / len as f64;
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] / max * (height - 1.0));
c.line_to(current, height - entry[index] / max * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
}
if max > *self_max.borrow() || !self.keep_max {
*self_max.borrow_mut() = max;
}
self.draw_labels(c, max, height);
} else if !self.data.is_empty() && !self.data[0].is_empty() {
let len = self.data[0].len() - 1;
let step = (width - 2.0 - x_start) / (len as f64);
current = x_start + 1.0;
let mut index = len;
while current > x_start && index > 0 {
for (entry, color) in self.data.iter().zip(self.colors.iter()) {
c.set_source_rgb(color.r, color.g, color.b);
c.move_to(current + step, height - entry[index - 1] * (height - 1.0));
c.line_to(current, height - entry[index] * (height - 1.0));
c.stroke();
}
current += step;
index -= 1;
}
// To be called last to avoid having to restore state (rotation).
self.draw_labels(c, 100., height);
}
}
pub fn invalidate(&self) {
if let Some(t_win) = self.area.get_window() {
let (x, y) = self.area.translate_coordinates(&self.area, 0, 0)
.expect("translate_coordinates failed");
let rect = gdk::Rectangle { x: x, y: y,
width: self.area.get_allocated_width(), height: self.area.get_allocated_height() };
t_win.invalidate_rect(&rect, true);
}
}
pub fn send_size_request(&self, width: Option<i32>) {
let mut width = match width {
Some(w) => w,
None => {
if let Some(parent) = self.area.get_parent() {
parent.get_allocation().width -
parent.get_margin_left() - parent.get_margin_right()
} else {
eprintln!("<Graph::send_size_request> A parent is required if no width is \
provided...");
return;
}
}
};
// This condition is to avoid having a graph with a bigger width than the window.
if let Some(top) = self.area.get_toplevel() {
let max_width = top.get_allocation().width;
if width > max_width {
width = max_width;
}
}
self.area.set_size_request(
if *self.display_labels.borrow() == true {
width - if width >= self.labels_layout_width {
self.labels_layout_width
} else {
width
}
} else {
width
}, 200);
}
}
pub trait Connecter {
fn connect_to_window_events(&self);
}
impl Connecter for Rc<RefCell<Graph>> {
fn connect_to_window_events(&self) {
let s = self.clone();
if let Some(parent) = self.borrow().horizontal_layout.get_toplevel() | {
// TODO: ugly way to resize drawing area, I should find a better way
parent.connect_configure_event(move |w, _| {
let need_diff = s.borrow().initial_diff.is_none();
if need_diff {
let mut s = s.borrow_mut();
let parent_width = if let Some(p) = s.area.get_parent() {
p.get_allocation().width
} else {
0
};
s.initial_diff = Some(w.get_allocation().width - parent_width);
}
s.borrow().send_size_request(None);
false
});
} | conditional_block |
|
train_and_deploy.py | -whole-word-masking')
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count() > 1:
torch.cuda.manual_seed_all(seed)
else:
|
torch.backends.cudnn.deterministic = True
# Converting the lines to BERT format
# Thanks to https://www.kaggle.com/httpwwwfszyc/bert-in-keras-taming
def convert_lines(example, max_seq_length, tokenizer):
max_seq_length -= 2
all_tokens = []
longer = 0
for text in tqdm(example):
tokens_a = tokenizer.tokenize(text)
if len(tokens_a) > max_seq_length:
tokens_a = tokens_a[:max_seq_length]
longer += 1
one_token = tokenizer.convert_tokens_to_ids(["[CLS]"]+tokens_a+["[SEP]"])+[0] * (max_seq_length - len(tokens_a))
all_tokens.append(one_token)
return np.array(all_tokens)
def loss_fn(preds, labels):
preds = preds.view(-1)
labels = labels.view(-1)
assert(preds.shape == labels.shape)
loss = nn.BCEWithLogitsLoss()(preds, labels)
return loss
def _average_gradients(model):
# Gradient averaging.
size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)
param.grad.data /= size
def train(train_loader, model, optimizer, is_distributed):
model.train()
avg_loss = 0.
avg_accuracy = 0.
tk0 = tqdm(enumerate(train_loader), total=len(train_loader), leave=False)
optimizer.zero_grad()
for i, (x_batch, y_batch) in tk0:
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
loss.backward()
if is_distributed and not use_cuda:
# average gradients manually for multi-machine cpu case only
_average_gradients(model)
optimizer.step()
optimizer.zero_grad()
avg_loss += loss.item() / len(train_loader)
avg_accuracy += torch.mean(
((torch.sigmoid(y_pred[0]) >= 0.5) == (y_batch >= 0.5).to(DEVICE)).to(torch.float)).item() / len(train_loader)
tk0.set_postfix(loss=loss.item(), avg_loss=avg_loss)
log = OrderedDict([('avg_loss', avg_loss), ('avg_acc', avg_accuracy)])
tk0.close()
return log
# Run validation
def evaluate(valid_loader, model):
model.eval()
avg_loss = 0.
valid_preds = []
valid_trues = []
with torch.no_grad():
tk0 = tqdm(valid_loader)
for i, (x_batch, y_batch) in enumerate(tk0):
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
avg_loss += loss.item() / len(valid_loader)
outputs_np = torch.sigmoid(y_pred[0]).cpu().detach().numpy()
targets_np = y_batch.unsqueeze(1).numpy()
valid_preds.append(outputs_np)
valid_trues.append(targets_np)
valid_preds = np.vstack(valid_preds)
valid_trues = np.vstack(valid_trues)
acc = accuracy_score((valid_trues >= 0.5), (valid_preds >= 0.5))
val_log = OrderedDict([('val_loss', avg_loss), ('val_acc', acc)])
tk0.close()
return val_log
if __name__ == '__main__':
# Receive hyperparameters passed via create-training-job API
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--learning-rate', type=float, default=5e-6)
parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
parser.add_argument('--backend', type=str, default=None,
help='backend for distributed training (tcp, gloo on cpu and gloo, nccl on gpu)')
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--val', type=str, default=os.environ.get('SM_CHANNEL_VAL'))
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
args = parser.parse_args()
# Set hyperparameters after parsing the arguments
batch_size = args.batch_size
lr = args.learning_rate
num_epochs = args.epochs
current_host = args.current_host
hosts = args.hosts
model_dir = args.model_dir
training_dir = args.train
val_dir = args.val
#is_distributed = len(args.hosts) > 1 and args.backend is not None
is_distributed = len(args.hosts) > 1 and args.backend is not None
if is_distributed:
# Initialize the distributed environment.
world_size = len(args.hosts)
os.environ['WORLD_SIZE'] = str(world_size)
host_rank = args.hosts.index(args.current_host)
os.environ['RANK'] = str(host_rank)
dist.init_process_group(backend=args.backend, rank=host_rank, world_size=world_size)
logger.info('Initialized the distributed environment: \'{}\' backend on {} nodes. '.format(
args.backend, dist.get_world_size()) + 'Current host rank is {}. Number of gpus: {}'.format(
dist.get_rank(), args.num_gpus))
# fix seed
seed_torch()
# Data loading
train_df = pd.read_csv(os.path.join(training_dir, 'train.tsv'), sep ='\t')
valid_df = pd.read_csv(os.path.join(val_dir, 'valid.tsv'), sep ='\t')
# convert BERT dataset
tr_sequences = convert_lines(train_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH, tokenizer)
train_dataset = torch.utils.data.TensorDataset(torch.tensor(tr_sequences, dtype=torch.long),
torch.tensor(train_df['star_rating'].values, dtype=torch.float))
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True)
val_sequences = convert_lines(valid_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH,
tokenizer)
valid_dataset = torch.utils.data.TensorDataset(torch.tensor(val_sequences, dtype=torch.long),
torch.tensor(valid_df['star_rating'].values, dtype=torch.float))
valid_loader = torch.utils.data.DataLoader(valid_dataset,
batch_size=batch_size,
shuffle=False)
# Load pre-trained bert model
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model.zero_grad()
model = model.to(DEVICE)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=lr, eps=1e-8)
if is_distributed and DEVICE != 'cpu':
# multi-machine multi-gpu case
model = nn.parallel.DistributedDataParallel(model)
else:
# single-machine multi-gpu case or single-machine or multi-machine cpu case
model = nn.DataParallel(model)
es = EarlyStopping(patience=5, mode="max")
path = os.path.join(args.model_dir, 'model.pth')
for epoch in range(num_epochs):
log = train(train_loader, model, optimizer, is_distributed)
val_log = evaluate(valid_loader, model)
es(val_log["val_acc"], model, model_path=path)
if es.early_stop:
logger.info("Early stopping")
break
def model_fn(model_dir):
"""
Load the PyTorch model. Called once when the hosting service starts.
:param model_dir: The directory where model files are stored.
:return: a dict containing the loaded model ("net") and its tokenizer ("tokenizer")
"""
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model = torch.nn.DataParallel(model)
with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
model.load_state_dict(torch.load(f))
return {"net": model, "tokenizer": | torch.cuda.manual_seed(seed) | conditional_block |
train_and_deploy.py | -whole-word-masking')
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count() > 1:
torch.cuda.manual_seed_all(seed)
else:
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
# Converting the lines to BERT format
# Thanks to https://www.kaggle.com/httpwwwfszyc/bert-in-keras-taming
def convert_lines(example, max_seq_length, tokenizer):
max_seq_length -= 2
all_tokens = []
longer = 0
for text in tqdm(example):
tokens_a = tokenizer.tokenize(text)
if len(tokens_a) > max_seq_length:
tokens_a = tokens_a[:max_seq_length]
longer += 1
one_token = tokenizer.convert_tokens_to_ids(["[CLS]"]+tokens_a+["[SEP]"])+[0] * (max_seq_length - len(tokens_a))
all_tokens.append(one_token)
return np.array(all_tokens)
def loss_fn(preds, labels):
preds = preds.view(-1)
labels = labels.view(-1)
assert(preds.shape == labels.shape)
loss = nn.BCEWithLogitsLoss()(preds, labels)
return loss
def _average_gradients(model):
# Gradient averaging.
|
def train(train_loader, model, optimizer, is_distributed):
model.train()
avg_loss = 0.
avg_accuracy = 0.
tk0 = tqdm(enumerate(train_loader), total=len(train_loader), leave=False)
optimizer.zero_grad()
for i, (x_batch, y_batch) in tk0:
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
loss.backward()
if is_distributed and not use_cuda:
# average gradients manually for multi-machine cpu case only
_average_gradients(model)
optimizer.step()
optimizer.zero_grad()
avg_loss += loss.item() / len(train_loader)
avg_accuracy += torch.mean(
((torch.sigmoid(y_pred[0]) >= 0.5) == (y_batch >= 0.5).to(DEVICE)).to(torch.float)).item() / len(train_loader)
tk0.set_postfix(loss=loss.item(), avg_loss=avg_loss)
log = OrderedDict([('avg_loss', avg_loss), ('avg_acc', avg_accuracy)])
tk0.close()
return log
# Run validation
def evaluate(valid_loader, model):
model.eval()
avg_loss = 0.
valid_preds = []
valid_trues = []
with torch.no_grad():
tk0 = tqdm(valid_loader)
for i, (x_batch, y_batch) in enumerate(tk0):
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
avg_loss += loss.item() / len(valid_loader)
outputs_np = torch.sigmoid(y_pred[0]).cpu().detach().numpy()
targets_np = y_batch.unsqueeze(1).numpy()
valid_preds.append(outputs_np)
valid_trues.append(targets_np)
valid_preds = np.vstack(valid_preds)
valid_trues = np.vstack(valid_trues)
acc = accuracy_score((valid_trues >= 0.5), (valid_preds >= 0.5))
val_log = OrderedDict([('val_loss', avg_loss), ('val_acc', acc)])
tk0.close()
return val_log
if __name__ == '__main__':
# Receive hyperparameters passed via create-training-job API
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--learning-rate', type=float, default=5e-6)
parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
parser.add_argument('--backend', type=str, default=None,
help='backend for distributed training (tcp, gloo on cpu and gloo, nccl on gpu)')
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--val', type=str, default=os.environ.get('SM_CHANNEL_VAL'))
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
args = parser.parse_args()
# Set hyperparameters after parsing the arguments
batch_size = args.batch_size
lr = args.learning_rate
num_epochs = args.epochs
current_host = args.current_host
hosts = args.hosts
model_dir = args.model_dir
training_dir = args.train
val_dir = args.val
#is_distributed = len(args.hosts) > 1 and args.backend is not None
is_distributed = len(args.hosts) > 1 and args.backend is not None
if is_distributed:
# Initialize the distributed environment.
world_size = len(args.hosts)
os.environ['WORLD_SIZE'] = str(world_size)
host_rank = args.hosts.index(args.current_host)
os.environ['RANK'] = str(host_rank)
dist.init_process_group(backend=args.backend, rank=host_rank, world_size=world_size)
logger.info('Initialized the distributed environment: \'{}\' backend on {} nodes. '.format(
args.backend, dist.get_world_size()) + 'Current host rank is {}. Number of gpus: {}'.format(
dist.get_rank(), args.num_gpus))
# fix seed
seed_torch()
# Data loading
train_df = pd.read_csv(os.path.join(training_dir, 'train.tsv'), sep ='\t')
valid_df = pd.read_csv(os.path.join(val_dir, 'valid.tsv'), sep ='\t')
# convert BERT dataset
tr_sequences = convert_lines(train_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH, tokenizer)
train_dataset = torch.utils.data.TensorDataset(torch.tensor(tr_sequences, dtype=torch.long),
torch.tensor(train_df['star_rating'].values, dtype=torch.float))
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True)
val_sequences = convert_lines(valid_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH,
tokenizer)
valid_dataset = torch.utils.data.TensorDataset(torch.tensor(val_sequences, dtype=torch.long),
torch.tensor(valid_df['star_rating'].values, dtype=torch.float))
valid_loader = torch.utils.data.DataLoader(valid_dataset,
batch_size=batch_size,
shuffle=False)
# Load pre-trained bert model
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model.zero_grad()
model = model.to(DEVICE)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=lr, eps=1e-8)
if is_distributed and DEVICE != 'cpu':
# multi-machine multi-gpu case
model = nn.parallel.DistributedDataParallel(model)
else:
# single-machine multi-gpu case or single-machine or multi-machine cpu case
model = nn.DataParallel(model)
es = EarlyStopping(patience=5, mode="max")
path = os.path.join(args.model_dir, 'model.pth')
for epoch in range(num_epochs):
log = train(train_loader, model, optimizer, is_distributed)
val_log = evaluate(valid_loader, model)
es(val_log["val_acc"], model, model_path=path)
if es.early_stop:
logger.info("Early stopping")
break
def model_fn(model_dir):
"""
Load the PyTorch model. Called once when the hosting service starts.
:param model_dir: The directory where model files are stored.
:return: a dict containing the loaded model ("net") and its tokenizer ("tokenizer")
"""
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model = torch.nn.DataParallel(model)
with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
model.load_state_dict(torch.load(f))
return {"net": model, "tokenizer": tokenizer | size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)
param.grad.data /= size | identifier_body |
train_and_deploy.py | -whole-word-masking')
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count() > 1:
torch.cuda.manual_seed_all(seed)
else:
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
# Converting the lines to BERT format
# Thanks to https://www.kaggle.com/httpwwwfszyc/bert-in-keras-taming
def convert_lines(example, max_seq_length, tokenizer):
max_seq_length -= 2
all_tokens = []
longer = 0
for text in tqdm(example):
tokens_a = tokenizer.tokenize(text)
if len(tokens_a) > max_seq_length:
tokens_a = tokens_a[:max_seq_length]
longer += 1
one_token = tokenizer.convert_tokens_to_ids(["[CLS]"]+tokens_a+["[SEP]"])+[0] * (max_seq_length - len(tokens_a))
all_tokens.append(one_token)
return np.array(all_tokens)
def loss_fn(preds, labels):
preds = preds.view(-1)
labels = labels.view(-1)
assert(preds.shape == labels.shape)
loss = nn.BCEWithLogitsLoss()(preds, labels)
return loss
def _average_gradients(model):
# Gradient averaging.
size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)
param.grad.data /= size
def train(train_loader, model, optimizer, is_distributed):
model.train()
avg_loss = 0.
avg_accuracy = 0.
tk0 = tqdm(enumerate(train_loader), total=len(train_loader), leave=False)
optimizer.zero_grad()
for i, (x_batch, y_batch) in tk0:
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
loss.backward()
if is_distributed and not use_cuda:
# average gradients manually for multi-machine cpu case only
_average_gradients(model)
optimizer.step()
optimizer.zero_grad()
avg_loss += loss.item() / len(train_loader)
avg_accuracy += torch.mean(
((torch.sigmoid(y_pred[0]) >= 0.5) == (y_batch >= 0.5).to(DEVICE)).to(torch.float)).item() / len(train_loader)
tk0.set_postfix(loss=loss.item(), avg_loss=avg_loss)
log = OrderedDict([('avg_loss', avg_loss), ('avg_acc', avg_accuracy)])
tk0.close()
return log
# Run validation
def evaluate(valid_loader, model):
model.eval()
avg_loss = 0.
valid_preds = []
valid_trues = []
with torch.no_grad():
tk0 = tqdm(valid_loader)
for i, (x_batch, y_batch) in enumerate(tk0):
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
avg_loss += loss.item() / len(valid_loader)
outputs_np = torch.sigmoid(y_pred[0]).cpu().detach().numpy()
targets_np = y_batch.unsqueeze(1).numpy()
valid_preds.append(outputs_np)
valid_trues.append(targets_np)
valid_preds = np.vstack(valid_preds)
valid_trues = np.vstack(valid_trues)
acc = accuracy_score((valid_trues >= 0.5), (valid_preds >= 0.5))
val_log = OrderedDict([('val_loss', avg_loss), ('val_acc', acc)])
tk0.close()
return val_log
if __name__ == '__main__':
# Receive hyperparameters passed via create-training-job API
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--learning-rate', type=float, default=5e-6)
parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
parser.add_argument('--backend', type=str, default=None,
help='backend for distributed training (tcp, gloo on cpu and gloo, nccl on gpu)')
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--val', type=str, default=os.environ.get('SM_CHANNEL_VAL'))
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
args = parser.parse_args()
# Set hyperparameters after parsing the arguments
batch_size = args.batch_size
lr = args.learning_rate
num_epochs = args.epochs
current_host = args.current_host
hosts = args.hosts
model_dir = args.model_dir
training_dir = args.train
val_dir = args.val
#is_distributed = len(args.hosts) > 1 and args.backend is not None
is_distributed = len(args.hosts) > 1 and args.backend is not None
if is_distributed:
# Initialize the distributed environment.
world_size = len(args.hosts)
os.environ['WORLD_SIZE'] = str(world_size)
host_rank = args.hosts.index(args.current_host)
os.environ['RANK'] = str(host_rank)
dist.init_process_group(backend=args.backend, rank=host_rank, world_size=world_size)
logger.info('Initialized the distributed environment: \'{}\' backend on {} nodes. '.format(
args.backend, dist.get_world_size()) + 'Current host rank is {}. Number of gpus: {}'.format(
dist.get_rank(), args.num_gpus))
# fix seed
seed_torch()
# Data loading
train_df = pd.read_csv(os.path.join(training_dir, 'train.tsv'), sep ='\t')
valid_df = pd.read_csv(os.path.join(val_dir, 'valid.tsv'), sep ='\t')
# convert BERT dataset
tr_sequences = convert_lines(train_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH, tokenizer)
train_dataset = torch.utils.data.TensorDataset(torch.tensor(tr_sequences, dtype=torch.long),
torch.tensor(train_df['star_rating'].values, dtype=torch.float))
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True)
val_sequences = convert_lines(valid_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH,
tokenizer)
valid_dataset = torch.utils.data.TensorDataset(torch.tensor(val_sequences, dtype=torch.long),
torch.tensor(valid_df['star_rating'].values, dtype=torch.float))
valid_loader = torch.utils.data.DataLoader(valid_dataset,
batch_size=batch_size,
shuffle=False) | # Load pre-trained bert model
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model.zero_grad()
model = model.to(DEVICE)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=lr, eps=1e-8)
if is_distributed and DEVICE != 'cpu':
# multi-machine multi-gpu case
model = nn.parallel.DistributedDataParallel(model)
else:
# single-machine multi-gpu case or single-machine or multi-machine cpu case
model = nn.DataParallel(model)
es = EarlyStopping(patience=5, mode="max")
path = os.path.join(args.model_dir, 'model.pth')
for epoch in range(num_epochs):
log = train(train_loader, model, optimizer, is_distributed)
val_log = evaluate(valid_loader, model)
es(val_log["val_acc"], model, model_path=path)
if es.early_stop:
logger.info("Early stopping")
break
def model_fn(model_dir):
"""
Load the PyTorch model. Called once when the hosting service starts.
:param model_dir: The directory where model files are stored.
:return: a dict containing the loaded model ("net") and its tokenizer ("tokenizer")
"""
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model = torch.nn.DataParallel(model)
with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
model.load_state_dict(torch.load(f))
return {"net": model, "tokenizer": tokenizer | random_line_split |
|
train_and_deploy.py | -whole-word-masking')
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.device_count() > 1:
torch.cuda.manual_seed_all(seed)
else:
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
# Converting the lines to BERT format
# Thanks to https://www.kaggle.com/httpwwwfszyc/bert-in-keras-taming
def convert_lines(example, max_seq_length, tokenizer):
max_seq_length -= 2
all_tokens = []
longer = 0
for text in tqdm(example):
tokens_a = tokenizer.tokenize(text)
if len(tokens_a) > max_seq_length:
tokens_a = tokens_a[:max_seq_length]
longer += 1
one_token = tokenizer.convert_tokens_to_ids(["[CLS]"]+tokens_a+["[SEP]"])+[0] * (max_seq_length - len(tokens_a))
all_tokens.append(one_token)
return np.array(all_tokens)
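# Editor-added illustrative sketch (not part of the original script): expected
# output shape of convert_lines. MAX_SEQUENCE_LENGTH and tokenizer are assumed
# to be the module-level objects used elsewhere in this script; the function is
# defined for illustration only and never called.
def _convert_lines_example():
    texts = pd.Series(["この商品はとても良いです", "最悪でした"])
    ids = convert_lines(texts, MAX_SEQUENCE_LENGTH, tokenizer)
    # One row per review: [CLS] + (truncated) tokens + [SEP] + zero padding,
    # always exactly MAX_SEQUENCE_LENGTH ids long.
    assert ids.shape == (2, MAX_SEQUENCE_LENGTH)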
def loss_fn(preds, labels):
preds = preds.view(-1)
labels = labels.view(-1)
assert(preds.shape == labels.shape)
loss = nn.BCEWithLogitsLoss()(preds, labels)
return loss
def _average_gradients(model):
# Gradient averaging.
size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)
param.grad.data /= size
def | (train_loader, model, optimizer, is_distributed):
model.train()
avg_loss = 0.
avg_accuracy = 0.
tk0 = tqdm(enumerate(train_loader), total=len(train_loader), leave=False)
optimizer.zero_grad()
for i, (x_batch, y_batch) in tk0:
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
loss.backward()
if is_distributed and not use_cuda:
# average gradients manually for multi-machine cpu case only
_average_gradients(model)
optimizer.step()
optimizer.zero_grad()
avg_loss += loss.item() / len(train_loader)
avg_accuracy += torch.mean(
((torch.sigmoid(y_pred[0]) >= 0.5) == (y_batch >= 0.5).to(DEVICE)).to(torch.float)).item() / len(train_loader)
tk0.set_postfix(loss=loss.item(), avg_loss=avg_loss)
log = OrderedDict([('avg_loss', avg_loss), ('avg_acc', avg_accuracy)])
tk0.close()
return log
# Run validation
def evaluate(valid_loader, model):
model.eval()
avg_loss = 0.
valid_preds = []
valid_trues = []
with torch.no_grad():
tk0 = tqdm(valid_loader)
for i, (x_batch, y_batch) in enumerate(tk0):
y_pred = model(x_batch.to(DEVICE),
attention_mask=(x_batch > 0).to(DEVICE),
labels=None)
loss = loss_fn(y_pred[0], y_batch.to(DEVICE))
avg_loss += loss.item() / len(valid_loader)
outputs_np = torch.sigmoid(y_pred[0]).cpu().detach().numpy()
targets_np = y_batch.unsqueeze(1).numpy()
valid_preds.append(outputs_np)
valid_trues.append(targets_np)
valid_preds = np.vstack(valid_preds)
valid_trues = np.vstack(valid_trues)
acc = accuracy_score((valid_trues >= 0.5), (valid_preds >= 0.5))
val_log = OrderedDict([('val_loss', avg_loss), ('val_acc', acc)])
tk0.close()
return val_log
if __name__ == '__main__':
# Receive hyperparameters passed via create-training-job API
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--learning-rate', type=float, default=5e-6)
parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
parser.add_argument('--backend', type=str, default=None,
help='backend for distributed training (tcp, gloo on cpu and gloo, nccl on gpu)')
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--val', type=str, default=os.environ.get('SM_CHANNEL_VAL'))
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
args = parser.parse_args()
# Set hyperparameters after parsing the arguments
batch_size = args.batch_size
lr = args.learning_rate
num_epochs = args.epochs
current_host = args.current_host
hosts = args.hosts
model_dir = args.model_dir
training_dir = args.train
val_dir = args.val
#is_distributed = len(args.hosts) > 1 and args.backend is not None
is_distributed = len(args.hosts) > 1 and args.backend is not None
if is_distributed:
# Initialize the distributed environment.
world_size = len(args.hosts)
os.environ['WORLD_SIZE'] = str(world_size)
host_rank = args.hosts.index(args.current_host)
os.environ['RANK'] = str(host_rank)
dist.init_process_group(backend=args.backend, rank=host_rank, world_size=world_size)
logger.info('Initialized the distributed environment: \'{}\' backend on {} nodes. '.format(
args.backend, dist.get_world_size()) + 'Current host rank is {}. Number of gpus: {}'.format(
dist.get_rank(), args.num_gpus))
# fix seed
seed_torch()
# Data loading
train_df = pd.read_csv(os.path.join(training_dir, 'train.tsv'), sep ='\t')
valid_df = pd.read_csv(os.path.join(val_dir, 'valid.tsv'), sep ='\t')
# convert BERT dataset
tr_sequences = convert_lines(train_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH, tokenizer)
train_dataset = torch.utils.data.TensorDataset(torch.tensor(tr_sequences, dtype=torch.long),
torch.tensor(train_df['star_rating'].values, dtype=torch.float))
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True)
val_sequences = convert_lines(valid_df["review_body"].fillna("DUMMY_VALUE"),
MAX_SEQUENCE_LENGTH,
tokenizer)
valid_dataset = torch.utils.data.TensorDataset(torch.tensor(val_sequences, dtype=torch.long),
torch.tensor(valid_df['star_rating'].values, dtype=torch.float))
valid_loader = torch.utils.data.DataLoader(valid_dataset,
batch_size=batch_size,
shuffle=False)
# Load pre-trained bert model
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model.zero_grad()
model = model.to(DEVICE)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=lr, eps=1e-8)
if is_distributed and DEVICE != 'cpu':
# multi-machine multi-gpu case
model = nn.parallel.DistributedDataParallel(model)
else:
# single-machine multi-gpu case or single-machine or multi-machine cpu case
model = nn.DataParallel(model)
es = EarlyStopping(patience=5, mode="max")
path = os.path.join(args.model_dir, 'model.pth')
for epoch in range(num_epochs):
log = train(train_loader, model, optimizer, is_distributed)
val_log = evaluate(valid_loader, model)
es(val_log["val_acc"], model, model_path=path)
if es.early_stop:
logger.info("Early stopping")
break
def model_fn(model_dir):
"""
Load the trained PyTorch model. Called once when the hosting service starts.
:param model_dir: The directory where model files are stored.
:return: a dict holding the DataParallel-wrapped BERT model and its tokenizer
"""
model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking',
num_labels=1)
model = torch.nn.DataParallel(model)
with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
model.load_state_dict(torch.load(f))
return {"net": model, "tokenizer": | train | identifier_name |
windows_aligned_file_reader.rs | FileReader {
pub fn new(fname: &str) -> ANNResult<Self> {
let reader: WindowsAlignedFileReader = WindowsAlignedFileReader {
file_name: fname.to_string(),
ctx_map: Lazy::new(|| ShardedLock::new(HashMap::new())),
};
reader.register_thread()?;
Ok(reader)
}
// Register the io context for a thread if it hasn't been registered.
pub fn register_thread(&self) -> ANNResult<()> {
let mut ctx_map = self.ctx_map.write().map_err(|_| {
ANNError::log_lock_poison_error("unable to acquire read lock on ctx_map".to_string())
})?;
let id = thread::current().id();
if ctx_map.contains_key(&id) {
println!(
"Warning:: Duplicate registration for thread_id : {:?}. Directly call get_ctx to get the thread context data.",
id);
return Ok(());
}
let mut ctx = IOContext::new();
match unsafe { FileHandle::new(&self.file_name, AccessMode::Read, ShareMode::Read) } {
Ok(file_handle) => ctx.file_handle = file_handle,
Err(err) => {
return Err(ANNError::log_io_error(err));
}
}
// Create a io completion port for the file handle, later it will be used to get the completion status.
match IOCompletionPort::new(&ctx.file_handle, None, 0, 0) {
Ok(io_completion_port) => ctx.io_completion_port = io_completion_port,
Err(err) => {
return Err(ANNError::log_io_error(err));
}
}
ctx_map.insert(id, Arc::new(ctx));
Ok(())
}
// Get the reference counted io context for the current thread.
pub fn get_ctx(&self) -> ANNResult<Arc<IOContext>> {
let ctx_map = self.ctx_map.read().map_err(|_| {
ANNError::log_lock_poison_error("unable to acquire read lock on ctx_map".to_string())
})?;
let id = thread::current().id();
match ctx_map.get(&id) {
Some(ctx) => Ok(Arc::clone(ctx)),
None => Err(ANNError::log_index_error(format!(
"unable to find IOContext for thread_id {:?}",
id
))),
}
}
// Read the data from the file by sending concurrent io requests in batches.
pub fn read<T>(&self, read_requests: &mut [AlignedRead<T>], ctx: &IOContext) -> ANNResult<()> {
let n_requests = read_requests.len();
let n_batches = (n_requests + MAX_IO_CONCURRENCY - 1) / MAX_IO_CONCURRENCY;
let mut overlapped_in_out =
vec![unsafe { std::mem::zeroed::<OVERLAPPED>() }; MAX_IO_CONCURRENCY];
for batch_idx in 0..n_batches {
let batch_start = MAX_IO_CONCURRENCY * batch_idx;
let batch_size = std::cmp::min(n_requests - batch_start, MAX_IO_CONCURRENCY);
for j in 0..batch_size {
let req = &mut read_requests[batch_start + j];
let os = &mut overlapped_in_out[j];
match unsafe {
read_file_to_slice(&ctx.file_handle, req.aligned_buf, os, req.offset)
} {
Ok(_) => {}
Err(error) => {
return Err(ANNError::IOError { err: (error) });
}
}
}
let mut n_read: DWORD = 0;
let mut n_complete: u64 = 0;
let mut completion_key: ULONG_PTR = 0;
let mut lp_os: *mut OVERLAPPED = ptr::null_mut();
while n_complete < batch_size as u64 {
match unsafe {
get_queued_completion_status(
&ctx.io_completion_port,
&mut n_read,
&mut completion_key,
&mut lp_os,
IO_COMPLETION_TIMEOUT,
)
} {
// An IO request completed.
Ok(true) => n_complete += 1,
// No IO request completed, continue to wait.
Ok(false) => {
thread::sleep(ASYNC_IO_COMPLETION_CHECK_INTERVAL);
}
// An error occurred.
Err(error) => return Err(ANNError::IOError { err: (error) }),
}
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::{fs::File, io::BufReader};
use bincode::deserialize_from;
use serde::{Deserialize, Serialize};
use crate::{common::AlignedBoxWithSlice, model::SECTOR_LEN};
use super::*;
pub const TEST_INDEX_PATH: &str =
"./tests/data/disk_index_siftsmall_learn_256pts_R4_L50_A1.2_alligned_reader_test.index";
pub const TRUTH_NODE_DATA_PATH: &str =
"./tests/data/disk_index_node_data_aligned_reader_truth.bin";
#[derive(Debug, Serialize, Deserialize)]
struct NodeData {
num_neighbors: u32,
coordinates: Vec<f32>,
neighbors: Vec<u32>,
}
impl PartialEq for NodeData {
fn eq(&self, other: &Self) -> bool {
self.num_neighbors == other.num_neighbors
&& self.coordinates == other.coordinates
&& self.neighbors == other.neighbors
}
}
#[test]
fn test_new_aligned_file_reader() {
// Replace "test_file_path" with actual file path
let result = WindowsAlignedFileReader::new(TEST_INDEX_PATH);
assert!(result.is_ok());
let reader = result.unwrap();
assert_eq!(reader.file_name, TEST_INDEX_PATH);
}
#[test]
fn test_read() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let read_length = 512; // adjust according to your logic
let num_read = 10;
let mut aligned_mem = AlignedBoxWithSlice::<u8>::new(read_length * num_read, 512).unwrap();
// create and add AlignedReads to the vector
let mut mem_slices = aligned_mem
.split_into_nonoverlapping_mut_slices(0..aligned_mem.len(), read_length)
.unwrap();
let mut aligned_reads: Vec<AlignedRead<'_, u8>> = mem_slices
.iter_mut()
.enumerate()
.map(|(i, slice)| {
let offset = (i * read_length) as u64;
AlignedRead::new(offset, slice).unwrap()
})
.collect();
let result = reader.read(&mut aligned_reads, &ctx);
assert!(result.is_ok());
}
#[test]
fn test_read_disk_index_by_sector() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let read_length = SECTOR_LEN; // adjust according to your logic
let num_sector = 10;
let mut aligned_mem =
AlignedBoxWithSlice::<u8>::new(read_length * num_sector, 512).unwrap();
// Each slice will be used as the buffer for a read request of a sector.
let mut mem_slices = aligned_mem
.split_into_nonoverlapping_mut_slices(0..aligned_mem.len(), read_length)
.unwrap();
let mut aligned_reads: Vec<AlignedRead<'_, u8>> = mem_slices
.iter_mut()
.enumerate()
.map(|(sector_id, slice)| {
let offset = (sector_id * read_length) as u64;
AlignedRead::new(offset, slice).unwrap()
})
.collect();
let result = reader.read(&mut aligned_reads, &ctx);
assert!(result.is_ok());
aligned_reads.iter().for_each(|read| {
assert_eq!(read.aligned_buf.len(), SECTOR_LEN);
});
let disk_layout_meta = reconstruct_disk_meta(aligned_reads[0].aligned_buf);
assert!(disk_layout_meta.len() > 9);
let dims = disk_layout_meta[1];
let num_pts = disk_layout_meta[0];
let max_node_len = disk_layout_meta[3];
let max_num_nodes_per_sector = disk_layout_meta[4];
assert!(max_node_len * max_num_nodes_per_sector < SECTOR_LEN as u64);
let num_nbrs_start = (dims as usize) * std::mem::size_of::<f32>();
let nbrs_buf_start = num_nbrs_start + std::mem::size_of::<u32>();
let mut node_data_array = Vec::with_capacity(max_num_nodes_per_sector as usize * 9);
// Only validate sectors 1..8, the first sectors that hold graph nodes.
(1..9).for_each(|sector_id| {
let sector_data = &mem_slices[sector_id];
for node_data in sector_data.chunks_exact(max_node_len as usize) {
// Extract coordinates data from the start of the node_data
let coordinates_end = (dims as usize) * std::mem::size_of::<f32>();
let coordinates = node_data[0..coordinates_end] | .chunks_exact(std::mem::size_of::<f32>())
.map(|chunk| f32::from_le_bytes(chunk.try_into().unwrap()))
.collect(); | random_line_split |
|
windows_aligned_file_reader.rs | Err(ANNError::log_index_error(format!(
"unable to find IOContext for thread_id {:?}",
id
))),
}
}
// Read the data from the file by sending concurrent io requests in batches.
pub fn read<T>(&self, read_requests: &mut [AlignedRead<T>], ctx: &IOContext) -> ANNResult<()> {
let n_requests = read_requests.len();
let n_batches = (n_requests + MAX_IO_CONCURRENCY - 1) / MAX_IO_CONCURRENCY;
let mut overlapped_in_out =
vec![unsafe { std::mem::zeroed::<OVERLAPPED>() }; MAX_IO_CONCURRENCY];
for batch_idx in 0..n_batches {
let batch_start = MAX_IO_CONCURRENCY * batch_idx;
let batch_size = std::cmp::min(n_requests - batch_start, MAX_IO_CONCURRENCY);
for j in 0..batch_size {
let req = &mut read_requests[batch_start + j];
let os = &mut overlapped_in_out[j];
match unsafe {
read_file_to_slice(&ctx.file_handle, req.aligned_buf, os, req.offset)
} {
Ok(_) => {}
Err(error) => {
return Err(ANNError::IOError { err: (error) });
}
}
}
let mut n_read: DWORD = 0;
let mut n_complete: u64 = 0;
let mut completion_key: ULONG_PTR = 0;
let mut lp_os: *mut OVERLAPPED = ptr::null_mut();
while n_complete < batch_size as u64 {
match unsafe {
get_queued_completion_status(
&ctx.io_completion_port,
&mut n_read,
&mut completion_key,
&mut lp_os,
IO_COMPLETION_TIMEOUT,
)
} {
// An IO request completed.
Ok(true) => n_complete += 1,
// No IO request completed, continue to wait.
Ok(false) => {
thread::sleep(ASYNC_IO_COMPLETION_CHECK_INTERVAL);
}
// An error occurred.
Err(error) => return Err(ANNError::IOError { err: (error) }),
}
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::{fs::File, io::BufReader};
use bincode::deserialize_from;
use serde::{Deserialize, Serialize};
use crate::{common::AlignedBoxWithSlice, model::SECTOR_LEN};
use super::*;
pub const TEST_INDEX_PATH: &str =
"./tests/data/disk_index_siftsmall_learn_256pts_R4_L50_A1.2_alligned_reader_test.index";
pub const TRUTH_NODE_DATA_PATH: &str =
"./tests/data/disk_index_node_data_aligned_reader_truth.bin";
#[derive(Debug, Serialize, Deserialize)]
struct NodeData {
num_neighbors: u32,
coordinates: Vec<f32>,
neighbors: Vec<u32>,
}
impl PartialEq for NodeData {
fn eq(&self, other: &Self) -> bool {
self.num_neighbors == other.num_neighbors
&& self.coordinates == other.coordinates
&& self.neighbors == other.neighbors
}
}
#[test]
fn test_new_aligned_file_reader() {
// Replace "test_file_path" with actual file path
let result = WindowsAlignedFileReader::new(TEST_INDEX_PATH);
assert!(result.is_ok());
let reader = result.unwrap();
assert_eq!(reader.file_name, TEST_INDEX_PATH);
}
#[test]
fn test_read() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let read_length = 512; // adjust according to your logic
let num_read = 10;
let mut aligned_mem = AlignedBoxWithSlice::<u8>::new(read_length * num_read, 512).unwrap();
// create and add AlignedReads to the vector
let mut mem_slices = aligned_mem
.split_into_nonoverlapping_mut_slices(0..aligned_mem.len(), read_length)
.unwrap();
let mut aligned_reads: Vec<AlignedRead<'_, u8>> = mem_slices
.iter_mut()
.enumerate()
.map(|(i, slice)| {
let offset = (i * read_length) as u64;
AlignedRead::new(offset, slice).unwrap()
})
.collect();
let result = reader.read(&mut aligned_reads, &ctx);
assert!(result.is_ok());
}
#[test]
fn test_read_disk_index_by_sector() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let read_length = SECTOR_LEN; // adjust according to your logic
let num_sector = 10;
let mut aligned_mem =
AlignedBoxWithSlice::<u8>::new(read_length * num_sector, 512).unwrap();
// Each slice will be used as the buffer for a read request of a sector.
let mut mem_slices = aligned_mem
.split_into_nonoverlapping_mut_slices(0..aligned_mem.len(), read_length)
.unwrap();
let mut aligned_reads: Vec<AlignedRead<'_, u8>> = mem_slices
.iter_mut()
.enumerate()
.map(|(sector_id, slice)| {
let offset = (sector_id * read_length) as u64;
AlignedRead::new(offset, slice).unwrap()
})
.collect();
let result = reader.read(&mut aligned_reads, &ctx);
assert!(result.is_ok());
aligned_reads.iter().for_each(|read| {
assert_eq!(read.aligned_buf.len(), SECTOR_LEN);
});
let disk_layout_meta = reconstruct_disk_meta(aligned_reads[0].aligned_buf);
assert!(disk_layout_meta.len() > 9);
let dims = disk_layout_meta[1];
let num_pts = disk_layout_meta[0];
let max_node_len = disk_layout_meta[3];
let max_num_nodes_per_sector = disk_layout_meta[4];
assert!(max_node_len * max_num_nodes_per_sector < SECTOR_LEN as u64);
let num_nbrs_start = (dims as usize) * std::mem::size_of::<f32>();
let nbrs_buf_start = num_nbrs_start + std::mem::size_of::<u32>();
let mut node_data_array = Vec::with_capacity(max_num_nodes_per_sector as usize * 9);
// Only validate sectors 1..8, the first sectors that hold graph nodes.
(1..9).for_each(|sector_id| {
let sector_data = &mem_slices[sector_id];
for node_data in sector_data.chunks_exact(max_node_len as usize) {
// Extract coordinates data from the start of the node_data
let coordinates_end = (dims as usize) * std::mem::size_of::<f32>();
let coordinates = node_data[0..coordinates_end]
.chunks_exact(std::mem::size_of::<f32>())
.map(|chunk| f32::from_le_bytes(chunk.try_into().unwrap()))
.collect();
// Extract number of neighbors from the node_data
let neighbors_num = u32::from_le_bytes(
node_data[num_nbrs_start..nbrs_buf_start]
.try_into()
.unwrap(),
);
let nbors_buf_end =
nbrs_buf_start + (neighbors_num as usize) * std::mem::size_of::<u32>();
// Extract neighbors from the node data.
let mut neighbors = Vec::new();
for nbors_data in node_data[nbrs_buf_start..nbors_buf_end]
.chunks_exact(std::mem::size_of::<u32>())
{
let nbors_id = u32::from_le_bytes(nbors_data.try_into().unwrap());
assert!(nbors_id < num_pts as u32);
neighbors.push(nbors_id);
}
// Create NodeData struct and push it to the node_data_array
node_data_array.push(NodeData {
num_neighbors: neighbors_num,
coordinates,
neighbors,
});
}
});
// Compare that each node read from the disk index are expected.
let node_data_truth_file = File::open(TRUTH_NODE_DATA_PATH).unwrap();
let reader = BufReader::new(node_data_truth_file);
let node_data_vec: Vec<NodeData> = deserialize_from(reader).unwrap();
for (node_from_node_data_file, node_from_disk_index) in
node_data_vec.iter().zip(node_data_array.iter())
{
// Verify that the NodeData from the file is equal to the NodeData in node_data_array
assert_eq!(node_from_node_data_file, node_from_disk_index);
}
}
#[test]
fn test_read_fail_invalid_file() {
let reader = WindowsAlignedFileReader::new("/invalid_path");
assert!(reader.is_err());
}
#[test]
fn test_read_no_requests() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let mut read_requests = Vec::<AlignedRead<u8>>::new();
let result = reader.read(&mut read_requests, &ctx);
assert!(result.is_ok());
}
#[test]
fn | test_get_ctx | identifier_name |
|
windows_aligned_file_reader.rs | (std::mem::size_of_val(aligned_buf))?;
Ok(Self {
offset,
aligned_buf,
})
}
fn assert_is_aligned(val: usize) -> ANNResult<()> {
match val % DISK_IO_ALIGNMENT {
0 => Ok(()),
_ => Err(ANNError::log_disk_io_request_alignment_error(format!(
"The offset or length of AlignedRead request is not {} bytes aligned",
DISK_IO_ALIGNMENT
))),
}
}
pub fn aligned_buf(&self) -> &[T] |
}
pub struct WindowsAlignedFileReader {
file_name: String,
// ctx_map is the mapping from thread id to io context. It is a hashmap behind a sharded lock to allow concurrent access from multiple threads.
// ShardedLock provides a reader-writer lock that offers concurrent read access to the shared data while allowing exclusive write access.
// It achieves better scalability by dividing the shared data into multiple shards, each with its own internal lock.
// Multiple threads can read from different shards simultaneously, reducing contention.
// https://docs.rs/crossbeam/0.8.2/crossbeam/sync/struct.ShardedLock.html
// Compared to RwLock, ShardedLock provides higher concurrency for read operations and is suitable for read-heavy workloads.
// The value of the hashmap is an Arc<IOContext> to allow immutable access to IOContext with automatic reference counting.
ctx_map: Lazy<ShardedLock<HashMap<thread::ThreadId, Arc<IOContext>>>>,
}
impl WindowsAlignedFileReader {
pub fn new(fname: &str) -> ANNResult<Self> {
let reader: WindowsAlignedFileReader = WindowsAlignedFileReader {
file_name: fname.to_string(),
ctx_map: Lazy::new(|| ShardedLock::new(HashMap::new())),
};
reader.register_thread()?;
Ok(reader)
}
// Register the io context for a thread if it hasn't been registered.
pub fn register_thread(&self) -> ANNResult<()> {
let mut ctx_map = self.ctx_map.write().map_err(|_| {
ANNError::log_lock_poison_error("unable to acquire read lock on ctx_map".to_string())
})?;
let id = thread::current().id();
if ctx_map.contains_key(&id) {
println!(
"Warning:: Duplicate registration for thread_id : {:?}. Directly call get_ctx to get the thread context data.",
id);
return Ok(());
}
let mut ctx = IOContext::new();
match unsafe { FileHandle::new(&self.file_name, AccessMode::Read, ShareMode::Read) } {
Ok(file_handle) => ctx.file_handle = file_handle,
Err(err) => {
return Err(ANNError::log_io_error(err));
}
}
// Create a io completion port for the file handle, later it will be used to get the completion status.
match IOCompletionPort::new(&ctx.file_handle, None, 0, 0) {
Ok(io_completion_port) => ctx.io_completion_port = io_completion_port,
Err(err) => {
return Err(ANNError::log_io_error(err));
}
}
ctx_map.insert(id, Arc::new(ctx));
Ok(())
}
// Get the reference counted io context for the current thread.
pub fn get_ctx(&self) -> ANNResult<Arc<IOContext>> {
let ctx_map = self.ctx_map.read().map_err(|_| {
ANNError::log_lock_poison_error("unable to acquire read lock on ctx_map".to_string())
})?;
let id = thread::current().id();
match ctx_map.get(&id) {
Some(ctx) => Ok(Arc::clone(ctx)),
None => Err(ANNError::log_index_error(format!(
"unable to find IOContext for thread_id {:?}",
id
))),
}
}
// Read the data from the file by sending concurrent io requests in batches.
pub fn read<T>(&self, read_requests: &mut [AlignedRead<T>], ctx: &IOContext) -> ANNResult<()> {
let n_requests = read_requests.len();
let n_batches = (n_requests + MAX_IO_CONCURRENCY - 1) / MAX_IO_CONCURRENCY;
let mut overlapped_in_out =
vec![unsafe { std::mem::zeroed::<OVERLAPPED>() }; MAX_IO_CONCURRENCY];
for batch_idx in 0..n_batches {
let batch_start = MAX_IO_CONCURRENCY * batch_idx;
let batch_size = std::cmp::min(n_requests - batch_start, MAX_IO_CONCURRENCY);
for j in 0..batch_size {
let req = &mut read_requests[batch_start + j];
let os = &mut overlapped_in_out[j];
match unsafe {
read_file_to_slice(&ctx.file_handle, req.aligned_buf, os, req.offset)
} {
Ok(_) => {}
Err(error) => {
return Err(ANNError::IOError { err: (error) });
}
}
}
let mut n_read: DWORD = 0;
let mut n_complete: u64 = 0;
let mut completion_key: ULONG_PTR = 0;
let mut lp_os: *mut OVERLAPPED = ptr::null_mut();
while n_complete < batch_size as u64 {
match unsafe {
get_queued_completion_status(
&ctx.io_completion_port,
&mut n_read,
&mut completion_key,
&mut lp_os,
IO_COMPLETION_TIMEOUT,
)
} {
// An IO request completed.
Ok(true) => n_complete += 1,
// No IO request completed, continue to wait.
Ok(false) => {
thread::sleep(ASYNC_IO_COMPLETION_CHECK_INTERVAL);
}
// An error occurred.
Err(error) => return Err(ANNError::IOError { err: (error) }),
}
}
}
Ok(())
}
}
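// Editor-added illustrative sketch (not part of the original source): the
// ShardedLock-backed ctx_map above is designed for this access pattern, in which
// each worker thread registers its own IOContext once (taking the write lock)
// and afterwards only takes the read side via get_ctx(). This assumes
// WindowsAlignedFileReader is Sync, as its multi-threaded design implies.
fn read_from_worker_threads_example(reader: &WindowsAlignedFileReader) {
    std::thread::scope(|scope| {
        for _ in 0..4 {
            scope.spawn(|| {
                // Register once per thread, then fetch this thread's shared context.
                if reader.register_thread().is_ok() {
                    if let Ok(_ctx) = reader.get_ctx() {
                        // ... build AlignedRead requests over 512-byte-aligned buffers
                        // and call reader.read(&mut requests, &_ctx) ...
                    }
                }
            });
        }
    });
}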
#[cfg(test)]
mod tests {
use std::{fs::File, io::BufReader};
use bincode::deserialize_from;
use serde::{Deserialize, Serialize};
use crate::{common::AlignedBoxWithSlice, model::SECTOR_LEN};
use super::*;
pub const TEST_INDEX_PATH: &str =
"./tests/data/disk_index_siftsmall_learn_256pts_R4_L50_A1.2_alligned_reader_test.index";
pub const TRUTH_NODE_DATA_PATH: &str =
"./tests/data/disk_index_node_data_aligned_reader_truth.bin";
#[derive(Debug, Serialize, Deserialize)]
struct NodeData {
num_neighbors: u32,
coordinates: Vec<f32>,
neighbors: Vec<u32>,
}
impl PartialEq for NodeData {
fn eq(&self, other: &Self) -> bool {
self.num_neighbors == other.num_neighbors
&& self.coordinates == other.coordinates
&& self.neighbors == other.neighbors
}
}
#[test]
fn test_new_aligned_file_reader() {
// Replace "test_file_path" with actual file path
let result = WindowsAlignedFileReader::new(TEST_INDEX_PATH);
assert!(result.is_ok());
let reader = result.unwrap();
assert_eq!(reader.file_name, TEST_INDEX_PATH);
}
#[test]
fn test_read() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let read_length = 512; // adjust according to your logic
let num_read = 10;
let mut aligned_mem = AlignedBoxWithSlice::<u8>::new(read_length * num_read, 512).unwrap();
// create and add AlignedReads to the vector
let mut mem_slices = aligned_mem
.split_into_nonoverlapping_mut_slices(0..aligned_mem.len(), read_length)
.unwrap();
let mut aligned_reads: Vec<AlignedRead<'_, u8>> = mem_slices
.iter_mut()
.enumerate()
.map(|(i, slice)| {
let offset = (i * read_length) as u64;
AlignedRead::new(offset, slice).unwrap()
})
.collect();
let result = reader.read(&mut aligned_reads, &ctx);
assert!(result.is_ok());
}
#[test]
fn test_read_disk_index_by_sector() {
let reader = WindowsAlignedFileReader::new(TEST_INDEX_PATH).unwrap();
let ctx = reader.get_ctx().unwrap();
let read_length = SECTOR_LEN; // adjust according to your logic
let num_sector = 10;
let mut aligned_mem =
AlignedBoxWithSlice::<u8>::new(read_length * num_sector, 512).unwrap();
// Each slice will be used as the buffer for a read request of a sector.
let mut mem_slices = aligned_mem
.split_into_nonoverlapping_mut_slices(0..aligned_mem.len(), read_length)
.unwrap();
let mut aligned_reads: Vec<AlignedRead<'_, u8>> = mem_slices
.iter_mut()
.enumerate()
.map(|(sector_id, slice)| {
let offset = (sector_id * read_length) as u64;
AlignedRead::new(offset, slice).unwrap()
})
.collect();
let result = reader.read(&mut aligned_reads, | {
self.aligned_buf
} | identifier_body |
sm2.go | x2, y2 := curve.ScalarMult(pub.X, pub.Y, k.Bytes())
x1Buf := x1.Bytes()
y1Buf := y1.Bytes()
x2Buf := x2.Bytes()
y2Buf := y2.Bytes()
if n := len(x1Buf); n < 32 {
x1Buf = append(zeroByteSlice()[:32-n], x1Buf...)
}
if n := len(y1Buf); n < 32 {
y1Buf = append(zeroByteSlice()[:32-n], y1Buf...)
}
if n := len(x2Buf); n < 32 {
x2Buf = append(zeroByteSlice()[:32-n], x2Buf...)
}
if n := len(y2Buf); n < 32 {
y2Buf = append(zeroByteSlice()[:32-n], y2Buf...)
}
//c1
c = append(c, x1Buf...) // x component
c = append(c, y1Buf...) // y component
//hash(x || M || y)
tm := []byte{}
tm = append(tm, x2Buf...)
tm = append(tm, plaintest...)
tm = append(tm, y2Buf...)
c3 := sm3.Sum(tm)
c = append(c, c3...)
ct, ok := kdf(x2Buf, y2Buf, length) // ciphertext keystream
if !ok {
continue
}
c = append(c, ct...)
for i := 0; i < length; i++ {
c[96+i] ^= plaintest[i] //c2
}
//C = C1 || C2 || C3
return append([]byte{0x04}, c...), nil
}
}
/*
Get a random number k
C1 = (x1, y1) = [k]G
S = [h]P            // h is the cofactor
(x2, y2) = [k]P
t = KDF(x2||y2, klen)   // klen is the length of M; KDF is SM2's key derivation function
c2 = M XOR t
C3 = Hash(x2||M||y2)
C = C1||C2||C3
*/
// Implementation of the key derivation function KDF of the Chinese national standard SM2 algorithm: https://blog.csdn.net/Heidlyn/article/details/53993002
// Its purpose is to derive key material from a shared secret bit string.
func kdf(x, y []byte, length int) ([]byte, bool) {
var c []byte
//ct := intToBytes(1)//ct=0x00000001
ct := 1
h := sm3.New()
x = append(x, y...) //Z
for i, j := 0, (length+31)/32; i < j; i++ { // ct runs from 1 to klen/v (rounded up)
// Hash(Z || ct )
h.Reset()
h.Write(x)
h.Write(intToBytes(ct))
hash := h.Sum(nil)
if i+1 == j && length%32 != 0 {
c = append(c, hash[:length%32]...)
} else {
c = append(c, hash...)
}
ct++
}
for i := 0; i < length; i++ {
if c[i] != 0 {
return c, true
}
}
return c, false
}
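// kdfKeystreamExample is an editor-added illustrative sketch (not part of the
// original source). kdf expands the shared point (x2, y2) into a keystream of the
// plaintext length; the boolean result rejects an all-zero keystream, in which
// case SM2Encrypt retries with a fresh random k.
func kdfKeystreamExample(x2Buf, y2Buf, plaintext []byte) ([]byte, bool) {
	t, ok := kdf(x2Buf, y2Buf, len(plaintext))
	if !ok {
		return nil, false
	}
	for i := range plaintext {
		t[i] ^= plaintext[i] // c2 = M XOR t, exactly as in SM2Encrypt/SM2Decrypt
	}
	return t, true
}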
func intToBytes(x int) []byte {
var buf = make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(x))
return buf
}
/*
Extract C1 from C and verify that C1 lies on the elliptic curve. // The length of C2 is fixed, so C1 can be recovered from C.
S = [h]C1; if S is the point at infinity, abort.
(x2, y2) = [d]C1
t = KDF(x2||y2, klen)
M' = C2 XOR t
u = Hash(x2||M'||y2), check u == C3
M' is the plaintext
*/
// SM2 decryption
func SM2Decrypt(priv *PrivateKey, ciphertext []byte) ([]byte, error) {
ciphertext = ciphertext[1:]
length := len(ciphertext) - 96
curve := priv.Curve
x := new(big.Int).SetBytes(ciphertext[:32])
y := new(big.Int).SetBytes(ciphertext[32:64])
// (x2,y2) = [dB]C1 C1=(x,y)
x2, y2 := curve.ScalarMult(x, y, priv.D.Bytes())
x2Buf := x2.Bytes()
y2Buf := y2.Bytes()
if n := len(x2Buf); n < 32 {
x2Buf = append(zeroByteSlice()[:32-n], x2Buf...)
}
if n := len(y2Buf); n < 32 {
y2Buf = append(zeroByteSlice()[:32-n], y2Buf...)
}
// t = KDF(x2 || y2 ,klen)
t, ok := kdf(x2Buf, y2Buf, length)
if !ok {
return nil, errors.New("Decrypt: failed to decrypt")
}
for i := 0; i < length; i++ {
t[i] ^= ciphertext[i+96]
}
//U = Hash(x2 || M || y)
tm := []byte{}
tm = append(tm, x2Buf...)
tm = append(tm, t...)
tm = append(tm, y2Buf...)
h := sm3.Sum(tm)
if bytes.Compare(h, ciphertext[64:96]) != 0 {
return t, errors.New("Decrypt: failed to decrypt")
}
return t, nil
}
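// encryptDecryptRoundTripExample is an editor-added illustrative sketch (not part
// of the original source): it shows the intended use of the two functions above.
// The caller supplies an already generated key pair; the ciphertext layout is
// 0x04 || C1 || C3 || C2, as produced by SM2Encrypt.
func encryptDecryptRoundTripExample(priv *PrivateKey, msg []byte) ([]byte, error) {
	ct, err := SM2Encrypt(&priv.PublicKey, msg)
	if err != nil {
		return nil, err
	}
	// SM2Decrypt recomputes (x2, y2) = [dB]C1, unmasks C2 and checks C3.
	return SM2Decrypt(priv, ct)
}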
type zr struct {
io.Reader
}
func (z *zr) Read(dst []byte) (n int, err error) {
for i := range dst {
dst[i] = 0
}
return len(dst), nil
}
var zeroReader = &zr{}
func getLastBit(a *big.Int) uint {
return a.Bit(0)
}
func Compress(a *PublicKey) []byte {
buf := []byte{}
yp := getLastBit(a.Y)
buf = append(buf, a.X.Bytes()...)
if n := len(a.X.Bytes()); n < 32 {
buf = append(zeroByteSlice()[:(32-n)], buf...)
}
buf = append([]byte{byte(yp)}, buf...)
return buf
}
func Decompress(a []byte) *PublicKey {
var aa, xx, xx3 sm2P256FieldElement
SM2P256()
x := new(big.Int).SetBytes(a[1:])
curve := sm2p256Params
sm2util :=sm2P256Util{}
sm2util.p256FromBig(&xx, x)
sm2util.p256Square(&xx3, &xx) // x3 = x ^ 2
sm2util.p256Mul(&xx3, &xx3, &xx) // x3 = x ^ 2 * x
sm2util.p256Mul(&aa, &curve.a, &xx) // a = a * x
sm2util.p256Add(&xx3, &xx3, &aa)
sm2util.p256Add(&xx3, &xx3, &curve.b)
y2 := sm2util.p256ToBig(&xx3)
y := new(big.Int).ModSqrt(y2, sm2p256Params.P)
if getLastBit(y) != uint(a[0]) {
y.Sub(sm2p256Params.P, y)
}
return &PublicKey{
Curve: SM2P256(),
X: x,
Y: y,
}
}
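// compressRoundTripExample is an editor-added illustrative sketch (not part of
// the original source): Compress stores a parity byte plus the 32-byte x
// coordinate, and Decompress recovers y from the curve equation y^2 = x^3 + ax + b.
func compressRoundTripExample(pub *PublicKey) bool {
	buf := Compress(pub)         // 33 bytes: parity || x
	recovered := Decompress(buf) // solves for y and fixes its sign via the parity byte
	return recovered.X.Cmp(pub.X) == 0 && recovered.Y.Cmp(pub.Y) == 0
}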
// ------------------------------------ //
const (
BitSize = 256
KeyBytes = (BitSize + 7) / 8
UnCompress = 0x04
)
func (pub *PublicKey) GetUnCompressBytes() []byte {
xBytes := pub.X.Bytes()
yBytes := pub.Y.Bytes()
xl := len(xBytes)
yl := len(yBytes)
raw := make([]byte, 1+KeyBytes*2)
raw[0] = UnCompress
if xl > KeyBytes {
copy(raw[1:1+KeyBytes], xBytes[xl-KeyBytes:])
} else if xl < KeyBytes {
copy(raw[1+(KeyBytes-xl):1+KeyBytes], xBytes)
} else {
copy(raw[1:1+KeyBytes], xBytes)
}
if yl > KeyBytes {
copy(raw[1+KeyBytes:], yBytes[yl-KeyBytes:])
} else if yl < KeyBytes {
copy(raw[1+KeyBytes+(KeyBytes-yl):], yBytes)
} else {
copy(raw[1+KeyBytes:], yBytes)
}
return raw
}
func (pub *PublicKey) GetRawBytes() []byte {
raw := pub.GetUnCompressBytes()
return raw[1:]
} | identifier_name |
||
sm2.go | ).ModInverse(d1, N)
s.Mul(s, d1Inv)
s.Mod(s, N)
if s.Sign() != 0 {
break
}
}
return
}
func hashMsg(za, msg []byte) (*big.Int, error) {
e := sm3.New()
e.Write(za)
e.Write(msg)
return new(big.Int).SetBytes(e.Sum(nil)[:32]), nil
}
// Verify verifies the signature in r, s of hash using the public key, pub. Its
// return value records whether the signature is valid.
func SM2Verify(pub *PublicKey, msg, IDA []byte, r, s *big.Int) bool {
c := pub.Curve
N := c.Params().N
one := new(big.Int).SetInt64(1)
if r.Cmp(one) < 0 || s.Cmp(one) < 0 {
return false
}
if r.Cmp(N) >= 0 || s.Cmp(N) >= 0 {
return false
}
//M=ZA || Msg
ZA,err := ZA(pub,IDA)
if err != nil {
return false
}
// e =H(M)
e,err := hashMsg(ZA,msg)
if err != nil {
return false
}
// t= (r+s) mod n
t := new(big.Int).Add(r, s)
t.Mod(t, N)
if t.Sign() == 0 {
return false
}
// Compute the elliptic curve point C1 = [k]G = (x1, y1), where G is a base point of the curve whose order is a prime,
// k is an integer, [k]G denotes k-fold point multiplication, and (x1, y1) are the coordinates of the computed point C1.
//(x,y) = [s]G+[t]P
var x *big.Int
x1, y1 := c.ScalarBaseMult(s.Bytes()) //[s]G =p
x2, y2 := c.ScalarMult(pub.X, pub.Y, t.Bytes())//[t]P=t*(px,py)
x, _ = c.Add(x1, y1, x2, y2)
//R=(e+x) modn
x.Add(x, e)
x.Mod(x, N)
//R ?= r
return x.Cmp(r) == 0
}
// Let the private and public keys be k and K respectively, i.e. K = kG, where G is the base point.
// Public key encryption:
// Choose a random number r and turn the message M into a ciphertext C; the ciphertext is a point pair:
// C = {rG, M + rK}, where K is the public key.
func SM2Encrypt(pub *PublicKey, plaintest []byte) ([]byte, error){
length := len(plaintest)
for {
c := []byte{}
//curve := pub.sm2p256Curve
curve := pub.Curve
//่ทๅพ้ๆบๆฐk
k, err := randFieldElement(curve, rand.Reader)
if err != nil {
return nil, err
}
//(x,y) = [k]P
x1, y1 := curve.ScalarBaseMult(k.Bytes())
x2, y2 := curve.ScalarMult(pub.X, pub.Y, k.Bytes())
x1Buf := x1.Bytes()
y1Buf := y1.Bytes()
x2Buf := x2.Bytes()
y2Buf := y2.Bytes()
if n := len(x1Buf); n < 32 {
x1Buf = append(zeroByteSlice()[:32-n], x1Buf...)
}
if n := len(y1Buf); n < 32 {
y1Buf = append(zeroByteSlice()[:32-n], y1Buf...)
}
if n := len(x2Buf); n < 32 {
x2Buf = append(zeroByteSlice()[:32-n], x2Buf...)
}
if n := len(y2Buf); n < 32 {
y2Buf = append(zeroByteSlice()[:32-n], y2Buf...)
}
//c1
c = append(c, x1Buf...) // x component
c = append(c, y1Buf...) // y component
//hash(x || M || y)
tm := []byte{}
tm = append(tm, x2Buf...)
tm = append(tm, plaintest...)
tm = append(tm, y2Buf...)
c3 := sm3.Sum(tm)
c = append(c, c3...)
ct, ok := kdf(x2Buf, y2Buf, length) // ciphertext keystream
if !ok {
continue
}
c = append(c, ct...)
for i := 0; i < length; i++ {
c[96+i] ^= plaintest[i] //c2
}
//C = C1 || C2 || C3
return append([]byte{0x04}, c...), nil
}
}
/*
Get a random number k
C1 = (x1, y1) = [k]G
S = [h]P            // h is the cofactor
(x2, y2) = [k]P
t = KDF(x2||y2, klen)   // klen is the length of M; KDF is SM2's key derivation function
c2 = M XOR t
C3 = Hash(x2||M||y2)
C = C1||C2||C3
*/
// Implementation of the key derivation function KDF of the Chinese national standard SM2 algorithm: https://blog.csdn.net/Heidlyn/article/details/53993002
// Its purpose is to derive key material from a shared secret bit string.
func kdf(x, y []byte, length int) ([]byte, bool) {
var c []byte
//ct := intToBytes(1)//ct=0x00000001
ct := 1
h := sm3.New()
x = append(x, y...) //Z
for i, j := 0, (length+31)/32; i < j; i++ { // ct runs from 1 to klen/v (rounded up)
// Hash(Z || ct )
h.Reset()
h.Write(x)
h.Write(intToBytes(ct))
hash := h.Sum(nil)
if i+1 == j && length%32 != 0 {
c = append(c, hash[:length%32]...)
} else {
c = append(c, hash...)
}
ct++
}
for i := 0; i < length; i++ {
if c[i] != 0 {
return c, true
}
}
return c, false
}
func intToBytes(x int) []byte {
var buf = make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(x))
return buf
}
/*
Extract C1 from C and verify that C1 lies on the elliptic curve. // The length of C2 is fixed, so C1 can be recovered from C.
S = [h]C1; if S is the point at infinity, abort.
(x2, y2) = [d]C1
t = KDF(x2||y2, klen)
M' = C2 XOR t
u = Hash(x2||M'||y2), check u == C3
M' is the plaintext
*/
// SM2 decryption
func SM2Decrypt(priv *PrivateKey, ciphertext []byte) ([]byte, error) {
ciphertext = ciphertext[1:]
length := len(ciphertext) - 96
curve := priv.Curve
x := new(big.Int).SetBytes(ciphertext[:32])
y := new(big.Int).SetBytes(ciphertext[32:64])
// (x2,y2) = [dB]C1 C1=(x,y)
x2, y2 := curve.ScalarMult(x, y, priv.D.Bytes())
x2Buf := x2.Bytes()
y2Buf := y2.Bytes()
if n := len(x2Buf); n < 32 {
x2Buf = append(zeroByteSlice()[:32-n], x2Buf...)
}
if n := len(y2Buf); n < 32 {
y2Buf = append(zeroByteSlice()[:32-n], y2Buf...)
}
// t = KDF(x2 || y2 ,klen)
t, ok := kdf(x2Buf, y2Buf, length)
if !ok {
return nil, errors.New("Decrypt: failed to decrypt")
}
for i := 0; i < length; i++ {
t[i] ^= ciphertext[i+96]
}
//U = Hash(x2 || M || y)
tm := []byte{}
tm = append(tm, x2Buf...)
tm = append(tm, t...)
tm = append(tm, y2Buf...)
h := sm3.Sum(tm)
if bytes.Compare(h, ciphertext[64:96]) != 0 {
return t, errors.New("Decrypt: failed to decrypt")
}
r | eturn t, nil
}
type zr struct {
io.Reader
}
func (z | conditional_block |
|
sm2.go | 1 := new(big.Int).Add(priv.D, ONE)
d1Inv := new(big.Int).ModInverse(d1, N)
s.Mul(s, d1Inv)
s.Mod(s, N)
if s.Sign() != 0 {
break
}
}
return
}
func hashMsg(za, msg []byte) (*big.Int, error) {
e := sm3.New()
e.Write(za)
e.Write(msg)
return new(big.Int).SetBytes(e.Sum(nil)[:32]), nil
}
// Verify verifies the signature in r, s of hash using the public key, pub. Its
// return value records whether the signature is valid.
func SM2Verify(pub *PublicKey, msg, IDA []byte, r, s *big.Int) bool {
c := pub.Curve
N := c.Params().N
one := new(big.Int).SetInt64(1)
if r.Cmp(one) < 0 || s.Cmp(one) < 0 {
return false
}
if r.Cmp(N) >= 0 || s.Cmp(N) >= 0 {
return false
}
//M=ZA || Msg
ZA,err := ZA(pub,IDA)
if err != nil {
return false
}
// e =H(M)
e,err := hashMsg(ZA,msg)
if err != nil {
return false
}
// t= (r+s) mod n
t := new(big.Int).Add(r, s)
t.Mod(t, N)
if t.Sign() == 0 {
return false
}
// Compute the elliptic curve point C1 = [k]G = (x1, y1), where G is a base point of the curve whose order is a prime,
// k is an integer, [k]G denotes k-fold point multiplication, and (x1, y1) are the coordinates of the computed point C1.
//(x,y) = [s]G+[t]P
var x *big.Int
x1, y1 := c.ScalarBaseMult(s.Bytes()) //[s]G =p
x2, y2 := c.ScalarMult(pub.X, pub.Y, t.Bytes())//[t]P=t*(px,py)
x, _ = c.Add(x1, y1, x2, y2)
//R=(e+x) modn
x.Add(x, e)
x.Mod(x, N)
//R ?= r
return x.Cmp(r) == 0
}
// Let the private and public keys be k and K respectively, i.e. K = kG, where G is the base point.
// Public key encryption:
// Choose a random number r and turn the message M into a ciphertext C; the ciphertext is a point pair:
// C = {rG, M + rK}, where K is the public key.
func SM2Encrypt(pub *PublicKey, plaintest []byte) ([]byte, error){
length := len(plaintest)
for {
c := []byte{}
//curve := pub.sm2p256Curve
curve := pub.Curve
//่ทๅพ้ๆบๆฐk
k, err := randFieldElement(curve, rand.Reader)
if err != nil {
return nil, err
}
//(x,y) = [k]P
x1, y1 := curve.ScalarBaseMult(k.Bytes())
x2, y2 := curve.ScalarMult(pub.X, pub.Y, k.Bytes())
x1Buf := x1.Bytes()
y1Buf := y1.Bytes()
x2Buf := x2.Bytes()
y2Buf := y2.Bytes()
if n := len(x1Buf); n < 32 {
x1Buf = append(zeroByteSlice()[:32-n], x1Buf...)
}
if n := len(y1Buf); n < 32 {
y1Buf = append(zeroByteSlice()[:32-n], y1Buf...)
}
if n := len(x2Buf); n < 32 {
x2Buf = append(zeroByteSlice()[:32-n], x2Buf...)
}
if n := len(y2Buf); n < 32 {
y2Buf = append(zeroByteSlice()[:32-n], y2Buf...)
}
//c1
c = append(c, x1Buf...) // x component
c = append(c, y1Buf...) // y component
//hash(x || M || y)
tm := []byte{}
tm = append(tm, x2Buf...)
tm = append(tm, plaintest...)
tm = append(tm, y2Buf...)
c3 := sm3.Sum(tm)
c = append(c, c3...)
ct, ok := kdf(x2Buf, y2Buf, length) // ciphertext keystream
if !ok {
continue
}
c = append(c, ct...)
for i := 0; i < length; i++ {
c[96+i] ^= plaintest[i] //c2
}
//C = C1 || C2 || C3
return append([]byte{0x04}, c...), nil
}
}
/*
Get a random number k
C1 = (x1, y1) = [k]G
S = [h]P            // h is the cofactor
(x2, y2) = [k]P
t = KDF(x2||y2, klen)   // klen is the length of M; KDF is SM2's key derivation function
c2 = M XOR t
C3 = Hash(x2||M||y2)
C = C1||C2||C3
*/
// Implementation of the key derivation function KDF of the Chinese national standard SM2 algorithm: https://blog.csdn.net/Heidlyn/article/details/53993002
// Its purpose is to derive key material from a shared secret bit string.
func kdf(x, y []byte, length int) ([]byte, bool) {
var c []byte
//ct := intToBytes(1)//ct=0x00000001
ct := 1
h := sm3.New()
x = append(x, y...) //Z
for i, j := 0, (length+31)/32; i < j; i++ { // ct runs from 1 to klen/v (rounded up)
// Hash(Z || ct )
h.Reset()
h.Write(x)
h.Write(intToBytes(ct))
hash := h.Sum(nil)
if i+1 == j && length%32 != 0 {
c = append(c, hash[:length%32]...)
} else {
c = append(c, hash...)
}
ct++
}
for i := 0; i < length; i++ {
if c[i] != 0 {
return c, true
}
}
return c, false
}
func intToBytes(x int) []byte {
var buf = make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(x))
return buf
}
/*
Extract C1 from C and verify that C1 lies on the elliptic curve. // The length of C2 is fixed, so C1 can be recovered from C.
S = [h]C1; if S is the point at infinity, abort.
(x2, y2) = [d]C1
t = KDF(x2||y2, klen)
M' = C2 XOR t
u = Hash(x2||M'||y2), check u == C3
M' is the plaintext
*/
// SM2 decryption
func SM2Decrypt(priv *PrivateKey, ciphertext []byte) ([]byte, error) {
ciphertext = ciphertext[1:]
length := len(ciphertext) - 96
curve := priv.Curve
x := new(big.Int).SetBytes(ciphertext[:32])
y := new(big.Int).SetBytes(ciphertext[32:64])
// (x2,y2) = [dB]C1 C1=(x,y)
x2, y2 := curve.ScalarMult(x, y, priv.D.Bytes())
x2Buf := x2.Bytes()
y2Buf := y2.Bytes()
if n := len(x2Buf); n < 32 {
x2Buf = append(zeroByteSlice()[:32-n], x2Buf...)
}
if n := len(y2Buf); n < 32 {
y2Buf = append(zeroByteSlice()[:32-n], y2Buf...)
}
// t = KDF(x2 || y2 ,klen)
t, ok := kdf(x2Buf, y2Buf, length)
if !ok {
return nil, errors.New("Decrypt: failed to decrypt")
}
for i := 0; i < length; i++ {
t[i] ^= ciphertext[i+96]
}
//U = Hash(x2 || M || y)
tm := []byte{}
tm = append(tm, x2Buf...)
tm = append(tm, t...) | tm = append(tm, y2Buf...)
h := sm3.Sum(tm)
if bytes.Compare(h, ciphertext[64:96]) != 0 {
return t, errors.New("Decrypt: failed to decrypt")
} | random_line_split |
|
sm2.go | ) {
if len(IDA) <= 0 {
IDA = default_IDA
}
entlenA := len(IDA)
if entlenA >= 8192 {
return []byte{}, errors.New("SM2: uid too large")
}
sm2util :=sm2P256Util{}
ENTLA := uint16(8*entlenA)
ZA := sm3.New()
ZA.Write([]byte{byte((ENTLA >> 8) & 0xFF)})
ZA.Write([]byte{byte(ENTLA & 0xFF)})
ZA.Write(IDA)
ZA.Write(sm2util.p256ToBig(&sm2p256Params.a).Bytes())
//ZA.Write(sm2p256Params.A.Bytes())
ZA.Write(sm2p256Params.B.Bytes())
ZA.Write(sm2p256Params.Gx.Bytes())
ZA.Write(sm2p256Params.Gy.Bytes())
xBuf := pub.X.Bytes()
yBuf := pub.Y.Bytes()
if n := len(xBuf); n < 32 {
xBuf = append(zeroByteSlice()[:32-n], xBuf...)
}
ZA.Write(xBuf)
ZA.Write(yBuf)
return ZA.Sum(nil)[:32], nil
}
// 32byte
func zeroByteSlice() []byte {
return []byte{
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
}
}
// sign format = 30 + len(z) + 02 + len(r) + r + 02 + len(s) + s, z being what follows its size, ie 02+len(r)+r+02+len(s)+s
func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) {
// r, s, err := Sign(priv, msg)
r, s, err := SM2Sign(priv, msg, nil)
fmt.Println("msg:",msg)
if err != | byte, sign []byte) bool {
var sm2Sign sm2Signature
_, err := asn1.Unmarshal(sign, &sm2Sign)
if err != nil {
return false
}
return SM2Verify(pub, msg, nil, sm2Sign.R, sm2Sign.S)
}
// ---------------------------------------------------------------- //
func (pub *PublicKey) Encrypt(data []byte) ([]byte, error) {
return SM2Encrypt(pub, data)
}
func (priv *PrivateKey) Decrypt(data []byte) ([]byte, error) {
return SM2Decrypt(priv, data)
}
// -------------------------------------------------------------- //
// Reference: https://blog.csdn.net/samsho2/article/details/80772228
func SM2Sign(priv *PrivateKey, msg, IDA []byte) (r, s *big.Int, err error) {
za, err := ZA(&priv.PublicKey, IDA)
if err != nil {
return nil, nil, err
}
e, err := hashMsg(za, msg)
if err != nil {
return nil, nil, err
}
//c := priv.PublicKey.sm2p256Curve
c := priv.PublicKey.Curve
N := c.Params().N
if N.Sign() == 0 {
return nil, nil, errNoOneParam
}
var k *big.Int
for { // ่ฐๆด็ฎๆณ็ป่ไปฅๅฎ็ฐSM2
// r = e + x mod n
for {
k, err = randFieldElement(c, rand.Reader)
if err != nil {
r = nil
return
}
r, _ = priv.Curve.ScalarBaseMult(k.Bytes())
r.Add(r, e)
r.Mod(r, N)
if r.Sign() != 0 {
if t := new(big.Int).Add(r, k); t.Cmp(N) != 0 {
break
}
}
}
//s=(1+d)^(-1) * (k - r*d) mod n
rD := new(big.Int).Mul(priv.D, r)
s = new(big.Int).Sub(k, rD)
d1 := new(big.Int).Add(priv.D, ONE)
d1Inv := new(big.Int).ModInverse(d1, N)
s.Mul(s, d1Inv)
s.Mod(s, N)
if s.Sign() != 0 {
break
}
}
return
}
func hashMsg(za, msg []byte) (*big.Int, error) {
e := sm3.New()
e.Write(za)
e.Write(msg)
return new(big.Int).SetBytes(e.Sum(nil)[:32]), nil
}
// Verify verifies the signature in r, s of hash using the public key, pub. Its
// return value records whether the signature is valid.
func SM2Verify(pub *PublicKey, msg, IDA []byte, r, s *big.Int) bool {
c := pub.Curve
N := c.Params().N
one := new(big.Int).SetInt64(1)
if r.Cmp(one) < 0 || s.Cmp(one) < 0 {
return false
}
if r.Cmp(N) >= 0 || s.Cmp(N) >= 0 {
return false
}
//M=ZA || Msg
ZA,err := ZA(pub,IDA)
if err != nil {
return false
}
// e =H(M)
e,err := hashMsg(ZA,msg)
if err != nil {
return false
}
// t= (r+s) mod n
t := new(big.Int).Add(r, s)
t.Mod(t, N)
if t.Sign() == 0 {
return false
}
// Compute the elliptic curve point C1 = [k]G = (x1, y1), where G is a base point of the curve whose order is a prime,
// k is an integer, [k]G denotes k-fold point multiplication, and (x1, y1) are the coordinates of the computed point C1.
//(x,y) = [s]G+[t]P
var x *big.Int
x1, y1 := c.ScalarBaseMult(s.Bytes()) //[s]G =p
x2, y2 := c.ScalarMult(pub.X, pub.Y, t.Bytes())//[t]P=t*(px,py)
x, _ = c.Add(x1, y1, x2, y2)
//R=(e+x) modn
x.Add(x, e)
x.Mod(x, N)
//R ?= r
return x.Cmp(r) == 0
}
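// signVerifyExample is an editor-added illustrative sketch (not part of the
// original source): sign a message with SM2Sign and check it with SM2Verify.
// Passing nil for IDA makes ZA fall back to the package's default user ID.
func signVerifyExample(priv *PrivateKey, msg []byte) bool {
	r, s, err := SM2Sign(priv, msg, nil)
	if err != nil {
		return false
	}
	return SM2Verify(&priv.PublicKey, msg, nil, r, s)
}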
// Let the private and public keys be k and K respectively, i.e. K = kG, where G is the base point.
// Public key encryption:
// Choose a random number r and turn the message M into a ciphertext C; the ciphertext is a point pair:
// C = {rG, M + rK}, where K is the public key.
func SM2Encrypt(pub *PublicKey, plaintest []byte) ([]byte, error){
length := len(plaintest)
for {
c := []byte{}
//curve := pub.sm2p256Curve
curve := pub.Curve
//่ทๅพ้ๆบๆฐk
k, err := randFieldElement(curve, rand.Reader)
if err != nil {
return nil, err
}
//(x,y) = [k]P
x1, y1 := curve.ScalarBaseMult(k.Bytes())
x2, y2 := curve.ScalarMult(pub.X, pub.Y, k.Bytes())
x1Buf := x1.Bytes()
y1Buf := y1.Bytes()
x2Buf := x2.Bytes()
y2Buf := y2.Bytes()
if n := len(x1Buf); n < 32 {
x1Buf = append(zeroByteSlice()[:32-n], x1Buf...)
}
if n := len(y1Buf); n < 32 {
y1Buf = append(zeroByteSlice()[:32-n], y1Buf...)
}
if n := len(x2Buf); n < 32 {
x2Buf = append(zeroByteSlice()[:32-n], x2Buf...)
}
if n := len(y2Buf); n < 32 {
y2Buf = append(zeroByteSlice()[:32-n], y2Buf...)
}
//c1
c = append(c, x1Buf...) // x component
c = append(c, y1Buf...) // y component
//hash(x || M || y)
tm := []byte{}
tm = append(tm, x2Buf...)
| nil {
return nil, err
}
return asn1.Marshal(sm2Signature{r, s})
}
// ---------------------------------------------------------------- //
func (pub *PublicKey) Verify(msg [] | identifier_body |
varausohjelmajs.js | Box.selectedIndex].innerHTML;
selectedTimeIndex = selectBox.options[selectBox.selectedIndex].index;
selectedTimeIndex2 = selectedTimeIndex;
aika = selectedValue;
elokuvanNimi = document.getElementById("usrInputNimi").value;
updateHTMLBlock();
}
$("td").change(function() {
alert($(this).find("option:selected").text()+' clicked!');
});
$("#myElement").click(function() {
$(this).siblings(":last").hide();
});
$("a").click(function(event){
event.preventDefault();
});
// updates all the clickevents for the td tags of the calendar
var checklist = document.getElementsByTagName("td");
function updateCalendarEvents(){
var checklist = document.getElementsByTagName("td");
for (var i = 0; i < checklist.length; i++) {
checklist[i].addEventListener("click", updateSelectedtd)
}
}
// var tdlista = checklist.querySelectorAll("td");
for (var i = 0; i < checklist.length; i++) {
checklist[i].addEventListener("click", updateSelectedtd)
}
function updateSelectedtd(){
for (var k = 0; k < checklist.length; k++) {
checklist[k].style.backgroundColor = "#492079";
if ($(checklist[k]).hasClass('selected')){
checklist[k].classList.remove("selected");
}
else if ($(checklist[k]).hasClass('selectedForAnnihilation')){
checklist[k].classList.remove("selectedForAnnihilation");
}
}
if($(this).hasClass('noselect')){
return;
} else if($(this).hasClass('has-events')){
$(this).addClass('selectedForAnnihilation')
this.style.backgroundColor = "#e20a0a";
}
else {
this.classList.add("selected");
$(this).addClass('selected')
this.style.backgroundColor = "green";
}
}
function updateHTMLBlock(){
elokuvaBlokki = stringElementti1 + rowspanElementti + stringElementti2 + elokuvanNimi + stringElementti3 + sali + stringElementti4 + aika + stringElementti5;
}
var asda = document.getElementsByName
function tarkistaKoko(){
if (parseInt(rowspanKokoSallija, 10) < (selectedTimeIndex +1)) {
return false;
} else {
return true;
}
}
function tarkistaKokoKonfliktit(){
while(selectedTimeIndex2 > 0) {
if (!$(seuraavanLapsitdt2[rowspanKohta]).hasClass('no-events')){
alert("Asettamasi aika on konfliktissa toisen ajan kanssa");
return false;
}
seuraavanLapsitdt2 = seuraavanLapsitdt2[0].parentElement.nextElementSibling.children;
selectedTimeIndex2--;
}
}
var rowspanKokoSallija = 0;
var seuraavaIsantatd;
var seuraavanLapsitdt;
var tdosoitin;
var listanPituusCounter;
// Used by the admin to place a new movie block into the calendar.
// Calls the conflict check (tarkistaKokoKonfliktit) and removes the empty blocks underneath the movie block.
function asetaAika(){
changeFunc();
rowspanElementti = selectedTimeIndex +1;
updateHTMLBlock();
if (elokuvanNimi == ""){
alert("Et antanut nimea");
return;
}
for (var u = 0; u < checklist.length; u++) {
if ($(checklist[u]).hasClass('selected')){
var whileLoopControlElement = checklist[u];
seuraavanLapsitdt = whileLoopControlElement.parentElement.nextElementSibling.children;
seuraavanLapsitdt2 = whileLoopControlElement.parentElement.nextElementSibling.children;
rowspanKohta = $(checklist[u]).index();
rowspanKokoSallija = parseInt(checklist[u].parentElement.children[0].className.split(' ')[1]);
if (tarkistaKoko() == false){
alert("Elokuvan aika yli aukiolajan");
return;
}
if (tarkistaKokoKonfliktit() != false){
while(selectedTimeIndex > 0) {
// FIXME (HAIKKAAA): the index value rowspanKohta is wrong, because the code does not account for
// the grid elements that are missing underneath movie blocks;
// the code should be rewritten so that movie blocks
// carry placeholder blocks with them.
if ($(seuraavanLapsitdt[rowspanKohta]).hasClass('no-events')){
$(seuraavanLapsitdt[rowspanKohta]).remove();
}
seuraavanLapsitdt = seuraavanLapsitdt[0].parentElement.nextElementSibling.children;
selectedTimeIndex--;
}
} else return;
$(checklist[u]).replaceWith( elokuvaBlokki );
// while(selectedTimeIndex > 0) {
// $(seuraavanLapsitdt).last("td").remove();
/**
* the leftover undefined value could be removed here; undefined is automatically at the end of the list
*/
// listanPituusCounter = seuraavanLapsitdt.length;
// seuraavanLapsitdt = seuraavanLapsitdt[0].parentElement.nextElementSibling.children;
// selectedTimeIndex--;
// }
}
}
updateCalendarEvents();
tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
}
var para = document.createElement("td");
function poistaAika(){
for (var u = 0; u < checklist.length; u++) {
// if ($(checklist[u]).hasClass('selectedForAnnihilation') && $(checklist[u]).hasClass('no-events')) {
// $(checklist[u]).replaceWith( perusStringElementti );
// }
if ($(checklist[u]).hasClass('selectedForAnnihilation')){
var lisaajaCounterArvo = checklist[u].rowSpan;
var whileLoopControlElement = checklist[u];
seuraavanLapsitdt = whileLoopControlElement.parentElement.children;
var seuraavanParenttd = whileLoopControlElement.parentElement.nextElementSibling;
rowspanKohta = $(checklist[u]).index();
while(lisaajaCounterArvo > 1) {
// if ($(seuraavanLapsitdt[rowspanKohta]).hasClass('no-events')){
// seuraavanParenttd.insertBefore(para, lapsitdtNode[rowspanKohta]);
seuraavanParenttd.children[rowspanKohta].insertAdjacentHTML("beforebegin", perusStringElementti);
alert("used");
// }
// var seuraavanParenttd = seuraavanLapsitdt[0].parentElement.nextElementSibling;
// $( seuraavanParenttd.previousElementSibling ).append( perusStringElementti );
// listanPituusCounter = seuraavanLapsitdt.length;
seuraavanParenttd = seuraavanParenttd.children[0].parentElement.nextElementSibling;
lisaajaCounterArvo--;
}
alert(checklist[u].rowSpan);
$(checklist[u]).replaceWith( perusStringElementti );
updateCalendarEvents();
}
tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
}
}
var modiviedtime = document.lastModified;
var ataglist = document.getElementsByClassName("saliTogglet");
//ataglist = fillArray(ataglist, 3);
//ataglist[0] = document.getElementById("original");
var tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
//var tableArray = document.getElementsByClassName("tableTogglet");
//tableArray = fillArray(tableArray, 4);
//tableArray[0] = document.getElementById("theTable");
//var nappilist = document.getElementsByClassName("nappiSetit");
var nappilist = $(".nappiSetit");
//nappilist = fillArray(nappilist, 3);
//nappilist[0] = document.getElementById("nappiSetti");
var teatteriNappilist = document.getElementsByClassName("teatteriTogglet");
function toggleTeatteri(sali){
// for (var i = 0; i < ataglist.length; i++){
// $(ataglist[i]).removeClass("active");
// }
$( nappilist[sali] ).toggle(true);
$(nappilist[sali]).siblings().toggle( false );
}
toggleTeatteri("0")
$(ataglist[0]).addClass("active");
function toggleShowRoom(room){ |
for (var i = 0; i < ataglist.length; i++){
$( ataglist[i] ).removeClass("active");
}
$( tablelist[room] ).toggle(true);
$(tablelist[room]).siblings().toggle( false );
}; | identifier_body |
|
varausohjelmajs.js | // check whether any user has been registered during this session
if (loginArray == null || loginArray == undefined){
loginError.style.display = 'block';
RegisterErrorText.innerHTML = "ERROR: No readable users registered";
return;
}
var x = document.forms["loginForm"]["loginUsername"].value;
var y = document.forms["loginForm"]["loginPassword"].value;
// a two-level if statement could be used here: one branch for an empty field and another for a field with incorrect values
if (x==null || x == "" || y==null || y=="" || (!loginArray.includes("@ " + x + " " + y) && !loginArray.includes(x + " " + y))){
loginError.style.display = 'block';
RegisterErrorText.innerHTML = "ERROR: Incorrect username or password";
alert(loginArray.includes("@ " + x + " " + y) + " " + loginArray.includes(x + " " + y));
return;
}
alert(loginArray.includes("@ " + x + " " + y) + " " + loginArray.includes(x + " " + y));
// this if statement sets the most recently logged-in user as the session's login, which is used later for reservations
var currentUserImput = "@ " + x + " " + y
if (loginArray.includes("@ " + x + " " + y) && searchStringInArray("@ " + x + " " + y, loginArray) != -1){
alert("adminuser");
sessionStorage.setItem("currentLoginIndex", searchStringInArray("@ " + x + " " + y, loginArray));
sessionStorage.setItem("currentLoginUser", loginArray[searchStringInArray("@ " + x + " " + y, loginArray)]);
// redirect after login
window.location.href="index.html";
} else if (loginArray.includes(x + " " + y) && searchStringInArray(x + " " + y, loginArray) != -1){
alert("normaluser");
sessionStorage.setItem("currentLoginIndex", searchStringInArray(x + " " + y, loginArray));
sessionStorage.setItem("currentLoginUser", loginArray[searchStringInArray(x + " " + y, loginArray)]);
// redirect after login
window.location.href="elokuvaAsiakassivu.html";
} else alert("Unknown login error has occurred");
}
// Searches the list of registered users for the user currently logging in;
// returns the index of the user if the given parameter is found in the list.
function searchStringInArray (searchString, searchStringArray) {
for ( var i = 0 ; i < searchStringArray.length ; i++ ) {
if (searchStringArray[i] == searchString)
return i;
}
return -1;
}
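// Editor-added illustrative sketch (not part of the original source): how the
// login handler above combines the two account formats with searchStringInArray.
// The parameter names are hypothetical.
function exampleLoginIndex(username, password, users){
    // admin accounts are stored with a leading "@ ", normal accounts without it
    var adminIndex = searchStringInArray("@ " + username + " " + password, users);
    return adminIndex != -1 ? adminIndex : searchStringInArray(username + " " + password, users);
}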
// function that updates the HTML sections that depend on the given user
function paivitaKayttajaElementit(kayttajaElementti){
kayttajaElementti.innerHTML = sessionStorage.getItem("currentLoginUser");
alert(sessionStorage.getItem("currentLoginUser"));
}
stringElementti1 = '<td class=" has-events" rowspan="';
rowspanElementti = ""+ listanPituusCounter +"";
stringElementti2 = '"><div class="row-fluid elokuvaElementti1" style="width: 99%; height: 100%;"><span class="title">' ;
stringElementti3 = '</span> <span class="sali"><a>' ;
stringElementti4 = '</a></span><span class="aika">' ;
stringElementti5 = '</span></div></td>' ;
perusStringElementti = '<td class=" no-events" rowspan="1"></td>';
var elokuvaBlokki = stringElementti1 + rowspanElementti + stringElementti2 + elokuvanNimi + stringElementti3 + sali + stringElementti4 + aika + stringElementti5;
var elokuvanNimi = "asda";
var sali = "sali 1*";
var aika = changeFunc() || "00:31";
jQuery(".list-group-item").click(function (e) {
jQuery(this).addClass('active').siblings().removeClass('active');
});
jQuery("#sel1").click(function (ee) {
jQuery(this).addClass('selected').siblings().removeClass('active');
});
// Eventlistenerit hyllytetty silla tarkistuksen voi asettaa submittausmetodin alkuun
var elokuvanNimiElementti = document.getElementById("ElokuvaNimiImput");
var selectBox = document.getElementById("sel2");
var selectedValue = selectBox.options[selectBox.selectedIndex].value;
var selectedTimeIndex = selectBox.options[selectBox.selectedIndex].index;
var selectedTimeIndex2 = selectedTimeIndex;
elokuvanNimiElementti.addEventListener("", changeFunc)
selectBox.addEventListener("", changeFunc)
function changeFunc() {
var selectBox = document.getElementById("sel2");
selectedValue = selectBox.options[selectBox.selectedIndex].innerHTML;
selectedTimeIndex = selectBox.options[selectBox.selectedIndex].index;
selectedTimeIndex2 = selectedTimeIndex;
aika = selectedValue;
elokuvanNimi = document.getElementById("usrInputNimi").value;
updateHTMLBlock();
}
$("td").change(function() {
alert($(this).find("option:selected").text()+' clicked!');
});
$("#myElement").click(function() {
$(this).siblings(":last").hide();
});
$("a").click(function(event){
event.preventDefault();
});
// updates all the clickevents for the td tags of the calendar
var checklist = document.getElementsByTagName("td");
function updateCalendarEvents(){
var checklist = document.getElementsByTagName("td");
for (var i = 0; i < checklist.length; i++) {
checklist[i].addEventListener("click", updateSelectedtd)
}
}
// var tdlista = checklist.querySelectorAll("td");
for (var i = 0; i < checklist.length; i++) {
checklist[i].addEventListener("click", updateSelectedtd)
}
function updateSelectedtd(){
for (var k = 0; k < checklist.length; k++) {
checklist[k].style.backgroundColor = "#492079";
if ($(checklist[k]).hasClass('selected')){
checklist[k].classList.remove("selected");
}
else if ($(checklist[k]).hasClass('selectedForAnnihilation')){
checklist[k].classList.remove("selectedForAnnihilation");
}
}
if($(this).hasClass('noselect')){
return;
} else if($(this).hasClass('has-events')){
$(this).addClass('selectedForAnnihilation')
this.style.backgroundColor = "#e20a0a";
}
else |
}
function updateHTMLBlock(){
elokuvaBlokki = stringElementti1 + rowspanElementti + stringElementti2 + elokuvanNimi + stringElementti3 + sali + stringElementti4 + aika + stringElementti5;
}
var asda = document.getElementsByName
function tarkistaKoko(){
if (parseInt(rowspanKokoSallija, 10) < (selectedTimeIndex +1)) {
return false;
} else {
return true;
}
}
function tarkistaKokoKonfliktit(){
while(selectedTimeIndex2 > 0) {
if (!$(seuraavanLapsitdt2[rowspanKohta]).hasClass('no-events')){
alert("Asettamasi aika on konfliktissa toisen ajan kanssa");
return false;
}
seuraavanLapsitdt2 = seuraavanLapsitdt2[0].parentElement.nextElementSibling.children;
selectedTimeIndex2--;
}
}
var rowspanKokoSallija = 0;
var seuraavaIsantatd;
var seuraavanLapsitdt;
var tdosoitin;
var listanPituusCounter;
// Kaytetaan uuden elokuvablokin sijoittamiseen kalenteriin adminin toimesta.
// Kutsuu tarkistakonfliktit() ja poistaa tyhjät blokit elokuvablokin alta.
function asetaAika(){
changeFunc();
rowspanElementti = selectedTimeIndex +1;
updateHTMLBlock();
if (elokuvanNimi == ""){
alert("Et antanut nimea");
return;
}
for (var u = 0; u < checklist.length; u++) {
if ($(checklist[u]).hasClass('selected')){
var whileLoopControlElement = checklist[u];
seuraavanLapsitdt = whileLoopControlElement.parentElement.nextElementSibling.children;
seuraavanLapsitdt2 = whileLoopControlElement.parentElement.nextElementSibling.children;
rowspanKohta = $(check | {
this.classList.add("selected");
$(this).addClass('selected')
this.style.backgroundColor = "green";
} | conditional_block |
varausohjelmajs.js | function updateSelectedtd(){
for (var k = 0; k < checklist.length; k++) {
checklist[k].style.backgroundColor = "#492079";
if ($(checklist[k]).hasClass('selected')){
checklist[k].classList.remove("selected");
}
else if ($(checklist[k]).hasClass('selectedForAnnihilation')){
checklist[k].classList.remove("selectedForAnnihilation");
}
}
if($(this).hasClass('noselect')){
return;
} else if($(this).hasClass('has-events')){
$(this).addClass('selectedForAnnihilation')
this.style.backgroundColor = "#e20a0a";
}
else {
this.classList.add("selected");
$(this).addClass('selected')
this.style.backgroundColor = "green";
}
}
function updateHTMLBlock(){
elokuvaBlokki = stringElementti1 + rowspanElementti + stringElementti2 + elokuvanNimi + stringElementti3 + sali + stringElementti4 + aika + stringElementti5;
}
var asda = document.getElementsByName
function tarkistaKoko(){
if (parseInt(rowspanKokoSallija, 10) < (selectedTimeIndex +1)) {
return false;
} else {
return true;
}
}
function tarkistaKokoKonfliktit(){
while(selectedTimeIndex2 > 0) {
if (!$(seuraavanLapsitdt2[rowspanKohta]).hasClass('no-events')){
alert("Asettamasi aika on konfliktissa toisen ajan kanssa");
return false;
}
seuraavanLapsitdt2 = seuraavanLapsitdt2[0].parentElement.nextElementSibling.children;
selectedTimeIndex2--;
}
}
var rowspanKokoSallija = 0;
var seuraavaIsantatd;
var seuraavanLapsitdt;
var tdosoitin;
var listanPituusCounter;
// Kaytetaan uuden elokuvablokin sijoittamiseen kalenteriin adminin toimesta.
// Kutsuu tarkistakonfliktit() ja poistaa tyhjät blokit elokuvablokin alta.
function asetaAika(){
changeFunc();
rowspanElementti = selectedTimeIndex +1;
updateHTMLBlock();
if (elokuvanNimi == ""){
alert("Et antanut nimea");
return;
}
for (var u = 0; u < checklist.length; u++) {
if ($(checklist[u]).hasClass('selected')){
var whileLoopControlElement = checklist[u];
seuraavanLapsitdt = whileLoopControlElement.parentElement.nextElementSibling.children;
seuraavanLapsitdt2 = whileLoopControlElement.parentElement.nextElementSibling.children;
rowspanKohta = $(checklist[u]).index();
rowspanKokoSallija = parseInt(checklist[u].parentElement.children[0].className.split(' ')[1]);
if (tarkistaKoko() == false){
alert("Elokuvan aika yli aukiolajan");
return;
}
if (tarkistaKokoKonfliktit() != false){
while(selectedTimeIndex > 0) {
//HAIKKAAA Indeksi arvo rowspankohta on vaarin silla koodi ei ota huomioon
// elokuvablokkien alla olevia puuttuvia gridin elementteja,
// koodi pitaisi uudelleenkirjoittaa niin etta elokuvablokeissa olisi
// mukana placeholderblokkeja.
if ($(seuraavanLapsitdt[rowspanKohta]).hasClass('no-events')){
$(seuraavanLapsitdt[rowspanKohta]).remove();
}
seuraavanLapsitdt = seuraavanLapsitdt[0].parentElement.nextElementSibling.children;
selectedTimeIndex--;
}
} else return;
$(checklist[u]).replaceWith( elokuvaBlokki );
// while(selectedTimeIndex > 0) {
// $(seuraavanLapsitdt).last("td").remove();
/**
* tassa voisi nyt olla jaaneen undefined arvon poisto, undefined on automaattisesti listan lopussa
*/
// listanPituusCounter = seuraavanLapsitdt.length;
// seuraavanLapsitdt = seuraavanLapsitdt[0].parentElement.nextElementSibling.children;
// selectedTimeIndex--;
// }
}
}
updateCalendarEvents();
tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
}
var para = document.createElement("td");
function poistaAika(){
for (var u = 0; u < checklist.length; u++) {
// if ($(checklist[u]).hasClass('selectedForAnnihilation') && $(checklist[u]).hasClass('no-events')) {
// $(checklist[u]).replaceWith( perusStringElementti );
// }
if ($(checklist[u]).hasClass('selectedForAnnihilation')){
var lisaajaCounterArvo = checklist[u].rowSpan;
var whileLoopControlElement = checklist[u];
seuraavanLapsitdt = whileLoopControlElement.parentElement.children;
var seuraavanParenttd = whileLoopControlElement.parentElement.nextElementSibling;
rowspanKohta = $(checklist[u]).index();
while(lisaajaCounterArvo > 1) {
// if ($(seuraavanLapsitdt[rowspanKohta]).hasClass('no-events')){
// seuraavanParenttd.insertBefore(para, lapsitdtNode[rowspanKohta]);
seuraavanParenttd.children[rowspanKohta].insertAdjacentHTML("beforebegin", perusStringElementti);
alert("used");
// }
// var seuraavanParenttd = seuraavanLapsitdt[0].parentElement.nextElementSibling;
// $( seuraavanParenttd.previousElementSibling ).append( perusStringElementti );
// listanPituusCounter = seuraavanLapsitdt.length;
seuraavanParenttd = seuraavanParenttd.children[0].parentElement.nextElementSibling;
lisaajaCounterArvo--;
}
alert(checklist[u].rowSpan);
$(checklist[u]).replaceWith( perusStringElementti );
updateCalendarEvents();
}
tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
}
}
var modiviedtime = document.lastModified;
var ataglist = document.getElementsByClassName("saliTogglet");
//ataglist = fillArray(ataglist, 3);
//ataglist[0] = document.getElementById("original");
var tablelist = $(".tableTogglet");
sessionStorage.setItem("GlobalTableArray", tablelist);
//var tableArray = document.getElementsByClassName("tableTogglet");
//tableArray = fillArray(tableArray, 4);
//tableArray[0] = document.getElementById("theTable");
//var nappilist = document.getElementsByClassName("nappiSetit");
var nappilist = $(".nappiSetit");
//nappilist = fillArray(nappilist, 3);
//nappilist[0] = document.getElementById("nappiSetti");
var teatteriNappilist = document.getElementsByClassName("teatteriTogglet");
function toggleTeatteri(sali){
// for (var i = 0; i < ataglist.length; i++){
// $(ataglist[i]).removeClass("active");
// }
$( nappilist[sali] ).toggle(true);
$(nappilist[sali]).siblings().toggle( false );
}
toggleTeatteri("0")
$(ataglist[0]).addClass("active");
function toggleShowRoom(room){
for (var i = 0; i < ataglist.length; i++){
$( ataglist[i] ).removeClass("active");
}
$( tablelist[room] ).toggle(true);
$(tablelist[room]).siblings().toggle( false );
};
toggleShowRoom(0);
function toggleTable(indexsi) {
for (var i = 0; i < ataglist.length; i++) {
var x = ataglist[i];
if (x.style.display === 'none') {
x.style.display = 'block';
} else {
x.style.display = 'none';
}
}
}
//var lTable = document.getElementById(""+table1+"");
//lTable.style.display = (lTable.style.display == "table1") ? "none" : "table1";
// Elokuvateatterinappien salinvaihto klikattaessa elokuvateatterinappeja
jQuery(".list-group-item").click(function (e) {
jQuery(this).addClass('active').siblings().removeClass('active');
});
//for (var i = 0; i < teatteriNappilist.length; i++) {
// teatteriNappilist[i].addEventListener("click", toggleShowRoom())
//}
//arraykokeilu----------------------------------------------------------------------------------------------------------------------------
function f | illArray( | identifier_name |
|
varausohjelmajs.js | // tarkistetaan onko yhtaan kayttajaa kirjattu sisaan tassa sessiossa
if (loginArray == null || loginArray == undefined){
loginError.style.display = 'block';
RegisterErrorText.innerHTML = "ERROR: No readable users registered";
return;
}
var x = document.forms["loginForm"]["loginUsername"].value;
var y = document.forms["loginForm"]["loginPassword"].value;
// voisi tehda kaksitasosen if-lausekkeen, erikseen tyhjalle kentalle ja kentalle jossa arvot ovat vaaria
if (x==null || x == "" || y==null || y=="" || (!loginArray.includes("@ " + x + " " + y) && !loginArray.includes(x + " " + y))){
loginError.style.display = 'block';
RegisterErrorText.innerHTML = "ERROR: Incorrect username or password";
alert(loginArray.includes("@ " + x + " " + y) + " " + loginArray.includes(x + " " + y));
return;
}
alert(loginArray.includes("@ " + x + " " + y) + " " + loginArray.includes(x + " " + y));
// tassa if-lauseessa asetetaan uusin kirjautunut kayttaja instanssin loginiksi, tata kaytetaan jatkossa varauksiin
var currentUserImput = "@ " + x + " " + y
if (loginArray.includes("@ " + x + " " + y) && searchStringInArray("@ " + x + " " + y, loginArray) != -1){
alert("adminuser");
sessionStorage.setItem("currentLoginIndex", searchStringInArray("@ " + x + " " + y, loginArray));
sessionStorage.setItem("currentLoginUser", loginArray[searchStringInArray("@ " + x + " " + y, loginArray)]);
//redirect loginin jalkeen
window.location.href="index.html";
} else if (loginArray.includes(x + " " + y) && searchStringInArray(x + " " + y, loginArray) != -1){
alert("normaluser");
sessionStorage.setItem("currentLoginIndex", searchStringInArray(x + " " + y, loginArray));
sessionStorage.setItem("currentLoginUser", loginArray[searchStringInArray(x + " " + y, loginArray)]);
//redirect loginin jalkeen
window.location.href="elokuvaAsiakassivu.html";
} else alert("Unknown login error has occurred");
}
// loginuserin tamanhetkisen kirjautujan etsiminen rekisteroityjen kayttajien listasta,
// palauttaa indeksin jossa kauttaja on mikali annettu parametri on listassa
function searchStringInArray (searchString, searchStringArray) {
for ( var i = 0 ; i < searchStringArray.length ; i++ ) {
if (searchStringArray[i] == searchString)
return i;
}
return -1;
}
// funktio jolla paivitetaan html-osiot jotka ovat riippuvaisia annetusta kauttajasta
function paivitaKayttajaElementit(kayttajaElementti){
kayttajaElementti.innerHTML = sessionStorage.getItem("currentLoginUser");
alert(sessionStorage.getItem("currentLoginUser"));
}
stringElementti1 = '<td class=" has-events" rowspan="';
rowspanElementti = ""+ listanPituusCounter +"";
stringElementti2 = '"><div class="row-fluid elokuvaElementti1" style="width: 99%; height: 100%;"><span class="title">' ;
stringElementti3 = '</span> <span class="sali"><a>' ;
stringElementti4 = '</a></span><span class="aika">' ;
stringElementti5 = '</span></div></td>' ;
perusStringElementti = '<td class=" no-events" rowspan="1"></td>';
var elokuvaBlokki = stringElementti1 + rowspanElementti + stringElementti2 + elokuvanNimi + stringElementti3 + sali + stringElementti4 + aika + stringElementti5;
var elokuvanNimi = "asda";
var sali = "sali 1*";
var aika = changeFunc() || "00:31";
jQuery(".list-group-item").click(function (e) {
jQuery(this).addClass('active').siblings().removeClass('active');
});
jQuery("#sel1").click(function (ee) {
jQuery(this).addClass('selected').siblings().removeClass('active');
});
// Eventlistenerit hyllytetty silla tarkistuksen voi asettaa submittausmetodin alkuun
var elokuvanNimiElementti = document.getElementById("ElokuvaNimiImput");
var selectBox = document.getElementById("sel2");
var selectedValue = selectBox.options[selectBox.selectedIndex].value;
var selectedTimeIndex = selectBox.options[selectBox.selectedIndex].index;
var selectedTimeIndex2 = selectedTimeIndex;
elokuvanNimiElementti.addEventListener("", changeFunc)
selectBox.addEventListener("", changeFunc)
function changeFunc() {
var selectBox = document.getElementById("sel2");
selectedValue = selectBox.options[selectBox.selectedIndex].innerHTML;
selectedTimeIndex = selectBox.options[selectBox.selectedIndex].index;
selectedTimeIndex2 = selectedTimeIndex;
aika = selectedValue;
elokuvanNimi = document.getElementById("usrInputNimi").value;
updateHTMLBlock();
}
$("td").change(function() {
alert($(this).find("option:selected").text()+' clicked!');
});
$("#myElement").click(function() {
$(this).siblings(":last").hide();
});
$("a").click(function(event){
event.preventDefault();
});
// updates all the clickevents for the td tags of the calendar
var checklist = document.getElementsByTagName("td");
function updateCalendarEvents(){
var checklist = document.getElementsByTagName("td");
for (var i = 0; i < checklist.length; i++) {
checklist[i].addEventListener("click", updateSelectedtd)
}
}
// var tdlista = checklist.querySelectorAll("td");
for (var i = 0; i < checklist.length; i++) {
checklist[i].addEventListener("click", updateSelectedtd)
}
function updateSelectedtd(){
for (var k = 0; k < checklist.length; k++) {
checklist[k].style.backgroundColor = "#492079";
if ($(checklist[k]).hasClass('selected')){
checklist[k].classList.remove("selected");
}
else if ($(checklist[k]).hasClass('selectedForAnnihilation')){
checklist[k].classList.remove("selectedForAnnihilation");
}
}
if($(this).hasClass('noselect')){
return;
} else if($(this).hasClass('has-events')){
$(this).addClass('selectedForAnnihilation')
this.style.backgroundColor = "#e20a0a";
}
else {
this.classList.add("selected");
$(this).addClass('selected')
this.style.backgroundColor = "green";
}
}
function updateHTMLBlock(){
elokuvaBlokki = stringElementti1 + rowspanElementti + stringElementti2 + elokuvanNimi + stringElementti3 + sali + stringElementti4 + aika + stringElementti5;
}
var asda = document.getElementsByName
| return true;
}
}
function tarkistaKokoKonfliktit(){
while(selectedTimeIndex2 > 0) {
if (!$(seuraavanLapsitdt2[rowspanKohta]).hasClass('no-events')){
alert("Asettamasi aika on konfliktissa toisen ajan kanssa");
return false;
}
seuraavanLapsitdt2 = seuraavanLapsitdt2[0].parentElement.nextElementSibling.children;
selectedTimeIndex2--;
}
}
var rowspanKokoSallija = 0;
var seuraavaIsantatd;
var seuraavanLapsitdt;
var tdosoitin;
var listanPituusCounter;
// Kaytetaan uuden elokuvablokin sijoittamiseen kalenteriin adminin toimesta.
// Kutsuu tarkistakonfliktit() ja poistaa tyhjät blokit elokuvablokin alta.
function asetaAika(){
changeFunc();
rowspanElementti = selectedTimeIndex +1;
updateHTMLBlock();
if (elokuvanNimi == ""){
alert("Et antanut nimea");
return;
}
for (var u = 0; u < checklist.length; u++) {
if ($(checklist[u]).hasClass('selected')){
var whileLoopControlElement = checklist[u];
seuraavanLapsitdt = whileLoopControlElement.parentElement.nextElementSibling.children;
seuraavanLapsitdt2 = whileLoopControlElement.parentElement.nextElementSibling.children;
rowspanKohta = $(checklist | function tarkistaKoko(){
if (parseInt(rowspanKokoSallija, 10) < (selectedTimeIndex +1)) {
return false;
} else {
| random_line_split |
openfoodfacts.py | eu lieu.
Attention, en fonction du nombre de pages web à scrapper, le temps de computation peut vite exploser.
"""
def scrap_openfoodfacts(nb_pages = 50) :
""" Il s'agit de la fonction principale du module.
Cette dernière crée dans votre espace de travail un DataFrame Pandas contenant les informations scrapées sur le site OpenFoodFacts.
L'argument "nb_pages" permet de régler le nombre de page à scraper.
Veuillez ne pas trop l'augmenter afin que l'opération prenne un temps raisonnable.
Il faut compter environ 30 secondes pour scraper une page (25 minutes pour les 50 pages par défaut).
27 variables sont scrapées pour chaque nouvelle donnée.
"""
# Importation des modules
import time
from time import sleep
import numpy as np
import requests
import re
from bs4 import BeautifulSoup
# Mesure du temps
start_time = time.time()
# Initialisation de la liste records récoltant nos données
records = []
#Initialisation de la valeur des erreurs à implémenter dans le DataSet
error = np.NaN
# On récupère l'url de chaque produit sur le nombre de pages souhaitées
for i in range(1,nb_pages+1) :
r = requests.get(('https://fr.openfoodfacts.org/' + str(i)))
soup = BeautifulSoup(r.text, 'html.parser')
products = soup.find_all('ul', {'class' : "products"})
products = products[0].find_all('a')
liste_url = ['https://fr.openfoodfacts.org/' + elt['href'] for elt in products]
# Pour chaque produit on place dans des variables les données que l'on souhaite scraper
for url in liste_url :
s = requests.get(url)
soup = BeautifulSoup(s.text, 'html.parser')
# Si la donnée peut être récupérée, on la place dans notre variable, sinon on la replace par une erreur
try :
name = soup.title.text[:-2]
except :
name = error
try :
code_barre = soup.find('span', attrs = {'style' : "speak-as:digits;"}).text
except :
code_barre = error
try:
nutri_score = soup.find('div', attrs = {'id' : 'nutriscore_drop'}).contents[-2].text[-1]
except :
nutri_score = error
try :
nova = soup.find(style = "margin-bottom:1rem;max-width:100%")['alt'][0]
nova = [float(elt) for elt in nova.split() if elt.replace('.', '').isdigit()].pop()
except :
nova = error
try :
caractรฉristiques = soup.find(itemprop="description").text
except :
caractรฉristiques = error
try :
ingrรฉdients = soup.find(property="food:ingredientListAsText").text
except :
ingrรฉdients = error
try :
palme = soup.find('span', {'class' : "alert round label ingredients_analysis green"}).contents[-1][:-3]
except :
palme = error
try :
palme2 = soup.find(href="/ingredients-issus-de-l-huile-de-palme/huile-de-palme").text
except :
palme2 = error
try :
repรจres_nutritionnels = soup.find_all('div', {'class' : "small-12 xlarge-6 columns"})[1].text.split("\n")[-5:-1]
except :
repรจres_nutritionnels = error
# On découpe les repères nutritionnels en 4 variables distinctes (matière grasse, acide gras, sucre et sel)
# Puis on les transforme en float pour faciliter l'analyse
liste_repรจres_nutri = repรจres_nutritionnels
try :
matiรจre_grasse = liste_repรจres_nutri[0]
matiรจre_grasse = [float(elt) for elt in matiรจre_grasse.split() if elt.replace('.', '').isdigit()].pop()
except :
matiรจre_grasse = error
try :
acide_gras = liste_repรจres_nutri[1]
acide_gras = [float(elt) for elt in acide_gras.split() if elt.replace('.', '').isdigit()].pop()
except :
acide_gras = error
try :
sucre = liste_repรจres_nutri[2]
sucre = [float(elt) for elt in sucre.split() if elt.replace('.', '').isdigit()].pop()
except :
sucre = error
try :
sel = liste_repรจres_nutri[3]
sel = [float(elt) for elt in sel.split() if elt.replace('.', '').isdigit()].pop()
except :
sel = error
# On utilise la même méthode sur les KJ et les KCAL pour les transformer en float
try :
kj = soup.find(id="nutriment_energy-kj_tr").find('td', {'class' : 'nutriment_value'}).text[9: 13]
kj = [float(elt) for elt in kj.split() if elt.replace('.', '').isdigit()].pop()
except :
kj = error
try :
kcal = soup.find(id="nutriment_energy-kcal_tr").find('td', {'class' : 'nutriment_value'}).text[9:15]
kcal = [float(elt) for elt in kcal.split() if elt.replace('.', '').isdigit()].pop()
except :
kcal = error
try :
eco_score = soup.find(id="ecoscore_drop").contents[-2].text[-1]
except :
eco_score = error
#Pour toutes les variables suivantes; l'utilisation de Regex va nous permettre d'extraire la donnée
info = soup.find('div',{ 'class':'medium-12 large-8 xlarge-8 xxlarge-8 columns'})
infos = []
for el in info:
try:
infos.append(el.text)
| r = re.compile('^Quan.*$')
quantity = list(filter(r.match, infos))[0].split(':')[-1]
except :
quantity = error
try :
r = re.compile('^Conditionnement.*$')
conditionnement = list(filter(r.match, infos))[0].split(':')[-1]
except :
conditionnement = error
try :
r = re.compile('^Marques.*$')
marques = list(filter(r.match, infos))[0].split(':')[-1]
except :
marques = error
try :
r = re.compile('^Catégories.*$')
catรฉgories = list(filter(r.match, infos))[0].split(':')[-1]
except :
catรฉgories = error
try :
r = re.compile('^Labels.*$')
labels = list(filter(r.match, infos))[0].split(':')[-1]
except :
labels = error
try :
r = re.compile('^Lieux.*$')
lieux = list(filter(r.match, infos))[0].split(':')[-1]
except :
lieux = error
try :
r = re.compile('^Code.*$')
code = list(filter(r.match, infos))[0].split(':')[-1]
except :
code = error
try :
r = re.compile('^Lien.*$')
lien = list(filter(r.match, infos))[0].split(':')[-1]
except :
lien = error
try :
r = re.compile('^Magasins.*$')
magasins = list(filter(r.match, infos))[0].split(':')[-1]
except :
magasins = error
try :
r = re.compile('^Origine.*$')
origine = list(filter(r.match, infos))[0].split(':')[-1]
except :
origine = error
try :
r = re.compile('^Pays.*$')
pays = list(filter(r.match, infos))[0].split(',')[1:]
except :
pays = error
nb_pays = len(pays)
#On place nos différentes variables dans la liste records
records.append((name, code_barre, nutri_score, nova, caractรฉristiques, ingrรฉdients, palme, palme2,
kj, kcal, eco_score, quantity, conditionnement, marques, catรฉgories, labels, lieux, code, lien, magasins,
origine, pays, nb_pays, matiรจre_grasse, acide_gras, sucre, sel))
i+=1
# On laisse un temps d'attente entre chaque itération pour ne pas provoquer une erreur dû au trop grand nombre de requêtes envoyées
# vers Open Food Facts
sleep(1)
# On construit le DataFrame, puis on l'exporte dans l'espace de travail
import pandas as pd
df = pd.DataFrame(records, columns = ['Produit', 'CodeBarre', | except:
pass
try :
| conditional_block |
openfoodfacts.py | eu lieu.
Attention, en fonction du nombre de pages web à scrapper, le temps de computation peut vite exploser.
"""
def scrap_openfoodfacts(nb_pages = 50) :
""" Il s'agit de la fonction principale du module.
Cette dernière crée dans votre espace de travail un DataFrame Pandas contenant les informations scrapées sur le site OpenFoodFacts.
L'argument "nb_pages" permet de régler le nombre de page à scraper.
Veuillez ne pas trop l'augmenter afin que l'opération prenne un temps raisonnable.
Il faut compter environ 30 secondes pour scraper une page (25 minutes pour les 50 pages par défaut).
27 variables sont scrapées pour chaque nouvelle donnée.
"""
# Importation des modules
import time
from time import sleep
import numpy as np
import requests
import re
from bs4 import BeautifulSoup
# Mesure du temps
start_time = time.time()
# Initialisation de la liste records récoltant nos données
records = []
#Initialisation de la valeur des erreurs à implémenter dans le DataSet
error = np.NaN
# On récupère l'url de chaque produit sur le nombre de pages souhaitées
for i in range(1,nb_pages+1) :
r = requests.get(('https://fr.openfoodfacts.org/' + str(i)))
soup = BeautifulSoup(r.text, 'html.parser')
products = soup.find_all('ul', {'class' : "products"})
products = products[0].find_all('a')
liste_url = ['https://fr.openfoodfacts.org/' + elt['href'] for elt in products]
# Pour chaque produit on place dans des variables les données que l'on souhaite scraper
for url in liste_url :
s = requests.get(url)
soup = BeautifulSoup(s.text, 'html.parser')
# Si la donnée peut être récupérée, on la place dans notre variable, sinon on la replace par une erreur
try :
name = soup.title.text[:-2]
except :
name = error
try :
code_barre = soup.find('span', attrs = {'style' : "speak-as:digits;"}).text
except :
code_barre = error
try:
nutri_score = soup.find('div', attrs = {'id' : 'nutriscore_drop'}).contents[-2].text[-1]
except :
nutri_score = error
try :
nova = soup.find(style = "margin-bottom:1rem;max-width:100%")['alt'][0]
nova = [float(elt) for elt in nova.split() if elt.replace('.', '').isdigit()].pop()
except :
nova = error
try :
caractรฉristiques = soup.find(itemprop="description").text
except :
caractรฉristiques = error
try :
ingrรฉdients = soup.find(property="food:ingredientListAsText").text
except :
ingrรฉdients = error
try :
palme = soup.find('span', {'class' : "alert round label ingredients_analysis green"}).contents[-1][:-3]
except :
palme = error
try :
palme2 = soup.find(href="/ingredients-issus-de-l-huile-de-palme/huile-de-palme").text
except :
palme2 = error
try :
repรจres_nutritionnels = soup.find_all('div', {'class' : "small-12 xlarge-6 columns"})[1].text.split("\n")[-5:-1]
except :
repรจres_nutritionnels = error
# On découpe les repères nutritionnels en 4 variables distinctes (matière grasse, acide gras, sucre et sel)
# Puis on les transforme en float pour faciliter l'analyse
liste_repรจres_nutri = repรจres_nutritionnels
try :
matiรจre_grasse = liste_repรจres_nutri[0]
matiรจre_grasse = [float(elt) for elt in matiรจre_grasse.split() if elt.replace('.', '').isdigit()].pop()
except :
matiรจre_grasse = error
try :
acide_gras = liste_repรจres_nutri[1]
acide_gras = [float(elt) for elt in acide_gras.split() if elt.replace('.', '').isdigit()].pop()
except :
acide_gras = error
try : | except :
sucre = error
try :
sel = liste_repรจres_nutri[3]
sel = [float(elt) for elt in sel.split() if elt.replace('.', '').isdigit()].pop()
except :
sel = error
# On utilise la même méthode sur les KJ et les KCAL pour les transformer en float
try :
kj = soup.find(id="nutriment_energy-kj_tr").find('td', {'class' : 'nutriment_value'}).text[9: 13]
kj = [float(elt) for elt in kj.split() if elt.replace('.', '').isdigit()].pop()
except :
kj = error
try :
kcal = soup.find(id="nutriment_energy-kcal_tr").find('td', {'class' : 'nutriment_value'}).text[9:15]
kcal = [float(elt) for elt in kcal.split() if elt.replace('.', '').isdigit()].pop()
except :
kcal = error
try :
eco_score = soup.find(id="ecoscore_drop").contents[-2].text[-1]
except :
eco_score = error
#Pour toutes les variables suivantes; l'utilisation de Regex va nous permettre d'extraire la donnée
info = soup.find('div',{ 'class':'medium-12 large-8 xlarge-8 xxlarge-8 columns'})
infos = []
for el in info:
try:
infos.append(el.text)
except:
pass
try :
r = re.compile('^Quan.*$')
quantity = list(filter(r.match, infos))[0].split(':')[-1]
except :
quantity = error
try :
r = re.compile('^Conditionnement.*$')
conditionnement = list(filter(r.match, infos))[0].split(':')[-1]
except :
conditionnement = error
try :
r = re.compile('^Marques.*$')
marques = list(filter(r.match, infos))[0].split(':')[-1]
except :
marques = error
try :
r = re.compile('^Catégories.*$')
catรฉgories = list(filter(r.match, infos))[0].split(':')[-1]
except :
catรฉgories = error
try :
r = re.compile('^Labels.*$')
labels = list(filter(r.match, infos))[0].split(':')[-1]
except :
labels = error
try :
r = re.compile('^Lieux.*$')
lieux = list(filter(r.match, infos))[0].split(':')[-1]
except :
lieux = error
try :
r = re.compile('^Code.*$')
code = list(filter(r.match, infos))[0].split(':')[-1]
except :
code = error
try :
r = re.compile('^Lien.*$')
lien = list(filter(r.match, infos))[0].split(':')[-1]
except :
lien = error
try :
r = re.compile('^Magasins.*$')
magasins = list(filter(r.match, infos))[0].split(':')[-1]
except :
magasins = error
try :
r = re.compile('^Origine.*$')
origine = list(filter(r.match, infos))[0].split(':')[-1]
except :
origine = error
try :
r = re.compile('^Pays.*$')
pays = list(filter(r.match, infos))[0].split(',')[1:]
except :
pays = error
nb_pays = len(pays)
#On place nos différentes variables dans la liste records
records.append((name, code_barre, nutri_score, nova, caractรฉristiques, ingrรฉdients, palme, palme2,
kj, kcal, eco_score, quantity, conditionnement, marques, catรฉgories, labels, lieux, code, lien, magasins,
origine, pays, nb_pays, matiรจre_grasse, acide_gras, sucre, sel))
i+=1
# On laisse un temps d'attente entre chaque itération pour ne pas provoquer une erreur dû au trop grand nombre de requêtes envoyées
# vers Open Food Facts
sleep(1)
# On construit le DataFrame, puis on l'exporte dans l'espace de travail
import pandas as pd
df = pd.DataFrame(records, columns = ['Produit', 'CodeBarre', 'Nut | sucre = liste_repรจres_nutri[2]
sucre = [float(elt) for elt in sucre.split() if elt.replace('.', '').isdigit()].pop() | random_line_split |
openfoodfacts.py | eu lieu.
Attention, en fonction du nombre de pages web à scrapper, le temps de computation peut vite exploser.
"""
def scrap_openfoodfacts(nb_pages = 50) :
""" Il s'a | #Initialisation de la valeur des erreurs ร implรฉmenter dans le DataSet
error = np.NaN
# On rรฉcupรจre l'url de chaque produit sur le nombre de pages souhaitรฉes
for i in range(1,nb_pages+1) :
r = requests.get(('https://fr.openfoodfacts.org/' + str(i)))
soup = BeautifulSoup(r.text, 'html.parser')
products = soup.find_all('ul', {'class' : "products"})
products = products[0].find_all('a')
liste_url = ['https://fr.openfoodfacts.org/' + elt['href'] for elt in products]
# Pour chaque produit on place dans des variables les donnรฉes que l'on souhaite scraper
for url in liste_url :
s = requests.get(url)
soup = BeautifulSoup(s.text, 'html.parser')
# Si la donnรฉe peut รชtre rรฉcupรฉrรฉe, on la place dans notre variable, sinon on la replace par une erreur
try :
name = soup.title.text[:-2]
except :
name = error
try :
code_barre = soup.find('span', attrs = {'style' : "speak-as:digits;"}).text
except :
code_barre = error
try:
nutri_score = soup.find('div', attrs = {'id' : 'nutriscore_drop'}).contents[-2].text[-1]
except :
nutri_score = error
try :
nova = soup.find(style = "margin-bottom:1rem;max-width:100%")['alt'][0]
nova = [float(elt) for elt in nova.split() if elt.replace('.', '').isdigit()].pop()
except :
nova = error
try :
caractรฉristiques = soup.find(itemprop="description").text
except :
caractรฉristiques = error
try :
ingrรฉdients = soup.find(property="food:ingredientListAsText").text
except :
ingrรฉdients = error
try :
palme = soup.find('span', {'class' : "alert round label ingredients_analysis green"}).contents[-1][:-3]
except :
palme = error
try :
palme2 = soup.find(href="/ingredients-issus-de-l-huile-de-palme/huile-de-palme").text
except :
palme2 = error
try :
repรจres_nutritionnels = soup.find_all('div', {'class' : "small-12 xlarge-6 columns"})[1].text.split("\n")[-5:-1]
except :
repรจre_nutritionnels = error
# On dรฉcoupe les repรจres nutritionnels en 4 variabes distinctes (matiรจre grasse, acide gras, sucre et sel)
# Puis on les transforme en float pour faciliter l'analyse
liste_repรจres_nutri = repรจres_nutritionnels
try :
matiรจre_grasse = liste_repรจres_nutri[0]
matiรจre_grasse = [float(elt) for elt in matiรจre_grasse.split() if elt.replace('.', '').isdigit()].pop()
except :
matiรจre_grasse = error
try :
acide_gras = liste_repรจres_nutri[1]
acide_gras = [float(elt) for elt in acide_gras.split() if elt.replace('.', '').isdigit()].pop()
except :
acide_gras = error
try :
sucre = liste_repรจres_nutri[2]
sucre = [float(elt) for elt in sucre.split() if elt.replace('.', '').isdigit()].pop()
except :
sucre = error
try :
sel = liste_repรจres_nutri[3]
sel = [float(elt) for elt in sel.split() if elt.replace('.', '').isdigit()].pop()
except :
sel = error
# On utilise la mรชme mรฉthode sur les KJ et les KCAL pour les transformer en float
try :
kj = soup.find(id="nutriment_energy-kj_tr").find('td', {'class' : 'nutriment_value'}).text[9: 13]
kj = [float(elt) for elt in kj.split() if elt.replace('.', '').isdigit()].pop()
except :
kj = error
try :
kcal = soup.find(id="nutriment_energy-kcal_tr").find('td', {'class' : 'nutriment_value'}).text[9:15]
kcal = [float(elt) for elt in kcal.split() if elt.replace('.', '').isdigit()].pop()
except :
kcal = error
try :
eco_score = soup.find(id="ecoscore_drop").contents[-2].text[-1]
except :
eco_score = error
#Pour toutes les variables suivantes; l'utilisation de Regex va nous permettre d'extraire la donnรฉe
info = soup.find('div',{ 'class':'medium-12 large-8 xlarge-8 xxlarge-8 columns'})
infos = []
for el in info:
try:
infos.append(el.text)
except:
pass
try :
r = re.compile('^Quan.*$')
quantity = list(filter(r.match, infos))[0].split(':')[-1]
except :
quantity = error
try :
r = re.compile('^Conditionnement.*$')
conditionnement = list(filter(r.match, infos))[0].split(':')[-1]
except :
conditionnement = error
try :
r = re.compile('^Marques.*$')
marques = list(filter(r.match, infos))[0].split(':')[-1]
except :
marques = error
try :
r = re.compile('^Catรฉgories.*$')
catรฉgories = list(filter(r.match, infos))[0].split(':')[-1]
except :
catรฉgoris = error
try :
r = re.compile('^Labels.*$')
labels = list(filter(r.match, infos))[0].split(':')[-1]
except :
labels = error
try :
r = re.compile('^Lieux.*$')
lieux = list(filter(r.match, infos))[0].split(':')[-1]
except :
lieux = error
try :
r = re.compile('^Code.*$')
code = list(filter(r.match, infos))[0].split(':')[-1]
except :
code = error
try :
r = re.compile('^Lien.*$')
lien = list(filter(r.match, infos))[0].split(':')[-1]
except :
lien = error
try :
r = re.compile('^Magasins.*$')
magasins = list(filter(r.match, infos))[0].split(':')[-1]
except :
magasins = error
try :
r = re.compile('^Origine.*$')
origine = list(filter(r.match, infos))[0].split(':')[-1]
except :
origine = error
try :
r = re.compile('^Pays.*$')
pays = list(filter(r.match, infos))[0].split(',')[1:]
except :
pays = error
nb_pays = len(pays)
#On place nos différentes variables dans la liste records
records.append((name, code_barre, nutri_score, nova, caractรฉristiques, ingrรฉdients, palme, palme2,
kj, kcal, eco_score, quantity, conditionnement, marques, catรฉgories, labels, lieux, code, lien, magasins,
origine, pays, nb_pays, matiรจre_grasse, acide_gras, sucre, sel))
i+=1
# On laisse un temps d'attente entre chaque itération pour ne pas provoquer une erreur dû au trop grand nombre de requêtes envoyées
# vers Open Food Facts
sleep(1)
# On construit le DataFrame, puis on l'exporte dans l'espace de travail
import pandas as pd
df = pd.DataFrame(records, columns = ['Produit', 'CodeBarre', 'Nut | git de la fonction principale du module.
Cette dernière crée dans votre espace de travail un DataFrame Pandas contenant les informations scrapées sur le site OpenFoodFacts.
L'argument "nb_pages" permet de régler le nombre de page à scraper.
Veuillez ne pas trop l'augmenter afin que l'opération prenne un temps raisonnable.
Il faut compter environ 30 secondes pour scraper une page (25 minutes pour les 50 pages par défaut).
27 variables sont scrapées pour chaque nouvelle donnée.
"""
# Importation des modules
import time
from time import sleep
import numpy as np
import requests
import re
from bs4 import BeautifulSoup
# Mesure du temps
start_time = time.time()
# Initialisation de la liste records récoltant nos données
records = [] | identifier_body |
openfoodfacts.py | eu lieu.
Attention, en fonction du nombre de pages web à scrapper, le temps de computation peut vite exploser.
"""
def scrap_open | = 50) :
""" Il s'agit de la fonction principale du module.
Cette dernière crée dans votre espace de travail un DataFrame Pandas contenant les informations scrapées sur le site OpenFoodFacts.
L'argument "nb_pages" permet de régler le nombre de page à scraper.
Veuillez ne pas trop l'augmenter afin que l'opération prenne un temps raisonnable.
Il faut compter environ 30 secondes pour scraper une page (25 minutes pour les 50 pages par défaut).
27 variables sont scrapées pour chaque nouvelle donnée.
"""
# Importation des modules
import time
from time import sleep
import numpy as np
import requests
import re
from bs4 import BeautifulSoup
# Mesure du temps
start_time = time.time()
# Initialisation de la liste records récoltant nos données
records = []
#Initialisation de la valeur des erreurs à implémenter dans le DataSet
error = np.NaN
# On récupère l'url de chaque produit sur le nombre de pages souhaitées
for i in range(1,nb_pages+1) :
r = requests.get(('https://fr.openfoodfacts.org/' + str(i)))
soup = BeautifulSoup(r.text, 'html.parser')
products = soup.find_all('ul', {'class' : "products"})
products = products[0].find_all('a')
liste_url = ['https://fr.openfoodfacts.org/' + elt['href'] for elt in products]
# Pour chaque produit on place dans des variables les données que l'on souhaite scraper
for url in liste_url :
s = requests.get(url)
soup = BeautifulSoup(s.text, 'html.parser')
# Si la donnée peut être récupérée, on la place dans notre variable, sinon on la replace par une erreur
try :
name = soup.title.text[:-2]
except :
name = error
try :
code_barre = soup.find('span', attrs = {'style' : "speak-as:digits;"}).text
except :
code_barre = error
try:
nutri_score = soup.find('div', attrs = {'id' : 'nutriscore_drop'}).contents[-2].text[-1]
except :
nutri_score = error
try :
nova = soup.find(style = "margin-bottom:1rem;max-width:100%")['alt'][0]
nova = [float(elt) for elt in nova.split() if elt.replace('.', '').isdigit()].pop()
except :
nova = error
try :
caractรฉristiques = soup.find(itemprop="description").text
except :
caractรฉristiques = error
try :
ingrรฉdients = soup.find(property="food:ingredientListAsText").text
except :
ingrรฉdients = error
try :
palme = soup.find('span', {'class' : "alert round label ingredients_analysis green"}).contents[-1][:-3]
except :
palme = error
try :
palme2 = soup.find(href="/ingredients-issus-de-l-huile-de-palme/huile-de-palme").text
except :
palme2 = error
try :
repรจres_nutritionnels = soup.find_all('div', {'class' : "small-12 xlarge-6 columns"})[1].text.split("\n")[-5:-1]
except :
repรจres_nutritionnels = error
# On découpe les repères nutritionnels en 4 variables distinctes (matière grasse, acide gras, sucre et sel)
# Puis on les transforme en float pour faciliter l'analyse
liste_repรจres_nutri = repรจres_nutritionnels
try :
matiรจre_grasse = liste_repรจres_nutri[0]
matiรจre_grasse = [float(elt) for elt in matiรจre_grasse.split() if elt.replace('.', '').isdigit()].pop()
except :
matiรจre_grasse = error
try :
acide_gras = liste_repรจres_nutri[1]
acide_gras = [float(elt) for elt in acide_gras.split() if elt.replace('.', '').isdigit()].pop()
except :
acide_gras = error
try :
sucre = liste_repรจres_nutri[2]
sucre = [float(elt) for elt in sucre.split() if elt.replace('.', '').isdigit()].pop()
except :
sucre = error
try :
sel = liste_repรจres_nutri[3]
sel = [float(elt) for elt in sel.split() if elt.replace('.', '').isdigit()].pop()
except :
sel = error
# On utilise la même méthode sur les KJ et les KCAL pour les transformer en float
try :
kj = soup.find(id="nutriment_energy-kj_tr").find('td', {'class' : 'nutriment_value'}).text[9: 13]
kj = [float(elt) for elt in kj.split() if elt.replace('.', '').isdigit()].pop()
except :
kj = error
try :
kcal = soup.find(id="nutriment_energy-kcal_tr").find('td', {'class' : 'nutriment_value'}).text[9:15]
kcal = [float(elt) for elt in kcal.split() if elt.replace('.', '').isdigit()].pop()
except :
kcal = error
try :
eco_score = soup.find(id="ecoscore_drop").contents[-2].text[-1]
except :
eco_score = error
#Pour toutes les variables suivantes; l'utilisation de Regex va nous permettre d'extraire la donnée
info = soup.find('div',{ 'class':'medium-12 large-8 xlarge-8 xxlarge-8 columns'})
infos = []
for el in info:
try:
infos.append(el.text)
except:
pass
try :
r = re.compile('^Quan.*$')
quantity = list(filter(r.match, infos))[0].split(':')[-1]
except :
quantity = error
try :
r = re.compile('^Conditionnement.*$')
conditionnement = list(filter(r.match, infos))[0].split(':')[-1]
except :
conditionnement = error
try :
r = re.compile('^Marques.*$')
marques = list(filter(r.match, infos))[0].split(':')[-1]
except :
marques = error
try :
r = re.compile('^Catégories.*$')
catรฉgories = list(filter(r.match, infos))[0].split(':')[-1]
except :
catรฉgories = error
try :
r = re.compile('^Labels.*$')
labels = list(filter(r.match, infos))[0].split(':')[-1]
except :
labels = error
try :
r = re.compile('^Lieux.*$')
lieux = list(filter(r.match, infos))[0].split(':')[-1]
except :
lieux = error
try :
r = re.compile('^Code.*$')
code = list(filter(r.match, infos))[0].split(':')[-1]
except :
code = error
try :
r = re.compile('^Lien.*$')
lien = list(filter(r.match, infos))[0].split(':')[-1]
except :
lien = error
try :
r = re.compile('^Magasins.*$')
magasins = list(filter(r.match, infos))[0].split(':')[-1]
except :
magasins = error
try :
r = re.compile('^Origine.*$')
origine = list(filter(r.match, infos))[0].split(':')[-1]
except :
origine = error
try :
r = re.compile('^Pays.*$')
pays = list(filter(r.match, infos))[0].split(',')[1:]
except :
pays = error
nb_pays = len(pays)
#On place nos différentes variables dans la liste records
records.append((name, code_barre, nutri_score, nova, caractรฉristiques, ingrรฉdients, palme, palme2,
kj, kcal, eco_score, quantity, conditionnement, marques, catรฉgories, labels, lieux, code, lien, magasins,
origine, pays, nb_pays, matiรจre_grasse, acide_gras, sucre, sel))
i+=1
# On laisse un temps d'attente entre chaque itération pour ne pas provoquer une erreur dû au trop grand nombre de requêtes envoyées
# vers Open Food Facts
sleep(1)
# On construit le DataFrame, puis on l'exporte dans l'espace de travail
import pandas as pd
df = pd.DataFrame(records, columns = ['Produit', 'CodeBarre', ' | foodfacts(nb_pages | identifier_name |
hexformat.go | return true, false
}
}
return false, false
case EndOfFile:
return false, true
case ExtendedSegmentAddress: //16 bit addr
length := converted[0]
if length != 2 {
print("!ESA value has too many bytes:", length, "\n")
return true, false
}
esaAddr := uint32(converted[4])*256 + uint32(converted[5])
esaAddr = esaAddr << 4 //it's assumed to be a multiple of 16
bb.SetBaseAddr(esaAddr)
return false, false
case ExtendedLinearAddress: //32 bit addr but only top 16 passed
length := converted[0]
if length != 2 {
print("!ELA value has too many bytes:", length, "\n")
return true, false
}
elaAddr := uint32(converted[4])*256 + uint32(converted[5])
elaAddr = elaAddr << 16 //data supplied is high order 16 of 32
bb.SetBaseAddr(elaAddr) //but this sets the lower order 32 of 64
return false, false
case ExtensionSetParameters: //4 64 bit integers
length := converted[0]
if length != 32 {
print("!extension parameters must be exactly 32 bytes, but was :", length, "\n")
return true, false
}
for i := 0; i < 4; i++ {
value := uint64(0)
for p := 7; p >= 0; p-- {
placeValue := uint64(1 << (8 * p))
//4 is because of four constant valuesat left of converted[]
//i*8 is which param
//7-p is byte
value += (placeValue * uint64(converted[(4)+(i*8)+(7-p)]))
}
bb.SetParameter(i, value)
}
return false, false
case ExtensionBigLinearAddress: //32 bit int which is the HIGH order of 64bit addr
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigBaseAddr(t)
return false, false
case ExtensionBigEntryPoint: //32 bit int which is the HIGH order of 64bit pointer
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigEntryPoint(t)
return false, false
case StartLinearAddress: //32 bit addr
length := converted[0]
if length != 4 {
print("!SLA value has too many bytes:", length, "\n")
return true, false
}
slaAddr := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetEntryPoint(slaAddr)
return false, false
}
print("!unable to understand line type [processLine]\n")
return false, true
}
// take in a string and return either an exception or a well formed value
func DecodeAndCheckStringToBytes(s string) ([]byte, HexLineType, uint32, error) {
lenAs16 := uint16(len(s))
converted := ConvertBuffer(lenAs16, []byte(s))
if converted == nil {
return nil, HexLineType(0), 0, errors.New("convert buffer failed")
}
var addr uint32
lt, ok := ExtractLineType(converted)
if !ok {
return nil, DataLine, 0, NewEncodeDecodeError(fmt.Sprintf("unable to extract line type from: %s", s))
}
if lt == DataLine {
addr = (uint32(converted[1]) * 256) + (uint32(converted[2]))
}
if ok := ValidBufferLength(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected buffer length to be ok, but wasn't: %s", s))
}
if ok := CheckChecksum(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected checksum to be ok, but wasn't:%s", s))
}
return converted, lt, addr, nil
}
// received a line, check that it has a hope of being syntactically correct
func ValidBufferLength(l uint16, converted []byte) bool {
total := uint16(11) //size of just framing in characters (colon, 2 len chars, 4 addr chars, 2 type chars, 2 checksum chars)
if uint16(l) < total {
print("!bad buffer length, can't be smaller than", total, ":", l, "\n")
return false
}
total += uint16(converted[0]) * 2
if l != total {
print("!bad buffer length, expected ", total, " but got", l, " based on ", total*2, "\n")
return false
}
return true
}
// verify line's checksum
func CheckChecksum(l uint16, converted []byte) bool {
sum := uint64(0)
limit := (l - 1) / 2
for i := uint16(0); i < limit; i++ {
sum += uint64(converted[i])
}
complement := ^sum
complement++
checksum := uint8(complement & 0xff)
if checksum != 0 {
print("!bad checksum! expected 0 and got ", checksum,
" from declared checksum of ", converted[limit-1], "\n")
return false
}
return true
}
// extract the line type, 00 (data), 01 (eof), or 02 (esa) and (ok?)
func ExtractLineType(converted []byte) (HexLineType, bool) {
switch converted[3] {
case 0:
return DataLine, true
case 1:
return EndOfFile, true
case 2:
return ExtendedSegmentAddress, true
case 4:
return ExtendedLinearAddress, true
case 5:
return StartLinearAddress, true
case 0x80:
return ExtensionSetParameters, true
case 0x81:
return ExtensionBigLinearAddress, true
case 0x82:
return ExtensionBigEntryPoint, true
case 3:
print("!unimplemented line type in hex transmission [StartSegmentAddress] ")
return DataLine, false
default:
print("!bad buffer type:", converted[3], "\n")
return DataLine, false
}
}
// change buffer of ascii->converted bytes by taking the ascii values (2 per byte) and making them proper bytes
func ConvertBuffer(l uint16, raw []byte) []byte {
//l-1 because the : is skipped so the remaining number of characters must be even
if (l-1)%2 == 1 {
print("!bad payload, expected even number of hex bytes but got:", l-1, "\n")
return nil
}
converted := make([]byte, (l-1)/2)
//skip first colon
for i := uint16(1); i < l; i += 2 {
v, ok := bufferValue(i, raw)
if !ok {
return nil // they already sent the error to the other side
}
converted[(i-1)/2] = v
}
return converted
}
// this hits buffer[i] and buffer[i+1] to convert an ascii byte
// returns false to mean you had a bad character in the input
func bufferValue(index uint16, buffer []byte) (uint8, bool) | {
i := int(index)
total := uint8(0)
switch buffer[i] {
case '0':
case '1':
total += 16 * 1
case '2':
total += 16 * 2
case '3':
total += 16 * 3
case '4':
total += 16 * 4
case '5':
total += 16 * 5
case '6':
total += 16 * 6
case '7':
total += 16 * 7
case '8': | identifier_body |
|
hexformat.go | processing line %x -> %x %+v", addr, val, converted)
}
if !bb.Write(addr, val) {
return true, false
}
}
return false, false
case EndOfFile:
return false, true
case ExtendedSegmentAddress: //16 bit addr
length := converted[0]
if length != 2 {
print("!ESA value has too many bytes:", length, "\n")
return true, false
}
esaAddr := uint32(converted[4])*256 + uint32(converted[5])
esaAddr = esaAddr << 4 //it's assumed to be a multiple of 16
bb.SetBaseAddr(esaAddr)
return false, false
case ExtendedLinearAddress: //32 bit addr but only top 16 passed
length := converted[0]
if length != 2 {
print("!ELA value has too many bytes:", length, "\n")
return true, false
}
elaAddr := uint32(converted[4])*256 + uint32(converted[5])
elaAddr = elaAddr << 16 //data supplied is high order 16 of 32
bb.SetBaseAddr(elaAddr) //but this sets the lower order 32 of 64
return false, false
case ExtensionSetParameters: //4 64 bit integers
length := converted[0]
if length != 32 {
print("!extension parameters must be exactly 32 bytes, but was :", length, "\n")
return true, false
}
for i := 0; i < 4; i++ {
value := uint64(0)
for p := 7; p >= 0; p-- {
placeValue := uint64(1 << (8 * p))
//4 is because of four constant valuesat left of converted[]
//i*8 is which param
//7-p is byte
value += (placeValue * uint64(converted[(4)+(i*8)+(7-p)]))
}
bb.SetParameter(i, value)
}
return false, false
case ExtensionBigLinearAddress: //32 bit int which is the HIGH order of 64bit addr
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigBaseAddr(t)
return false, false
case ExtensionBigEntryPoint: //32 bit int which is the HIGH order of 64bit pointer
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigEntryPoint(t)
return false, false
case StartLinearAddress: //32 bit addr
length := converted[0]
if length != 4 {
print("!SLA value has too many bytes:", length, "\n")
return true, false
}
slaAddr := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetEntryPoint(slaAddr)
return false, false
}
print("!unable to understand line type [processLine]\n")
return false, true
}
// take in a string and return either an exception or a well formed value
func DecodeAndCheckStringToBytes(s string) ([]byte, HexLineType, uint32, error) {
lenAs16 := uint16(len(s))
converted := ConvertBuffer(lenAs16, []byte(s))
if converted == nil {
return nil, HexLineType(0), 0, errors.New("convert buffer failed")
}
var addr uint32
lt, ok := ExtractLineType(converted)
if !ok {
return nil, DataLine, 0, NewEncodeDecodeError(fmt.Sprintf("unable to extract line type from: %s", s))
}
if lt == DataLine {
addr = (uint32(converted[1]) * 256) + (uint32(converted[2]))
}
if ok := ValidBufferLength(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected buffer length to be ok, but wasn't: %s", s))
}
if ok := CheckChecksum(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected checksum to be ok, but wasn't:%s", s))
}
return converted, lt, addr, nil
}
// received a line, check that it has a hope of being syntactically correct
func ValidBufferLength(l uint16, converted []byte) bool {
total := uint16(11) //size of just framing in characters (colon, 2 len chars, 4 addr chars, 2 type chars, 2 checksum chars)
if uint16(l) < total {
print("!bad buffer length, can't be smaller than", total, ":", l, "\n")
return false
}
total += uint16(converted[0]) * 2
if l != total {
print("!bad buffer length, expected ", total, " but got", l, " based on ", total*2, "\n")
return false
}
return true
}
// verify line's checksum
func CheckChecksum(l uint16, converted []byte) bool {
sum := uint64(0)
limit := (l - 1) / 2
for i := uint16(0); i < limit; i++ {
sum += uint64(converted[i])
}
complement := ^sum
complement++
checksum := uint8(complement & 0xff)
if checksum != 0 {
print("!bad checksum! expected 0 and got ", checksum,
" from declared checksum of ", converted[limit-1], "\n")
return false
}
return true
}
// extract the line type, 00 (data), 01 (eof), or 02 (esa) and (ok?)
func ExtractLineType(converted []byte) (HexLineType, bool) {
switch converted[3] {
case 0:
return DataLine, true
case 1:
return EndOfFile, true
case 2:
return ExtendedSegmentAddress, true
case 4:
return ExtendedLinearAddress, true
case 5:
return StartLinearAddress, true
case 0x80:
return ExtensionSetParameters, true
case 0x81:
return ExtensionBigLinearAddress, true
case 0x82:
return ExtensionBigEntryPoint, true
case 3:
print("!unimplemented line type in hex transmission [StartSegmentAddress] ")
return DataLine, false
default:
print("!bad buffer type:", converted[3], "\n")
return DataLine, false
}
}
// change buffer of ascii->converted bytes by taking the ascii values (2 per byte) and making them proper bytes
func ConvertBuffer(l uint16, raw []byte) []byte {
//l-1 because the : is skipped so the remaining number of characters must be even
if (l-1)%2 == 1 {
print("!bad payload, expected even number of hex bytes but got:", l-1, "\n")
return nil
}
converted := make([]byte, (l-1)/2)
//skip first colon
for i := uint16(1); i < l; i += 2 {
v, ok := bufferValue(i, raw)
if !ok {
return nil // they already sent the error to the other side
}
converted[(i-1)/2] = v
}
return converted
}
// this hits buffer[i] and buffer[i+1] to convert an ascii byte
// returns false to mean you had a bad character in the input
func | (index uint16, buffer []byte) (uint8, bool) {
i := int(index)
total := uint8(0)
switch buffer[i] {
case '0':
case '1':
total += 16 * 1
case '2':
total += 16 * 2
case '3':
total += 16 * 3
case '4':
total += 16 * 4
case '5':
total += 16 * 5
case '6 | bufferValue | identifier_name |
hexformat.go | (converted[6])*0x100 + uint32(converted[7])
bb.SetBigBaseAddr(t)
return false, false
case ExtensionBigEntryPoint: //32 bit int which is the HIGH order of 64bit pointer
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigEntryPoint(t)
return false, false
case StartLinearAddress: //32 bit addr
length := converted[0]
if length != 4 {
print("!SLA value has too many bytes:", length, "\n")
return true, false
}
slaAddr := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetEntryPoint(slaAddr)
return false, false
}
print("!unable to understand line type [processLine]\n")
return false, true
}
// take in a string and return either an exception or a well formed value
func DecodeAndCheckStringToBytes(s string) ([]byte, HexLineType, uint32, error) {
lenAs16 := uint16(len(s))
converted := ConvertBuffer(lenAs16, []byte(s))
if converted == nil {
return nil, HexLineType(0), 0, errors.New("convert buffer failed")
}
var addr uint32
lt, ok := ExtractLineType(converted)
if !ok {
return nil, DataLine, 0, NewEncodeDecodeError(fmt.Sprintf("unable to extract line type from: %s", s))
}
if lt == DataLine {
addr = (uint32(converted[1]) * 256) + (uint32(converted[2]))
}
if ok := ValidBufferLength(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected buffer length to be ok, but wasn't: %s", s))
}
if ok := CheckChecksum(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected checksum to be ok, but wasn't:%s", s))
}
return converted, lt, addr, nil
}
// received a line, check that it has a hope of being syntactically correct
func ValidBufferLength(l uint16, converted []byte) bool {
total := uint16(11) //size of just framing in characters (colon, 2 len chars, 4 addr chars, 2 type chars, 2 checksum chars)
if uint16(l) < total {
print("!bad buffer length, can't be smaller than", total, ":", l, "\n")
return false
}
total += uint16(converted[0]) * 2
if l != total {
print("!bad buffer length, expected ", total, " but got", l, " based on ", total*2, "\n")
return false
}
return true
}
// verify line's checksum
func CheckChecksum(l uint16, converted []byte) bool {
sum := uint64(0)
limit := (l - 1) / 2
for i := uint16(0); i < limit; i++ {
sum += uint64(converted[i])
}
complement := ^sum
complement++
checksum := uint8(complement & 0xff)
if checksum != 0 {
print("!bad checksum! expected 0 and got ", checksum,
" from declared checksum of ", converted[limit-1], "\n")
return false
}
return true
}
// extract the line type, 00 (data), 01 (eof), or 02 (esa) and (ok?)
func ExtractLineType(converted []byte) (HexLineType, bool) {
switch converted[3] {
case 0:
return DataLine, true
case 1:
return EndOfFile, true
case 2:
return ExtendedSegmentAddress, true
case 4:
return ExtendedLinearAddress, true
case 5:
return StartLinearAddress, true
case 0x80:
return ExtensionSetParameters, true
case 0x81:
return ExtensionBigLinearAddress, true
case 0x82:
return ExtensionBigEntryPoint, true
case 3:
print("!unimplemented line type in hex transmission [StartSegmentAddress] ")
return DataLine, false
default:
print("!bad buffer type:", converted[3], "\n")
return DataLine, false
}
}
// change buffer of ascii->converted bytes by taking the ascii values (2 per byte) and making them proper bytes
func ConvertBuffer(l uint16, raw []byte) []byte {
//l-1 because the : is skipped so the remaining number of characters must be even
if (l-1)%2 == 1 {
print("!bad payload, expected even number of hex bytes but got:", l-1, "\n")
return nil
}
converted := make([]byte, (l-1)/2)
//skip first colon
for i := uint16(1); i < l; i += 2 {
v, ok := bufferValue(i, raw)
if !ok {
return nil // they already sent the error to the other side
}
converted[(i-1)/2] = v
}
return converted
}
// this hits buffer[i] and buffer[i+1] to convert an ascii byte
// returns false to mean you had a bad character in the input
func bufferValue(index uint16, buffer []byte) (uint8, bool) {
i := int(index)
total := uint8(0)
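// high nibble: the character at buffer[i] contributes value*16.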
switch buffer[i] {
case '0':
case '1':
total += 16 * 1
case '2':
total += 16 * 2
case '3':
total += 16 * 3
case '4':
total += 16 * 4
case '5':
total += 16 * 5
case '6':
total += 16 * 6
case '7':
total += 16 * 7
case '8':
total += 16 * 8
case '9':
total += 16 * 9
case 'a', 'A':
total += 16 * 10
case 'b', 'B':
total += 16 * 11
case 'c', 'C':
total += 16 * 12
case 'd', 'D':
total += 16 * 13
case 'e', 'E':
total += 16 * 14
case 'f', 'F':
total += 16 * 15
default:
print("!bad character in payload hi byte(number #", i, "):", buffer[i], "\n")
return 0xff, false
}
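// low nibble: the character at buffer[i+1] contributes the ones place.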
switch buffer[i+1] {
case '0':
case '1':
total++
case '2':
total += 2
case '3':
total += 3
case '4':
total += 4
case '5':
total += 5
case '6':
total += 6
case '7':
total += 7
case '8':
total += 8
case '9':
total += 9
case 'a', 'A':
total += 10
case 'b', 'B':
total += 11
case 'c', 'C':
total += 12
case 'd', 'D':
total += 13
case 'e', 'E':
total += 14
case 'f', 'F':
total += 15
default:
print("!bad character in payload low byte (number #", i+1, "):", buffer[i+1], "\n")
return 0xff, false
}
return total, true
}
///////////////////////////////////////////////////////////////////////////////////
// ENCODING
///////////////////////////////////////////////////////////////////////////////////
func EncodeDataBytes(raw []byte, offset uint16) string {
if len(raw) > 255 {
log.Fatalf("intel hex format only allows 2 hex characters for the size\n"+
"of a data buffer, it can't be more than 0xff bytes (you have %x)", len(raw))
}
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf(":%02X%04X%02X", len(raw), offset, int(DataLine)))
for _, b := range raw {
buf.WriteString(fmt.Sprintf("%02x", b))
}
cs := createChecksum(raw, offset, DataLine)
buf.WriteString(fmt.Sprintf("%02X", cs)) | return buf.String()
}
func EncodeBigEntry(entry uint32) string {
buf := bytes.Buffer{} | random_line_split |
|
hexformat.go | processing line %x -> %x %+v", addr, val, converted)
}
if !bb.Write(addr, val) {
return true, false
}
}
return false, false
case EndOfFile:
return false, true
case ExtendedSegmentAddress: //16 bit addr
length := converted[0]
if length != 2 {
print("!ESA value has too many bytes:", length, "\n")
return true, false
}
esaAddr := uint32(converted[4])*256 + uint32(converted[5])
esaAddr = esaAddr << 4 //it's assumed to be a multiple of 16
bb.SetBaseAddr(esaAddr)
return false, false
case ExtendedLinearAddress: //32 bit addr but only top 16 passed
length := converted[0]
if length != 2 {
print("!ELA value has too many bytes:", length, "\n")
return true, false
}
elaAddr := uint32(converted[4])*256 + uint32(converted[5])
elaAddr = elaAddr << 16 //data supplied is high order 16 of 32
bb.SetBaseAddr(elaAddr) //but this sets the lower order 32 of 64
return false, false
case ExtensionSetParameters: //4 64 bit integers
length := converted[0]
if length != 32 {
print("!extension parameters must be exactly 32 bytes, but was :", length, "\n")
return true, false
}
for i := 0; i < 4; i++ {
value := uint64(0)
for p := 7; p >= 0; p-- {
placeValue := uint64(1 << (8 * p))
//4 is because of four constant valuesat left of converted[]
//i*8 is which param
//7-p is byte
value += (placeValue * uint64(converted[(4)+(i*8)+(7-p)]))
}
bb.SetParameter(i, value)
}
return false, false
case ExtensionBigLinearAddress: //32 bit int which is the HIGH order of 64bit addr
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigBaseAddr(t)
return false, false
case ExtensionBigEntryPoint: //32 bit int which is the HIGH order of 64bit pointer
length := converted[0]
if length != 4 {
print("!extension big linear address has wrong length:", length, "\n")
return true, false
}
t := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetBigEntryPoint(t)
return false, false
case StartLinearAddress: //32 bit addr
length := converted[0]
if length != 4 {
print("!SLA value has too many bytes:", length, "\n")
return true, false
}
slaAddr := uint32(converted[4])*0x1000000 + uint32(converted[5])*0x10000 + uint32(converted[6])*0x100 + uint32(converted[7])
bb.SetEntryPoint(slaAddr)
return false, false
}
print("!unable to understand line type [processLine]\n")
return false, true
}
// take in a string and return either an exception or a well formed value
func DecodeAndCheckStringToBytes(s string) ([]byte, HexLineType, uint32, error) {
lenAs16 := uint16(len(s))
converted := ConvertBuffer(lenAs16, []byte(s))
if converted == nil {
return nil, HexLineType(0), 0, errors.New("convert buffer failed")
}
var addr uint32
lt, ok := ExtractLineType(converted)
if !ok {
return nil, DataLine, 0, NewEncodeDecodeError(fmt.Sprintf("unable to extract line type from: %s", s))
}
if lt == DataLine {
addr = (uint32(converted[1]) * 256) + (uint32(converted[2]))
}
if ok := ValidBufferLength(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected buffer length to be ok, but wasn't: %s", s))
}
if ok := CheckChecksum(lenAs16, converted); ok == false {
return nil, lt, addr, NewEncodeDecodeError(fmt.Sprintf("expected checksum to be ok, but wasn't:%s", s))
}
return converted, lt, addr, nil
}
// received a line, check that it has a hope of being syntactically correct
func ValidBufferLength(l uint16, converted []byte) bool {
total := uint16(11) //size of just framing in characters (colon, 2 len chars, 4 addr chars, 2 type chars, 2 checksum chars)
if uint16(l) < total {
print("!bad buffer length, can't be smaller than", total, ":", l, "\n")
return false
}
total += uint16(converted[0]) * 2
if l != total {
print("!bad buffer length, expected ", total, " but got", l, " based on ", total*2, "\n")
return false
}
return true
}
// verify line's checksum
func CheckChecksum(l uint16, converted []byte) bool {
sum := uint64(0)
limit := (l - 1) / 2
for i := uint16(0); i < limit; i++ {
sum += uint64(converted[i])
}
complement := ^sum
complement++
checksum := uint8(complement & 0xff)
if checksum != 0 {
print("!bad checksum! expected 0 and got ", checksum,
" from declared checksum of ", converted[limit-1], "\n")
return false
}
return true
}
// extract the line type, 00 (data), 01 (eof), or 02 (esa) and (ok?)
func ExtractLineType(converted []byte) (HexLineType, bool) {
switch converted[3] {
case 0:
return DataLine, true
case 1:
return EndOfFile, true
case 2:
return ExtendedSegmentAddress, true
case 4:
return ExtendedLinearAddress, true
case 5:
return StartLinearAddress, true
case 0x80:
return ExtensionSetParameters, true
case 0x81:
return ExtensionBigLinearAddress, true
case 0x82:
return ExtensionBigEntryPoint, true
case 3:
print("!unimplemented line type in hex transmission [StartSegmentAddress] ")
return DataLine, false
default:
print("!bad buffer type:", converted[3], "\n")
return DataLine, false
}
}
// change buffer of ascii->converted bytes by taking the ascii values (2 per byte) and making them proper bytes
func ConvertBuffer(l uint16, raw []byte) []byte {
//l-1 because the : is skipped so the remaining number of characters must be even
if (l-1)%2 == 1 |
converted := make([]byte, (l-1)/2)
//skip first colon
for i := uint16(1); i < l; i += 2 {
v, ok := bufferValue(i, raw)
if !ok {
return nil // they already sent the error to the other side
}
converted[(i-1)/2] = v
}
return converted
}
// this hits buffer[i] and buffer[i+1] to convert an ascii byte
// returns false to mean you had a bad character in the input
func bufferValue(index uint16, buffer []byte) (uint8, bool) {
i := int(index)
total := uint8(0)
switch buffer[i] {
case '0':
case '1':
total += 16 * 1
case '2':
total += 16 * 2
case '3':
total += 16 * 3
case '4':
total += 16 * 4
case '5':
total += 16 * 5
case ' | {
print("!bad payload, expected even number of hex bytes but got:", l-1, "\n")
return nil
} | conditional_block |
columnar.rs | _offset.to_usize()
));
}
}
if let Some(last_val_offset) = self.val_offsets.last() {
if last_val_offset.to_usize() != self.val_data.len() {
return Err(format!(
"expected {} bytes of val data got {}",
last_val_offset,
self.val_data.len()
));
}
}
if self.diffs.len() != self.len {
return Err(format!(
"expected {} diffs got {}",
self.len,
self.diffs.len()
));
}
if self.timestamps.len() != self.len {
return Err(format!(
"expected {} timestamps got {}",
self.len,
self.timestamps.len()
));
}
// Unlike most of our Validate methods, this one is called in a
// production code path: when decoding a columnar batch. Only check the
// more expensive assertions in debug.
#[cfg(debug_assertions)]
{
let (mut prev_key, mut prev_val) = (0, 0);
for i in 0..=self.len {
let (key, val) = (self.key_offsets[i], self.val_offsets[i]);
if key < prev_key {
return Err(format!(
"expected non-decreasing key offsets got {} followed by {}",
prev_key, key
));
}
if val < prev_val {
return Err(format!(
"expected non-decreasing val offsets got {} followed by {}",
prev_val, val
));
}
prev_key = key;
prev_val = val;
}
}
Ok(())
}
/// Read the record at `idx`, if there is one.
///
/// Returns None if `idx >= self.len()`.
fn get(&self, idx: usize) -> Option<((&'a [u8], &'a [u8]), [u8; 8], [u8; 8])> {
if idx >= self.len {
return None;
}
// There used to be `debug_assert_eq!(self.validate(), Ok(()))`, but it
// resulted in accidentally O(n^2) behavior in debug mode. Instead, we
// push that responsibility to the ColumnarRecordsRef constructor.
let key_range = self.key_offsets[idx].to_usize()..self.key_offsets[idx + 1].to_usize();
let val_range = self.val_offsets[idx].to_usize()..self.val_offsets[idx + 1].to_usize();
let key = &self.key_data[key_range];
let val = &self.val_data[val_range];
let ts = i64::to_le_bytes(self.timestamps[idx]);
let diff = i64::to_le_bytes(self.diffs[idx]);
Some(((key, val), ts, diff))
}
/// Iterate through the records in Self.
fn iter(&self) -> ColumnarRecordsIter<'a> {
ColumnarRecordsIter {
idx: 0,
records: self.clone(),
}
}
}
/// An [Iterator] over the records in a [ColumnarRecords].
#[derive(Clone, Debug)]
pub struct ColumnarRecordsIter<'a> {
idx: usize,
records: ColumnarRecordsRef<'a>,
}
impl<'a> Iterator for ColumnarRecordsIter<'a> {
type Item = ((&'a [u8], &'a [u8]), [u8; 8], [u8; 8]);
fn size_hint(&self) -> (usize, Option<usize>) {
(self.records.len, Some(self.records.len))
}
fn next(&mut self) -> Option<Self::Item> {
let ret = self.records.get(self.idx);
self.idx += 1;
ret
}
}
impl<'a> ExactSizeIterator for ColumnarRecordsIter<'a> {}
/// An abstraction to incrementally add ((Key, Value), Time, i64) records
/// in a columnar representation, and eventually get back a [ColumnarRecords].
pub struct ColumnarRecordsBuilder {
len: usize,
key_data: Vec<u8>,
key_offsets: Vec<i32>,
val_data: Vec<u8>,
val_offsets: Vec<i32>,
timestamps: Vec<i64>,
diffs: Vec<i64>,
}
impl fmt::Debug for ColumnarRecordsBuilder {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.borrow(), fmt)
}
}
impl Default for ColumnarRecordsBuilder {
fn default() -> Self {
let mut ret = ColumnarRecordsBuilder {
len: 0,
key_data: Vec::new(),
key_offsets: Vec::new(),
val_data: Vec::new(),
val_offsets: Vec::new(),
timestamps: Vec::new(),
diffs: Vec::new(),
};
// Push initial 0 offsets to maintain our invariants, even as we build.
ret.key_offsets.push(0);
ret.val_offsets.push(0);
debug_assert_eq!(ret.borrow().validate(), Ok(()));
ret
}
}
impl ColumnarRecordsBuilder {
/// The number of (potentially duplicated) ((Key, Val), Time, i64) records
/// stored in Self.
pub fn len(&self) -> usize {
self.len
}
/// Borrow Self as a [ColumnarRecordsRef].
fn borrow<'a>(&'a self) -> ColumnarRecordsRef<'a> {
let ret = ColumnarRecordsRef {
len: self.len,
key_data: self.key_data.as_slice(),
key_offsets: self.key_offsets.as_slice(),
val_data: self.val_data.as_slice(),
val_offsets: self.val_offsets.as_slice(),
timestamps: self.timestamps.as_slice(),
diffs: self.diffs.as_slice(),
};
debug_assert_eq!(ret.validate(), Ok(()));
ret
}
/// Reserve space for `additional` more records, based on `key_size_guess` and
/// `val_size_guess`.
///
/// The guesses for key and val sizes are best effort, and if they end up being
/// too small, the underlying buffers will be resized.
pub fn reserve(&mut self, additional: usize, key_size_guess: usize, val_size_guess: usize) {
self.key_offsets.reserve(additional);
self.key_data
.reserve(cmp::min(additional * key_size_guess, KEY_VAL_DATA_MAX_LEN));
self.val_offsets.reserve(additional);
self.val_data
.reserve(cmp::min(additional * val_size_guess, KEY_VAL_DATA_MAX_LEN));
self.timestamps.reserve(additional);
self.diffs.reserve(additional);
debug_assert_eq!(self.borrow().validate(), Ok(()));
}
/// Reserve space for `additional` more records, with exact sizes for the key and value data.
pub fn reserve_exact(&mut self, additional: usize, key_bytes: usize, val_bytes: usize) {
self.key_offsets.reserve(additional);
self.key_data
.reserve(cmp::min(key_bytes, KEY_VAL_DATA_MAX_LEN));
self.val_offsets.reserve(additional);
self.val_data
.reserve(cmp::min(val_bytes, KEY_VAL_DATA_MAX_LEN));
self.timestamps.reserve(additional);
self.diffs.reserve(additional);
debug_assert_eq!(self.borrow().validate(), Ok(()));
}
/// Returns if the given key_offsets+key_data or val_offsets+val_data fits
/// in the limits imposed by ColumnarRecords.
///
/// Note that limit is always [KEY_VAL_DATA_MAX_LEN] in production. It's
/// only override-able here for testing.
pub fn can_fit(&self, key: &[u8], val: &[u8], limit: usize) -> bool {
let key_data_size = (self.key_offsets.len() + 1) * BYTES_PER_KEY_VAL_OFFSET
+ self.key_data.len()
+ key.len();
let val_data_size = (self.val_offsets.len() + 1) * BYTES_PER_KEY_VAL_OFFSET
+ self.val_data.len()
+ val.len();
key_data_size <= limit && val_data_size <= limit
}
/// Add a record to Self.
///
/// Returns whether the record was successfully added. A record will not be
/// added if it exceeds the size limitations of ColumnarRecords. This method
/// is atomic: if it fails, no partial data will have been added.
#[must_use]
pub fn push(&mut self, record: ((&[u8], &[u8]), [u8; 8], [u8; 8])) -> bool | {
let ((key, val), ts, diff) = record;
// Check size invariants ahead of time so we stay atomic when we can't
// add the record.
if !self.can_fit(key, val, KEY_VAL_DATA_MAX_LEN) {
return false;
}
// NB: We should never hit the following expects because we check them
// above.
self.key_data.extend_from_slice(key);
self.key_offsets
.push(i32::try_from(self.key_data.len()).expect("key_data is smaller than 2GB"));
self.val_data.extend_from_slice(val);
self.val_offsets
.push(i32::try_from(self.val_data.len()).expect("val_data is smaller than 2GB"));
self.timestamps.push(i64::from_le_bytes(ts));
self.diffs.push(i64::from_le_bytes(diff));
self.len += 1; | identifier_body |
|
columnar.rs | * self.diffs.len()
}
/// Read the record at `idx`, if there is one.
///
/// Returns None if `idx >= self.len()`.
pub fn get<'a>(&'a self, idx: usize) -> Option<((&'a [u8], &'a [u8]), [u8; 8], [u8; 8])> {
self.borrow().get(idx)
}
/// Borrow Self as a [ColumnarRecordsRef].
fn borrow<'a>(&'a self) -> ColumnarRecordsRef<'a> {
// The ColumnarRecords constructor already validates, so don't bother
// doing it again.
//
// TODO: Forcing everything through a `fn new` would make this more
// obvious.
ColumnarRecordsRef {
len: self.len,
key_data: self.key_data.as_slice(),
key_offsets: self.key_offsets.as_slice(),
val_data: self.val_data.as_slice(),
val_offsets: self.val_offsets.as_slice(),
timestamps: self.timestamps.as_slice(),
diffs: self.diffs.as_slice(),
}
}
/// Iterate through the records in Self.
pub fn iter<'a>(&'a self) -> ColumnarRecordsIter<'a> {
self.borrow().iter()
}
}
/// A reference to a [ColumnarRecords].
#[derive(Clone)]
struct ColumnarRecordsRef<'a> {
len: usize,
key_data: &'a [u8],
key_offsets: &'a [i32],
val_data: &'a [u8],
val_offsets: &'a [i32],
timestamps: &'a [i64],
diffs: &'a [i64],
}
impl<'a> fmt::Debug for ColumnarRecordsRef<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_list().entries(self.iter()).finish()
}
}
impl<'a> ColumnarRecordsRef<'a> {
fn validate(&self) -> Result<(), String> {
let key_data_size = self.key_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + self.key_data.len();
if key_data_size > KEY_VAL_DATA_MAX_LEN {
return Err(format!(
"expected encoded key offsets and data size to be less than or equal to {} got {}",
KEY_VAL_DATA_MAX_LEN, key_data_size
));
}
if self.key_offsets.len() != self.len + 1 {
return Err(format!(
"expected {} key_offsets got {}",
self.len + 1,
self.key_offsets.len()
));
}
if let Some(first_key_offset) = self.key_offsets.first() {
if first_key_offset.to_usize() != 0 {
return Err(format!(
"expected first key offset to be 0 got {}",
first_key_offset.to_usize()
));
}
}
if let Some(last_key_offset) = self.key_offsets.last() {
if last_key_offset.to_usize() != self.key_data.len() {
return Err(format!(
"expected {} bytes of key data got {}",
last_key_offset,
self.key_data.len()
));
}
}
let val_data_size = self.val_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + self.val_data.len();
if val_data_size > KEY_VAL_DATA_MAX_LEN {
return Err(format!(
"expected encoded val offsets and data size to be less than or equal to {} got {}",
KEY_VAL_DATA_MAX_LEN, val_data_size
));
}
if self.val_offsets.len() != self.len + 1 {
return Err(format!(
"expected {} val_offsets got {}",
self.len + 1,
self.val_offsets.len()
));
}
if let Some(first_val_offset) = self.val_offsets.first() {
if first_val_offset.to_usize() != 0 {
return Err(format!(
"expected first val offset to be 0 got {}",
first_val_offset.to_usize()
));
}
}
if let Some(last_val_offset) = self.val_offsets.last() {
if last_val_offset.to_usize() != self.val_data.len() {
return Err(format!(
"expected {} bytes of val data got {}",
last_val_offset,
self.val_data.len()
));
}
}
if self.diffs.len() != self.len {
return Err(format!(
"expected {} diffs got {}",
self.len,
self.diffs.len()
));
}
if self.timestamps.len() != self.len {
return Err(format!(
"expected {} timestamps got {}",
self.len,
self.timestamps.len()
));
}
// Unlike most of our Validate methods, this one is called in a
// production code path: when decoding a columnar batch. Only check the
// more expensive assertions in debug.
#[cfg(debug_assertions)]
{
let (mut prev_key, mut prev_val) = (0, 0);
for i in 0..=self.len {
let (key, val) = (self.key_offsets[i], self.val_offsets[i]);
if key < prev_key {
return Err(format!(
"expected non-decreasing key offsets got {} followed by {}",
prev_key, key
));
}
if val < prev_val {
return Err(format!(
"expected non-decreasing val offsets got {} followed by {}",
prev_val, val
));
}
prev_key = key;
prev_val = val;
}
}
Ok(())
}
/// Read the record at `idx`, if there is one.
///
/// Returns None if `idx >= self.len()`.
fn get(&self, idx: usize) -> Option<((&'a [u8], &'a [u8]), [u8; 8], [u8; 8])> {
if idx >= self.len {
return None;
}
// There used to be `debug_assert_eq!(self.validate(), Ok(()))`, but it
// resulted in accidentally O(n^2) behavior in debug mode. Instead, we
// push that responsibility to the ColumnarRecordsRef constructor.
let key_range = self.key_offsets[idx].to_usize()..self.key_offsets[idx + 1].to_usize();
let val_range = self.val_offsets[idx].to_usize()..self.val_offsets[idx + 1].to_usize();
let key = &self.key_data[key_range];
let val = &self.val_data[val_range];
let ts = i64::to_le_bytes(self.timestamps[idx]);
let diff = i64::to_le_bytes(self.diffs[idx]);
Some(((key, val), ts, diff))
}
/// Iterate through the records in Self.
fn iter(&self) -> ColumnarRecordsIter<'a> {
ColumnarRecordsIter {
idx: 0,
records: self.clone(),
}
}
}
/// An [Iterator] over the records in a [ColumnarRecords].
#[derive(Clone, Debug)]
pub struct ColumnarRecordsIter<'a> {
idx: usize,
records: ColumnarRecordsRef<'a>,
}
impl<'a> Iterator for ColumnarRecordsIter<'a> {
type Item = ((&'a [u8], &'a [u8]), [u8; 8], [u8; 8]);
fn size_hint(&self) -> (usize, Option<usize>) {
(self.records.len, Some(self.records.len))
}
fn next(&mut self) -> Option<Self::Item> {
let ret = self.records.get(self.idx);
self.idx += 1;
ret
}
}
impl<'a> ExactSizeIterator for ColumnarRecordsIter<'a> {}
/// An abstraction to incrementally add ((Key, Value), Time, i64) records
/// in a columnar representation, and eventually get back a [ColumnarRecords].
pub struct ColumnarRecordsBuilder {
len: usize,
key_data: Vec<u8>,
key_offsets: Vec<i32>,
val_data: Vec<u8>,
val_offsets: Vec<i32>,
timestamps: Vec<i64>,
diffs: Vec<i64>,
}
impl fmt::Debug for ColumnarRecordsBuilder {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.borrow(), fmt)
}
}
impl Default for ColumnarRecordsBuilder {
fn default() -> Self {
let mut ret = ColumnarRecordsBuilder {
len: 0,
key_data: Vec::new(),
key_offsets: Vec::new(),
val_data: Vec::new(),
val_offsets: Vec::new(),
timestamps: Vec::new(),
diffs: Vec::new(),
};
// Push initial 0 offsets to maintain our invariants, even as we build.
ret.key_offsets.push(0);
ret.val_offsets.push(0);
debug_assert_eq!(ret.borrow().validate(), Ok(()));
ret
}
}
impl ColumnarRecordsBuilder {
/// The number of (potentially duplicated) ((Key, Val), Time, i64) records
/// stored in Self.
pub fn len(&self) -> usize {
self.len
}
| /// Borrow Self as a [ColumnarRecordsRef].
fn borrow<'a>(&'a self) -> ColumnarRecordsRef<'a> {
let ret = ColumnarRecordsRef {
len: self.len, | random_line_split |
|
columnar.rs | self.val_offsets.last() {
if last_val_offset.to_usize() != self.val_data.len() {
return Err(format!(
"expected {} bytes of val data got {}",
last_val_offset,
self.val_data.len()
));
}
}
if self.diffs.len() != self.len {
return Err(format!(
"expected {} diffs got {}",
self.len,
self.diffs.len()
));
}
if self.timestamps.len() != self.len {
return Err(format!(
"expected {} timestamps got {}",
self.len,
self.timestamps.len()
));
}
// Unlike most of our Validate methods, this one is called in a
// production code path: when decoding a columnar batch. Only check the
// more expensive assertions in debug.
#[cfg(debug_assertions)]
{
let (mut prev_key, mut prev_val) = (0, 0);
for i in 0..=self.len {
let (key, val) = (self.key_offsets[i], self.val_offsets[i]);
if key < prev_key {
return Err(format!(
"expected non-decreasing key offsets got {} followed by {}",
prev_key, key
));
}
if val < prev_val {
return Err(format!(
"expected non-decreasing val offsets got {} followed by {}",
prev_val, val
));
}
prev_key = key;
prev_val = val;
}
}
Ok(())
}
/// Read the record at `idx`, if there is one.
///
/// Returns None if `idx >= self.len()`.
fn get(&self, idx: usize) -> Option<((&'a [u8], &'a [u8]), [u8; 8], [u8; 8])> {
if idx >= self.len {
return None;
}
// There used to be `debug_assert_eq!(self.validate(), Ok(()))`, but it
// resulted in accidentally O(n^2) behavior in debug mode. Instead, we
// push that responsibility to the ColumnarRecordsRef constructor.
let key_range = self.key_offsets[idx].to_usize()..self.key_offsets[idx + 1].to_usize();
let val_range = self.val_offsets[idx].to_usize()..self.val_offsets[idx + 1].to_usize();
let key = &self.key_data[key_range];
let val = &self.val_data[val_range];
let ts = i64::to_le_bytes(self.timestamps[idx]);
let diff = i64::to_le_bytes(self.diffs[idx]);
Some(((key, val), ts, diff))
}
/// Iterate through the records in Self.
fn iter(&self) -> ColumnarRecordsIter<'a> {
ColumnarRecordsIter {
idx: 0,
records: self.clone(),
}
}
}
/// An [Iterator] over the records in a [ColumnarRecords].
#[derive(Clone, Debug)]
pub struct ColumnarRecordsIter<'a> {
idx: usize,
records: ColumnarRecordsRef<'a>,
}
impl<'a> Iterator for ColumnarRecordsIter<'a> {
type Item = ((&'a [u8], &'a [u8]), [u8; 8], [u8; 8]);
fn size_hint(&self) -> (usize, Option<usize>) {
(self.records.len, Some(self.records.len))
}
fn next(&mut self) -> Option<Self::Item> {
let ret = self.records.get(self.idx);
self.idx += 1;
ret
}
}
impl<'a> ExactSizeIterator for ColumnarRecordsIter<'a> {}
/// An abstraction to incrementally add ((Key, Value), Time, i64) records
/// in a columnar representation, and eventually get back a [ColumnarRecords].
pub struct ColumnarRecordsBuilder {
len: usize,
key_data: Vec<u8>,
key_offsets: Vec<i32>,
val_data: Vec<u8>,
val_offsets: Vec<i32>,
timestamps: Vec<i64>,
diffs: Vec<i64>,
}
impl fmt::Debug for ColumnarRecordsBuilder {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.borrow(), fmt)
}
}
impl Default for ColumnarRecordsBuilder {
fn default() -> Self {
let mut ret = ColumnarRecordsBuilder {
len: 0,
key_data: Vec::new(),
key_offsets: Vec::new(),
val_data: Vec::new(),
val_offsets: Vec::new(),
timestamps: Vec::new(),
diffs: Vec::new(),
};
// Push initial 0 offsets to maintain our invariants, even as we build.
ret.key_offsets.push(0);
ret.val_offsets.push(0);
debug_assert_eq!(ret.borrow().validate(), Ok(()));
ret
}
}
impl ColumnarRecordsBuilder {
/// The number of (potentially duplicated) ((Key, Val), Time, i64) records
/// stored in Self.
pub fn len(&self) -> usize {
self.len
}
/// Borrow Self as a [ColumnarRecordsRef].
fn borrow<'a>(&'a self) -> ColumnarRecordsRef<'a> {
let ret = ColumnarRecordsRef {
len: self.len,
key_data: self.key_data.as_slice(),
key_offsets: self.key_offsets.as_slice(),
val_data: self.val_data.as_slice(),
val_offsets: self.val_offsets.as_slice(),
timestamps: self.timestamps.as_slice(),
diffs: self.diffs.as_slice(),
};
debug_assert_eq!(ret.validate(), Ok(()));
ret
}
/// Reserve space for `additional` more records, based on `key_size_guess` and
/// `val_size_guess`.
///
/// The guesses for key and val sizes are best effort, and if they end up being
/// too small, the underlying buffers will be resized.
pub fn reserve(&mut self, additional: usize, key_size_guess: usize, val_size_guess: usize) {
self.key_offsets.reserve(additional);
self.key_data
.reserve(cmp::min(additional * key_size_guess, KEY_VAL_DATA_MAX_LEN));
self.val_offsets.reserve(additional);
self.val_data
.reserve(cmp::min(additional * val_size_guess, KEY_VAL_DATA_MAX_LEN));
self.timestamps.reserve(additional);
self.diffs.reserve(additional);
debug_assert_eq!(self.borrow().validate(), Ok(()));
}
/// Reserve space for `additional` more records, with exact sizes for the key and value data.
pub fn reserve_exact(&mut self, additional: usize, key_bytes: usize, val_bytes: usize) {
self.key_offsets.reserve(additional);
self.key_data
.reserve(cmp::min(key_bytes, KEY_VAL_DATA_MAX_LEN));
self.val_offsets.reserve(additional);
self.val_data
.reserve(cmp::min(val_bytes, KEY_VAL_DATA_MAX_LEN));
self.timestamps.reserve(additional);
self.diffs.reserve(additional);
debug_assert_eq!(self.borrow().validate(), Ok(()));
}
/// Returns if the given key_offsets+key_data or val_offsets+val_data fits
/// in the limits imposed by ColumnarRecords.
///
/// Note that limit is always [KEY_VAL_DATA_MAX_LEN] in production. It's
/// only override-able here for testing.
pub fn can_fit(&self, key: &[u8], val: &[u8], limit: usize) -> bool {
let key_data_size = (self.key_offsets.len() + 1) * BYTES_PER_KEY_VAL_OFFSET
+ self.key_data.len()
+ key.len();
let val_data_size = (self.val_offsets.len() + 1) * BYTES_PER_KEY_VAL_OFFSET
+ self.val_data.len()
+ val.len();
key_data_size <= limit && val_data_size <= limit
}
/// Add a record to Self.
///
/// Returns whether the record was successfully added. A record will not be
/// added if it exceeds the size limitations of ColumnarRecords. This method
/// is atomic: if it fails, no partial data will have been added.
#[must_use]
pub fn push(&mut self, record: ((&[u8], &[u8]), [u8; 8], [u8; 8])) -> bool {
let ((key, val), ts, diff) = record;
// Check size invariants ahead of time so we stay atomic when we can't
// add the record.
if !self.can_fit(key, val, KEY_VAL_DATA_MAX_LEN) {
return false;
}
// NB: We should never hit the following expects because we check them
// above.
self.key_data.extend_from_slice(key);
self.key_offsets
.push(i32::try_from(self.key_data.len()).expect("key_data is smaller than 2GB"));
self.val_data.extend_from_slice(val);
self.val_offsets
.push(i32::try_from(self.val_data.len()).expect("val_data is smaller than 2GB"));
self.timestamps.push(i64::from_le_bytes(ts));
self.diffs.push(i64::from_le_bytes(diff));
self.len += 1;
true
}
/// Finalize constructing a [ColumnarRecords].
pub fn | finish | identifier_name |
|
columnar.rs | < usize::MAX (so len+1 can fit in a usize)
/// - key_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + key_data.len() <= KEY_VAL_DATA_MAX_LEN
/// - key_offsets.len() == len + 1
/// - key_offsets are non-decreasing
/// - Each key_offset is <= key_data.len()
/// - key_offsets.first().unwrap() == 0
/// - key_offsets.last().unwrap() == key_data.len()
/// - val_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + val_data.len() <= KEY_VAL_DATA_MAX_LEN
/// - val_offsets.len() == len + 1
/// - val_offsets are non-decreasing
/// - Each val_offset is <= val_data.len()
/// - val_offsets.first().unwrap() == 0
/// - val_offsets.last().unwrap() == val_data.len()
/// - timestamps.len() == len
/// - diffs.len() == len
#[derive(Clone, PartialEq)]
pub struct ColumnarRecords {
len: usize,
key_data: Buffer<u8>,
key_offsets: OffsetsBuffer<i32>,
val_data: Buffer<u8>,
val_offsets: OffsetsBuffer<i32>,
timestamps: Buffer<i64>,
diffs: Buffer<i64>,
}
impl fmt::Debug for ColumnarRecords {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.borrow(), fmt)
}
}
impl ColumnarRecords {
/// The number of (potentially duplicated) ((Key, Val), Time, i64) records
/// stored in Self.
pub fn len(&self) -> usize {
self.len
}
/// The number of logical bytes in the represented data, excluding offsets
/// and lengths.
pub fn goodbytes(&self) -> usize {
self.key_data.len() + self.val_data.len() + 8 * self.timestamps.len() + 8 * self.diffs.len()
}
/// Read the record at `idx`, if there is one.
///
/// Returns None if `idx >= self.len()`.
pub fn get<'a>(&'a self, idx: usize) -> Option<((&'a [u8], &'a [u8]), [u8; 8], [u8; 8])> {
self.borrow().get(idx)
}
/// Borrow Self as a [ColumnarRecordsRef].
fn borrow<'a>(&'a self) -> ColumnarRecordsRef<'a> {
// The ColumnarRecords constructor already validates, so don't bother
// doing it again.
//
// TODO: Forcing everything through a `fn new` would make this more
// obvious.
ColumnarRecordsRef {
len: self.len,
key_data: self.key_data.as_slice(),
key_offsets: self.key_offsets.as_slice(),
val_data: self.val_data.as_slice(),
val_offsets: self.val_offsets.as_slice(),
timestamps: self.timestamps.as_slice(),
diffs: self.diffs.as_slice(),
}
}
/// Iterate through the records in Self.
pub fn iter<'a>(&'a self) -> ColumnarRecordsIter<'a> {
self.borrow().iter()
}
}
/// A reference to a [ColumnarRecords].
#[derive(Clone)]
struct ColumnarRecordsRef<'a> {
len: usize,
key_data: &'a [u8],
key_offsets: &'a [i32],
val_data: &'a [u8],
val_offsets: &'a [i32],
timestamps: &'a [i64],
diffs: &'a [i64],
}
impl<'a> fmt::Debug for ColumnarRecordsRef<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_list().entries(self.iter()).finish()
}
}
impl<'a> ColumnarRecordsRef<'a> {
fn validate(&self) -> Result<(), String> {
let key_data_size = self.key_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + self.key_data.len();
if key_data_size > KEY_VAL_DATA_MAX_LEN {
return Err(format!(
"expected encoded key offsets and data size to be less than or equal to {} got {}",
KEY_VAL_DATA_MAX_LEN, key_data_size
));
}
if self.key_offsets.len() != self.len + 1 {
return Err(format!(
"expected {} key_offsets got {}",
self.len + 1,
self.key_offsets.len()
));
}
if let Some(first_key_offset) = self.key_offsets.first() {
if first_key_offset.to_usize() != 0 {
return Err(format!(
"expected first key offset to be 0 got {}",
first_key_offset.to_usize()
));
}
}
if let Some(last_key_offset) = self.key_offsets.last() {
if last_key_offset.to_usize() != self.key_data.len() {
return Err(format!(
"expected {} bytes of key data got {}",
last_key_offset,
self.key_data.len()
));
}
}
let val_data_size = self.val_offsets.len() * BYTES_PER_KEY_VAL_OFFSET + self.val_data.len();
if val_data_size > KEY_VAL_DATA_MAX_LEN {
return Err(format!(
"expected encoded val offsets and data size to be less than or equal to {} got {}",
KEY_VAL_DATA_MAX_LEN, val_data_size
));
}
if self.val_offsets.len() != self.len + 1 |
if let Some(first_val_offset) = self.val_offsets.first() {
if first_val_offset.to_usize() != 0 {
return Err(format!(
"expected first val offset to be 0 got {}",
first_val_offset.to_usize()
));
}
}
if let Some(last_val_offset) = self.val_offsets.last() {
if last_val_offset.to_usize() != self.val_data.len() {
return Err(format!(
"expected {} bytes of val data got {}",
last_val_offset,
self.val_data.len()
));
}
}
if self.diffs.len() != self.len {
return Err(format!(
"expected {} diffs got {}",
self.len,
self.diffs.len()
));
}
if self.timestamps.len() != self.len {
return Err(format!(
"expected {} timestamps got {}",
self.len,
self.timestamps.len()
));
}
// Unlike most of our Validate methods, this one is called in a
// production code path: when decoding a columnar batch. Only check the
// more expensive assertions in debug.
#[cfg(debug_assertions)]
{
let (mut prev_key, mut prev_val) = (0, 0);
for i in 0..=self.len {
let (key, val) = (self.key_offsets[i], self.val_offsets[i]);
if key < prev_key {
return Err(format!(
"expected non-decreasing key offsets got {} followed by {}",
prev_key, key
));
}
if val < prev_val {
return Err(format!(
"expected non-decreasing val offsets got {} followed by {}",
prev_val, val
));
}
prev_key = key;
prev_val = val;
}
}
Ok(())
}
/// Read the record at `idx`, if there is one.
///
/// Returns None if `idx >= self.len()`.
fn get(&self, idx: usize) -> Option<((&'a [u8], &'a [u8]), [u8; 8], [u8; 8])> {
if idx >= self.len {
return None;
}
// There used to be `debug_assert_eq!(self.validate(), Ok(()))`, but it
// resulted in accidentally O(n^2) behavior in debug mode. Instead, we
// push that responsibility to the ColumnarRecordsRef constructor.
let key_range = self.key_offsets[idx].to_usize()..self.key_offsets[idx + 1].to_usize();
let val_range = self.val_offsets[idx].to_usize()..self.val_offsets[idx + 1].to_usize();
let key = &self.key_data[key_range];
let val = &self.val_data[val_range];
let ts = i64::to_le_bytes(self.timestamps[idx]);
let diff = i64::to_le_bytes(self.diffs[idx]);
Some(((key, val), ts, diff))
}
/// Iterate through the records in Self.
fn iter(&self) -> ColumnarRecordsIter<'a> {
ColumnarRecordsIter {
idx: 0,
records: self.clone(),
}
}
}
/// An [Iterator] over the records in a [ColumnarRecords].
#[derive(Clone, Debug)]
pub struct ColumnarRecordsIter<'a> {
idx: usize,
records: ColumnarRecordsRef<'a>,
}
impl<'a> Iterator for ColumnarRecordsIter<'a> {
type Item = ((&'a [u8], &'a [u8]), [u8; 8], [u8; 8]);
fn size_hint(&self) -> (usize, Option<usize>) {
(self.records.len, Some(self.records.len))
}
fn next(&mut self) -> Option<Self | {
return Err(format!(
"expected {} val_offsets got {}",
self.len + 1,
self.val_offsets.len()
));
} | conditional_block |
Transformer_prac.py | # [ 0, 0, 12]]) is what the result looks like
subsequent_mask = torch.from_numpy(subsequent_mask).byte() # convert the numpy array to a torch tensor
return subsequent_mask
class ScaledDotProduct(nn.Module):
def __init__(self):
super(ScaledDotProduct,self).__init__()
self.softmax = nn.Softmax(dim = -1) # dim of softmax? the dimension along which the softmax is computed
# with NLLLoss, LogSoftmax is used instead
self.const = np.sqrt(d_k) # what is d_k?
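# note: d_k is the per-head query/key dimension; dividing by sqrt(d_k) keeps the dot products from growing too large before the softmax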
def forward(self, Q, K, V, att_mask): # att_mask: mask applied to the attention scores
score = torch.matmul(Q,K.transpose(-1,-2))/self.const # transpose: swaps the given dim0 and dim1
score.masked_fill_(att_mask, -1e9) # masked!
# masked_fill_(mask, value): mask is a boolean tensor; positions where the mask is True are filled with value
attn = self.softmax(score) # attn = attention distribution
context = torch.matmul(attn, V)
return context, attn
############################################################
# what is self?
# class Foo:
# def func1(): # it is not an error just because the self argument is missing
# print("fuckck")
# def func2(self):
# print("fuck!!")
# f = Foo() # create an instance of this class
# f.func2() => function 2 prints normally # instance method call -> func2's only parameter is self, so no argument needs to be passed in
# Python automatically passes the value for the self parameter of the method func2, so nothing needs to be supplied
# f.func1() -> raises an error: there is no self parameter, but Python still passes the instance automatically, which causes the error
# self inside a class is the instance that represents the class itself!
############################################################
class MultiHeadAttention(nn.Module):
def __init__(self):
super(MultiHeadAttention, self).__init__() # d_v = d_k
self.W_Q = nn.Linear(d_model, d_k * n_head) # run n_head attention heads in parallel # the width is d_k x n_head because the heads are concatenated afterwards
self.W_K = nn.Linear(d_model, d_k * n_head) #
self.W_V = nn.Linear(d_model, d_k * n_head)
def forward(self,Q, K, V, att_mask): # in the encoder Q, K and V are all the same; in the decoder K and V are the same and Q is different
residual = Q
batch_size = Q.size(0)
q_s = self.W_Q(Q).view(batch_size, -1, n_head, d_k).transpose(1,2)
k_s = self.W_K(K).view(batch_size, -1, n_head, d_k).transpose(1,2)
v_s = self.W_V(V).view(batch_size, -1, n_head, d_v).transpose(1,2)
att_mask = att_mask.unsqueeze(1).repeat(1, n_head, 1,1) # unsqueeze(1) inserts a head axis so the mask can be repeated for every head
context, attn = ScaledDotProduct()(q_s, k_s, v_s, att_mask)
context = context.transpose(1,2).contiguous().view(batch_size, -1, n_head * d_v)
# contiguous(): returns a contiguous tensor that holds the same data as self
# (needed after transpose so that view() can reshape the tensor)
output = nn.Linear(n_head*d_v, d_model)(context) # pass the concatenated heads through one more weight matrix
return nn.LayerNorm(d_model)(output + residual), attn
class PositionwiseFFNN(nn.Module):
def __init__(self):
super(PositionwiseFFNN, self).__init__() # what is conv1d, and how is it different from conv2d...
# W1 = d_model x d_ff
self.linear1 = nn.Conv1d(in_channels = d_model, out_channels = d_ff, kernel_size=1)
# W2 = d_ff x d_model
self.linear2 = nn.Conv1d(in_channels = d_ff, out_channels = d_model, kernel_size=1)
self.relu = nn.ReLU()
def forward(self, input):
residual = input
output = self.linear1(input.transpose(1,2))
output = self.relu(output)
output = self.linear2(output).transpose(1,2)
return nn.LayerNorm(d_model)(output + residual)
class EncoderLayer(nn.Module):
def __init__(self):
super(EncoderLayer,self).__init__()
self.enc_self_attn = MultiHeadAttention()
self.PWfeedforward = PositionwiseFFNN()
def forward(self, enc_input, enc_self_attn_mask):
enc_output, attn = self.enc_self_attn(enc_input, enc_input, enc_input, enc_self_attn_mask)
enc_output = self.PWfeedforward(enc_output)
return enc_output, attn
class Encoder(nn.Module):
def __init__(self):
super(Encoder,self).__init__()
self.src_emb = nn.Embedding(src_voca_size, d_model)
# Embedding : holds the lookup table used for the embedding
self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(src_len+1, d_model),freeze = True)
self.layer = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])
def forward(self, enc_input):
enc_output = self.src_emb(enc_input)+self.pos_emb(torch.LongTensor([[1,2,3,4,0]]))
enc_self_attn_mask = get_attn_pad_mask(enc_input, enc_input)
enc_self_attns = []
for layer in self.layer:
enc_output, enc_self_attn = layer(enc_output, enc_self_attn_mask)
enc_self_attns.append(enc_self_attn) # append works like a concat here
return enc_output, enc_self_attns
class DecoderLayer(nn.Module):
def __init__(self):
super(DecoderLayer, self).__init__()
self.dec_self_attn = MultiHeadAttention()
self.dec_enc_attn = MultiHeadAttention()
self.PWfeedforward = PositionwiseFFNN()
def forward(self, dec_input, enc_output, dec_self_attn_mask, dec_enc_attn_mask):
dec_output, dec_self_attn = self.dec_self_attn(dec_input, dec_input, dec_input, dec_self_attn_mask)
dec_output, dec_end_attn = self.dec_enc_attn(dec_output, enc_output, enc_output, dec_enc_attn_mask)
dec_output = self.PWfeedforward(dec_output)
return dec_output, dec_self_attn, dec_end_attn
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.tgt_emb = nn.Embedding(tgt_voca_size, d_model)
self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(tgt_len+1, d_model), freeze = True)
self.layers = nn.ModuleList([DecoderLayer() for _ in range(n_layers)])
def forward(self, dec_input, enc_input, enc_output):
dec_output = self.tgt_emb(dec_input)+self.pos_emb(torch.LongTensor([[5,1,2,3,4]]))
dec_self_attn_pad_mask = get_attn_pad_mask(dec_input, dec_input)
dec_self_attn_subsequent_mask = get_attn_subsequent_mask(dec_input)
dec_self_attn_mask = torch.gt((dec_self_attn_pad_mask+dec_self_attn_subsequent_mask),0)
dec_enc_attn_mask = get_attn_pad_mask(dec_input, enc_input)
dec_self_attns, dec_enc_attns = [],[]
for layer in self.layers:
dec_output, dec_self_attn, dec_enc_attn = layer(dec_outp | ut, enc_output, dec_self_attn_mask, dec_enc_attn_mask)
dec_self_attns.append(dec_self_attn)
dec_enc_attns.append(dec_enc_attn)
return dec_output, dec_self_attns, dec_enc_attns
class Transformer(nn.Module):
def __init__(self):
super(Transformer, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder()
self.projection = nn.Linear(d_model, tgt_voca_size, bias = False)
self.softmax = nn.Softmax()
def forward(self, enc_input, dec_input):
enc_output, enc_self_attn = self.encoder(enc_input)
dec_output, dec_self_attn, dec_enc_attn = self.decoder(dec_input, enc_input, enc_output)
dec_logit = self.projection(dec_output)
return dec_logit.view(-1, dec_logit.size(-1)), enc_self_attn, dec_self_attn, dec_enc_attn
model = Transformer()
| identifier_body |
|
Transformer_prac.py | .LongTensor(input_batch)), Variable(torch.LongTensor(output_batch)), Variable(torch.LongTensor(target_batch))
# Variable = autograd : requires_grad defaults to False, and it supports every API defined for tensors
# when x = Variable(torch.ones(2,2), requires_grad = True)
# to train the model parameter x, the loss computed by the loss function is stored in a Variable
# calling loss.backward, which computes ∂loss/∂x, makes pytorch store the gradient in the variable x
# requires_grad indicates whether the variable x is trainable! i.e., frozen variables cannot be trained
def get_sinusoid_encoding_table(n_position, d_model): # positional encoding
def cal_angle(position, hid_idx):
return position/np.power(10000, 2*(hid_idx // 2)/d_model) # 10000^(2i/d_model)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_model)] # hid_j runs from 0 up to d_model
sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:,0::2] = np.sin(sinusoid_table[:,0::2])
# x[startpoint:endpoint:skip] takes elements starting at startpoint, stepping by skip
# ex) l = range(20)
# l[1::3] = [1,4,7,10,13,16,19] is what it looks like
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:,1::2])
return torch.FloatTensor(sinusoid_table)
def get_attn_pad_mask(seq_q, seq_k):
batch_size, len_q = seq_q.size()
batch_size, len_k = seq_k.size()
pad_attn_mask = seq_k.data.eq(0).unsqueeze(1) # eq : element-wise equality
# x = torch.tensor([1,2,3,4]) # dim = 1
# torch.unsqueeze(x,0) = tensor([[1,2,3,4]])
# torch.unsqueeze(x,1) = tensor([[1],
# [2],
# [3],
# [4]])
return pad_attn_mask.expand(batch_size, len_q, len_k)
# x = torch.tensor([[1],[2],[3]])
# x.size() = torch.size([3,1])
# x.expand(3,4) = tensor([1,1,1,1],[2,2,2,2],[3,3,3,3])
# x.expand(-1,4) = tensor([1,1,1,1],[2,2,2,2],[3,3,3,3]) # -1 means that dimension's size stays unchanged
def get_attn_subsequent_mask(seq):
attn_shape = [seq.size(0), seq.size(1), seq.size(1)]
subsequent_mask = np.triu(np.ones(attn_shape), k=1) # zero out the entries below the k-th diagonal and keep the ones above it
# e.g. np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) gives
# array([[ 1, 2, 3],
# [ 4, 5, 6],
# [ 0, 8, 9],
# [ 0, 0, 12]]) is what the result looks like
subsequent_mask = torch.from_numpy(subsequent_mask).byte() # convert the numpy array to a torch tensor
return subsequent_mask
class ScaledDotProduct(nn.Module):
def __init__(self):
super(ScaledDotProduct,self).__init__()
self.softmax = nn.Softmax(dim = -1) # dim of softmax? the dimension along which the softmax is computed
# with NLLLoss, LogSoftmax is used instead
self.const = np.sqrt(d_k) # what is d_k?
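# note: d_k is the per-head query/key dimension; dividing by sqrt(d_k) keeps the dot products from growing too large before the softmax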
def forward(self, Q, K, V, att_mask): # att_mask: mask applied to the attention scores
score = torch.matmul(Q,K.transpose(-1,-2))/self.const # transpose: swaps the given dim0 and dim1
score.masked_fill_(att_mask, -1e9) # masked!
# masked_fill_(mask, value): mask is a boolean tensor; positions where the mask is True are filled with value
attn = self.softmax(score) # attn = attention distribution
context = torch.matmul(attn, V)
return context, attn
############################################################
# what is self?
# class Foo:
# def func1(): # it is not an error just because the self argument is missing
# print("fuckck")
# def func2(self):
# print("fuck!!")
# f = Foo() # create an instance of this class
# f.func2() => function 2 prints normally # instance method call -> func2's only parameter is self, so no argument needs to be passed in
# Python automatically passes the value for the self parameter of the method func2, so nothing needs to be supplied
# f.func1() -> raises an error: there is no self parameter, but Python still passes the instance automatically, which causes the error
# self inside a class is the instance that represents the class itself!
############################################################
class MultiHeadAttention(nn.Module):
def __init__(self):
super(MultiHeadAttention, self).__init__() # d_v = d_k
self.W_Q = nn.Linear(d_model, d_k * n_head) # run n_head attention heads in parallel # the width is d_k x n_head because the heads are concatenated afterwards
self.W_K = nn.Linear(d_model, d_k * n_head) #
self.W_V = nn.Linear(d_model, d_k * n_head)
def forward(self,Q, K, V, att_mask): # in the encoder Q, K and V are all the same; in the decoder K and V are the same and Q is different
residual = Q
batch_size = Q.size(0)
q_s = self.W_Q(Q).view(batch_size, -1, n_head, d_k).transpose(1,2)
k_s = self.W_K(K).view(batch_size, -1, n_head, d_k).transpose(1,2)
v_s = self.W_V(V).view(batch_size, -1, n_head, d_v).transpose(1,2)
att_mask = att_mask.unsqueeze(1).repeat(1, n_head, 1,1) # unsqueeze(1) inserts a head axis so the mask can be repeated for every head
context, attn = ScaledDotProduct()(q_s, k_s, v_s, att_mask)
context = context.transpose(1,2).contiguous().view(batch_size, -1, n_head * d_v)
# contiguous(): returns a contiguous tensor that holds the same data as self
# (needed after transpose so that view() can reshape the tensor)
output = nn.Linear(n_head*d_v, d_model)(context) # pass the concatenated heads through one more weight matrix
return nn.LayerNorm(d_model)(output + residual), attn
class PositionwiseFFNN(nn.Module):
def __init__(self):
super(PositionwiseFFNN, self).__init__() # what is conv1d, and how is it different from conv2d...
# W1 = d_model x d_ff
self.linear1 = nn.Conv1d(in_channels = d_model, out_channels = d_ff, kernel_size=1)
# W2 = d_ff x d_model
self.linear2 = nn.Conv1d(in_channels = d_ff, out_channels = d_model, kernel_size=1)
self.relu = nn.ReLU()
def forward(self, input):
residual = input
output = self.linear1(input.transpose(1,2))
output = self.relu(output)
output = self.linear2(output).transpose(1,2)
return nn.LayerNorm(d_model)(output + residual)
class EncoderLayer(nn.Module):
def __init__(self):
super(EncoderLayer,self).__init__()
self.enc_self_attn = MultiHeadAttention()
self.PWfeedforward = PositionwiseFFNN()
def forward(self, enc_input, enc_self_attn_mask):
enc_output, attn = self.enc_self_attn(enc_input, enc_input, enc_input, enc_self_attn_mask)
enc_output = self.PWfeedforward(enc_output)
return enc_output, attn
class Encoder(nn.Module):
def __init__(self):
super(Encoder,self).__init__()
self.src_emb = nn.Embedding(src_voca_size, d_model)
# Embedding : ์๋ฒ ๋ฉ์ ํ | anpose: ์ฃผ์ด์ง dim0 | identifier_name |
Transformer_prac.py | (ScaledDotProduct,self).__init__()
self.softmax = nn.Softmax(dim = -1) # dim of softmax? the dimension along which the softmax is computed
# with NLLLoss, LogSoftmax is used instead
self.const = np.sqrt(d_k) # what is d_k?
def forward(self, Q, K, V, att_mask): # att_mask: mask applied to the attention scores
score = torch.matmul(Q,K.transpose(-1,-2))/self.const # transpose: swaps the given dim0 and dim1
score.masked_fill_(att_mask, -1e9) # masked!
# masked_fill_(mask, value): mask is a boolean tensor; positions where the mask is True are filled with value
attn = self.softmax(score) # attn = attention distribution
context = torch.matmul(attn, V)
return context, attn
############################################################
# what is self?
# class Foo:
# def func1(): # it is not an error just because the self argument is missing
# print("fuckck")
# def func2(self):
# print("fuck!!")
# f = Foo() # create an instance of this class
# f.func2() => function 2 prints normally # instance method call -> func2's only parameter is self, so no argument needs to be passed in
# Python automatically passes the value for the self parameter of the method func2, so nothing needs to be supplied
# f.func1() -> raises an error: there is no self parameter, but Python still passes the instance automatically, which causes the error
# self inside a class is the instance that represents the class itself!
############################################################
class MultiHeadAttention(nn.Module):
def __init__(self):
super(MultiHeadAttention, self).__init__() # d_v = d_k
self.W_Q = nn.Linear(d_model, d_k * n_head) # run n_head attention heads in parallel # the width is d_k x n_head because the heads are concatenated afterwards
self.W_K = nn.Linear(d_model, d_k * n_head) #
self.W_V = nn.Linear(d_model, d_k * n_head)
def forward(self,Q, K, V, att_mask): # in the encoder Q, K and V are all the same; in the decoder K and V are the same and Q is different
residual = Q
batch_size = Q.size(0)
q_s = self.W_Q(Q).view(batch_size, -1, n_head, d_k).transpose(1,2)
k_s = self.W_K(K).view(batch_size, -1, n_head, d_k).transpose(1,2)
v_s = self.W_V(V).view(batch_size, -1, n_head, d_v).transpose(1,2)
att_mask = att_mask.unsqueeze(1).repeat(1, n_head, 1,1) # unsqueeze(1) inserts a head axis so the mask can be repeated for every head
context, attn = ScaledDotProduct()(q_s, k_s, v_s, att_mask)
context = context.transpose(1,2).contiguous().view(batch_size, -1, n_head * d_v)
# contiguous(): returns a contiguous tensor that holds the same data as self
# (needed after transpose so that view() can reshape the tensor)
output = nn.Linear(n_head*d_v, d_model)(context) # pass the concatenated heads through one more weight matrix
return nn.LayerNorm(d_model)(output + residual), attn
class PositionwiseFFNN(nn.Module):
def __init__(self):
super(PositionwiseFFNN, self).__init__() # what is conv1d, and how is it different from conv2d...
# W1 = d_model x d_ff
self.linear1 = nn.Conv1d(in_channels = d_model, out_channels = d_ff, kernel_size=1)
# W2 = d_ff x d_model
self.linear2 = nn.Conv1d(in_channels = d_ff, out_channels = d_model, kernel_size=1)
self.relu = nn.ReLU()
def forward(self, input):
residual = input
output = self.linear1(input.transpose(1,2))
output = self.relu(output)
output = self.linear2(output).transpose(1,2)
return nn.LayerNorm(d_model)(output + residual)
class EncoderLayer(nn.Module):
def __init__(self):
super(EncoderLayer,self).__init__()
self.enc_self_attn = MultiHeadAttention()
self.PWfeedforward = PositionwiseFFNN()
def forward(self, enc_input, enc_self_attn_mask):
enc_output, attn = self.enc_self_attn(enc_input, enc_input, enc_input, enc_self_attn_mask)
enc_output = self.PWfeedforward(enc_output)
return enc_output, attn
class Encoder(nn.Module):
def __init__(self):
super(Encoder,self).__init__()
self.src_emb = nn.Embedding(src_voca_size, d_model)
# Embedding : holds the lookup table used for the embedding
self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(src_len+1, d_model),freeze = True)
self.layer = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])
def forward(self, enc_input):
enc_output = self.src_emb(enc_input)+self.pos_emb(torch.LongTensor([[1,2,3,4,0]]))
enc_self_attn_mask = get_attn_pad_mask(enc_input, enc_input)
enc_self_attns = []
for layer in self.layer:
enc_output, enc_self_attn = layer(enc_output, enc_self_attn_mask)
enc_self_attns.append(enc_self_attn) # append works like a concat here
return enc_output, enc_self_attns
class DecoderLayer(nn.Module):
def __init__(self):
super(DecoderLayer, self).__init__()
self.dec_self_attn = MultiHeadAttention()
self.dec_enc_attn = MultiHeadAttention()
self.PWfeedforward = PositionwiseFFNN()
def forward(self, dec_input, enc_output, dec_self_attn_mask, dec_enc_attn_mask):
dec_output, dec_self_attn = self.dec_self_attn(dec_input, dec_input, dec_input, dec_self_attn_mask)
dec_output, dec_end_attn = self.dec_enc_attn(dec_output, enc_output, enc_output, dec_enc_attn_mask)
dec_output = self.PWfeedforward(dec_output)
return dec_output, dec_self_attn, dec_end_attn
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.tgt_emb = nn.Embedding(tgt_voca_size, d_model)
self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(tgt_len+1, d_model), freeze = True)
self.layers = nn.ModuleList([DecoderLayer() for _ in range(n_layers)])
def forward(self, dec_input, enc_input, enc_output):
dec_output = self.tgt_emb(dec_input)+self.pos_emb(torch.LongTensor([[5,1,2,3,4]]))
dec_self_attn_pad_mask = get_attn_pad_mask(dec_input, dec_input)
dec_self_attn_subsequent_mask = get_attn_subsequent_mask(dec_input)
dec_self_attn_mask = torch.gt((dec_self_attn_pad_mask+dec_self_attn_subsequent_mask),0)
dec_enc_attn_mask = get_attn_pad_mask(dec_input, enc_input)
dec_self_attns, dec_enc_attns = [],[]
for layer in self.layers:
dec_output, dec_self_attn, dec_enc_attn = layer(dec_output, enc_output, dec_self_attn_mask, dec_enc_attn_mask)
dec_self_attns.append(dec_self_attn)
dec_enc_attns.append(dec_enc_attn)
return dec_output, dec_self_attns, dec_enc_attns
class Transformer(nn.Module):
def __init__(self):
super(Transformer, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder()
self.projection = nn.Linear(d_model, tgt_voca_size, bias = False)
self.softmax = nn.Softmax()
def forward(self, enc_input, dec_input):
enc_output, enc_self_attn = self.encoder(enc_input)
dec_output, dec_self_attn, dec_enc_attn = self.decoder(dec_input, enc_input, enc_output)
dec_logit = self.projection(dec_output)
return dec_logit.view(-1, dec_logit.size(-1)), enc_self_attn, dec_self_attn, dec_enc_attn
model = Transformer()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model. | parameters(), lr = 0.001)
for epoch in range(20):
optimizer.zero_grad()
enc_input, dec_input, target_batch = make_batch(sentence)
outputs, enc_self_attns, dec_self_attns, dec_enc_attns = m | conditional_block |